SearchEngine.java revision 1327
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * See LICENSE.txt included in this distribution for the specific
 * language governing permissions and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at LICENSE.txt.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems. All rights reserved.
 * Use is subject to license terms.
 */

package org.opensolaris.opengrok.search;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.zip.GZIPInputStream;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.opensolaris.opengrok.analysis.CompatibleAnalyser;
import org.opensolaris.opengrok.analysis.Definitions;
import org.opensolaris.opengrok.analysis.FileAnalyzer.Genre;
import org.opensolaris.opengrok.analysis.TagFilter;
import org.opensolaris.opengrok.configuration.Project;
import org.opensolaris.opengrok.configuration.RuntimeEnvironment;
import org.opensolaris.opengrok.history.HistoryException;
import org.opensolaris.opengrok.search.Summary.Fragment;
import org.opensolaris.opengrok.search.context.Context;
import org.opensolaris.opengrok.search.context.HistoryContext;
import org.opensolaris.opengrok.util.IOUtils;

/**
 * This is an encapsulation of the details on how to search in the index
 * database.
 *
 * @author Trond Norbye 2005
 * @author Lubos Kosco 2010 - upgrade to lucene 3.0.0
 * @author Lubos Kosco 2011 - upgrade to lucene 3.5.0
 */
public class SearchEngine {

    private static final Logger logger = Logger.getLogger(SearchEngine.class.getName());

    /** Message text used when logging exceptions thrown when searching. */
    private static final String SEARCH_EXCEPTION_MSG = "Exception searching";

    // NOTE: the constant below will need to be changed after the next lucene
    // upgrade if they increase the version - every change makes us
    // incompatible with the old index, and we need to ask for a reindex.
    /** Version of the lucene index, common for the whole application. */
    public static final Version LUCENE_VERSION = Version.LUCENE_35;

    /**
     * Holds value of property definition.
     */
    private String definition;

    /**
     * Holds value of property file.
     */
    private String file;

    /**
     * Holds value of property freetext.
     */
    private String freetext;

    /**
     * Holds value of property history.
     */
    private String history;

    /**
     * Holds value of property symbol.
     */
    private String symbol;

    /**
     * The Lucene query built from the search criteria.
     */
    private Query query;

    private final CompatibleAnalyser analyzer = new CompatibleAnalyser();
    private Context sourceContext;
    private HistoryContext historyContext;
    private Summarizer summarizer;
    // internal structure to hold the results from lucene
    private final List<org.apache.lucene.document.Document> docs;
    private final char[] content = new char[1024 * 8];
    private String source;
    private String data;
    private static final boolean docsScoredInOrder = false;

    int hitsPerPage = RuntimeEnvironment.getInstance().getHitsPerPage();
    int cachePages = RuntimeEnvironment.getInstance().getCachePages();
    int totalHits = 0;

    private ScoreDoc[] hits;
    private TopScoreDocCollector collector;
    private IndexSearcher searcher;
    boolean allCollected;

    /**
     * Creates a new instance of SearchEngine.
     */
    public SearchEngine() {
        docs = new ArrayList<org.apache.lucene.document.Document>();
    }
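
    /*
     * Illustrative usage sketch (not part of the original file; the method
     * and field names below are the real ones from this class): set one or
     * more criteria, execute the search, then fetch a page of results.
     *
     *   SearchEngine engine = new SearchEngine();
     *   engine.setFreetext("alloc");          // or setDefinition(), setSymbol(), ...
     *   if (engine.isValidQuery()) {
     *       int nhits = engine.search();
     *       List<Hit> results = new ArrayList<Hit>();
     *       engine.results(0, Math.min(nhits, engine.hitsPerPage), results);
     *   }
     */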

    /**
     * Create a QueryBuilder using the fields that have been set on this
     * SearchEngine.
     *
     * @return a query builder
     */
    private QueryBuilder createQueryBuilder() {
        return new QueryBuilder()
                .setFreetext(freetext)
                .setDefs(definition)
                .setRefs(symbol)
                .setPath(file)
                .setHist(history);
    }
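
    /*
     * Sketch of how the builder composes the criteria (assumed behavior of
     * QueryBuilder; the exact field names and query syntax depend on its
     * implementation): each non-null criterion contributes a clause, so e.g.
     * definition = "read" combined with file = "ufs" would build roughly
     *
     *   +defs:read +path:ufs
     */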

    public boolean isValidQuery() {
        boolean ret;
        try {
            query = createQueryBuilder().build();
            ret = (query != null);
        } catch (Exception e) {
            ret = false;
        }

        return ret;
    }

    /**
     * Search a single index database.
     *
     * @param paging whether to use paging (if yes, the first X pages will load faster)
     * @param root the root directory of the index database to search
     * @throws IOException
     */
    private void searchSingleDatabase(File root, boolean paging) throws IOException {
        IndexReader ireader = IndexReader.open(FSDirectory.open(root), true);
        searcher = new IndexSearcher(ireader);
        collector = TopScoreDocCollector.create(hitsPerPage * cachePages, docsScoredInOrder);
        searcher.search(query, collector);
        totalHits = collector.getTotalHits();
        if (!paging && totalHits > 0) {
            collector = TopScoreDocCollector.create(totalHits, docsScoredInOrder);
            searcher.search(query, collector);
        }
        hits = collector.topDocs().scoreDocs;
        for (int i = 0; i < hits.length; i++) {
            int docId = hits[i].doc;
            Document d = searcher.doc(docId);
            docs.add(d);
        }
    }
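
    /*
     * Sizing note (illustrative, assuming the common defaults of
     * hitsPerPage = 25 and cachePages = 5): the paging pass above collects at
     * most 25 * 5 = 125 hits; only when paging is off does the second pass
     * re-run the search with a collector sized for all of totalHits.
     */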

    /**
     * Search the index databases of multiple projects.
     *
     * @param paging whether to use paging (if yes, the first X pages will load faster)
     * @param root list of projects to search
     * @throws IOException
     */
    private void searchMultiDatabase(List<Project> root, boolean paging) throws IOException {
        IndexReader[] subreaders = new IndexReader[root.size()];
        File droot = new File(RuntimeEnvironment.getInstance().getDataRootFile(), "index");
        int ii = 0;
        for (Project project : root) {
            IndexReader ireader = IndexReader.open(FSDirectory.open(new File(droot, project.getPath())), true);
            subreaders[ii++] = ireader;
        }
        MultiReader searchables = new MultiReader(subreaders, true);
        if (Runtime.getRuntime().availableProcessors() > 1) {
            //TODO there might be a better way of counting this - or we should honor the command line option here too!
            int noThreads = 2 + (2 * Runtime.getRuntime().availableProcessors());
            ExecutorService executor = Executors.newFixedThreadPool(noThreads);
            searcher = new IndexSearcher(searchables, executor);
        } else {
            searcher = new IndexSearcher(searchables);
        }
        collector = TopScoreDocCollector.create(hitsPerPage * cachePages, docsScoredInOrder);
        searcher.search(query, collector);
        totalHits = collector.getTotalHits();
        if (!paging && totalHits > 0) {
            collector = TopScoreDocCollector.create(totalHits, docsScoredInOrder);
            searcher.search(query, collector);
        }
        hits = collector.topDocs().scoreDocs;
        for (int i = 0; i < hits.length; i++) {
            int docId = hits[i].doc;
            Document d = searcher.doc(docId);
            docs.add(d);
        }
    }
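
    /*
     * Illustrative note on the thread count heuristic above: on a 4-core
     * machine the parallel IndexSearcher gets a fixed pool of
     * 2 + (2 * 4) = 10 threads for searching the per-project subreaders of
     * the MultiReader concurrently.
     */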

    public String getQuery() {
        return query.toString();
    }

    /**
     * Execute a search. Before calling this function, you must set the
     * appropriate search criteria with the set-functions. Note that this
     * search will only return the first cachePages * hitsPerPage hits; to
     * retrieve hits beyond that range, call results() with a larger end index.
     *
     * @return the number of hits
     */
    public int search() {
        source = RuntimeEnvironment.getInstance().getSourceRootPath();
        data = RuntimeEnvironment.getInstance().getDataRootPath();
        docs.clear();

        QueryBuilder queryBuilder = createQueryBuilder();

        try {
            query = queryBuilder.build();
            if (query != null) {
                RuntimeEnvironment env = RuntimeEnvironment.getInstance();
                File root = new File(env.getDataRootFile(), "index");

                if (env.hasProjects()) {
                    // search all projects
                    //TODO support paging per project (in search.java)
                    //TODO optimize if there is only one project by falling back to searchSingleDatabase?
                    searchMultiDatabase(env.getProjects(), false);
                } else {
                    // search the index database
                    searchSingleDatabase(root, true);
                }
            }
        } catch (Exception e) {
            logger.warning(SEARCH_EXCEPTION_MSG + ": " + e.getMessage());
            logger.log(Level.FINE, "search", e);
        }

        if (!docs.isEmpty()) {
            sourceContext = null;
            summarizer = null;
            try {
                sourceContext = new Context(query, queryBuilder.getQueries());
                if (sourceContext.isEmpty()) {
                    sourceContext = null;
                }
                summarizer = new Summarizer(query, analyzer);
            } catch (Exception e) {
                logger.warning("An error occurred while creating summary: "
                        + e.getMessage());
                logger.log(Level.FINE, "search", e);
            }

            historyContext = null;
            try {
                historyContext = new HistoryContext(query);
                if (historyContext.isEmpty()) {
                    historyContext = null;
                }
            } catch (Exception e) {
                logger.warning("An error occurred while getting history context: "
                        + e.getMessage());
                logger.log(Level.FINE, "search", e);
            }
        }
        int count = hits == null ? 0 : hits.length;
        return count;
    }

    /**
     * Get results for a range of the hit list. If no search was started
     * before, no results are returned. This method will requery the index if
     * end is beyond the range fetched by the initial search() call, so a
     * performance hit applies when asking for results on pages beyond
     * cachePages. end must be greater than start.
     *
     * @param start start of the hit list
     * @param end end of the hit list
     * @param ret the list that is filled with the results from start to end;
     * it is cleared first, and left empty if no search was started
     */
    public void results(int start, int end, List<Hit> ret) {

        // return if no search() was done before
        if (hits == null || (end < start)) {
            ret.clear();
            return;
        }

        ret.clear();

        //TODO check whether the test below should also cover end == hits.length
        if (end > hits.length && !allCollected) {
            // requery: the caller wants more than the cached pages
            collector = TopScoreDocCollector.create(totalHits, docsScoredInOrder);
            try {
                searcher.search(query, collector);
            } catch (Exception e) { // this should never be hit, since search() would have failed first
                logger.warning(SEARCH_EXCEPTION_MSG + ": " + e.getMessage());
                logger.log(Level.FINE, "results", e);
            }
            hits = collector.topDocs().scoreDocs;
            Document d = null;
            for (int i = start; i < hits.length; i++) {
                int docId = hits[i].doc;
                try {
                    d = searcher.doc(docId);
                } catch (Exception e) {
                    logger.warning(SEARCH_EXCEPTION_MSG + ": " + e.getMessage());
                    logger.log(Level.FINE, "results", e);
                }
                docs.add(d);
            }
            allCollected = true;
        }

        //TODO generation of ret (results) could be cached, and consumers of the
        // engine would just print them in whatever form they need; this way we
        // could get rid of docs. The only problem is that the count of docs is
        // usually smaller than the number of results.
        for (int ii = start; ii < end; ++ii) {
            boolean alt = (ii % 2 == 0);
            boolean hasContext = false;
            try {
                Document doc = docs.get(ii);
                String filename = doc.get("path");

                Genre genre = Genre.get(doc.get("t"));
                Definitions tags = null;
                Fieldable tagsField = doc.getFieldable("tags");
                if (tagsField != null) {
                    tags = Definitions.deserialize(tagsField.getBinaryValue());
                }
                int nhits = docs.size();

                if (sourceContext != null) {
                    try {
                        if (Genre.PLAIN == genre && (source != null)) {
                            hasContext = sourceContext.getContext(
                                    new InputStreamReader(new FileInputStream(source + filename)),
                                    null, null, null, filename, tags, nhits > 100, ret);
                        } else if (Genre.XREFABLE == genre && data != null && summarizer != null) {
                            int l = 0;
                            Reader r = null;
                            if (RuntimeEnvironment.getInstance().isCompressXref()) {
                                r = new TagFilter(new BufferedReader(new InputStreamReader(
                                        new GZIPInputStream(new FileInputStream(data + "/xref" + filename + ".gz")))));
                            } else {
                                r = new TagFilter(new BufferedReader(new FileReader(data + "/xref" + filename)));
                            }
                            try {
                                l = r.read(content);
                            } finally {
                                IOUtils.close(r);
                            }
                            //TODO FIX the fragmenter below according to either the summarizer or the
                            // context (to get line numbers; might be hard, since the xref writers will
                            // need to be fixed too - they generate just one line of html code now)
                            Summary sum = summarizer.getSummary(new String(content, 0, l));
                            Fragment[] fragments = sum.getFragments();
                            for (int jj = 0; jj < fragments.length; ++jj) {
                                String match = fragments[jj].toString();
                                if (match.length() > 0) {
                                    if (!fragments[jj].isEllipsis()) {
                                        Hit hit = new Hit(filename, fragments[jj].toString(), "", true, alt);
                                        ret.add(hit);
                                    }
                                    hasContext = true;
                                }
                            }
                        } else {
                            logger.warning("Unknown genre '" + genre + "' for '"
                                    + filename + "'");
                            hasContext |= sourceContext.getContext(null, null, null, null, filename, tags, false, ret);
                        }
                    } catch (FileNotFoundException exp) {
                        logger.warning("Couldn't read summary from '"
                                + filename + "': " + exp.getMessage());
                        hasContext |= sourceContext.getContext(null, null, null, null, filename, tags, false, ret);
                    }
                }
                if (historyContext != null) {
                    hasContext |= historyContext.getContext(source + filename, filename, ret);
                }
                if (!hasContext) {
                    ret.add(new Hit(filename, "...", "", false, alt));
                }
            } catch (IOException e) {
                logger.warning(SEARCH_EXCEPTION_MSG + ": " + e.getMessage());
                logger.log(Level.FINE, "results", e);
            } catch (ClassNotFoundException e) {
                logger.warning(SEARCH_EXCEPTION_MSG + ": " + e.getMessage());
                logger.log(Level.FINE, "results", e);
            } catch (HistoryException e) {
                logger.warning(SEARCH_EXCEPTION_MSG + ": " + e.getMessage());
                logger.log(Level.FINE, "results", e);
            }
        }

    }
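
    /*
     * Paging sketch (illustrative only, assuming nhits came from a prior
     * engine.search() call): requesting a range beyond the initially cached
     * hitsPerPage * cachePages window triggers the requery above exactly
     * once, after which allCollected is set.
     *
     *   List<Hit> page = new ArrayList<Hit>();
     *   for (int from = 0; from < nhits; from += engine.hitsPerPage) {
     *       engine.results(from, Math.min(nhits, from + engine.hitsPerPage), page);
     *       // render page ...
     *   }
     */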

    /**
     * Getter for property definition.
     *
     * @return Value of property definition.
     */
    public String getDefinition() {
        return this.definition;
    }

    /**
     * Setter for property definition.
     *
     * @param definition New value of property definition.
     */
    public void setDefinition(String definition) {
        this.definition = definition;
    }

    /**
     * Getter for property file.
     *
     * @return Value of property file.
     */
    public String getFile() {
        return this.file;
    }

    /**
     * Setter for property file.
     *
     * @param file New value of property file.
     */
    public void setFile(String file) {
        this.file = file;
    }

    /**
     * Getter for property freetext.
     *
     * @return Value of property freetext.
     */
    public String getFreetext() {
        return this.freetext;
    }

    /**
     * Setter for property freetext.
     *
     * @param freetext New value of property freetext.
     */
    public void setFreetext(String freetext) {
        this.freetext = freetext;
    }

    /**
     * Getter for property history.
     *
     * @return Value of property history.
     */
    public String getHistory() {
        return this.history;
    }

    /**
     * Setter for property history.
     *
     * @param history New value of property history.
     */
    public void setHistory(String history) {
        this.history = history;
    }

    /**
     * Getter for property symbol.
     *
     * @return Value of property symbol.
     */
    public String getSymbol() {
        return this.symbol;
    }

    /**
     * Setter for property symbol.
     *
     * @param symbol New value of property symbol.
     */
    public void setSymbol(String symbol) {
        this.symbol = symbol;
    }
}