// Results.java revision 1195
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * See LICENSE.txt included in this distribution for the specific
 * language governing permissions and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at LICENSE.txt.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Portions Copyright 2011 Jens Elkner.
 */
package org.opensolaris.opengrok.search;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.Writer;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.logging.Level;
import java.util.zip.GZIPInputStream;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Searcher;
import org.opensolaris.opengrok.OpenGrokLogger;
import org.opensolaris.opengrok.analysis.Definitions;
import org.opensolaris.opengrok.analysis.FileAnalyzer.Genre;
import org.opensolaris.opengrok.analysis.TagFilter;
import org.opensolaris.opengrok.history.HistoryException;
import org.opensolaris.opengrok.util.IOUtils;
import org.opensolaris.opengrok.web.Prefix;
import org.opensolaris.opengrok.web.SearchHelper;
import org.opensolaris.opengrok.web.Util;
851N/A/**
810N/A * @author Chandan slightly rewritten by Lubos Kosco
810N/A */
810N/Apublic final class Results {
810N/A private Results() {
810N/A // Util class, should not be constructed
851N/A }
810N/A
851N/A /**
810N/A * Create a has map keyed by the directory of the document found.
810N/A * @param searcher searcher to use.
810N/A * @param hits hits produced by the given searcher's search
810N/A * @param startIdx the index of the first hit to check
810N/A * @param stopIdx the index of the last hit to check
810N/A * @return a (directory, hitDocument) hashmap
851N/A * @throws CorruptIndexException
810N/A * @throws IOException
851N/A */
810N/A private static LinkedHashMap<String, ArrayList<Document>>
851N/A createMap(Searcher searcher, ScoreDoc[] hits, int startIdx, int stopIdx)
810N/A throws CorruptIndexException, IOException
810N/A {
810N/A LinkedHashMap<String, ArrayList<Document>> dirHash =
810N/A new LinkedHashMap<String, ArrayList<Document>>();
810N/A for (int i = startIdx; i < stopIdx; i++ ) {
810N/A int docId = hits[i].doc;
810N/A Document doc = searcher.doc(docId);
810N/A String rpath = doc.get("path");
851N/A String parent = rpath.substring(0, rpath.lastIndexOf('/'));
851N/A ArrayList<Document> dirDocs = dirHash.get(parent);
851N/A if (dirDocs == null) {
851N/A dirDocs = new ArrayList<Document>();
851N/A dirHash.put(parent, dirDocs);
98N/A }
824N/A dirDocs.add(doc);
98N/A }
98N/A return dirHash;
830N/A }
851N/A
824N/A private static String getTags(File basedir, String path, boolean compressed) {
571N/A char[] content = new char[1024 * 8];
98N/A FileInputStream fis = null;
98N/A GZIPInputStream gis = null;
98N/A FileReader fr = null;
341N/A Reader r = null;
366N/A // Grrrrrrrrrrrrr - TagFilter takes Readers, only!!!!
354N/A // Why? Is it CS sensible?
425N/A try {
325N/A if (compressed) {
456N/A fis = new FileInputStream(new File(basedir, path + ".gz"));
577N/A gis = new GZIPInputStream(fis);
325N/A r = new TagFilter(new BufferedReader(new InputStreamReader(gis)));
354N/A } else {
278N/A fr = new FileReader(new File(basedir, path));
98N/A r = new TagFilter(new BufferedReader(fr));
543N/A }
98N/A int len = r.read(content);
749N/A return new String(content, 0, len);
749N/A } catch (Exception e) {
98N/A OpenGrokLogger.getLogger().log(
151N/A Level.WARNING, "An error reading tags from " + basedir + path
137N/A + (compressed ? ".gz" : ""), e);
153N/A } finally {
210N/A IOUtils.close(r);
366N/A IOUtils.close(gis);
98N/A IOUtils.close(fis);
179N/A IOUtils.close(fr);
269N/A }
126N/A return "";
98N/A }
98N/A
287N/A /**
366N/A * Prints out results in html form. The following search helper fields are
499N/A * required to be properly initialized:
123N/A * <ul>
98N/A * <li>{@link SearchHelper#dataRoot}</li>
98N/A * <li>{@link SearchHelper#contextPath}</li>
98N/A * <li>{@link SearchHelper#searcher}</li>
98N/A * <li>{@link SearchHelper#hits}</li>
851N/A * <li>{@link SearchHelper#historyContext} (ignored if {@code null})</li>
577N/A * <li>{@link SearchHelper#sourceContext} (ignored if {@code null})</li>
498N/A * <li>{@link SearchHelper#summerizer} (if sourceContext is not {@code null})</li>
98N/A * <li>{@link SearchHelper#compressed} (if sourceContext is not {@code null})</li>
98N/A * <li>{@link SearchHelper#sourceRoot} (if sourceContext or historyContext
98N/A * is not {@code null})</li>
851N/A * </ul>
98N/A *
744N/A * @param out write destination
747N/A * @param sh search helper which has all required fields set
744N/A * @param start index of the first hit to print
744N/A * @param end index of the last hit to print
744N/A * @throws HistoryException
493N/A * @throws IOException
493N/A * @throws ClassNotFoundException
98N/A */
493N/A public static void prettyPrint(Writer out, SearchHelper sh, int start,
493N/A int end)
493N/A throws HistoryException, IOException, ClassNotFoundException
810N/A {
String ctxE = Util.URIEncodePath(sh.contextPath);
String xrefPrefix = sh.contextPath + Prefix.XREF_P;
String morePrefix = sh.contextPath + Prefix.MORE_P;
String xrefPrefixE = ctxE + Prefix.XREF_P;
String histPrefixE = ctxE + Prefix.HIST_L;
String rawPrefixE = ctxE + Prefix.RAW_P;
File xrefDataDir = new File(sh.dataRoot, Prefix.XREF_P.toString());
LinkedHashMap<String, ArrayList<Document>> dirHash =
createMap(sh.searcher, sh.hits, start, end);
for (Map.Entry<String, ArrayList<Document>> entry : dirHash.entrySet())
{
String parent = entry.getKey();
out.write("<tr class=\"dir\"><td colspan=\"3\"><a href=\"");
out.write(xrefPrefixE);
out.write(Util.URIEncodePath(parent));
out.write("/\">");
out.write(parent); // htmlize ???
out.write("/</a>");
if (sh.desc != null) {
out.write(" - <i>");
out.write(sh.desc.get(parent)); // htmlize ???
out.write("</i>");
}
out.write("</td></tr>");
for (Document doc : entry.getValue()) {
String rpath = doc.get("path");
String rpathE = Util.URIEncodePath(rpath);
out.write("<tr><td class=\"q\"><a href=\"");
out.write(histPrefixE);
out.write(rpathE);
out.write("\" title=\"History\">H</a> <a href=\"");
out.write(xrefPrefixE);
out.write(rpathE);
out.write("?a=true\" title=\"Annotate\">A</a> <a href=\"");
out.write(rawPrefixE);
out.write(rpathE);
out.write("\" title=\"Download\">D</a>");
out.write("</td>");
out.write("<td class=\"f\"><a href=\"");
out.write(xrefPrefixE);
out.write(rpathE);
out.write("\">");
out.write(rpath.substring(rpath.lastIndexOf('/') + 1)); // htmlize ???
out.write("</a></td><td><tt class=\"con\">");
if (sh.sourceContext != null) {
Genre genre = Genre.get(doc.get("t"));
Definitions tags = null;
Fieldable tagsField = doc.getFieldable("tags");
if (tagsField != null) {
tags = Definitions.deserialize(tagsField.getBinaryValue());
}
if (Genre.XREFABLE == genre && sh.summerizer != null) {
String xtags = getTags(xrefDataDir, rpath, sh.compressed);
// FIXME use Highlighter from lucene contrib here,
// instead of summarizer, we'd also get rid of
// apache lucene in whole source ...
out.write(sh.summerizer.getSummary(xtags).toString());
} else if (Genre.HTML == genre && sh.summerizer != null) {
String htags = getTags(sh.sourceRoot, rpath, false);
out.write(sh.summerizer.getSummary(htags).toString());
} else {
FileReader r = genre == Genre.PLAIN
? new FileReader(new File(sh.sourceRoot, rpath))
: null;
sh.sourceContext.getContext(r, out, xrefPrefix,
morePrefix, rpath, tags, true, null);
}
}
if (sh.historyContext != null) {
sh.historyContext.getContext(new File(sh.sourceRoot, rpath),
rpath, out, sh.contextPath);
}
out.write("</tt></td></tr>\n");
}
}
}
}