/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * See LICENSE.txt included in this distribution for the specific
 * language governing permissions and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at LICENSE.txt.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 *
 * Portions Copyright 2011 Jens Elkner.
 */
package org.opensolaris.opengrok.search;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.io.Writer;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.opensolaris.opengrok.analysis.Definitions;
import org.opensolaris.opengrok.analysis.FileAnalyzer.Genre;
import org.opensolaris.opengrok.analysis.TagFilter;
import org.opensolaris.opengrok.analysis.XrefReader;
import org.opensolaris.opengrok.history.HistoryException;
import org.opensolaris.opengrok.util.IOUtils;
import org.opensolaris.opengrok.web.Prefix;
import org.opensolaris.opengrok.web.SearchHelper;
import org.opensolaris.opengrok.web.Util;

/**
 * @author Chandan, slightly rewritten by Lubos Kosco
 */
public final class Results {
    private static final Logger logger = Logger.getLogger(Results.class.getName());

    private Results() {
        // Utility class, should not be constructed
    }

    /**
     * Create a hash map keyed by the directory of the documents found.
     *
     * @param searcher searcher to use.
     * @param hits hits produced by the given searcher's search
     * @param startIdx the index of the first hit to check
     * @param stopIdx the index just past the last hit to check (exclusive)
     * @return a (directory, hitDocument) hashmap
     * @throws CorruptIndexException
     * @throws IOException
     */
    private static Map<String, ArrayList<Document>>
            createMap(IndexSearcher searcher, ScoreDoc[] hits, int startIdx, int stopIdx)
            throws CorruptIndexException, IOException
    {
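        // LinkedHashMap preserves insertion order, so directories appear in
        // the order of their highest-ranked hit.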
        LinkedHashMap<String, ArrayList<Document>> dirHash =
            new LinkedHashMap<String, ArrayList<Document>>();
        for (int i = startIdx; i < stopIdx; i++) {
            int docId = hits[i].doc;
            Document doc = searcher.doc(docId);
            String rpath = doc.get("path");
            String parent = rpath.substring(0, rpath.lastIndexOf('/'));
            ArrayList<Document> dirDocs = dirHash.get(parent);
            if (dirDocs == null) {
                dirDocs = new ArrayList<Document>();
                dirHash.put(parent, dirDocs);
            }
            dirDocs.add(doc);
        }
        return dirHash;
    }

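    /**
     * Read the content of the given file through a {@link TagFilter} and
     * return it as a string. At most the first 8192 characters of the
     * filtered content are returned.
     *
     * @param basedir the base directory to resolve {@code path} against
     * @param path path of the file relative to {@code basedir}
     * @param isXref whether the file is a generated xref (read via
     *        {@link XrefReader}) rather than a plain source/HTML file
     * @return the filtered content, or an empty string on error
     */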
    @SuppressWarnings("resource")
    private static String getTags(File basedir, String path, boolean isXref) {
        char[] content = new char[1024 * 8];
        Reader r = null;
        // Grrrrrrrrrrrrr - TagFilter takes Readers, only!!!!
        // Why? Is it CS sensible?
        File file = new File(basedir, path);
        try {
            if (isXref) {
                r = new TagFilter(new XrefReader(file));
            } else {
                r = new TagFilter(new BufferedReader(new FileReader(file)));
            }
            int len = r.read(content);
            if (len < 0) {
                // empty file
                return "";
            }
            return new String(content, 0, len);
        } catch (Exception e) {
            logger.warning("Failed to read tags from '" + file + "': "
                + e.getMessage());
            logger.log(Level.FINE, "getTags", e);
        } finally {
            IOUtils.close(r);
        }
        return "";
    }

    /**
     * Prints out results in HTML form. The following search helper fields are
     * required to be properly initialized:
     * <ul>
     * <li>{@link SearchHelper#dataRoot}</li>
     * <li>{@link SearchHelper#contextPath}</li>
     * <li>{@link SearchHelper#searcher}</li>
     * <li>{@link SearchHelper#hits}</li>
     * <li>{@link SearchHelper#historyContext} (ignored if {@code null})</li>
     * <li>{@link SearchHelper#sourceContext} (ignored if {@code null})</li>
     * <li>{@link SearchHelper#summerizer} (if sourceContext is not {@code null})</li>
     * <li>{@link SearchHelper#sourceRoot} (if sourceContext or historyContext
     *     is not {@code null})</li>
     * </ul>
     *
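     * <p>
     * A minimal usage sketch (illustrative only; {@code prepareSearchHelper()}
     * is a hypothetical stand-in for however the caller builds and populates
     * its {@link SearchHelper} as listed above):
     * </p>
     * <pre>{@code
     * SearchHelper sh = prepareSearchHelper(); // hypothetical setup
     * Writer out = new StringWriter();
     * // print the first page of results; the end index is exclusive
     * Results.prettyPrint(out, sh, 0, Math.min(sh.hits.length, 25));
     * }</pre>
     *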
     * @param out write destination
     * @param sh search helper which has all required fields set
     * @param start index of the first hit to print
     * @param end index just past the last hit to print (exclusive)
     * @throws HistoryException
     * @throws IOException
     * @throws ClassNotFoundException
     */
    public static void prettyPrint(Writer out, SearchHelper sh, int start,
        int end)
            throws HistoryException, IOException, ClassNotFoundException
    {
        String ctxE = Util.uriEncodePath(sh.contextPath);
        String xrefPrefix = sh.contextPath + Prefix.XREF_P;
        String morePrefix = sh.contextPath + Prefix.MORE_P;
        String xrefPrefixE = ctxE + Prefix.XREF_P;
        String histPrefixE = ctxE + Prefix.HIST_L;
        String rawPrefixE = ctxE + Prefix.RAW_P;
        File xrefDataDir = new File(sh.dataRoot, Prefix.XREF_P.toString());

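        // Group the hits by the directory they live in; each directory gets a
        // header row followed by one row per matching document.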
        for (Map.Entry<String, ArrayList<Document>> entry :
            createMap(sh.searcher, sh.hits, start, end).entrySet())
        {
            String parent = entry.getKey();
            out.write("<tr class=\"rsd\"><td colspan=\"3\" class=\"rsdl\"><a href=\"");
            out.write(xrefPrefixE);
            out.write(Util.uriEncodePath(parent));
            out.write("/\">");
            out.write(Util.htmlize(parent));
            out.write("/</a>");
            if (sh.desc != null) {
                out.write(" - <span class=\"rsdd\">");
                out.write(Util.htmlize(sh.desc.get(parent)));
                out.write("</span>");
            }
            out.write("</td></tr>");
            for (Document doc : entry.getValue()) {
                String rpath = doc.get("path");
                String rpathE = Util.uriEncodePath(rpath);
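                // Per-hit quick links: H = history, A = annotate, D = download.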
                out.write("<tr><td class=\"rsq\"><a href=\"");
                out.write(histPrefixE);
                out.write(rpathE);
                out.write("\" title=\"History\">H</a> <a href=\"");
                out.write(xrefPrefixE);
                out.write(rpathE);
                out.write("?a=true\" title=\"Annotate\">A</a> <a href=\"");
                out.write(rawPrefixE);
                out.write(rpathE);
                out.write("\" title=\"Download\">D</a>");
                out.write("</td>");
                out.write("<td class=\"rsf\"><a href=\"");
                out.write(xrefPrefixE);
                out.write(rpathE);
                out.write("\">");
                out.write(Util.htmlize(rpath.substring(rpath.lastIndexOf('/') + 1)));
                out.write("</a></td><td><tt class=\"rscon\">");
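                // If a source context is requested, render a summary for
                // xref'able and HTML documents; otherwise hand a plain-file
                // reader (or null) to the source context.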
                if (sh.sourceContext != null) {
                    Genre genre = Genre.get(doc.get("t"));
                    Definitions tags = null;
                    Fieldable tagsField = doc.getFieldable("tags");
                    if (tagsField != null) {
                        tags = Definitions.deserialize(tagsField.getBinaryValue());
                    }
                    if (Genre.XREFABLE == genre && sh.summerizer != null) {
                        String xtags = getTags(xrefDataDir, rpath, true);
                        // FIXME use Highlighter from lucene contrib here,
                        // instead of summarizer, we'd also get rid of
                        // apache lucene in whole source ...
                        out.write(sh.summerizer.getSummary(xtags).toString());
                    } else if (Genre.HTML == genre && sh.summerizer != null) {
                        String htags = getTags(sh.sourceRoot, rpath, false);
                        out.write(sh.summerizer.getSummary(htags).toString());
                    } else {
                        @SuppressWarnings("resource")
                        FileReader r = genre == Genre.PLAIN
                            ? new FileReader(new File(sh.sourceRoot, rpath))
                            : null;
                        sh.sourceContext.getContext(r, out, xrefPrefix,
                            morePrefix, rpath, tags, true, null);
                    }
                }
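                // Optionally append matching history (changelog) context for
                // this file.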
                if (sh.historyContext != null) {
                    sh.historyContext.getContext(new File(sh.sourceRoot, rpath),
                        rpath, out, sh.contextPath);
                }
                out.write("</tt></td></tr>\n");
            }
        }
    }
}