search.jsp revision c6e0f8b39af7343c04ec7558a085c965159f4ea0
See LICENSE.txt included in this distribution for the specific
language governing permissions and limitations under the License,
and include the License file at LICENSE.txt.
--%><%@ page import = "javax.servlet.*,
java.io.*,
%><%@ page session="false" %><%@ page errorPage="error.jsp" %><%
// Read the raw search criteria from the request. Any of these may be
// null when the corresponding form field was not submitted; they are
// later combined by Util.buildQueryString(q, defs, refs, path, hist).
String q = request.getParameter("q");       // full-text query
String defs = request.getParameter("defs"); // definitions query
String refs = request.getParameter("refs"); // symbol-references query
String hist = request.getParameter("hist"); // history (revision log) query -- TODO confirm semantics
String path = request.getParameter("path"); // file-path query
%><%@ include file="projects.jspf" %><%
Cookie[] cookies = request.getCookies();
sort = cookie.getValue();
String sortParam = request.getParameter("sort");
if (LASTMODTIME.equals(sortParam)) {
} else if (RELEVANCY.equals(sortParam)) {
} else if (BY_PATH.equals(sortParam)) {
Cookie cookie = new Cookie("OpenGrok/sorting", sort);
response.addCookie(cookie);
//List<org.apache.lucene.document.Document> docs=new ArrayList<org.apache.lucene.document.Document>();
// Normalize the search criteria: treat an empty string (and an empty
// project selection) exactly like an absent parameter, so downstream
// code only has to test for null.
if ("".equals(q)) {
    q = null;
}
if ("".equals(defs)) {
    defs = null;
}
if ("".equals(refs)) {
    refs = null;
}
if ("".equals(hist)) {
    hist = null;
}
if ("".equals(path)) {
    path = null;
}
if (project != null && project.isEmpty()) {
    project = null;
}
// Lucene handles for this request; initialized further below and
// presumably closed at the end of the page -- TODO confirm cleanup.
Searcher searcher = null; //the searcher used to open/search the index
IndexReader ireader = null; //the reader used to open/search the index
// NOTE(review): max and hitsPerPage both read getHitsPerPage(); one of
// the two variables is likely redundant.
int max=RuntimeEnvironment.getInstance().getHitsPerPage();
int hitsPerPage = RuntimeEnvironment.getInstance().getHitsPerPage();
// multiplied with hitsPerPage below to size the initial search request
int cachePages= RuntimeEnvironment.getInstance().getCachePages();
int thispage = 0; //number of hits rendered on this page, at most max -- TODO confirm
String DATA_ROOT = env.getDataRootPath();
if(DATA_ROOT.equals("")) {
throw new Exception("DATA_ROOT parameter is not configured in web.xml!");
if(!data_root.isDirectory()) {
throw new Exception("DATA_ROOT parameter in web.xml does not exist or is not a directory!");
//String date = request.getParameter("date");
//TODO merge paging hitsPerPage with parameter n (has to reflect the search if changed so proper number is cached first time)
qstr = Util.buildQueryString(q, defs, refs, path, hist);
query = qparser.parse(qstr); //parse the
File root = new File(RuntimeEnvironment.getInstance().getDataRootFile(),
if (RuntimeEnvironment.getInstance().hasProjects()) {
if (project.size() > 1) { //more projects
IndexSearcher[] searchables = new IndexSearcher[project.size()];
File droot = new File(RuntimeEnvironment.getInstance().getDataRootFile(), "index");
//TODO might need to rewrite to Project instead of String , need changes in projects.jspf too
ireader = (IndexReader.open(new File(droot, proj)));
if (Runtime.getRuntime().availableProcessors() > 1) {
root = new File(root, project.get(0));
ireader = IndexReader.open(root);
ireader = IndexReader.open(root);
//TODO check if below is somehow reusing sessions so we don't requery again and again, I guess 2min timeout sessions could be usefull, since you click on the next page within 2mins, if not, then wait ;)
// Run the query with the requested sort order. For the two field-sorted
// cases the search is issued twice: first capped at hitsPerPage*cachePages
// to learn totalHits cheaply, then again asking for all totalHits so the
// full sorted result set is available for paging.
if (LASTMODTIME.equals(sort)) {
TopFieldDocs fdocs=searcher.search(query, null,hitsPerPage*cachePages, sortf);
totalHits=fdocs.totalHits;
// re-issue the search sized to the real hit count
fdocs=searcher.search(query, null, totalHits, sortf);
hits = fdocs.scoreDocs;
} else if (BY_PATH.equals(sort)) {
TopFieldDocs fdocs=searcher.search(query, null,hitsPerPage*cachePages, sortf);
totalHits=fdocs.totalHits;
fdocs=searcher.search(query, null,totalHits, sortf);
hits = fdocs.scoreDocs;
// NOTE(review): the branch below appears to be the relevancy (default)
// case collected via a TopDocCollector; its `} else {` header is not
// visible in this excerpt -- confirm against the full file.
searcher.search(query,collector);
totalHits=collector.getTotalHits();
searcher.search(query,collector);
hits=collector.topDocs().scoreDocs;
// for (int i = 0; i < hits.length; i++) {
// Document d = searcher.doc(docId);
// docs.add(d);
} catch (BooleanQuery.TooManyClauses e) {
errorMsg = "<b>Error parsing your query:</b><br/>" + Util.htmlize(qstr) +
"\"</a><p/> or read the <a href=\"help.jsp\">Help</a> on query language(eventually <a href=\"help.jsp#escaping\">escape special characters</a> with <b>\\</b>)<p/>" +
if (hits != null && hits.length == 1 && request.getServletPath().equals("/s") && (query != null && query instanceof TermQuery)) {
String preFragmentPath = Util.URIEncodePath(context + "/xref" + searcher.doc(hits[0].doc).get("path"));
String fragment = Util.URIEncode(((TermQuery)query).getTerm().text());
url.append("#");
url.append(fragment);
RuntimeEnvironment environment = RuntimeEnvironment.getInstance();
%><%@ include file="httpheader.jspf" %>
<div id="header"><%@ include file="pageheader.jspf" %></div>
<table border="0" width="100%"><tr><td><a href="<%=context%>" id="home">Home</a></td><td align="right"><%
// Rebuild the current request URL with a trailing "sort=" so the sort
// links below only have to append the sort-mode constant.
StringBuffer url = request.getRequestURL();
url.append('?');
// NOTE(review): getQueryString() returns null when the request has no
// query string; appending it unguarded would emit the text "null" -- the
// guard is presumably in the elided lines. Verify against the full file.
String querys = request.getQueryString();
// idx locates an existing "sort=" parameter; the code that strips it
// before re-appending is not visible in this excerpt -- TODO confirm.
int idx = querys.indexOf("sort=");
url.append(querys);
url.append('&');
url.append("sort=");
if (sort == null || RELEVANCY.equals(sort)) {
%><b>relevance</b> | <a href="<%=url.toString()+LASTMODTIME%>">last modified time</a> | <a href="<%=url.toString()+BY_PATH%>">path</a><%
} else if (LASTMODTIME.equals(sort)) {
%><a href="<%=url.toString()+RELEVANCY%>">relevance</a> | <b>last modified time</b> | <a href="<%=url.toString()+BY_PATH%>">path</a><%
} else if (BY_PATH.equals(sort)) {
%><a href="<%=url.toString()+RELEVANCY%>">relevance</a> | <a href="<%=url.toString()+LASTMODTIME%>">last modified time</a> | <b>path</b><%
%><a href="<%=url.toString()+RELEVANCY%>">relevance</a> | <a href="<%=url.toString()+LASTMODTIME%>">last modified time</a> | <a href="<%=url.toString()+BY_PATH%>">path</a><%
<%@ include file="menu.jspf"%>
//TODO spellchecking cycle below is not that great and we only create suggest links for every token in query, not for a query as whole
} else if (hits.length == 0) {
File spellIndex = new File(env.getDataRootPath(), "spellIndex");
if (RuntimeEnvironment.getInstance().hasProjects()) {
if (project.size() > 1) { //more projects
spellIndexes = new File[project.size()];
//TODO might need to rewrite to Project instead of String , need changes in projects.jspf too
spellIndex = new File(spellIndex, project.get(0));
if (spellIndexes!=null) {count=spellIndexes.length;}
if (spellIndex.exists()) {
FSDirectory spellDirectory = FSDirectory.getDirectory(spellIndex);
toks = q.split("[\t ]+");
for(int j=0; j<toks.length; j++) {
String[] ret = checker.suggestSimilar(toks[j].toLowerCase(), 5);
for(int i = 0;i < ret.length; i++) {
%><p><font color="#cc0000">Did you mean(for <%=spellIndex.getName()%>)</font>:<%
toks = refs.split("[\t ]+");
for(int j=0; j<toks.length; j++) {
String[] ret = checker.suggestSimilar(toks[j].toLowerCase(), 5);
for(int i = 0;i < ret.length; i++) {
%><p><font color="#cc0000">Did you mean(for <%=spellIndex.getName()%>)</font>:<%
//TODO it seems the only true spellchecker is for below field, see IndexDatabase createspellingsuggestions ...
toks = defs.split("[\t ]+");
for(int j=0; j<toks.length; j++) {
String[] ret = checker.suggestSimilar(toks[j].toLowerCase(), 5);
for(int i = 0;i < ret.length; i++) {
%><p><font color="#cc0000">Did you mean(for <%=spellIndex.getName()%>)</font>:<%
%><p> Your search <b><%=query.toString()%></b> did not match any files.
// Rebuild the query-string suffix used by the paging/slider links from
// the normalized criteria. Only non-null criteria are emitted, each
// URI-encoded; every fragment starts with '&' so it can be appended
// directly after "search?n=...&start=...".
String url = (q == null ? "" : "&q=" + Util.URIEncode(q) ) +
(defs == null ? "" : "&defs=" + Util.URIEncode(defs)) +
(refs == null ? "" : "&refs=" + Util.URIEncode(refs)) +
(path == null ? "" : "&path=" + Util.URIEncode(path)) +
(hist == null ? "" : "&hist=" + Util.URIEncode(hist)) +
(sort == null ? "" : "&sort=" + Util.URIEncode(sort));
labelStart = sstart/max + 1;
slider.append("<span class=\"sel\">" + label + "</span>");
arr = label < 10 ? " " + label : String.valueOf(label);
slider.append("<a class=\"more\" href=\"search?n=" + max + "&start=" + i + url + "\">"+
%> Searched <b><%=query.toString()%></b> (Results <b><%=start+1%> -
slider.toString(): ""%></p>
//TODO also fix the way what and how it is passed to prettyprint, can improve performance! SearchEngine integration is really needed here.
Results.prettyPrintHTML(searcher,hits, start, start+thispage,
ef.close();
<b> Completed in <%=(new Date()).getTime() - starttime.getTime()%> milliseconds </b> <br/>
%><br/></div><%@include file="foot.jspf"%><%