// AnalyzerGuru.java revision 210
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * See LICENSE.txt included in this distribution for the specific
 * language governing permissions and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at LICENSE.txt.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
package org.opensolaris.opengrok.analysis;

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.StringReader;
import java.io.Writer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.opensolaris.opengrok.analysis.FileAnalyzer.Genre;
import org.opensolaris.opengrok.analysis.archive.BZip2AnalyzerFactory;
import org.opensolaris.opengrok.analysis.archive.GZIPAnalyzerFactory;
import org.opensolaris.opengrok.analysis.archive.TarAnalyzerFactory;
import org.opensolaris.opengrok.analysis.archive.ZipAnalyzerFactory;
import org.opensolaris.opengrok.analysis.c.CAnalyzerFactory;
import org.opensolaris.opengrok.analysis.data.IgnorantAnalyzerFactory;
import org.opensolaris.opengrok.analysis.data.ImageAnalyzerFactory;
import org.opensolaris.opengrok.analysis.document.TroffAnalyzerFactory;
import org.opensolaris.opengrok.analysis.executables.ELFAnalyzerFactory;
import org.opensolaris.opengrok.analysis.executables.JarAnalyzerFactory;
import org.opensolaris.opengrok.analysis.executables.JavaClassAnalyzerFactory;
import org.opensolaris.opengrok.analysis.java.JavaAnalyzerFactory;
import org.opensolaris.opengrok.analysis.lisp.LispAnalyzerFactory;
import org.opensolaris.opengrok.analysis.plain.PlainAnalyzerFactory;
import org.opensolaris.opengrok.analysis.plain.XMLAnalyzerFactory;
import org.opensolaris.opengrok.analysis.sh.ShAnalyzerFactory;
import org.opensolaris.opengrok.analysis.sql.SQLAnalyzerFactory;
import org.opensolaris.opengrok.configuration.Project;
import org.opensolaris.opengrok.history.Annotation;
import org.opensolaris.opengrok.history.HistoryGuru;
import org.opensolaris.opengrok.history.HistoryReader;
import org.opensolaris.opengrok.web.Util;
36N/A/**
36N/A * Manages and porvides Analyzers as needed. Please see
36N/A * <a href="http://www.opensolaris.org/os/project/opengrok/manual/internals/">
36N/A * this</a> page for a great description of the purpose of the AnalyzerGuru.
36N/A *
36N/A * Created on September 22, 2005
36N/A * @author Chandan
36N/A */
36N/Apublic class AnalyzerGuru {
36N/A
36N/A /** The default {@code FileAnalyzerFactory} instance. */
36N/A private static final FileAnalyzerFactory
38N/A DEFAULT_ANALYZER_FACTORY = new FileAnalyzerFactory();
36N/A
58N/A /** Map from file extensions to analyzer factories. */
36N/A private static final Map<String, FileAnalyzerFactory>
36N/A ext = new HashMap<String, FileAnalyzerFactory>();
58N/A
0N/A // TODO: have a comparator
0N/A /** Map from magic strings to analyzer factories. */
0N/A private static final SortedMap<String, FileAnalyzerFactory>
0N/A magics = new TreeMap<String, FileAnalyzerFactory>();
0N/A
0N/A /**
36N/A * List of matcher objects which can be used to determine which analyzer
0N/A * factory to use.
0N/A */
36N/A private static final List<FileAnalyzerFactory.Matcher>
0N/A matchers = new ArrayList<FileAnalyzerFactory.Matcher>();
0N/A
0N/A /** List of all registered {@code FileAnalyzerFactory} instances. */
0N/A private static final List<FileAnalyzerFactory>
0N/A factories = new ArrayList<FileAnalyzerFactory>();
36N/A
0N/A /*
0N/A * If you write your own analyzer please register it here
0N/A */
0N/A static {
0N/A FileAnalyzerFactory[] analyzers = {
0N/A DEFAULT_ANALYZER_FACTORY,
0N/A new IgnorantAnalyzerFactory(),
0N/A new BZip2AnalyzerFactory(),
0N/A new XMLAnalyzerFactory(),
0N/A new TroffAnalyzerFactory(),
0N/A new ELFAnalyzerFactory(),
0N/A new JavaClassAnalyzerFactory(),
0N/A new ImageAnalyzerFactory(),
0N/A new JarAnalyzerFactory(),
0N/A new ZipAnalyzerFactory(),
0N/A new TarAnalyzerFactory(),
0N/A new CAnalyzerFactory(),
0N/A new ShAnalyzerFactory(),
0N/A new PlainAnalyzerFactory(),
0N/A new GZIPAnalyzerFactory(),
0N/A new JavaAnalyzerFactory(),
0N/A new LispAnalyzerFactory(),
0N/A new SQLAnalyzerFactory(),
0N/A };
0N/A
0N/A for (FileAnalyzerFactory analyzer : analyzers) {
0N/A registerAnalyzer(analyzer);
0N/A }
0N/A }
0N/A
0N/A /**
0N/A * Register a {@code FileAnalyzerFactory} instance.
0N/A */
36N/A private static void registerAnalyzer(FileAnalyzerFactory factory) {
0N/A for (String suffix : factory.getSuffixes()) {
0N/A FileAnalyzerFactory old = ext.put(suffix, factory);
0N/A assert old == null :
0N/A "suffix '" + suffix + "' used in multiple analyzers";
0N/A }
0N/A for (String magic : factory.getMagicStrings()) {
0N/A magics.put(magic, factory);
0N/A }
0N/A matchers.addAll(factory.getMatchers());
0N/A factories.add(factory);
0N/A }
0N/A
0N/A /**
0N/A * Instruct the AnalyzerGuru to use a given analyzer for a given
0N/A * file extension.
0N/A * @param extension the file-extension to add
0N/A * @param factory a factory which creates
36N/A * the analyzer to use for the given extension
0N/A * (if you pass null as the analyzer, you will disable
0N/A * the analyzer used for that extension)
0N/A */
0N/A public static void addExtension(String extension,
0N/A FileAnalyzerFactory factory) {
0N/A if (factory == null) {
0N/A ext.remove(extension);
0N/A } else {
0N/A ext.put(extension, factory);
0N/A }
0N/A }
0N/A
0N/A /**
0N/A * Get the default Analyzer.
0N/A */
0N/A public static FileAnalyzer getAnalyzer() {
0N/A return DEFAULT_ANALYZER_FACTORY.getAnalyzer();
0N/A }
0N/A
0N/A /**
0N/A * Get an analyzer suited to analyze a file. This function will reuse
0N/A * analyzers since they are costly.
99N/A *
99N/A * @param in Input stream containing data to be analyzed
99N/A * @param file Name of the file to be analyzed
99N/A * @return An analyzer suited for that file content
0N/A * @throws java.io.IOException If an error occurs while accessing the
25N/A * data in the input stream.
0N/A */
99N/A public static FileAnalyzer getAnalyzer(InputStream in, String file) throws IOException {
0N/A FileAnalyzerFactory factory = find(in, file);
0N/A if (factory == null) {
0N/A return getAnalyzer();
99N/A }
0N/A return factory.getAnalyzer();
99N/A }
0N/A
99N/A /**
58N/A * Create a Lucene document and fill in the required fields
58N/A * @param file The file to index
58N/A * @param in The data to generate the index for
58N/A * @param path Where the file is located (from source root)
58N/A * @return The Lucene document to add to the index database
58N/A * @throws java.io.IOException If an exception occurs while collecting the
58N/A * datas
99N/A */
58N/A public Document getDocument(File file, InputStream in, String path) throws IOException {
58N/A Document doc = new Document();
58N/A String date = DateTools.timeToString(file.lastModified(), DateTools.Resolution.MILLISECOND);
58N/A doc.add(new Field("u", Util.uid(path, date), Field.Store.YES, Field.Index.UN_TOKENIZED));
0N/A doc.add(new Field("fullpath", file.getAbsolutePath(), Field.Store.YES, Field.Index.TOKENIZED));
0N/A
0N/A try {
0N/A HistoryReader hr = HistoryGuru.getInstance().getHistoryReader(file);
0N/A if (hr != null) {
0N/A doc.add(new Field("hist", hr));
0N/A // date = hr.getLastCommentDate() //RFE
0N/A }
0N/A } catch (IOException e) {
0N/A e.printStackTrace();
99N/A }
0N/A doc.add(new Field("date", date, Field.Store.YES, Field.Index.UN_TOKENIZED));
99N/A if (path != null) {
0N/A doc.add(new Field("path", path, Field.Store.YES, Field.Index.TOKENIZED));
99N/A Project project = Project.getProject(path);
0N/A if (project != null) {
0N/A doc.add(new Field("project", project.getPath(), Field.Store.YES, Field.Index.TOKENIZED));
0N/A }
0N/A }
0N/A FileAnalyzer fa = null;
0N/A try {
0N/A fa = getAnalyzer(in, path);
58N/A } catch (Exception e) {
0N/A }
0N/A if (fa != null) {
0N/A try {
0N/A Genre g = fa.getGenre();
0N/A if (g == Genre.PLAIN) {
0N/A doc.add(new Field("t", "p", Field.Store.YES, Field.Index.UN_TOKENIZED));
0N/A } else if (g == Genre.XREFABLE) {
36N/A doc.add(new Field("t", "x", Field.Store.YES, Field.Index.UN_TOKENIZED));
0N/A } else if (g == Genre.HTML) {
0N/A doc.add(new Field("t", "h", Field.Store.YES, Field.Index.UN_TOKENIZED));
0N/A }
0N/A fa.analyze(doc, in);
36N/A } catch (Exception e) {
0N/A // Ignoring any errors while analysing
0N/A }
0N/A }
36N/A doc.removeField("fullpath");
0N/A
0N/A return doc;
0N/A }
0N/A
0N/A /**
0N/A * Get the content type for a named file.
0N/A *
0N/A * @param in The input stream we want to get the content type for (if
0N/A * we cannot determine the content type by the filename)
0N/A * @param file The name of the file
0N/A * @return The contentType suitable for printing to response.setContentType()
36N/A * @throws java.io.IOException If an error occurs while accessing the input
89N/A * stream.
36N/A */
0N/A public static String getContentType(InputStream in, String file) throws IOException {
0N/A return find(in, file).getContentType();
89N/A }
89N/A
0N/A /**
0N/A * Write a browsable version of the file
0N/A *
0N/A * @param factory The analyzer factory for this filetype
0N/A * @param in The input stream containing the data
0N/A * @param out Where to write the result
0N/A * @param annotation Annotation information for the file
0N/A * @throws java.io.IOException If an error occurs while creating the
0N/A * output
0N/A */
0N/A public static void writeXref(FileAnalyzerFactory factory, InputStream in,
0N/A Writer out, Annotation annotation)
0N/A throws IOException
0N/A {
0N/A factory.writeXref(in, out, annotation);
0N/A }
0N/A
0N/A /**
0N/A * Get the genre of a file
0N/A *
0N/A * @param file The file to inpect
0N/A * @return The genre suitable to decide how to display the file
0N/A */
0N/A public static Genre getGenre(String file) {
0N/A return getGenre(find(file));
0N/A }
0N/A
0N/A /**
0N/A * Get the genre of a bulk of data
0N/A *
0N/A * @param in A stream containing the data
0N/A * @return The genre suitable to decide how to display the file
0N/A * @throws java.io.IOException If an error occurs while getting the content
0N/A */
0N/A public static Genre getGenre(InputStream in) throws IOException {
0N/A return getGenre(find(in));
0N/A }
0N/A
0N/A /**
0N/A * Get the genre for a named class (this is most likely an analyzer)
0N/A * @param factory the analyzer factory to get the genre for
0N/A * @return The genre of this class (null if not found)
0N/A */
36N/A public static Genre getGenre(FileAnalyzerFactory factory) {
36N/A if (factory != null) {
36N/A return factory.getGenre();
0N/A }
0N/A return null;
0N/A }
0N/A
0N/A /**
0N/A * Find a {@code FileAnalyzerFactory} with the specified class name. If one
36N/A * doesn't exist, create one and register it.
0N/A *
0N/A * @param factoryClassName name of the factory class
0N/A * @return a file analyzer factory
0N/A *
0N/A * @throws ClassNotFoundException if there is no class with that name
0N/A * @throws ClassCastException if the class is not a subclass of {@code
0N/A * FileAnalyzerFactory}
0N/A * @throws IllegalAccessException if the constructor cannot be accessed
36N/A * @throws InstantiationException if the class cannot be instantiated
58N/A */
0N/A public static FileAnalyzerFactory findFactory(String factoryClassName)
0N/A throws ClassNotFoundException, IllegalAccessException,
0N/A InstantiationException
0N/A {
0N/A return findFactory(Class.forName(factoryClassName));
0N/A }
0N/A
0N/A /**
36N/A * Find a {@code FileAnalyzerFactory} which is an instance of the specified
58N/A * class. If one doesn't exist, create one and register it.
0N/A *
0N/A * @param factoryClass the factory class
0N/A * @return a file analyzer factory
0N/A *
0N/A * @throws ClassCastException if the class is not a subclass of {@code
0N/A * FileAnalyzerFactory}
36N/A * @throws IllegalAccessException if the constructor cannot be accessed
0N/A * @throws InstantiationException if the class cannot be instantiated
0N/A */
0N/A private static FileAnalyzerFactory findFactory(Class factoryClass)
0N/A throws InstantiationException, IllegalAccessException
58N/A {
36N/A for (FileAnalyzerFactory f : factories) {
36N/A if (f.getClass() == factoryClass) {
36N/A return f;
58N/A }
58N/A }
36N/A FileAnalyzerFactory f =
36N/A (FileAnalyzerFactory) factoryClass.newInstance();
0N/A registerAnalyzer(f);
0N/A return f;
32N/A }
0N/A
0N/A /**
0N/A * Finds a suitable analyser class for file name. If the analyzer cannot
0N/A * be determined by the file extension, try to look at the data in the
0N/A * InputStream to find a suitable analyzer.
0N/A *
36N/A * Use if you just want to find file type.
0N/A *
0N/A *
0N/A * @param in The input stream containing the data
0N/A * @param file The file name to get the analyzer for
0N/A * @return the analyzer factory to use
0N/A * @throws java.io.IOException If a problem occurs while reading the data
0N/A */
36N/A public static FileAnalyzerFactory find(InputStream in, String file)
36N/A throws IOException
0N/A {
0N/A FileAnalyzerFactory factory = find(file);
0N/A if (factory != null) {
0N/A return factory;
0N/A }
0N/A return find(in);
0N/A }
0N/A
0N/A /**
0N/A * Finds a suitable analyser class for file name.
0N/A *
0N/A * @param file The file name to get the analyzer for
0N/A * @return the analyzer factory to use
0N/A */
0N/A public static FileAnalyzerFactory find(String file) {
36N/A int i = 0;
0N/A if ((i = file.lastIndexOf('/')) > 0 || (i = file.lastIndexOf('\\')) > 0) {
0N/A if (i + 1 < file.length()) {
0N/A file = file.substring(i + 1);
0N/A }
0N/A }
0N/A file = file.toUpperCase();
99N/A int dotpos = file.lastIndexOf('.');
99N/A if (dotpos >= 0) {
99N/A FileAnalyzerFactory factory =
99N/A ext.get(file.substring(dotpos + 1).toUpperCase());
0N/A if (factory != null) {
0N/A return factory;
0N/A }
0N/A }
0N/A // file doesn't have any of the extensions we know
0N/A return null;
0N/A }
0N/A
0N/A /**
0N/A * Finds a suitable analyser class for the data in this stream
0N/A *
0N/A * @param in The stream containing the data to analyze
0N/A * @return the analyzer factory to use
0N/A * @throws java.io.IOException if an error occurs while reading data from
0N/A * the stream
0N/A */
0N/A public static FileAnalyzerFactory find(InputStream in) throws IOException {
0N/A in.mark(8);
0N/A byte[] content = new byte[8];
0N/A int len = in.read(content);
0N/A in.reset();
0N/A if (len < 4) {
0N/A return null;
0N/A }
0N/A
0N/A FileAnalyzerFactory factory = find(content);
0N/A if (factory != null) {
0N/A return factory;
0N/A }
0N/A
0N/A for (FileAnalyzerFactory.Matcher matcher : matchers) {
0N/A FileAnalyzerFactory fac = matcher.isMagic(content);
0N/A if (fac != null) {
0N/A return fac;
}
}
return null;
}
/**
* Finds a suitable analyser class for a magic signature
*
* @param signature the magic signature look up
* @return the analyzer factory to use
*/
public static FileAnalyzerFactory find(byte[] signature) {
char[] chars = new char[signature.length > 8 ? 8 : signature.length];
for (int i = 0; i < chars.length; i++) {
chars[i] = (char) (0xFF & signature[i]);
}
return findMagic(new String(chars));
}
/**
* Get an analyzer by looking up the "magic signature"
* @param signature the signature to look up
* @return the analyzer factory to handle data with this signature
*/
public static FileAnalyzerFactory findMagic(String signature) {
FileAnalyzerFactory a = magics.get(signature);
if (a == null) {
String sigWithoutBOM = stripBOM(signature);
for (Map.Entry<String, FileAnalyzerFactory> entry :
magics.entrySet()) {
if (signature.startsWith(entry.getKey())) {
return entry.getValue();
}
// See if text files have the magic sequence if we remove the
// byte-order marker
if (sigWithoutBOM != null &&
entry.getValue().getGenre() == Genre.PLAIN &&
sigWithoutBOM.startsWith(entry.getKey())) {
return entry.getValue();
}
}
}
return a;
}
/** Byte-order markers. */
private static final String[] BOMS = {
new String(new char[] { 0xEF, 0xBB, 0xBF }), // UTF-8 BOM
new String(new char[] { 0xFE, 0xFF }), // UTF-16BE BOM
new String(new char[] { 0xFF, 0xFE }), // UTF-16LE BOM
};
/**
* Strip away the byte-order marker from the string, if it has one.
*
* @param str the string to remove the BOM from
* @return a string without the byte-order marker, or <code>null</code> if
* the string doesn't start with a BOM
*/
private static String stripBOM(String str) {
for (String bom : BOMS) {
if (str.startsWith(bom)) {
return str.substring(bom.length());
}
}
return null;
}
public static void main(String[] args) throws Exception {
AnalyzerGuru af = new AnalyzerGuru();
System.out.println("<pre wrap=true>");
for (String arg : args) {
try {
FileAnalyzerFactory an = AnalyzerGuru.find(arg);
File f = new File(arg);
BufferedInputStream in = new BufferedInputStream(new FileInputStream(f));
FileAnalyzer fa = AnalyzerGuru.getAnalyzer(in, arg);
System.out.println("\nANALYZER = " + fa);
Document doc = af.getDocument(f, in, arg);
System.out.println("\nDOCUMENT = " + doc);
Iterator iterator = doc.getFields().iterator();
while (iterator.hasNext()) {
org.apache.lucene.document.Field field = (org.apache.lucene.document.Field) iterator.next();
if (field.isTokenized()) {
Reader r = field.readerValue();
if (r == null) {
r = new StringReader(field.stringValue());
}
TokenStream ts = fa.tokenStream(field.name(), r);
System.out.println("\nFIELD = " + field.name() + " TOKEN STREAM = " + ts.getClass().getName());
Token t;
while ((t = ts.next()) != null) {
System.out.print(t.termText());
System.out.print(' ');
}
System.out.println();
}
if (field.isStored()) {
System.out.println("\nFIELD = " + field.name());
if (field.readerValue() == null) {
System.out.println(field.stringValue());
} else {
System.out.println("STORING THE READER");
}
}
}
System.out.println("Writing XREF--------------");
Writer out = new OutputStreamWriter(System.out);
fa.writeXref(out);
out.flush();
} catch (Exception e) {
System.err.println("ERROR: " + e.getMessage());
e.printStackTrace();
}
}
}
}