/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * See LICENSE.txt included in this distribution for the specific
 * language governing permissions and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at LICENSE.txt.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

package org.opensolaris.opengrok.analysis;

import java.io.Reader;
import java.io.StringReader;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.junit.Test;
import org.opensolaris.opengrok.analysis.c.CSymbolTokenizer;
import org.opensolaris.opengrok.analysis.c.CxxSymbolTokenizer;
import org.opensolaris.opengrok.analysis.document.TroffFullTokenizer;
import org.opensolaris.opengrok.analysis.fortran.FortranSymbolTokenizer;
import org.opensolaris.opengrok.analysis.java.JavaSymbolTokenizer;
import org.opensolaris.opengrok.analysis.lisp.LispSymbolTokenizer;
import org.opensolaris.opengrok.analysis.perl.PerlSymbolTokenizer;
import org.opensolaris.opengrok.analysis.plain.PlainFullTokenizer;
import org.opensolaris.opengrok.analysis.plain.PlainSymbolTokenizer;
import org.opensolaris.opengrok.analysis.sh.ShSymbolTokenizer;
import org.opensolaris.opengrok.analysis.tcl.TclSymbolTokenizer;
import static org.junit.Assert.*;

/**
 * Unit tests for JFlexTokenizer.
 */
public class JFlexTokenizerTest {

    /**
     * Test that the various sub-classes of JFlexTokenizer return the
     * correct offsets for the tokens. They used to give wrong values for
     * the last token. Bug #15858.
     */
    @Test
    public void testOffsetAttribute() throws Exception {
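        // These tokenizers are all expected to handle the default input text
        // used by the single-argument helper below.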
        testOffsetAttribute(ShSymbolTokenizer.class);
        testOffsetAttribute(TroffFullTokenizer.class);
        testOffsetAttribute(PerlSymbolTokenizer.class);
        testOffsetAttribute(PlainSymbolTokenizer.class);
        testOffsetAttribute(PlainFullTokenizer.class);
        testOffsetAttribute(CSymbolTokenizer.class);
        testOffsetAttribute(CxxSymbolTokenizer.class);
        testOffsetAttribute(JavaSymbolTokenizer.class);
        testOffsetAttribute(LispSymbolTokenizer.class);
        testOffsetAttribute(TclSymbolTokenizer.class);

        // The Fortran tokenizer doesn't accept the default input text, so
        // create a text fragment that it understands
        testOffsetAttribute(FortranSymbolTokenizer.class,
                "1 token1 = token2 + token3",
                new String[] {"token1", "token2", "token3"});
    }

    /**
     * Helper method for {@link #testOffsetAttribute()} that runs the test
     * on a single implementation class.
     */
    private void testOffsetAttribute(Class<? extends JFlexTokenizer> klass)
            throws Exception {
        String inputText = "alpha beta gamma delta";
        String[] expectedTokens = inputText.split(" ");
        testOffsetAttribute(klass, inputText, expectedTokens);
    }

    /**
     * Helper method for {@link #testOffsetAttribute()} that runs the test
     * on a single implementation class with the specified input text and
     * expected tokens.
     */
    private void testOffsetAttribute(Class<? extends JFlexTokenizer> klass,
            String inputText, String[] expectedTokens)
            throws Exception {
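        // Each tokenizer class under test is assumed to provide a public
        // constructor taking a Reader, so build the instance reflectively.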
        JFlexTokenizer tokenizer = klass.getConstructor(Reader.class)
                .newInstance(new StringReader(inputText));
        TermAttribute term = tokenizer.addAttribute(TermAttribute.class);
        OffsetAttribute offset = tokenizer.addAttribute(OffsetAttribute.class);
        int count = 0;
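        // Consume the token stream and verify that each token's term text and
        // start/end offsets match the expected token at that position.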
        while (tokenizer.incrementToken()) {
            assertTrue("too many tokens", count < expectedTokens.length);
            String expected = expectedTokens[count];
            assertEquals("term", expected, term.term());
            assertEquals("start",
                    inputText.indexOf(expected), offset.startOffset());
            assertEquals("end",
                    inputText.indexOf(expected) + expected.length(),
                    offset.endOffset());
            count++;
        }
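        // Finally, verify that no expected tokens were left unmatched.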
assertEquals("wrong number of tokens", expectedTokens.length, count);
}

    /**
     * The fix for bug #15858 caused a regression in ShSymbolTokenizer where
     * variables of the form {@code ${VARIABLE}} were not correctly indexed
     * if they were inside a quoted string. The closing brace would be part of
     * the indexed term in that case.
     */
    @Test
    public void testShellVariableInBraces() throws Exception {
        // Shell command to tokenize
        String inputText = "echo \"${VARIABLE} $abc xyz\"";
        // "echo" is an ignored token in ShSymbolTokenizer, "xyz" is a string
        // and not a symbol. Therefore, expect just the two tokens that name
        // variables.
        String[] expectedTokens = {"VARIABLE", "abc"};
        testOffsetAttribute(ShSymbolTokenizer.class, inputText, expectedTokens);
    }
}