/lucene-3.6.0/lucene/core/src/test/org/apache/lucene/index/

  TestByteSlices.java
    69: for(int stream=0;stream<NUM_STREAM;stream++) {
    70: starts[stream] = -1;
    71: counters[stream] = 0;
    76: int stream = random.nextInt(NUM_STREAM);
    78: System.out.println("write stream=" + stream);
    80: if (starts[stream] == -1) {
    82: starts[stream] [all...]
/lucene-3.6.0/lucene/backwards/src/test/org/apache/lucene/index/

  TestByteSlices.java
    69: for(int stream=0;stream<NUM_STREAM;stream++) {
    70: starts[stream] = -1;
    71: counters[stream] = 0;
    76: int stream = random.nextInt(NUM_STREAM);
    78: System.out.println("write stream=" + stream);
    80: if (starts[stream] == -1) {
    82: starts[stream] [all...]
/lucene-3.6.0/solr/core/src/test/org/apache/solr/analysis/

  DoubleMetaphoneFilterTest.java
    29: TokenStream stream = new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader("international"));
    30: TokenStream filter = new DoubleMetaphoneFilter(stream, 4, false);
    35: TokenStream stream = new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader("international"));
    36: TokenStream filter = new DoubleMetaphoneFilter(stream, 4, true);
    41: TokenStream stream = new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader("Kuczewski"));
    42: TokenStream filter = new DoubleMetaphoneFilter(stream, 4, false);
    47: TokenStream stream = new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader("international"));
    48: TokenStream filter = new DoubleMetaphoneFilter(stream, 8, false);
    53: TokenStream stream = new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader("12345 #$%@#^%&"));
    54: TokenStream filter = new DoubleMetaphoneFilter(stream, [all...]
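For context, these hits only show how the phonetic filter is constructed. A minimal, self-contained sketch of the same pattern, assuming the Lucene/Solr 3.6 class locations and substituting Version.LUCENE_36 for the test's DEFAULT_VERSION constant:

    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.WhitespaceTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;
    import org.apache.solr.analysis.DoubleMetaphoneFilter;

    public class DoubleMetaphoneDemo {
      public static void main(String[] args) throws Exception {
        // Whitespace-tokenize the input, then encode each token with Double Metaphone:
        // codes are capped at 4 characters, inject=false means the original token is
        // replaced by its code rather than emitted alongside it.
        TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_36,
            new StringReader("international"));
        TokenStream filter = new DoubleMetaphoneFilter(stream, 4, false);

        CharTermAttribute termAtt = filter.addAttribute(CharTermAttribute.class);
        filter.reset();
        while (filter.incrementToken()) {
          System.out.println(termAtt.toString());   // prints the phonetic code
        }
        filter.end();
        filter.close();
      }
    }

With inject=true (as in the second hit) the original token and its code are both emitted at the same position.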
  TestHindiFilters.java
    40: TokenStream stream = filterFactory.create(tokenizer);
    41: assertTokenStreamContents(stream, new String[] { "ৎ", "और" });
    55: TokenStream stream = indicFilterFactory.create(tokenizer);
    56: stream = hindiFilterFactory.create(stream);
    57: assertTokenStreamContents(stream, new String[] {"किताब"});
    72: TokenStream stream = indicFilterFactory.create(tokenizer);
    73: stream = hindiFilterFactory.create(stream);
    74: stream [all...]
  TestStandardFactories.java
    40: Tokenizer stream = factory.create(reader);
    41: assertTokenStreamContents(stream,
    58: Tokenizer stream = factory.create(reader);
    59: assertTokenStreamContents(stream,
    70: Tokenizer stream = factory.create(reader);
    71: assertTokenStreamContents(stream,
    88: Tokenizer stream = factory.create(reader);
    89: assertTokenStreamContents(stream,
    103: TokenStream stream = filterFactory.create(tokenizer);
    104: assertTokenStreamContents(stream, [all...]
  TestRussianFilters.java
    37: Tokenizer stream = factory.create(reader);
    38: assertTokenStreamContents(stream, new String[] {"Вместе", "с", "тем", "о",
    52: TokenStream stream = filterFactory.create(tokenizer);
    53: assertTokenStreamContents(stream, new String[] {"вместе", "с", "тем", "о",
    69: TokenStream stream = caseFactory.create(tokenizer);
    70: stream = stemFactory.create(stream);
    71: assertTokenStreamContents(stream, new String[] {"вмест", "с", "тем", "о",
  TestUAX29URLEmailTokenizerFactory.java
    38: Tokenizer stream = factory.create(reader);
    39: assertTokenStreamContents(stream,
    47: Tokenizer stream = factory.create(reader);
    48: assertTokenStreamContents(stream,
    57: Tokenizer stream = factory.create(reader);
    58: assertTokenStreamContents(stream,
    66: Tokenizer stream = factory.create(reader);
    67: assertTokenStreamContents(stream,
    75: Tokenizer stream = factory.create(reader);
    76: assertTokenStreamContents(stream, [all...]
  TestArabicFilters.java
    40: Tokenizer stream = factory.create(reader);
    41: assertTokenStreamContents(stream, new String[] {"الذين", "مَلكت", "أيمانكم"});
    54: TokenStream stream = filterFactory.create(tokenizer);
    55: assertTokenStreamContents(stream, new String[] {"الذين", "ملكت", "ايمانكم"});
    69: TokenStream stream = normFactory.create(tokenizer);
    70: stream = stemFactory.create(stream);
    71: assertTokenStreamContents(stream, new String[] {"ذين", "ملكت", "ايمانكم"});
    82: TokenStream stream = tokenizerFactory.create(charfilterFactory.create(CharReader.get(reader)));
    83: assertTokenStreamContents(stream, ne [all...]
  TestShingleFilterFactory.java
    40: TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
    41: assertTokenStreamContents(stream, new String[] {"this", "this is", "is",
    54: TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
    55: assertTokenStreamContents(stream,
    68: TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
    69: assertTokenStreamContents(stream,
    84: TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
    85: assertTokenStreamContents(stream,
    101: TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
    102: assertTokenStreamContents(stream, [all...]
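The expected tokens in the first hit ("this", "this is", "is", ...) are word shingles. A rough sketch of the equivalent behaviour using the underlying ShingleFilter from contrib/analyzers directly; assuming that is what the factory produces with default settings, and using a WhitespaceTokenizer in place of the test-only MockTokenizer:

    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.WhitespaceTokenizer;
    import org.apache.lucene.analysis.shingle.ShingleFilter;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;

    public class ShingleDemo {
      public static void main(String[] args) throws Exception {
        TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_36,
            new StringReader("this is a test sentence"));
        // Emit unigrams plus word bigrams (shingles of size 2), e.g.
        // "this", "this is", "is", "is a", "a", ...
        TokenStream shingles = new ShingleFilter(stream, 2);

        CharTermAttribute termAtt = shingles.addAttribute(CharTermAttribute.class);
        shingles.reset();
        while (shingles.incrementToken()) {
          System.out.println(termAtt.toString());
        }
        shingles.end();
        shingles.close();
      }
    }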
  TestCJKBigramFilterFactory.java
    37: TokenStream stream = factory.create(new StandardTokenizer(TEST_VERSION_CURRENT, reader));
    38: assertTokenStreamContents(stream,
    48: TokenStream stream = factory.create(new StandardTokenizer(TEST_VERSION_CURRENT, reader));
    49: assertTokenStreamContents(stream,
  TestKeepWordFilter.java
    62: TokenStream stream = factory.create(new MockTokenizer(new StringReader(input), MockTokenizer.WHITESPACE, false));
    63: assertTokenStreamContents(stream, new String[] { "aaa", "BBB" }, new int[] { 3, 2 });
    66: stream = new MockTokenizer(new StringReader(input), MockTokenizer.WHITESPACE, false);
    67: stream = new KeepWordFilter(true, stream, new CharArraySet(TEST_VERSION_CURRENT,words, false));
    68: assertTokenStreamContents(stream, new String[] { "aaa" }, new int[] { 3 });
    79: stream = factory.create(new MockTokenizer(new StringReader(input), MockTokenizer.WHITESPACE, false));
    80: assertTokenStreamContents(stream, new String[] { "aaa", "BBB" }, new int[] { 1, 1 });
    92: stream = factory.create(new MockTokenizer(new StringReader(input), MockTokenizer.WHITESPACE, false));
    93: assertTokenStreamContents(stream, ne [all...]
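The direct KeepWordFilter construction shown above can be reproduced outside the test. A short sketch, assuming Solr 3.6's org.apache.solr.analysis.KeepWordFilter and using Version.LUCENE_36 for the test's TEST_VERSION_CURRENT; the input string is made up for illustration:

    import java.io.StringReader;
    import java.util.Arrays;
    import org.apache.lucene.analysis.CharArraySet;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.WhitespaceTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;
    import org.apache.solr.analysis.KeepWordFilter;

    public class KeepWordDemo {
      public static void main(String[] args) throws Exception {
        // Only tokens found in the keep set survive; ignoreCase=false makes the
        // match case sensitive, enablePositionIncrements=true records a gap
        // where tokens were dropped.
        CharArraySet keep = new CharArraySet(Version.LUCENE_36,
            Arrays.asList("aaa", "BBB"), false);
        TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_36,
            new StringReader("xxx aaa yyy BBB zzz"));
        stream = new KeepWordFilter(true, stream, keep);

        CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
          System.out.println(termAtt.toString());   // "aaa", "BBB"
        }
        stream.end();
        stream.close();
      }
    }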
/lucene-3.6.0/lucene/core/src/test/org/apache/lucene/analysis/

  TestNumericTokenStream.java
    30: final NumericTokenStream stream=new NumericTokenStream().setLongValue(lvalue);
    32: final CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
    33: final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
    35: assertTrue("New token is available", stream.incrementToken());
    39: assertFalse("No more tokens available", stream.incrementToken());
    43: final NumericTokenStream stream=new NumericTokenStream().setIntValue(ivalue);
    45: final CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
    46: final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
    48: assertTrue("New token is available", stream.incrementToken());
    52: assertFalse("No more tokens available", stream [all...]
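NumericTokenStream produces one token per precision step for a numeric value; the hits above check the first and last of them. A minimal consumption sketch; the value and the comments describe expected behaviour rather than anything asserted here:

    import org.apache.lucene.analysis.NumericTokenStream;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.tokenattributes.TypeAttribute;

    public class NumericTokenStreamDemo {
      public static void main(String[] args) throws Exception {
        // With the default precision step of 4, a 64-bit long yields 16 tokens.
        NumericTokenStream stream = new NumericTokenStream().setLongValue(1234567890L);
        CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
        TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);

        stream.reset();
        while (stream.incrementToken()) {
          // The first token carries the full-precision type, the rest the
          // lower-precision type; the term itself is a prefix-coded form.
          System.out.println(typeAtt.type() + " -> " + termAtt.toString());
        }
        stream.end();
        stream.close();
      }
    }

In ordinary indexing code this stream is usually produced indirectly via NumericField rather than constructed by hand.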
  TestStopAnalyzer.java
    48: TokenStream stream = stop.tokenStream("test", reader);
    49: assertTrue(stream != null);
    50: CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
    52: while (stream.incrementToken()) {
    64: TokenStream stream = newStop.tokenStream("test", reader);
    65: assertNotNull(stream);
    66: CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
    67: PositionIncrementAttribute posIncrAtt = stream.addAttribute(PositionIncrementAttribute.class);
    69: while (stream.incrementToken()) {
    84: TokenStream stream [all...]
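These hits consume an analyzer's TokenStream directly and inspect the position increment attribute, which is how removed stop words show up as gaps. A compact sketch of that pattern, assuming StopAnalyzer's default English stop set and an invented input string:

    import java.io.StringReader;
    import org.apache.lucene.analysis.StopAnalyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
    import org.apache.lucene.util.Version;

    public class StopAnalyzerDemo {
      public static void main(String[] args) throws Exception {
        StopAnalyzer stop = new StopAnalyzer(Version.LUCENE_36);
        TokenStream stream = stop.tokenStream("test",
            new StringReader("this is a test of the stop analyzer"));
        CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
        PositionIncrementAttribute posIncrAtt =
            stream.addAttribute(PositionIncrementAttribute.class);

        stream.reset();
        while (stream.incrementToken()) {
          // A position increment greater than 1 means stop words were removed
          // immediately before this token.
          System.out.println(termAtt.toString()
              + " (+" + posIncrAtt.getPositionIncrement() + ")");
        }
        stream.end();
        stream.close();
      }
    }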
  TestCachingTokenFilter.java
    43: TokenStream stream = new TokenStream() {
    62: stream = new CachingTokenFilter(stream);
    64: doc.add(new Field("preanalyzed", stream, TermVector.NO));
    67: checkTokens(stream);
    68: stream.reset();
    69: checkTokens(stream);
    72: // don't reset the stream here, the DocumentWriter should do that implicitly
    93: // 3) reset stream and consume tokens again
    94: stream
    99: checkTokens(TokenStream stream) argument [all...]
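The test consumes the same stream twice; CachingTokenFilter makes that possible by recording tokens on the first pass and replaying them after reset(). A small sketch of that reuse pattern with an assumed input:

    import java.io.StringReader;
    import org.apache.lucene.analysis.CachingTokenFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.WhitespaceTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;

    public class CachingTokenFilterDemo {
      public static void main(String[] args) throws Exception {
        TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_36,
            new StringReader("term1 term2 term3"));
        CachingTokenFilter cached = new CachingTokenFilter(stream);

        printTokens(cached);   // first pass fills the cache from the tokenizer
        cached.reset();        // rewinds the cache, not the underlying reader
        printTokens(cached);   // second pass replays the cached tokens
      }

      static void printTokens(TokenStream stream) throws Exception {
        CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
        while (stream.incrementToken()) {
          System.out.println(termAtt.toString());
        }
      }
    }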
  TestLengthFilter.java
    27: TokenStream stream = new MockTokenizer(
    29: LengthFilter filter = new LengthFilter(false, stream, 2, 6);
    37: TokenStream stream = new MockTokenizer(
    39: LengthFilter filter = new LengthFilter(true, stream, 2, 6);
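LengthFilter keeps only tokens whose length falls inside an inclusive range; the hits above construct it with min=2 and max=6. A short sketch, with a WhitespaceTokenizer standing in for the test-only MockTokenizer and a made-up input:

    import java.io.StringReader;
    import org.apache.lucene.analysis.LengthFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.WhitespaceTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;

    public class LengthFilterDemo {
      public static void main(String[] args) throws Exception {
        TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_36,
            new StringReader("short and looooooooooooong and longenough"));
        // Keep tokens of 2 to 6 characters; enablePositionIncrements=false means
        // no position gaps are recorded for the dropped tokens.
        LengthFilter filter = new LengthFilter(false, stream, 2, 6);

        CharTermAttribute termAtt = filter.addAttribute(CharTermAttribute.class);
        filter.reset();
        while (filter.incrementToken()) {
          System.out.println(termAtt.toString());   // "short", "and", "and"
        }
        filter.end();
        filter.close();
      }
    }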
/lucene-3.6.0/lucene/backwards/src/test/org/apache/lucene/analysis/

  TestNumericTokenStream.java
    30: final NumericTokenStream stream=new NumericTokenStream().setLongValue(lvalue);
    32: final CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
    33: final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
    35: assertTrue("New token is available", stream.incrementToken());
    39: assertFalse("No more tokens available", stream.incrementToken());
    43: final NumericTokenStream stream=new NumericTokenStream().setIntValue(ivalue);
    45: final CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
    46: final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
    48: assertTrue("New token is available", stream.incrementToken());
    52: assertFalse("No more tokens available", stream [all...]
  TestLengthFilter.java
    25: TokenStream stream = new MockTokenizer(
    27: LengthFilter filter = new LengthFilter(false, stream, 2, 6);
    35: TokenStream stream = new MockTokenizer(
    37: LengthFilter filter = new LengthFilter(true, stream, 2, 6);
  TestStopAnalyzer.java
    48: TokenStream stream = stop.tokenStream("test", reader);
    49: assertTrue(stream != null);
    50: CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
    52: while (stream.incrementToken()) {
    64: TokenStream stream = newStop.tokenStream("test", reader);
    65: assertNotNull(stream);
    66: CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
    67: PositionIncrementAttribute posIncrAtt = stream.addAttribute(PositionIncrementAttribute.class);
    69: while (stream.incrementToken()) {
    84: TokenStream stream [all...]
  TestCachingTokenFilter.java
    42: TokenStream stream = new TokenStream() {
    61: stream = new CachingTokenFilter(stream);
    63: doc.add(new Field("preanalyzed", stream, TermVector.NO));
    66: checkTokens(stream);
    67: stream.reset();
    68: checkTokens(stream);
    71: // don't reset the stream here, the DocumentWriter should do that implicitly
    92: // 3) reset stream and consume tokens again
    93: stream
    98: checkTokens(TokenStream stream) argument [all...]
/lucene-3.6.0/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/

  OffsetLimitTokenFilterTest.java
    36: MockTokenizer stream = new MockTokenizer(new StringReader(
    39: stream.setEnableChecks(false);
    40: OffsetLimitTokenFilter filter = new OffsetLimitTokenFilter(stream, 10);
    43: stream = new MockTokenizer(new StringReader(
    46: stream.setEnableChecks(false);
    47: filter = new OffsetLimitTokenFilter(stream, 12);
    50: stream = new MockTokenizer(new StringReader(
    53: stream.setEnableChecks(false);
    54: filter = new OffsetLimitTokenFilter(stream, 30);
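OffsetLimitTokenFilter cuts a stream off once a character-offset budget has been consumed; the highlighter uses it to analyze only the first part of a field. A rough sketch with an assumed input (a WhitespaceTokenizer is substituted for the test-only MockTokenizer):

    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.WhitespaceTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.search.highlight.OffsetLimitTokenFilter;
    import org.apache.lucene.util.Version;

    public class OffsetLimitDemo {
      public static void main(String[] args) throws Exception {
        TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_36,
            new StringReader("short toolong evenlonger"));
        // Stop once at least 10 characters' worth of token text has been emitted:
        // "short" and "toolong" come through, "evenlonger" is cut off.
        TokenStream filter = new OffsetLimitTokenFilter(stream, 10);

        CharTermAttribute termAtt = filter.addAttribute(CharTermAttribute.class);
        filter.reset();
        while (filter.incrementToken()) {
          System.out.println(termAtt.toString());
        }
        filter.end();
        filter.close();
      }
    }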
/lucene-3.6.0/solr/solrj/src/test/org/apache/solr/common/util/

  ContentStreamTest.java
    42: ContentStreamBase stream = new ContentStreamBase.StringStream( input );
    43: assertEquals( input.length(), stream.getSize().intValue() );
    44: assertEquals( input, IOUtils.toString( stream.getStream(), "UTF-8" ) );
    45: assertEquals( input, IOUtils.toString( stream.getReader() ) );
    57: ContentStreamBase stream = new ContentStreamBase.FileStream( file );
    58: assertEquals( file.length(), stream.getSize().intValue() );
    59: assertTrue( IOUtils.contentEquals( new FileInputStream( file ), stream.getStream() ) );
    60: assertTrue( IOUtils.contentEquals( new FileReader( file ), stream.getReader() ) );
    88: ContentStreamBase stream = new ContentStreamBase.URLStream( url );
    89: in = stream [all...]
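ContentStreamBase exposes the same content three ways: a size, a binary InputStream, and a character Reader. A small sketch of the StringStream variant, using Apache commons-io IOUtils as the test does; the sample string is an assumption:

    import java.io.InputStream;
    import java.io.Reader;
    import org.apache.commons.io.IOUtils;
    import org.apache.solr.common.util.ContentStreamBase;

    public class ContentStreamDemo {
      public static void main(String[] args) throws Exception {
        String input = "this is a simple test of ContentStreamBase";
        ContentStreamBase stream = new ContentStreamBase.StringStream(input);

        System.out.println(stream.getSize());           // the string length
        InputStream in = stream.getStream();            // byte view
        System.out.println(IOUtils.toString(in, "UTF-8"));
        in.close();
        Reader reader = stream.getReader();             // character view
        System.out.println(IOUtils.toString(reader));
        reader.close();
      }
    }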
/lucene-3.6.0/lucene/contrib/facet/src/test/org/apache/lucene/facet/index/streaming/

  CategoryAttributesStreamTest.java
    49: CategoryAttributesStream stream = new CategoryAttributesStream(
    52: while (stream.incrementToken()) {
    58: stream.reset();
    60: while (stream.incrementToken()) {
    70: stream.reset();
    71: while (stream.incrementToken()) {
    72: CategoryAttribute fromStream = stream
/lucene-3.6.0/solr/core/src/java/org/apache/solr/handler/

  DumpRequestHandler.java
    42: // Cycle through each stream
    44: NamedList<Object> stream = new SimpleOrderedMap<Object>();
    45: stream.add( "name", content.getName() );
    46: stream.add( "sourceInfo", content.getSourceInfo() );
    47: stream.add( "size", content.getSize() );
    48: stream.add( "contentType", content.getContentType() );
    51: stream.add( "stream", IOUtils.toString(reader) );
    55: streams.add( stream );
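The handler builds one SimpleOrderedMap per uploaded stream and echoes its metadata and body back in the response. A standalone sketch of the same NamedList-building pattern, outside the handler, feeding it a StringStream instead of a real upload:

    import java.io.Reader;
    import org.apache.commons.io.IOUtils;
    import org.apache.solr.common.util.ContentStream;
    import org.apache.solr.common.util.ContentStreamBase;
    import org.apache.solr.common.util.NamedList;
    import org.apache.solr.common.util.SimpleOrderedMap;

    public class DumpStreamSketch {
      public static void main(String[] args) throws Exception {
        ContentStream content = new ContentStreamBase.StringStream("hello stream");

        // One ordered map per stream, mirroring lines 44-51 of the handler.
        NamedList<Object> stream = new SimpleOrderedMap<Object>();
        stream.add("name", content.getName());
        stream.add("sourceInfo", content.getSourceInfo());
        stream.add("size", content.getSize());
        stream.add("contentType", content.getContentType());
        Reader reader = content.getReader();
        try {
          stream.add("stream", IOUtils.toString(reader));
        } finally {
          reader.close();
        }
        System.out.println(stream);
      }
    }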
/lucene-3.6.0/lucene/contrib/analyzers/kuromoji/src/java/org/apache/lucene/analysis/ja/

  JapaneseAnalyzer.java
    91: TokenStream stream = new JapaneseBaseFormFilter(tokenizer);
    92: stream = new JapanesePartOfSpeechStopFilter(true, stream, stoptags);
    93: stream = new CJKWidthFilter(stream);
    94: stream = new StopFilter(matchVersion, stream, stopwords);
    95: stream = new JapaneseKatakanaStemFilter(stream);
    96: stream [all...]
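These lines are the filter chain inside JapaneseAnalyzer's component setup. A sketch that rebuilds the chain as far as the listing shows it (it is truncated after the katakana stemmer); the JapaneseTokenizer constructor and the getDefaultStopSet()/getDefaultStopTags() helpers are assumptions about the 3.6 Kuromoji API:

    import java.io.Reader;
    import java.util.Set;
    import org.apache.lucene.analysis.StopFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.cjk.CJKWidthFilter;
    import org.apache.lucene.analysis.ja.JapaneseAnalyzer;
    import org.apache.lucene.analysis.ja.JapaneseBaseFormFilter;
    import org.apache.lucene.analysis.ja.JapaneseKatakanaStemFilter;
    import org.apache.lucene.analysis.ja.JapanesePartOfSpeechStopFilter;
    import org.apache.lucene.analysis.ja.JapaneseTokenizer;
    import org.apache.lucene.util.Version;

    public class JapaneseChainSketch {
      static TokenStream buildChain(Reader reader, Version matchVersion) {
        // Assumed tokenizer setup: no user dictionary, discard punctuation,
        // search-mode segmentation.
        Tokenizer tokenizer = new JapaneseTokenizer(reader, null, true,
            JapaneseTokenizer.Mode.SEARCH);
        Set<String> stoptags = JapaneseAnalyzer.getDefaultStopTags();
        Set<?> stopwords = JapaneseAnalyzer.getDefaultStopSet();

        TokenStream stream = new JapaneseBaseFormFilter(tokenizer);           // inflected form -> base form
        stream = new JapanesePartOfSpeechStopFilter(true, stream, stoptags);  // drop unwanted parts of speech
        stream = new CJKWidthFilter(stream);                                  // fold half/full width forms
        stream = new StopFilter(matchVersion, stream, stopwords);             // Japanese stop words
        stream = new JapaneseKatakanaStemFilter(stream);                      // normalize katakana variants
        return stream;
      }
    }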
/lucene-3.6.0/solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/

  TestICUTokenizerFactory.java
    30: TokenStream stream = factory.create(reader);
    31: assertTokenStreamContents(stream,
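ICUTokenizerFactory wraps the contrib ICU tokenizer, which segments text by Unicode word-break rules (so scripts written without spaces, such as Thai, are still split into words). A minimal sketch mirroring the test's factory.create(reader) call; the sample input and the assumption that the factory needs no init arguments are both hypothetical:

    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.solr.analysis.ICUTokenizerFactory;

    public class ICUTokenizerFactoryDemo {
      public static void main(String[] args) throws Exception {
        ICUTokenizerFactory factory = new ICUTokenizerFactory();
        TokenStream stream = factory.create(
            new StringReader("Testing ICU segmentation: สวัสดีครับ"));

        CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
          System.out.println(termAtt.toString());
        }
        stream.end();
        stream.close();
      }
    }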