Searched defs:stream (Results 1 - 25 of 37) sorted by relevance


/lucene-3.6.0/lucene/core/src/test/org/apache/lucene/analysis/
TestCachingTokenFilter.java
43 TokenStream stream = new TokenStream() {
62 stream = new CachingTokenFilter(stream);
64 doc.add(new Field("preanalyzed", stream, TermVector.NO));
67 checkTokens(stream);
68 stream.reset();
69 checkTokens(stream);
72 // don't reset the stream here, the DocumentWriter should do that implicitly
93 // 3) reset stream and consume tokens again
94 stream
99 checkTokens(TokenStream stream) argument
[all...]
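The TestCachingTokenFilter hits above exercise the cache-and-replay contract of CachingTokenFilter: the first full pass through incrementToken() consumes the wrapped stream and caches the tokens, and reset() rewinds to the cached tokens so the stream can be consumed again (for example by a pre-analyzed Field). A minimal sketch of that pattern, assuming the Lucene 3.6 analysis API; the class name and input text are illustrative, not taken from the test:

import java.io.StringReader;

import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class CachingTokenFilterSketch {
  public static void main(String[] args) throws Exception {
    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_36,
        new StringReader("term1 term2 term3"));
    // Wrap the underlying tokenizer; the first full pass fills the cache.
    TokenStream cached = new CachingTokenFilter(stream);
    CharTermAttribute termAtt = cached.addAttribute(CharTermAttribute.class);

    while (cached.incrementToken()) {          // pass 1: consumes and caches
      System.out.println("pass 1: " + termAtt.toString());
    }
    cached.reset();                            // rewind to the cached tokens
    while (cached.incrementToken()) {          // pass 2: replayed from the cache
      System.out.println("pass 2: " + termAtt.toString());
    }
  }
}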
TestISOLatin1AccentFilter.java
25 TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
26 ISOLatin1AccentFilter filter = new ISOLatin1AccentFilter(stream);
106 void assertTermEquals(String expected, TokenStream stream, CharTermAttribute termAtt) throws Exception { argument
107 assertTrue(stream.incrementToken());
TestASCIIFoldingFilter.java
39 TokenStream stream = new MockTokenizer(new StringReader
43 ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
1900 TokenStream stream = new MockTokenizer(new StringReader(inputText.toString()), MockTokenizer.WHITESPACE, false);
1901 ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
1911 void assertTermEquals(String expected, TokenStream stream, CharTermAttribute termAtt) throws Exception { argument
1912 assertTrue(stream.incrementToken());
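Both TestISOLatin1AccentFilter and TestASCIIFoldingFilter consume their filters the same way assertTermEquals does: wrap a tokenizer in the folding filter, obtain a CharTermAttribute, and step through incrementToken(). A minimal sketch of that pattern, assuming the Lucene 3.6 analysis API; the sample text is illustrative:

import java.io.StringReader;

import org.apache.lucene.analysis.ASCIIFoldingFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;

public class FoldingFilterSketch {
  public static void main(String[] args) throws Exception {
    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_36,
        new StringReader("Des mots clés à la chaîne"));
    TokenStream filter = new ASCIIFoldingFilter(stream);
    CharTermAttribute termAtt = filter.addAttribute(CharTermAttribute.class);
    while (filter.incrementToken()) {
      // Accented characters are folded to ASCII, e.g. "clés" -> "cles", "chaîne" -> "chaine".
      System.out.println(termAtt.toString());
    }
  }
}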
/lucene-3.6.0/lucene/backwards/src/test/org/apache/lucene/analysis/
TestCachingTokenFilter.java
42 TokenStream stream = new TokenStream() {
61 stream = new CachingTokenFilter(stream);
63 doc.add(new Field("preanalyzed", stream, TermVector.NO));
66 checkTokens(stream);
67 stream.reset();
68 checkTokens(stream);
71 // don't reset the stream here, the DocumentWriter should do that implicitly
92 // 3) reset stream and consume tokens again
93 stream
98 checkTokens(TokenStream stream) argument
[all...]
TestISOLatin1AccentFilter.java
25 TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("Des mot clés À LA CHAÎNE À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï IJ Ð Ñ Ò Ó Ô Õ Ö Ø Œ Þ Ù Ú Û Ü Ý Ÿ à á â ã ä å æ ç è é ê ë ì í î ï ij ð ñ ò ó ô õ ö ø œ ß þ ù ú û ü ý ÿ fi fl"));
26 ISOLatin1AccentFilter filter = new ISOLatin1AccentFilter(stream);
106 void assertTermEquals(String expected, TokenStream stream, CharTermAttribute termAtt) throws Exception { argument
107 assertTrue(stream.incrementToken());
TestASCIIFoldingFilter.java
31 TokenStream stream = new MockTokenizer(new StringReader
35 ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
1892 TokenStream stream = new MockTokenizer(new StringReader(inputText.toString()), MockTokenizer.WHITESPACE, false);
1893 ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
1903 void assertTermEquals(String expected, TokenStream stream, CharTermAttribute termAtt) throws Exception { argument
1904 assertTrue(stream.incrementToken());
/lucene-3.6.0/solr/core/src/java/org/apache/solr/analysis/
JapanesePartOfSpeechStopFilterFactory.java
62 public TokenStream create(TokenStream stream) { argument
63 return new JapanesePartOfSpeechStopFilter(enablePositionIncrements, stream, stopTags);
/lucene-3.6.0/solr/core/src/java/org/apache/solr/handler/
ContentStreamLoader.java
42 * Loaders are responsible for closing the stream
46 * @param stream The {@link org.apache.solr.common.util.ContentStream} to add
48 public abstract void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws Exception; argument
XMLLoader.java
36 import javax.xml.stream.XMLStreamReader;
37 import javax.xml.stream.XMLStreamException;
38 import javax.xml.stream.FactoryConfigurationError;
39 import javax.xml.stream.XMLStreamConstants;
40 import javax.xml.stream.XMLInputFactory;
61 public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws Exception { argument
62 errHeader = "XMLLoader: " + stream.getSourceInfo();
66 is = stream.getStream();
67 final String charset = ContentStreamBase.getCharsetFromContentType(stream.getContentType());
287 * Given the input stream, rea
[all...]
XsltXMLLoader.java
32 import javax.xml.stream.XMLStreamReader;
33 import javax.xml.stream.XMLStreamException;
34 import javax.xml.stream.XMLInputFactory;
65 public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws Exception { argument
73 is = stream.getStream();
74 final String charset = ContentStreamBase.getCharsetFromContentType(stream.getContentType());
BinaryUpdateRequestHandler.java
53 public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws Exception {
56 is = stream.getStream();
67 private void parseAndLoadDocs(SolrQueryRequest req, SolrQueryResponse rsp, InputStream stream, argument
96 FastInputStream in = FastInputStream.wrap(stream);
JsonLoader.java
63 public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws Exception { argument
64 errHeader = "JSONLoader: " + stream.getSourceInfo();
67 reader = stream.getReader();
CSVRequestHandler.java
351 public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws IOException { argument
352 errHeader = "CSVLoader: input=" + stream.getSourceInfo();
355 reader = stream.getReader();
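All of the Solr loaders listed above plug into the single abstract hook declared in ContentStreamLoader, load(SolrQueryRequest, SolrQueryResponse, ContentStream): each implementation pulls bytes or characters via stream.getStream() or stream.getReader() and is responsible for closing them itself. A minimal custom-loader sketch under those assumptions; the class name, the byte-counting behaviour, and the response key are illustrative, and the import packages are assumed to match Solr 3.6:

import java.io.InputStream;

import org.apache.solr.common.util.ContentStream;
import org.apache.solr.handler.ContentStreamLoader;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;

public class ByteCountingLoader extends ContentStreamLoader {
  @Override
  public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws Exception {
    InputStream is = null;
    try {
      is = stream.getStream();            // same accessor the XML/JSON/binary loaders use
      byte[] buf = new byte[8192];
      long count = 0;
      int n;
      while ((n = is.read(buf)) != -1) {
        count += n;
      }
      rsp.add("bytesRead", count);        // report back through the response; key is illustrative
    } finally {
      if (is != null) {
        is.close();                       // "Loaders are responsible for closing the stream"
      }
    }
  }
}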
/lucene-3.6.0/lucene/contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/
DelimitedPayloadTokenFilterTest.java
106 void assertTermEquals(String expected, TokenStream stream, byte[] expectPay) throws Exception { argument
107 CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
108 PayloadAttribute payloadAtt = stream.getAttribute(PayloadAttribute.class);
109 stream.reset();
110 assertTrue(stream.incrementToken());
125 void assertTermEquals(String expected, TokenStream stream, CharTermAttribute termAtt, PayloadAttribute payAtt, byte[] expectPay) throws Exception { argument
126 stream.reset();
127 assertTrue(stream.incrementToken());
/lucene-3.6.0/lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/
SimpleFragmenter.java
49 public void start(String originalText, TokenStream stream) { argument
50 offsetAtt = stream.addAttribute(OffsetAttribute.class);
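As the hit shows, SimpleFragmenter.start() registers an OffsetAttribute on the token stream; the Highlighter then polls isNewFragment() while it consumes tokens. A hand-rolled sketch of that loop, assuming the Lucene 3.6 highlighter API (normally the Highlighter drives this internally); the text and fragment size are illustrative:

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.util.Version;

public class FragmenterSketch {
  public static void main(String[] args) throws Exception {
    String text = "some longer text that the fragmenter breaks into fixed size pieces";
    TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_36, new StringReader(text));
    SimpleFragmenter fragmenter = new SimpleFragmenter(20); // roughly 20-char fragments
    fragmenter.start(text, stream);                         // registers the OffsetAttribute
    while (stream.incrementToken()) {
      if (fragmenter.isNewFragment()) {
        System.out.println("-- fragment boundary --");
      }
    }
  }
}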
/lucene-3.6.0/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/
DistanceFilter.java
68 private void readObject(ObjectInputStream stream) throws IOException, ClassNotFoundException { argument
69 stream.defaultReadObject();
/lucene-3.6.0/lucene/core/src/java/org/apache/lucene/util/
IOUtils.java
253 * @param stream the stream to wrap in a reader
257 public static Reader getDecodingReader(InputStream stream, Charset charSet) { argument
261 return new BufferedReader(new InputStreamReader(stream, charSetDecoder));
277 FileInputStream stream = null;
280 stream = new FileInputStream(file);
281 final Reader reader = getDecodingReader(stream, charSet);
287 IOUtils.close(stream);
307 InputStream stream = null;
310 stream
[all...]
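The IOUtils hits above show getDecodingReader wrapping a raw InputStream in a BufferedReader whose decoder rejects malformed input, with cleanup routed through IOUtils.close. A small usage sketch, assuming the Lucene 3.6 org.apache.lucene.util.IOUtils API and that close() skips null arguments; the file name is illustrative:

import java.io.FileInputStream;
import java.io.InputStream;
import java.io.Reader;
import java.nio.charset.Charset;

import org.apache.lucene.util.IOUtils;

public class DecodingReaderSketch {
  public static void main(String[] args) throws Exception {
    InputStream stream = null;
    Reader reader = null;
    try {
      stream = new FileInputStream("stopwords.txt");        // illustrative file name
      // Wraps the raw bytes in a BufferedReader that reports malformed UTF-8 input.
      reader = IOUtils.getDecodingReader(stream, Charset.forName("UTF-8"));
      int c;
      while ((c = reader.read()) != -1) {
        System.out.print((char) c);
      }
    } finally {
      IOUtils.close(reader, stream);                        // null-tolerant close, as in the hits above
    }
  }
}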
/lucene-3.6.0/lucene/test-framework/src/java/org/apache/lucene/analysis/
MockAnalyzer.java
39 * <li>Payload data is randomly injected into the stream for more thorough testing
130 private synchronized TokenFilter maybePayload(TokenFilter stream, String fieldName) { argument
148 return stream;
150 return new MockVariableLengthPayloadFilter(random, stream);
152 return new MockFixedLengthPayloadFilter(random, stream, val);
/lucene-3.6.0/solr/contrib/extraction/src/java/org/apache/solr/handler/extraction/
ExtractingDocumentLoader.java
151 * @param stream
155 public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream) throws IOException { argument
156 errHeader = "ExtractingDocumentLoader: " + stream.getSourceInfo();
175 // Provide stream's content type as hint for auto detection
176 if(stream.getContentType() != null) {
177 metadata.add(HttpHeaders.CONTENT_TYPE, stream.getContentType());
182 inputStream = stream.getStream();
183 metadata.add(ExtractingMetadataConstants.STREAM_NAME, stream.getName());
184 metadata.add(ExtractingMetadataConstants.STREAM_SOURCE_INFO, stream.getSourceInfo());
185 metadata.add(ExtractingMetadataConstants.STREAM_SIZE, String.valueOf(stream
[all...]
/lucene-3.6.0/lucene/backwards/src/test-framework/java/org/apache/lucene/analysis/
MockAnalyzer.java
39 * <li>Payload data is randomly injected into the stream for more thorough testing
129 private synchronized TokenFilter maybePayload(TokenFilter stream, String fieldName) { argument
147 return stream;
149 return new MockVariableLengthPayloadFilter(random, stream);
151 return new MockFixedLengthPayloadFilter(random, stream, val);
/lucene-3.6.0/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/surround/parser/
QueryParserTokenManager.java
540 public QueryParserTokenManager(CharStream stream){ argument
541 input_stream = stream;
545 public QueryParserTokenManager(CharStream stream, int lexState){ argument
546 this(stream);
551 public void ReInit(CharStream stream) argument
555 input_stream = stream;
567 public void ReInit(CharStream stream, int lexState) argument
569 ReInit(stream);
/lucene-3.6.0/lucene/core/src/java/org/apache/lucene/index/
CompoundFileReader.java
32 * Class for accessing a compound stream.
49 private IndexInput stream; field in class:CompoundFileReader
65 stream = dir.openInput(name, readBufferSize);
69 int firstInt = stream.readVInt();
76 + firstInt + " expected " + CompoundFileWriter.FORMAT_CURRENT + " (resource: " + stream + ")");
79 count = stream.readVInt();
89 long offset = stream.readLong();
90 String id = stream.readString();
110 entry.length = stream.length() - entry.offset;
116 if (!success && (stream !
[all...]
FieldsWriter.java
105 void setFieldsStream(IndexOutput stream) { argument
106 this.fieldsStream = stream;
109 // Writes the contents of buffer into the fields stream
111 // stream. This assumes the buffer was already written
218 * document. The stream IndexInput is the
221 final void addRawDocuments(IndexInput stream, int[] lengths, int numDocs) throws IOException { argument
228 fieldsStream.copyBytes(stream, position-start);
TermsHashPerField.java
141 public void initReader(ByteSliceReader reader, int termID, int stream) { argument
142 assert stream < streamCount;
147 postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
148 ints[upto+stream]);
325 // Init stream slices
460 // Init stream slices
495 void writeByte(int stream, byte b) { argument
496 int upto = intUptos[intUptoStart+stream];
504 intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
507 (intUptos[intUptoStart+stream])
510 writeBytes(int stream, byte[] b, int offset, int len) argument
517 writeVInt(int stream, int i) argument
[all...]
/lucene-3.6.0/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/parser/
StandardSyntaxParser.java
639 public StandardSyntaxParser(java.io.InputStream stream) { argument
640 this(stream, null);
643 public StandardSyntaxParser(java.io.InputStream stream, String encoding) { argument
644 try { jj_input_stream = new JavaCharStream(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); }
654 public void ReInit(java.io.InputStream stream) { argument
655 ReInit(stream, null);
658 public void ReInit(java.io.InputStream stream, String encoding) { argument
659 try { jj_input_stream.ReInit(stream, encoding, 1, 1); } catch(java.io.UnsupportedEncodingException e) { throw new RuntimeException(e); }
669 public StandardSyntaxParser(java.io.Reader stream) { argument
670 jj_input_stream = new JavaCharStream(stream,
680 ReInit(java.io.Reader stream) argument
[all...]

Completed in 66 milliseconds
