Searched refs:tokens (Results 1 - 25 of 61) sorted by relevance

/lucene-3.6.0/solr/core/src/test/org/apache/solr/spelling/
SpellingQueryConverterTest.java
42 Collection<Token> tokens = converter.convert("field:foo");
43 assertTrue("tokens is null and it shouldn't be", tokens != null);
44 assertTrue("tokens Size: " + tokens.size() + " is not: " + 1, tokens.size() == 1);
53 Collection<Token> tokens = converter.convert(original);
54 assertTrue("tokens is null and it shouldn't be", tokens != null);
55 assertEquals("tokens Siz
84 isOffsetCorrect(String s, Collection<Token> tokens) argument
[all...]
FileBasedSpellCheckerTest.java
80 Collection<Token> tokens = queryConverter.convert("fob");
81 SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.get().getReader());
84 Map<String, Integer> suggestions = result.get(tokens.iterator().next());
89 spellOpts.tokens = queryConverter.convert("super");
92 suggestions = result.get(tokens.iterator().next());
118 Collection<Token> tokens = queryConverter.convert("Solar");
120 SpellingOptions spellOpts = new SpellingOptions(tokens, searcher.get().getReader());
124 Map<String, Integer> suggestions = result.get(tokens.iterator().next());
131 spellOpts.tokens = queryConverter.convert("super");
134 suggestions = result.get(tokens
[all...]
IndexBasedSpellCheckerTest.java
125 Collection<Token> tokens = queryConverter.convert("documemt");
126 SpellingOptions spellOpts = new SpellingOptions(tokens, reader);
130 Map<String, Integer> suggestions = result.get(spellOpts.tokens.iterator().next());
138 spellOpts.tokens = queryConverter.convert("super");
141 suggestions = result.get(spellOpts.tokens.iterator().next());
145 spellOpts.tokens = queryConverter.convert("document");
148 suggestions = result.get(spellOpts.tokens.iterator().next());
152 spellOpts.tokens = queryConverter.convert("red");
156 suggestions = result.get(spellOpts.tokens.iterator().next());
160 spellOpts.tokens
[all...]
/lucene-3.6.0/solr/core/src/java/org/apache/solr/spelling/
SpellingOptions.java
33 * The tokens to spell check
35 public Collection<Token> tokens; field in class:SpellingOptions
68 public SpellingOptions(Collection<Token> tokens, int count) { argument
69 this.tokens = tokens;
73 public SpellingOptions(Collection<Token> tokens, IndexReader reader) { argument
74 this.tokens = tokens;
78 public SpellingOptions(Collection<Token> tokens, IndexReader reader, int count) { argument
79 this.tokens
85 SpellingOptions(Collection<Token> tokens, IndexReader reader, int count, boolean onlyMorePopular, boolean extendedResults, float accuracy, SolrParams customParams) argument
[all...]
SolrSpellChecker.java
99 public SpellingResult getSuggestions(Collection<Token> tokens, IndexReader reader) throws IOException { argument
100 return getSuggestions(tokens, reader, 1, false, false);
111 public SpellingResult getSuggestions(Collection<Token> tokens, IndexReader reader, int count) throws IOException { argument
112 return getSuggestions(tokens, reader, count, false, false);
124 public SpellingResult getSuggestions(Collection<Token> tokens, IndexReader reader, boolean onlyMorePopular, boolean extendedResults) throws IOException { argument
125 return getSuggestions(tokens, reader, 1, onlyMorePopular, extendedResults);
132 * @param tokens The Tokens to be spell checked.
142 public abstract SpellingResult getSuggestions(Collection<Token> tokens, IndexReader reader, int count, argument
159 return getSuggestions(options.tokens, options.reader, options.count, options.onlyMorePopular, options.extendedResults);
SpellingResult.java
34 private Collection<Token> tokens; field in class:SpellingResult
48 public SpellingResult(Collection<Token> tokens) { argument
49 this.tokens = tokens;
144 * @return The original tokens
147 return tokens;
150 public void setTokens(Collection<Token> tokens) { argument
151 this.tokens = tokens;
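Taken together, the three spelling classes above form one call path: a query converter produces Tokens, SpellingOptions bundles them with an IndexReader, and SolrSpellChecker returns a SpellingResult keyed by those Tokens. A minimal sketch of that path (queryConverter, checker, and reader are assumed to be configured elsewhere, e.g. by a SolrCore or a test fixture; they are not part of these files):

// Sketch only: queryConverter, checker, and reader come from elsewhere.
Collection<Token> tokens = queryConverter.convert("documemt");
SpellingOptions opts = new SpellingOptions(tokens, reader, 2); // up to 2 suggestions
SpellingResult result = checker.getSuggestions(opts);
Map<String, Integer> suggestions = result.get(opts.tokens.iterator().next());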
/lucene-3.6.0/solr/core/src/test/org/apache/solr/handler/
AnalysisRequestHandlerTest.java
61 NamedList<NamedList<Object>> tokens = theTokens.get("name");
62 assertTrue("tokens is null and it shouldn't be", tokens != null);
63 assertTrue("tokens Size: " + tokens.size() + " is not : " + 3, tokens.size() == 3);
66 token = tokens.get("token", 0);
69 token = tokens.get("token", 1);
73 token = tokens.get("token", 2);
77 tokens
[all...]
/lucene-3.6.0/solr/core/src/test/org/apache/solr/analysis/
TestSlowSynonymFilter.java
81 map.add(strings("a b"), tokens("ab"), orig, merge);
82 map.add(strings("a c"), tokens("ac"), orig, merge);
83 map.add(strings("a"), tokens("aa"), orig, merge);
84 map.add(strings("b"), tokens("bb"), orig, merge);
85 map.add(strings("z x c v"), tokens("zxcv"), orig, merge);
86 map.add(strings("x c"), tokens("xc"), orig, merge);
98 map.add(strings("a b"), tokens("ab"), orig, merge);
99 map.add(strings("a b"), tokens("ab"), orig, merge);
105 map.add(strings("zoo"), tokens("zoo"), orig, merge);
107 map.add(strings("zoo"), tokens("zo
334 private List<Token> tokens(String str) { method in class:TestSlowSynonymFilter
381 final Token tokens[]; field in class:TestSlowSynonymFilter.IterTokenStream
390 IterTokenStream(Token... tokens) argument
395 IterTokenStream(Collection<Token> tokens) argument
[all...]
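Each map.add(...) call above registers one synonym rule: a list of strings to match, the replacement tokens, and flags controlling whether the original tokens are kept and whether the rule merges with existing ones. A hedged sketch of the same pattern without the strings()/tokens() test helpers (which appear to just split on whitespace); the constructor argument is an assumption:

// Sketch: register the rule "a b" -> "ab" directly.
SlowSynonymMap map = new SlowSynonymMap(true);                  // assumed: ignoreCase
List<String> match = Arrays.asList("a", "b");                   // phrase to match
List<Token> replacement = Arrays.asList(new Token("ab", 0, 2)); // replacement token
map.add(match, replacement, false /* keep orig */, true /* merge */);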
TestTrimFilter.java
91 final Token tokens[]; field in class:TestTrimFilter.IterTokenStream
100 public IterTokenStream(Token... tokens) { argument
102 this.tokens = tokens;
105 public IterTokenStream(Collection<Token> tokens) { argument
106 this(tokens.toArray(new Token[tokens.size()]));
111 if (index >= tokens.length)
115 Token token = tokens[index++];
/lucene-3.6.0/lucene/core/src/test/org/apache/lucene/index/
TestPositionBasedTermVectorMapper.java
25 protected String[] tokens; field in class:TestPositionBasedTermVectorMapper
33 tokens = new String[]{"here", "is", "some", "text", "to", "test", "extra"};
34 thePositions = new int[tokens.length][];
35 offsets = new TermVectorOffsetInfo[tokens.length][];
38 for (int i = 0; i < tokens.length - 1; i++)
50 thePositions[tokens.length - 1] = new int[1];
51 thePositions[tokens.length - 1][0] = 0;//put this at the same position as "here"
52 offsets[tokens.length - 1] = new TermVectorOffsetInfo[1];
53 offsets[tokens.length - 1][0] = new TermVectorOffsetInfo(0, 1);
59 mapper.setExpectations("test", tokens
[all...]
/lucene-3.6.0/lucene/backwards/src/test/org/apache/lucene/index/
TestPositionBasedTermVectorMapper.java
25 protected String[] tokens; field in class:TestPositionBasedTermVectorMapper
33 tokens = new String[]{"here", "is", "some", "text", "to", "test", "extra"};
34 thePositions = new int[tokens.length][];
35 offsets = new TermVectorOffsetInfo[tokens.length][];
38 for (int i = 0; i < tokens.length - 1; i++)
50 thePositions[tokens.length - 1] = new int[1];
51 thePositions[tokens.length - 1][0] = 0;//put this at the same position as "here"
52 offsets[tokens.length - 1] = new TermVectorOffsetInfo[1];
53 offsets[tokens.length - 1][0] = new TermVectorOffsetInfo(0, 1);
59 mapper.setExpectations("test", tokens
[all...]
/lucene-3.6.0/lucene/test-framework/src/java/org/apache/lucene/analysis/
CannedTokenStream.java
31 private final Token[] tokens; field in class:CannedTokenStream
38 public CannedTokenStream(Token[] tokens) { argument
39 this.tokens = tokens;
44 if (upto < tokens.length) {
45 final Token token = tokens[upto++];
/lucene-3.6.0/lucene/contrib/misc/src/java/org/apache/lucene/index/
TermVectorAccessor.java
69 private List<String> tokens; field in class:TermVectorAccessor
90 if (tokens == null) {
91 tokens = new ArrayList<String>(500);
95 tokens.clear();
107 tokens.add(termEnum.term().text());
126 mapper.setExpectations(field, tokens.size(), false, !mapper.isIgnoringPositions());
127 for (int i = 0; i < tokens.size(); i++) {
128 mapper.map(tokens.get(i), frequencies.get(i).intValue(), (TermVectorOffsetInfo[]) null, positions.get(i));
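TermVectorAccessor above shows the TermVectorMapper protocol from the producer side: one setExpectations(field, numTerms, storeOffsets, storePositions) call, followed by one map(term, frequency, offsets, positions) call per term. A small sketch driving a PositionBasedTermVectorMapper by hand, with illustrative values:

// Sketch: feed a mapper directly, mirroring the calls seen above.
PositionBasedTermVectorMapper mapper = new PositionBasedTermVectorMapper();
mapper.setExpectations("test", 2, false, true); // no offsets, with positions
mapper.map("here", 1, null, new int[] { 0 });
mapper.map("extra", 1, null, new int[] { 0 });  // same position as "here"
// The mapper now groups both terms under position 0 for field "test".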
/lucene-3.6.0/lucene/contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/
TestShingleMatrixFilter.java
69 LinkedList<Token> tokens;
73 tokens = new LinkedList<Token>();
74 tokens.add(createToken("please", 0, 6));
75 tokens.add(createToken("divide", 7, 13));
76 tokens.add(createToken("this", 14, 18));
77 tokens.add(createToken("sentence", 19, 27));
78 tokens.add(createToken("into", 28, 32));
79 tokens.add(createToken("shingles", 33, 39));
81 tls = new TokenListStream(tokens);
105 LinkedList<Token> tokens;
489 private Collection<Token> tokens; field in class:TestShingleMatrixFilter.TokenListStream
497 TokenListStream(Collection<Token> tokens) argument
[all...]
/lucene-3.6.0/lucene/core/src/test/org/apache/lucene/analysis/
TestCachingTokenFilter.java
36 private String[] tokens = new String[] {"term1", "term2", "term3", "term2"}; field in class:TestCachingTokenFilter
50 if (index == tokens.length) {
54 termAtt.append(tokens[index++]);
66 // 1) we consume all tokens twice before we add the doc to the index
71 // 2) now add the document to the index and verify if all tokens are indexed
93 // 3) reset stream and consume tokens again
104 assertTrue(count < tokens.length);
105 assertEquals(tokens[count], termAtt.toString());
109 assertEquals(tokens.length, count);
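The numbered comments in this test spell out the CachingTokenFilter contract: the first full consumption fills the cache, and reset() replays the identical token sequence. A minimal sketch of that pattern (analyzer and the field name are placeholders, not taken from this test):

// Sketch: consume once to fill the cache, reset, then replay.
TokenStream stream = new CachingTokenFilter(
    analyzer.tokenStream("field", new StringReader("term1 term2 term3")));
while (stream.incrementToken()) { }  // first pass fills the cache
stream.reset();                      // rewind to the cached tokens
while (stream.incrementToken()) { }  // second pass replays the same tokens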
/lucene-3.6.0/lucene/backwards/src/test/org/apache/lucene/analysis/
TestCachingTokenFilter.java
35 private String[] tokens = new String[] {"term1", "term2", "term3", "term2"}; field in class:TestCachingTokenFilter
49 if (index == tokens.length) {
53 termAtt.append(tokens[index++]);
65 // 1) we consume all tokens twice before we add the doc to the index
70 // 2) now add the document to the index and verify if all tokens are indexed
92 // 3) reset stream and consume tokens again
103 assertTrue(count < tokens.length);
104 assertEquals(tokens[count], termAtt.toString());
108 assertEquals(tokens.length, count);
/lucene-3.6.0/solr/core/src/java/org/apache/solr/handler/
AnalysisRequestHandlerBase.java
81 * @return NamedList containing the tokens produced by analyzing the given value
117 List<AttributeSource> tokens = analyzeTokenStream(tokenStream);
119 namedList.add(tokenStream.getClass().getName(), convertTokensToNamedLists(tokens, context));
121 ListBasedTokenStream listBasedTokenStream = new ListBasedTokenStream(tokens);
124 for (final AttributeSource tok : tokens) {
128 tokens = analyzeTokenStream(tokenStream);
129 namedList.add(tokenStream.getClass().getName(), convertTokensToNamedLists(tokens, context));
130 listBasedTokenStream = new ListBasedTokenStream(tokens);
137 * Analyzes the given text using the given analyzer and returns the produced tokens.
153 * Analyzes the given text using the given analyzer and returns the produced tokens
329 private final List<AttributeSource> tokens; field in class:AnalysisRequestHandlerBase.ListBasedTokenStream
337 ListBasedTokenStream(List<AttributeSource> tokens) argument
[all...]
/lucene-3.6.0/solr/solrj/src/java/org/apache/solr/client/solrj/response/
AnalysisResponseBase.java
69 List<NamedList> tokens = (List<NamedList>) phaseEntry.getValue();
70 for (NamedList token : tokens) {
113 * A phase in the analysis process. The phase holds the tokens produced in this phase and the name of the class that
119 private List<TokenInfo> tokens = new ArrayList<TokenInfo>(); field in class:AnalysisResponseBase.AnalysisPhase
135 tokens.add(tokenInfo);
139 * Returns a list of tokens which represent the token stream produced in this phase.
141 * @return A list of tokens which represent the token stream produced in this phase.
144 return tokens;
174 * @param match Indicates whether this token matches one of the query tokens.
243 * Returns whether this token matches one of the query tokens (i
[all...]
/lucene-3.6.0/solr/core/src/java/org/apache/solr/update/processor/
TextProfileSignature.java
36 * <li>split the text into tokens (all consecutive non-whitespace characters),</li>
37 * <li>discard tokens equal or shorter than MIN_TOKEN_LEN (default 2 characters),</li>
38 * <li>sort the list of tokens by decreasing frequency,</li>
39 * <li>round down the counts of tokens to the nearest multiple of QUANT
43 * means that tokens with frequency 1 are always discarded).</li>
44 * <li>tokens whose frequency after quantization falls below QUANT are discarded.</li>
45 * <li>create a list of tokens and their quantized frequency, separated by spaces,
68 HashMap<String, Token> tokens = new HashMap<String, Token>();
81 Token tok = tokens.get(s);
84 tokens
[all...]
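The numbered steps above reduce a document to a quantized term-frequency profile, which is then hashed for near-duplicate detection. A simplified, self-contained sketch of the profile construction (the real class also lowercases, strips punctuation, and MD5-hashes the profile string; the names here mirror the description, not the actual fields):

import java.util.*;

// Simplified sketch of the quantization steps described above.
class ProfileSketch {
  static String profile(String text, int minTokenLen, float quantRate) {
    Map<String, Integer> freq = new HashMap<String, Integer>();
    for (String tok : text.split("\\s+")) {
      if (tok.length() > minTokenLen) {          // discard short tokens
        Integer c = freq.get(tok);
        freq.put(tok, c == null ? 1 : c + 1);
      }
    }
    int maxFreq = 0;
    for (int c : freq.values()) maxFreq = Math.max(maxFreq, c);
    int quant = Math.max(2, Math.round(maxFreq * quantRate));
    List<Map.Entry<String, Integer>> sorted =
        new ArrayList<Map.Entry<String, Integer>>(freq.entrySet());
    Collections.sort(sorted, new Comparator<Map.Entry<String, Integer>>() {
      public int compare(Map.Entry<String, Integer> a, Map.Entry<String, Integer> b) {
        return b.getValue() - a.getValue();      // decreasing frequency
      }
    });
    StringBuilder sb = new StringBuilder();
    for (Map.Entry<String, Integer> e : sorted) {
      int q = (e.getValue() / quant) * quant;    // round down to multiple of QUANT
      if (q < quant) continue;                   // drop tokens below QUANT
      if (sb.length() > 0) sb.append(' ');
      sb.append(e.getKey()).append(' ').append(q);
    }
    return sb.toString();                        // this profile string gets hashed
  }
}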
/lucene-3.6.0/lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/
TokenGroup.java
26 * One, or several overlapping tokens, along with the score(s) and the scope of
32 Token [] tokens=new Token[MAX_NUM_TOKENS_PER_GROUP]; field in class:TokenGroup
72 tokens[numTokens] = token;
93 return tokens[index];
113 * @return the number of tokens in this group
127 * @return all tokens' scores summed up
TokenSources.java
151 // an object used to iterate across an array of tokens
153 Token tokens[];
163 StoredTokenStream(Token tokens[]) {
164 this.tokens = tokens;
172 if (currentToken >= tokens.length) {
175 Token token = tokens[currentToken++];
181 || tokens[currentToken - 1].startOffset() > tokens[currentToken - 2]
204 // try get the token position info to speed up assembly of tokens int
[all...]
/lucene-3.6.0/lucene/contrib/icu/src/test/org/apache/lucene/analysis/icu/segmentation/
TestLaoBreakIterator.java
43 private void assertBreaksTo(BreakIterator iterator, String sourceText, String tokens[]) { argument
49 for (int i = 0; i < tokens.length; i++) {
57 assertEquals(tokens[i], new String(text, start, end - start));
/lucene-3.6.0/lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/
HighlighterPhraseTest.java
298 private Token[] tokens; field in class:HighlighterPhraseTest.TokenStreamSparse
313 if (this.i >= this.tokens.length) {
317 termAttribute.setEmpty().append(this.tokens[i]);
318 offsetAttribute.setOffset(this.tokens[i].startOffset(), this.tokens[i]
320 positionIncrementAttribute.setPositionIncrement(this.tokens[i]
328 this.tokens = new Token[] {
333 this.tokens[3].setPositionIncrement(2);
338 private Token[] tokens; field in class:HighlighterPhraseTest.TokenStreamConcurrent
353 if (this.i >= this.tokens
[all...]
/lucene-3.6.0/solr/core/src/test/org/apache/solr/handler/component/
DummyCustomParamSpellChecker.java
50 public SpellingResult getSuggestions(Collection<Token> tokens, IndexReader reader, int count, boolean onlyMorePopular, boolean extendedResults) throws IOException { argument
51 return getSuggestions(new SpellingOptions(tokens, reader, count, onlyMorePopular, extendedResults, 0, null));
/lucene-3.6.0/lucene/contrib/analyzers/common/src/java/org/apache/lucene/analysis/wikipedia/
WikipediaTokenizer.java
95 * Only output tokens
99 * Only output untokenized tokens, which are tokens that would normally be split into several tokens
107 * This flag is used to indicate that the produced "Token" would, if {@link #TOKENS_ONLY} was used, produce multiple tokens.
117 private Iterator<AttributeSource.State> tokens = null; field in class:WikipediaTokenizer
191 if (tokens != null && tokens.hasNext()){
192 AttributeSource.State state = tokens.next();
210 //collapse into a single token, add it to tokens AN
[all...]
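The flags documented above select what the tokenizer emits: the split tokens, the original untokenized spans, or both. A hedged usage sketch (the constructor signature and the input text are assumptions based on the 3.6 API, not taken from this file):

// Sketch: emit both split tokens and untokenized spans from wiki markup.
WikipediaTokenizer tf = new WikipediaTokenizer(
    new StringReader("[[Main Page|the main page]] and plain text"),
    WikipediaTokenizer.BOTH, Collections.<String>emptySet());
CharTermAttribute termAtt = tf.addAttribute(CharTermAttribute.class);
while (tf.incrementToken()) {
  System.out.println(termAtt.toString());  // each produced token
}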

Completed in 1862 milliseconds
