Searched refs:tf (Results 1 - 25 of 52) sorted by relevance

/lucene-3.6.0/lucene/contrib/analyzers/common/src/test/org/apache/lucene/analysis/wikipedia/
WikipediaTokenizerTest.java
43 WikipediaTokenizer tf = new WikipediaTokenizer(new StringReader(text));
44 assertTokenStreamContents(tf,
65 WikipediaTokenizer tf = new WikipediaTokenizer(new StringReader(test));
66 assertTokenStreamContents(tf,
106 WikipediaTokenizer tf = new WikipediaTokenizer(new StringReader(LINK_PHRASES));
107 checkLinkPhrases(tf);
110 private void checkLinkPhrases(WikipediaTokenizer tf) throws IOException { argument
111 assertTokenStreamContents(tf,
119 WikipediaTokenizer tf = new WikipediaTokenizer(new StringReader(test));
120 assertTokenStreamContents(tf,
[all...]
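The WikipediaTokenizerTest hits above all follow the same pattern: wrap the wiki markup in a StringReader, construct a WikipediaTokenizer over it, and check the emitted tokens with assertTokenStreamContents. A minimal sketch of that pattern outside the test harness, consuming the stream by hand (the markup string here is made up):

import java.io.StringReader;

import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.wikipedia.WikipediaTokenizer;

public class WikipediaTokenizerSketch {
  public static void main(String[] args) throws Exception {
    String text = "[[Link to something]] plain text";   // hypothetical wiki markup
    WikipediaTokenizer tf = new WikipediaTokenizer(new StringReader(text));
    CharTermAttribute term = tf.addAttribute(CharTermAttribute.class);
    tf.reset();
    while (tf.incrementToken()) {        // one token per word/markup unit
      System.out.println(term.toString());
    }
    tf.end();
    tf.close();
  }
}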
/lucene-3.6.0/lucene/contrib/spellchecker/src/test/org/apache/lucene/search/suggest/fst/
FSTLookupTest.java
41 public static TermFreq tf(String t, long v) { method in class:FSTLookupTest
56 tf("one", 1),
57 tf("oneness", 2),
58 tf("onerous", 2),
59 tf("onesimus", 2),
60 tf("two", 2),
61 tf("twofold", 2),
62 tf("twonk", 2),
63 tf("thrive", 2),
64 tf("throug
[all...]
FSTCompletionTest.java
31 public static TermFreq tf(String t, int v) { method in class:FSTCompletionTest
42 for (TermFreq tf : evalKeys()) {
43 builder.add(tf.term, (int) tf.v);
51 tf("one", 0),
52 tf("oneness", 1),
53 tf("onerous", 1),
54 tf("onesimus", 1),
55 tf("two", 1),
56 tf("twofol
[all...]
/lucene-3.6.0/lucene/contrib/misc/src/test/org/apache/lucene/misc/
SweetSpotSimilarityTest.java
157 // tf equal
162 assertEquals("tf: i="+i,
163 d.tf(i), s.tf(i), 0.0f);
166 // tf higher
171 assertTrue("tf: i="+i+" : d="+d.tf(i)+
172 " < s="+s.tf(i),
173 d.tf(i) < s.tf(
[all...]
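The SweetSpotSimilarityTest hits compare two tf curves point by point: d is a DefaultSimilarity baseline and s a SweetSpotSimilarity, with one block asserting the curves are equal and another asserting the SweetSpot value is higher. A small sketch that just prints both curves for low frequencies, mirroring those assertions (the loop bound is arbitrary and no baseline factors are configured):

import org.apache.lucene.misc.SweetSpotSimilarity;
import org.apache.lucene.search.DefaultSimilarity;

public class TfCurveSketch {
  public static void main(String[] args) {
    DefaultSimilarity d = new DefaultSimilarity();
    SweetSpotSimilarity s = new SweetSpotSimilarity();
    // Similarity.tf(int) delegates to tf(float), so the int overload used by
    // the test works on both implementations.
    for (int i = 1; i <= 10; i++) {
      System.out.println("freq=" + i + "  default=" + d.tf(i) + "  sweetspot=" + s.tf(i));
    }
  }
}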
/lucene-3.6.0/lucene/contrib/queries/src/test/org/apache/lucene/search/
TermsFilterTest.java
65 TermsFilter tf=new TermsFilter();
66 tf.addTerm(new Term(fieldName,"19"));
67 FixedBitSet bits = (FixedBitSet)tf.getDocIdSet(reader);
70 tf.addTerm(new Term(fieldName,"20"));
71 bits = (FixedBitSet)tf.getDocIdSet(reader);
74 tf.addTerm(new Term(fieldName,"10"));
75 bits = (FixedBitSet)tf.getDocIdSet(reader);
78 tf.addTerm(new Term(fieldName,"00"));
79 bits = (FixedBitSet)tf.getDocIdSet(reader);
105 TermsFilter tf
[all...]
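Taken together, the TermsFilterTest hits show the whole TermsFilter API in miniature: create the filter, addTerm() one Term at a time, then ask it for a DocIdSet against an IndexReader. A short sketch of the same filter used in an actual search instead of a direct getDocIdSet() call (the index path and the "id" field are illustrative):

import java.io.File;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TermsFilter;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class TermsFilterSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = FSDirectory.open(new File("/path/to/index")); // hypothetical index
    IndexReader reader = IndexReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);

    // Keep only documents whose "id" field holds one of the added values,
    // the same addTerm() calls the test issues before checking the bit set.
    TermsFilter tf = new TermsFilter();
    tf.addTerm(new Term("id", "19"));
    tf.addTerm(new Term("id", "20"));

    TopDocs hits = searcher.search(new MatchAllDocsQuery(), tf, 10);
    System.out.println("matching docs: " + hits.totalHits);

    searcher.close();
    reader.close();
    dir.close();
  }
}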
/lucene-3.6.0/lucene/contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/
TestCompoundWordTokenFilter.java
50 HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(TEST_VERSION_CURRENT,
56 assertTokenStreamContents(tf,
70 HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(TEST_VERSION_CURRENT,
75 assertTokenStreamContents(tf,
91 HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(
99 assertTokenStreamContents(tf,
103 tf = new HyphenationCompoundWordTokenFilter(
111 assertTokenStreamContents(tf,
115 tf = new HyphenationCompoundWordTokenFilter(
123 assertTokenStreamContents(tf,
[all...]
/lucene-3.6.0/solr/core/src/test/org/apache/solr/analysis/
TestDelimitedPayloadTokenFilterFactory.java
44 DelimitedPayloadTokenFilter tf = factory.create(input);
45 tf.reset();
46 while (tf.incrementToken()){
47 PayloadAttribute payAttr = tf.getAttribute(PayloadAttribute.class);
67 DelimitedPayloadTokenFilter tf = factory.create(input);
68 tf.reset();
69 while (tf.incrementToken()){
70 PayloadAttribute payAttr = tf.getAttribute(PayloadAttribute.class);
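The payload-factory test shows the standard way to read payloads back out of a token stream: reset(), loop on incrementToken(), and pull the PayloadAttribute for each token. A minimal sketch of the same loop without the Solr factory, building the DelimitedPayloadTokenFilter directly (the input text and the FloatEncoder choice are assumptions):

import java.io.StringReader;

import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter;
import org.apache.lucene.analysis.payloads.FloatEncoder;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.util.Version;

public class PayloadSketch {
  public static void main(String[] args) throws Exception {
    // "term|payload" pairs; '|' is the filter's default delimiter.
    StringReader in = new StringReader("the|0.1 quick|2.0 fox|3.5");
    DelimitedPayloadTokenFilter tf = new DelimitedPayloadTokenFilter(
        new WhitespaceTokenizer(Version.LUCENE_36, in), '|', new FloatEncoder());
    CharTermAttribute term = tf.getAttribute(CharTermAttribute.class);
    PayloadAttribute payAttr = tf.getAttribute(PayloadAttribute.class);
    tf.reset();
    while (tf.incrementToken()) {
      byte[] payload = payAttr.getPayload().getData();
      System.out.println(term.toString() + " -> " + payload.length + " payload bytes");
    }
    tf.end();
    tf.close();
  }
}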
TestSynonymMap.java
248 BaseTokenizerFactory tf = new NGramTokenizerFactory();
252 tf.init( args );
258 SlowSynonymFilterFactory.parseRules( rules, synMap, "=>", ",", true, tf);
/lucene-3.6.0/lucene/contrib/spellchecker/src/test/org/apache/lucene/search/suggest/
TestHighFrequencyDictionary.java
37 BytesRefIterator tf = dictionary.getWordsIterator();
38 assertNull(tf.getComparator());
39 assertNull(tf.next());
LookupBenchmarkTest.java
193 for (TermFreq tf : benchmarkInput) {
194 input.add(tf.term.utf8ToString().substring(0, Math.min(tf.term.length,
/lucene-3.6.0/lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/
TermsFilterBuilder.java
56 TermsFilter tf = new TermsFilter();
75 tf.addTerm(term);
85 return tf;
/lucene-3.6.0/lucene/core/src/java/org/apache/lucene/search/
SimilarityDelegator.java
51 public float tf(float freq) { method in class:SimilarityDelegator
52 return delegee.tf(freq);
DefaultSimilarity.java
51 public float tf(float freq) { method in class:DefaultSimilarity
TermQuery.java
144 int tf = 0;
149 tf = termDocs.freq();
154 tfExplanation.setValue(similarity.tf(tf));
155 tfExplanation.setDescription("tf(termFreq("+term+")="+tf+")");
Similarity.java
274 * <A HREF="#formula_tf"><font color="#993399">tf(t in d)</font></A> &nbsp;&middot;&nbsp;
299 * <b><i>tf(t in d)</i></b>
303 * Note that <i>tf(t in q)</i> is assumed to be <i>1</i> and therefore it does not appear in this equation,
307 * The default computation for <i>tf(t in d)</i> in
308 * {@link org.apache.lucene.search.DefaultSimilarity#tf(float) DefaultSimilarity} is:
314 * {@link org.apache.lucene.search.DefaultSimilarity#tf(float) tf(t in d)} &nbsp; = &nbsp;
733 * <p>The default implementation calls {@link #tf(float)}.
738 public float tf(int freq) { method in class:Similarity
739 return tf((floa
770 public abstract float tf(float freq); method in class:Similarity
[all...]
TermScorer.java
62 scoreCache[i] = getSimilarity().tf(i) * weightValue;
128 float raw = // compute tf(f)*weight
131 : getSimilarity().tf(freq)*weightValue; // cache miss
PhraseScorer.java
88 float raw = getSimilarity().tf(freq) * value; // raw score
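The core-search hits are the tf() half of Lucene's tf·idf formula: DefaultSimilarity defines tf(freq) = sqrt(freq), SimilarityDelegator just forwards to its delegee, TermQuery reports the value in its Explanation, and TermScorer/PhraseScorer multiply tf(freq) into the raw score (TermScorer even caches tf(i) * weightValue for small frequencies). A minimal sketch of customizing that curve by subclassing DefaultSimilarity; the sublinear 1 + log(freq) shape is only an example choice:

import org.apache.lucene.search.DefaultSimilarity;

public class LogTfSimilarity extends DefaultSimilarity {
  /** Dampen term frequency: 1 + log(freq) instead of the default sqrt(freq). */
  @Override
  public float tf(float freq) {
    return freq > 0 ? (float) (1.0 + Math.log(freq)) : 0.0f;
  }
}

It would be plugged in with IndexSearcher.setSimilarity(new LogTfSimilarity()) at search time (and the matching setter on IndexWriterConfig at index time), so the score cache visible in the TermScorer hit is built from the new curve.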
/lucene-3.6.0/lucene/contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/
TestHindiNormalizer.java
67 TokenFilter tf = new HindiNormalizationFilter(tokenizer);
68 assertTokenStreamContents(tf, new String[] { output });
TestHindiStemmer.java
89 TokenFilter tf = new HindiStemFilter(tokenizer);
90 assertTokenStreamContents(tf, new String[] { output });
/lucene-3.6.0/lucene/contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/
TestIndicNormalizer.java
52 TokenFilter tf = new IndicNormalizationFilter(tokenizer);
53 assertTokenStreamContents(tf, new String[] { output });
/lucene-3.6.0/lucene/core/src/java/org/apache/lucene/search/spans/
SpanScorer.java
95 float raw = getSimilarity().tf(freq) * value; // raw score
112 tfExplanation.setValue(getSimilarity().tf(phraseFreq));
113 tfExplanation.setDescription("tf(phraseFreq=" + phraseFreq + ")");
/lucene-3.6.0/solr/core/src/java/org/apache/solr/analysis/
FSTSynonymFilterFactory.java
67 String tf = args.get("tokenizerFactory");
69 final TokenizerFactory factory = tf == null ? null : loadTokenizerFactory(loader, tf, args);
SlowSynonymFilterFactory.java
57 String tf = args.get("tokenizerFactory");
59 if( tf != null ){
60 tokFactory = loadTokenizerFactory( loader, tf, args );
/lucene-3.6.0/lucene/contrib/misc/src/java/org/apache/lucene/index/
IndexSorter.java
109 final int tf = original.freq(); // buffer tf & positions
110 out.writeVInt(tf);
112 for (int j = tf; j > 0; j--) { // delta encode positions
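The IndexSorter hit shows the classic postings trick: write the term frequency as a VInt, then write each position as a delta from the previous one so that small gaps stay small on disk. A standalone sketch of that encode/decode round trip using Lucene's store API (the RAMDirectory and file name are just demo plumbing):

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

public class DeltaPositionsSketch {
  public static void main(String[] args) throws Exception {
    int[] positions = {3, 9, 10, 42};     // positions of one term in one document
    Directory dir = new RAMDirectory();

    // Encode: tf as a VInt, then each position as a delta from the previous one.
    IndexOutput out = dir.createOutput("postings");
    out.writeVInt(positions.length);      // the buffered tf
    int last = 0;
    for (int pos : positions) {
      out.writeVInt(pos - last);
      last = pos;
    }
    out.close();

    // Decode: read tf, then re-accumulate the deltas.
    IndexInput in = dir.openInput("postings");
    int tf = in.readVInt();
    int pos = 0;
    for (int j = tf; j > 0; j--) {
      pos += in.readVInt();
      System.out.println("position " + pos);
    }
    in.close();
    dir.close();
  }
}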
/lucene-3.6.0/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/
MoreLikeThis.java
58 * or at all. Since you're trying to maximize a tf*idf score, you're probably most interested
59 * in terms with a high tf. Choosing a tf threshold even as low as two or three will radically
716 * Create a PriorityQueue from a word->tf map.
732 int tf = words.get(word).x; // term freq in the source doc
733 if (minTermFreq > 0 && tf < minTermFreq) {
760 float score = tf * idf;
768 Integer.valueOf(tf)});
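The MoreLikeThis hits sketch its term-selection loop: look up each candidate term's frequency in the source document, skip it when it falls below minTermFreq, and otherwise rank it by tf * idf. A simplified standalone version of that filter; the frequency map and idf values are made-up stand-ins for what MoreLikeThis reads from the index:

import java.util.HashMap;
import java.util.Map;

public class InterestingTermsSketch {
  public static void main(String[] args) {
    // term -> frequency in the source document (stand-in for the word->tf map)
    Map<String, Integer> words = new HashMap<String, Integer>();
    words.put("lucene", 7);
    words.put("the", 42);
    words.put("tokenizer", 2);

    // term -> inverse document frequency (stand-in for values derived from docFreq)
    Map<String, Float> idf = new HashMap<String, Float>();
    idf.put("lucene", 3.2f);
    idf.put("the", 0.01f);
    idf.put("tokenizer", 4.1f);

    int minTermFreq = 3; // plays the role of MoreLikeThis.setMinTermFreq()

    for (Map.Entry<String, Integer> e : words.entrySet()) {
      int tf = e.getValue();
      if (minTermFreq > 0 && tf < minTermFreq) {
        continue;                         // too rare in the source doc to be interesting
      }
      float score = tf * idf.get(e.getKey());
      System.out.println(e.getKey() + " -> " + score);
    }
  }
}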

Completed in 8268 milliseconds
