/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Base test class for testing Unicode collation.
*/
/**
* Convenience method to perform the same function as CollationKeyFilter.
*
* @param keyBits the result from
* collator.getCollationKey(original).toByteArray()
* @return The encoded collation key for the original String
*/
// Ensure that the backing char[] array is large enough to hold the encoded
// Binary String
char[] encodedBegArray = new char[encodedLength];
return new String(encodedBegArray);
}
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a TermRangeFilter with a Farsi
// Collator (or an Arabic one for the case when a Farsi searcher is not
// supported).
}
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a TermRangeQuery with a Farsi
// Collator (or an Arabic one for the case when Farsi is not supported).
}
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
// orders the U+0698 character before the U+0633 character, so the single
// index Term below should NOT be returned by a TermRangeQuery
// with a Farsi Collator (or an Arabic one for the case when Farsi is
// not supported).
csrq = new TermRangeQuery
farsiIndex.close();
}
// Test using various international locales with accented characters (which
// sort differently depending on locale)
//
// Copied (and slightly modified) from
// org.apache.lucene.search.TestSort.testInternationalSort()
//
// TODO: this test is really fragile. There are already 3 different cases,
// depending upon the Unicode version.
// document data:
// the tracer field is used to determine which document was hit
// tracer contents US France Sweden (sv_SE) Denmark (da_DK)
{ "A", "x", "p\u00EAche", "p\u00EAche", "p\u00EAche", "p\u00EAche" },
{ "B", "y", "HAT", "HAT", "HAT", "HAT" },
{ "C", "x", "p\u00E9ch\u00E9", "p\u00E9ch\u00E9", "p\u00E9ch\u00E9", "p\u00E9ch\u00E9" },
{ "D", "y", "HUT", "HUT", "HUT", "HUT" },
{ "E", "x", "peach", "peach", "peach", "peach" },
{ "F", "y", "H\u00C5T", "H\u00C5T", "H\u00C5T", "H\u00C5T" },
{ "G", "x", "sin", "sin", "sin", "sin" },
{ "H", "y", "H\u00D8T", "H\u00D8T", "H\u00D8T", "H\u00D8T" },
{ "I", "x", "s\u00EDn", "s\u00EDn", "s\u00EDn", "s\u00EDn" },
{ "J", "y", "HOT", "HOT", "HOT", "HOT" },
};
}
indexStore.close();
}
// Make sure the documents returned by the search match the expected list
// Copied from TestSort.java
for (int i = 0 ; i < n ; ++i) {
for (int j = 0 ; j < v.length ; ++j) {
}
}
}
int numTestPoints = 100;
// create a map<String,SortKey> up front.
// then with multiple threads, generate sort keys for all the keys in the map
// and ensure they are the same as the ones we produced in serial fashion.
for (int i = 0; i < numTestPoints; i++) {
// ensure we make a copy of the actual bytes too
}
for (int i = 0; i < numThreads; i++) {
public void run() {
try {
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
};
}
for (int i = 0; i < numThreads; i++) {
}
for (int i = 0; i < numThreads; i++) {
}
}
}