// OnDiskMergeImporter.java, revision 3f3becc26b7631dad31b5aee4290fc61cf552815
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
 * or http://forgerock.org/license/CDDLv1.0.html.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at legal-notices/CDDLv1_0.txt.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information:
 *      Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 *      Copyright 2015 ForgeRock AS.
 */
package org.opends.server.backends.pluggable;
import static org.opends.messages.BackendMessages.*;
import static org.opends.server.util.DynamicConstants.*;
import static org.opends.server.util.StaticUtils.*;

import java.nio.file.FileAlreadyExistsException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.atomic.AtomicInteger;

import org.forgerock.i18n.slf4j.LocalizedLogger;
import org.forgerock.opendj.config.server.ConfigException;
import org.opends.server.admin.std.meta.BackendIndexCfgDefn.IndexType;
import org.opends.server.admin.std.server.BackendIndexCfg;
import org.opends.server.admin.std.server.PluggableBackendCfg;
import org.opends.server.backends.RebuildConfig;
import org.opends.server.backends.pluggable.AttributeIndex.MatchingRuleIndex;
import org.opends.server.backends.pluggable.CursorTransformer.SequentialCursorAdapter;
import org.opends.server.backends.pluggable.DN2ID.TreeVisitor;
import org.opends.server.backends.pluggable.ImportLDIFReader.EntryInformation;
import org.opends.server.backends.pluggable.OnDiskMergeImporter.ExternalSortChunk.InMemorySortedChunk;
import org.opends.server.backends.pluggable.spi.Cursor;
import org.opends.server.backends.pluggable.spi.Importer;
import org.opends.server.backends.pluggable.spi.ReadableTransaction;
import org.opends.server.backends.pluggable.spi.SequentialCursor;
import org.opends.server.backends.pluggable.spi.StorageRuntimeException;
import org.opends.server.backends.pluggable.spi.TreeName;
import org.opends.server.backends.pluggable.spi.UpdateFunction;
import org.opends.server.backends.pluggable.spi.WriteableTransaction;
import org.opends.server.types.DirectoryException;
import org.opends.server.types.InitializationException;
import org.opends.server.types.LDIFImportConfig;
import org.opends.server.types.LDIFImportResult;
/**
 * Imports LDIF data contained in files into the database. Because of the B-Tree structure used in the backend, import
 * is faster when records are inserted in ascending order: this prevents the node locking/re-writing caused by B-Tree
 * inner node splits. This is why import is performed in two phases: the first phase encodes and sorts all records,
 * while the second phase copies the sorted records into the database. Entries are read from an LDIF file by the
 * {@link ImportLDIFReader}. Each entry is then optionally validated and finally imported into a {@link Chunk} by the
 * {@link EntryContainer}. {@link PhaseOneWriteableTransaction#getChunks()} gets all the chunks which will be copied
 * into the database.
 */
final class OnDiskMergeImporter
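  /*
   * Illustrative sketch (not part of the original source): the essence of phase two as described
   * above. Names and signatures are assumptions based on the surrounding fragments; keys arrive in
   * ascending order thanks to the phase-one sort, which is the optimal insertion order for B-Trees.
   */
  private static void copySortedChunksSketch(Map<TreeName, Chunk> sortedChunks, Importer destination)
      throws Exception
  {
    for (Map.Entry<TreeName, Chunk> entry : sortedChunks.entrySet())
    {
      try (SequentialCursor<ByteString, ByteString> cursor = entry.getValue().flip())
      {
        while (cursor.next())
        {
          destination.put(entry.getKey(), cursor.getKey(), cursor.getValue());
        }
      }
    }
  }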
* Shim that allows properly constructing an {@link OnDiskMergeImporter} without polluting {@link ImportStrategy} and
StrategyImpl(ServerContext serverContext, RootContainer rootContainer, PluggableBackendCfg backendCfg)
final int threadCount =
logger.info(NOTE_IMPORT_PHASE_STATS, importer.getTotalTimeInMillis() / 1000, importer.getPhaseOneTimeInMillis()
.getEntriesIgnored());
return indexCount;
final long totalEntries =
final Set<String> indexesToRebuild = selectIndexesToRebuild(entryContainer, rebuildConfig, totalEntries);
visitIndexes(entryContainer, new SpecificIndexFilter(new TrustModifier(dbStorage, true), indexesToRebuild));
new RebuildIndexStrategy(rootContainer.getEntryContainers(), dbStorage, tempDir, bufferPool, sorter,
new ID2EntrySource(entryContainer, dbStorage, PHASE1_REBUILDER_THREAD_NAME, threadCount, totalEntries));
private static final Set<String> selectIndexesToRebuild(EntryContainer entryContainer, RebuildConfig rebuildConfig,
long totalEntries)
case ALL:
case DEGRADED:
case USER_DEFINED:
logger.info(NOTE_REBUILD_START, Utils.joinAsString(", ", rebuildConfig.getRebuildList()), totalEntries);
throws InitializationException
new File(getFileForPath(tmpDirectory != null ? tmpDirectory : DEFAULT_TMP_DIR), backendCfg.getBackendId());
return tempDir;
private static int computeBufferSize(int nbBuffer, long availableMemory) throws InitializationException
return MAX_BUFFER_SIZE;
throw new InitializationException(ERR_IMPORT_LDIF_LACK_MEM.get(availableMemory, nbBuffer * MIN_BUFFER_SIZE
+ REQUIRED_FREE_MEMORY));
return bufferSize;
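  /*
   * Illustrative sketch (not part of the original source) of the sizing rule implied by the
   * fragments above: give each of the nbBuffer buffers an equal share of the available memory,
   * capped at MAX_BUFFER_SIZE, and fail with the same error as above when even MIN_BUFFER_SIZE
   * per buffer cannot be satisfied.
   */
  private static int bufferSizeSketch(int nbBuffer, long availableMemory) throws InitializationException
  {
    final long share = availableMemory / nbBuffer;
    if (share >= MAX_BUFFER_SIZE)
    {
      return MAX_BUFFER_SIZE;
    }
    if (share < MIN_BUFFER_SIZE)
    {
      throw new InitializationException(
          ERR_IMPORT_LDIF_LACK_MEM.get(availableMemory, nbBuffer * MIN_BUFFER_SIZE + REQUIRED_FREE_MEMORY));
    }
    return (int) share;
  }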
* Calculates the amount of available memory which can be used by this import, taking into account whether or not
private long calculateAvailableMemory()
final long totalAvailableMemory;
final long usedMemory = runtime.totalMemory() - runtime.freeMemory() + DB_CACHE_SIZE + REQUIRED_FREE_MEMORY;
private interface Source
interface EntryProcessor
boolean isCancelled();
private final int nbThreads;
LDIFReaderSource(RootContainer rootContainer, LDIFImportConfig importConfig, String threadNameTemplate,
this.executor = Executors.newFixedThreadPool(nbThreads, newThreadFactory(null, threadNameTemplate, true));
Executors.newSingleThreadScheduledExecutor(newThreadFactory(null, PHASE1_REPORTER_THREAD_NAME, true));
catch (DirectoryException e)
catch (Exception e)
return null;
long getEntriesRead()
long getEntriesIgnored()
long getEntriesRejected()
public boolean isCancelled()
private long previousCount;
private long previousTime;
public PhaseOneProgressReporter()
public void run()
private final long nbTotalEntries;
private volatile boolean interrupted;
ID2EntrySource(EntryContainer entryContainer, Importer importer, String threadNameTemplate, int nbThread,
long nbTotalEntries)
this.executor =
new RejectedExecutionHandler()
catch (InterruptedException e)
Executors.newSingleThreadScheduledExecutor(newThreadFactory(null, PHASE1_REPORTER_THREAD_NAME, true));
public void run()
catch (Exception e)
interrupted = true;
public boolean isCancelled()
return interrupted;
private long previousCount;
private long previousTime;
public PhaseOneProgressReporter()
public void run()
final float progressPercent = nbTotalEntries > 0 ? Math.round((100f * entriesRead) / nbTotalEntries) : 0;
private long phaseOneTimeMs;
private long phaseTwoTimeMs;
private OnDiskMergeImporter(String phase2ThreadNameTemplate, AbstractTwoPhaseImportStrategy importStrategy)
public void processEntry(EntryContainer container, EntryID entryID, Entry entry) throws DirectoryException,
tasks.add(importStrategy.newPhaseTwoTask(treeChunk.getKey(), treeChunk.getValue(), progressReporter));
public long getImportedCount()
public long getPhaseOneTimeInMillis()
return phaseOneTimeMs;
public long getPhaseTwoTimeInMillis()
return phaseTwoTimeMs;
public long getTotalTimeInMillis()
private interface ChunkFactory
AbstractTwoPhaseImportStrategy(Collection<EntryContainer> entryContainers, Importer importer, File tempDir,
abstract void validate(EntryContainer entryContainer, EntryID entryID, Entry entry) throws DirectoryException;
abstract Callable<Void> newPhaseTwoTask(TreeName treeName, Chunk chunk, PhaseTwoProgressReporter progressReporter);
return new DN2IDImporterTask(progressReporter, importer, tempDir, bufferPool, entryContainer.getDN2ID(), chunk,
return null;
 * Records are sorted in an {@link ExternalSortChunk} before being imported into the {@link Importer}, except for
 * id2entry, which is directly copied into the database.
SortAndImportWithoutDNValidation(Collection<EntryContainer> entryContainers, Importer importer, File tempDir,
 * This strategy performs two validations: it ensures that there is no duplicate entry (entry with the same DN) and
 * that each entry has an existing parent. To do so, the dn2id tree is directly imported into the database in addition
 * to id2entry.
private static final class SortAndImportWithDNValidation extends AbstractTwoPhaseImportStrategy implements
SortAndImportWithDNValidation(Collection<EntryContainer> entryContainers, Importer importer, File tempDir,
public void validate(EntryContainer entryContainer, EntryID entryID, Entry entry) throws DirectoryException
throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS, ERR_ADD_ENTRY_ALREADY_EXISTS.get(entry));
throw new UnsupportedOperationException();
throw new UnsupportedOperationException();
visitIndexes(entryContainer, new SpecificIndexFilter(new TrustModifier(importer, false), indexNames));
visitIndexes(entryContainer, new SpecificIndexFilter(new TrustModifier(importer, true), indexNames));
return nullChunk();
public Callable<Void> newPhaseTwoTask(TreeName treeName, Chunk chunk, PhaseTwoProgressReporter progressReporter)
public void validate(EntryContainer entryContainer, EntryID entryID, Entry entry) throws DirectoryException
final ExecutorService executor = Executors.newCachedThreadPool(newThreadFactory(null, threadNameTemplate, true));
 * A {@link WriteableTransaction} that delegates the storage of data to {@link Chunk}s, which are created on demand
 * for each {@link TreeName} through the provided {@link ChunkFactory}. Once there is no more data to import, call
 * {@link #getChunks()} to get the resulting {@link Chunk}s containing the sorted data to import into the database.
 * {@link #put(TreeName, ByteSequence, ByteSequence)} is thread-safe. Since there is only one {@link Chunk} created
 * per {@link TreeName}, the {@link Chunk#put(ByteSequence, ByteSequence)} method of the returned {@link Chunk} must
 * be thread-safe as well.
return chunks;
 * Stores a record into a {@link Chunk}, creating one if none exists for the given treeName. This method is
 * thread-safe.
catch (Exception e)
throw new StorageRuntimeException(e);
return alreadyExistingChunk;
return alreadyExistingChunk;
return newChunk;
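  /*
   * Illustrative sketch (not part of the original source) of the lock-free get-or-create pattern
   * suggested by the fragments above: ConcurrentMap.putIfAbsent() guarantees that concurrent
   * callers agree on a single Chunk per TreeName. ChunkFactory.newChunk() is an assumed method
   * name, not confirmed by the fragments.
   */
  private static Chunk getOrCreateChunkSketch(ConcurrentMap<TreeName, Chunk> chunks,
      ChunkFactory chunkFactory, TreeName treeName) throws Exception
  {
    final Chunk existing = chunks.get(treeName);
    if (existing != null)
    {
      return existing;
    }
    final Chunk newChunk = chunkFactory.newChunk(treeName);
    final Chunk alreadyExistingChunk = chunks.putIfAbsent(treeName, newChunk);
    if (alreadyExistingChunk != null)
    {
      // Another thread won the race: discard our chunk and use theirs.
      newChunk.delete();
      return alreadyExistingChunk;
    }
    return newChunk;
  }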
throw new UnsupportedOperationException();
throw new UnsupportedOperationException();
throw new UnsupportedOperationException();
throw new UnsupportedOperationException();
throw new UnsupportedOperationException();
throw new UnsupportedOperationException();
throw new UnsupportedOperationException();
 * Chunk implementations are data stores with an optionally limited capacity. Chunks are typically used by first
 * adding data to the store using {@link #put(ByteSequence, ByteSequence)}; later on, data can be sequentially read
 * back in sorted order through the cursor returned by {@link #flip()}.
interface Chunk
 * Flips this chunk from write-only to read-only in order to read the previously stored data. This method must be
 * called only once.
 * Returns the size of the data contained in this chunk. This size is guaranteed to be consistent only if there is no
 * pending {@link #put(ByteSequence, ByteSequence)} operation.
long size();
 * While a chunk's memory and files are automatically garbage collected/deleted at exit, this method can be called to
 * release these resources earlier.
void delete();
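    /*
     * Typical life-cycle (illustrative sketch, not part of the original source):
     *
     *   chunk.put(key1, value1);
     *   chunk.put(key2, value2);                 // repeat for every record
     *   try (SequentialCursor<ByteString, ByteString> cursor = chunk.flip())
     *   {
     *     while (cursor.next())
     *     {
     *       process(cursor.getKey(), cursor.getValue()); // keys come back in ascending order
     *     }
     *   }
     *   chunk.delete();                          // release memory/files without waiting for exit
     */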
 * Stores and sorts data into multiple chunks. Thanks to the chunk rolling mechanism, this chunk can sort and store an
 * amount of data bigger than the available memory. There is one such chunk per
 * database tree, shared across all phase-one importer threads, in charge of storing/sorting records.
return nullChunk();
ExternalSortChunk(File tempDir, String name, BufferPool bufferPool, Collector<?, ByteString> collector,
return new CollectorCursor<>(
throw new StorageRuntimeException(e);
public long size()
public void delete()
int getNbSortedChunks()
 * de-duplication performed by the CollectorCursor. Thanks to the SPARSE_FILE option, the delta between size
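    /*
     * Illustrative sketch (not part of the original source) of the external-sort principle used by
     * this class, reduced to plain JDK types: fill a bounded in-memory run, sort and spill it when
     * full, and keep the sorted runs for a later merge (done here by a composite cursor over all runs).
     */
    private static List<List<String>> spillSortedRunsSketch(Iterator<String> records, int maxInMemory)
    {
      final List<List<String>> sortedRuns = new ArrayList<>();
      List<String> currentRun = new ArrayList<>();
      while (records.hasNext())
      {
        currentRun.add(records.next());
        if (currentRun.size() == maxInMemory)
        {
          Collections.sort(currentRun); // in the real code: sort, then write to a file region
          sortedRuns.add(currentRun);
          currentRun = new ArrayList<>();
        }
      }
      if (!currentRun.isEmpty())
      {
        Collections.sort(currentRun);
        sortedRuns.add(currentRun);
      }
      return sortedRuns;
    }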
 * Stores data inside fixed-size byte arrays. Data stored in this chunk are sorted by key during {@link #flip()} so
 * that they can be cursored in ascending order. Byte arrays are supplied through a {@link BufferPool}. To allow the
 * sort operation, data must be randomly accessible. To do so, the offset of each key/value record is stored in the
 * buffer. To maximize space utilization, the buffer content is split in two parts: one contains the record offsets,
 * the other contains the records themselves:
* ----------> offset writer direction ----------------> |<- free ->| <---- record writer direction ---
* +-----------------+-----------------+-----------------+----------+----------+----------+----------+
* | offset record 1 | offset record 2 | offset record n | .........| record n | record 2 | record 1 |
* +-----------------+-----------------+-----------------+----------+----------+----------+----------+
 * Each record is the concatenation of a key/value pair (lengths are encoded using the {@link PackedLong}
 * representation).
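  /*
   * Illustrative sketch (not part of the original source) of the capacity check implied by the
   * layout pictured above: offsets grow left-to-right from the start of the buffer while records
   * grow right-to-left from the end, so the chunk is full when the two regions would meet.
   * OFFSET_SIZE and the helper below are assumptions, not actual members of this class.
   */
  private static final int OFFSET_SIZE = 4;

  private static boolean canAppendSketch(int indexPos, int dataPos, int recordLength)
  {
    // One more offset slot starting at indexPos, plus recordLength bytes ending at dataPos.
    return indexPos + OFFSET_SIZE <= dataPos - recordLength;
  }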
private long totalBytes;
private int indexPos;
private int dataPos;
private int nbRecords;
nbRecords++;
public long size()
return totalBytes;
return valueA;
public int size()
return nbRecords;
return new InMemorySortedChunkCursor();
public void delete()
private volatile long bytesRead;
private int indexOffset;
public boolean next()
public boolean isDefined()
throwIfUndefined(this);
return key;
throwIfUndefined(this);
return value;
public void close()
return metricName;
public long getNbBytesRead()
return bytesRead;
public long getNbBytesTotal()
return totalBytes;
 * Stores data inside a region of a file. A region is delimited by an offset and a length. The region is
 * memory-mapped, and data are appended to the memory-mapped region until it is full. The region stores a
 * concatenation of key/value records (key and value sizes are stored using the {@link PackedLong} format).
private final long startOffset;
private long size;
 * Make sure that the file is big enough to encapsulate this memory-mapped region. Thanks to SPARSE_FILE, this
 * does not consume disk space until data are actually written.
final int recordSize = PackedLong.getEncodedSize(key.length()) + key.length()
    + PackedLong.getEncodedSize(value.length()) + value.length();
catch (IOException e)
throw new StorageRuntimeException(e);
public long size()
 * We force the OS to write dirty pages now so that they don't accumulate. Indeed, a huge number of dirty pages
 * might cause the OS to freeze the producer of those dirty pages (this importer) while it swaps out the pages.
catch (IOException e)
throw new StorageRuntimeException(e);
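      /*
       * Illustrative sketch (not part of the original source) of the mechanism described above,
       * using the JDK memory-mapping API: map a region of the file, append the encoded records,
       * then force() the mapped buffer so dirty pages are written out incrementally instead of
       * accumulating until the OS throttles the writer.
       */
      private static void appendAndFlushSketch(FileChannel channel, long startOffset, int length,
          byte[] encodedRecords) throws IOException
      {
        final MappedByteBuffer region = channel.map(FileChannel.MapMode.READ_WRITE, startOffset, length);
        region.put(encodedRecords); // in the real code: PackedLong sizes followed by key/value bytes
        region.force();             // ask the OS to write the dirty pages now
      }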
public void delete()
public boolean next()
final int keyLength;
final int valueLength;
catch (IOException e)
throw new StorageRuntimeException(e);
public boolean isDefined()
throwIfUndefined(this);
return key;
throwIfUndefined(this);
return value;
public void close()
return metricName;
public long getNbBytesRead()
public long getNbBytesTotal()
private boolean isDefined;
private K key;
private V value;
public boolean next()
if (isDefined)
return isDefined;
private void accumulateValues()
throwIfUndefined(this);
// Delegate is one step ahead. When delegate.isDefined() returns false, we have to return true once more.
isDefined = true;
public boolean isDefined()
return isDefined;
throwIfUndefined(this);
return key;
throwIfUndefined(this);
return value;
public void close()
public long getNbBytesRead()
public long getNbBytesTotal()
static final class CompositeCursor<K extends Comparable<? super K>, V> implements MeteredCursor<K, V>
private final long totalBytes;
private volatile long bytesRead;
private K key;
private V value;
// Never return 0. Otherwise both cursors are considered equal and only one of them is kept by this set
 * Tries to get the next record from the cursor containing the lowest entry. If that cursor reaches its end, it is
 * closed and removed from the set of cursors.
public boolean next()
public boolean isDefined()
throwIfUndefined(this);
return key;
throwIfUndefined(this);
return value;
public void close()
return metricName;
public long getNbBytesRead()
return bytesRead;
public long getNbBytesTotal()
return totalBytes;
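    /*
     * Illustrative sketch (not part of the original source) of the comparator trick noted above:
     * open cursors are kept in a sorted set ordered by their current key, so the comparator must
     * break ties (here by identity hash code) and never return 0, otherwise the set would silently
     * drop one of two cursors whose current keys happen to be equal.
     */
    private static <K extends Comparable<? super K>, V> Comparator<SequentialCursor<K, V>> neverEqualComparatorSketch()
    {
      return new Comparator<SequentialCursor<K, V>>()
      {
        @Override
        public int compare(SequentialCursor<K, V> cursor1, SequentialCursor<K, V> cursor2)
        {
          final int cmp = cursor1.getKey().compareTo(cursor2.getKey());
          // Never return 0: fall back to identity so distinct cursors stay distinct in the set.
          return cmp != 0 ? cmp : Integer.compare(System.identityHashCode(cursor1), System.identityHashCode(cursor2));
        }
      };
    }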
ChunkCopierTask(PhaseTwoProgressReporter reporter, Chunk source, TreeName treeName, Importer destination)
try (final SequentialCursor<ByteString, ByteString> sourceCursor = trackCursorProgress(reporter, source.flip()))
return null;
private static void copyIntoChunk(SequentialCursor<ByteString, ByteString> source, Chunk destination)
 * This task optionally copies the dn2id chunk into the database and takes advantage of its cursoring to compute the
 * number of children of each entry (see {@link ChildrenCount}).
DN2IDImporterTask(PhaseTwoProgressReporter progressReporter, Importer importer, File tempDir, BufferPool bufferPool,
boolean dn2idAlreadyImported)
final TreeVisitor<ChildrenCount> visitor = new ID2CountTreeVisitorImporter(asImporter(id2CountChunk));
return null;
private static final class ChildrenCount
private long numberOfChildren;
 * Delegates the storage of data to the {@link Importer}. This class has the same thread-safety as the supplied
 * {@link Importer}.
public long size()
public void delete()
 * Delegates the {@link #put(TreeName, ByteSequence, ByteSequence)} method of {@link Importer} to a {@link Chunk}.
 * {@link #createTree(TreeName)} is a no-op; other methods throw {@link UnsupportedOperationException}. This class has
 * the same thread-safety as the supplied {@link Chunk}.
catch (Exception e)
throw new StorageRuntimeException(e);
throw new UnsupportedOperationException();
throw new UnsupportedOperationException();
throw new UnsupportedOperationException();
public void close()
 * Writes records into a delegated {@link Chunk} after reordering those records by key using a best-effort
 * algorithm. This class is intended to be used when records are initially ordered but might actually hit the chunk
 * slightly disordered because of thread scheduling in a multi-threaded environment. Records are buffered and sorted
 * before being written to the delegated chunk. Because of this buffering, records might be written into the chunk
 * after some delay; it is only guaranteed that all entries have been written into the chunk once the flip() method
 * has been called. {@link #put(TreeName, ByteSequence, ByteSequence)} is thread-safe.
 * Number of items to queue before writing them to the storage. This number must be at least equal to the number of
 * threads which will access the put() method. If underestimated, {@link #put(ByteSequence, ByteSequence)} might
 * hand records to the delegate chunk out of order.
private final int queueSize;
public void delete()
 * Maximum size reached: take the record with the smallest key and persist it in the delegate chunk. This
 * ensures records are (mostly) inserted in ascending key order, which is the optimal insert order for B-trees.
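        /*
         * Illustrative sketch (not part of the original source) of the eviction step described
         * above, with plain JDK types: keep at most queueSize records in a sorted map and, once
         * full, hand the smallest key over to the delegate so that it sees (mostly) ascending keys.
         */
        private static void evictSmallestSketch(NavigableMap<ByteString, ByteString> buffer,
            Chunk delegate, int queueSize) throws Exception
        {
          while (buffer.size() > queueSize)
          {
            final Map.Entry<ByteString, ByteString> smallest = buffer.pollFirstEntry();
            delegate.put(smallest.getKey(), smallest.getValue());
          }
        }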
public long size()
public long size()
public void delete()
public boolean next()
public boolean isDefined()
throw new NoSuchElementException();
throw new NoSuchElementException();
public void close()
public long getNbBytesRead()
public long getNbBytesTotal()
return new Executor()
private static <K> List<K> waitTasksTermination(CompletionService<K> completionService, int nbTasks)
return results;
Executors.newSingleThreadScheduledExecutor(newThreadFactory(null, PHASE2_REPORTER_THREAD_NAME, true));
public synchronized void run()
final int progressPercent = totalBytes > 0 ? Math.round((100f * newValue) / totalBytes) : 0;
logger.info(NOTE_IMPORT_LDIF_PHASE_TWO_REPORT, cursor.getMetricName(), progressPercent, progressRemaining,
public synchronized void close()
int length();
 * Pre-allocates and maintains a fixed number of re-usable {@code Buffer}s. This allows keeping control of the heap
 * memory consumed during the import.
private final int bufferSize;
private static final long BYTE_ARRAY_OFFSET;
catch (Exception e)
throw new RuntimeException(e);
private static boolean supportOffHeap()
public int getBufferSize()
return bufferSize;
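    /*
     * Illustrative sketch (not part of the original source) of the pool behaviour described above,
     * using a blocking queue: get() blocks until a pre-allocated buffer is free and release() hands
     * it back, so memory consumption stays bounded at nbBuffers * bufferSize.
     */
    private static final class BufferPoolSketch
    {
      private final BlockingQueue<byte[]> freeBuffers;

      BufferPoolSketch(int nbBuffers, int bufferSize)
      {
        freeBuffers = new ArrayBlockingQueue<>(nbBuffers);
        for (int i = 0; i < nbBuffers; i++)
        {
          freeBuffers.add(new byte[bufferSize]);
        }
      }

      byte[] get() throws InterruptedException
      {
        return freeBuffers.take(); // blocks until a buffer is released
      }

      void release(byte[] buffer) throws InterruptedException
      {
        freeBuffers.put(buffer);
      }
    }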
catch (InterruptedException e)
throw new StorageRuntimeException(e);
catch (InterruptedException e)
throw new StorageRuntimeException(e);
get();
public void close()
private final long address;
private final int size;
private int position;
private boolean closed;
catch (IOException e)
throw new StorageRuntimeException(e);
catch (IOException e)
throw new IllegalStateException(e);
public int length()
return size;
if (!closed)
closed = true;
catch (IOException e)
throw new StorageRuntimeException(e);
catch (IOException e)
throw new IllegalArgumentException(e);
public int length()
public void close()
long getNbBytesRead();
long getNbBytesTotal();
private static <K, V> SequentialCursor<K, V> trackCursorProgress(final PhaseTwoProgressReporter reporter,
public void close()
throw new NoSuchElementException();
 * Gets a new {@link Collector} which can be used to merge encoded values. The type of the values to merge is deduced
 * from the given {@link TreeName}.
private static Collector<?, ByteString> newCollector(final EntryContainer entryContainer, final TreeName treeName)
return index;
return null;
 * A mutable reduction operation that accumulates input elements into a mutable result container, optionally
 * transforming the accumulated result into a final representation after all input elements have been processed.
 * Reduction operations can be performed either sequentially or in parallel. A Collector is specified by three
 * functions that work together to accumulate entries into a mutable result container and optionally perform a final
 * transform on the result: creation of a new result container (get()), incorporation of a new data element into the
 * result container (accept()), and merging of two partial results (merge()).
interface Collector<A, R>
 * Creates and returns a new mutable result container. Equivalent to
 * {@code java.util.stream.Collector.supplier().get()}.
A get();
 * Accepts two partial results and merges them. The combiner function may fold state from one argument into the
 * other and return it.
 * Performs the final transformation from the intermediate accumulation type A to the final result type R. Equivalent
 * to {@code java.util.stream.Collector.finisher()}.
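   /*
    * Illustrative sketch (not part of the original source): a minimal Collector that merges encoded
    * longs by summing them, analogous to java.util.stream.Collectors.summingLong(). The method
    * signatures below are assumptions inferred from the javadoc above, not the actual signatures
    * of this interface:
    *
    *   final class SumLongCollectorSketch implements Collector<Long, ByteString>
    *   {
    *     public Long get()                          { return 0L; }                  // new container
    *     public Long accept(Long sum, ByteString v) { return sum + decodeLong(v); } // new element
    *     public Long merge(Long sum1, Long sum2)    { return sum1 + sum2; }         // combine partials
    *   }
    */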
public V get()
return null;
throw new IllegalArgumentException("Cannot accept multiple values (current=" + previousValue + ", new=" + value
return value;
return latestValue;
 * {@link Collector} that accepts encoded {@link EntryIDSet} objects and produces a {@link ByteString} representing
 * the merged {@link EntryIDSet}.
private final int indexLimit;
return new LinkedList<>();
 * else the EntryIDSet is above the index entry limit: discard additional values to avoid blowing up memory now, then
return resultContainer;
 * {@link Collector} that accepts {@code long} values encoded into {@link ByteString} objects and produces a
 * {@link ByteString} representing their sum.
private static final class MeteredSequentialCursorDecorator extends
SequentialCursorDecorator<SequentialCursor<ByteString, ByteString>, ByteString, ByteString>implements
private final long totalSize;
private volatile long bytesRead;
private MeteredSequentialCursorDecorator(SequentialCursor<ByteString, ByteString> delegate, String metricName,
long totalSize)
super(delegate);
public boolean next()
public long getNbBytesRead()
return bytesRead;
return metricName;
public long getNbBytesTotal()
return totalSize;
/** Helper allowing creation of a {@link SequentialCursor} decorator without having to re-implement all methods. */
SequentialCursor<K, V>
protected final D delegate;
public boolean next()
public boolean isDefined()
public void close()
nbVisited++;
nbVisited++;
return nbVisited;
private static abstract class IndexVisitor
private final boolean trustValue;
return indexNames;
 * Thread-safe fixed-size cache which, once full, removes the least recently accessed entry. Composition is used here
 * to ensure that only methods registering an entry access in the LinkedHashMap are actually used. Otherwise, the
 * least recently accessed entry could not be evicted reliably.
private static final class LRUPresenceCache<T>
counter++;
return counter;
throw new UnsupportedOperationException();
throw new UnsupportedOperationException();
throw new UnsupportedOperationException();
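  /*
   * Illustrative sketch (not part of the original source) of the composition described above: a
   * LinkedHashMap in access-order mode evicts the least recently accessed entry through
   * removeEldestEntry(), and only methods that register an access (get()/put(), unlike
   * containsKey()) are exposed.
   */
  private static final class LruPresenceCacheSketch<T>
  {
    private final LinkedHashMap<T, Boolean> cache;

    LruPresenceCacheSketch(final int maxSize)
    {
      // accessOrder=true: iteration order goes from least- to most-recently accessed.
      cache = new LinkedHashMap<T, Boolean>(16, 0.75f, true)
      {
        @Override
        protected boolean removeEldestEntry(Map.Entry<T, Boolean> eldest)
        {
          return size() > maxSize;
        }
      };
    }

    synchronized boolean contains(T element)
    {
      return cache.get(element) != null; // get() records an access, containsKey() would not
    }

    synchronized void add(T element)
    {
      cache.put(element, Boolean.TRUE);
    }
  }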