/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* See LICENSE.txt included in this distribution for the specific
* language governing permissions and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at LICENSE.txt.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
// Flag recording whether the history indexing pass has completed.
// Set via setHistoryIndexDone() and queried via isHistoryIndexDone().
private boolean historyIndexDone = false;
/**
* The schema in which the tables live.
*/
/**
* The names of all the tables created by this class.
*/
"REPOSITORIES", "FILES", "AUTHORS", "CHANGESETS", "FILECHANGES",
"DIRECTORIES", "DIRCHANGES"
};
/**
* SQL queries used by this class.
*/
static {
}
} catch (IOException ioe) {
throw new ExceptionInInitializerError(ioe);
}
}
/**
* The number of times to retry an operation that failed in a way that
* indicates that it may succeed if it's tried again.
*/
/**
* The maximum number of characters in commit messages. Longer messages will
* be truncated.
*/
/**
* The id to be used for the next row inserted into FILES.
*/
/**
* The id to be used for the next row inserted into DIRECTORIES.
*/
/**
* The id to be used for the next row inserted into CHANGESETS.
*/
/**
* The id to be used for the next row inserted into AUTHORS.
*/
/**
* Info string to return from {@link #getInfo()}.
*/
/**
 * Create a new cache instance with the default JDBC driver and URL.
 */
JDBCHistoryCache() {
// NOTE(review): the body appears empty in this view; presumably it
// delegates to the (driver, url) constructor with default values --
// confirm against the full source before relying on this.
}
/**
* Create a new cache instance with the specified JDBC driver and URL.
*
* @param jdbcDriverClass JDBC driver class to access the database backend
* @param url the JDBC url to the database
*/
this.jdbcDriverClass = jdbcDriverClass;
this.jdbcConnectionURL = url;
}
/**
* Check whether this cache implementation can store history for the given
* repository. Only repositories that support retrieval of history for the
* whole directory at once are supported.
*/
return repository.hasHistoryForDirectories();
}
/**
* Handle an {@code SQLException}. If the exception indicates that the
* operation may succeed if it's retried and the number of attempts hasn't
* exceeded the limit defined by {@link #MAX_RETRIES}, ignore it and let the
* caller retry the operation. Otherwise, re-throw the exception.
*
* @param sqle the exception to handle
* @param attemptNo the attempt number, first attempt is 0
* @throws SQLException if the operation shouldn't be retried
*/
throws SQLException {
boolean isTransient = false;
if (cause instanceof SQLTransientException) {
isTransient = true;
break;
}
}
} else {
throw sqle;
}
}
/**
* Get the SQL text for a name query.
*
* @param key name of the query
* @return SQL text for the query
*/
}
// TODO Store a database version which is incremented on each
// format change. When a version change is detected, drop the database
// or, if possible, upgrade the database to the new format. For now,
// check if the tables exist, and create them if necessary.
}
}
// Databases created with 0.11 or earlier versions don't have a
// PARENT column in the DIRECTORIES table. If the column is missing,
// create it and populate it. Bug #3174.
}
}
}
// Create a composite index on the repository in ascending order
// and the id in descending order. This index may allow faster
// retrieval of history in reverse chronological order.
}
}
}
}
// Derby has some performance problems with auto-generated identity
// columns when multiple threads insert into the same table
// concurrently. Therefore, we have our own light-weight id generators
// that we initialize on start-up. Details can be found in Derby's
// bug tracker: https://issues.apache.org/jira/browse/DERBY-4437
append("\n");
}
/**
* Fill the PARENT column of the DIRECTORIES table with correct values. Used
* when upgrading a database from an old format that doesn't have the PARENT
* column.
*/
throws SQLException {
getQuery("updateDirectoriesParent"));
}
}
}
private static boolean tableExists(
throws SQLException {
}
}
private static boolean columnExists(
throws SQLException {
}
}
/**
* Initialize the {@code AtomicInteger} object that holds the value of the
* id to use for the next row in a certain table. If there are rows in the
* table, take the maximum value and increment it by one. Otherwise, the
* {@code AtomicInteger} will be left at its current value (presumably 0).
*
* @param s a statement object on which the max query is executed
* @param stmtKey name of the query to execute in order to get max id
* @param generator the {@code AtomicInteger} object to initialize
*/
private static void initIdGenerator(
throws SQLException {
}
}
}
}
try {
for (int i = 0;; i++) {
final ConnectionResource conn
try {
}
// Success! Break out of the loop.
return;
} catch (SQLException sqle) {
handleSQLException(sqle, i);
} finally {
}
}
} catch (Exception e) {
throw new HistoryException(e);
}
}
// We do check the return value from ResultSet.next(), but PMD doesn't
// understand it, so suppress the warning.
@SuppressWarnings("PMD.CheckResultSet")
throws HistoryException {
assert file.isDirectory();
try {
for (int i = 0;; i++) {
final ConnectionResource conn
try {
}
} catch (SQLException sqle) {
handleSQLException(sqle, i);
} finally {
}
}
} catch (SQLException sqle) {
throw new HistoryException(sqle);
}
}
/**
* Get path name with all file separators replaced with '/'.
*/
}
/**
* Get path name with all file separators replaced with '/'.
*/
try {
} catch (IOException ioe) {
throw new HistoryException(ioe);
}
}
/**
* Get the path of a file relative to the source root.
*
* @param file the file to get the path for
* @return relative path for {@code file} with unix file separators
*/
throws HistoryException {
}
/**
* Get the path of a file relative to the specified root directory.
*
* @param filePath the canonical path of the file to get the relative path
* for
* @param rootPath the canonical path of the root directory
* @return relative path with unix file separators
*/
}
/**
* Get the base name of a path (with unix file separators).
*
* @param fullPath the full path of the file with unix file separators
* @return the base name of the file
*/
}
/**
* Get the path to the parent of the specified file.
*
* @param fullPath the full path of the file with unix file separators
* @return the full path of the file's parent
*/
}
/**
* Split a full (unix-style) path into an array of path elements.
*
* @param fullPath the full unix-style path name
* @return an array with each separate element of the path
* @throws IllegalArgumentException if fullPath doesn't start with '/'
*/
}
}
/**
* Reconstruct a path previously split by {@link #splitPath(String)}, or
* possibly just a part of it (only the {@code num} first elements will be
* used).
*
* @param pathElts the elements of the path
* @param num the number of elements to use when reconstructing the path
* @return a path name
*/
for (int i = 0; i < num; i++) {
}
}
/**
* Truncate a string to the given length.
*
* @param str the string to truncate
* @param length the length of the string after truncation
* @return the truncated string
* @throws IllegalArgumentException if the string is not longer than the
* specified length
*/
throw new IllegalArgumentException();
}
}
/**
* Statement that gets the history for the specified file and repository.
* The result is ordered in reverse chronological order to match the
* required ordering for {@link HistoryCache#get(File, Repository)}.
*/
/**
* Statement that gets the history for all files matching a pattern in the
* given repository. The result is ordered in reverse chronological order to
* match the required ordering for
* {@link HistoryCache#get(File, Repository)}.
*/
/**
* Statement that retrieves all the files touched by a given changeset.
*/
/**
 * Statement for getting the ID of a given revision.
 */
throws HistoryException {
try {
for (int i = 0;; i++) {
try {
} catch (SQLException sqle) {
handleSQLException(sqle, i);
}
}
} catch (SQLException sqle) {
throw new HistoryException(sqle);
}
}
/**
 * Get the number of rows in the FILEMOVES table. This is used as a
 * workaround/optimization since JavaDB cannot currently handle the
 * GET_FILE_HISTORY query very well.
 *
 * @return number of rows in the FILEMOVES table
 * @throws SQLException if an error occurs while accessing the database
 */
final ConnectionResource conn;
try {
}
}
} finally {
}
return -1;
}
/**
* Helper for {@link #get(File, Repository)}.
*/
throws HistoryException, SQLException {
final ConnectionResource conn
try {
final PreparedStatement ps;
if (file.isDirectory()) {
// Fetch history for all files under this directory.
} else {
// Fetch history for a single file only.
&& (getFilemovesCount() > 0)
}
final PreparedStatement filePS
// Get the information about a changeset
// Fill the list of files touched by the changeset, if
// requested.
if (withFiles) {
}
}
}
}
}
} finally {
}
}
return history;
}
/**
 * Store history for a repository. Note that after this method returns, it
 * is not guaranteed that the data will be returned in full by the get()
 * method, since some of the threads may still be running.
 */
throws HistoryException {
try {
final ConnectionResource conn
try {
} finally {
}
} catch (SQLException sqle) {
throw new HistoryException(sqle);
}
}
/**
* Get ID value for revision string by querying the DB.
*
* @param revision
* @return ID
*/
final ConnectionResource conn
try {
"getIdForRevision exception{0}", e);
return -1;
} finally {
}
}
// Return immediately when there is nothing to do.
return;
}
"Storing history for repo {0}",
for (int i = 0;; i++) {
try {
}
}
directories = dirs;
}
if (addChangeset == null) {
}
if (addDirchange == null) {
}
if (addFilechange == null) {
}
// Success! Break out of the loop.
break;
} catch (SQLException sqle) {
handleSQLException(sqle, i);
}
}
// getHistoryEntries() returns the entries in reverse chronological
// order, but we want to insert them in chronological order so that
// their auto-generated identity column can be used as a chronological
// ordering column. Otherwise, incremental updates will make the
// identity column unusable for chronological ordering. So therefore
// we walk the list backwards.
it.hasPrevious();) {
for (int i = 0;; i++) {
try {
// Truncate the message if it can't fit in a VARCHAR
// (bug #11663).
}
// Add one row for each file in FILECHANGES, and one row
// for each path element of the directories in DIRCHANGES.
// ignore non-existent files
try {
} catch (IOException ex) {
"File exception{0}", ex);
continue;
}
|| !env.isHandleHistoryOfRenamedFiles()) {
}
// Only add to DIRCHANGES if we haven't already
}
}
}
// Successfully added the entry. Break out of retry loop.
break retry;
} catch (SQLException sqle) {
handleSQLException(sqle, i);
}
}
}
if (!env.isHandleHistoryOfRenamedFiles()) {
return;
}
/*
* Special handling for certain files - this is mainly for files which
* have been renamed in Mercurial repository.
* This ensures that their complete history (follow) will be saved.
*/
public void run() {
try {
// We want to catch any exception since we are in thread.
"doRenamedHistory exception {0}", ex);
} finally {
}
}
});
}
// Wait for the executors to finish.
try {
} catch (InterruptedException ex) {
"latch exception{0}", ex);
}
"Done storing history for repo {0}",
}
/**
* Optimize how the cache is stored on disk. In particular, make sure index
* cardinality statistics are up to date, and perform a checkpoint to make
* sure all changes are forced to the tables on disk and that the unneeded
* transaction log is deleted.
*
* @throws HistoryException if an error happens when optimizing the cache
*/
try {
final ConnectionResource conn
try {
} finally {
}
} catch (SQLException sqle) {
throw new HistoryException(sqle);
}
}
/**
* <p>
* Make sure Derby's index cardinality statistics are up to date. Otherwise,
* the optimizer may choose a bad execution strategy for some queries. This
* method should be called if the size of the tables has changed
* significantly.
* </p>
*
* <p>
* This is a workaround for the problems described in
* <a href="https://issues.apache.org/jira/browse/DERBY-269">DERBY-269</a>
* and
* <a href="https://issues.apache.org/jira/browse/DERBY-3788">DERBY-3788</a>.
* When automatic update of index cardinality statistics has been
* implemented in Derby, the workaround may be removed.
* </p>
*
* <p>
* Without this workaround, poor performance has been observed in
* {@code get()} due to bad choices made by the optimizer.
* </p>
*
* <p>
* Note that this method uses a system procedure introduced in Derby 10.5.
* If this procedure does not exist, this method is a no-op.
* </p>
*/
throws SQLException {
"CALL SYSCS_UTIL.SYSCS_UPDATE_STATISTICS(?, ?, NULL)")) {
for (int i = 0;; i++) {
try {
// Successfully executed statement. Break out of
// retry loop.
break retry;
} catch (SQLException sqle) {
handleSQLException(sqle, i);
}
}
}
}
}
}
/**
* If this is a Derby database, force a checkpoint so that the disk space
* occupied by the transaction log is freed as early as possible.
*/
throws SQLException {
s.execute("CALL SYSCS_UTIL.SYSCS_CHECKPOINT_DATABASE()");
}
}
}
/**
* Check if a stored database procedure exists.
*
* @param dmd the meta-data object used for checking
* @param schema the procedure's schema
* @param proc the name of the procedure
* @return {@code true} if the procedure exists, {@code false} otherwise
* @throws SQLException if an error happens when reading the meta-data
*/
throws SQLException {
// If there's a row, there is such a procedure.
}
}
/**
* Get the id of a repository in the database. If the repository is not
* stored in the database, add it and return its id.
*
* @param conn the connection to the database
* @param repository the repository whose id to get
* @return the id of the repository
*/
throws SQLException {
}
}
// Repository is not in the database. Add it.
return getGeneratedIntKey(insert);
}
/**
* Get a map from author names to their ids in the database. The authors
* that are not in the database are added to it.
*
* @param conn the connection to the database
* @param history the history to get the author names from
* @param reposId the id of the repository
* @return a map from author names to author ids
*/
throws SQLException {
}
}
}
}
return map;
}
/**
* Build maps from directory names and file names to their respective
* identifiers in the database. The directories and files that are not
* already in the database, are added to it.
*
* @param conn the connection to the database
* @param history the history to get the file and directory names from
* @param reposId the id of the repository
* @param dirMap a map which will be filled with directory names and ids
* @param fileMap a map which will be filled with file names and ids
*/
private void getFilesAndDirectories(
throws SQLException {
int insertCount = 0;
// Add the file to the database and to the map if it isn't
// there already. Assumption: If the file is in the database,
// all its parent directories are also there.
// Get the dir id for this file, potentially adding the
// parent directories to the db and to dirMap.
// Commit every now and then to allow the database to free
// resources (like locks and transaction log), but not too
// frequently, since that may kill the performance. It is
// OK not to commit for every file added, since the worst
// thing that could happen is that we need to re-insert
// the files added since the last commit in case of a crash.
insertCount++;
}
}
}
}
}
/**
 * Populate a map with the path names and ids found in the FILES or
 * DIRECTORIES tables associated with a specified repository id.
 *
 * @param ps the statement used to get path names and ids from the correct
 * table. It should take one parameter: the repository id.
 * @param reposId the id of the repository to scan
 */
private void populateFileOrDirMap(
throws SQLException {
}
}
}
/**
* Add all the parent directories of a specified file to the database, if
* they haven't already been added, and also put their paths and ids into a
* map.
*
* @param ps statement that inserts a directory into the DIRECTORY table.
* Takes three parameters: (1) the id of the repository, (2) the path of the
* directory, and (3) the id to use for the directory.
* @param reposId id of the repository to which the file belongs
* @param fullPath the file whose parents to add
* @param map a map from directory path to id for the directories already in
* the database. When a new directory is added, it's also added to this map.
* @return the id of the first parent of {@code fullPath}
*/
private int addAllDirs(
ps.executeUpdate();
}
}
}
return dir;
}
/**
* Return the integer key generated by the previous execution of a
* statement. The key should be a single INTEGER, and the statement should
* insert exactly one row, so there should be only one key.
*
* @param stmt a statement that has just inserted a row
* @return the integer key for the newly inserted row, or {@code null} if
* there is no key
*/
}
}
throws HistoryException {
try {
for (int i = 0;; i++) {
try {
return getLatestRevisionForRepository(repository);
} catch (SQLException sqle) {
handleSQLException(sqle, i);
}
}
} catch (SQLException sqle) {
throw new HistoryException(sqle);
}
}
/**
* Helper for {@link #getLatestCachedRevision(Repository)}.
*/
throws SQLException {
final ConnectionResource conn
try {
}
} finally {
}
}
throws HistoryException {
try {
for (int i = 0;; i++) {
try {
return getLastModifiedTimesForAllFiles(
} catch (SQLException sqle) {
handleSQLException(sqle, i);
}
}
} catch (SQLException sqle) {
throw new HistoryException(sqle);
}
}
throws HistoryException, SQLException {
final ConnectionResource conn
try {
}
}
} finally {
}
return map;
}
try {
for (int i = 0;; i++) {
try {
return;
} catch (SQLException sqle) {
handleSQLException(sqle, i);
}
}
} catch (SQLException sqle) {
throw new HistoryException(sqle);
}
}
/**
* Helper for {@link #clear(Repository)}.
*/
throws SQLException {
final ConnectionResource conn
try {
getQuery("clearRepository"))) {
}
} finally {
}
}
return info;
}
/*
* Create history cache for file which has been renamed in the past.
* This inserts data both into FILECHANGES and FILEMOVES tables.
*/
throws SQLException {
try {
} catch (HistoryException ex) {
"cannot get history for {0} because of exception {1}",
return;
}
for (int i = 0;; i++) {
final ConnectionResource conn
try {
/*
* If the file exists in the changeset, store it in
* the table tracking moves of the file when it had
* one of its precedent names so it can be found
* when performing historyget on directory.
*/
} else {
}
break retry;
} catch (SQLException sqle) {
handleSQLException(sqle, i);
} finally {
}
}
}
}
/**
 * Record that the history index build has finished. After this call,
 * {@link #isHistoryIndexDone()} returns {@code true}.
 */
public void setHistoryIndexDone() {
historyIndexDone = true;
}
/**
 * Tell whether the history index build has finished.
 *
 * @return {@code true} if {@link #setHistoryIndexDone()} has been called,
 * {@code false} otherwise
 */
public boolean isHistoryIndexDone() {
return historyIndexDone;
}
}