/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * See LICENSE.txt included in this distribution for the specific
 * language governing permissions and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at LICENSE.txt.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
 */

package org.opensolaris.opengrok.history;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLTransientException;
import java.sql.Statement;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.opensolaris.opengrok.configuration.RuntimeEnvironment;
import org.opensolaris.opengrok.jdbc.ConnectionManager;
import org.opensolaris.opengrok.jdbc.ConnectionResource;
import org.opensolaris.opengrok.jdbc.InsertQuery;
import org.opensolaris.opengrok.jdbc.PreparedQuery;
import org.opensolaris.opengrok.util.IOUtils;

class JDBCHistoryCache implements HistoryCache {

    private static final Logger logger =
            Logger.getLogger(JDBCHistoryCache.class.getName());
    /** The schema in which the tables live. */
    private static final String SCHEMA = "OPENGROK";

    /** The names of all the tables created by this class. */
    private static final String[] TABLES = {
        "REPOSITORIES", "FILES", "AUTHORS", "CHANGESETS", "FILECHANGES",
        "DIRECTORIES", "DIRCHANGES"
    };

    /**
     * The number of times to retry an operation that failed in a way that
     * indicates that it may succeed if it's tried again.
     */
    private static final int MAX_RETRIES = 2;

    /**
     * The maximum number of characters in commit messages. Longer messages
     * will be truncated.
     */
    private static final int MAX_MESSAGE_LENGTH = 32672;

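    /** Manages the JDBC connections used to access the history database. */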
    private ConnectionManager connectionManager;

    private final String jdbcDriverClass;
    private final String jdbcConnectionURL;

    /** The id to be used for the next row inserted into FILES. */
    private final AtomicInteger nextFileId = new AtomicInteger();

    /** The id to be used for the next row inserted into DIRECTORIES. */
    private final AtomicInteger nextDirId = new AtomicInteger();

    /** The id to be used for the next row inserted into CHANGESETS. */
    private final AtomicInteger nextChangesetId = new AtomicInteger();

    /** The id to be used for the next row inserted into AUTHORS. */
    private final AtomicInteger nextAuthorId = new AtomicInteger();

    /** Info string to return from {@link #getInfo()}. */
    private String info;

    /** SQL queries used by this class. */
    private static final Properties QUERIES = new Properties();
    static {
        Class<?> klazz = JDBCHistoryCache.class;
        InputStream in = null;
        try {
            in = klazz.getResourceAsStream(
                    klazz.getSimpleName() + "_queries.properties");
            if (in != null) {
                QUERIES.load(in);
            }
        } catch (IOException ioe) {
            throw new ExceptionInInitializerError(ioe);
        } finally { //NOPMD
            IOUtils.close(in);
        }
    }

    /**
     * Create a new cache instance with the default JDBC driver and URL.
     */
    JDBCHistoryCache() {
        this(RuntimeEnvironment.getConfig().getDatabaseDriver(),
                RuntimeEnvironment.getConfig().getDatabaseUrl());
    }

    /**
     * Create a new cache instance with the specified JDBC driver and URL.
     *
     * @param jdbcDriverClass JDBC driver class to access the database backend
     * @param url the JDBC url to the database
     */
    JDBCHistoryCache(String jdbcDriverClass, String url) {
        this.jdbcDriverClass = jdbcDriverClass;
        this.jdbcConnectionURL = url;
    }

    /**
     * Check whether this cache implementation can store history for the given
     * repository. Only repositories that support retrieval of history for the
     * whole directory at once are supported.
     */
    @Override
    public boolean supportsRepository(Repository repository) {
        return repository.hasHistoryForDirectories();
    }

    /**
     * Handle an {@code SQLException}. If the exception indicates that the
     * operation may succeed if it's retried and the number of attempts hasn't
     * exceeded the limit defined by {@link #MAX_RETRIES}, ignore it and let
     * the caller retry the operation. Otherwise, re-throw the exception.
     *
     * @param sqle the exception to handle
     * @param attemptNo the attempt number, first attempt is 0
     * @throws SQLException if the operation shouldn't be retried
     */
    private static void handleSQLException(SQLException sqle, int attemptNo)
            throws SQLException
    {
        boolean isTransient = false;
        for (Throwable cause : sqle) {
            if (cause instanceof SQLTransientException) {
                isTransient = true;
                break;
            }
        }

        if (isTransient && attemptNo < MAX_RETRIES) {
            logger.info("Transient database failure detected. Retrying.");
            logger.log(Level.FINE, "Transient database failure details", sqle);
        } else {
            throw sqle;
        }
    }
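
    // Callers combine handleSQLException() with a retry loop like the one
    // sketched below, repeating the operation until it either succeeds or
    // the exception is re-thrown. doDatabaseOperation() is just a placeholder
    // for the actual JDBC work.
    //
    //     for (int i = 0;; i++) {
    //         try {
    //             return doDatabaseOperation();
    //         } catch (SQLException sqle) {
    //             handleSQLException(sqle, i);
    //         }
    //     }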

    /**
     * Get the SQL text for a named query.
     * @param key name of the query
     * @return SQL text for the query
     */
    private static String getQuery(String key) {
        return QUERIES.getProperty(key);
    }

    private void initDB(Statement s) throws SQLException {
        // TODO Store a database version which is incremented on each
        // format change. When a version change is detected, drop the database
        // or, if possible, upgrade the database to the new format. For now,
        // check if the tables exist, and create them if necessary.

        DatabaseMetaData dmd = s.getConnection().getMetaData();

        if (!tableExists(dmd, SCHEMA, "REPOSITORIES")) {
            s.execute(getQuery("createTableRepositories"));
        }

        if (!tableExists(dmd, SCHEMA, "DIRECTORIES")) {
            s.execute(getQuery("createTableDirectories"));
        }

        // Databases created with 0.11 or earlier versions don't have a
        // PARENT column in the DIRECTORIES table. If the column is missing,
        // create it and populate it. Bug #3174.
        if (!columnExists(dmd, SCHEMA, "DIRECTORIES", "PARENT")) {
            s.execute(getQuery("alterTableDirectoriesParent"));
            s.execute(getQuery("alterTableDirectoriesParentPathConstraint"));
            fillDirectoriesParentColumn(s);
        }

        if (!tableExists(dmd, SCHEMA, "FILES")) {
            s.execute(getQuery("createTableFiles"));
        }

        if (!tableExists(dmd, SCHEMA, "AUTHORS")) {
            s.execute(getQuery("createTableAuthors"));
        }

        if (!tableExists(dmd, SCHEMA, "CHANGESETS")) {
            s.execute(getQuery("createTableChangesets"));
            // Create a composite index on the repository in ascending order
            // and the id in descending order. This index may allow faster
            // retrieval of history in reverse chronological order.
            s.execute(getQuery("createIndexChangesetsRepoIdDesc"));
        }
        // Databases created before hybrid repository support (hgsubversion)
        // was added don't have an OLD_REV column in the CHANGESETS table.
        if (!columnExists(dmd, SCHEMA, "CHANGESETS", "OLD_REV")) {
            s.execute(getQuery("alterTableChangesetsOldRev"));
        }

        if (!tableExists(dmd, SCHEMA, "DIRCHANGES")) {
            s.execute(getQuery("createTableDirchanges"));
        }

        if (!tableExists(dmd, SCHEMA, "FILECHANGES")) {
            s.execute(getQuery("createTableFilechanges"));
        }

        // Derby has some performance problems with auto-generated identity
        // columns when multiple threads insert into the same table
        // concurrently. Therefore, we have our own light-weight id generators
        // that we initialize on start-up. Details can be found in Derby's
        // bug tracker: https://issues.apache.org/jira/browse/DERBY-4437

        initIdGenerator(s, "getMaxFileId", nextFileId);
        initIdGenerator(s, "getMaxDirId", nextDirId);
        initIdGenerator(s, "getMaxChangesetId", nextChangesetId);
        initIdGenerator(s, "getMaxAuthorId", nextAuthorId);

        StringBuilder infoBuilder = new StringBuilder();
        infoBuilder.append(getClass().getSimpleName() + "\n");
        infoBuilder.append("Driver class: " + jdbcDriverClass + "\n");
        infoBuilder.append("URL: " + jdbcConnectionURL + "\n");
        infoBuilder.append("Database name: " +
                dmd.getDatabaseProductName() + "\n");
        infoBuilder.append("Database version: " +
                dmd.getDatabaseProductVersion() + "\n");
        info = infoBuilder.toString();
    }

    /**
     * Fill the PARENT column of the DIRECTORIES table with correct values.
     * Used when upgrading a database from an old format that doesn't have
     * the PARENT column.
     */
    private static void fillDirectoriesParentColumn(Statement s)
            throws SQLException {
        PreparedStatement update = s.getConnection().prepareStatement(
                getQuery("updateDirectoriesParent"));
        try {
            ResultSet rs = s.executeQuery(getQuery("getAllDirectories"));
            try {
                while (rs.next()) {
                    update.setInt(1, rs.getInt("REPOSITORY"));
                    update.setString(2, getParentPath(rs.getString("PATH")));
                    update.setInt(3, rs.getInt("ID"));
                    update.executeUpdate();
                }
            } finally {
                rs.close();
            }
        } finally {
            update.close();
        }
    }

    private static boolean tableExists(
            DatabaseMetaData dmd, String schema, String table)
            throws SQLException {
        ResultSet rs = dmd.getTables(
                null, schema, table, new String[] {"TABLE"});
        try {
            return rs.next();
        } finally {
            rs.close();
        }
    }

    private static boolean columnExists(
            DatabaseMetaData dmd, String schema, String table, String column)
            throws SQLException {
        ResultSet rs = dmd.getColumns(null, schema, table, column);
        try {
            return rs.next();
        } finally {
            rs.close();
        }
    }

    /**
     * Initialize the {@code AtomicInteger} object that holds the value of
     * the id to use for the next row in a certain table. If there are rows
     * in the table, take the maximum value and increment it by one. Otherwise,
     * the {@code AtomicInteger} will be left at its current value (presumably
     * 0).
     *
     * @param s a statement object on which the max query is executed
     * @param stmtKey name of the query to execute in order to get max id
     * @param generator the {@code AtomicInteger} object to initialize
     */
    private static void initIdGenerator(
            Statement s, String stmtKey, AtomicInteger generator)
            throws SQLException {
        ResultSet rs = s.executeQuery(getQuery(stmtKey));
        try {
            if (rs.next()) {
                int val = rs.getInt(1);
                if (!rs.wasNull()) {
                    generator.set(val + 1);
                }
            }
        } finally {
            rs.close();
        }
    }
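
    // Example: if the CHANGESETS table already contains rows with ids up to
    // 42, initIdGenerator(s, "getMaxChangesetId", nextChangesetId) leaves
    // nextChangesetId at 43, so the next inserted changeset gets a fresh id.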

    @Override
    public void initialize() throws HistoryException {
        try {
            connectionManager =
                    new ConnectionManager(jdbcDriverClass, jdbcConnectionURL);
            for (int i = 0;; i++) {
                final ConnectionResource conn =
                        connectionManager.getConnectionResource();
                try {
                    final Statement stmt = conn.createStatement();
                    try {
                        initDB(stmt);
                    } finally {
                        stmt.close();
                    }
                    conn.commit();
                    // Success! Break out of the loop.
                    return;
                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                } finally {
                    connectionManager.releaseConnection(conn);
                }
            }
        } catch (Exception e) {
            throw new HistoryException(e);
        }
    }

    private static final PreparedQuery IS_DIR_IN_CACHE =
            new PreparedQuery(getQuery("hasCacheForDirectory"));

    // We do check the return value from ResultSet.next(), but PMD doesn't
    // understand it, so suppress the warning.
    @SuppressWarnings("PMD.CheckResultSet")
    @Override
    public boolean hasCacheForDirectory(File file, Repository repository)
            throws HistoryException
    {
        // Cannot assert file.isDirectory() here, since the path may already
        // have been deleted.
        try {
            for (int i = 0;; i++) {
                final ConnectionResource conn =
                        connectionManager.getConnectionResource();
                try {
                    PreparedStatement ps = conn.getStatement(IS_DIR_IN_CACHE);
                    ps.setString(1, toUnixPath(repository.getDirectoryName()));
                    ps.setString(2, getSourceRootRelativePath(file));
                    ResultSet rs = ps.executeQuery();
                    try {
                        return rs.next();
                    } finally {
                        rs.close();
                    }
                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                } finally {
                    connectionManager.releaseConnection(conn);
                }
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    /**
     * Get path name with all file separators replaced with '/'.
     */
    private static String toUnixPath(String path) {
        return path.replace(File.separatorChar, '/');
    }
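
    // Example: on Windows, toUnixPath("src\\main\\App.java") returns
    // "src/main/App.java"; on Unix-like systems the path is unchanged.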

    /**
     * Get the path of a file relative to the source root.
     * @param file the file to get the path for.
     * @return relative path for {@code file} with unix file separators
     */
    private static String getSourceRootRelativePath(File file)
            throws HistoryException
    {
        String path;
        try {
            path = RuntimeEnvironment.getConfig()
                    .getPathRelativeToSourceRoot(file, 0);
        } catch (IOException e) {
            throw new HistoryException(e);
        }
        return toUnixPath(path);
    }

    /**
     * Get the base name of a path (with unix file separators).
     *
     * @param fullPath the full path of the file with unix file separators
     * @return the base name of the file
     */
    private static String getBaseName(String fullPath) {
        int idx = fullPath.lastIndexOf('/');
        return (idx >= 0) ? fullPath.substring(idx + 1) : fullPath;
    }
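
    // Example: getBaseName("/src/main/App.java") returns "App.java";
    // a path without any '/' is returned unchanged.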

    /**
     * Get the path to the parent of the specified file.
     *
     * @param fullPath the full path of the file with unix file separators
     * @return the full path of the file's parent
     */
    private static String getParentPath(String fullPath) {
        int idx = fullPath.lastIndexOf('/');
        return (idx >= 0) ? fullPath.substring(0, idx) : fullPath;
    }
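
    // Example: getParentPath("/src/main/App.java") returns "/src/main";
    // getParentPath("/App.java") returns "" (the root directory).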

    /**
     * Split a full (unix-style) path into an array of path elements.
     *
     * @param fullPath the full unix-style path name
     * @return an array with each separate element of the path
     * @throws IllegalArgumentException if fullPath doesn't start with '/'
     */
    private static String[] splitPath(String fullPath) {
        if (fullPath.isEmpty() || fullPath.charAt(0) != '/') {
            throw new IllegalArgumentException("Not a full path: " + fullPath);
        }
        return fullPath.substring(1).split("/");
    }
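
    // Example: splitPath("/src/main/App.java") returns
    // {"src", "main", "App.java"}; a path not starting with '/' is rejected.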

    /**
     * Reconstruct a path previously split by {@link #splitPath(String)}, or
     * possibly just a part of it (only the {@code num} first elements will
     * be used).
     *
     * @param pathElts the elements of the path
     * @param num the number of elements to use when reconstructing the path
     * @return a path name
     */
    private static String unsplitPath(String[] pathElts, int num) {
        StringBuilder out = new StringBuilder("");
        for (int i = 0; i < num; i++) {
            out.append("/").append(pathElts[i]);
        }
        return out.toString();
    }
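
    // Example: with pathElts = {"src", "main", "App.java"},
    // unsplitPath(pathElts, 2) returns "/src/main" and
    // unsplitPath(pathElts, 0) returns "" (the root directory).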

    /**
     * Truncate a string to the given length.
     *
     * @param str the string to truncate
     * @param length the length of the string after truncation
     * @return the truncated string
     * @throws IllegalArgumentException if the string is shorter than the
     * specified length
     */
    private static String truncate(String str, int length) {
        if (str.length() < length) {
            throw new IllegalArgumentException();
        }
        String suffix = " (...)";
        return length < suffix.length() ?
                str.substring(0, length) :
                (str.substring(0, length - suffix.length()) + suffix);
    }
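
    // Example: truncating a long commit message to length 20 keeps the first
    // 14 characters and appends " (...)", so the result is exactly 20
    // characters long.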

    /**
     * Statement that gets the history for the specified file and repository.
     * The result is ordered in reverse chronological order to match the
     * required ordering for {@link HistoryCache#get(File, Repository)}.
     */
    private static final PreparedQuery GET_FILE_HISTORY =
            new PreparedQuery(getQuery("getFileHistory"));

    /**
     * Statement that gets the history for all files matching a pattern in the
     * given repository. The result is ordered in reverse chronological order
     * to match the required ordering for
     * {@link HistoryCache#get(File, Repository)}.
     */
    private static final PreparedQuery GET_DIR_HISTORY =
            new PreparedQuery(getQuery("getDirHistory"));

    /** Statement that retrieves all the files touched by a given changeset. */
    private static final PreparedQuery GET_CS_FILES =
            new PreparedQuery(getQuery("getFilesInChangeset"));

    @Override
    public History get(File file, Repository repository, boolean withFiles,
            Boolean isDir) throws HistoryException
    {
        try {
            for (int i = 0;; i++) {
                try {
                    return getHistory(file, repository, withFiles, isDir);
                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                }
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    /**
     * Helper for {@link #get(File, Repository, boolean, Boolean)}.
     */
    @SuppressWarnings("null")
    private History getHistory(File file, Repository repository,
            boolean withFiles, Boolean isDir) throws HistoryException, SQLException
    {
        final String filePath = getSourceRootRelativePath(file);
        final String reposPath = toUnixPath(repository.getDirectoryName());
        final ArrayList<HistoryEntry> entries = new ArrayList<HistoryEntry>();
        final ConnectionResource conn =
                connectionManager.getConnectionResource();
        try {
            final PreparedStatement ps;
            if (isDir == null) {
                isDir = Boolean.valueOf(file.isDirectory());
            }
            if (isDir.booleanValue()) {
                // Fetch history for all files under this directory.
                ps = conn.getStatement(GET_DIR_HISTORY);
                ps.setString(2, filePath);
            } else {
                // Fetch history for a single file only.
                ps = conn.getStatement(GET_FILE_HISTORY);
                ps.setString(2, getParentPath(filePath));
                ps.setString(3, getBaseName(filePath));
            }
            ps.setString(1, reposPath);

            final PreparedStatement filePS =
                    withFiles ? conn.getStatement(GET_CS_FILES) : null;

            ResultSet rs = ps.executeQuery();
            try {
                while (rs.next()) {
                    // Get the information about a changeset
                    String revision = rs.getString(1);
                    String author = rs.getString(2);
                    Timestamp time = rs.getTimestamp(3);
                    String message = rs.getString(4);
                    String oldRev = rs.getString(6);
                    HistoryEntry entry = new HistoryEntry(revision, oldRev,
                            time, author, message, true);
                    entries.add(entry);

                    // Fill the list of files touched by the changeset, if
                    // requested.
                    if (withFiles) {
                        int changeset = rs.getInt(5);
                        filePS.setInt(1, changeset);

                        // We do check next(), but PMD doesn't understand it.
                        ResultSet fileRS = filePS.executeQuery(); // NOPMD
                        try {
                            while (fileRS.next()) {
                                entry.addFile(fileRS.getString(1));
                            }
                        } finally {
                            fileRS.close();
                        }
                    }
                }
            } finally {
                rs.close();
            }
        } finally {
            connectionManager.releaseConnection(conn);
        }

        History history = new History();
        history.setHistoryEntries(entries);
        return history;
    }

    private static final PreparedQuery GET_REPOSITORY =
            new PreparedQuery(getQuery("getRepository"));

    private static final InsertQuery INSERT_REPOSITORY =
            new InsertQuery(getQuery("addRepository"));

    @Override
    public void store(History history, Repository repository)
            throws HistoryException
    {
        if (history == null || history.getHistoryEntries().isEmpty()) {
            return;
        }
        try {
            final ConnectionResource conn =
                    connectionManager.getConnectionResource();
            try {
                storeHistory(conn, history, repository);
            } finally {
                connectionManager.releaseConnection(conn);
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    private static final InsertQuery ADD_CHANGESET =
            new InsertQuery(getQuery("addChangeset"));

    private static final PreparedQuery ADD_DIRCHANGE =
            new PreparedQuery(getQuery("addDirchange"));

    private static final PreparedQuery ADD_FILECHANGE =
            new PreparedQuery(getQuery("addFilechange"));

    @SuppressWarnings({ "boxing", "null" })
    private void storeHistory(ConnectionResource conn, History history,
            Repository repository) throws SQLException
    {
        Integer reposId = null;
        Map<String, Integer> authors = null;
        Map<String, Integer> files = null;
        Map<String, Integer> directories = null;
        PreparedStatement addChangeset = null;
        PreparedStatement addDirchange = null;
        PreparedStatement addFilechange = null;

        for (int i = 0;; i++) {
            try {
                if (reposId == null) {
                    reposId = getRepositoryId(conn, repository);
                    conn.commit();
                }

                if (authors == null) {
                    authors = getAuthors(conn, history, reposId);
                    conn.commit();
                }

                if (directories == null || files == null) {
                    Map<String, Integer> dirs = new HashMap<String, Integer>();
                    Map<String, Integer> fls = new HashMap<String, Integer>();
                    getFilesAndDirectories(conn, history, reposId, dirs, fls);
                    conn.commit();
                    directories = dirs;
                    files = fls;
                }

                if (addChangeset == null) {
                    addChangeset = conn.getStatement(ADD_CHANGESET);
                }

                if (addDirchange == null) {
                    addDirchange = conn.getStatement(ADD_DIRCHANGE);
                }

                if (addFilechange == null) {
                    addFilechange = conn.getStatement(ADD_FILECHANGE);
                }

                // Success! Break out of the loop.
                break;

            } catch (SQLException sqle) {
                handleSQLException(sqle, i);
                conn.rollback();
            }
        }

        addChangeset.setInt(1, reposId);

        // getHistoryEntries() returns the entries in reverse chronological
        // order, but we want to insert them in chronological order so that
        // their auto-generated identity column can be used as a chronological
        // ordering column. Otherwise, incremental updates will make the
        // identity column unusable for chronological ordering. Therefore,
        // we walk the list backwards.
        List<HistoryEntry> entries = history.getHistoryEntries();
        ListIterator<HistoryEntry> it = entries.listIterator(entries.size());
        while (it.hasPrevious()) {
            HistoryEntry entry = it.previous();
            retry:
            for (int i = 0;; i++) {
                try {
                    addChangeset.setString(2, entry.getRevision());
                    addChangeset.setInt(3, authors.get(entry.getAuthor()));
                    addChangeset.setTimestamp(4,
                            new Timestamp(entry.getDate().getTime()));
                    String msg = entry.getMessage();
                    // Truncate the message if it can't fit in a VARCHAR
                    // (bug #11663).
                    if (msg.length() > MAX_MESSAGE_LENGTH) {
                        msg = truncate(msg, MAX_MESSAGE_LENGTH);
                    }
                    addChangeset.setString(5, msg);
                    int changesetId = nextChangesetId.getAndIncrement();
                    addChangeset.setInt(6, changesetId);
                    addChangeset.setString(7, entry.getOldRevision());
                    addChangeset.executeUpdate();

                    // Add one row for each file in FILECHANGES, and one row
                    // for each path element of the directories in DIRCHANGES.
                    Set<String> addedDirs = new HashSet<String>();
                    addDirchange.setInt(1, changesetId);
                    addFilechange.setInt(1, changesetId);
                    for (String file : entry.getFiles()) {
                        String fullPath = toUnixPath(file);
                        int fileId = files.get(fullPath);
                        addFilechange.setInt(2, fileId);
                        addFilechange.executeUpdate();
                        String[] pathElts = splitPath(fullPath);
                        for (int j = 0; j < pathElts.length; j++) {
                            String dir = unsplitPath(pathElts, j);
                            // Only add to DIRCHANGES if we haven't already
                            // added this dir/changeset combination.
                            if (!addedDirs.contains(dir)) {
                                addDirchange.setInt(2, directories.get(dir));
                                addDirchange.executeUpdate();
                                addedDirs.add(dir);
                            }
                        }
                    }

                    conn.commit();

                    // Successfully added the entry. Break out of retry loop.
                    break retry;

                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                    conn.rollback();
                }
            }
        }
    }

    /**
     * Optimize how the cache is stored on disk. In particular, make sure
     * index cardinality statistics are up to date, and perform a checkpoint
     * to make sure all changes are forced to the tables on disk and that
     * the unneeded transaction log is deleted.
     *
     * @throws HistoryException if an error happens when optimizing the cache
     */
    @Override
    public void optimize() throws HistoryException {
        try {
            final ConnectionResource conn =
                    connectionManager.getConnectionResource();
            try {
                updateIndexCardinalityStatistics(conn);
                checkpointDatabase(conn);
            } finally {
                connectionManager.releaseConnection(conn);
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    /**
     * <p>
     * Make sure Derby's index cardinality statistics are up to date.
     * Otherwise, the optimizer may choose a bad execution strategy for
     * some queries. This method should be called if the size of the tables
     * has changed significantly.
     * </p>
     *
     * <p>
     * This is a workaround for the problems described in
     * <a href="https://issues.apache.org/jira/browse/DERBY-269">DERBY-269</a> and
     * <a href="https://issues.apache.org/jira/browse/DERBY-3788">DERBY-3788</a>.
     * When automatic update of index cardinality statistics has been
     * implemented in Derby, the workaround may be removed.
     * </p>
     *
     * <p>
     * Without this workaround, poor performance has been observed in
     * {@code get()} due to bad choices made by the optimizer.
     * </p>
     *
     * <p>
     * Note that this method uses a system procedure introduced in Derby 10.5.
     * If this procedure does not exist, this method is a no-op.
     * </p>
     */
    private static void updateIndexCardinalityStatistics(ConnectionResource conn)
            throws SQLException
    {
        DatabaseMetaData dmd = conn.getMetaData();
        if (procedureExists(dmd, "SYSCS_UTIL", "SYSCS_UPDATE_STATISTICS")) {
            PreparedStatement ps = conn.prepareStatement(
                    "CALL SYSCS_UTIL.SYSCS_UPDATE_STATISTICS(?, ?, NULL)");
            try {
                ps.setString(1, SCHEMA);
                for (String table : TABLES) {
                    ps.setString(2, table);
                    retry:
                    for (int i = 0;; i++) {
                        try {
                            ps.execute();
                            // Successfully executed statement. Break out of
                            // retry loop.
                            break retry;
                        } catch (SQLException sqle) {
                            handleSQLException(sqle, i);
                            conn.rollback();
                        }
                    }
                    conn.commit();
                }
            } finally {
                ps.close();
            }
        }
    }

    /**
     * If this is a Derby database, force a checkpoint so that the disk space
     * occupied by the transaction log is freed as early as possible.
     */
    private static void checkpointDatabase(ConnectionResource conn)
            throws SQLException
    {
        DatabaseMetaData dmd = conn.getMetaData();
        if (procedureExists(dmd, "SYSCS_UTIL", "SYSCS_CHECKPOINT_DATABASE")) {
            Statement s = conn.createStatement();
            try {
                s.execute("CALL SYSCS_UTIL.SYSCS_CHECKPOINT_DATABASE()");
            } finally {
                s.close();
            }
            conn.commit();
        }
    }

    /**
     * Check if a stored database procedure exists.
     *
     * @param dmd the meta-data object used for checking
     * @param schema the procedure's schema
     * @param proc the name of the procedure
     * @return {@code true} if the procedure exists, {@code false} otherwise
     * @throws SQLException if an error happens when reading the meta-data
     */
    private static boolean procedureExists(DatabaseMetaData dmd, String schema,
            String proc) throws SQLException
    {
        ResultSet rs = dmd.getProcedures(null, schema, proc);
        try {
            // If there's a row, there is such a procedure.
            return rs.next();
        } finally {
            rs.close();
        }
    }

    /**
     * Get the id of a repository in the database. If the repository is not
     * stored in the database, add it and return its id.
     *
     * @param conn the connection to the database
     * @param repository the repository whose id to get
     * @return the id of the repository
     */
    private static Integer getRepositoryId(ConnectionResource conn,
            Repository repository) throws SQLException
    {
        String reposPath = toUnixPath(repository.getDirectoryName());
        PreparedStatement reposIdPS = conn.getStatement(GET_REPOSITORY);
        reposIdPS.setString(1, reposPath);
        ResultSet reposIdRS = reposIdPS.executeQuery();
        try {
            if (reposIdRS.next()) {
                return Integer.valueOf(reposIdRS.getInt(1));
            }
        } finally {
            reposIdRS.close();
        }

        // Repository is not in the database. Add it.
        PreparedStatement insert =
                conn.getStatement(INSERT_REPOSITORY);
        insert.setString(1, reposPath);
        insert.executeUpdate();
        return getGeneratedIntKey(insert);
    }

    private static final PreparedQuery GET_AUTHORS =
            new PreparedQuery(getQuery("getAuthors"));

    private static final InsertQuery ADD_AUTHOR =
            new InsertQuery(getQuery("addAuthor"));

    /**
     * Get a map from author names to their ids in the database. The authors
     * that are not in the database are added to it.
     *
     * @param conn the connection to the database
     * @param history the history to get the author names from
     * @param reposId the id of the repository
     * @return a map from author names to author ids
     */
    @SuppressWarnings("boxing")
    private Map<String, Integer> getAuthors(ConnectionResource conn,
            History history, int reposId) throws SQLException
    {
        HashMap<String, Integer> map = new HashMap<String, Integer>();
        PreparedStatement ps = conn.getStatement(GET_AUTHORS);
        ps.setInt(1, reposId);
        ResultSet rs = ps.executeQuery();
        try {
            while (rs.next()) {
                map.put(rs.getString(1), rs.getInt(2));
            }
        } finally {
            rs.close();
        }

        PreparedStatement insert = conn.getStatement(ADD_AUTHOR);
        insert.setInt(1, reposId);
        for (HistoryEntry entry : history.getHistoryEntries()) {
            String author = entry.getAuthor();
            if (!map.containsKey(author)) {
                int id = nextAuthorId.getAndIncrement();
                insert.setString(2, author);
                insert.setInt(3, id);
                insert.executeUpdate();
                map.put(author, id);
                conn.commit();
            }
        }

        return map;
    }

    private static final PreparedQuery GET_DIRS =
            new PreparedQuery(getQuery("getDirectories"));

    private static final PreparedQuery GET_FILES =
            new PreparedQuery(getQuery("getFiles"));

    private static final InsertQuery INSERT_DIR =
            new InsertQuery(getQuery("addDirectory"));

    private static final InsertQuery INSERT_FILE =
            new InsertQuery(getQuery("addFile"));

    /**
     * Build maps from directory names and file names to their respective
     * identifiers in the database. The directories and files that are not
     * already in the database are added to it.
     *
     * @param conn the connection to the database
     * @param history the history to get the file and directory names from
     * @param reposId the id of the repository
     * @param dirMap a map which will be filled with directory names and ids
     * @param fileMap a map which will be filled with file names and ids
     */
    @SuppressWarnings("boxing")
    private void getFilesAndDirectories(ConnectionResource conn, History history,
            int reposId, Map<String, Integer> dirMap, Map<String, Integer> fileMap)
            throws SQLException
    {
        populateFileOrDirMap(conn.getStatement(GET_DIRS), reposId, dirMap);
        populateFileOrDirMap(conn.getStatement(GET_FILES), reposId, fileMap);

        int insertCount = 0;

        PreparedStatement insDir = conn.getStatement(INSERT_DIR);
        PreparedStatement insFile = conn.getStatement(INSERT_FILE);
        for (HistoryEntry entry : history.getHistoryEntries()) {
            for (String file : entry.getFiles()) {
                String fullPath = toUnixPath(file);
                // Add the file to the database and to the map if it isn't
                // there already. Assumption: If the file is in the database,
                // all its parent directories are also there.
                if (!fileMap.containsKey(fullPath)) {
                    // Get the dir id for this file, potentially adding the
                    // parent directories to the db and to dirMap.
                    Integer dir = addAllDirs(insDir, reposId, fullPath, dirMap);
                    int fileId = nextFileId.getAndIncrement();
                    insFile.setInt(1, dir.intValue());
                    insFile.setString(2, getBaseName(fullPath));
                    insFile.setInt(3, fileId);
                    insFile.executeUpdate();
                    fileMap.put(fullPath, fileId);

                    // Commit every now and then to allow the database to free
                    // resources (like locks and transaction log), but not too
                    // frequently, since that may kill the performance. It is
                    // OK not to commit for every file added, since the worst
                    // thing that could happen is that we need to re-insert
                    // the files added since the last commit in case of a crash.
                    insertCount++;
                    if (insertCount % 30 == 0) {
                        conn.commit();
                    }
                }
            }
        }
    }

    /**
     * Populate a map with all path/id combinations found in the FILES or
     * DIRECTORIES tables associated with a specified repository id.
     *
     * @param ps the statement used to get path names and ids from the correct
     * table. It should take one parameter: the repository id.
     * @param reposId the id of the repository to scan
     * @param map the map into which to insert the path/id combinations
     */
    @SuppressWarnings("boxing")
    private static void populateFileOrDirMap(PreparedStatement ps, int reposId,
            Map<String, Integer> map) throws SQLException
    {
        ps.setInt(1, reposId);
        ResultSet rs = ps.executeQuery();
        try {
            while (rs.next()) {
                map.put(rs.getString(1), rs.getInt(2));
            }
        } finally {
            rs.close();
        }
    }

    /**
     * Add all the parent directories of a specified file to the database, if
     * they haven't already been added, and also put their paths and ids into
     * a map.
     *
     * @param ps statement that inserts a directory into the DIRECTORIES
     * table. Takes four parameters: (1) the id of the repository, (2) the
     * path of the directory, (3) the id to use for the directory, and
     * (4) the id of the parent directory.
     * @param reposId id of the repository to which the file belongs
     * @param fullPath the file whose parents to add
     * @param map a map from directory path to id for the directories already
     * in the database. When a new directory is added, it's also added to this
     * map.
     * @return the id of the first parent of {@code fullPath}
     */
    @SuppressWarnings("boxing")
    private Integer addAllDirs(PreparedStatement ps, int reposId,
            String fullPath, Map<String, Integer> map) throws SQLException
    {
        String[] pathElts = splitPath(fullPath);
        String parent = unsplitPath(pathElts, pathElts.length - 1);
        Integer dir = map.get(parent);
        if (dir == null) {
            for (int i = 0; i < pathElts.length; i++) {
                Integer prevDirId = dir;
                String path = unsplitPath(pathElts, i);
                dir = map.get(path);
                if (dir == null) {
                    dir = nextDirId.getAndIncrement();
                    ps.setInt(1, reposId);
                    ps.setString(2, path);
                    ps.setInt(3, dir);
                    ps.setObject(4, prevDirId, Types.INTEGER);
                    ps.executeUpdate();
                    map.put(path, dir);
                }
            }
        }
        return dir;
    }
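
    // Example: for fullPath "/src/main/App.java", addAllDirs() ensures that
    // the directories "" (the root), "/src" and "/src/main" exist in the
    // DIRECTORIES table and in the map, and returns the id of "/src/main".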

    /**
     * Return the integer key generated by the previous execution of a
     * statement. The key should be a single INTEGER, and the statement
     * should insert exactly one row, so there should be only one key.
     * @param stmt a statement that has just inserted a row
     * @return the integer key for the newly inserted row, or {@code null}
     * if there is no key
     */
    @SuppressWarnings("boxing")
    private static Integer getGeneratedIntKey(Statement stmt) throws SQLException {
        ResultSet keys = stmt.getGeneratedKeys();
        try {
            return keys.next() ? keys.getInt(1) : null;
        } finally {
            keys.close();
        }
    }

    private static final PreparedQuery GET_LATEST_REVISION =
            new PreparedQuery(getQuery("getLatestCachedRevision"));

    @Override
    public String getLatestCachedRevision(Repository repository)
            throws HistoryException
    {
        try {
            for (int i = 0;; i++) {
                try {
                    return getLatestRevisionForRepository(repository);
                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                }
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    /**
     * Helper for {@link #getLatestCachedRevision(Repository)}.
     */
    private String getLatestRevisionForRepository(Repository repository)
            throws SQLException
    {
        final ConnectionResource conn =
                connectionManager.getConnectionResource();
        try {
            PreparedStatement ps = conn.getStatement(GET_LATEST_REVISION);
            ps.setString(1, toUnixPath(repository.getDirectoryName()));
            ResultSet rs = ps.executeQuery(); // NOPMD (we do check next)
            try {
                return rs.next() ? rs.getString(1) : null;
            } finally {
                rs.close();
            }
        } finally {
            connectionManager.releaseConnection(conn);
        }
    }

    @Override
    public Map<String, Date> getLastModifiedTimes(File directory,
            Repository repository, Map<String, String> path2rev)
            throws HistoryException
    {
        try {
            for (int i = 0;; i++) {
                try {
                    return getLastModifiedTimesForAllFiles(directory, repository,
                            path2rev);
                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                }
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    private static final PreparedQuery GET_LAST_MODIFIED_TIMES =
            new PreparedQuery(getQuery("getLastModifiedTimes"));

    private Map<String, Date> getLastModifiedTimesForAllFiles(
            File directory, Repository repository, Map<String, String> path2rev)
            throws HistoryException, SQLException
    {
        final Map<String, Date> map = new LinkedHashMap<String, Date>();

        final ConnectionResource conn =
                connectionManager.getConnectionResource();
        try {
            PreparedStatement ps = conn.getStatement(GET_LAST_MODIFIED_TIMES);
            ps.setString(1, toUnixPath(repository.getDirectoryName()));
            ps.setString(2, getSourceRootRelativePath(directory));
            ResultSet rs = ps.executeQuery();
            try {
                if (path2rev == null) {
                    while (rs.next()) {
                        map.put(rs.getString(1), rs.getTimestamp(2));
                    }
                } else {
                    while (rs.next()) {
                        String path = rs.getString(1);
                        map.put(path, rs.getTimestamp(2));
                        path2rev.put(path, rs.getString(3));
                    }
                }
            } finally {
                rs.close();
            }
        } finally {
            connectionManager.releaseConnection(conn);
        }

        return map;
    }

    private static final PreparedQuery GET_LAST_MODIFIED_TIMES_SRCROOT =
            new PreparedQuery(getQuery("getLastModifiedTimesSrcRoot"));

    @Override
    public Map<String, Date> getLastModifiedTimes(Map<String, String> path2rev)
            throws HistoryException
    {
        try {
            for (int i = 0;; i++) {
                try {
                    return getLMDsrcRoot(path2rev);
                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                }
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    private Map<String, Date> getLMDsrcRoot(Map<String, String> path2rev)
            throws SQLException
    {
        final Map<String, Date> map = new LinkedHashMap<String, Date>();

        final ConnectionResource conn =
                connectionManager.getConnectionResource();
        try {
            PreparedStatement ps =
                    conn.getStatement(GET_LAST_MODIFIED_TIMES_SRCROOT);
            ResultSet rs = ps.executeQuery();
            try {
                if (path2rev == null) {
                    while (rs.next()) {
                        map.put(rs.getString(1), rs.getTimestamp(2));
                    }
                } else {
                    while (rs.next()) {
                        String path = rs.getString(1);
                        map.put(path, rs.getTimestamp(2));
                        path2rev.put(path, rs.getString(3));
                    }
                }
            } finally {
                rs.close();
            }
        } finally {
            connectionManager.releaseConnection(conn);
        }
        return map;
    }

    @Override
    public void clear(Repository repository) throws HistoryException {
        try {
            for (int i = 0;; i++) {
                try {
                    clearHistoryForRepository(repository);
                    return;
                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                }
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    /**
     * Helper for {@link #clear(Repository)}.
     */
    @SuppressWarnings("boxing")
    private void clearHistoryForRepository(Repository repository)
            throws SQLException
    {
        final ConnectionResource conn =
                connectionManager.getConnectionResource();
        try {
            // This statement shouldn't be called very frequently, so don't
            // care about caching it...
            PreparedStatement ps =
                    conn.prepareStatement(getQuery("clearRepository"));
            try {
                ps.setInt(1, getRepositoryId(conn, repository));
                ps.execute();
                conn.commit();
            } finally {
                ps.close();
            }
        } finally {
            connectionManager.releaseConnection(conn);
        }
    }

    @Override
    public String getInfo() {
        return info;
    }
}