/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * See LICENSE.txt included in this distribution for the specific
 * language governing permissions and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at LICENSE.txt.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
 */

package org.opensolaris.opengrok.history;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.sql.DatabaseMetaData;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLTransientException;
import java.sql.Statement;
import java.sql.Timestamp;
import java.sql.Types;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.opensolaris.opengrok.OpenGrokLogger;
import org.opensolaris.opengrok.configuration.RuntimeEnvironment;
import org.opensolaris.opengrok.jdbc.ConnectionManager;
import org.opensolaris.opengrok.jdbc.ConnectionResource;
import org.opensolaris.opengrok.jdbc.InsertQuery;
import org.opensolaris.opengrok.jdbc.PreparedQuery;
import org.opensolaris.opengrok.util.IOUtils;

class JDBCHistoryCache implements HistoryCache {

    /** The schema in which the tables live. */
    private static final String SCHEMA = "OPENGROK";

    /** The names of all the tables created by this class. */
    private static final String[] TABLES = {
        "REPOSITORIES", "FILES", "AUTHORS", "TAGS", "CHANGESETS", "FILECHANGES",
        "DIRECTORIES", "DIRCHANGES"
    };

    /**
     * The number of times to retry an operation that failed in a way that
     * indicates that it may succeed if it's tried again.
     */
    private static final int MAX_RETRIES = 2;

    /**
     * The maximum number of characters in commit messages. Longer messages
     * will be truncated.
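     * (The limit corresponds to the maximum length of a {@code VARCHAR}
     * column in Derby, the database engine this cache is normally backed by.)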
     */
    private static final int MAX_MESSAGE_LENGTH = 32672;

    private ConnectionManager connectionManager;

    private final String jdbcDriverClass;
    private final String jdbcConnectionURL;

    /** The id to be used for the next row inserted into FILES. */
    private final AtomicInteger nextFileId = new AtomicInteger();

    /** The id to be used for the next row inserted into DIRECTORIES. */
    private final AtomicInteger nextDirId = new AtomicInteger();

    /** The id to be used for the next row inserted into CHANGESETS. */
    private final AtomicInteger nextChangesetId = new AtomicInteger();

    /** The id to be used for the next row inserted into AUTHORS. */
    private final AtomicInteger nextAuthorId = new AtomicInteger();

    /** The id to be used for the next row inserted into TAGS. */
    private final AtomicInteger nextTagId = new AtomicInteger();

    /** Info string to return from {@link #getInfo()}. */
    private String info;

    /** SQL queries used by this class. */
    private static final Properties QUERIES = new Properties();
    static {
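        // Load the SQL statements from JDBCHistoryCache_queries.properties,
        // which is expected to live in the same package as this class.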
        Class<?> klazz = JDBCHistoryCache.class;
        InputStream in = null;
        try {
            in = klazz.getResourceAsStream(
                    klazz.getSimpleName() + "_queries.properties");
            if (in != null) {
                QUERIES.load(in);
            }
        } catch (IOException ioe) {
            throw new ExceptionInInitializerError(ioe);
        } finally { //NOPMD
            IOUtils.close(in);
        }
    }

    /**
     * Create a new cache instance with the default JDBC driver and URL.
     */
    JDBCHistoryCache() {
        this(RuntimeEnvironment.getInstance().getDatabaseDriver(),
                RuntimeEnvironment.getInstance().getDatabaseUrl());
    }

    /**
     * Create a new cache instance with the specified JDBC driver and URL.
     *
     * @param jdbcDriverClass JDBC driver class to access the database backend
     * @param url the JDBC url to the database
     */
    JDBCHistoryCache(String jdbcDriverClass, String url) {
        this.jdbcDriverClass = jdbcDriverClass;
        this.jdbcConnectionURL = url;
    }

    /**
     * Check whether this cache implementation can store history for the given
     * repository. Only repositories that support retrieval of history for the
     * whole directory at once are supported.
     */
    @Override
    public boolean supportsRepository(Repository repository) {
        return repository.hasHistoryForDirectories();
    }

    /**
     * Handle an {@code SQLException}. If the exception indicates that the
     * operation may succeed if it's retried and the number of attempts hasn't
     * exceeded the limit defined by {@link #MAX_RETRIES}, ignore it and let
     * the caller retry the operation. Otherwise, re-throw the exception.
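     * <p>Callers typically invoke this method from an indexed retry loop;
     * see for instance {@link #initialize()}.</p>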
     *
     * @param sqle the exception to handle
     * @param attemptNo the attempt number, first attempt is 0
     * @throws SQLException if the operation shouldn't be retried
     */
    private static void handleSQLException(SQLException sqle, int attemptNo)
            throws SQLException {
        boolean isTransient = false;
        for (Throwable cause : sqle) {
            if (cause instanceof SQLTransientException) {
                isTransient = true;
                break;
            }
        }

        if (isTransient && attemptNo < MAX_RETRIES) {
            Logger logger = OpenGrokLogger.getLogger();
            logger.info("Transient database failure detected. Retrying.");
            logger.log(Level.FINE, "Transient database failure details:", sqle);
        } else {
            throw sqle;
        }
    }

    /**
     * Get the SQL text for a named query.
     * @param key name of the query
     * @return SQL text for the query
     */
    private static String getQuery(String key) {
        return QUERIES.getProperty(key);
    }

    private void initDB(Statement s) throws SQLException {
        // TODO Store a database version which is incremented on each
        // format change. When a version change is detected, drop the database
        // or, if possible, upgrade the database to the new format. For now,
        // check if the tables exist, and create them if necessary.

        DatabaseMetaData dmd = s.getConnection().getMetaData();

        if (!tableExists(dmd, SCHEMA, "REPOSITORIES")) {
            s.execute(getQuery("createTableRepositories"));
        }

        if (!tableExists(dmd, SCHEMA, "DIRECTORIES")) {
            s.execute(getQuery("createTableDirectories"));
        }

        // Databases created with 0.11 or earlier versions don't have a
        // PARENT column in the DIRECTORIES table. If the column is missing,
        // create it and populate it. Bug #3174.
        if (!columnExists(dmd, SCHEMA, "DIRECTORIES", "PARENT")) {
            s.execute(getQuery("alterTableDirectoriesParent"));
            s.execute(getQuery("alterTableDirectoriesParentPathConstraint"));
            fillDirectoriesParentColumn(s);
        }

        if (!tableExists(dmd, SCHEMA, "FILES")) {
            s.execute(getQuery("createTableFiles"));
        }

        if (!tableExists(dmd, SCHEMA, "AUTHORS")) {
            s.execute(getQuery("createTableAuthors"));
        }

        if (!tableExists(dmd, SCHEMA, "TAGS")) {
            s.execute(getQuery("createTableTags"));
        }

        if (!tableExists(dmd, SCHEMA, "CHANGESETS")) {
            s.execute(getQuery("createTableChangesets"));
            // Create a composite index on the repository in ascending order
            // and the id in descending order. This index may allow faster
            // retrieval of history in reverse chronological order.
            s.execute(getQuery("createIndexChangesetsRepoIdDesc"));
        }

        if (!tableExists(dmd, SCHEMA, "DIRCHANGES")) {
            s.execute(getQuery("createTableDirchanges"));
        }

        if (!tableExists(dmd, SCHEMA, "FILECHANGES")) {
            s.execute(getQuery("createTableFilechanges"));
        }

        // Derby has some performance problems with auto-generated identity
        // columns when multiple threads insert into the same table
        // concurrently. Therefore, we have our own light-weight id generators
        // that we initialize on start-up. Details can be found in Derby's
        // bug tracker: https://issues.apache.org/jira/browse/DERBY-4437

        initIdGenerator(s, "getMaxFileId", nextFileId);
        initIdGenerator(s, "getMaxDirId", nextDirId);
        initIdGenerator(s, "getMaxChangesetId", nextChangesetId);
        initIdGenerator(s, "getMaxAuthorId", nextAuthorId);
        initIdGenerator(s, "getMaxTagId", nextTagId);

        StringBuilder infoBuilder = new StringBuilder();
        infoBuilder.append(getClass().getSimpleName()).append('\n');
        infoBuilder.append("Driver class: ").append(jdbcDriverClass).append('\n');
        infoBuilder.append("URL: ").append(jdbcConnectionURL).append('\n');
        infoBuilder.append("Database name: ")
                .append(dmd.getDatabaseProductName()).append('\n');
        infoBuilder.append("Database version: ")
                .append(dmd.getDatabaseProductVersion()).append('\n');
        info = infoBuilder.toString();
    }

    /**
     * Fill the PARENT column of the DIRECTORIES table with correct values.
     * Used when upgrading a database from an old format that doesn't have
     * the PARENT column.
     */
    private static void fillDirectoriesParentColumn(Statement s)
            throws SQLException {
        PreparedStatement update = s.getConnection().prepareStatement(
                getQuery("updateDirectoriesParent"));
        try {
            ResultSet rs = s.executeQuery(getQuery("getAllDirectories"));
            try {
                while (rs.next()) {
                    update.setInt(1, rs.getInt("REPOSITORY"));
                    update.setString(2, getParentPath(rs.getString("PATH")));
                    update.setInt(3, rs.getInt("ID"));
                    update.executeUpdate();
                }
            } finally {
                rs.close();
            }
        } finally {
            update.close();
        }
    }

    private static boolean tableExists(
            DatabaseMetaData dmd, String schema, String table)
            throws SQLException {
        ResultSet rs = dmd.getTables(
                null, schema, table, new String[] {"TABLE"});
        try {
            return rs.next();
        } finally {
            rs.close();
        }
    }

    private static boolean columnExists(
            DatabaseMetaData dmd, String schema, String table, String column)
            throws SQLException {
        ResultSet rs = dmd.getColumns(null, schema, table, column);
        try {
            return rs.next();
        } finally {
            rs.close();
        }
    }

    /**
     * Initialize the {@code AtomicInteger} object that holds the value of
     * the id to use for the next row in a certain table. If there are rows
     * in the table, take the maximum value and increment it by one. Otherwise,
     * the {@code AtomicInteger} will be left at its current value (presumably
     * 0).
     *
     * @param s a statement object on which the max query is executed
     * @param stmtKey name of the query to execute in order to get max id
     * @param generator the {@code AtomicInteger} object to initialize
     */
    private static void initIdGenerator(
            Statement s, String stmtKey, AtomicInteger generator)
            throws SQLException {
        ResultSet rs = s.executeQuery(getQuery(stmtKey));
        try {
            if (rs.next()) {
                int val = rs.getInt(1);
                if (!rs.wasNull()) {
                    generator.set(val + 1);
                }
            }
        } finally {
            rs.close();
        }
    }

    @Override
    public void initialize() throws HistoryException {
        try {
            connectionManager =
                    new ConnectionManager(jdbcDriverClass, jdbcConnectionURL);
            for (int i = 0;; i++) {
                final ConnectionResource conn =
                        connectionManager.getConnectionResource();
                try {
                    final Statement stmt = conn.createStatement();
                    try {
                        initDB(stmt);
                    } finally {
                        stmt.close();
                    }
                    conn.commit();
                    // Success! Break out of the loop.
                    return;
                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                } finally {
                    connectionManager.releaseConnection(conn);
                }
            }
        } catch (Exception e) {
            throw new HistoryException(e);
        }
    }

    private static final PreparedQuery IS_DIR_IN_CACHE =
            new PreparedQuery(getQuery("hasCacheForDirectory"));

    // We do check the return value from ResultSet.next(), but PMD doesn't
    // understand it, so suppress the warning.
    @SuppressWarnings("PMD.CheckResultSet")
    @Override
    public boolean hasCacheForDirectory(File file, Repository repository)
            throws HistoryException {
        assert file.isDirectory();
        try {
            for (int i = 0;; i++) {
                final ConnectionResource conn =
                        connectionManager.getConnectionResource();
                try {
                    PreparedStatement ps = conn.getStatement(IS_DIR_IN_CACHE);
                    ps.setString(1, toUnixPath(repository.getDirectoryName()));
                    ps.setString(2, getSourceRootRelativePath(file));
                    ResultSet rs = ps.executeQuery();
                    try {
                        return rs.next();
                    } finally {
                        rs.close();
                    }
                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                } finally {
                    connectionManager.releaseConnection(conn);
                }
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    /**
     * Get path name with all file separators replaced with '/'.
     */
    private static String toUnixPath(String path) {
        return path.replace(File.separatorChar, '/');
    }

    /**
     * Get path name with all file separators replaced with '/'.
     */
    private static String toUnixPath(File file) throws HistoryException {
        try {
            return toUnixPath(file.getCanonicalPath());
        } catch (IOException ioe) {
            throw new HistoryException(ioe);
        }
    }

    /**
     * Get the path of a file relative to the source root.
     * @param file the file to get the path for
     * @return relative path for {@code file} with unix file separators
     */
    private static String getSourceRootRelativePath(File file)
            throws HistoryException {
        String filePath = toUnixPath(file);
        String rootPath = RuntimeEnvironment.getInstance().getSourceRootPath();
        return getRelativePath(filePath, rootPath);
    }

    /**
     * Get the path of a file relative to the specified root directory.
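     * For example, {@code "/src/opengrok/foo/Bar.java"} relative to the root
     * {@code "/src/opengrok"} is {@code "/foo/Bar.java"}.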
     * @param filePath the canonical path of the file to get the relative
     * path for
     * @param rootPath the canonical path of the root directory
     * @return relative path with unix file separators
     */
    private static String getRelativePath(String filePath, String rootPath) {
        assert filePath.startsWith(rootPath);
        return filePath.substring(rootPath.length());
    }

    /**
     * Get the base name of a path (with unix file separators).
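     * For example, the base name of {@code "/foo/bar/Baz.java"} is
     * {@code "Baz.java"}.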
     *
     * @param fullPath the full path of the file with unix file separators
     * @return the base name of the file
     */
    private static String getBaseName(String fullPath) {
        int idx = fullPath.lastIndexOf('/');
        return (idx >= 0) ? fullPath.substring(idx + 1) : fullPath;
    }

    /**
     * Get the path to the parent of the specified file.
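     * For example, the parent path of {@code "/foo/bar/Baz.java"} is
     * {@code "/foo/bar"}.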
     *
     * @param fullPath the full path of the file with unix file separators
     * @return the full path of the file's parent
     */
    private static String getParentPath(String fullPath) {
        int idx = fullPath.lastIndexOf('/');
        return (idx >= 0) ? fullPath.substring(0, idx) : fullPath;
    }

    /**
     * Split a full (unix-style) path into an array of path elements.
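     * For example, {@code "/foo/bar/Baz.java"} is split into
     * {@code ["foo", "bar", "Baz.java"]}.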
     *
     * @param fullPath the full unix-style path name
     * @return an array with each separate element of the path
     * @throws IllegalArgumentException if fullPath doesn't start with '/'
     */
    private static String[] splitPath(String fullPath) {
        if (fullPath.isEmpty() || fullPath.charAt(0) != '/') {
            throw new IllegalArgumentException("Not a full path: " + fullPath);
        }
        return fullPath.substring(1).split("/");
    }

    /**
     * Reconstruct a path previously split by {@link #splitPath(String)}, or
     * possibly just a part of it (only the {@code num} first elements will
     * be used).
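     * For example, {@code unsplitPath(new String[] {"foo", "bar", "Baz.java"}, 2)}
     * returns {@code "/foo/bar"}.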
     *
     * @param pathElts the elements of the path
     * @param num the number of elements to use when reconstructing the path
     * @return a path name
     */
    private static String unsplitPath(String[] pathElts, int num) {
        StringBuilder out = new StringBuilder();
        for (int i = 0; i < num; i++) {
            out.append("/").append(pathElts[i]);
        }
        return out.toString();
    }

    /**
     * Truncate a string to the given length.
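     * Unless {@code length} is shorter than the {@code " (...)"} marker
     * itself, the end of the truncated string is replaced with that marker to
     * show that the message has been cut off. The result is always exactly
     * {@code length} characters long.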
     *
     * @param str the string to truncate
     * @param length the length of the string after truncation
     * @return the truncated string
     * @throws IllegalArgumentException if the string is shorter than the
     * specified length
     */
    private static String truncate(String str, int length) {
        if (str.length() < length) {
            throw new IllegalArgumentException();
        }
        String suffix = " (...)";
        return length < suffix.length() ?
                str.substring(0, length) :
                (str.substring(0, length - suffix.length()) + suffix);
    }

    /**
     * Statement that gets the history for the specified file and repository.
     * The result is ordered in reverse chronological order to match the
     * required ordering for {@link HistoryCache#get(File, Repository, boolean)}.
     */
    private static final PreparedQuery GET_FILE_HISTORY =
            new PreparedQuery(getQuery("getFileHistory"));

    /**
     * Statement that gets the history for all files matching a pattern in the
     * given repository. The result is ordered in reverse chronological order
     * to match the required ordering for
     * {@link HistoryCache#get(File, Repository, boolean)}.
     */
    private static final PreparedQuery GET_DIR_HISTORY =
            new PreparedQuery(getQuery("getDirHistory"));

    /** Statement that retrieves all the files touched by a given changeset. */
    private static final PreparedQuery GET_CS_FILES =
            new PreparedQuery(getQuery("getFilesInChangeset"));

    @Override
    public History get(File file, Repository repository, boolean withFiles)
            throws HistoryException {
        try {
            for (int i = 0;; i++) {
                try {
                    return getHistory(file, repository, withFiles);
                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                }
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    /**
     * Helper for {@link #get(File, Repository, boolean)}.
     */
    private History getHistory(
            File file, Repository repository, boolean withFiles)
            throws HistoryException, SQLException {
        final String filePath = getSourceRootRelativePath(file);
        final String reposPath = toUnixPath(repository.getDirectoryName());
        final ArrayList<HistoryEntry> entries = new ArrayList<HistoryEntry>();
        final ConnectionResource conn =
                connectionManager.getConnectionResource();
        try {
            final PreparedStatement ps;
            if (file.isDirectory()) {
                // Fetch history for all files under this directory.
                ps = conn.getStatement(GET_DIR_HISTORY);
                ps.setString(2, filePath);
            } else {
                // Fetch history for a single file only.
                ps = conn.getStatement(GET_FILE_HISTORY);
                ps.setString(2, getParentPath(filePath));
                ps.setString(3, getBaseName(filePath));
            }
            ps.setString(1, reposPath);

            final PreparedStatement filePS =
                    withFiles ? conn.getStatement(GET_CS_FILES) : null;

            ResultSet rs = ps.executeQuery();
            try {
                while (rs.next()) {
                    // Get the information about a changeset
                    String revision = rs.getString(1);
                    String author = rs.getString(2);
                    String tags = rs.getString(3);
                    Timestamp time = rs.getTimestamp(4);
                    String message = rs.getString(5);
                    HistoryEntry entry = new HistoryEntry(
                            revision, time, author, tags, message, true);
                    entries.add(entry);

                    // Fill the list of files touched by the changeset, if
                    // requested.
                    if (withFiles) {
                        int changeset = rs.getInt(6);
                        filePS.setInt(1, changeset);

                        // We do check next(), but PMD doesn't understand it.
                        ResultSet fileRS = filePS.executeQuery(); // NOPMD
                        try {
                            while (fileRS.next()) {
                                entry.addFile(fileRS.getString(1));
                            }
                        } finally {
                            fileRS.close();
                        }
                    }
                }
            } finally {
                rs.close();
            }
        } finally {
            connectionManager.releaseConnection(conn);
        }

        History history = new History();
        history.setHistoryEntries(entries);
        return history;
    }

    private static final PreparedQuery GET_REPOSITORY =
            new PreparedQuery(getQuery("getRepository"));

    private static final InsertQuery INSERT_REPOSITORY =
            new InsertQuery(getQuery("addRepository"));

    @Override
    public void store(History history, Repository repository)
            throws HistoryException {
        try {
            final ConnectionResource conn =
                    connectionManager.getConnectionResource();
            try {
                storeHistory(conn, history, repository);
            } finally {
                connectionManager.releaseConnection(conn);
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    private static final InsertQuery ADD_CHANGESET =
            new InsertQuery(getQuery("addChangeset"));

    private static final PreparedQuery ADD_DIRCHANGE =
            new PreparedQuery(getQuery("addDirchange"));

    private static final PreparedQuery ADD_FILECHANGE =
            new PreparedQuery(getQuery("addFilechange"));

    private void storeHistory(ConnectionResource conn, History history,
            Repository repository) throws SQLException {

        Integer reposId = null;
        Map<String, Integer> authors = null;
        Map<String, Integer> tags = null;
        Map<String, Integer> files = null;
        Map<String, Integer> directories = null;
        PreparedStatement addChangeset = null;
        PreparedStatement addDirchange = null;
        PreparedStatement addFilechange = null;

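        // Each preparation step below is guarded by a null check so that if a
        // transient failure forces a retry, the steps that have already
        // completed are not repeated.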
        for (int i = 0;; i++) {
            try {
                if (reposId == null) {
                    reposId = getRepositoryId(conn, repository);
                    conn.commit();
                }

                if (authors == null) {
                    authors = getAuthors(conn, history, reposId);
                    conn.commit();
                }

                if (tags == null) {
                    tags = getTags(conn, history, reposId);
                    conn.commit();
                }

                if (directories == null || files == null) {
                    Map<String, Integer> dirs = new HashMap<String, Integer>();
                    Map<String, Integer> fls = new HashMap<String, Integer>();
                    getFilesAndDirectories(conn, history, reposId, dirs, fls);
                    conn.commit();
                    directories = dirs;
                    files = fls;
                }

                if (addChangeset == null) {
                    addChangeset = conn.getStatement(ADD_CHANGESET);
                }

                if (addDirchange == null) {
                    addDirchange = conn.getStatement(ADD_DIRCHANGE);
                }

                if (addFilechange == null) {
                    addFilechange = conn.getStatement(ADD_FILECHANGE);
                }

                // Success! Break out of the loop.
                break;

            } catch (SQLException sqle) {
                handleSQLException(sqle, i);
                conn.rollback();
            }
        }

        addChangeset.setInt(1, reposId);

        // getHistoryEntries() returns the entries in reverse chronological
        // order, but we want to insert them in chronological order so that
        // their auto-generated identity column can be used as a chronological
        // ordering column. Otherwise, incremental updates will make the
        // identity column unusable for chronological ordering. Therefore,
        // we walk the list backwards.
        List<HistoryEntry> entries = history.getHistoryEntries();
        for (ListIterator<HistoryEntry> it =
                entries.listIterator(entries.size());
                it.hasPrevious();) {
            HistoryEntry entry = it.previous();
            retry:
            for (int i = 0;; i++) {
                try {
                    addChangeset.setString(2, entry.getRevision());
                    addChangeset.setInt(3, authors.get(entry.getAuthor()));
                    if (entry.getTags() != null) {
                        addChangeset.setInt(4, tags.get(entry.getTags()));
                    } else {
                        addChangeset.setNull(4, java.sql.Types.INTEGER);
                    }
                    addChangeset.setTimestamp(5,
                            new Timestamp(entry.getDate().getTime()));
                    String msg = entry.getMessage();
                    // Truncate the message if it can't fit in a VARCHAR
                    // (bug #11663).
                    if (msg.length() > MAX_MESSAGE_LENGTH) {
                        msg = truncate(msg, MAX_MESSAGE_LENGTH);
                    }
                    addChangeset.setString(6, msg);
                    int changesetId = nextChangesetId.getAndIncrement();
                    addChangeset.setInt(7, changesetId);
                    addChangeset.executeUpdate();

                    // Add one row for each file in FILECHANGES, and one row
                    // for each path element of the directories in DIRCHANGES.
                    Set<String> addedDirs = new HashSet<String>();
                    addDirchange.setInt(1, changesetId);
                    addFilechange.setInt(1, changesetId);
                    for (String file : entry.getFiles()) {
                        String fullPath = toUnixPath(file);
                        int fileId = files.get(fullPath);
                        addFilechange.setInt(2, fileId);
                        addFilechange.executeUpdate();
                        String[] pathElts = splitPath(fullPath);
                        for (int j = 0; j < pathElts.length; j++) {
                            String dir = unsplitPath(pathElts, j);
                            // Only add to DIRCHANGES if we haven't already
                            // added this dir/changeset combination.
                            if (!addedDirs.contains(dir)) {
                                addDirchange.setInt(2, directories.get(dir));
                                addDirchange.executeUpdate();
                                addedDirs.add(dir);
                            }
                        }
                    }

                    conn.commit();

                    // Successfully added the entry. Break out of retry loop.
                    break retry;

                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                    conn.rollback();
                }
            }
        }
    }

    /**
     * Optimize how the cache is stored on disk. In particular, make sure
     * index cardinality statistics are up to date, and perform a checkpoint
     * to make sure all changes are forced to the tables on disk and that
     * the unneeded transaction log is deleted.
     *
     * @throws HistoryException if an error happens when optimizing the cache
     */
    @Override
    public void optimize() throws HistoryException {
        try {
            final ConnectionResource conn =
                    connectionManager.getConnectionResource();
            try {
                updateIndexCardinalityStatistics(conn);
                checkpointDatabase(conn);
            } finally {
                connectionManager.releaseConnection(conn);
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    /**
     * <p>
     * Make sure Derby's index cardinality statistics are up to date.
     * Otherwise, the optimizer may choose a bad execution strategy for
     * some queries. This method should be called if the size of the tables
     * has changed significantly.
     * </p>
     *
     * <p>
     * This is a workaround for the problems described in
     * <a href="https://issues.apache.org/jira/browse/DERBY-269">DERBY-269</a> and
     * <a href="https://issues.apache.org/jira/browse/DERBY-3788">DERBY-3788</a>.
     * When automatic update of index cardinality statistics has been
     * implemented in Derby, the workaround may be removed.
     * </p>
     *
     * <p>
     * Without this workaround, poor performance has been observed in
     * {@code get()} due to bad choices made by the optimizer.
     * </p>
     *
     * <p>
     * Note that this method uses a system procedure introduced in Derby 10.5.
     * If this procedure does not exist, this method is a no-op.
     * </p>
     */
    private void updateIndexCardinalityStatistics(ConnectionResource conn)
            throws SQLException {
        DatabaseMetaData dmd = conn.getMetaData();
        if (procedureExists(dmd, "SYSCS_UTIL", "SYSCS_UPDATE_STATISTICS")) {
            PreparedStatement ps = conn.prepareStatement(
                    "CALL SYSCS_UTIL.SYSCS_UPDATE_STATISTICS(?, ?, NULL)");
            try {
                ps.setString(1, SCHEMA);
                for (String table : TABLES) {
                    ps.setString(2, table);
                    retry:
                    for (int i = 0;; i++) {
                        try {
                            ps.execute();
                            // Successfully executed statement. Break out of
                            // retry loop.
                            break retry;
                        } catch (SQLException sqle) {
                            handleSQLException(sqle, i);
                            conn.rollback();
                        }
                    }
                    conn.commit();
                }
            } finally {
                ps.close();
            }
        }
    }

    /**
     * If this is a Derby database, force a checkpoint so that the disk space
     * occupied by the transaction log is freed as early as possible.
     */
    private void checkpointDatabase(ConnectionResource conn)
            throws SQLException {
        DatabaseMetaData dmd = conn.getMetaData();
        if (procedureExists(dmd, "SYSCS_UTIL", "SYSCS_CHECKPOINT_DATABASE")) {
            Statement s = conn.createStatement();
            try {
                s.execute("CALL SYSCS_UTIL.SYSCS_CHECKPOINT_DATABASE()");
            } finally {
                s.close();
            }
            conn.commit();
        }
    }

    /**
     * Check if a stored database procedure exists.
     *
     * @param dmd the meta-data object used for checking
     * @param schema the procedure's schema
     * @param proc the name of the procedure
     * @return {@code true} if the procedure exists, {@code false} otherwise
     * @throws SQLException if an error happens when reading the meta-data
     */
    private boolean procedureExists(DatabaseMetaData dmd,
            String schema, String proc)
            throws SQLException {
        ResultSet rs = dmd.getProcedures(null, schema, proc);
        try {
            // If there's a row, there is such a procedure.
            return rs.next();
        } finally {
            rs.close();
        }
    }

    /**
     * Get the id of a repository in the database. If the repository is not
     * stored in the database, add it and return its id.
     *
     * @param conn the connection to the database
     * @param repository the repository whose id to get
     * @return the id of the repository
     */
    private int getRepositoryId(ConnectionResource conn, Repository repository)
            throws SQLException {
        String reposPath = toUnixPath(repository.getDirectoryName());
        PreparedStatement reposIdPS = conn.getStatement(GET_REPOSITORY);
        reposIdPS.setString(1, reposPath);
        ResultSet reposIdRS = reposIdPS.executeQuery();
        try {
            if (reposIdRS.next()) {
                return reposIdRS.getInt(1);
            }
        } finally {
            reposIdRS.close();
        }

        // Repository is not in the database. Add it.
        PreparedStatement insert =
                conn.getStatement(INSERT_REPOSITORY);
        insert.setString(1, reposPath);
        insert.executeUpdate();
        return getGeneratedIntKey(insert);
    }

    private static final PreparedQuery GET_AUTHORS =
            new PreparedQuery(getQuery("getAuthors"));

    private static final InsertQuery ADD_AUTHOR =
            new InsertQuery(getQuery("addAuthor"));

    private static final PreparedQuery GET_TAGS =
            new PreparedQuery(getQuery("getTags"));

    private static final InsertQuery ADD_TAGS =
            new InsertQuery(getQuery("addTags"));

    /**
     * Get a map from author names to their ids in the database. The authors
     * that are not in the database are added to it.
     *
     * @param conn the connection to the database
     * @param history the history to get the author names from
     * @param reposId the id of the repository
     * @return a map from author names to author ids
     */
    private Map<String, Integer> getAuthors(
            ConnectionResource conn, History history, int reposId)
            throws SQLException {
        HashMap<String, Integer> map = new HashMap<String, Integer>();
        PreparedStatement ps = conn.getStatement(GET_AUTHORS);
        ps.setInt(1, reposId);
        ResultSet rs = ps.executeQuery();
        try {
            while (rs.next()) {
                map.put(rs.getString(1), rs.getInt(2));
            }
        } finally {
            rs.close();
        }

        PreparedStatement insert = conn.getStatement(ADD_AUTHOR);
        insert.setInt(1, reposId);
        for (HistoryEntry entry : history.getHistoryEntries()) {
            String author = entry.getAuthor();
            if (!map.containsKey(author)) {
                int id = nextAuthorId.getAndIncrement();
                insert.setString(2, author);
                insert.setInt(3, id);
                insert.executeUpdate();
                map.put(author, id);
                conn.commit();
            }
        }

        return map;
    }

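    /**
     * Get a map from tag strings to their ids in the database. Tags that are
     * not already in the database are added to it.
     *
     * @param conn the connection to the database
     * @param history the history to get the tags from
     * @param reposId the id of the repository
     * @return a map from tags to tag ids
     */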
    private Map<String, Integer> getTags(
            ConnectionResource conn, History history, int reposId)
            throws SQLException {
        HashMap<String, Integer> map = new HashMap<String, Integer>();
        PreparedStatement ps = conn.getStatement(GET_TAGS);
        ps.setInt(1, reposId);
        ResultSet rs = ps.executeQuery();
        try {
            while (rs.next()) {
                map.put(rs.getString(1), rs.getInt(2));
            }
        } finally {
            rs.close();
        }

        PreparedStatement insert = conn.getStatement(ADD_TAGS);
        insert.setInt(1, reposId);
        for (HistoryEntry entry : history.getHistoryEntries()) {
            String tags = entry.getTags();
            if (tags != null && !map.containsKey(tags)) {
                int id = nextTagId.getAndIncrement();
                insert.setString(2, tags);
                insert.setInt(3, id);
                insert.executeUpdate();
                map.put(tags, id);
                conn.commit();
            }
        }

        return map;
    }

    private static final PreparedQuery GET_DIRS =
            new PreparedQuery(getQuery("getDirectories"));

    private static final PreparedQuery GET_FILES =
            new PreparedQuery(getQuery("getFiles"));

    private static final InsertQuery INSERT_DIR =
            new InsertQuery(getQuery("addDirectory"));

    private static final InsertQuery INSERT_FILE =
            new InsertQuery(getQuery("addFile"));

    /**
     * Build maps from directory names and file names to their respective
     * identifiers in the database. The directories and files that are not
     * already in the database are added to it.
     *
     * @param conn the connection to the database
     * @param history the history to get the file and directory names from
     * @param reposId the id of the repository
     * @param dirMap a map which will be filled with directory names and ids
     * @param fileMap a map which will be filled with file names and ids
     */
    private void getFilesAndDirectories(
            ConnectionResource conn, History history, int reposId,
            Map<String, Integer> dirMap, Map<String, Integer> fileMap)
            throws SQLException {

        populateFileOrDirMap(conn.getStatement(GET_DIRS), reposId, dirMap);
        populateFileOrDirMap(conn.getStatement(GET_FILES), reposId, fileMap);

        int insertCount = 0;

        PreparedStatement insDir = conn.getStatement(INSERT_DIR);
        PreparedStatement insFile = conn.getStatement(INSERT_FILE);
        for (HistoryEntry entry : history.getHistoryEntries()) {
            for (String file : entry.getFiles()) {
                String fullPath = toUnixPath(file);
                // Add the file to the database and to the map if it isn't
                // there already. Assumption: If the file is in the database,
                // all its parent directories are also there.
                if (!fileMap.containsKey(fullPath)) {
                    // Get the dir id for this file, potentially adding the
                    // parent directories to the db and to dirMap.
                    int dir = addAllDirs(insDir, reposId, fullPath, dirMap);
                    int fileId = nextFileId.getAndIncrement();
                    insFile.setInt(1, dir);
                    insFile.setString(2, getBaseName(fullPath));
                    insFile.setInt(3, fileId);
                    insFile.executeUpdate();
                    fileMap.put(fullPath, fileId);

                    // Commit every now and then to allow the database to free
                    // resources (like locks and transaction log), but not too
                    // frequently, since that may kill the performance. It is
                    // OK not to commit for every file added, since the worst
                    // thing that could happen is that we need to re-insert
                    // the files added since the last commit in case of a crash.
                    insertCount++;
                    if (insertCount % 30 == 0) {
                        conn.commit();
                    }
                }
            }
        }
    }

    /**
     * Populate a map with all path/id combinations found in the FILES or
     * DIRECTORIES tables associated with a specified repository id.
     *
     * @param ps the statement used to get path names and ids from the correct
     * table. It should take one parameter: the repository id.
     * @param reposId the id of the repository to scan
     * @param map the map into which to insert the path/id combinations
     */
    private void populateFileOrDirMap(
            PreparedStatement ps, int reposId, Map<String, Integer> map)
            throws SQLException {
        ps.setInt(1, reposId);
        ResultSet rs = ps.executeQuery();
        try {
            while (rs.next()) {
                map.put(rs.getString(1), rs.getInt(2));
            }
        } finally {
            rs.close();
        }
    }

    /**
     * Add all the parent directories of a specified file to the database, if
     * they haven't already been added, and also put their paths and ids into
     * a map.
     *
     * @param ps statement that inserts a directory into the DIRECTORIES
     * table. Takes four parameters: (1) the id of the repository, (2) the
     * path of the directory, (3) the id to use for the directory, and (4) the
     * id of the directory's parent, or {@code null} for the root directory
     * @param reposId id of the repository to which the file belongs
     * @param fullPath the file whose parents to add
     * @param map a map from directory path to id for the directories already
     * in the database. When a new directory is added, it's also added to this
     * map.
     * @return the id of the first parent of {@code fullPath}
     */
    private int addAllDirs(
            PreparedStatement ps, int reposId, String fullPath,
            Map<String, Integer> map) throws SQLException {
        String[] pathElts = splitPath(fullPath);
        String parent = unsplitPath(pathElts, pathElts.length - 1);
        Integer dir = map.get(parent);
        if (dir == null) {
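            // Walk the path from the root up to and including the parent
            // directory, inserting any levels that are missing, and remember
            // the id of the previous level so that it can be stored in the
            // PARENT column of the new row.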
            for (int i = 0; i < pathElts.length; i++) {
                Integer prevDirId = dir;
                String path = unsplitPath(pathElts, i);
                dir = map.get(path);
                if (dir == null) {
                    dir = nextDirId.getAndIncrement();
                    ps.setInt(1, reposId);
                    ps.setString(2, path);
                    ps.setInt(3, dir);
                    ps.setObject(4, prevDirId, Types.INTEGER);
                    ps.executeUpdate();
                    map.put(path, dir);
                }
            }
        }
        return dir;
    }

    /**
     * Return the integer key generated by the previous execution of a
     * statement. The key should be a single INTEGER, and the statement
     * should insert exactly one row, so there should be only one key.
     * @param stmt a statement that has just inserted a row
     * @return the integer key for the newly inserted row, or {@code null}
     * if there is no key
     */
    private Integer getGeneratedIntKey(Statement stmt) throws SQLException {
        ResultSet keys = stmt.getGeneratedKeys();
        try {
            return keys.next() ? keys.getInt(1) : null;
        } finally {
            keys.close();
        }
    }

    private static final PreparedQuery GET_LATEST_REVISION =
            new PreparedQuery(getQuery("getLatestCachedRevision"));

    @Override
    public String getLatestCachedRevision(Repository repository)
            throws HistoryException {
        try {
            for (int i = 0;; i++) {
                try {
                    return getLatestRevisionForRepository(repository);
                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                }
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    /**
     * Helper for {@link #getLatestCachedRevision(Repository)}.
     */
    private String getLatestRevisionForRepository(Repository repository)
            throws SQLException {
        final ConnectionResource conn =
                connectionManager.getConnectionResource();
        try {
            PreparedStatement ps = conn.getStatement(GET_LATEST_REVISION);
            ps.setString(1, toUnixPath(repository.getDirectoryName()));
            ResultSet rs = ps.executeQuery(); // NOPMD (we do check next)
            try {
                return rs.next() ? rs.getString(1) : null;
            } finally {
                rs.close();
            }
        } finally {
            connectionManager.releaseConnection(conn);
        }
    }

    @Override
    public Map<String, Date> getLastModifiedTimes(
            File directory, Repository repository)
            throws HistoryException
    {
        try {
            for (int i = 0;; i++) {
                try {
                    return getLastModifiedTimesForAllFiles(
                            directory, repository);
                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                }
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    private static final PreparedQuery GET_LAST_MODIFIED_TIMES =
            new PreparedQuery(getQuery("getLastModifiedTimes"));

    private Map<String, Date> getLastModifiedTimesForAllFiles(
            File directory, Repository repository)
            throws HistoryException, SQLException
    {
        final Map<String, Date> map = new HashMap<String, Date>();

        final ConnectionResource conn =
                connectionManager.getConnectionResource();
        try {
            PreparedStatement ps = conn.getStatement(GET_LAST_MODIFIED_TIMES);
            ps.setString(1, toUnixPath(repository.getDirectoryName()));
            ps.setString(2, getSourceRootRelativePath(directory));
            ResultSet rs = ps.executeQuery();
            try {
                while (rs.next()) {
                    map.put(rs.getString(1), rs.getTimestamp(2));
                }
            } finally {
                rs.close();
            }
        } finally {
            connectionManager.releaseConnection(conn);
        }

        return map;
    }

    @Override
    public void clear(Repository repository) throws HistoryException {
        try {
            for (int i = 0;; i++) {
                try {
                    clearHistoryForRepository(repository);
                    return;
                } catch (SQLException sqle) {
                    handleSQLException(sqle, i);
                }
            }
        } catch (SQLException sqle) {
            throw new HistoryException(sqle);
        }
    }

    /**
     * Helper for {@link #clear(Repository)}.
     */
    private void clearHistoryForRepository(Repository repository)
            throws SQLException {
        final ConnectionResource conn =
                connectionManager.getConnectionResource();
        try {
            // This statement shouldn't be called very frequently, so don't
            // care about caching it...
            PreparedStatement ps = conn.prepareStatement(
                    getQuery("clearRepository"));
            try {
                ps.setInt(1, getRepositoryId(conn, repository));
                ps.execute();
                conn.commit();
            } finally {
                ps.close();
            }
        } finally {
            connectionManager.releaseConnection(conn);
        }
    }

    @Override
    public String getInfo() {
        return info;
    }
}