/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* sqlite is not compatible with _FILE_OFFSET_BITS=64, but we need to
 * be able to statvfs(2) possibly large filesystems.  This define gives us
* access to the transitional interfaces. See lfcompile64(5) for how
* _LARGEFILE64_SOURCE works.
*/
#define _LARGEFILE64_SOURCE
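/*
 * Hedged sketch (added illustration, not part of the original file): with
 * _LARGEFILE64_SOURCE defined, the transitional statvfs64(2) interface is
 * available even though _FILE_OFFSET_BITS stays at 32.  The helper name
 * and space check below are assumptions for the example only.
 */
#if 0	/* illustrative example */
#include <sys/statvfs.h>

static int
example_fs_has_space(const char *path, unsigned long long need)
{
	struct statvfs64 sv;

	if (statvfs64(path, &sv) != 0)
		return (-1);
	return ((unsigned long long)sv.f_bavail * sv.f_frsize >= need);
}
#endif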
#include <assert.h>
#include <atomic.h>
#include <door.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <time.h>
#include <unistd.h>
#include <zone.h>
#include <libscf_priv.h>
#include "configd.h"
#include "repcache_protocol.h"
#include <sqlite.h>
#include <sqlite-misc.h>
/*
* This file has two purposes:
*
* 1. It contains the database schema, and the code for setting up our backend
* databases, including installing said schema.
*
* 2. It provides a simplified interface to the SQL database library, and
* synchronizes MT access to the database.
*/
typedef enum backend_switch_results {
BACKEND_SWITCH_OK = 0,
typedef struct backend_spent {
typedef struct backend_totals {
/*
* There are times when svcadm asks configd to move the BACKEND_TYPE_NORMAL
* repository to volatile storage. See backend_switch(). When the
* repository is on volatile storage, we save the location of the permanent
* repository in be_ppath. We use the saved path when the time comes to
* move the repository back. When the repository is on permanent storage,
* be_ppath is set to NULL. Also see the definition of IS_VOLATILE() above
* for testing if the repository is on volatile storage.
*/
typedef struct sqlite_backend {
/* backend is volatile */
struct backend_tx {
int bt_readonly;
int bt_type;
};
}
struct backend_query {
char *bq_buf;
};
struct backend_tbl_info {
const char *bti_name;
const char *bti_cols;
};
struct backend_idx_info {
const char *bxi_tbl;
const char *bxi_idx;
const char *bxi_cols;
};
/* Definitions for the flight recorder: */
typedef enum be_flight_type {
/* backend_create_backup_locked() */
/* for boot time backup */
/* restart */
typedef enum be_flight_status {
/* internal call */
typedef struct be_flight_event {
/* Data for the flight_recorder. */
/* interval between read-only checks while starting up */
/*
* Any incompatible change to the below schema should bump the version number.
* The schema has been changed to support value ordering, but this change
* is backwards-compatible - i.e. a previous svc.configd can use a
* repository database with the new schema perfectly well. As a result,
* the schema version has not been updated, allowing downgrade of systems
* without losing repository data.
*/
/*
* service_tbl holds all services. svc_id is the identifier of the
* service.
*/
{
"service_tbl",
"svc_id INTEGER PRIMARY KEY,"
"svc_name CHAR(256) NOT NULL"
},
/*
* instance_tbl holds all of the instances. The parent service id
* is instance_svc.
*/
{
"instance_tbl",
"instance_id INTEGER PRIMARY KEY,"
"instance_name CHAR(256) NOT NULL,"
"instance_svc INTEGER NOT NULL"
},
/*
* snapshot_lnk_tbl links (instance, snapshot name) with snapshots.
*/
{
"snapshot_lnk_tbl",
"lnk_id INTEGER PRIMARY KEY,"
"lnk_inst_id INTEGER NOT NULL,"
"lnk_snap_name CHAR(256) NOT NULL,"
"lnk_snap_id INTEGER NOT NULL"
},
/*
* snaplevel_tbl maps a snapshot id to a set of named, ordered
* snaplevels.
*/
{
"snaplevel_tbl",
"snap_id INTEGER NOT NULL,"
"snap_level_num INTEGER NOT NULL,"
"snap_level_id INTEGER NOT NULL,"
"snap_level_service_id INTEGER NOT NULL,"
"snap_level_service CHAR(256) NOT NULL,"
"snap_level_instance_id INTEGER NULL,"
"snap_level_instance CHAR(256) NULL"
},
/*
* snaplevel_lnk_tbl links snaplevels to property groups.
* snaplvl_pg_* is identical to the original property group,
* and snaplvl_gen_id overrides the generation number.
*/
{
"snaplevel_lnk_tbl",
"snaplvl_level_id INTEGER NOT NULL,"
"snaplvl_pg_id INTEGER NOT NULL,"
"snaplvl_pg_name CHAR(256) NOT NULL,"
"snaplvl_pg_type CHAR(256) NOT NULL,"
"snaplvl_pg_flags INTEGER NOT NULL,"
"snaplvl_gen_id INTEGER NOT NULL"
},
};
{ "service_tbl", "name", "svc_name" },
{ "instance_tbl", "name", "instance_svc, instance_name" },
{ "snapshot_lnk_tbl", "name", "lnk_inst_id, lnk_snap_name" },
{ "snapshot_lnk_tbl", "snapid", "lnk_snap_id" },
{ "snaplevel_tbl", "id", "snap_id" },
{ "snaplevel_lnk_tbl", "id", "snaplvl_pg_id" },
{ "snaplevel_lnk_tbl", "level", "snaplvl_level_id" },
};
};
};
/*
* pg_tbl defines property groups. They are associated with a single
* service or instance. The pg_gen_id links them with the latest
 * "edited" version of their properties.
*/
{
"pg_tbl",
"pg_id INTEGER PRIMARY KEY,"
"pg_parent_id INTEGER NOT NULL,"
"pg_name CHAR(256) NOT NULL,"
"pg_type CHAR(256) NOT NULL,"
"pg_flags INTEGER NOT NULL,"
"pg_gen_id INTEGER NOT NULL"
},
/*
* prop_lnk_tbl links a particular pg_id and gen_id to a set of
* (prop_name, prop_type, val_id) trios.
*/
{
"prop_lnk_tbl",
"lnk_prop_id INTEGER PRIMARY KEY,"
"lnk_pg_id INTEGER NOT NULL,"
"lnk_gen_id INTEGER NOT NULL,"
"lnk_prop_name CHAR(256) NOT NULL,"
"lnk_prop_type CHAR(2) NOT NULL,"
"lnk_val_id INTEGER"
},
/*
* value_tbl maps a value_id to a set of values. For any given
* value_id, value_type is constant. The table definition here
* is repeated in backend_check_upgrade(), and must be kept in-sync.
*/
{
"value_tbl",
"value_id INTEGER NOT NULL,"
"value_type CHAR(1) NOT NULL,"
"value_value VARCHAR NOT NULL,"
"value_order INTEGER DEFAULT 0"
},
/*
* id_tbl has one row per id space
*/
{
"id_tbl",
"id_name STRING NOT NULL,"
"id_next INTEGER NOT NULL"
},
/*
* schema_version has a single row, which contains
* BACKEND_SCHEMA_VERSION at the time of creation.
*/
{
"schema_version",
"schema_version INTEGER"
},
};
/*
* The indexing of value_tbl is repeated in backend_check_upgrade() and
* must be kept in sync with the indexing specification here.
*/
{ "pg_tbl", "parent", "pg_parent_id" },
{ "pg_tbl", "name", "pg_parent_id, pg_name" },
{ "pg_tbl", "type", "pg_parent_id, pg_type" },
{ "prop_lnk_tbl", "base", "lnk_pg_id, lnk_gen_id" },
{ "prop_lnk_tbl", "val", "lnk_val_id" },
{ "value_tbl", "id", "value_id" },
{ "id_tbl", "id", "id_name" },
};
struct run_single_int_info {
int rs_result;
};
static rep_protocol_responseid_t backend_copy_repository(const char *,
const char *, int);
static rep_protocol_responseid_t backend_do_copy(const char *, int,
const char *, int, size_t *);
/*
* The flight recorder keeps track of events that happen primarily while
 * the system is booting.  Once the system is up and running, one can take a
* gcore(1) of configd and examine the events with mdb. Since we're most
* interested in early boot events, we stop recording events when the
* recorder is full.
*/
static void
{
if (pthread_mutex_lock(&backend_flight_recorder_lock) != 0) {
return;
}
/* Hit end of the array. No more event recording. */
} else {
item = flight_recorder_next++;
}
if (item >= MAX_FLIGHT_RECORDER_EVENTS) {
/* Array is filled. Stop recording events */
return;
}
}
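/*
 * Hedged sketch (illustrative, not the original implementation): the shape
 * of a bounded event recorder as described above -- record events into a
 * fixed-size array under a lock and stop silently once the array fills.
 * All names below are examples only.
 */
#if 0
#define	EXAMPLE_MAX_EVENTS	100

typedef struct example_event {
	int	ee_type;	/* what happened */
	time_t	ee_time;	/* when it happened */
} example_event_t;

static example_event_t example_events[EXAMPLE_MAX_EVENTS];
static int example_next;
static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;

static void
example_record(int type)
{
	int slot;

	(void) pthread_mutex_lock(&example_lock);
	slot = example_next;
	if (slot < EXAMPLE_MAX_EVENTS)
		example_next++;
	(void) pthread_mutex_unlock(&example_lock);

	if (slot >= EXAMPLE_MAX_EVENTS)
		return;		/* recorder is full; drop the event */

	example_events[slot].ee_type = type;
	example_events[slot].ee_time = time(NULL);
}
#endif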
/*ARGSUSED*/
static int
{
return (BACKEND_CALLBACK_CONTINUE);
errno = 0;
return (BACKEND_CALLBACK_CONTINUE);
}
/*ARGSUSED*/
int
{
return (BACKEND_CALLBACK_ABORT);
}
/*
* check to see if we can successfully start a transaction; if not, the
* filesystem is mounted read-only.
*/
static int
{
int r;
return (SQLITE_READONLY);
r = sqlite_exec(db,
"BEGIN TRANSACTION; "
"UPDATE schema_version SET schema_version = schema_version; ",
return (r);
}
static void
{
if (backend_print_trace) {
}
}
/*
* For a native build, repositories are created from scratch, so upgrade
* is not an issue. This variable is implicitly protected by
* bes[BACKEND_TYPE_NORMAL]->be_lock.
*/
#ifdef NATIVE_BUILD
#else
#endif /* NATIVE_BUILD */
/*
 * Has the backend been upgraded?  In the nonpersistent case, the answer
 * is always yes.
*/
{
return (B_TRUE);
return (be_normal_upgraded);
}
/*
* backend_panic() -- some kind of database problem or corruption has been hit.
* We attempt to quiesce the other database users -- all of the backend sql
* entry points will call backend_panic(NULL) if a panic is in progress, as
* will any attempt to start a transaction.
*
* We give threads holding a backend lock 50ms (BACKEND_PANIC_TIMEOUT) to
* either drop the lock or call backend_panic(). If they don't respond in
* time, we'll just exit anyway.
*/
void
{
int i;
int failed = 0;
(void) pthread_mutex_lock(&backend_panic_lock);
if (backend_panic_thread != 0) {
(void) pthread_mutex_unlock(&backend_panic_lock);
/*
* first, drop any backend locks we're holding, then
* sleep forever on the panic_cv.
*/
for (i = 0; i < BACKEND_TYPE_TOTAL; i++) {
}
(void) pthread_mutex_lock(&backend_panic_lock);
for (;;)
(void) pthread_cond_wait(&backend_panic_cv,
}
(void) pthread_mutex_unlock(&backend_panic_lock);
for (i = 0; i < BACKEND_TYPE_TOTAL; i++) {
}
for (i = 0; i < BACKEND_TYPE_TOTAL; i++) {
&rel) != 0)
failed++;
}
}
if (failed) {
configd_critical("unable to quiesce database\n");
}
if (backend_panic_abort)
abort();
}
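/*
 * Hedged sketch of the bounded wait described above: give a lock holder a
 * fixed amount of time to respond before giving up.  The 50ms value mirrors
 * the comment; the helper and its use of pthread_mutex_timedlock(3C) are
 * illustrative assumptions, not the original mechanism.
 */
#if 0
static int
example_timed_trylock(pthread_mutex_t *mp)
{
	struct timespec ts;

	(void) clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_nsec += 50 * 1000000;		/* 50ms */
	if (ts.tv_nsec >= 1000000000) {
		ts.tv_sec += 1;
		ts.tv_nsec -= 1000000000;
	}
	/* 0 on success, ETIMEDOUT if the holder didn't respond in time */
	return (pthread_mutex_timedlock(mp, &ts));
}
#endif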
/*
* Returns
* _SUCCESS
* _DONE - callback aborted query
* _NO_RESOURCES - out of memory (_FULL & _TOOBIG?)
*/
static int
{
return (REP_PROTOCOL_SUCCESS);
switch (error) {
case SQLITE_ABORT:
return (REP_PROTOCOL_DONE);
case SQLITE_NOMEM:
case SQLITE_FULL:
case SQLITE_TOOBIG:
return (REP_PROTOCOL_FAIL_NO_RESOURCES);
default:
/*NOTREACHED*/
}
}
static void
{
while (out_sz-- > 0)
}
/*
 * Builds an inverse-time-sorted array of backup files.  The path is a
 * single buffer: pathname points at its start, basename at the final
 * path component, and (pathname + pathlen) just past the end.
*
* dirname will either be pathname, or ".".
*
* Returns the number of elements in the array, 0 if there are no previous
* backups, or -1 on error.
*/
static ssize_t
{
char *name, *p;
char *pathend;
/*
* year, month, day, hour, min, sec, plus an '_'.
*/
*pathend = '\0';
basename++;
} else {
dirname = ".";
}
/*
* munge the string temporarily for the opendir(), then restore it.
*/
basename[0] = '\0';
goto fail;
/*
* Must match:
* basename-YYYYMMDD_HHMMSS
* or we ignore it.
*/
continue;
continue;
char c = p[idx];
break;
break;
}
continue;
/*
* We have a match. insertion-sort it into our list.
*/
goto fail_closedir;
if (cmp == 0)
if (cmp == 0) {
break;
} else if (cmp > 0) {
p = tp;
}
}
goto fail_closedir;
}
} else {
}
}
return (count);
fail:
return (-1);
}
/*
* Copies the repository path into out, a buffer of out_len bytes,
* removes the ".db" (or whatever) extension, and, if name is non-NULL,
* appends "-name" to it. If name is non-NULL, it can fail with:
*
* _TRUNCATED will not fit in buffer.
* _BAD_REQUEST name is not a valid identifier
*/
static rep_protocol_responseid_t
{
char *p, *q;
/*
* '.'.
*/
out_len);
*q = 0;
/*
* verify that the name tag is entirely alphabetic,
* non-empty, and not too long.
*/
return (REP_PROTOCOL_FAIL_BAD_REQUEST);
return (REP_PROTOCOL_FAIL_TRUNCATED);
}
return (REP_PROTOCOL_SUCCESS);
}
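/*
 * Hedged sketch (not the original code): trimming a ".db" style extension
 * and appending "-name", as the comment above describes.  Error handling is
 * simplified and the helper name is illustrative.
 */
#if 0
static int
example_backup_name(const char *repo, const char *name, char *out,
    size_t out_len)
{
	char *dot;

	if (strlcpy(out, repo, out_len) >= out_len)
		return (-1);			/* _TRUNCATED */
	if ((dot = strrchr(out, '.')) != NULL)
		*dot = '\0';			/* drop the extension */
	if (name != NULL &&
	    (strlcat(out, "-", out_len) >= out_len ||
	    strlcat(out, name, out_len) >= out_len))
		return (-1);			/* _TRUNCATED */
	return (0);
}
#endif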
/*
 * Make a checkpoint of the repository so that we can use it for a backup:
 * copy the repository into a temporary file and then rename it to
* REPOSITORY_CHECKPOINT. This is protection against configd crashing in
* the middle of the copy and leaving a partial copy at
* REPOSITORY_CHECKPOINT. Renames are atomic.
*/
static rep_protocol_responseid_t
{
if (r == REP_PROTOCOL_SUCCESS)
r == REP_PROTOCOL_SUCCESS ? BE_FLIGHT_ST_SUCCESS :
return (r);
}
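/*
 * Hedged sketch of the copy-then-rename pattern the comment above relies
 * on.  The temporary-name scheme and the example_copy_file() helper are
 * hypothetical; the rename(2)-is-atomic point is what is being illustrated.
 */
#if 0
extern int example_copy_file(const char *, const char *);	/* hypothetical */

static int
example_checkpoint(const char *src, const char *final)
{
	char tmp[PATH_MAX];

	(void) snprintf(tmp, sizeof (tmp), "%s-tmp", final);
	if (example_copy_file(src, tmp) != 0)
		return (-1);
	/* rename(2) is atomic, so readers never see a partial checkpoint */
	if (rename(tmp, final) != 0) {
		(void) unlink(tmp);
		return (-1);
	}
	return (0);
}
#endif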
/*
* See if a backup is needed. We do a backup unless both files are
* byte-for-byte identical.
*/
static int
{
goto fail;
goto fail;
/*
* if they are the same file, we need to do a backup to break the
* hard link or symlink involved.
*/
goto fail;
goto fail;
goto fail;
do {
goto fail;
return (0);
}
fail:
if (repfd >= 0)
if (fd >= 0)
return (1);
}
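/*
 * Hedged sketch of a byte-for-byte comparison, as used by the "is a backup
 * needed?" check described above.  Error handling is simplified: any read
 * error is treated as "the files differ".
 */
#if 0
static int
example_files_identical(int fd1, int fd2)
{
	char b1[8192], b2[8192];
	ssize_t n1, n2;

	for (;;) {
		n1 = read(fd1, b1, sizeof (b1));
		n2 = read(fd2, b2, sizeof (b2));
		if (n1 < 0 || n2 < 0 || n1 != n2)
			return (0);
		if (n1 == 0)
			return (1);		/* both hit EOF together */
		if (memcmp(b1, b2, n1) != 0)
			return (0);
	}
}
#endif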
/*
* This interface is called to perform the actual copy
*
* Return:
* _FAIL_NO_RESOURCES out of memory
* _SUCCESS copy succeeds
*/
static rep_protocol_responseid_t
{
char *buf;
return (REP_PROTOCOL_FAIL_NO_RESOURCES);
if (nrd < 0) {
continue;
"Backend copy failed: fails to read from %s "
return (REP_PROTOCOL_FAIL_UNKNOWN);
}
nwr = 0;
do {
continue;
"Backend copy failed: fails to write to %s "
return (REP_PROTOCOL_FAIL_UNKNOWN);
}
nwr += n;
w_off += n;
}
if (sz)
return (REP_PROTOCOL_SUCCESS);
}
/*
* Can return:
* _BAD_REQUEST name is not valid
* _TRUNCATED name is too long for current repository path
* _UNKNOWN failed for unknown reason (details written to
* console)
* _BACKEND_READONLY backend is not writable
* _NO_RESOURCES out of memory
* _SUCCESS Backup completed successfully.
*/
static rep_protocol_responseid_t
{
const char **old_list;
char *finalname;
char *finalpath;
char *tmppath;
const char *src;
int use_checkpoint;
} else {
}
return (REP_PROTOCOL_FAIL_NO_RESOURCES);
return (REP_PROTOCOL_FAIL_NO_RESOURCES);
}
if (be->be_readonly) {
goto out;
}
if (result != REP_PROTOCOL_SUCCESS)
goto out;
/*
 * If this is a boot backup and we made a checkpoint before the root
 * file system became writable, use the checkpoint as the source.
 * Otherwise, we'll use the actual
* repository as the source.
*/
use_checkpoint = 1;
} else {
use_checkpoint = 0;
}
/*
* No changes, so there is no need for a backup.
*/
goto out;
}
/*
* remember the original length, and the basename location
*/
finalname++;
else
goto out;
}
"\"%s\" backup failed: localtime(3C) failed: %s\n", name,
goto out;
}
goto out;
}
if (infd < 0) {
goto out;
}
if (outfd < 0) {
configd_critical("\"%s\" backup failed: mkstemp(%s): %s\n",
goto out;
}
goto fail;
/*
* grab the old list before doing our re-name.
*/
if (old_max > 0)
"\"%s\" backup failed: rename(%s, %s): %s\n",
goto fail;
}
"\"%s\" backup completed, but updating "
"\"%s\" symlink to \"%s\" failed: %s\n",
}
/* unlink all but the first (old_max - 1) files */
"\"%s\" backup completed, but removing old "
"file \"%s\" failed: %s\n",
}
}
fail:
if (result != REP_PROTOCOL_SUCCESS) {
}
out:
/* Get rid of the checkpoint file now that we've used it. */
}
return (result);
}
/*
* Check if value_tbl has been upgraded in the main database, and
* if not (if the value_order column is not present), and do_upgrade is true,
* upgrade value_tbl in repository to contain the additional value_order
* column. The version of sqlite used means ALTER TABLE is not
* available, so we cannot simply use "ALTER TABLE value_tbl ADD COLUMN".
* Rather we need to create a temporary table with the additional column,
* import the value_tbl, drop the original value_tbl, recreate the value_tbl
* with the additional column, import the values from value_tbl_tmp,
* reindex and finally drop value_tbl_tmp. During boot, we wish to check
* if the repository has been upgraded before it is writable, so that
* property value retrieval can use the appropriate form of the SELECT
* statement that retrieves property values. As a result, we need to check
* if the repository has been upgraded prior to the point when we can
* actually carry out the update.
*/
void
{
char *errp;
int r;
if (be_normal_upgraded)
return;
/*
* Test if upgrade is needed. If value_order column does not exist,
* we need to upgrade the schema.
*/
if (r == SQLITE_ERROR && do_upgrade) {
/* No value_order column - needs upgrade */
configd_info("Upgrading SMF repository format...");
"BEGIN TRANSACTION; "
"CREATE TABLE value_tbl_tmp ( "
"value_id INTEGER NOT NULL, "
"value_type CHAR(1) NOT NULL, "
"value_value VARCHAR NOT NULL, "
"value_order INTEGER DEFAULT 0); "
"INSERT INTO value_tbl_tmp "
"(value_id, value_type, value_value) "
"SELECT value_id, value_type, value_value FROM value_tbl; "
"DROP TABLE value_tbl; "
"CREATE TABLE value_tbl( "
"value_id INTEGER NOT NULL, "
"value_type CHAR(1) NOT NULL, "
"value_value VARCHAR NOT NULL, "
"value_order INTEGER DEFAULT 0); "
"INSERT INTO value_tbl SELECT * FROM value_tbl_tmp; "
"CREATE INDEX value_tbl_id ON value_tbl (value_id); "
"DROP TABLE value_tbl_tmp; "
"COMMIT TRANSACTION; "
"VACUUM; ",
if (r == SQLITE_OK) {
configd_info("SMF repository upgrade is complete.");
} else {
backend_panic("%s: repository upgrade failed: %s",
/* NOTREACHED */
}
}
if (r == SQLITE_OK)
else
}
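/*
 * Hedged sketch of the upgrade probe described above: query the
 * value_order column and treat SQLITE_ERROR as "column not present".
 * The exact statement here is an assumption, not the original one.
 */
#if 0
static int
example_needs_upgrade(sqlite *db)
{
	int r;

	r = sqlite_exec(db, "SELECT value_order FROM value_tbl LIMIT 1;",
	    NULL, NULL, NULL);
	return (r == SQLITE_ERROR);	/* error => no value_order column */
}
#endif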
static int
{
const char *check_path;
char *errp;
int r;
/*
* If we don't *need* to be writable, only check every once in a
* while.
*/
if (!writing) {
return (REP_PROTOCOL_SUCCESS);
be->be_lastcheck = t;
}
/*
* It could be that the repository has been moved to non-persistent
* storage for performance reasons. In this case we need to check
* the persistent path to see if it is writable. The
* non-persistent path will always be writable.
*/
/*NOTREACHED*/
}
if (r != SQLITE_OK) {
/*
* The underlying storage for the permanent repository is
* still read-only, so we don't want to change the state or
* move the checkpointed backup if it exists. On the other
* hand if the repository has been copied to volatile
* storage, we'll let our caller go ahead and write to the
* database.
*/
return (REP_PROTOCOL_FAIL_BACKEND_READONLY);
return (REP_PROTOCOL_SUCCESS);
}
/*
* We can write! If the repository is not on volatile storage,
* swap the db handles. Mark ourself as writable, upgrade the
* repository if necessary and make a backup.
*/
be->be_readonly = 0;
if (IS_VOLATILE(be)) {
/*
* If the repository is on volatile storage, don't switch
* the handles. We'll continue to use the repository that
* is on tmpfs until we're told to move it back by one of
* our clients. Clients, specifically manifest_import,
* move the repository to tmpfs for performance reasons,
* and that is the reason to not switch it back until we're
* told to do so.
*/
} else {
}
"unable to create \"%s\" backup of \"%s\"\n",
}
return (REP_PROTOCOL_SUCCESS);
}
/*
* If t is not BACKEND_TYPE_NORMAL, can fail with
* _BACKEND_ACCESS - backend does not exist
*
* If writing is nonzero, can also fail with
* _BACKEND_READONLY - backend is read-only
*/
static int
{
assert(t == BACKEND_TYPE_NORMAL ||
t == BACKEND_TYPE_NONPERSIST);
if (t == BACKEND_TYPE_NORMAL)
return (REP_PROTOCOL_FAIL_BACKEND_ACCESS);
if (backend_panic_thread != 0)
vts = gethrvtime();
if (backend_panic_thread != 0) {
}
if (be->be_readonly) {
int r;
assert(t == BACKEND_TYPE_NORMAL);
if (r != REP_PROTOCOL_SUCCESS) {
return (r);
}
}
if (backend_do_trace)
else
return (REP_PROTOCOL_SUCCESS);
}
static void
{
be->be_writing = 0;
}
static void
{
}
}
static void
{
}
static int
{
int written;
while (len > 0) {
return (-1);
}
return (0);
}
/*
* Can return:
* _BAD_REQUEST name is not valid
* _TRUNCATED name is too long for current repository path
* _UNKNOWN failed for unknown reason (details written to
* console)
* _BACKEND_READONLY backend is not writable
* _NO_RESOURCES out of memory
* _SUCCESS Backup completed successfully.
*/
{
return (result);
}
/*
* This function makes a copy of the repository at src, placing the copy at
* dst. It is used to copy a repository on permanent storage to volatile
* storage or vice versa. If the source file is on volatile storage, it is
* often times desirable to delete it after the copy has been made and
* verified. To remove the source repository, set remove_src to 1.
*
* Can return:
*
* REP_PROTOCOL_SUCCESS successful copy and rename
* REP_PROTOCOL_FAIL_UNKNOWN file operation error
* REP_PROTOCOL_FAIL_NO_RESOURCES out of memory
*/
static rep_protocol_responseid_t
{
goto out;
}
/*
* Create and open the related db files
*/
"Backend copy failed: strlcat %s: overflow\n", tmppath);
abort();
}
configd_critical("Backend copy failed: mkstemp %s: %s\n",
goto out;
}
configd_critical("Backend copy failed: opening %s: %s\n",
goto errexit;
}
/*
* fstat the backend before copy for sanity check.
*/
configd_critical("Backend copy failed: fstat %s: %s\n",
goto errexit;
}
goto errexit;
configd_critical("Backend copy failed: incomplete copy\n");
goto errexit;
}
/*
* Rename tmppath to dst
*/
"Backend copy failed: rename %s to %s: %s\n",
}
"Backend copy failed: remove %s: %s\n",
out:
if (remove_src) {
"Backend copy failed: remove %s: %s\n",
}
return (res);
}
/*
* Perform sanity check on the repository.
 * Return 0 if the check succeeds or -1 if it fails.
*/
static int
{
int r;
r = sqlite_exec(be_db,
"SELECT schema_version FROM schema_version;",
if (r == SQLITE_OK &&
return (0);
else
return (-1);
}
/*
* backend_switch() implements the REP_PROTOCOL_SWITCH request from
* clients. First, it blocks all other clients from accessing the
* repository by calling backend_lock to lock the repository. It either
 * copies the repository from its permanent storage location
* (REPOSITORY_DB) to its fast volatile location (FAST_REPOSITORY_DB), or
* vice versa. dir determines the direction of the copy.
*
* dir = 0 Copy from permanent location to volatile location.
* dir = 1 Copy from volatile location to permanent location.
*
* Can return:
* REP_PROTOCOL_SUCCESS successful switch
 * REP_PROTOCOL_FAIL_BACKEND_ACCESS backend access fails
* REP_PROTOCOL_FAIL_BACKEND_READONLY backend is not writable
* REP_PROTOCOL_FAIL_UNKNOWN file operation error
* REP_PROTOCOL_FAIL_NO_RESOURCES out of memory
*/
{
char *errp;
const char *dst;
/*
* If switching back to the main repository, lock for writing.
* Otherwise, lock for reading.
*/
&be);
if (result != REP_PROTOCOL_SUCCESS)
return (result);
if (dir) {
dst = REPOSITORY_DB;
} else {
}
/*
* Do the actual copy and rename
*/
goto errout;
}
if (result != REP_PROTOCOL_SUCCESS) {
goto errout;
}
/*
* Do the backend sanity check and switch
*/
/*
* Sanity check
*/
"Backend switch failed: strdup %s: %s\n",
} else {
if (dir) {
/* We're back on permanent storage. */
} else {
/*
* Repository is now on volatile
* storage. Save the location of
* the persistent repository.
*/
}
}
} else {
"Backend switch failed: integrity check %s: %s\n",
}
} else {
configd_critical("Backend switch failed: sqlite_open %s: %s\n",
}
if (result == REP_PROTOCOL_SUCCESS) {
} else {
}
return (result);
}
/*
* This routine is called to attempt the recovery of
* the most recent valid repository if possible when configd
 * is restarted for some reason or when the system crashes
* during the switch operation. The repository databases
* referenced here are indicators of successful switch
* operations.
*/
static backend_switch_results_t
backend_switch_recovery(void)
{
int r;
/*
* A good transient db containing most recent data can
* exist if svc.configd crashes during the
* switch operation. If that is the case, check its
* integrity and use it.
*/
return (BACKEND_SWITCH_OK);
}
/* Determine if persistent repository is read-only */
configd_critical("Unable to open \"%s\". %s\n",
return (BACKEND_SWITCH_FATAL);
}
if (r != SQLITE_OK) {
if (r == SQLITE_READONLY) {
return (BACKEND_SWITCH_RO);
}
return (BACKEND_SWITCH_FATAL);
}
/*
* Do sanity check on the db
*/
}
}
}
/*
* If we get to this point, the fast_db has either been copied or
* it is useless. Either way, get rid of it.
*/
return (res);
}
/*ARGSUSED*/
static int
{
char *new;
const char *info;
int x;
for (x = 0; x < narg; x++) {
return (BACKEND_CALLBACK_ABORT);
new[0] = 0;
}
}
return (BACKEND_CALLBACK_CONTINUE);
}
#define BACKEND_CREATE_SUCCESS 0
static int
{
char *errp;
int r;
int fd;
perror("malloc");
goto fail;
}
goto fail;
}
/* report it as an integrity failure */
goto integrity_fail;
}
/*
* check if we are inited and of the correct schema version
*
*/
if (r == SQLITE_ERROR &&
/*
* Could be an empty repository, could be pre-schema_version
* schema. Check for id_tbl, which has always been there.
*/
if (r == SQLITE_ERROR &&
return (BACKEND_CREATE_NEED_INIT);
}
goto fail;
}
if (r == SQLITE_BUSY || r == SQLITE_LOCKED) {
return (BACKEND_CREATE_LOCKED);
}
if (r == SQLITE_OK) {
val != BACKEND_SCHEMA_VERSION) {
configd_critical("%s: schema version mismatch\n",
db_file);
goto fail;
}
}
/*
* pull in the whole database sequentially.
*/
;
}
}
/*
* run an integrity check
*/
if (r == SQLITE_BUSY || r == SQLITE_LOCKED) {
return (BACKEND_CREATE_LOCKED);
}
if (r == SQLITE_ABORT) {
integrity_results = "out of memory running integrity check\n";
}
if (integrity_results != NULL) {
} else {
": PRAGMA integrity_check; failed. Results:\n") <
}
}
if (!is_main_repository ||
"%s: integrity check failed. Details in "
else
"%s: integrity check failed.\n",
db_file);
} else {
"\n"
"svc.configd: smf(5) database integrity check of:\n"
"\n"
" %s\n"
"\n"
" failed. The database might be damaged or a media error might have\n"
" prevented it from being verified. Additional information useful to\n"
" your service provider%s%s\n"
"\n"
" The system will not be able to boot until you have restored a working\n"
" database. svc.startd(1M) will provide a sulogin(1M) prompt for recovery\n"
" purposes. The command:\n"
"\n"
" /lib/svc/bin/restore_repository\n"
"\n"
" can be run to restore a backup version of your repository. See\n"
" http://illumos.org/msg/SMF-8000-MY for more information.\n"
"\n",
}
goto fail;
}
/*
* Simply do check if backend has been upgraded. We do not wish
* to actually carry out upgrade here - the main repository may
* not be writable at this point. Actual upgrade is carried out
* via backend_check_readonly(). This check is done so that
* we determine repository state - upgraded or not - and then
* the appropriate SELECT statement (value-ordered or not)
* can be used when retrieving property values early in boot.
*/
if (backend_id == BACKEND_TYPE_NORMAL)
/*
* check if we are writable
*/
if (r == SQLITE_BUSY || r == SQLITE_LOCKED) {
return (BACKEND_CREATE_LOCKED);
}
if (r != SQLITE_OK && r != SQLITE_FULL) {
return (BACKEND_CREATE_READONLY);
}
return (BACKEND_CREATE_SUCCESS);
fail:
return (BACKEND_CREATE_FAIL);
}
/*
* (arg & -arg) is, through the magic of twos-complement arithmetic, the
* lowest set bit in arg.
*/
static size_t
{
/*
* Don't allow a zero result.
*/
return (arg);
}
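/*
 * Hedged illustration of the (arg & -arg) identity mentioned above: for
 * arg = 0b101000, the two's complement -arg ends in ...011000, so
 * (arg & -arg) == 0b001000, the lowest set bit.  The helper is an example
 * only, not part of the original file.
 */
#if 0
static size_t
example_lowbit(size_t arg)
{
	return (arg & -arg);
}
#endif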
/*
* Returns
* _NO_RESOURCES - out of memory
* _BACKEND_ACCESS - backend type t (other than _NORMAL) doesn't exist
* _DONE - callback aborted query
* _SUCCESS
*/
int
{
int ret;
return (REP_PROTOCOL_FAIL_NO_RESOURCES);
return (ret);
vts = gethrvtime();
return (ret);
}
/*
* Starts a "read-only" transaction -- i.e., locks out writers as long
* as it is active.
*
* Fails with
* _NO_RESOURCES - out of memory
*
* If t is not _NORMAL, can also fail with
* _BACKEND_ACCESS - backend does not exist
*
* If writable is true, can also fail with
* _BACKEND_READONLY
*/
static int
{
int r;
return (REP_PROTOCOL_FAIL_NO_RESOURCES);
return (r);
}
return (REP_PROTOCOL_SUCCESS);
}
int
{
return (backend_tx_begin_common(t, txp, 0));
}
static void
{
/*
* sqlite tends to be sticky with SQLITE_FULL, so we try
* to get a fresh database handle if we got a FULL warning
* along the way. If that fails, no harm done.
*/
}
}
}
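/*
 * Hedged sketch of the "fresh handle after SQLITE_FULL" idea described
 * above: reopen the same database file and swap handles, ignoring failure.
 * The parameters are illustrative; the real code operates on its backend
 * structure.
 */
#if 0
static void
example_refresh_handle(sqlite **dbp, const char *path)
{
	sqlite *fresh;

	if ((fresh = sqlite_open(path, 0600, NULL)) != NULL) {
		sqlite_close(*dbp);
		*dbp = fresh;
	}
}
#endif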
void
{
}
/*
* Fails with
* _NO_RESOURCES - out of memory
* _BACKEND_ACCESS
* _BACKEND_READONLY
*/
int
{
int r;
char *errmsg;
if (r != REP_PROTOCOL_SUCCESS)
return (r);
vts = gethrvtime();
&errmsg);
if (r == SQLITE_FULL)
if (r != REP_PROTOCOL_SUCCESS) {
assert(r != REP_PROTOCOL_DONE);
return (r);
}
(*txp)->bt_readonly = 0;
return (REP_PROTOCOL_SUCCESS);
}
void
{
int r;
char *errmsg;
vts = gethrvtime();
&errmsg);
if (r == SQLITE_FULL)
}
/*
* Fails with
* _NO_RESOURCES - out of memory
*/
int
{
int r, r2;
char *errmsg;
vts = gethrvtime();
&errmsg);
if (r == SQLITE_FULL)
assert(r != REP_PROTOCOL_DONE);
if (r != REP_PROTOCOL_SUCCESS) {
&errmsg);
if (r2 != REP_PROTOCOL_SUCCESS)
backend_panic("cannot rollback failed commit");
return (r);
}
return (REP_PROTOCOL_SUCCESS);
}
static const char *
{
switch (id) {
return ("SI");
case BACKEND_ID_PROPERTYGRP:
return ("PG");
case BACKEND_ID_GENERATION:
return ("GEN");
case BACKEND_ID_PROPERTY:
return ("PROP");
case BACKEND_ID_VALUE:
return ("VAL");
case BACKEND_ID_SNAPNAME:
return ("SNAME");
case BACKEND_ID_SNAPSHOT:
return ("SHOT");
case BACKEND_ID_SNAPLEVEL:
return ("SLVL");
default:
abort();
/*NOTREACHED*/
}
}
/*
* Returns a new id or 0 if the id argument is invalid or the query fails.
*/
{
char *errmsg;
int ret;
vts = gethrvtime();
"SELECT id_next FROM id_tbl WHERE (id_name = '%q');"
"UPDATE id_tbl SET id_next = id_next + 1 WHERE (id_name = '%q');",
if (ret == SQLITE_FULL)
if (ret != REP_PROTOCOL_SUCCESS) {
return (0);
}
return (new_id);
}
/*
* Returns
* _NO_RESOURCES - out of memory
* _DONE - callback aborted query
* _SUCCESS
*/
int
{
int ret;
return (REP_PROTOCOL_FAIL_NO_RESOURCES);
vts = gethrvtime();
if (ret == SQLITE_FULL)
return (ret);
}
/*
* Returns
* _NO_RESOURCES - out of memory
* _NOT_FOUND - the query returned no results
* _SUCCESS - the query returned a single integer
*/
int
{
int ret;
if (ret != REP_PROTOCOL_SUCCESS)
return (ret);
}
/*
* Fails with
* _NO_RESOURCES - out of memory
*/
int
{
va_list a;
char *errmsg;
int ret;
vts = gethrvtime();
if (ret == SQLITE_FULL)
va_end(a);
return (ret);
}
/*
 * returns REP_PROTOCOL_FAIL_NOT_FOUND if no changes occurred
*/
int
{
va_list a;
char *errmsg;
int ret;
vts = gethrvtime();
if (ret == SQLITE_FULL)
va_end(a);
return (ret);
}
static int
{
int i;
char *errmsg;
int ret;
/*
* Create the tables.
*/
for (i = 0; i < tbl_count; i++) {
break;
}
"CREATE TABLE %s (%s);\n",
"%s: %s table creation fails: %s\n", file,
return (-1);
}
}
/*
* Make indices on key tables and columns.
*/
for (i = 0; i < idx_count; i++) {
break;
}
"CREATE INDEX %s_%s ON %s (%s);\n",
"%s: %s_%s index creation fails: %s\n", file,
return (-1);
}
}
return (0);
}
static int
{
int i;
char *errmsg;
int ret;
if (t == BACKEND_TYPE_NORMAL) {
} else if (t == BACKEND_TYPE_NONPERSIST) {
} else {
abort(); /* can't happen */
}
if (ret < 0) {
return (ret);
}
if (ret < 0) {
return (ret);
}
/*
* Add the schema version to the table
*/
"INSERT INTO schema_version (schema_version) VALUES (%d)",
"setting schema version fails: %s\n", errmsg);
}
/*
* Populate id_tbl with initial IDs.
*/
for (i = 0; i < BACKEND_ID_INVALID; i++) {
"INSERT INTO id_tbl (id_name, id_next) "
return (-1);
}
}
/*
 * Set the persistence of the database.  The normal database is marked
* "synchronous", so that all writes are synchronized to stable storage
* before proceeding.
*/
"PRAGMA default_synchronous = %s; PRAGMA synchronous = %s;",
return (-1);
}
return (0);
}
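/*
 * Hedged example (added, not original): the PRAGMA statement above expands
 * to something like "PRAGMA default_synchronous = FULL; PRAGMA synchronous
 * = FULL;" for the normal backend, and to OFF for the nonpersistent one.
 * The substituted values are assumptions for illustration.
 */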
int
{
char *errp;
int r;
/* set up our temporary directory */
sqlite_temp_directory = "/etc/svc/volatile";
configd_critical("Mismatched link! (%s should be %s)\n",
return (CONFIGD_EXIT_DATABASE_INIT_FAILED);
}
is_main_repository = 0;
}
/*
* If the svc.configd crashed, there might be a leftover transient
 * database at FAST_REPOSITORY_DB, which contains useful
* information. Both early manifest import and late manifest
* import use svcadm to copy the repository to FAST_REPOSITORY_DB.
* One reason for doing this is that it improves the performance of
* manifest import. The other reason is that the repository may be
* on read-only root in the case of early manifest import.
*
* If FAST_REPOSITORY_DB exists, it is an indication that
* svc.configd has been restarted for some reason. Since we have
* no way of knowing where we are in the boot process, the safe
 * thing to do is to move the repository back to its non-transient
* location, REPOSITORY_DB. This may slow manifest import
* performance, but it avoids the problem of missing the command to
* move the repository to permanent storage.
*
* There is a caveat, though. If root is read-only, we'll need to
* leave the repository at FAST_REPOSITORY_DB. If root is
* read-only, late manifest import has not yet run, so it will move
* the repository back to permanent storage when it runs.
*/
if (is_main_repository)
switch (r) {
case BACKEND_CREATE_FAIL:
return (CONFIGD_EXIT_DATABASE_INIT_FAILED);
case BACKEND_CREATE_LOCKED:
return (CONFIGD_EXIT_DATABASE_LOCKED);
case BACKEND_CREATE_SUCCESS:
break; /* success */
case BACKEND_CREATE_READONLY:
writable_persist = 0;
break;
case BACKEND_CREATE_NEED_INIT:
return (CONFIGD_EXIT_DATABASE_INIT_FAILED);
}
break;
default:
abort();
/*NOTREACHED*/
}
/*
* If there was a transient repository that could not be copied
* back because the root file system was read-only, switch over to
* using the transient repository.
*/
if (switch_result == BACKEND_SWITCH_RO) {
/* Can't open fast repository. Stick with permanent. */
configd_critical("Cannot open \"%s\". %s\n",
} else {
if (db_name_copy == NULL) {
configd_critical("backend_init: out of "
"memory.\n");
return (CONFIGD_EXIT_INIT_FAILED);
} else {
}
}
}
if (have_np) {
switch (r) {
case BACKEND_CREATE_SUCCESS:
break; /* success */
case BACKEND_CREATE_FAIL:
return (CONFIGD_EXIT_DATABASE_INIT_FAILED);
case BACKEND_CREATE_LOCKED:
return (CONFIGD_EXIT_DATABASE_LOCKED);
case BACKEND_CREATE_READONLY:
return (CONFIGD_EXIT_DATABASE_INIT_FAILED);
case BACKEND_CREATE_NEED_INIT:
return (CONFIGD_EXIT_DATABASE_INIT_FAILED);
}
break;
default:
abort();
/*NOTREACHED*/
}
if (r != BACKEND_CREATE_NEED_INIT) {
}
/*
* If we started up with a writable filesystem, but the
* non-persistent database needed initialization, we are
* booting a non-global zone or a system with a writable
* root (ZFS), so do a backup. Checking to see if the
* non-persistent database needed initialization also keeps
* us from making additional backups if configd gets
* restarted.
*/
if (r == BACKEND_CREATE_NEED_INIT && writable_persist &&
"unable to create \"%s\" backup of "
"\"%s\"\n", REPOSITORY_BOOT_BACKUP,
}
}
/*
* On the other hand if we started with a read-only file
* system and the non-persistent database needed
* initialization, then we need to take a checkpoint of the
* repository. We grab the checkpoint now before Early
* Manifest Import starts modifying the repository. Then
* when the file system becomes writable, the checkpoint
* can be used to create the boot time backup of the
* repository. Checking that the non-persistent database
 * needed initialization keeps us from making additional
* checkpoints if configd gets restarted.
*/
if (r == BACKEND_CREATE_NEED_INIT && writable_persist == 0 &&
if (r != REP_PROTOCOL_SUCCESS) {
configd_critical("unable to create checkpoint "
}
}
/*
* If the non-persistent database did not need
* initialization, svc.configd has been restarted. See if
* the boot time checkpoint exists. If it does, use it to
* make a backup if root is writable.
*/
if (r != BACKEND_CREATE_NEED_INIT &&
}
/*
* If we have a checkpoint and root is writable,
* make the backup now.
*/
"unable to create \"%s\" backup of "
"\"%s\"\n", REPOSITORY_BOOT_BACKUP,
}
}
}
}
/*
* If the persistent backend is writable at this point, upgrade it.
* This can occur in a few cases, most notably on UFS roots if
* we are operating on the backend from another root, as is the case
* during alternate-root BFU.
*
* Otherwise, upgrade will occur via backend_check_readonly() when
* the repository is re-opened read-write.
*/
if (writable_persist) {
assert(r == REP_PROTOCOL_SUCCESS);
}
return (CONFIGD_EXIT_OKAY);
}
/*
* quiesce all database activity prior to exiting
*/
void
backend_fini(void)
{
}
backend_query_alloc(void)
{
backend_query_t *q;
if (q != NULL) {
q->bq_size = QUERY_BASE;
q->bq_size = 0;
}
}
return (q);
}
void
{
char *alloc;
int count;
if (q == NULL) {
/* We'll discover the error when we try to run the query. */
return;
}
break; /* success */
break; /* can't grow */
}
}
}
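/*
 * Hedged sketch of the append-with-growth pattern used by the query buffer
 * above: grow the allocation until the new text fits, then concatenate.
 * The field names mirror the struct earlier in this file, but the logic is
 * illustrative, not the original implementation.
 */
#if 0
static int
example_query_append(backend_query_t *q, const char *s)
{
	size_t need, size;
	char *alloc;

	if (q == NULL || q->bq_buf == NULL)
		return (-1);	/* allocation error found at run time */

	need = strlen(q->bq_buf) + strlen(s) + 1;
	size = (q->bq_size > 0) ? q->bq_size : need;
	while (size < need)
		size *= 2;
	if (size != q->bq_size) {
		if ((alloc = realloc(q->bq_buf, size)) == NULL)
			return (-1);
		q->bq_buf = alloc;
		q->bq_size = size;
	}
	(void) strlcat(q->bq_buf, s, q->bq_size);
	return (0);
}
#endif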
void
{
char *new;
return;
return;
}
backend_query_append(q, new);
}
void
{
if (q != NULL) {
}
free(q);
}
}