/*-
* See the file LICENSE for redistribution information.
*
* Copyright (c) 1996, 1997, 1998
* Sleepycat Software. All rights reserved.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#include "config.h"
#ifndef lint
#endif /* not lint */
#ifndef NO_SYSTEM_INCLUDES
#include <ctype.h>
#include <errno.h>
#include <string.h>
#endif
#include "db_int.h"
#include "shqueue.h"
#include "db_shash.h"
#include "lock.h"
#include "common_ext.h"
/*
 * NOTE(review): what follows appears to be the heavily truncated remains of
 * lock_open() -- the lock-region open/create entry point.  The function
 * declarator is missing (the stray "static const char *" line directly above
 * the "int" return type belongs to a different, also-truncated declaration),
 * most body statements are gone, and the brace nesting below does not
 * balance.  Do not edit this span in place; restore the whole function from
 * the intact upstream source.
 */
static const char *
int
const char *path;
int mode;
DB_LOCKTAB **ltp;
{
int ret;
/* Validate arguments. */
#ifdef HAVE_SPINLOCKS
#else
#endif
return (ret);
/* Create the lock table structure. */
return (ret);
/* Grab the values that we need to compute the region size. */
regflags = 0;
}
regflags = 0;
}
}
else
goto err;
goto err;
/* Now set up the pointer to the region. */
/* Initialize the region if we created it. */
goto err;
} else {
/* Check for an unexpected region. */
"lock_open: %s: bad magic number", path);
goto err;
}
}
/* Check for automatic deadlock detection. */
"lock_open: incompatible deadlock detector mode");
goto err;
}
}
/* Set up remaining pointers into region. */
return (0);
}
/*
 * NOTE(review): error-path return -- the "err:" label and the cleanup code
 * the gotos above jump to are missing from this fragment.
 */
return (ret);
}
/*
* __lock_panic --
* Panic a lock region.
*
* PUBLIC: void __lock_panic __P((DB_ENV *));
*/
void
{
}
/*
 * __lock_tabinit --
 * Initialize the lock region.
 */
static int
/*
 * NOTE(review): the declarator line is missing here (the function takes the
 * lock table and the region pointer "lrp" used below -- confirm the exact
 * K&R parameter list against the upstream source).  Almost every statement
 * between the surviving comments has been lost, and the two stray "}" lines
 * below are the tails of loops whose headers are gone.
 */
{
/*
 * These fields (lrp->maxlocks, lrp->nmodes) are initialized
 * in the caller, since we had to grab those values to size
 * the region.
 */
lrp->nconflicts = 0;
lrp->ndeadlocks = 0;
/*
 * As we write the region, we've got to maintain the alignment
 * for the structures that follow each chunk. This information
 * ends up being encapsulated both in here as well as in the
 * lock.h file for the XXX_SIZE macros.
 */
/* Initialize conflict matrix. */
/*
 * Initialize hash table.
 */
/*
 * Initialize locks onto a free list. Since locks contains mutexes,
 * we need to make sure that each lock is aligned on a MUTEX_ALIGNMENT
 * boundary.
 */
}
/* Initialize objects onto a free list. */
}
/*
 * Initialize the string space; as for all shared memory allocation
 * regions, this requires size_t alignment, since we store the
 * lengths of malloc'd areas in the area.
 */
return (0);
}
/*
 * NOTE(review): truncated fragment -- presumably lock_close(), given the
 * single DB_LOCKTAB parameter; the declarator line is missing.  The
 * unconditional "return (ret)" followed by an unreachable "return (0)"
 * shows that the condition guarding the first return was lost.
 */
int
DB_LOCKTAB *lt;
{
int ret;
return (ret);
return (0);
}
/*
 * NOTE(review): truncated fragment -- presumably lock_unlink(), given the
 * (path, force) parameters; the declarator line is missing, as are the
 * statements whose results the two "return (ret)" lines were meant to
 * propagate.  The second return is unreachable as written.
 */
int
const char *path;
int force;
{
int ret;
return (ret);
return (ret);
}
/*
 * __lock_validate_region --
 * Called at every interface to verify if the region has changed size,
 * and if so, to remap the region in and reset the process' pointers.
 *
 * PUBLIC: int __lock_validate_region __P((DB_LOCKTAB *));
 */
int
/*
 * NOTE(review): the declarator line "__lock_validate_region(lt)" is missing
 * (see the PUBLIC prototype above).  The conditions guarding the early
 * returns below were lost; as written, "return (0)" is unconditional and
 * everything after it is unreachable.
 */
DB_LOCKTAB *lt;
{
int ret;
return (0);
return (ret);
/* Reset region information. */
return (0);
}
/*
 * __lock_grow_region --
 * We have run out of space; time to grow the region.
 *
 * PUBLIC: int __lock_grow_region __P((DB_LOCKTAB *, int, size_t));
 */
int
/*
 * NOTE(review): the declarator line is missing (the PUBLIC prototype above
 * shows a third size_t parameter whose K&R declaration is also gone), and
 * the ratio computations below survive only as orphaned sub-expressions.
 */
DB_LOCKTAB *lt;
int which;
{
int ret;
/* Figure out how much of each sort of space we have. */
/*
 * Figure out what fraction of the used space belongs to each
 * different type of "thing" in the region. Then partition the
 * new space up according to this ratio.
 */
usedobjs * sizeof(DB_LOCKOBJ);
lock_ratio = usedlocks *
(newobjs * sizeof(DB_LOCKOBJ) +
/*
 * Make sure we allocate enough memory for the object being
 * requested.
 */
switch (which) {
case DB_LOCK_LOCK:
if (newlocks == 0) {
newlocks = 10;
}
break;
case DB_LOCK_OBJ:
if (newobjs == 0) {
newobjs = 10;
}
break;
/* NOTE(review): the DB_LOCK_MEM arm's body was lost; the stray "}" below is its remnant. */
case DB_LOCK_MEM:
}
break;
}
/*
 * Since we are going to be allocating locks at the beginning of the
 * new chunk, we need to make sure that the chunk is MUTEX_ALIGNMENT
 * aligned. We did not guarantee this when we created the region, so
 * we may need to pad the old region by extra bytes to ensure this
 * alignment.
 */
"Growing lock region: %lu locks %lu objs %lu bytes",
return (ret);
/* Update region parameters. */
/* Put new locks onto the free list. */
/* NOTE(review): this for-header is cut off mid-line; its increment clause and body are missing. */
for (i = 0; i++ < newlocks;
}
/* Put new objects onto the free list. */
}
return (0);
}
/*
 * NOTE(review): anonymous fragment -- a static void helper taking the lock
 * table (plausibly a region-reset helper used by __lock_validate_region
 * above -- confirm against upstream).  Declarator and entire body missing.
 */
static void
DB_LOCKTAB *lt;
{
}
/*
 * lock_stat --
 * Return LOCK statistics.
 */
int
/*
 * NOTE(review): declarator line missing; per the surviving parameters this
 * is lock_stat(lt, gspp, ...).  The allocation/locking statements whose
 * failure the first "return (ret)" reported were lost, so as written that
 * return is unconditional and the copy-out below is unreachable.
 */
DB_LOCKTAB *lt;
DB_LOCK_STAT **gspp;
{
int ret;
return (ret);
/* Copy out the global statistics. */
return (0);
}
/*
 * NOTE(review): two anonymous counting helpers.  Both have lost their
 * declarators, the declaration of "count", and the loop headers that once
 * wrapped the "count++" lines; only the init/increment/return skeleton
 * survives.  Presumably they walked the lock and object free lists --
 * confirm against upstream before restoring.
 */
static u_int32_t
{
count = 0;
count++;
return (count);
}
static u_int32_t
{
count = 0;
count++;
return (count);
}
/*
 * __lock_dump_region --
 *
 * PUBLIC: void __lock_dump_region __P((DB_LOCKTAB *, char *, FILE *));
 */
void
/*
 * NOTE(review): declarator missing, as is the FILE * parameter's K&R
 * declaration (see the PUBLIC prototype above).  Every case arm in the
 * area-selection switch has lost its flag-setting statement, and the two
 * hash-bucket dump conditions below are cut off mid-line (they end in "&&").
 */
DB_LOCKTAB *lt;
char *area;
{
int label;
/* Make it easy to call from the debugger. */
switch (*area) {
case 'A':
break;
case 'c':
break;
case 'f':
break;
case 'l':
break;
case 'm':
break;
case 'o':
break;
}
if (LF_ISSET(LOCK_DUMP_CONF)) {
}
}
for (i = 0; i < lrp->table_size; i++) {
label = 1;
if (LF_ISSET(LOCK_DUMP_LOCKERS) &&
if (label) {
"Bucket %lu:\n", (u_long)i);
label = 0;
}
}
if (LF_ISSET(LOCK_DUMP_OBJECTS) &&
if (label) {
"Bucket %lu:\n", (u_long)i);
label = 0;
}
}
}
}
}
if (LF_ISSET(LOCK_DUMP_FREE)) {
}
if (LF_ISSET(LOCK_DUMP_MEM))
}
/*
 * NOTE(review): two anonymous static dump helpers, both taking the lock
 * table and a DB_LOCKOBJ.  Declarators are missing; the second helper's
 * "for (lp =" line is cut off mid-expression and its loop body is gone.
 * Restore both from the intact upstream source.
 */
static void
DB_LOCKTAB *lt;
DB_LOCKOBJ *op;
{
void *ptr;
return;
}
}
static void
DB_LOCKTAB *lt;
DB_LOCKOBJ *op;
{
u_int32_t j;
}
for (lp =
}
}
static const char *
{
switch (status) {
case DB_LSTAT_ABORTED:
return ("aborted");
case DB_LSTAT_ERR:
return ("err");
case DB_LSTAT_FREE:
return ("free");
case DB_LSTAT_HELD:
return ("held");
case DB_LSTAT_NOGRANT:
return ("nogrant");
case DB_LSTAT_PENDING:
return ("pending");
case DB_LSTAT_WAITING:
return ("waiting");
}
return ("unknown status");
}