/*-
* Copyright (c) 1990, 1993, 1994
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Margo Seltzer.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#endif /* LIBC_SCCS and not lint */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <libintl.h>
#ifdef DEBUG
#include <assert.h>
#endif
#include "db-int.h"
#include "hash.h"
#include "page.h"
#include "extern.h"
u_int32_t));
#if DB_BYTE_ORDER == DB_LITTLE_ENDIAN
#endif
/* Return values */
#define SUCCESS (0)
#ifdef HASH_STATISTICS
#endif
/************************** INTERFACE ROUTINES ***************************/
/*
 * NOTE(review): severely truncated fragment.  What remains (returns a
 * DB *, takes a file name, distinguishes a new table from an existing
 * one, reads the on-disk header via hget_header(), starts up mpool,
 * sets up bitmaps, a cursor queue and a split buffer) matches the
 * hash_open() entry point of the BSD DB 1.85/1.86 hash access method,
 * but most original lines are missing and braces do not balance.
 * Code left byte-identical; do not modify without the complete source.
 */
extern DB *
const char *file;
{
return (NULL);
}
return (NULL);
/* set this now, before file goes away... */
if (!file) {
/*
*/
/*
* No memory here is not the only failure
* possibility, but probably the most likely.
*/
return(NULL);
}
/* store the file name so that we can unlink it later */
#ifdef DEBUG
"Using file name %s.\n"), file);
#endif
}
/*
* Even if user wants write only, we need to be able to read
* field in the hashp structure needs to be accurate so that
* we can check accesses.
*/
new_table = 0;
errno = 0; /* In case someone looks at errno. */
new_table = 1;
}
if (file) {
}
/* Process arguments to set up hash table header. */
if (new_table) {
} else {
/* Table already exists */
else
/* copy metadata from page into header */
if (hget_header(hashp,
sizeof(HASHHDR))
/* Verify file type, versions and hash function */
/*
* Figure out how many segments we need. Max_Bucket is the
* maximum bucket number, so the number of buckets is
* max_bucket + 1.
*/
/* Read in bitmaps */
}
/* start up mpool */
else
/*
* For a new table, set up the bitmaps.
*/
if (new_table &&
goto error2;
/* initialize the cursor queue */
/* get a chunk of memory for our split buffer */
goto error2;
goto error2;
#ifdef DEBUG
"%s\n%s%lx\n%s%d\n%s%d\n%s%d\n%s%d\n%s%d\n%s%x\n%s%x\n%s%d\n%s%d\n",
"init_htab:",
"TABLE POINTER ", (void *)hashp,
#endif
#ifdef HASH_STATISTICS
hash_bigpages = 0;
#endif
return (dbp);
/* error-exit paths: preserve errno across cleanup before returning NULL */
save_errno = errno;
errno = save_errno;
return (NULL);
if (!specified_file)
errno = save_errno;
return (NULL);
}
/*
 * NOTE(review): truncated fragment -- the function name, parameters and
 * most of the body are missing (position after hash_open suggests DB
 * 1.85's hash_close(): guard against a NULL dbp, tear down, return the
 * destroy status).  Left byte-identical; do not edit without full source.
 */
static int32_t
{
if (!dbp)
return (ERROR);
return (retval);
}
/*
 * NOTE(review): truncated fragment -- name/parameters missing (position
 * suggests DB 1.85's hash_fd()).  Guards against NULL dbp, returns -1.
 * The extra closing brace below is residue of missing lines; braces in
 * this view do not balance.  Left byte-identical.
 */
static int32_t
{
if (!dbp)
return (ERROR);
return (-1);
}
}
/************************** LOCAL CREATION ROUTINES **********************/
/*
 * NOTE(review): truncated fragment of an HTAB * allocator/initializer
 * (the nelem default, bucket-size and pagesize hints suggest DB 1.85's
 * init_hash(), which applies HASHINFO overrides).  Most lines missing;
 * braces unbalanced.  Left byte-identical.
 */
static HTAB *
const char *file;
{
nelem = 1;
/* Fix bucket size to be optimal for file system */
return (NULL);
}
if (info) {
/* Round pagesize up to power of 2 */
return (NULL);
}
}
return (NULL);
}
}
}
return (hashp);
}
/*
* Returns 0 on No Error
*/
/*
 * NOTE(review): truncated fragment (preceding "Returns 0 on No Error"
 * comment and the bucket/header-page math suggest DB 1.85's
 * init_htab(), which sizes the table for a new database).  The
 * commented-out per-bucket __new_page() loop is original.  Left
 * byte-identical.
 */
static int32_t
{
/*
* Divide number of elements by the fill factor and determine a
* desired number of buckets. Allocate space for the next greater
* power of two number of buckets.
*/
/*
* The number of header pages is the size of the header divided by
* the amount of freespace on header pages (the page size - the
* size of 1 integer where the length of the header info on that
* page is stored) plus another page if it didn't divide evenly.
*/
? 0 : 1);
/* Create pages for these buckets */
/*
for (i = 0; i <= hashp->hdr.max_bucket; i++) {
if (__new_page(hashp, (u_int32_t)i, A_BUCKET) != 0)
return (-1);
}
*/
/* First bitmap page is at: splitpoint l2 page offset 1 */
return (-1);
return (0);
}
/*
*/
/*
 * NOTE(review): truncated fragment of the header reader (hget_header:
 * copies the on-disk HASHHDR into memory, byte-swapping on little-endian
 * hosts, and returns the number of bytes copied -- 0 on failure).  The
 * read call itself is missing from this view.  Left byte-identical.
 */
static u_int32_t
{
num_copied = 0;
i = 0;
/*
* XXX
* This should not be printing to stderr on a "normal" error case.
*/
if (num_copied != sizeof(HASHHDR)) {
/* Solaris Kerberos: Make sure to print a newline */
"hash: could not retrieve header\n"));
return (0);
}
#if DB_BYTE_ORDER == DB_LITTLE_ENDIAN
#endif
return (num_copied);
}
/*
 * NOTE(review): truncated fragment of the header writer (hput_header:
 * byte-swaps a copy of the in-memory HASHHDR on little-endian hosts and
 * writes it to disk, warning on a short write).  The write call itself
 * is missing from this view.  Left byte-identical.
 */
static void
{
#if DB_BYTE_ORDER == DB_LITTLE_ENDIAN
#endif
num_copied = i = 0;
#if DB_BYTE_ORDER == DB_LITTLE_ENDIAN
#endif
if (num_copied != sizeof(HASHHDR))
/* Solaris Kerberos: Make sure to print a newline */
"hash: could not write hash header\n"));
return;
}
/*
* Flushes any changes to the file if necessary and destroys the hashp
* structure, freeing all allocated space.
*/
/*
 * NOTE(review): truncated fragment of hdestroy() -- frees the split
 * buffer, big key/data buffers and the seq cursor, shuts down mpool,
 * optionally unlinks a temporary file, and reports any saved errno.
 * The actual free/sync/unlink calls are missing from this view; only
 * their guards and comments remain.  Left byte-identical.
 */
static int32_t
{
save_errno = 0;
#ifdef HASH_STATISTICS
{ int i;
"hdestroy: accesses %ld collisions %ld\n"),
"hdestroy: expansions %ld\n"), hash_expansions);
"hdestroy: overflows %ld\n"), hash_overflows);
"hdestroy: big key/data pages %ld\n"), hash_bigpages);
for (i = 0; i < NCACHED; i++)
}
#endif
save_errno = errno;
/* Free the split page */
/* Free the big key and big data returns */
if (hashp->bigkey_buf)
if (hashp->bigdata_buf)
/* XXX This should really iterate over the cursor queue, but
it's not clear how to do that, and the only cursor a hash
table ever creates is the one used by hash_seq(). Passing
NULL as the first arg is also a kludge, but I know that
it's never used, so I do it. The intent is to plug the
memory leak. Correctness can come later. */
if (hashp->seq_cursor)
/* shut down mpool */
/*
* *** This may cause problems if hashp->fname is set in any case
* other than the case that we are generating a temporary file name.
* Note that the new version of mpool should support temporary
* files within mpool itself.
*/
#ifdef DEBUG
#endif
/* we need to chmod the file to allow it to be deleted... */
/* destroy the temporary name */
}
if (save_errno) {
errno = save_errno;
return (ERROR);
}
return (SUCCESS);
}
/*
* Write modified pages to disk
*
* Returns:
* 0 == OK
* -1 ERROR
*/
/*
 * NOTE(review): truncated fragment -- per the preceding header comment
 * this writes modified pages to disk (0 == OK, -1 error), but the
 * entire body except an XXX marker is missing from this view.
 */
static int32_t
{
/*
* XXX
*/
}
/*
* Returns:
* 0 == OK
* -1 indicates that errno should be set
*/
/*
 * NOTE(review): truncated fragment of the metadata flusher (writes the
 * header plus the NCACHED cached bitmap pages via __put_page; returns 0
 * on success, -1 meaning errno should be set).  The __put_page argument
 * list is cut off mid-call.  Left byte-identical.
 */
static int32_t
{
int32_t i;
return (0);
/* write out metadata */
for (i = 0; i < NCACHED; i++)
if (__put_page(hashp,
return (-1);
}
return (0);
}
/*******************************SEARCH ROUTINES *****************************/
/*
* All the access routines return
*
* Returns:
* 0 on SUCCESS
* 1 to indicate an external ERROR (i.e. key not found, etc)
* -1 to indicate an internal ERROR (i.e. out of memory, etc)
*/
/* *** make sure this is true! */
/*
 * NOTE(review): truncated access-routine wrapper (first of three;
 * position suggests hash_get -- rejects unsupported flags, then would
 * dispatch to hash_access).  Name, parameters and dispatch call are
 * missing from this view.  Left byte-identical.
 */
static int32_t
{
if (flag) {
return (ERROR);
}
}
/*
 * NOTE(review): truncated access-routine wrapper (second of three;
 * position suggests hash_put -- flag/permission checks, then dispatch
 * to hash_access).  The conditions guarding these error returns are
 * missing from this view.  Left byte-identical.
 */
static int32_t
{
return (ERROR);
}
return (ERROR);
}
}
/*
 * NOTE(review): truncated access-routine wrapper (third of three;
 * position suggests hash_delete -- rejects unsupported flags and
 * read-only tables, then dispatches to hash_access).  Left
 * byte-identical.
 */
static int32_t
{
if (flag) {
return (ERROR);
}
return (ERROR);
}
}
/*
* Assume that hashp has been set in wrapper routine.
*/
/*
 * NOTE(review): truncated fragment of hash_access(), the common worker
 * for get/put/delete (per the preceding "Assume that hashp has been set
 * in wrapper routine" comment).  The visible skeleton: walk the bucket
 * chain looking for the key, then switch on the action twice -- once
 * for the not-found case (PUT/PUTNEW fall through to insertion;
 * GET/DELETE return ABNORMAL) and once for the found case (PUTNEW on an
 * existing key is ABNORMAL; unknown actions abort()).  The page-walking
 * and item-manipulation calls are missing from this view; braces do not
 * balance.  Left byte-identical.
 */
static int32_t
{
#ifdef HASH_STATISTICS
#endif
num_items = 0;
/*
* Set up item_info so that we're looking for space to add an item
* as we cycle through the pages looking for the key.
*/
else
} else
item_info.seek_found_page = 0;
while (1) {
return (ABNORMAL);
break;
num_items++;
/*
* !!!
* 0 is a valid index.
*/
goto found;
goto found;
}
#ifdef HASH_STATISTICS
#endif
/*
* At this point, item_info will list either the last page in
* the chain, or the last page in the chain plus a pgno for where
* to find the first page in the chain with space for the
* item we wish to add.
*/
/* Not found */
switch (action) {
case HASH_PUT:
case HASH_PUTNEW:
return (ERROR);
break;
case HASH_GET:
case HASH_DELETE:
default:
return (ABNORMAL);
}
if (item_info.caused_expand)
return (SUCCESS);
switch (action) {
case HASH_PUTNEW:
/* mpool_put(hashp->mp, pagep, 0); */
return (ABNORMAL);
case HASH_GET:
return (ERROR);
} else {
}
/* *** data may not be available! */
break;
case HASH_PUT:
return (ERROR);
if (item_info.caused_expand)
break;
case HASH_DELETE:
return (ERROR);
break;
default:
abort();
}
return (SUCCESS);
}
/* ****************** CURSORS ********************************** */
/*
 * NOTE(review): truncated fragment of a cursor constructor (returns a
 * CURSOR *, NULL on allocation failure, and links the new cursor onto
 * the table's cursor queue).  Name, parameters and the allocation
 * itself are missing from this view.  Left byte-identical.
 */
CURSOR *
{
if (!new_curs)
return NULL;
return NULL;
}
/* place onto queue of cursors */
return new_curs;
}
/*
 * NOTE(review): truncated fragment of the cursor-advance routine (the
 * long original comment about get_item_next / pgndx semantics is
 * preserved below).  The loop body that actually fetches items and the
 * error conditions guarding these returns are missing from this view.
 * Left byte-identical.
 */
static int32_t
{
return (ERROR);
}
#ifdef HASH_STATISTICS
#endif
else
/*
* This needs to be changed around. As is, get_item_next advances
* the pointers on the page but this function actually advances
* bucket pointers. This works, since the only other place we
* use get_item_next is in hash_access which only deals with one
* bucket at a time. However, there is the problem that certain other
* functions (such as find_bigpair and delpair) depend on the
* pgndx member of the cursor. Right now, they are using pngdx - 1
* since indices refer to the __next__ item that is to be fetched
* from the page. This is ugly, as you may have noticed, whoever
* you are. The best solution would be to depend on item_infos to
* deal with _current_ information, and have the cursors only
* deal with _next_ information. In that scheme, get_item_next
* would also advance buckets. Version 3...
*/
/*
* Must always enter this loop to do error handling and
*/
while (1) {
return (ABNORMAL);
break;
return (ABNORMAL);
return (ABNORMAL);
}
return (0);
}
/*
 * NOTE(review): truncated fragment of a cursor teardown routine (the
 * original XXX comment about the empirically-determined leak fix is
 * preserved).  The actual free/unlink statements are missing from this
 * view.  Left byte-identical.
 */
static int32_t
{
/* XXX this is empirically determined, so it might not be completely
correct, but it seems to work. At the very least it fixes
a memory leak */
return (0);
}
/*
 * NOTE(review): truncated fragment of hash_seq() -- sequential
 * traversal using the table's default cursor (created lazily when
 * hashp->seq_cursor is NULL).  The cursor-creation and get calls are
 * missing from this view.  Left byte-identical (comment typo fixed).
 */
static int32_t
{
/*
* Seq just uses the default cursor to go sequencing through the
* database. Note that the default cursor is the first in the list.
*/
if (!hashp->seq_cursor)
}
/********************************* UTILITIES ************************/
/*
* Returns:
* 0 ==> OK
* -1 ==> Error
*/
/*
 * NOTE(review): truncated fragment of the table-expansion routine
 * (allocates a page for a new bucket, handles split-point doubling,
 * fails with "Pages exhausted" when no bucket can be allocated, then
 * relocates records -- the split call itself is missing).  The function
 * signature line is missing entirely from this view; the opening brace
 * below is all that remains of it.  Left byte-identical.
 */
{
#ifdef HASH_STATISTICS
#endif
/* Get a page for this new bucket */
return (-1);
/*
* If the split point is increasing (hdr.max_bucket's log base 2
* increases), we need to copy the current contents of the spare
* split bucket to the next bucket.
*/
}
/* Starting a new doubling */
}
"hash: Cannot allocate new bucket. Pages exhausted.\n"));
return (-1);
}
/* Relocate records to the new bucket */
}
/*
 * NOTE(review): truncated fragment of a key-to-bucket mapping routine
 * (K&R-style parameter declaration for the key bytes, returns a bucket
 * number).  The hash computation is missing from this view.  Left
 * byte-identical.
 */
int8_t *k;
{
return (bucket);
}
#if DB_BYTE_ORDER == DB_LITTLE_ENDIAN
/*
* Hashp->hdr needs to be byteswapped.
*/
/*
 * NOTE(review): truncated fragment of the little-endian header
 * byte-swap-into-copy routine (per the preceding "Hashp->hdr needs to
 * be byteswapped" comment); the per-field swaps inside the NCACHED
 * loop are missing from this view.  Left byte-identical.
 */
static void
{
int32_t i;
for (i = 0; i < NCACHED; i++) {
}
}
/*
 * NOTE(review): truncated fragment of the in-place header byte-swap
 * routine (companion to the copy variant above it in the original
 * source); the per-field swaps inside the NCACHED loop are missing
 * from this view.  Left byte-identical.
 */
static void
{
int32_t i;
for (i = 0; i < NCACHED; i++) {
}
}
#endif /* DB_BYTE_ORDER == DB_LITTLE_ENDIAN */