hash_bigkey.c revision 54925bf60766fbb4f1f2d7c843721406a7b7a3fb
#pragma ident "%Z%%M% %I% %E% SMI"
/*-
* Copyright (c) 1990, 1993, 1994
* The Regents of the University of California. All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* Margo Seltzer.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
static char sccsid[] = "@(#)hash_bigkey.c 8.5 (Berkeley) 11/2/95";
#endif /* LIBC_SCCS and not lint */
/*
* PACKAGE: hash
* DESCRIPTION:
*
* ROUTINES:
* External
* __big_keydata
* __big_split
* __big_insert
* __big_return
* __big_delete
* __find_last_page
* Internal
* collect_key
* collect_data
*/
#include <stdlib.h>
#include <string.h>
#ifdef DEBUG
#include <assert.h>
#endif
#include "db-int.h"
#include "hash.h"
#include "page.h"
#include "extern.h"
/*
* Big_insert
*
* MINFILL * the bucket size
*
* Returns:
* 0 ==> OK
* -1 ==> ERROR
*/
{
/* Add a page! */
pagep =
if (!pagep)
return (-1);
/* There's just going to be one entry on this page. */
/* Move the key's data. */
/* Mark the page as to how much key & data is on this page. */
/* Note big pages build beginning --> end, not vice versa. */
if (key_move_bytes)
if (val_move_bytes)
base_page = 0;
}
return (0);
}
/*
* Called when we need to delete a big pair.
*
* Returns:
* 0 => OK
* -1 => ERROR
*/
#ifdef __STDC__
#else
#endif
{
if (!pagep)
return (-1);
/*
* Traverse through the pages, freeing the previous one (except
* the first) at each new page.
*/
last_pagep = pagep;
if (!pagep)
return (-1);
}
/* Free the last page in the chain. */
return (0);
}
/*
* Given a key, indicates whether the big key at cursorp matches the
* given key.
*
* Returns:
* 1 = Found!
* 0 = Key not found
* -1 error
*/
{
bytes = 0;
hold_pagep = NULL;
/* Chances are, hashp->cpage is the base page. */
else {
if (!pagep)
return (-1);
}
/*
* Now, get the first page with the big stuff on it.
*
* XXX
* KLUDGE: we know that cursor is looking at the _next_ item, so
* we have to look at pgndx - 1.
*/
if (!hold_pagep)
if (!pagep)
return (-1);
/* While there are both keys to compare. */
return (0);
}
if (!pagep)
return (-1);
}
}
#ifdef DEBUG
#endif
if (ksize != 0) {
#ifdef HASH_STATISTICS
#endif
return (0);
} else
return (1);
}
/*
* Fill in the key and data for this big pair.
*/
{
if (!key_pagep)
return (-1);
return (-1);
/* Create an item_info to direct __big_return to the beginning pgno. */
}
/*
* Return the big key on page, ndx.
*/
#ifdef __STDC__
#else
#endif
{
if (!pagep)
return (-1);
return (0);
}
/*
* Return the big key and data indicated in item_info.
*/
{
if (!on_bigkey_page) {
/* Get first page with big pair on it. */
if (!pagep)
return (-1);
} else {
if (!pagep)
return (-1);
}
/* Traverse through the bigkey pages until a page with data is found. */
while (!BIGDATALEN(pagep)) {
if (!pagep)
return (-1);
}
return (-1);
return (0);
}
/*
* Given a page with a big key on it, traverse through the pages counting data
* length, and collect all of the data on the way up. Store the key in
* hashp->bigkey_buf. last_page indicates to the calling function what the
* last page with key on it is; this will help if you later want to retrieve
* the data portion.
*
* Does the work for __get_bigkey.
*
* Return total length of data; -1 if error.
*/
static int32_t
{
#ifdef DEBUG
#endif
/* If this is the last page with key. */
if (BIGDATALEN(pagep)) {
if (hashp->bigkey_buf)
if (!hashp->bigkey_buf)
return (-1);
if (last_page)
return (totlen);
}
/* Key filled up all of last key page, so we've gone 1 too far. */
if (hashp->bigkey_buf)
}
/* Set pagep to the next page in the chain. */
if (last_page)
if (!next_pagep)
return (-1);
#ifdef DEBUG
#endif
#ifdef DEBUG
#endif
return (retval);
}
/*
* Given a page with big data on it, recur through the pages counting data
* length, and collect all of the data on the way up. Store the data in
* hashp->bigdata_buf.
*
* Does the work for __big_return.
*
* Return total length of data; -1 if error.
*/
static int32_t
{
#ifdef DEBUG
#endif
/* If there is no next page. */
if (hashp->bigdata_buf)
if (!hashp->bigdata_buf)
return (-1);
return (totlen);
}
/* Set pagep to the next page in the chain. */
if (!next_pagep)
return (-1);
#ifdef DEBUG
#endif
#ifdef DEBUG
#endif
return (retval);
}