rbtdb.c revision 6098d364b690cb9dabf96e9664c4689c8559bd2e
/*
* Copyright (C) 2004-2008 Internet Systems Consortium, Inc. ("ISC")
* Copyright (C) 1999-2003 Internet Software Consortium.
*
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/* $Id: rbtdb.c,v 1.263 2008/09/24 02:46:22 marka Exp $ */
/*! \file */
/*
* Principal Author: Bob Halley
*/
#include <config.h>
/* #define inline */
#include <isc/platform.h>
#include <isc/refcount.h>
#include <dns/dbiterator.h>
#include <dns/fixedname.h>
#include <dns/masterdump.h>
#include <dns/rdataset.h>
#include <dns/rdatasetiter.h>
#include <dns/rdataslab.h>
#include <dns/rdatastruct.h>
#ifdef DNS_RBTDB_VERSION64
#include "rbtdb64.h"
#else
#include "rbtdb.h"
#endif
#ifdef DNS_RBTDB_VERSION64
#else
#endif
/*%
* Note that "impmagic" is not the first four bytes of the struct, so
* ISC_MAGIC_VALID cannot be used.
*/
#ifdef DNS_RBTDB_VERSION64
/* Version serial numbers are 64 bits wide in the rbtdb64 build. */
typedef isc_uint64_t rbtdb_serial_t;
/*%
 * Make casting easier in symbolic debuggers by using different names
 * for the 64 bit version.
 */
#define dns_rbtdb_t dns_rbtdb64_t
#define rdatasetheader_t rdatasetheader64_t
#define rbtdb_version_t rbtdb_version64_t
#else
/* Default build: 32-bit version serial numbers. */
typedef isc_uint32_t rbtdb_serial_t;
#endif
typedef isc_uint32_t rbtdb_rdatatype_t;
#define RBTDB_RDATATYPE_SIGNSEC \
#define RBTDB_RDATATYPE_SIGNSEC3 \
#define RBTDB_RDATATYPE_SIGNS \
#define RBTDB_RDATATYPE_SIGCNAME \
#define RBTDB_RDATATYPE_SIGDNAME \
#define RBTDB_RDATATYPE_NCACHEANY \
/*
* We use rwlock for DB lock only when ISC_RWLOCK_USEATOMIC is non 0.
* Using rwlock is effective with regard to lookup performance only when
* it is implemented in an efficient way.
* Otherwise, it is generally wise to stick to the simple locking since rwlock
* would require more memory or can even make lookups slower due to its own
* overhead (when it internally calls mutex locks).
*/
#ifdef ISC_RWLOCK_USEATOMIC
/* Atomic-based rwlock is available: use rwlock for the DB lock. */
#define DNS_RBTDB_USERWLOCK 1
#else
/* No efficient rwlock backend: fall back to a plain mutex. */
#define DNS_RBTDB_USERWLOCK 0
#endif
#define RBTDB_INITLOCK(l) isc_rwlock_init((l), 0, 0)
#define RBTDB_DESTROYLOCK(l) isc_rwlock_destroy(l)
#define RBTDB_LOCK(l, t) RWLOCK((l), (t))
#define RBTDB_UNLOCK(l, t) RWUNLOCK((l), (t))
#else
#define RBTDB_INITLOCK(l) isc_mutex_init(l)
#define RBTDB_DESTROYLOCK(l) DESTROYLOCK(l)
#define RBTDB_LOCK(l, t) LOCK(l)
#define RBTDB_UNLOCK(l, t) UNLOCK(l)
#endif
/*
* Since node locking is sensitive to both performance and memory footprint,
* we need some trick here. If we have both high-performance rwlock and
* high performance and small-memory reference counters, we use rwlock for
* node lock and isc_refcount for node references. In this case, we don't have
* to protect the access to the counters by locks.
* Otherwise, we simply use ordinary mutex lock for node locking, and use
* simple integers as reference counters which is protected by the lock.
* In most cases, we can simply use wrapper macros such as NODE_LOCK and
* NODE_UNLOCK. In some other cases, however, we need to protect reference
* counters first and then protect other parts of a node as read-only data.
* Special additional macros, NODE_STRONGLOCK(), NODE_WEAKLOCK(), etc, are also
* provided for these special cases. When we can use the efficient backend
* routines, we should only protect the "other members" by NODE_WEAKLOCK(read).
* Otherwise, we should use NODE_STRONGLOCK() to protect the entire critical
* section including the access to the reference counter.
* Note that we cannot use NODE_LOCK()/NODE_UNLOCK() wherever the protected
* section is also protected by NODE_STRONGLOCK().
*/
#if defined(ISC_RWLOCK_USEATOMIC) && defined(DNS_RBT_USEISCREFCOUNT)
/*
 * Efficient backend: per-node rwlocks plus lock-free reference counters.
 * STRONG locks become no-ops because the WEAK (rwlock) forms, together
 * with atomic refcounts, provide all needed protection.
 */
typedef isc_rwlock_t nodelock_t;
#define NODE_INITLOCK(l) isc_rwlock_init((l), 0, 0)
#define NODE_DESTROYLOCK(l) isc_rwlock_destroy(l)
#define NODE_UNLOCK(l, t) RWUNLOCK((l), (t))
#define NODE_TRYUPGRADE(l) isc_rwlock_tryupgrade(l)
#define NODE_STRONGLOCK(l) ((void)0)
#define NODE_STRONGUNLOCK(l) ((void)0)
/* NOTE(review): NODE_LOCK is not visible in this chunk; presumably it is
 * defined nearby as RWLOCK((l), (t)) -- confirm against the full source. */
#define NODE_WEAKLOCK(l, t) NODE_LOCK(l, t)
#define NODE_WEAKUNLOCK(l, t) NODE_UNLOCK(l, t)
#define NODE_WEAKDOWNGRADE(l) isc_rwlock_downgrade(l)
#else
/*
 * Fallback backend: a plain mutex per node lock bucket. Here the STRONG
 * forms do the real locking and the WEAK forms are no-ops; upgrades
 * trivially "succeed" since a mutex is already exclusive.
 */
typedef isc_mutex_t nodelock_t;
#define NODE_INITLOCK(l) isc_mutex_init(l)
#define NODE_DESTROYLOCK(l) DESTROYLOCK(l)
#define NODE_UNLOCK(l, t) UNLOCK(l)
#define NODE_TRYUPGRADE(l) ISC_R_SUCCESS
#define NODE_STRONGLOCK(l) LOCK(l)
#define NODE_STRONGUNLOCK(l) UNLOCK(l)
#define NODE_WEAKLOCK(l, t) ((void)0)
#define NODE_WEAKUNLOCK(l, t) ((void)0)
#define NODE_WEAKDOWNGRADE(l) ((void)0)
#endif
/*
* Allow clients with a virtual time of up to 5 minutes in the past to see
* records that would have otherwise have expired.
*/
#define RBTDB_VIRTUAL 300
/*
 * Proof data attached to an rdataset header for "noqname" (closest
 * encloser / name-nonexistence) handling; see "Add noqname proof" in
 * bind_rdataset() below.
 */
struct noqname {
	/* NOTE(review): presumably an rdataslab holding the negative
	 * (NSEC/NSEC3) rdataset -- confirm against the full source. */
	void * neg;
	/* NOTE(review): presumably the rdataslab holding the RRSIG
	 * covering 'neg' -- confirm against the full source. */
	void * negsig;
};
typedef struct acachectl acachectl_t;
typedef struct rdatasetheader {
/*%
* Locked by the owning node's lock.
*/
/*%<
* We don't use the LIST macros, because the LIST structure has
* both head and tail pointers, and is doubly linked.
*/
struct rdatasetheader *next;
/*%<
* If this is the top header for an rdataset, 'next' points
* to the top header for the next rdataset (i.e., the next type).
* Otherwise, it points up to the header whose down pointer points
* at this header.
*/
struct rdatasetheader *down;
/*%<
* Points to the header for the next older version of
* this rdataset.
*/
/*%<
* Monotonously increased every time this rdataset is bound so that
* it is used as the base of the starting point in DNS responses
* when the "cyclic" rrset-order is required. Since the ordering
* should not be so crucial, no lock is set for the counter for
* performance reasons.
*/
/*%<
* Used for LRU-based cache management. We should probably make
* these cache-DB specific. We might also make it a pointer and
* ensure only the top header has a valid link to save memory.
* The linked-list is locked by the rbtdb->lrulock.
*/
/*
* It's possible this should not be here anymore, but instead
* referenced from the bucket's heap directly.
*/
#if 0
#endif
unsigned int heap_index;
/*%<
* Used for TTL-based cache cleaning.
*/
#define RDATASET_ATTR_NONEXISTENT 0x0001
#define RDATASET_ATTR_STALE 0x0002
#define RDATASET_ATTR_IGNORE 0x0004
#define RDATASET_ATTR_RETAIN 0x0008
#define RDATASET_ATTR_NXDOMAIN 0x0010
#define RDATASET_ATTR_RESIGN 0x0020
#define RDATASET_ATTR_STATCOUNT 0x0040
#define RDATASET_ATTR_OPTOUT 0x0080
typedef struct acache_cbarg {
unsigned int count;
struct acachectl {
};
/*
* XXX
* When the cache will pre-expire data (due to memory low or other
* situations) before the rdataset's TTL has expired, it MUST
* respect the RETAIN bit and not expire the data until its TTL is
* expired.
*/
#define NONEXISTENT(header) \
typedef struct {
/* Protected in the refcount routines. */
/* Locked by lock. */
typedef struct rbtdb_changed {
typedef struct rbtdb_version {
/* Not locked */
/*
* Protected in the refcount routines.
* XXXJT: should we change the lock policy based on the refcount
* performance?
*/
/* Locked by database lock. */
/* NSEC3 parameters */
unsigned char salt[NSEC3_MAX_HASH_LENGTH];
typedef enum {
typedef struct {
/* Unlocked. */
#else
#endif
unsigned int node_lock_count;
/* Locked by lock. */
unsigned int active;
unsigned int attributes;
isc_task_t * task;
/*
* This is a linked list used to implement the LRU cache. There will
* be node_lock_count linked lists here. Nodes in bucket 1 will be
* placed on the linked list rdatasets[1].
*/
/*%
* Temporary storage for stale cache nodes and dynamically deleted
* nodes that await being cleaned up.
*/
/*
* Heaps. Each of these is used for TTL based expiry.
*/
isc_heap_t **heaps;
/* Locked by tree_lock. */
/* Unlocked */
unsigned int quantum;
} dns_rbtdb_t;
#define RBTDB_ATTR_LOADED 0x01
#define RBTDB_ATTR_LOADING 0x02
/*%
* Search Context
*/
typedef struct {
dns_rbtdb_t * rbtdb;
unsigned int options;
/*%
* Load Context
*/
typedef struct {
dns_rbtdb_t * rbtdb;
} rbtdb_load_t;
dns_zone_t **zonep,
dns_name_t *fname);
static dns_rdatasetmethods_t rdataset_methods = {
NULL,
NULL,
};
static dns_rdatasetitermethods_t rdatasetiter_methods = {
};
typedef struct rbtdb_rdatasetiter {
dns_name_t *name);
dns_name_t *name);
dns_name_t *name);
static dns_dbiteratormethods_t dbiterator_methods = {
};
#define DELETION_BATCH_MAX 64
/*
* If 'paused' is ISC_TRUE, then the tree lock is not being held.
*/
typedef struct rbtdb_dbiterator {
int delete;
isc_event_t *event);
/*%
* 'init_count' is used to initialize 'newheader->count' which inturn
* is used to determine where in the cycle rrset-order cyclic starts.
* We don't lock this as we don't care about simultanious updates.
*
* Note:
* Both init_count and header->count can be ISC_UINT32_MAX.
* The count on the returned rdataset however can't be as
* that indicates that the database does not implement cyclic
* processing.
*/
static unsigned int init_count;
/*
* Locking
*
* If a routine is going to lock more than one lock in this module, then
* the locking must be done in the following order:
*
* Tree Lock
*
* Node Lock (Only one from the set may be locked at one time by
* any caller)
*
* Database Lock
*
* Failure to follow this hierarchy can result in deadlock.
*/
/*
* Deleting Nodes
*
* For zone databases the node for the origin of the zone MUST NOT be deleted.
*/
/*
* DB Routines
*/
static void
}
static void
}
static void
{
dns_rdatastatstype_t base = 0;
/* At the moment we count statistics only for cache DB */
} else
if (increment)
else
}
static void
int idx;
return;
/*
* It's possible the rbtdb is not a cache. If this is the case,
* we will not have a heap, and we move on. If we do, though,
* we might need to adjust things.
*/
return;
return;
else
}
/*%
* These functions allow the heap code to rank the priority of each
* element. It returns ISC_TRUE if v1 happens "sooner" than v2.
*/
static isc_boolean_t
return (ISC_TRUE);
return (ISC_FALSE);
}
static isc_boolean_t
return (ISC_TRUE);
return (ISC_FALSE);
}
/*%
* This function sets the heap index into the header.
*/
static void
rdatasetheader_t *h = what;
h->heap_index = index;
}
/*%
* Work out how many nodes can be deleted in the time between two
* requests to the nameserver. Smooth the resulting number and use it
* as a estimate for the number of nodes to be deleted in the next
* iteration.
*/
static unsigned int
unsigned int interval;
unsigned int new;
if (pps < 100)
pps = 100;
isc_time_now(&end);
if (interval == 0)
interval = 1;
if (usecs == 0) {
/*
* We were unable to measure the amount of time taken.
* Double the nodes deleted next time.
*/
old *= 2;
if (old > 1000)
old = 1000;
return (old);
}
if (new == 0)
new = 1;
else if (new > 1000)
new = 1000;
/* Smooth */
return (new);
}
static void
unsigned int i;
char buf[DNS_NAME_FORMATSIZE];
unsigned int refs;
&refs);
sizeof(rbtdb_version_t));
}
/*
* We assume the number of remaining dead nodes is reasonably small;
* the overhead of unlinking all nodes here should be negligible.
*/
for (i = 0; i < rbtdb->node_lock_count; i++) {
}
}
if (result == ISC_R_QUOTA) {
&start);
NULL,
sizeof(isc_event_t));
goto again;
return;
}
}
if (result == ISC_R_QUOTA) {
&start);
NULL,
sizeof(isc_event_t));
goto again;
return;
}
}
if (log) {
sizeof(buf));
else
"done free_rbtdb(%s)", buf);
}
for (i = 0; i < rbtdb->node_lock_count; i++) {
}
/*
* Clean up LRU / re-signing order lists.
*/
for (i = 0; i < rbtdb->node_lock_count; i++)
sizeof(rdatasetheaderlist_t));
}
/*
* Clean up dead node buckets.
*/
for (i = 0; i < rbtdb->node_lock_count; i++)
}
/*
* Clean up heap objects.
*/
for (i = 0; i < rbtdb->node_lock_count; i++)
sizeof(isc_heap_t *));
}
}
static inline void
unsigned int i;
unsigned int inactive = 0;
/* XXX check for open versions here */
/*
* Even though there are no external direct references, there still
* may be nodes in use.
*/
for (i = 0; i < rbtdb->node_lock_count; i++) {
== 0) {
inactive++;
}
}
if (inactive != 0) {
if (want_free) {
char buf[DNS_NAME_FORMATSIZE];
sizeof(buf));
else
"calling free_rbtdb(%s)", buf);
}
}
}
static void
unsigned int refs;
if (refs == 0)
}
static void
unsigned int refs;
}
static inline rbtdb_version_t *
{
return (NULL);
if (result != ISC_R_SUCCESS) {
return (NULL);
}
return (version);
}
static isc_result_t
ISC_TRUE);
} else {
version->iterations = 0;
version->salt_length = 0;
}
rbtdb->next_serial++;
}
return (ISC_R_NOMEMORY);
return (ISC_R_SUCCESS);
}
static void
{
unsigned int refs;
*targetp = rbtversion;
}
static rbtdb_changed_t *
{
unsigned int refs;
/*
* Caller must be holding the node lock if its reference must be
* protected by the lock.
*/
} else
return (changed);
}
static void
{
unsigned int count;
unsigned int i;
unsigned char *raw; /* RDATASLAB */
/*
* The caller must be holding the corresponding node lock.
*/
return;
/*
* Sanity check: since an additional cache entry has a reference to
* the original DB node (in the callback arg), there should be no
* acache entries when the node can be freed.
*/
for (i = 0; i < count; i++)
}
static inline void
}
static inline void
{
ISC_LINK_INIT(h, lru_link);
h->heap_index = 0;
#if TRACE_HEADER
#else
#endif
}
static inline rdatasetheader_t *
{
rdatasetheader_t *h;
h = isc_mem_get(mctx, sizeof(*h));
if (h == NULL)
return (NULL);
#if TRACE_HEADER
#endif
init_rdataset(rbtdb, h);
return (h);
}
static inline void
{
unsigned int size;
int idx;
}
if (rdataset->heap_index != 0)
rdataset->heap_index = 0;
else
sizeof(*rdataset));
}
static inline void
/*
* Caller must hold the node lock.
*/
/*
* We set the IGNORE attribute on rdatasets with serial number
* 'serial'. When the reference count goes to zero, these rdatasets
* will be cleaned up; until that time, they will be ignored.
*/
}
}
}
}
if (make_dirty)
}
static inline void
{
rdatasetheader_t *d, *down_next;
}
}
static inline void
/*
* Caller must be holding the node lock.
*/
/*
* If current is nonexistent or stale, we can clean it up.
*/
if ((current->attributes &
(RDATASET_ATTR_NONEXISTENT|RDATASET_ATTR_STALE)) != 0) {
else
} else
}
}
static inline void
{
/*
* Caller must be holding the node lock.
*/
REQUIRE(least_serial != 0);
/*
* First, we clean up any instances of multiple rdatasets
* with the same serial number, or that have the IGNORE
* attribute.
*/
} else
}
/*
* We've now eliminated all IGNORE datasets with the possible
* exception of current, which we now check.
*/
else
/*
* current no longer exists, so we can
* just continue with the loop.
*/
continue;
} else {
/*
* Pull up current->down, making it the new
* current.
*/
else
}
}
/*
* We now try to find the first down node less than the
* least serial.
*/
break;
}
/*
* If there is a such an rdataset, delete it and any older
* versions.
*/
do {
}
/*
* Note. The serial number of 'current' might be less than
* least_serial too, but we cannot delete it because it is
* the most recent version, unless it is a NONEXISTENT
* rdataset.
*/
} else {
/*
* If this is a NONEXISTENT rdataset, we can delete it.
*/
if (NONEXISTENT(current)) {
else
} else
}
}
if (!still_dirty)
}
/*%
* Clean up dead nodes. These are nodes which have no references, and
* have no data. They are dead but we could not or chose not to delete
* them when we deleted all the data at that node because we did not want
* to wait for the tree write lock.
*
* The caller must hold a tree write lock and bucketnum'th node (write) lock.
*/
static void
/*
* Since we're holding a tree write lock, it should be
* impossible for this node to be referenced by others.
*/
else
if (result != ISC_R_SUCCESS)
"cleanup_dead_nodes: "
"dns_rbt_deletenode: %s",
count--;
}
}
/*
* Caller must be holding the node lock if its reference must be protected
* by the lock.
*/
static inline void
}
}
/*
* This function is assumed to be called when a node is newly referenced
* and can be in the deadnode list. In that case the node must be retrieved
* from the list because the it is going to be used. In addition, if the caller
* happens to hold a write lock on the tree, it's a good chance to purge dead
* nodes.
* Note: while a new reference is gained in multiple places, there are only very
* few cases where the node can be in the deadnode list (only empty nodes can
* have been added to the list).
*/
static inline void
{
if (need_relock) {
if (treelocktype == isc_rwlocktype_write)
}
}
/*
* Caller must be holding the node lock; either the "strong", read or write
* lock. Note that the lock must be held even when node references are
* atomically modified; in that case the decrement operation itself does not
* have to be protected, but we must avoid a race condition where multiple
* threads are decreasing the reference to zero simultaneously and at least
* one of them is going to free the node.
* This function returns ISC_TRUE if and only if the node reference decreases
* to zero.
*/
static isc_boolean_t
{
/* Handle easy and typical case first. */
if (nrefs == 0) {
}
}
/* Upgrade the lock? */
if (nlock == isc_rwlocktype_read) {
}
if (nrefs > 0) {
/* Restore the lock? */
if (nlock == isc_rwlocktype_read)
return (ISC_FALSE);
}
else {
if (least_serial == 0) {
/*
* Caller doesn't know the least serial.
* Get it.
*/
}
}
}
/*
* XXXDCL should this only be done for cache zones?
*/
/* Restore the lock? */
if (nlock == isc_rwlocktype_read)
return (ISC_TRUE);
}
/*
* Attempt to switch to a write lock on the tree. If this fails,
* we will add this node to a linked list of nodes in this locking
* bucket which we will free later.
*/
if (tlock != isc_rwlocktype_write) {
/*
* Locking hierarchy notwithstanding, we don't need to free
* the node lock before acquiring the tree write lock because
* we only do a trylock.
*/
if (tlock == isc_rwlocktype_read)
else
result == ISC_R_LOCKBUSY);
} else
/*
* We can now delete the node if the reference counter is
* zero. This should be typically the case, but a different
* thread may still gain a (new) reference just before the
* current thread locks the tree (e.g., in findnode()).
*/
char printname[DNS_NAME_FORMATSIZE];
"decrement_reference: "
"delete from rbt: %p %s",
node,
sizeof(printname)));
}
else
if (result != ISC_R_SUCCESS)
"decrement_reference: "
"dns_rbt_deletenode: %s",
} else if (dns_rbtnode_refcurrent(node) == 0) {
}
/* Restore the lock? */
if (nlock == isc_rwlocktype_read)
/*
* Relock a read lock, or unlock the write lock if no lock was held.
*/
if (tlock == isc_rwlocktype_none)
if (write_locked)
if (tlock == isc_rwlocktype_read)
if (write_locked)
return (ISC_TRUE);
}
static inline void
{
/*
* Caller must be holding the database lock.
*/
}
static inline void
/*
* If the changed record is dirty, then
* an update created multiple versions of
* a given rdataset. We keep this list
* until we're the least open version, at
* which point it's safe to get rid of any
* older versions.
*
* If the changed record isn't dirty, then
* we don't need it anymore since we're
* committing and not rolling back.
*
* The caller must be holding the database lock.
*/
changed = next_changed) {
}
}
}
static void
if (result == ISC_R_SUCCESS) {
while (result == ISC_R_SUCCESS) {
if (dns_zonekey_iszonekey(&keyrdata)) {
break;
}
}
}
if (!haszonekey) {
return;
}
0, 0, &nsecset, &signsecset);
if (result == ISC_R_SUCCESS) {
if (dns_rdataset_isassociated(&signsecset)) {
if (result == ISC_R_SUCCESS) {
}
}
}
/*
*/
/*
*/
else if (hasoptbit || nsec3createflag)
else
}
/*%<
* Walk the origin node looking for NSEC3PARAM records.
* Cache the nsec3 parameters.
*/
static void
{
unsigned char *raw; /* RDATASLAB */
header = header_next) {
do {
if (NONEXISTENT(header))
break;
} else
/*
* Find A NSEC3PARAM with a supported algorithm.
*/
#else
raw += 2;
#endif
while (count-- > 0U) {
raw += 4;
#else
raw += 2;
#endif
®ion);
NULL);
continue;
#ifdef RFC5155_STRICT
if (nsec3param.flags != 0)
continue;
#else
!= 0)
!= 0)
continue;
#endif
/*
* Look for a better algorithm than the
* unknown test algorithm.
*/
goto unlock;
}
}
}
}
static void
unsigned int refs;
if (refs > 0) { /* typical and easy case first */
if (commit) {
}
goto end;
}
if (commit) {
unsigned cur_ref;
/*
* The current version is going to be replaced.
* Release the (likely last) reference to it from the
* DB itself and unlink it from the open list.
*/
&cur_ref);
if (cur_ref == 0) {
cur_version, link);
}
/*
* We're going to become the least open
* version.
*/
&cleanup_list);
} else {
/*
* Some other open version is the
* least version. We can't cleanup
* records that were changed in this
* version because the older versions
* may still be in use by an open
* version.
*
* We can, however, discard the
* changed records for things that
* we've added that didn't exist in
* prior versions.
*/
}
/*
* If the (soon to be former) current version
* isn't being used by anyone, we can clean
* it up.
*/
if (cur_ref == 0) {
link);
}
/*
* Become the current version.
*/
/*
* Keep the current version in the open list, and
* gain a reference for the DB itself (see the DB
* creation function below). This must be the only
* case where we need to increment the counter from
* zero and need to use isc_refcount_increment0().
*/
&cur_ref);
} else {
/*
* We're rolling back this transaction.
*/
}
} else {
/*
* There are no external or internal references
* to this version and it can be cleaned up.
*/
/*
* Find the version with the least serial
* number greater than ours.
*/
if (least_greater == NULL)
/*
* Is this the least open version?
*/
/*
* Yes. Install the new least open
* version.
*/
&cleanup_list);
} else {
/*
* Add any unexecuted cleanups to
* those of the least greater version.
*/
link);
}
}
/*
* Update the zone's secure status.
*/
if (cleanup_version != NULL) {
sizeof(*cleanup_version));
}
/*
*/
if (rollback) {
}
}
if (!EMPTY(cleanup_list)) {
/*
* We acquire a tree write lock here in order to make sure
* that stale nodes will be removed in decrement_reference().
* If we didn't have the lock, those nodes could miss the
* chance to be removed until the server stops. The write lock
* is expensive, but this event should be rare enough to justify
* the cost.
*/
changed = next_changed) {
/*
* This is a good opportunity to purge any dead nodes,
* so use it.
*/
if (rollback)
sizeof(*changed));
}
}
end:
}
/*
* Add the necessary magic for the wildcard name 'name'
* to be found in 'rbtdb'.
*
* In order for wildcard matching to work correctly in
* zone_find(), we must ensure that a node for the wildcarding
* level exists in the database, and has its 'find_callback'
* and 'wild' bits set.
*
* E.g. if the wildcard name is "*.sub.example." then we
* must ensure that "sub.example." exists and is marked as
* a wildcard level.
*/
static isc_result_t
unsigned int n;
n = dns_name_countlabels(name);
INSIST(n >= 2);
n--;
return (result);
return (ISC_R_SUCCESS);
}
static isc_result_t
unsigned int n, l, i;
n = dns_name_countlabels(name);
i = l + 1;
while (i < n) {
if (dns_name_iswildcard(&foundname)) {
if (result != ISC_R_SUCCESS)
return (result);
&node);
return (result);
}
i++;
}
return (ISC_R_SUCCESS);
}
static isc_result_t
{
if (result != ISC_R_SUCCESS) {
if (!create) {
if (result == DNS_R_PARTIALMATCH)
return (result);
}
/*
* It would be nice to try to upgrade the lock instead of
* unlocking then relocking.
*/
if (result == ISC_R_SUCCESS) {
#ifdef DNS_RBT_USEHASH
#else
#endif
if (dns_name_iswildcard(name)) {
if (result != ISC_R_SUCCESS) {
return (result);
}
}
} else if (result != ISC_R_EXISTS) {
return (result);
}
}
return (ISC_R_SUCCESS);
}
static isc_result_t
{
if (result != ISC_R_SUCCESS) {
if (!create) {
if (result == DNS_R_PARTIALMATCH)
return (result);
}
/*
* It would be nice to try to upgrade the lock instead of
* unlocking then relocking.
*/
if (result == ISC_R_SUCCESS) {
#ifdef DNS_RBT_USEHASH
#else
#endif
} else if (result != ISC_R_EXISTS) {
return (result);
}
} else
return (ISC_R_SUCCESS);
}
static isc_result_t
/*
* We only want to remember the topmost zone cut, since it's the one
* that counts, so we'll just continue if we've already found a
* zonecut.
*/
return (DNS_R_CONTINUE);
/*
* Look for an NS or DNAME rdataset active in our version.
*/
dname_header = NULL;
do {
/*
* Is this a "this rdataset doesn't
* exist" record?
*/
if (NONEXISTENT(header))
break;
} else
/*
* We've found an NS rdataset that
* isn't at the origin node. We check
* that they're not at the origin node,
* because otherwise we'd erroneously
* treat the zone top as if it were
* a delegation.
*/
}
}
}
}
/*
* Did we find anything?
*/
if (dname_header != NULL) {
/*
* Note that DNAME has precedence over NS if both exist.
*/
}
/*
* We increment the reference count on node to ensure that
* search->zonecut_rdataset will still be valid later.
*/
/*
* Since we've found a zonecut, anything beneath it is
* glue and is not subject to wildcard matching, so we
* may clear search->wild.
*/
/*
* If the caller does not want to find glue, then
* this is the best answer and the search should
* stop now.
*/
} else {
/*
* The search will continue beneath the zone cut.
* This may or may not be the best match. In case it
* is, we need to remember the node name.
*/
}
} else {
/*
* There is no zonecut at this node which is active in this
* version.
*
* If this is a "wild" node and the caller hasn't disabled
* wildcard matching, remember that we've seen a wild node
* in case we need to go searching for wildcard matches
* later on.
*/
}
return (result);
}
static inline void
{
unsigned char *raw; /* RDATASLAB */
/*
* Caller must be holding the node reader lock.
* XXXJT: technically, we need a writer lock, since we'll increment
* the header count below. However, since the actual counter value
* doesn't matter, we prioritize performance here. (We may want to
* use atomic increment when available).
*/
return;
/*
* Reset iterator state.
*/
rdataset->privateuint4 = 0;
/*
* Add noqname proof.
*/
/*
* Copy out re-signing information.
*/
} else
}
static inline isc_result_t
{
/*
* The caller MUST NOT be holding any node locks.
*/
/*
* If we have to set foundname, we do it before anything else.
* If we were to set foundname after we had set nodep or bound the
* rdataset, then we'd have to undo that work if dns_name_copy()
* failed. By setting foundname first, there's nothing to undo if
* we have trouble.
*/
if (result != ISC_R_SUCCESS)
return (result);
}
/*
* Note that we don't have to increment the node's reference
* count here because we're going to use the reference we
* already have in the search block.
*/
}
}
if (type == dns_rdatatype_dname)
return (DNS_R_DNAME);
return (DNS_R_DELEGATION);
}
static inline isc_boolean_t
{
unsigned char *raw; /* RDATASLAB */
/*
* No additional locking is required.
*/
/*
* Valid glue types are A, AAAA, A6. NS is also a valid glue type
* if it occurs at a zone cut, but is not valid below it.
*/
if (type == dns_rdatatype_ns) {
return (ISC_FALSE);
}
} else if (type != dns_rdatatype_a &&
type != dns_rdatatype_aaaa &&
type != dns_rdatatype_a6) {
return (ISC_FALSE);
}
#else
raw += 2;
#endif
while (count > 0) {
count--;
raw += 4;
#else
raw += 2;
#endif
/*
* XXX Until we have rdata structures, we have no choice but
* to directly access the rdata format.
*/
break;
}
}
return (valid);
}
static inline isc_boolean_t
{
if (result != ISC_R_SUCCESS)
break;
break;
}
break;
}
if (result == ISC_R_SUCCESS)
return (answer);
}
static inline isc_boolean_t
unsigned int n;
/*
* Find if qname is at or below a empty node.
* Use our own copy of the chain.
*/
do {
if (result != ISC_R_SUCCESS)
break;
break;
}
break;
if (result == ISC_R_SUCCESS)
if (result != ISC_R_SUCCESS)
if (result != ISC_R_SUCCESS)
break;
break;
}
break;
}
if (result == ISC_R_SUCCESS)
if (result != ISC_R_SUCCESS)
/*
* Remove the wildcard label to find the terminal name.
*/
n = dns_name_countlabels(wname);
do {
break;
}
/*
* Remove the left hand label.
*/
n = dns_name_countlabels(&rname);
return (answer);
}
static inline isc_result_t
{
unsigned int i, j;
/*
* Caller must be holding the tree lock and MUST NOT be holding
* any node locks.
*/
/*
* Examine each ancestor level. If the level's wild bit
* is set, then construct the corresponding wildcard name and
* search for it. If the wildcard node exists, and is active in
* this version, we're done. If not, then we next check to see
* if the ancestor is active in this version. If so, then there
* can be no possible wildcard match and again we're done. If not,
* continue the search.
*/
do {
/*
* First we try to figure out if this node is active in
* the search's version. We do this now, even though we
* may not need the information, because it simplifies the
* locking and code flow.
*/
break;
}
else
else
if (wild) {
/*
* Construct the wildcard name for this level.
*/
j = i;
while (result == ISC_R_SUCCESS && j != 0) {
j--;
&name,
NULL);
}
if (result != ISC_R_SUCCESS)
break;
if (result == ISC_R_SUCCESS) {
/*
* We have found the wildcard node. If it
* is active in the search's version, we're
* done.
*/
break;
}
wname)) {
return (ISC_R_NOTFOUND);
}
/*
* The wildcard node is active!
*
* Note: result is still ISC_R_SUCCESS
* so we don't have to set it.
*/
break;
}
} else if (result != ISC_R_NOTFOUND &&
result != DNS_R_PARTIALMATCH) {
/*
* An error has occurred. Bail out.
*/
break;
}
}
if (active) {
/*
* The level node is active. Any wildcarding
* present at higher levels has no
* effect and we're done.
*/
break;
}
if (i > 0) {
i--;
} else
} while (!done);
return (result);
}
static isc_boolean_t
{
unsigned char *raw; /* RDATASLAB */
#else
raw += 2;
#endif
raw += 4;
#else
raw += 2;
#endif
return (ISC_TRUE);
return (ISC_FALSE);
}
static inline isc_result_t
{
} else {
}
do {
if (result != ISC_R_SUCCESS)
return (result);
header = header_next) {
/*
* Look for an active, extant NSEC or RRSIG NSEC.
*/
do {
/*
* Is this a "this rdataset doesn't
* exist" record?
*/
if (NONEXISTENT(header))
break;
} else
/*
* We now know that there is at least one
* active rdataset at this node.
*/
break;
break;
}
}
}
if (!empty_node) {
{
/*
*
* Note: for this to really be the right
* NSEC record, it's essential that the NSEC
* records of any nodes obscured by a zone
* cut have been removed; we assume this is
* the case.
*/
if (result == ISC_R_SUCCESS) {
node);
}
rdataset);
node,
}
/*
* This node is active, but has no NSEC or
* RRSIG NSEC. That means it's glue or
* other obscured zone data that isn't
* relevant for our search. Treat the
* node as if it were empty and keep looking.
*/
} else {
/*
* We found an active node, but either the
* NSEC or the RRSIG NSEC is missing. This
* shouldn't happen.
*/
}
} else {
/*
* This node isn't active. We've got to keep
* looking.
*/
NULL);
}
if (result == ISC_R_SUCCESS) {
goto again;
}
}
/*
* If the result is ISC_R_NOMORE, then we got to the beginning of
* the database and didn't find a NSEC record. This shouldn't
* happen.
*/
if (result == ISC_R_NOMORE)
return (result);
}
static isc_result_t
{
/*
* We don't care about 'now'.
*/
/*
* If the caller didn't supply a version, attach to the current
* version.
*/
}
/*
* 'wild' will be true iff. we've matched a wildcard.
*/
/*
* Search down from the root of the tree. If, while going down, we
* encounter a callback node, zone_zonecut_callback() will search the
* rdatasets at the zone cut for active DNAME or NS rdatasets.
*/
if (result == DNS_R_PARTIALMATCH) {
goto tree_exit;
}
/*
* At least one of the levels in the search chain
* potentially has a wildcard. For each such level,
* we must see if there's a matching wildcard active
* in the current version.
*/
if (result == ISC_R_SUCCESS) {
if (result != ISC_R_SUCCESS)
goto tree_exit;
goto found;
}
else if (result != ISC_R_NOTFOUND)
goto tree_exit;
}
/*
* If we're here, then the name does not exist, is not
* beneath a zonecut, and there's no matching wildcard.
*/
{
if (result == ISC_R_SUCCESS)
} else
goto tree_exit;
} else if (result != ISC_R_SUCCESS)
goto tree_exit;
/*
* We have found a node whose name is the desired name, or we
* have matched a wildcard.
*/
/*
* If we're beneath a zone cut, we don't want to look for
* CNAMEs because they're not legitimate zone glue.
*/
} else {
/*
* The node may be a zone cut itself. If it might be one,
* make sure we check for it later.
*/
if (node->find_callback &&
}
/*
* Certain DNSSEC types are not subject to CNAME matching
* (RFC4035, section 2.5 and RFC3007).
*
* We don't check for RRSIG, because we don't store RRSIG records
* directly.
*/
/*
* We now go looking for rdata...
*/
nsecheader = NULL;
/*
* Look for an active, extant rdataset.
*/
do {
/*
* Is this a "this rdataset doesn't
* exist" record?
*/
if (NONEXISTENT(header))
break;
} else
/*
* We now know that there is at least one active
* rdataset at this node.
*/
/*
* Do special zone cut handling, if requested.
*/
if (maybe_zonecut &&
/*
* We increment the reference count on node to
* ensure that search->zonecut_rdataset will
* still be valid later.
*/
/*
* It is not clear if KEY should still be
* allowed at the parent side of the zone
* cut or not. It is needed for RFC3007
* validated updates.
*/
&& type != dns_rdatatype_nsec
&& type != dns_rdatatype_key) {
/*
* Glue is not OK, but any answer we
* could return would be glue. Return
* the delegation.
*/
break;
}
break;
}
/*
* If the NSEC3 record doesn't match the chain
* we are using behave as if it isn't here.
*/
goto partial_match;
/*
* If we found a type we were looking for,
* remember it.
*/
type == dns_rdatatype_any ||
cname_ok)) {
/*
* We've found the answer!
*/
cname_ok) {
/*
* We may be finding a CNAME instead
* of the desired type.
*
* If we've already got the CNAME RRSIG,
* use it, otherwise change sigtype
* so that we find it.
*/
else
sigtype =
}
/*
* If we've got all we need, end the search.
*/
break;
/*
* We've found the RRSIG rdataset for our
* target type. Remember it.
*/
/*
* If we've got all we need, end the search.
*/
break;
/*
* Remember a NSEC rdataset even if we're
* not specifically looking for it, because
* we might need it later.
*/
nsecheader = header;
/*
* If we need the NSEC rdataset, we'll also
* need its signature.
*/
} else if (cname_ok &&
/*
* If we get a CNAME match, we'll also need
* its signature.
*/
}
}
}
if (empty_node) {
/*
* We have an exact match for the name, but there are no
* active rdatasets in the desired version. That means that
* this node doesn't exist in the desired version, and that
* we really have a partial match.
*/
if (!wild) {
goto partial_match;
}
}
/*
* If we didn't find what we were looking for...
*/
/*
* We were trying to find glue at a node beneath a
* zone cut, but didn't.
*
* Return the delegation.
*/
goto tree_exit;
}
/*
* The desired type doesn't exist.
*/
/*
* The zone is secure but there's no NSEC,
* or the NSEC has no signature!
*/
if (!wild) {
goto node_exit;
}
if (result == ISC_R_SUCCESS)
goto tree_exit;
}
nsecheader == NULL)
{
/*
* There's no NSEC record, and we were told
* to find one.
*/
goto node_exit;
}
}
{
0, rdataset);
nsecsig, 0, sigrdataset);
}
if (wild)
goto node_exit;
}
/*
* We found what we were looking for, or we found a CNAME.
*/
type != dns_rdatatype_any &&
/*
* We weren't doing an ANY query and we found a CNAME instead
* of the type we were looking for, so we need to indicate
* that result to the caller.
*/
/*
* If we're beneath a zone cut, we must indicate that the
* result is glue, unless we're actually at the zone cut
* and the type is NSEC or KEY.
*/
/*
* It is not clear if KEY should still be
* allowed at the parent side of the zone
* cut or not. It is needed for RFC3007
* validated updates.
*/
if (type == dns_rdatatype_nsec ||
type == dns_rdatatype_nsec3 ||
else if (type == dns_rdatatype_any)
else
result = DNS_R_GLUE;
} else
result = DNS_R_GLUE;
/*
* We might have found data that isn't glue, but was occluded
* by a dynamic update. If the caller cares about this, they
* will have told us to validate glue.
*
* XXX We should cache the glue validity state!
*/
if (result == DNS_R_GLUE &&
goto tree_exit;
}
} else {
/*
* An ordinary successful query!
*/
}
if (!at_zonecut)
else
}
if (type != dns_rdatatype_any) {
}
if (wild)
/*
* If we found a zonecut but aren't going to use it, we have to
* let go of it.
*/
if (search.need_cleanup) {
}
if (close_version)
return (result);
}
static isc_result_t
{
return (ISC_R_NOTIMPLEMENTED);
}
static isc_result_t
/* XXX comment */
/*
* Keep compiler silent.
*/
/*
* Look for a DNAME or RRSIG DNAME rdataset.
*/
dname_header = NULL;
header_prev = NULL;
/*
* This rdataset is stale. If no one else is
* using the node, we can clean it up right
* now, otherwise we mark it as stale, and
* the node as dirty, so it will get cleaned
* up later.
*/
(locktype == isc_rwlocktype_write ||
/*
* We update the node's status only when we
* can get write access; otherwise, we leave
* others to this work. Periodical cleaning
* will eventually take the job as the last
* resort.
* We won't downgrade the lock, since other
* rdatasets are probably stale, too.
*/
if (dns_rbtnode_refcurrent(node) == 0) {
/*
* header->down can be non-NULL if the
* refcount has just decremented to 0
* but decrement_reference() has not
* performed clean_cache_node(), in
* which case we need to purge the
* stale headers first.
*/
mctx,
header);
if (header_prev != NULL)
header_prev->next =
else
header);
} else {
header->attributes |=
}
} else
} else
}
if (dname_header != NULL &&
/*
* We increment the reference count on node to ensure that
* search->zonecut_rdataset will still be valid later.
*/
} else
return (result);
}
static inline isc_result_t
{
unsigned int i;
/*
* Caller must be holding the tree lock.
*/
do {
/*
* Look for NS and RRSIG NS rdatasets.
*/
header_prev = NULL;
header = header_next) {
/*
* This rdataset is stale. If no one else is
* using the node, we can clean it up right
* now, otherwise we mark it as stale, and
* the node as dirty, so it will get cleaned
* up later.
*/
RBTDB_VIRTUAL) &&
(locktype == isc_rwlocktype_write ||
/*
* We update the node's status only
* when we can get write access.
*/
== 0) {
isc_mem_t *m;
m, header);
if (header_prev != NULL)
header_prev->next =
else
free_rdataset(rbtdb, m,
header);
} else {
header->attributes |=
}
} else
/*
* We've found an extant rdataset. See if
* we're interested in it.
*/
break;
break;
}
} else
}
/*
* If we have to set foundname, we do it before
* anything else. If we were to set foundname after
* we had set nodep or bound the rdataset, then we'd
* have to undo that work if dns_name_concatenate()
* failed. By setting foundname first, there's
* nothing to undo if we have trouble.
*/
while (result == ISC_R_SUCCESS && i > 0) {
i--;
&name);
result =
&name,
NULL);
}
if (result != ISC_R_SUCCESS) {
goto node_exit;
}
}
}
rdataset);
if (locktype != isc_rwlocktype_write) {
}
}
}
}
i--;
} else
} while (!done);
return (result);
}
static isc_result_t
{
do {
if (result != ISC_R_SUCCESS)
return (result);
header_prev = NULL;
header = header_next) {
/*
* This rdataset is stale. If no one else is
* using the node, we can clean it up right
* now, otherwise we mark it as stale, and the
* node as dirty, so it will get cleaned up
* later.
*/
(locktype == isc_rwlocktype_write ||
/*
* We update the node's status only
* when we can get write access.
*/
== 0) {
isc_mem_t *m;
m, header);
if (header_prev != NULL)
header_prev->next =
else
header);
} else {
header->attributes |=
}
} else
continue;
}
if (NONEXISTENT(header) ||
continue;
}
}
if (result != ISC_R_SUCCESS)
goto unlock_node;
now, sigrdataset);
} else if (!empty_node) {
} else
NULL);
return (result);
}
static isc_result_t
{
if (now == 0)
/*
* Search down from the root of the tree. If, while going down, we
* encounter a callback node, cache_zonecut_callback() will search the
* rdatasets at the zone cut for a DNAME rdataset.
*/
if (result == DNS_R_PARTIALMATCH) {
if (result == DNS_R_COVERINGNSEC)
goto tree_exit;
}
goto tree_exit;
} else {
goto tree_exit;
}
} else if (result != ISC_R_SUCCESS)
goto tree_exit;
/*
* Certain DNSSEC types are not subject to CNAME matching
* (RFC4035, section 2.5 and RFC3007).
*
* We don't check for RRSIG, because we don't store RRSIG records
* directly.
*/
/*
* We now go looking for rdata...
*/
header_prev = NULL;
/*
* This rdataset is stale. If no one else is using the
* node, we can clean it up right now, otherwise we
* mark it as stale, and the node as dirty, so it will
* get cleaned up later.
*/
(locktype == isc_rwlocktype_write ||
/*
* We update the node's status only when we
* can get write access.
*/
if (dns_rbtnode_refcurrent(node) == 0) {
header);
if (header_prev != NULL)
header_prev->next =
else
header);
} else {
header->attributes |=
}
} else
/*
* We now know that there is at least one active
* non-stale rdataset at this node.
*/
/*
* If we found a type we were looking for, remember
* it.
*/
(type == dns_rdatatype_any &&
/*
* We've found the answer.
*/
cname_ok &&
/*
* If we've already got the CNAME RRSIG,
* use it, otherwise change sigtype
* so that we find it.
*/
else
sigtype =
}
/*
* We've found the RRSIG rdataset for our
* target type. Remember it.
*/
/*
* We've found a negative cache entry.
*/
/*
* Remember a NS rdataset even if we're
* not specifically looking for it, because
* we might need it later.
*/
/*
* If we need the NS rdataset, we'll also
* need its signature.
*/
} else if (cname_ok &&
/*
* If we get a CNAME match, we'll also need
* its signature.
*/
}
} else
}
if (empty_node) {
/*
* We have an exact match for the name, but there are no
* extant rdatasets. That means that this node doesn't
* meaningfully exist, and that we really have a partial match.
*/
goto find_ns;
}
/*
* If we didn't find what we were looking for...
*/
((options & DNS_DBFIND_GLUEOK) == 0)) ||
((options & DNS_DBFIND_PENDINGOK) == 0))) {
/*
* If there is an NS rdataset at this node, then this is the
* deepest zone cut.
*/
}
rdataset);
}
goto node_exit;
}
/*
* Go find the deepest zone cut.
*/
goto find_ns;
}
/*
* We found what we were looking for, or we found a CNAME.
*/
}
/*
* We found a negative cache entry.
*/
else
type != dns_rdatatype_any &&
/*
* We weren't doing an ANY query and we found a CNAME instead
* of the type we were looking for, so we need to indicate
* that result to the caller.
*/
} else {
/*
* An ordinary successful query!
*/
}
result == DNS_R_NCACHENXRRSET) {
rdataset);
}
}
locktype != isc_rwlocktype_write) {
}
/*
* If we found a zonecut but aren't going to use it, we have to
* let go of it.
*/
if (search.need_cleanup) {
}
return (result);
}
static isc_result_t
{
unsigned int rbtoptions = DNS_RBTFIND_EMPTYDATA;
if (now == 0)
if ((options & DNS_DBFIND_NOEXACT) != 0)
/*
* Search down from the root of the tree.
*/
if (result == DNS_R_PARTIALMATCH) {
goto tree_exit;
} else if (result != ISC_R_SUCCESS)
goto tree_exit;
/*
* We now go looking for an NS rdataset at the node.
*/
header_prev = NULL;
/*
* This rdataset is stale. If no one else is using the
* node, we can clean it up right now, otherwise we
* mark it as stale, and the node as dirty, so it will
* get cleaned up later.
*/
(locktype == isc_rwlocktype_write ||
/*
* We update the node's status only when we
* can get write access.
*/
if (dns_rbtnode_refcurrent(node) == 0) {
header);
if (header_prev != NULL)
header_prev->next =
else
header);
} else {
header->attributes |=
}
} else
/*
* If we found a type we were looking for, remember
* it.
*/
/*
* Remember a NS rdataset even if we're
* not specifically looking for it, because
* we might need it later.
*/
/*
* If we need the NS rdataset, we'll also
* need its signature.
*/
}
} else
}
/*
* No NS records here.
*/
goto find_ns;
}
}
if (locktype != isc_rwlocktype_write) {
}
}
}
if (result == DNS_R_DELEGATION)
return (result);
}
static void
unsigned int refs;
}
static void
}
}
if (inactive) {
if (want_free) {
char buf[DNS_NAME_FORMATSIZE];
sizeof(buf));
else
"calling free_rbtdb(%s)", buf);
}
}
}
static isc_result_t
/*
* These are the category and module used by the cache cleaner.
*/
char printname[DNS_NAME_FORMATSIZE];
/*
* Caller must hold a tree lock.
*/
if (now == 0)
/*
* XXXDCL Could stand to have a better policy, like LRU.
*/
/*
* Note that 'log' can be true IFF rbtdb->overmem is also true.
* rbtdb->ovemem can currently only be true for cache databases
* -- hence all of the "overmem cache" log strings.
*/
if (log)
"overmem cache: %s %s",
sizeof(printname)));
}
/*
* We may not need write access, but this code path is not performance
* sensitive, so it should be okay to always lock as a writer.
*/
/*
* We don't check if refcurrent(rbtnode) == 0 and try
* to free like we do in cache_find(), because
* refcurrent(rbtnode) must be non-zero. This is so
* because 'node' is an argument to the function.
*/
if (log)
level, "overmem cache: stale %s",
} else if (force_expire) {
} else if (log) {
level, "overmem cache: "
"reprieve by RETAIN() %s",
}
"overmem cache: saved %s", printname);
return (ISC_R_SUCCESS);
}
static void
}
static void
do {
if (!first)
"\tserial = %lu, ttl = %u, "
"trust = %u, attributes = %u, "
"resign = %u\n",
}
} else
}
static isc_result_t
{
return (ISC_R_NOMEMORY);
else
return (ISC_R_SUCCESS);
}
static isc_result_t
{
if (rbtversion == NULL) {
}
now = 0;
if (covers == 0)
else
sigmatchtype = 0;
do {
/*
* Is this a "this rdataset doesn't
* exist" record?
*/
if (NONEXISTENT(header))
break;
} else
/*
* We have an active, extant rdataset. If it's a
* type we're looking for, remember it.
*/
break;
break;
}
}
}
}
if (close_version)
return (ISC_R_NOTFOUND);
return (ISC_R_SUCCESS);
}
static isc_result_t
{
if (now == 0)
if (covers == 0)
else
sigmatchtype = 0;
(locktype == isc_rwlocktype_write ||
/*
* We update the node's status only when we
* can get write access.
*/
/*
* We don't check if refcurrent(rbtnode) == 0
* and try to free like we do in cache_find(),
* because refcurrent(rbtnode) must be
* non-zero. This is so because 'node' is an
* argument to the function.
*/
}
}
}
}
return (ISC_R_NOTFOUND);
/*
* We found a negative cache entry.
*/
else
}
return (result);
}
static isc_result_t
{
unsigned int refs;
return (ISC_R_NOMEMORY);
now = 0;
if (rbtversion == NULL)
(dns_dbversion_t **) (void *)(&rbtversion));
else {
unsigned int refs;
&refs);
}
} else {
if (now == 0)
rbtversion = NULL;
}
return (ISC_R_SUCCESS);
}
static isc_boolean_t
/*
* The caller must hold the node lock.
*/
/*
* Look for CNAME and "other data" rdatasets active in our version.
*/
/*
* Look for an active extant CNAME.
*/
do {
/*
* Is this a "this rdataset doesn't
* exist" record?
*/
if (NONEXISTENT(header))
break;
} else
} else {
/*
* Look for active extant "other data".
*
* "Other data" is any rdataset whose type is not
* KEY, RRSIG KEY, NSEC, RRSIG NSEC or RRSIG CNAME.
*/
if (rdtype == dns_rdatatype_rrsig ||
if (rdtype != dns_rdatatype_nsec &&
rdtype != dns_rdatatype_key &&
rdtype != dns_rdatatype_cname) {
/*
* We've found a type that isn't
* NSEC, KEY, CNAME, or one of their
* signatures. Is it active and extant?
*/
do {
/*
* Is this a "this rdataset
* doesn't exist" record?
*/
if (NONEXISTENT(header))
break;
} else
}
}
}
if (cname && other_data)
return (ISC_TRUE);
return (ISC_FALSE);
}
static isc_result_t
return (result);
}
static isc_result_t
{
unsigned char *merged;
int idx;
/*
* Add an rdatasetheader_t to a node.
*/
/*
* Caller must be holding the node lock.
*/
if ((options & DNS_DBADD_MERGE) != 0) {
} else
if ((options & DNS_DBADD_FORCE) != 0)
else
/*
* We always add a changed record, even if no changes end up
* being made to this node, because it's harmless and
* simplifies the code.
*/
return (ISC_R_NOMEMORY);
}
}
negtype = 0;
if (rdtype == 0) {
/*
* We're adding a negative cache entry.
*/
if (covers == dns_rdatatype_any) {
/*
* We're adding an negative cache entry
* which covers all types (NXDOMAIN,
* NODATA(QTYPE=ANY)).
*
* We make all other data stale so that the
* only rdataset that can be found at this
* node is the negative cache entry.
*/
topheader->attributes |=
}
goto find_header;
}
} else {
/*
* We're adding something that isn't a
* negative cache entry. Look for an extant
* cache entry.
*/
break;
}
/*
* Found one.
*/
/*
* is more trusted.
*/
if (addedrdataset != NULL)
return (DNS_R_UNCHANGED);
}
/*
* The new rdataset is better. Expire the
*/
goto find_header;
}
}
}
break;
}
/*
* If header isn't NULL, we've found the right type. There may be
* IGNORE rdatasets between the top of the chain and the first real
* data. We skip over them.
*/
/*
* Deleting an already non-existent rdataset has no effect.
*/
if (header_nx && newheader_nx) {
return (DNS_R_UNCHANGED);
}
/*
* Trying to add an rdataset with lower trust to a cache DB
* has no effect, provided that the cache data isn't stale.
*/
if (addedrdataset != NULL)
return (DNS_R_UNCHANGED);
}
/*
* Don't merge if a nonexistent rdataset is involved.
*/
/*
* If 'merge' is ISC_TRUE, we'll try to create a new rdataset
* that is the union of 'newheader' and 'header'.
*/
if (merge) {
unsigned int flags = 0;
if ((options & DNS_DBADD_EXACT) != 0)
if ((options & DNS_DBADD_EXACTTTL) != 0 &&
if (result == ISC_R_SUCCESS)
(unsigned char *)header,
(unsigned char *)newheader,
(unsigned int)(sizeof(*newheader)),
if (result == ISC_R_SUCCESS) {
/*
* If 'header' has the same serial number as
* we do, we could clean it up now if we knew
* that our caller had no references to it.
* We don't know this, however, so we leave it
* alone. It will get cleaned up when
* clean_zone_node() runs.
*/
} else {
return (result);
}
}
/*
* Don't replace existing NS, A and AAAA RRsets
* in the cache if they are already exist. This
* prevents named being locked to old servers.
* Don't lower trust of existing record if the
* update is forced.
*/
!header_nx && !newheader_nx &&
dns_rdataslab_equalx((unsigned char *)header,
(unsigned char *)newheader,
(unsigned int)(sizeof(*newheader)),
/*
* Honour the new ttl if it is less than the
* older one.
*/
}
}
if (addedrdataset != NULL)
return (ISC_R_SUCCESS);
}
!header_nx && !newheader_nx &&
dns_rdataslab_equal((unsigned char *)header,
(unsigned char *)newheader,
(unsigned int)(sizeof(*newheader)))) {
/*
* Honour the new ttl if it is less than the
* older one.
*/
}
}
if (addedrdataset != NULL)
return (ISC_R_SUCCESS);
}
if (topheader_prev != NULL)
else
if (loading) {
/*
* There are no other references to 'header' when
* loading, so we MAY clean up 'header' now.
* Since we don't generate changed records when
* loading, we MUST clean up 'header' now.
*/
} else {
if (rbtversion == NULL) {
}
/*
* XXXMLG We don't check the return value
* here. If it fails, we will not do TTL
* based expiry on this node. However, we
* will do it on the LRU side, so memory
* will not leak... for long.
*/
}
} else {
/*
* No non-IGNORED rdatasets of the given type exist at
* this node.
*/
/*
* If we're trying to delete the type, don't bother.
*/
if (newheader_nx) {
return (DNS_R_UNCHANGED);
}
/*
* We have an list of rdatasets of the given type,
* but they're all marked IGNORE. We simply insert
* the new rdataset at the head of the list.
*
* Ignored rdatasets cannot occur during loading, so
* we INSIST on it.
*/
if (topheader_prev != NULL)
else
} else {
/*
* No rdatasets of the given type exist at the node.
*/
}
}
}
/*
* Check if the node now contains CNAME and other data.
*/
if (rbtversion != NULL &&
return (DNS_R_CNAMEANDOTHER);
if (addedrdataset != NULL)
return (ISC_R_SUCCESS);
}
static inline isc_boolean_t
{
if (type == dns_rdatatype_dname)
return (ISC_TRUE);
else
return (ISC_FALSE);
} else if (type == dns_rdatatype_dname ||
(type == dns_rdatatype_ns &&
return (ISC_TRUE);
return (ISC_FALSE);
}
static inline isc_result_t
{
isc_region_t r;
goto cleanup;
}
if (result != ISC_R_SUCCESS)
goto cleanup;
if (result != ISC_R_SUCCESS)
goto cleanup;
if (result != ISC_R_SUCCESS)
goto cleanup;
return (ISC_R_SUCCESS);
return(result);
}
static inline isc_result_t
{
isc_region_t r;
goto cleanup;
}
if (result != ISC_R_SUCCESS)
goto cleanup;
if (result != ISC_R_SUCCESS)
goto cleanup;
if (result != ISC_R_SUCCESS)
goto cleanup;
return (ISC_R_SUCCESS);
return(result);
}
static dns_dbmethods_t zone_methods;
static isc_result_t
{
if (rbtversion == NULL) {
if (now == 0)
} else
now = 0;
®ion,
sizeof(rdatasetheader_t));
if (result != ISC_R_SUCCESS)
return (result);
newheader->attributes = 0;
if (rbtversion != NULL) {
now = 0;
} else
} else {
if (result != ISC_R_SUCCESS) {
return (result);
}
}
if (result != ISC_R_SUCCESS) {
return (result);
}
}
}
/*
* If we're adding a delegation type (e.g. NS or DNAME for a zone,
* just DNAME for the cache), then we need to set the callback bit
* on the node.
*/
else
/*
* If we're adding a delegation type or the DB is a cache in an overmem
* state, hold an exclusive lock on the tree. In the latter case
* the lock does not necessarily have to be acquired but it will help
* purge stale entries more effectively.
*/
}
}
if (tree_locked)
/*
* If we've been holding a write lock on the tree just for
* cleaning, we can release it now. However, we still need the
* node lock.
*/
if (tree_locked && !delegating) {
}
}
addedrdataset, now);
if (tree_locked)
/*
* Update the zone's secure status. If version is non-NULL
* this is defered until closeversion() is called.
*/
return (result);
}
static isc_result_t
{
unsigned char *subresult;
®ion,
sizeof(rdatasetheader_t));
if (result != ISC_R_SUCCESS)
return (result);
newheader->attributes = 0;
} else
return (ISC_R_NOMEMORY);
}
break;
}
/*
* If header isn't NULL, we've found the right type. There may be
* IGNORE rdatasets between the top of the chain and the first real
* data. We skip over them.
*/
unsigned int flags = 0;
if ((options & DNS_DBSUB_EXACT) != 0) {
}
if (result == ISC_R_SUCCESS)
(unsigned char *)header,
(unsigned char *)newheader,
(unsigned int)(sizeof(*newheader)),
if (result == ISC_R_SUCCESS) {
/*
* We have to set the serial since the rdataslab
* subtraction routine copies the reserved portion of
* header, not newheader.
*/
/*
* XXXJT: dns_rdataslab_subtract() copied the pointers
* to additional info. We need to clear these fields
* to avoid having duplicated references.
*/
} else if (result == DNS_R_NXRRSET) {
/*
* This subtraction would remove all of the rdata;
* add a nonexistent header instead.
*/
goto unlock;
}
} else {
goto unlock;
}
/*
* If we're here, we want to link newheader in front of
* topheader.
*/
if (topheader_prev != NULL)
else
} else {
/*
* The rdataset doesn't exist, so we don't need to do anything
* to satisfy the deletion request.
*/
if ((options & DNS_DBSUB_EXACT) != 0)
else
}
/*
* Update the zone's secure status. If version is non-NULL
* this is defered until closeversion() is called.
*/
return (result);
}
static isc_result_t
{
if (type == dns_rdatatype_any)
return (ISC_R_NOTIMPLEMENTED);
return (ISC_R_NOTIMPLEMENTED);
return (ISC_R_NOMEMORY);
if (rbtversion != NULL)
else
/*
* Update the zone's secure status. If version is non-NULL
* this is defered until closeversion() is called.
*/
return (result);
}
static isc_result_t
/*
* This routine does no node locking. See comments in
* 'load' below for more information on loading and
* locking.
*/
/*
* SOA records are only allowed at top of zone.
*/
return (DNS_R_NOTZONETOP);
if (dns_name_iswildcard(name)) {
/*
* NS record owners cannot legally be wild cards.
*/
return (DNS_R_INVALIDNS);
/*
* NSEC3 record owners cannot legally be wild cards.
*/
return (DNS_R_INVALIDNSEC3);
if (result != ISC_R_SUCCESS)
return (result);
}
if (result == ISC_R_SUCCESS)
} else {
if (result == ISC_R_SUCCESS)
}
return (result);
if (result != ISC_R_EXISTS) {
#ifdef DNS_RBT_USEHASH
#else
#endif
}
®ion,
sizeof(rdatasetheader_t));
if (result != ISC_R_SUCCESS)
return (result);
newheader->attributes = 0;
} else
if (result == ISC_R_SUCCESS &&
else if (result == DNS_R_UNCHANGED)
return (result);
}
static isc_result_t
return (ISC_R_NOMEMORY);
else
== 0);
return (ISC_R_SUCCESS);
}
static isc_result_t
/*
* If there's a KEY rdataset at the zone origin containing a
* zone key, we consider the zone secure.
*/
return (ISC_R_SUCCESS);
}
static isc_result_t
filename, masterformat));
}
static void
}
}
static isc_boolean_t
return (secure);
}
static isc_boolean_t
return (dnssec);
}
static unsigned int
unsigned int count;
return (count);
}
static void
}
static isc_boolean_t
return (ISC_FALSE);
}
static isc_result_t
/* Note that the access to origin_node doesn't require a DB lock */
} else {
}
return (result);
}
static isc_result_t
{
if (rbtversion == NULL)
if (rbtversion->havensec3) {
}
if (salt_length != NULL)
if (iterations != NULL)
}
return (result);
}
static isc_result_t
header--;
if (header->heap_index != 0) {
if (resign == 0) {
header->heap_index);
header->heap_index = 0;
header->heap_index);
else
header->heap_index);
}
return (result);
}
static isc_result_t
{
unsigned int i;
for (i = 0; i < rbtdb->node_lock_count; i++) {
continue;
}
goto unlock;
return (result);
}
static void
{
header--;
/*
* Delete from heap and save to re-signed list so that it can
* be restored if we backout of this change.
*/
header->heap_index = 0;
}
static dns_stats_t *
return (rbtdb->rrsetstats);
}
static dns_dbmethods_t zone_methods = {
dump,
NULL,
};
static dns_dbmethods_t cache_methods = {
dump,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
};
#ifdef DNS_RBTDB_VERSION64
#else
#endif
{
int i;
isc_boolean_t (*sooner)(void *, void *);
/* Keep the compiler happy. */
return (ISC_R_NOMEMORY);
if (type == dns_dbtype_cache) {
} else if (type == dns_dbtype_stub) {
} else
if (result != ISC_R_SUCCESS)
goto cleanup_rbtdb;
if (result != ISC_R_SUCCESS)
goto cleanup_lock;
if (rbtdb->node_lock_count == 0) {
else
}
sizeof(rbtdb_nodelock_t));
goto cleanup_tree_lock;
}
if (result != ISC_R_SUCCESS)
goto cleanup_node_locks;
sizeof(rdatasetheaderlist_t));
goto cleanup_rrsetstats;
}
for (i = 0; i < (int)rbtdb->node_lock_count; i++)
} else
/*
* Create the heaps.
*/
sizeof(isc_heap_t *));
goto cleanup_rdatasets;
}
for (i = 0; i < (int)rbtdb->node_lock_count; i++)
for (i = 0; i < (int)rbtdb->node_lock_count; i++) {
if (result != ISC_R_SUCCESS)
goto cleanup_heaps;
}
/*
* Create deadnode lists.
*/
sizeof(rbtnodelist_t));
goto cleanup_heaps;
}
for (i = 0; i < (int)rbtdb->node_lock_count; i++)
for (i = 0; i < (int)(rbtdb->node_lock_count); i++) {
if (result == ISC_R_SUCCESS) {
if (result != ISC_R_SUCCESS)
}
if (result != ISC_R_SUCCESS) {
while (i-- > 0) {
}
goto cleanup_deadnodes;
}
}
/*
* Attach to the mctx. The database will persist so long as there
* are references to it, and attaching to the mctx ensures that our
* mctx won't disappear out from under us.
*/
/*
* Must be initalized before free_rbtdb() is called.
*/
/*
* Make a copy of the origin name.
*/
if (result != ISC_R_SUCCESS) {
return (result);
}
/*
* Make the Red-Black Trees.
*/
if (result != ISC_R_SUCCESS) {
return (result);
}
if (result != ISC_R_SUCCESS) {
return (result);
}
/*
* In order to set the node callback bit correctly in zone databases,
* we need to know if the node has the origin name of the zone.
* In loading_addrdataset() we could simply compare the new name
* to the origin name, but this is expensive. Also, we don't know the
* node name in addrdataset(), so we need another way of knowing the
* zone's top.
*
* We now explicitly create a node for the zone's origin, and then
* we simply remember the node's address. This is safe, because
* the top-of-zone node can never be deleted, nor can its address
* change.
*/
&rbtdb->origin_node);
if (result != ISC_R_SUCCESS) {
return (result);
}
/*
* We need to give the origin node the right locknum.
*/
#ifdef DNS_RBT_USEHASH
#else
#endif
}
/*
* Misc. Initialization.
*/
if (result != ISC_R_SUCCESS) {
return (result);
}
rbtdb->attributes = 0;
/*
* Version Initialization.
*/
return (ISC_R_NOMEMORY);
}
/*
* Keep the current version in the open list so that list operation
* won't happen in normal lookup operations.
*/
return (ISC_R_SUCCESS);
for (i = 0 ; i < (int)rbtdb->node_lock_count ; i++)
}
sizeof(rdatasetheaderlist_t));
return (result);
}
/*
* Slabbed Rdataset Methods
*/
static void
}
static isc_result_t
unsigned int count;
if (count == 0) {
return (ISC_R_NOMORE);
}
else
#endif
raw += 2;
/*
* The privateuint4 field is the number of rdata beyond the
* cursor position, so we decrement the total count by one
* before storing it.
*
* If DNS_RDATASETATTR_LOADORDER is not set 'raw' points to the
* first record. If DNS_RDATASETATTR_LOADORDER is set 'raw' points
* to the first entry in the offset table.
*/
count--;
return (ISC_R_SUCCESS);
}
static isc_result_t
unsigned int count;
unsigned int length;
unsigned char *raw; /* RDATASLAB */
if (count == 0)
return (ISC_R_NOMORE);
count--;
/*
* Skip forward one record (length + 4) or one offset (4).
*/
#endif
}
#else
#endif
return (ISC_R_SUCCESS);
}
static void
unsigned int offset;
#endif
unsigned int length;
isc_region_t r;
unsigned int flags = 0;
/*
* Find the start of the record if not already in private5
* then skip the length and order fields.
*/
}
#endif
raw += 4;
#else
raw += 2;
#endif
if (*raw & DNS_RDATASLAB_OFFLINE)
length--;
raw++;
}
}
static void
/*
* Reset iterator state.
*/
target->privateuint4 = 0;
}
static unsigned int
unsigned int count;
return (count);
}
static isc_result_t
{
cloned_node = NULL;
nsec->privateuint4 = 0;
cloned_node = NULL;
nsecsig->privateuint4 = 0;
return (ISC_R_SUCCESS);
}
static isc_result_t
{
cloned_node = NULL;
nsec->privateuint4 = 0;
cloned_node = NULL;
nsecsig->privateuint4 = 0;
return (ISC_R_SUCCESS);
}
/*
* Rdataset Iterator Methods
*/
static void
sizeof(*rbtiterator));
}
static isc_result_t
serial = 1;
} else {
now = 0;
}
do {
/*
* Is this a "this rdataset doesn't exist"
* record? Or is it too old in the cache?
*
* Note: unlike everywhere else, we
* check for now > header->rdh_ttl instead
* of now >= header->rdh_ttl. This allows
* ANY and RRSIG queries for 0 TTL
* rdatasets to work.
*/
if (NONEXISTENT(header) ||
break;
} else
break;
}
return (ISC_R_NOMORE);
return (ISC_R_SUCCESS);
}
static isc_result_t
return (ISC_R_NOMORE);
serial = 1;
} else {
now = 0;
}
if (rdtype == 0) {
} else
/*
* If not walking back up the down list.
*/
do {
/*
* Is this a "this rdataset doesn't
* exist" record?
*
* Note: unlike everywhere else, we
* check for now > header->ttl instead
* of now >= header->ttl. This allows
* ANY and RRSIG queries for 0 TTL
* rdatasets to work.
*/
if ((header->attributes &
RDATASET_ATTR_NONEXISTENT) != 0 ||
break;
} else
break;
}
}
return (ISC_R_NOMORE);
return (ISC_R_SUCCESS);
}
static void
rdataset);
}
/*
* Database Iterator Methods
*/
static inline void
return;
}
static inline void
return;
}
static void
int i;
/*
* Note that "%d node of %d in tree" can report things like
* "flush_deletions: 59 nodes of 41 in tree". This means
* That some nodes appear on the deletions list more than
* once. Only the last occurence will actually be deleted.
*/
"flush_deletions: %d nodes of %d in tree",
}
}
if (was_read_locked) {
} else {
}
}
}
static inline void
}
static void
} else
dns_db_detach(&db);
}
static isc_result_t
} else {
origin);
}
}
if (result == ISC_R_SUCCESS) {
}
} else {
}
return (result);
}
/*
 * NOTE(review): truncated — parallel in surviving shape to the
 * previous positioning method; signature and most of the body are
 * missing from this excerpt.
 */
static isc_result_t
}
}
if (result == ISC_R_SUCCESS) {
}
} else {
}
return (result);
}
/*
 * NOTE(review): truncated — a lookup that retries across two chains
 * ("Stay on main chain if not found on either chain"), presumably a
 * seek method. The disabled #else branch is preserved exactly as-is.
 */
static isc_result_t
} else {
/*
* Stay on main chain if not found on either chain.
*/
if (result == DNS_R_PARTIALMATCH) {
if (result == ISC_R_SUCCESS) {
}
}
}
#if 1
if (result == ISC_R_SUCCESS) {
if (result == ISC_R_SUCCESS) {
}
} else if (result == DNS_R_PARTIALMATCH) {
}
#else
if (tresult == ISC_R_SUCCESS) {
} else {
}
} else
#endif
return (result);
}
/*
 * NOTE(review): truncated — the surviving lines branch on
 * ISC_R_NOTFOUND and ISC_R_SUCCESS, but the actions taken were
 * dropped from this excerpt.
 */
static isc_result_t
if (result == ISC_R_NOTFOUND)
}
}
if (result == ISC_R_SUCCESS)
return (result);
}
/*
 * NOTE(review): truncated — same surviving shape as the previous
 * method (branch on ISC_R_NOTFOUND/ISC_R_SUCCESS); actions missing.
 */
static isc_result_t
if (result == ISC_R_NOTFOUND)
}
}
if (result == ISC_R_SUCCESS)
return (result);
}
/*
 * NOTE(review): truncated — the deletion-array flush and expirenode()
 * comments suggest this is the iterator "current" method for a cache
 * DB; most statements are missing from this excerpt.
 */
static isc_result_t
{
if (result != ISC_R_SUCCESS)
return (result);
} else
/*
* If the deletion array is full, flush it before trying
* to expire the current node. The current node can't be
* fully deleted while the iteration cursor is still on it.
*/
/*
* expirenode() currently always returns success.
*/
unsigned int refs;
}
}
return (result);
}
/*
 * NOTE(review): truncated — two success returns survive; the
 * conditions guarding them were dropped from this excerpt.
 */
static isc_result_t
return (ISC_R_SUCCESS);
}
return (ISC_R_SUCCESS);
}
/* NOTE(review): body removed by the excerpt — nothing to document. */
static isc_result_t
}
/*%
* Additional cache routines.
*/
/*
 * NOTE(review): truncated — dispatches on an additional-cache 'type'
 * (INSIST(0) aborts on unknown values) and can answer ISC_R_NOTFOUND;
 * presumably the additional-cache lookup routine. The case labels and
 * lookup statements were dropped from this excerpt.
 */
static isc_result_t
{
unsigned int count;
unsigned int total_count;
switch (type) {
break;
break;
break;
default:
INSIST(0);
}
if (type != dns_rdatasetadditional_fromcache)
return (ISC_R_NOTFOUND);
}
return (ISC_R_NOTFOUND);
}
return (result);
}
/*
 * NOTE(review): truncated helper — the caller must hold the entry
 * lock (per the surviving comment); switches on a type with INSIST(0)
 * on unknown values. Its exact identity cannot be determined from
 * this excerpt.
 */
static void
unsigned int count;
/*
* The caller must hold the entry lock.
*/
break;
break;
default:
INSIST(0);
}
} else
}
/* NOTE(review): body removed by the excerpt — nothing to document. */
static void
{
}
/*
 * NOTE(review): truncated — builds a new additional-cache entry, swaps
 * it in (delaying old-entry cleanup to avoid needing a node lock), and
 * uses goto-based cleanup on failure; presumably the setadditional
 * routine. Most statements are missing from this excerpt.
 */
static isc_result_t
{
unsigned int total_count, count;
if (type == dns_rdatasetadditional_fromcache)
return (ISC_R_SUCCESS);
return (ISC_R_NOMEMORY);
if (result != ISC_R_SUCCESS)
goto fail;
/* Set cache data in the new entry. */
if (result != ISC_R_SUCCESS)
goto fail;
switch (type) {
break;
break;
default:
INSIST(0);
}
unsigned int i;
sizeof(acachectl_t));
goto fail;
}
for (i = 0; i < total_count; i++) {
}
}
switch (type) {
break;
break;
default:
INSIST(0);
}
/*
* Swap the entry. Delay cleaning-up the old entry since
* it would require a node lock.
*/
}
}
return (ISC_R_SUCCESS);
fail:
&newcbarg);
} else {
sizeof(*newcbarg));
}
}
return (result);
}
/*
 * NOTE(review): truncated — no-op success for the "fromcache" type,
 * otherwise type-dispatched cleanup that can report ISC_R_NOTFOUND;
 * presumably the putadditional routine. Interior lines are missing.
 */
static isc_result_t
{
unsigned int total_count, count;
if (type == dns_rdatasetadditional_fromcache)
return (ISC_R_SUCCESS);
switch (type) {
break;
break;
default:
INSIST(0);
}
return (ISC_R_NOTFOUND);
}
return (ISC_R_NOTFOUND);
}
}
return (ISC_R_SUCCESS);
}
/*%
* Routines for LRU-based cache management.
*/
/*%
* See if a given cache entry that is being reused needs to be updated
* in the LRU-list. For the non-threaded case this is always true unless the
* entry has already been marked as stale; for the threaded case, updating
* the entry every time it is referenced might be expensive because it requires
* a node write lock. Thus this function returns true if the entry has not been
* updated for some period of time. We differentiate the NS or glue address
* case and the others since experiments have shown that the former tends to be
* accessed relatively infrequently and the cost of cache miss is higher
* (e.g., missing NS records may cause external queries at a higher level
* zone, involving more transactions).
*
* Caller must hold the node (read or write) lock.
*/
/*
 * NOTE(review): truncated — per the block comment immediately above,
 * this decides whether a reused cache entry's LRU timestamp should be
 * refreshed. Threaded builds rate-limit updates (60s for glue, 5 min
 * for other records); the non-threaded build always returns ISC_TRUE.
 * The attribute test and time comparisons were dropped from this
 * excerpt.
 */
static inline isc_boolean_t
if ((header->attributes &
return (ISC_FALSE);
#ifdef ISC_PLATFORM_USETHREADS
/*
* Glue records are updated if at least 60 seconds have passed
* since the previous update time.
*/
}
/* Other records are updated if 5 minutes have passed. */
#else
return (ISC_TRUE);
#endif
}
/*%
* Update the timestamp of a given cache entry and move it to the head
* of the corresponding LRU list.
*
* Caller must hold the node (write) lock.
*
* Note that the we do NOT touch the heap here, as the TTL has not changed.
*/
/*
 * NOTE(review): truncated — per the preceding comment, this stamps the
 * entry and moves it to the head of its LRU list (heap untouched, TTL
 * unchanged); caller holds the node write lock. The actual statements
 * were dropped from this excerpt.
 */
static void
{
/* To be checked: can we really assume this? XXXMLG */
}
/*%
* Examine the tail entry of the LRU list to see if it expires or is stale
* (unused for some period). If so, it's marked as stale and possibly freed.
* If the DB is in the overmem condition, the tail and the next to tail entries
* will be unconditionally marked. We don't care about a race on 'overmem'
* at the risk of causing some collateral damage or a small delay in starting
* cleanup, so we don't bother to lock rbtdb.
*
* Caller must hold the node (write) lock.
*
* We can get away with locking only one node here, since it will lock all
* other nodes in that lock pool bucket.
*/
/*
 * NOTE(review): truncated — per the preceding block comment, this is
 * the LRU purge routine: it marks TTL-expired tail entries stale and,
 * when the DB is over memory, also evicts from the LRU tail. Only the
 * victim counter and in-body comments survive in this excerpt; the
 * list traversal and reference manipulation are missing.
 */
static void
{
int victims = 0;
/*
* Check for TTL-based expiry.
*/
victims++;
/*
* If no one else is using the node, we can
* clean it up now. We first need to gain
* a new reference to the node to meet a
* requirement of decrement_reference().
*/
}
}
/*
* If we are over memory, delete the end entry from the LRU.
*/
victims++;
/*
* If no one else is using the node, we can
* clean it up now. We first need to gain
* a new reference to the node to meet a
* requirement of decrement_reference().
*/
}
}
}