/*
* Copyright 2001-2003 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 1988, 1989, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgment:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)radix.c 8.4 (Berkeley) 11/2/94
*
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* Routines to build and maintain radix trees for routing lookups.
*/
#include "defs.h"
static boolean_t rn_refines(void *, void *);
static struct radix_node
*rn_addroute(void *, void *, struct radix_node_head *,
struct radix_node [2]),
*rn_delete(void *, void *, struct radix_node_head *),
	*rn_insert(void *, struct radix_node_head *, boolean_t *,
	    struct radix_node [2]),
*rn_match(void *, struct radix_node_head *),
*rn_search(void *, struct radix_node *),
*rn_search_m(void *, struct radix_node *, void *);
#ifdef DEBUG
#define	DBGMSG(x)	msglog x
#else
#define	DBGMSG(x)	(void) 0
#endif
/*
 * The data structure for the keys is a radix tree with one-way
 * branching removed.  The index rn_b at an internal node n represents a bit
* position to be tested. The tree is arranged so that all descendants
* of a node n have keys whose bits all agree up to position rn_b - 1.
* (We say the index of n is rn_b.)
*
* There is at least one descendant which has a one bit at position rn_b,
* and at least one with a zero there.
*
 * A route is determined by a pair of key and mask.  We require that the
 * bit-wise logical AND of the key and mask be the key.
 * We define the index of the route associated with the mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
*
 * We say a mask is normal if every bit past the index of the mask is 0.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, this implies that the last few bits of k
 * before bit rn_b are all 0 (and hence the same holds for every descendant
 * of n), so the route applies to all descendants of the node as well.
*
* Similar logic shows that a non-normal mask m such that
* index(m) <= index(n) could potentially apply to many children of n.
* Thus, for each non-host route, we attach its mask to a list at an internal
* node as high in the tree as we can go.
*
 * The present version of the code makes use of normal routes in short-
 * circuiting an explicit mask-and-compare operation when testing whether
* a key satisfies a normal route, and also in remembering the unique leaf
* that governs a subtree.
*/
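/*
 * Illustrative sketch, not part of the original file: the key/mask
 * conventions described above, written out for plain byte arrays.  The
 * real keys in this file carry a leading length byte and sockaddr-style
 * framing; the function names and parameters below are hypothetical.
 */
#if 0
/* A route (key, mask) is well formed only when (key & mask) == key. */
static boolean_t
example_key_obeys_mask(const uint8_t *key, const uint8_t *mask, uint_t len)
{
	uint_t i;

	for (i = 0; i < len; i++)
		if ((key[i] & mask[i]) != key[i])
			return (_B_FALSE);
	return (_B_TRUE);
}

/*
 * The index of a mask is the first bit number that is 0, with bit 0 the
 * high-order bit of the first byte.  The mask is "normal" when no one
 * bits appear past that index (i.e. it is a contiguous prefix mask).
 */
static int
example_mask_index(const uint8_t *mask, uint_t len, boolean_t *normal)
{
	int b = len * 8;	/* all ones: index is one past the last bit */
	int bit, seen_zero = 0;
	uint_t i;

	*normal = _B_TRUE;
	for (i = 0; i < len; i++) {
		for (bit = 7; bit >= 0; bit--) {
			if (mask[i] & (1 << bit)) {
				if (seen_zero)
					*normal = _B_FALSE;
			} else if (!seen_zero) {
				seen_zero = 1;
				b = i * 8 + (7 - bit);
			}
		}
	}
	return (b);
}
#endif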
static struct radix_node *
rn_search(void *v_arg, struct radix_node *head)
{
	struct radix_node *x;
	uint8_t *v;

	/* descend from the head, testing the indicated bit at each node */
	for (x = head, v = v_arg; x->rn_b >= 0; ) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return (x);
}
static struct radix_node *
rn_search_m(void *v_arg, struct radix_node *head, void *m_arg)
{
	struct radix_node *x;
	uint8_t *v = v_arg, *m = m_arg;

	/* like rn_search(), but only branch right where the mask has a bit */
	for (x = head; x->rn_b >= 0; ) {
		if ((x->rn_bmask & m[x->rn_off]) &&
		    (x->rn_bmask & v[x->rn_off]))
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return (x);
}
/*
* Returns true if there are no bits set in n_arg that are zero in
* m_arg and the masks aren't equal. In other words, it returns true
* when m_arg is a finer-granularity netmask -- it represents a subset
* of the destinations implied by n_arg.
*/
static boolean_t
rn_refines(void *m_arg, void *n_arg)
{
while (n < lim) {
if (*n & ~(*m))
return (_B_FALSE);
		if (*n++ != *m++)
			masks_are_equal = _B_FALSE;
	}
return (!masks_are_equal);
}
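/*
 * Worked example, not part of the original file: with a minimal
 * length-prefixed mask encoding (byte 0 is the total length, mask bytes
 * follow), 255.255.255.0 refines 255.255.0.0 -- every destination the
 * finer mask selects is also selected by the coarser one, and the two
 * masks differ.  The real in.routed keys use a fuller sockaddr-style
 * layout; the arrays and function name below are only a sketch.
 */
#if 0
static void
example_refines_usage(void)
{
	uint8_t finer[] = { 5, 255, 255, 255, 0 };	/* /24 */
	uint8_t coarser[] = { 5, 255, 255, 0, 0 };	/* /16 */

	/* _B_TRUE:  finer selects a subset of what coarser selects */
	(void) rn_refines(finer, coarser);
	/* _B_FALSE: the coarser mask does not refine the finer one */
	(void) rn_refines(coarser, finer);
	/* _B_FALSE: equal masks never refine each other */
	(void) rn_refines(finer, finer);
}
#endif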
static struct radix_node *
rn_lookup(void *v_arg, void *m_arg, struct radix_node_head *head)
{
	struct radix_node *x;
	uint8_t *netmask = NULL;

	if (m_arg) {
		if ((x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_off)) ==
		    NULL) {
			DBGMSG(("rn_lookup: failed to add mask"));
			return (NULL);
		}
		netmask = x->rn_key;
	}
	x = rn_match(v_arg, head);
	if (x && netmask) {
		while (x && x->rn_mask != netmask)
			x = x->rn_dupedkey;
	}
	return (x);
}
/*
* Returns true if address 'trial' has no bits differing from the
* leaf's key when compared under the leaf's mask. In other words,
* returns true when 'trial' matches leaf.
*/
static boolean_t
rn_satisfies_leaf(uint8_t *trial,
    struct radix_node *leaf,
    int skip)
{
return (_B_FALSE);
return (_B_TRUE);
}
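/*
 * Illustrative sketch, not part of the original file: the core of the
 * test above is a masked comparison -- 'trial' satisfies the leaf when
 * (trial ^ key) & mask is zero over every byte being compared.  The
 * function name and parameters here are hypothetical.
 */
#if 0
static boolean_t
example_masked_equal(const uint8_t *trial, const uint8_t *key,
    const uint8_t *mask, uint_t len)
{
	uint_t i;

	for (i = 0; i < len; i++)
		if ((trial[i] ^ key[i]) & mask[i])
			return (_B_FALSE);	/* bits differ under the mask */
	return (_B_TRUE);
}
#endif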
static struct radix_node *
rn_match(void *v_arg, struct radix_node_head *head)
{
/*
 * Open code rn_search(v, top) to avoid the overhead of an extra
 * subroutine call.
*/
for (; t->rn_b >= 0; ) {
t = t->rn_r;
else
t = t->rn_l;
}
/*
* This extra grot is in case we are explicitly asked
* to look up the default. Ugh!
* Or 255.255.255.255
*
* In this case, we have a complete match of the key. Unless
* the node is one of the roots, we are finished.
 * If it is the zeros root, then take what we have, preferring
* any real data.
* If it is the ones root, then pretend the target key was followed
* by a byte of zeros.
*/
return (t); /* not a root */
if (t->rn_dupedkey) {
t = t->rn_dupedkey;
return (t); /* have some real data */
}
if (*(cp-1) == 0)
return (t); /* not the ones root */
b = 0; /* fake a zero after 255.255.255.255 */
goto calculated_differing_bit;
b--;
matched_off = cp - v;
b += matched_off << 3;
rn_b = -1 - b;
/*
* If there is a host route in a duped-key chain, it will be first.
*/
t = t->rn_dupedkey;
for (; t; t = t->rn_dupedkey) {
/*
* Even if we don't match exactly as a host,
* we may match if the leaf we wound up at is
* a route to a net.
*/
if (t->rn_flags & RNF_NORMAL) {
return (t);
} else if (rn_satisfies_leaf(v, t, matched_off)) {
return (t);
}
}
t = saved_t;
/* start searching up the tree */
do {
struct radix_mask *m;
t = t->rn_p;
/*
* If non-contiguous masks ever become important
* we can restore the masking and open coding of
* the search and satisfaction test and put the
* calculation of "off" back before the "do".
*/
do {
if (m->rm_flags & RNF_NORMAL) {
return (m->rm_leaf);
} else {
x = rn_search_m(v, t, m->rm_mask);
while (x != NULL &&
x = x->rn_dupedkey;
if (x != NULL &&
rn_satisfies_leaf(v, x, off))
return (x);
}
}
} while (t != top);
return (NULL);
}
#ifdef RN_DEBUG
int rn_nodenum;
int rn_saveinfo;
#endif
static struct radix_node *
rn_newpair(void *v, uint_t b, struct radix_node nodes[2])
{
t->rn_b = b;
t->rn_off = b >> 3;
#ifdef RN_DEBUG
t->rn_info = rn_nodenum++;
#endif
return (t);
}
static struct radix_node *
rn_insert(void *v_arg, struct radix_node_head *head, boolean_t *dupentry,
    struct radix_node nodes[2])
{
/*
* Find first bit at which v and t->rn_key differ
*/
{
goto found_differing_byte;
/* handle adding 255.255.255.255 */
return (t);
}
cmp_res >>= 1;
}
{
struct radix_node *p, *x = top;
cp = v;
do {
p = x;
x = x->rn_r;
else
x = x->rn_l;
} while (b > (unsigned)x->rn_b);
#ifdef RN_DEBUG
if (rn_debug) {
msglog("rn_insert: Going In:");
traverse(p);
}
#endif
p->rn_l = t;
else
p->rn_r = t;
x->rn_p = t; /* frees x, p as temp vars below */
t->rn_p = p;
t->rn_r = x;
} else {
t->rn_l = x;
}
#ifdef RN_DEBUG
if (rn_debug) {
msglog("rn_insert: Coming Out:");
traverse(p);
}
#endif
}
return (tt);
}
static struct radix_node *
rn_addmask(void *n_arg, uint_t search, uint_t skip)
{
struct radix_node *x;
static int last_zeroed = 0;
if (skip == 0)
skip = 1;
return (mask_rnhead->rnh_nodes);
if (skip > 1)
/*
* Trim trailing zeroes.
*/
cp--;
if (m0 >= last_zeroed)
last_zeroed = mlen;
return (mask_rnhead->rnh_nodes);
}
if (m0 < last_zeroed)
x = NULL;
return (x);
saved_x = x;
if (maskduplicated) {
#ifdef DEBUG
#else
msglog("rn_addmask: mask impossibly already in tree");
#endif
return (x);
}
/*
* Calculate index of mask, and check for normalcy.
*/
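	/*
	 * Added example: 255.255.255.0 has its first zero at bit 24
	 * (bit 0 being the high-order bit), so its index is 24 and it is
	 * normal.  255.0.255.0 has index 8 but is not normal, because
	 * one bits appear past that first zero.
	 */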
x->rn_flags |= RNF_NORMAL;
cp++;
b++;
x->rn_flags &= ~RNF_NORMAL;
}
x->rn_b = -1 - b;
return (x);
}
static boolean_t	/* Note: arbitrary ordering for non-contiguous masks */
rn_lexobetter(void *m_arg, void *n_arg)
{
return (_B_TRUE);
return (_B_FALSE);
}
static struct radix_mask *
rn_new_radix_mask(struct radix_node *tt,
    struct radix_mask *next)
{
struct radix_mask *m;
MKGet(m);
if (m == NULL) {
#ifdef DEBUG
#else
msglog("Mask for route not entered");
#endif
return (NULL);
}
	(void) memset(m, 0, sizeof (*m));
	m->rm_b = tt->rn_b;
	m->rm_flags = tt->rn_flags;
	if (tt->rn_flags & RNF_NORMAL)
		m->rm_leaf = tt;
	else
		m->rm_mask = tt->rn_mask;
	m->rm_mklist = next;
	tt->rn_mklist = m;
	return (m);
}
static struct radix_node *
rn_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
    struct radix_node treenodes[2])
{
short b = 0, b_leaf = 0;
/*
* In dealing with non-contiguous masks, there may be
* many different routes which have the same mask.
 * We will find it useful to have a unique pointer to
 * each mask so we can avoid duplicate references at
 * nodes and possibly save time in calculating indices.
*/
if (netmask) {
DBGMSG(("rn_addroute: addmask failed"));
return (NULL);
}
b = -1 - x->rn_b;
}
/*
* Deal with duplicated keys: attach node to previous instance
*/
if (keyduplicated) {
DBGMSG(("rn_addroute: duplicated route and "
"mask"));
return (NULL);
}
break;
}
/*
* If the mask is not duplicated, we wouldn't
* find it among possible duplicate key entries
* anyway, so the above test doesn't hurt.
*
* We sort the masks for a duplicated key the same way as
* in a masklist -- most specific to least specific.
* This may require the unfortunate nuisance of relocating
* the head of the list.
*/
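	/*
	 * Added example: for one duplicated key, a host mask
	 * (255.255.255.255) sorts ahead of 255.255.255.0, which in turn
	 * sorts ahead of 255.255.0.0 -- most specific first.
	 */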
/* link in at head of list */
if (x->rn_l == t)
else
x = xx;
} else {
t->rn_dupedkey = tt;
}
#ifdef RN_DEBUG
t = tt + 1;
t->rn_info = rn_nodenum++;
#endif
}
/*
* Put mask in tree.
*/
if (netmask) {
}
if (keyduplicated)
goto key_already_in_tree;
x = t->rn_l;
else
x = t->rn_r;
/* Promote general routes from below */
if (x->rn_b < 0) {
}
} else if (x->rn_mklist) {
/*
* Skip over masks whose index is > that of new node
*/
break;
t->rn_mklist = m;
*mp = 0;
}
/* Add new route to highest possible ancestor's list */
return (tt); /* can't lift at all */
}
do {
x = t;
t = t->rn_p;
/*
 * Search through the routes associated with the node to
 * insert the new route according to its index.
* Need same criteria as when sorting dupedkeys to avoid
* double loop on deletion.
*/
continue;
break;
if (m->rm_flags & RNF_NORMAL) {
#ifdef DEBUG
"not entered");
#else
msglog("Non-unique normal route, mask "
"not entered");
#endif
return (tt);
}
} else
m->rm_refs++;
return (tt);
}
break;
}
return (tt);
}
static struct radix_node *
rn_delete(void *v_arg, void *netmask_arg, struct radix_node_head *head)
{
int b;
v = v_arg;
x = head->rnh_treetop;
top = x;
DBGMSG(("rn_delete: unable to locate route to delete"));
return (NULL);
}
/*
* Delete our route from mask lists.
*/
if (netmask) {
DBGMSG(("rn_delete: cannot add mask"));
return (NULL);
}
DBGMSG(("rn_delete: cannot locate mask"));
return (NULL);
}
}
goto annotation_removed;
#ifdef DEBUG
#else
msglog("rn_delete: inconsistent annotation");
#endif
return (NULL); /* dangling ref could cause disaster */
}
} else {
#ifdef DEBUG
#else
msglog("rn_delete: inconsistent annotation");
#endif
goto annotation_removed;
}
if (--m->rm_refs >= 0)
goto annotation_removed;
}
if (b > t->rn_b)
goto annotation_removed; /* Wasn't lifted at all */
do {
x = t;
t = t->rn_p;
if (m == saved_m) {
MKFree(m);
break;
}
if (m == NULL) {
#ifdef DEBUG
#else
msglog("rn_delete: couldn't find our annotation");
#endif
return (NULL); /* Dangling ref to us */
}
/*
* Eliminate us from tree
*/
DBGMSG(("rn_delete: cannot delete root"));
return (NULL);
}
#ifdef RN_DEBUG
/* Get us out of the creation list */
if (t != NULL)
#endif
x = dupedkey;
x->rn_p = t;
t->rn_l = x;
else
t->rn_r = x;
} else {
p = p->rn_dupedkey;
if (p != NULL) {
} else {
#ifdef DEBUG
#else
msglog("rn_delete: couldn't find us");
#endif
}
}
t = tt + 1;
if (t->rn_flags & RNF_ACTIVE) {
#ifndef RN_DEBUG
*++x = *t;
p = t->rn_p;
#else
b = t->rn_info;
*++x = *t;
t->rn_info = b;
p = t->rn_p;
#endif
if (p->rn_l == t)
p->rn_l = x;
else
p->rn_r = x;
}
goto out;
}
x = t->rn_r;
else
x = t->rn_l;
p = t->rn_p;
if (p->rn_r == t)
p->rn_r = x;
else
p->rn_l = x;
x->rn_p = p;
/*
* Demote routes attached to us.
*/
if (t->rn_mklist) {
if (x->rn_b >= 0) {
} else {
/*
 * If there are any (key, mask) pairs in a sibling
 * duped-key chain, some subset will appear sorted
 * in the same order attached to our mklist.
*/
for (m = t->rn_mklist; m && x; x = x->rn_dupedkey)
if (m == x->rn_mklist) {
x->rn_mklist = 0;
if (--(m->rm_refs) < 0)
MKFree(m);
m = mm;
}
if (m != NULL) {
#ifdef DEBUG
m, x);
#else
msglog("rn_delete: Orphaned Mask %p at %p\n", m,
x);
#endif
}
}
}
/*
* We may be holding an active internal node in the tree.
*/
x = tt + 1;
if (t != x) {
#ifndef RN_DEBUG
*t = *x;
#else
b = t->rn_info;
*t = *x;
t->rn_info = b;
#endif
p = x->rn_p;
if (p->rn_l == x)
p->rn_l = t;
else
p->rn_r = t;
}
out:
return (tt);
}
int
rn_walktree(struct radix_node_head *h,
    int (*f)(struct radix_node *, void *),
void *w)
{
int error;
/*
* This gets complicated because we may delete the node
* while applying the function f to it, so we need to calculate
* the successor node in advance.
*/
/* First time through node, go left */
do {
/* If at right child go back up, otherwise, go right */
/* Find the next *leaf* since next node might vanish, too */
/* Process leaves */
return (error);
}
return (0);
}
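/*
 * Illustrative sketch, not part of the original file: the
 * precompute-the-successor pattern that the comment in rn_walktree
 * describes.  The next leaf is found before f() is applied, so the
 * callback may delete the node it was handed without breaking the walk.
 * The helper names below are hypothetical.
 */
#if 0
static int
example_walk(struct radix_node_head *h,
    int (*f)(struct radix_node *, void *), void *w)
{
	struct radix_node *rn, *next;
	int error;

	for (rn = example_first_leaf(h); rn != NULL; rn = next) {
		next = example_next_leaf(rn);	/* hypothetical helper */
		if ((error = (*f)(rn, w)) != 0)
			return (error);		/* rn may already be freed */
	}
	return (0);
}
#endif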
int
rn_inithead(void **head, uint_t off)
{
if (*head)
return (1);
t->rn_p = t;
rnh->rnh_treetop = t;
return (1);
}
void
rn_init(void)
{
if (max_keylen == 0) {
return;
}
*cp++ = 0xFF;
if (rn_inithead((void **)&mask_rnhead, 0) == 0) {
logbad(0, "rn_init: could not initialize radix tree");
}
}