/*
* Copyright (C) 2004-2009 Internet Systems Consortium, Inc. ("ISC")
* Copyright (C) 1999-2003 Internet Software Consortium.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/* $Id: dispatch.c,v 1.167 2009/11/25 23:49:21 tbox Exp $ */
/*! \file */
#include <config.h>
#include <stdlib.h>
#include <unistd.h>
#include <dns/dispatch.h>
#include <dns/portlist.h>
typedef struct dispsocket dispsocket_t;
typedef struct dispportentry dispportentry_t;
/* ARC4 Random generator state */
typedef struct arc4ctx {
isc_uint8_t i;
isc_uint8_t j;
isc_uint8_t s[256];
int count;
} arc4ctx_t;
typedef struct dns_qid {
unsigned int magic;
unsigned int qid_nbuckets; /*%< hash table size */
unsigned int qid_increment; /*%< id increment on collision */
} dns_qid_t;
struct dns_dispatchmgr {
/* Unlocked. */
unsigned int magic;
/* Locked by "lock". */
unsigned int state;
/* Locked by arc4_lock. */
/* locked by buffer lock */
unsigned int buffers; /*%< allocated buffers */
unsigned int buffersize; /*%< size of each buffer */
unsigned int maxbuffers; /*%< max buffers */
/* Locked internally. */
/*%
* Locked by qid->lock if qid exists; otherwise, can be used without
* being locked.
* Memory footprint considerations: this is a simple implementation of
* available ports, i.e., an ordered array of the actual port numbers.
* This will require about 256KB of memory in the worst case (128KB for
 * each of IPv4 and IPv6).  We could reduce it by representing the ports
 * in a more sophisticated way, such as a list (or array) of ranges that
 * is searched to identify a specific port.  Our decision here is that
 * the memory saved isn't worth the implementation complexity, given
 * that the whole BIND9 process (mainly named) already has a fairly
 * large memory footprint.  We may, however, have to revisit this
 * decision if we want to use the code as a separate module in an
 * environment with more severe memory constraints.  (A worked estimate
 * of these figures follows this struct.)
 */
unsigned int nv4ports; /*%< # of available ports for IPv4 */
unsigned int nv6ports; /*%< # of available ports for IPv6 */
};
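/*
 * Worked estimate for the figures above (illustrative; the array members
 * themselves are not shown here): with one in_port_t (2 bytes) per possible
 * port, a full table costs
 *
 *	65536 * sizeof(in_port_t) = 131072 bytes (128KB)
 *
 * per address family, i.e. about 256KB for IPv4 and IPv6 combined.  Keeping
 * the array ordered makes picking a random available port a single index,
 * e.g. something like
 *
 *	port = ports[dispatch_uniformrandom(actx, nports)];
 *
 * where 'ports' and 'nports' stand in for the per-family array and its
 * length.
 */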
#define MGR_SHUTTINGDOWN 0x00000001U
struct dns_dispentry {
unsigned int magic;
unsigned int bucket;
void *arg;
};
/*%
* Maximum number of dispatch sockets that can be pooled for reuse. The
 * appropriate value may vary, but experiments have shown that a busy caching
 * server may need more than 1000 sockets open concurrently.  The maximum
 * allowable number of dispatch sockets (per manager) is set to double this
 * value.
*/
#ifndef DNS_DISPATCH_POOLSOCKS
#define DNS_DISPATCH_POOLSOCKS 2048
#endif
/*%
 * Quota to control the number of dispatch sockets.  If a dispatch has more
 * sockets than the quota, new queries purge the oldest ones, so that a
 * massive number of outstanding queries won't block subsequent queries
 * (especially when the older ones take a long time and eventually time out).
 * A minimal sketch of this purge policy follows the definition below.
 */
#ifndef DNS_DISPATCH_SOCKSQUOTA
#define DNS_DISPATCH_SOCKSQUOTA 3072
#endif
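#if 0
/*
 * Minimal sketch of the purge policy above, not the dispatch code itself:
 * active sockets are assumed to sit on an ISC_LIST in age order, oldest at
 * the head, with new queries appended at the tail.  The 'activelist'
 * parameter and the cancel_and_reuse() callback are illustrative names.
 */
static void
enforce_socket_quota(dispsocketlist_t *activelist, unsigned int nsockets) {
	dispsocket_t *oldest;

	if (nsockets < DNS_DISPATCH_SOCKSQUOTA)
		return;

	/* The head is the oldest entry and thus the purge candidate. */
	oldest = ISC_LIST_HEAD(*activelist);
	if (oldest != NULL)
		cancel_and_reuse(oldest); /* free its slot for a new query */
}
#endif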
struct dispsocket {
unsigned int magic;
unsigned int bucket;
};
/*%
 * A port table entry.  We remember every port we open in a table with a
 * reference counter so that we can 'reuse' the same local port (with
 * different destination addresses) using the SO_REUSEADDR socket option.
 * A standalone sketch of this trick follows the definitions below.
*/
struct dispportentry {
unsigned int refs;
};
#ifndef DNS_DISPATCH_PORTTABLESIZE
#define DNS_DISPATCH_PORTTABLESIZE 1024
#endif
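#if 0
/*
 * Standalone sketch of the SO_REUSEADDR trick described above, using plain
 * POSIX calls rather than the ISC socket API (needs <string.h>,
 * <sys/socket.h> and <netinet/in.h>).  Two UDP sockets bound to the same
 * local port can each be connect()ed to a different destination; the kernel
 * then demultiplexes replies by the connected peer address.
 */
static int
open_shared_port_socket(in_port_t localport, const struct sockaddr_in *dest) {
	int fd, on = 1;
	struct sockaddr_in local;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return (-1);
	(void)setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));

	memset(&local, 0, sizeof(local));
	local.sin_family = AF_INET;
	local.sin_addr.s_addr = htonl(INADDR_ANY);
	local.sin_port = htons(localport);
	if (bind(fd, (const struct sockaddr *)&local, sizeof(local)) < 0 ||
	    connect(fd, (const struct sockaddr *)dest, sizeof(*dest)) < 0) {
		close(fd);
		return (-1);
	}
	return (fd);
}
#endif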
#define INVALID_BUCKET (0xffffdead)
/*%
 * Number of tasks for each dispatch that uses separate sockets for different
 * transactions.  This must be a power of 2, since it is used to reduce 32-bit
 * random numbers to a uniformly random task selection.  See get_dispsocket()
 * and the sketch below.
*/
#define MAX_INTERNAL_TASKS 64
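#if 0
/*
 * Illustrative sketch of why MAX_INTERNAL_TASKS must be a power of 2: when
 * ntasks is a power of two, reducing a uniformly distributed 32-bit random
 * value modulo ntasks introduces no bias (2^32 is an exact multiple of
 * ntasks), and the reduction is just a mask of the low bits.  This helper is
 * not part of the dispatch code; see get_dispsocket() for the real use.
 */
static unsigned int
pick_task_index(isc_uint32_t r, unsigned int ntasks) {
	/* equivalent to r % ntasks when ntasks is a power of 2 */
	return (r & (ntasks - 1));
}
#endif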
struct dns_dispatch {
/* Unlocked. */
unsigned int magic; /*%< magic */
int ntasks;
/*%
* internal task buckets. We use multiple tasks to distribute various
* socket events well when using separate dispatch sockets. We use the
* 1st task (task[0]) for internal control events.
*/
unsigned int maxrequests; /*%< max requests */
/*% Locked by mgr->lock. */
/* Locked by "lock". */
unsigned int attributes;
unsigned int refcount; /*%< number of users */
unsigned int shutting_down : 1,
shutdown_out : 1,
connected : 1,
tcpmsg_valid : 1,
recv_pending : 1; /*%< is a recv() pending? */
unsigned int nsockets;
unsigned int requests; /*%< how many requests we have */
unsigned int tcpbuffers; /*%< allocated buffers */
};
/*%
* Locking a query port buffer is a bit tricky. We access the buffer without
 * locking until qid is created.  Technically, there is a possibility of a race
* between the creation of qid and access to the port buffer; in practice,
* however, this should be safe because qid isn't created until the first
* dispatch is created and there should be no contending situation until then.
*/
/*
* Statics.
*/
dns_messageid_t, in_port_t, unsigned int);
isc_socket_t **sockp);
unsigned int maxrequests,
unsigned int attributes,
dns_dispatch_t **dispp);
#define LVL(x) ISC_LOG_DEBUG(x)
static void
static void
char msgbuf[2048];
return;
}
static inline void
}
static void
static void
char msgbuf[2048];
return;
}
static void
static void
{
char msgbuf[2048];
char peerbuf[256];
return;
if (VALID_RESPONSE(resp)) {
} else {
msgbuf);
}
}
/*%
* ARC4 random number generator derived from OpenBSD.
* Only dispatch_random() and dispatch_uniformrandom() are expected
* to be called from general dispatch routines; the rest of them are subroutines
* for these two.
*
* The original copyright follows:
* Copyright (c) 1996, David Mazieres <dm@uun.org>
* Copyright (c) 2008, Damien Miller <djm@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef BIND9
static void
{
int n;
for (n = 0; n < 256; n++)
actx->s[n] = n;
actx->i = 0;
actx->j = 0;
}
static void
int n;
actx->i--;
for (n = 0; n < 256; n++) {
}
}
static inline isc_uint8_t
}
static inline isc_uint16_t
return (val);
}
static void
int i;
union {
unsigned char rnd[128];
} rnd;
/*
* We accept any quality of random data to avoid blocking.
*/
} else {
for (i = 0; i < 32; i++)
}
/*
* Discard early keystream, as per recommendations in:
*/
for (i = 0; i < 256; i++)
(void)dispatch_arc4get8(actx);
/*
* Derived from OpenBSD's implementation. The rationale is not clear,
* but should be conservative enough in safety, and reasonably large
* for efficiency.
*/
}
static isc_uint16_t
return (result);
}
#else
/*
 * For a general-purpose library, we don't have to be too strict about the
* quality of random values. Performance doesn't matter much, either.
* So we simply use the isc_random module to keep the library as small as
* possible.
*/
static void
{
return;
}
static isc_uint16_t
isc_uint32_t r;
isc_random_get(&r);
return (r & 0xffff);
}
#endif /* BIND9 */
static isc_uint16_t
isc_uint16_t min, r;
if (upper_bound < 2)
return (0);
/*
 * Ensure that the size of the range [min, 0xffff] is a multiple of
 * upper_bound and that it covers at least half of the 16-bit range.
 */
if (upper_bound > 0x8000)
	min = 1 + ~upper_bound; /* = 0x10000 - upper_bound */
else
	min = (isc_uint16_t)(0x10000 % (isc_uint32_t)upper_bound);
/*
* This could theoretically loop forever but each retry has
* p > 0.5 (worst case, usually far better) of selecting a
* number inside the range we need, so it should rarely need
* to re-roll.
*/
for (;;) {
r = dispatch_random(actx);
if (r >= min)
break;
}
return (r % upper_bound);
}
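/*
 * Worked example of the scheme above: for upper_bound == 10000,
 * min = 0x10000 % 10000 = 5536, so the accepted range [5536, 0xffff]
 * contains exactly 6 * 10000 values and r % upper_bound is unbiased;
 * the chance of rejecting any given r is 5536/65536, roughly 8.4%.
 */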
/*
* Return a hash of the destination and message id.
*/
static isc_uint32_t
{
unsigned int ret;
return (ret);
}
/*
* Find the first entry in 'qid'. Returns NULL if there are no entries.
*/
static dns_dispentry_t *
unsigned int bucket;
bucket = 0;
return (ret);
bucket++;
}
return (NULL);
}
/*
* Find the next entry after 'resp' in 'qid'. Return NULL if there are
* no more entries.
*/
static dns_dispentry_t *
unsigned int bucket;
return (ret);
bucket++;
return (ret);
bucket++;
}
return (NULL);
}
/*
* The dispatch must be locked.
*/
static isc_boolean_t
{
return (ISC_FALSE);
if (disp->recv_pending != 0)
return (ISC_FALSE);
return (ISC_FALSE);
if (disp->shutting_down == 0)
return (ISC_FALSE);
return (ISC_TRUE);
}
/*
* Called when refcount reaches 0 (and safe to destroy).
*
* The dispatcher must not be locked.
* The manager must be locked.
*/
static void
int i;
"shutting down; detaching from sock %p, task %p",
}
if (killmgr)
destroy_mgr(&mgr);
}
/*%
* Manipulate port table per dispatch: find an entry for a given port number,
* create a new entry, and decrement a given entry with possible clean-up.
*/
static dispportentry_t *
return (portentry);
}
return (NULL);
}
static dispportentry_t *
return (portentry);
return (portentry);
}
/*%
* The caller must not hold the qid->lock.
*/
static void
}
*portentryp = NULL;
}
/*%
* Find a dispsocket for socket address 'dest', and port number 'port'.
* Return NULL if no such entry exists.
*/
static dispsocket_t *
unsigned int bucket)
{
return (dispsock);
}
return (NULL);
}
/*%
* Make a new socket for a single dispatch with a random port number.
* The caller must hold the disp->lock and qid->lock.
*/
static isc_result_t
{
int i;
isc_uint32_t r;
unsigned int bucket = 0;
unsigned int nports;
unsigned int bindoptions;
} else {
}
if (nports == 0)
return (ISC_R_ADDRNOTAVAIL);
} else {
return (ISC_R_NOMEMORY);
isc_random_get(&r);
}
/*
 * Pick a random UDP port and open a new socket with it.  Avoid
 * ports already used for the same destination, since bind(2) or
 * connect(2) would then be very likely to fail.
*/
for (i = 0; i < 64; i++) {
nports)];
continue;
bindoptions = 0;
if (result == ISC_R_SUCCESS) {
break;
}
}
break;
} else if (result != ISC_R_ADDRINUSE)
break;
}
if (result == ISC_R_SUCCESS) {
} else {
/*
* We could keep it in the inactive list, but since this should
 * be an exceptional case and might indicate a resource shortage, we'd
* rather destroy it.
*/
}
return (result);
}
/*%
* Destroy a dedicated dispatch socket.
*/
static void
/*
* The dispatch must be locked.
*/
blink);
}
}
/*%
* Deactivate a dedicated dispatch socket. Move it to the inactive list for
 * future reuse unless the total number of sockets exceeds the maximum.
*/
static void
/*
* The dispatch must be locked.
*/
}
#ifdef BIND9
else {
blink);
if (result == ISC_R_SUCCESS)
else {
/*
* If the underlying system does not allow this
* optimization, destroy this temporary structure (and
* create a new one for a new transaction).
*/
}
}
#else
/* This kind of optimization isn't necessary for normal use */
#endif
}
/*
* Find an entry for query ID 'id', socket address 'dest', and port number
* 'port'.
* Return NULL if no such entry exists.
*/
static dns_dispentry_t *
{
return (res);
}
}
return (NULL);
}
static void
case isc_sockettype_tcp:
disp->tcpbuffers--;
break;
case isc_sockettype_udp:
break;
default:
INSIST(0);
break;
}
}
static void *
void *temp;
return (temp);
}
static inline void
disp->shutdown_out = 0;
return;
}
}
static inline dns_dispatchevent_t *
return (NULL);
return (ev);
}
static void
}
static void
}
/*
* General flow:
*
* If I/O result == CANCELED or error, free the buffer.
*
* If query, free the buffer, restart.
*
* If response:
* Allocate event, fill in details.
* If cannot allocate, free buffer, restart.
* find target. If not found, free buffer, restart.
* if event queue is not empty, queue. else, send.
* restart.
*/
static void
unsigned int flags;
unsigned int bucket;
int match;
int result;
"got packet: requests %d, buffers %d, recvs %d",
/*
* Unless the receive event was imported from a listening
* interface, in which case the event type is
* DNS_EVENT_IMPORTRECVDONE, receive operation must be pending.
*/
disp->recv_pending = 0;
}
/*
* dispsock->resp can be NULL if this transaction was canceled
* just after receiving a response. Since this socket is
 * exclusively used and there should be at most one receive
 * event, the canceled event should have had no effect.  So
 * we can (and should) deactivate the socket right now.
*/
}
if (disp->shutting_down) {
/*
* This dispatcher is shutting down.
*/
if (killit)
return;
}
/*
* This is most likely a network error on a
* connected socket. It makes no sense to
 * check the address or parse the packet, but it
 * is still useful to return the error to the caller.
*/
goto sendresponse;
}
} else {
return;
}
"odd socket result in udp_recv(): %s",
return;
}
/*
* If this is from a blackholed address, drop it.
*/
match > 0)
{
char netaddrstr[ISC_NETADDR_FORMATSIZE];
sizeof(netaddrstr));
"blackholed packet from %s",
}
goto restart;
}
/*
* Peek into the buffer to see what we can see.
*/
if (dres != ISC_R_SUCCESS) {
goto restart;
}
"got valid DNS message header, /QR %c, id %u",
/*
* Look at flags. If query, drop it. If response,
* look to see where it goes.
*/
if ((flags & DNS_MESSAGEFLAG_QR) == 0) {
/* query */
goto restart;
}
/*
* Search for the corresponding response. If we are using an exclusive
* socket, we've already identified it and we can skip the search; but
* the ID and the address must match the expected ones.
*/
bucket);
"search for response in bucket %d: %s",
goto unlock;
}
"response to an exclusive socket doesn't match");
goto unlock;
}
/*
 * Now that we have the original dispatch the query was sent
 * from, check that the address and port the response was
 * sent to make sense.
*/
/*
* Check that the socket types and ports match.
*/
goto unlock;
}
/*
* If both dispatches are bound to an address then fail as
* the addresses can't be equal (enforced by the IP stack).
*
 * Note that under Linux a packet can be sent out via an IPv4
 * socket and the response received via an IPv6 socket.
*
* Requests sent out via IPv6 should always come back in
* via IPv6.
*/
goto unlock;
}
goto unlock;
}
}
goto unlock;
}
/*
* At this point, rev contains the event we want to fill in, and
* resp contains the information on the place to send it to.
* Send the event off.
*/
if (queue_response) {
} else {
"[a] Sent event %p buffer %p len %d to task %p",
}
if (qidlocked)
/*
* Restart recv() to get the next packet.
*/
/*
 * XXX: weird.  There seems to be no recovery process other than
 * deactivating this socket anyway (since we cannot start
* receiving, we won't be able to receive a cancel event
* from the user).
*/
}
}
/*
* General flow:
*
* If I/O result == CANCELED, EOF, or error, notify everyone as the
* various queues drain.
*
* If query, restart.
*
* If response:
* Allocate event, fill in details.
* If cannot allocate, restart.
* find target. If not found, restart.
* if event queue is not empty, queue. else, send.
* restart.
*/
static void
unsigned int flags;
unsigned int bucket;
int level;
char buf[ISC_SOCKADDR_FORMATSIZE];
"got TCP packet: requests %d, buffers %d, recvs %d",
disp->recv_pending = 0;
/*
* This dispatcher is shutting down. Force cancelation.
*/
}
case ISC_R_CANCELED:
break;
case ISC_R_EOF:
break;
case ISC_R_CONNECTIONRESET:
goto logit;
default:
"receive error: %s: %s", buf,
break;
}
/*
* The event is statically allocated in the tcpmsg
* structure, and destroy_disp() frees the tcpmsg, so we must
* free the event *before* calling destroy_disp().
*/
/*
 * If the recv() was canceled, pass the word on.
*/
if (killit)
return;
}
/*
* Peek into the buffer to see what we can see.
*/
if (dres != ISC_R_SUCCESS) {
goto restart;
}
"got valid DNS message header, /QR %c, id %u",
/*
* Allocate an event to send to the query or response client, and
* allocate a new buffer for our use.
*/
/*
* Look at flags. If query, drop it. If response,
* look to see where it goes.
*/
if ((flags & DNS_MESSAGEFLAG_QR) == 0) {
/*
* Query.
*/
goto restart;
}
/*
* Response.
*/
"search for response in bucket %d: %s",
goto unlock;
goto unlock;
/*
* At this point, rev contains the event we want to fill in, and
* resp contains the information on the place to send it to.
* Send the event off.
*/
disp->tcpbuffers++;
if (queue_response) {
} else {
"[b] Sent event %p buffer %p len %d to task %p",
}
/*
* Restart recv() to get the next packet.
*/
}
/*
* disp must be locked.
*/
static isc_result_t
return (ISC_R_SUCCESS);
return (ISC_R_SUCCESS);
return (ISC_R_SUCCESS);
return (ISC_R_NOMEMORY);
return (ISC_R_SUCCESS);
else
/*
* UDP reads are always maximal.
*/
case isc_sockettype_udp:
return (ISC_R_NOMEMORY);
dispsock);
if (res != ISC_R_SUCCESS) {
return (res);
}
} else {
if (res != ISC_R_SUCCESS) {
return (ISC_R_SUCCESS); /* recover by cancel */
}
}
break;
case isc_sockettype_tcp:
if (res != ISC_R_SUCCESS) {
return (ISC_R_SUCCESS); /* recover by cancel */
}
break;
default:
INSIST(0);
break;
}
return (ISC_R_SUCCESS);
}
/*
* Mgr must be locked when calling this function.
*/
static isc_boolean_t
"destroy_mgr_ok: shuttingdown=%d, listnonempty=%d, "
"epool=%d, rpool=%d, dpool=%d",
if (!MGR_IS_SHUTTINGDOWN(mgr))
return (ISC_FALSE);
return (ISC_FALSE);
return (ISC_FALSE);
return (ISC_FALSE);
return (ISC_FALSE);
return (ISC_TRUE);
}
/*
* Mgr must be unlocked when calling this function.
*/
static void
#ifdef BIND9
#endif /* BIND9 */
}
}
}
static isc_result_t
{
if (result != ISC_R_SUCCESS)
return (result);
} else {
#ifdef BIND9
if (result != ISC_R_SUCCESS)
return (result);
#else
INSIST(0);
#endif
}
#ifndef ISC_ALLOW_MAPPED
#endif
if (result != ISC_R_SUCCESS) {
else {
#ifdef BIND9
#else
INSIST(0);
#endif
}
return (result);
}
return (ISC_R_SUCCESS);
}
/*%
* Create a temporary port list to set the initial default set of dispatch
 * ports: [1024, 65535].  This is almost meaningless, as the application will
 * normally set the ports explicitly, but it is provided to cover some minor
 * corner cases.  (A caller-side sketch of setting the ports explicitly
 * follows the function below.)
*/
static isc_result_t
if (result != ISC_R_SUCCESS)
return (result);
return (ISC_R_SUCCESS);
}
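#if 0
/*
 * Caller-side sketch of setting the available ports explicitly instead of
 * relying on the default [1024, 65535] set built above.  The isc_portset_*()
 * and dns_dispatchmgr_setavailports() prototypes are assumed to be the ones
 * in <isc/portset.h> and <dns/dispatch.h> of this vintage; treat this as an
 * illustration rather than copy-paste code.
 */
static isc_result_t
use_custom_ports(isc_mem_t *mctx, dns_dispatchmgr_t *mgr) {
	isc_result_t result;
	isc_portset_t *v4portset = NULL, *v6portset = NULL;

	result = isc_portset_create(mctx, &v4portset);
	if (result != ISC_R_SUCCESS)
		return (result);
	result = isc_portset_create(mctx, &v6portset);
	if (result != ISC_R_SUCCESS) {
		isc_portset_destroy(mctx, &v4portset);
		return (result);
	}

	/* e.g., restrict outgoing query ports to 50000-60000 */
	isc_portset_addrange(v4portset, 50000, 60000);
	isc_portset_addrange(v6portset, 50000, 60000);

	result = dns_dispatchmgr_setavailports(mgr, v4portset, v6portset);

	isc_portset_destroy(mctx, &v4portset);
	isc_portset_destroy(mctx, &v6portset);
	return (result);
}
#endif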
/*
* Publics.
*/
{
return (ISC_R_NOMEMORY);
if (result != ISC_R_SUCCESS)
goto deallocate;
if (result != ISC_R_SUCCESS)
goto kill_lock;
if (result != ISC_R_SUCCESS)
goto kill_arc4_lock;
if (result != ISC_R_SUCCESS)
goto kill_buffer_lock;
goto kill_pool_lock;
}
goto kill_epool;
}
goto kill_rpool;
}
mgr->buffersize = 0;
mgr->maxbuffers = 0;
if (result == ISC_R_SUCCESS) {
if (result == ISC_R_SUCCESS) {
}
}
if (result != ISC_R_SUCCESS)
goto kill_dpool;
#ifdef BIND9
#else
#endif
return (ISC_R_SUCCESS);
return (result);
}
void
}
}
void
{
/* This function is deprecated: use dns_dispatchmgr_setavailports(). */
return;
}
return (NULL); /* this function is deprecated */
}
{
if (nv4ports != 0) {
return (ISC_R_NOMEMORY);
}
if (nv6ports != 0) {
sizeof(in_port_t) *
}
return (ISC_R_NOMEMORY);
}
}
p = 0;
i4 = 0;
i6 = 0;
do {
if (isc_portset_isset(v4portset, p)) {
}
if (isc_portset_isset(v6portset, p)) {
}
} while (p++ < 65535);
}
}
return (ISC_R_SUCCESS);
}
static isc_result_t
unsigned int buffersize, unsigned int maxbuffers,
unsigned int maxrequests, unsigned int buckets,
unsigned int increment)
{
REQUIRE(maxbuffers > 0);
/*
* Keep some number of items around. This should be a config
* option. For now, keep 8, but later keep at least two even
* if the caller wants less. This allows us to ensure certain
* things, like an event can be "freed" and the next allocation
* will always succeed.
*
* Note that if limits are placed on anything here, we use one
* event internally, so the actual limit should be "wanted + 1."
*
* XXXMLG
*/
if (maxbuffers < 8)
maxbuffers = 8;
/* Create or adjust buffer pool */
} else {
if (result != ISC_R_SUCCESS) {
return (result);
}
}
/* Create or adjust socket pool */
return (ISC_R_SUCCESS);
}
if (result != ISC_R_SUCCESS) {
goto cleanup;
}
if (result != ISC_R_SUCCESS)
goto cleanup;
return (ISC_R_SUCCESS);
return (result);
}
void
if (killit)
destroy_mgr(&mgr);
}
void
}
static int
return (-1);
return (0);
else
return (1);
}
static isc_boolean_t
{
unsigned int nports;
if (result != ISC_R_SUCCESS)
goto unlock;
}
} else {
}
goto unlock;
return (available);
}
static isc_boolean_t
return (ISC_TRUE);
/*
* Don't match wildcard ports unless the port is available in the
* current configuration.
*/
if (isc_sockaddr_getport(addr) == 0 &&
return (ISC_FALSE);
}
/*
* Check if we match the binding <address,port>.
*/
return (ISC_TRUE);
if (isc_sockaddr_getport(addr) == 0)
return (ISC_FALSE);
/*
* Check if we match a bound wildcard port <address,port>.
*/
return (ISC_FALSE);
if (result != ISC_R_SUCCESS)
return (ISC_FALSE);
}
/*
 * Requires the mgr to be locked.
 *
 * No dispatcher can be locked by this thread when calling this function.
 *
* NOTE:
* If a matching dispatcher is found, it is locked after this function
* returns, and must be unlocked by the caller.
*/
static isc_result_t
unsigned int attributes, unsigned int mask,
{
/*
* Make certain that we will not match a private or exclusive dispatch.
*/
if ((disp->shutting_down == 0)
break;
}
goto out;
}
out:
return (result);
}
static isc_result_t
{
unsigned int i;
return (ISC_R_NOMEMORY);
buckets * sizeof(dns_displist_t));
return (ISC_R_NOMEMORY);
}
if (needsocktable) {
sizeof(dispsocketlist_t));
buckets * sizeof(dns_displist_t));
return (ISC_R_NOMEMORY);
}
}
if (result != ISC_R_SUCCESS) {
buckets * sizeof(dispsocketlist_t));
}
buckets * sizeof(dns_displist_t));
return (result);
}
for (i = 0; i < buckets; i++) {
}
return (ISC_R_SUCCESS);
}
static void
}
}
/*
* Allocate and set important limits.
*/
static isc_result_t
{
/*
* Set up the dispatcher, mostly. Don't bother setting some of
* the options that are controlled by tcp vs. udp, etc.
*/
return (ISC_R_NOMEMORY);
disp->attributes = 0;
disp->recv_pending = 0;
disp->shutting_down = 0;
disp->shutdown_out = 0;
disp->tcpmsg_valid = 0;
disp->tcpbuffers = 0;
if (result != ISC_R_SUCCESS)
goto deallocate;
goto kill_lock;
}
return (ISC_R_SUCCESS);
/*
* error returns
*/
return (result);
}
/*
* MUST be unlocked, and not used by anything.
*/
static void
{
int i;
if (disp->tcpmsg_valid) {
disp->tcpmsg_valid = 0;
}
for (i = 0; i < DNS_DISPATCH_PORTTABLESIZE; i++)
sizeof(disp->port_table[0]) *
}
}
unsigned int maxbuffers, unsigned int maxrequests,
{
/*
* dispatch_allocate() checks mgr for us.
* qid_allocate() checks buckets and increment for us.
*/
if (result != ISC_R_SUCCESS) {
return (result);
}
if (result != ISC_R_SUCCESS)
goto deallocate_dispatch;
if (result != ISC_R_SUCCESS)
goto kill_socket;
sizeof(isc_event_t));
goto kill_task;
}
/*
* Append it to the dispatcher list.
*/
return (ISC_R_SUCCESS);
/*
* Error returns.
*/
return (result);
}
unsigned int buffersize,
unsigned int maxbuffers, unsigned int maxrequests,
unsigned int attributes, unsigned int mask,
{
REQUIRE(maxbuffers > 0);
if (result != ISC_R_SUCCESS)
return (result);
if ((attributes & DNS_DISPATCHATTR_EXCLUSIVE) != 0) {
goto createudp;
}
/*
* See if we have a dispatcher that matches.
*/
if (result == ISC_R_SUCCESS) {
(attributes & DNS_DISPATCHATTR_NOLISTEN) != 0)
{
if (disp->recv_pending != 0)
}
return (ISC_R_SUCCESS);
}
/*
* Nope, create one.
*/
if (result != ISC_R_SUCCESS) {
return (result);
}
return (ISC_R_SUCCESS);
}
/*
* mgr should be locked.
*/
#ifndef DNS_DISPATCH_HELD
#define DNS_DISPATCH_HELD 20U
#endif
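/*
 * DNS_DISPATCH_HELD bounds a small ring of sockets that are kept open
 * ("held") by the retry loop below: holding on to a socket whose
 * kernel-chosen port turned out to be unacceptable (e.g. excluded by
 * avoid-v{4,6}-udp-ports) keeps the kernel from handing the same port
 * straight back on the next attempt.  Once the ring is full, the oldest
 * held socket is detached to make room.
 */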
static isc_result_t
{
unsigned int i, j;
if (anyport) {
unsigned int nports;
/*
 * If no port is specified, we first try to pick a random
 * port ourselves.
*/
} else {
}
if (nports == 0)
return (ISC_R_ADDRNOTAVAIL);
for (i = 0; i < 1024; i++) {
nports)];
0, &sock);
if (result == ISC_R_SUCCESS ||
result != ISC_R_ADDRINUSE) {
return (result);
}
}
/*
 * If this fails 1024 times, we then ask the kernel to
 * choose one.
*/
} else {
/* Allow address reuse for non-random ports. */
if (result == ISC_R_SUCCESS)
return (result);
}
i = 0;
for (j = 0; j < 0xffffU; j++) {
if (result != ISC_R_SUCCESS)
goto end;
else if (!anyport)
break;
break;
isc_socket_detach(&held[i]);
if (i == DNS_DISPATCH_HELD)
i = 0;
}
if (j == 0xffffU) {
"avoid-v%s-udp-ports: unable to allocate "
"an available port",
goto end;
}
end:
for (i = 0; i < DNS_DISPATCH_HELD; i++) {
isc_socket_detach(&held[i]);
}
return (result);
}
static isc_result_t
unsigned int maxrequests,
unsigned int attributes,
{
int i = 0;
/*
* dispatch_allocate() checks mgr for us.
*/
if (result != ISC_R_SUCCESS)
return (result);
if ((attributes & DNS_DISPATCHATTR_EXCLUSIVE) == 0) {
if (result != ISC_R_SUCCESS)
goto deallocate_dispatch;
} else {
/*
* For dispatches using exclusive sockets with a specific
* source address, we only check if the specified address is
* available on the system. Query sockets will be created later
* on demand.
*/
if (result != ISC_R_SUCCESS)
goto deallocate_dispatch;
}
sizeof(disp->port_table[0]) *
goto deallocate_dispatch;
for (i = 0; i < DNS_DISPATCH_PORTTABLESIZE; i++)
if (result != ISC_R_SUCCESS)
goto deallocate_dispatch;
}
if ((attributes & DNS_DISPATCHATTR_EXCLUSIVE) != 0)
else
if (result != ISC_R_SUCCESS) {
while (--i >= 0) {
}
goto kill_socket;
}
}
sizeof(isc_event_t));
goto kill_task;
}
/*
* Append it to the dispatcher list.
*/
return (result);
/*
* Error returns.
*/
return (result);
}
void
}
/*
* It is important to lock the manager while we are deleting the dispatch,
 * since dns_dispatch_getudp will call dispatch_find, which returns a
 * dispatch to the caller but does not attach to it until later.  _getudp
* locks the manager, however, so locking it here will keep us from attaching
* to a dispatcher that is in the process of going away.
*/
void
if (disp->recv_pending > 0)
}
}
if (killit)
}
{
unsigned int bucket;
int i;
return (ISC_R_SHUTTINGDOWN);
}
return (ISC_R_QUOTA);
}
/*
 * Kill the oldest outstanding query if the number of sockets
 * exceeds the quota, to keep room for new queries.
*/
ISC_EVENT_PTR(&rev));
}
}
/*
* Move this entry to the tail so that it won't (easily) be
* examined before actually being canceled.
*/
}
/*
* Get a separate UDP socket with a random port number.
*/
&localport);
if (result != ISC_R_SUCCESS) {
return (result);
}
} else {
}
/*
 * Try somewhat hard to find a unique ID.
*/
for (i = 0; i < 64; i++) {
break;
}
id &= 0x0000ffff;
}
if (!ok) {
return (ISC_R_NOMORE);
}
if (dispsocket != NULL)
return (ISC_R_NOMEMORY);
}
if (dispsocket != NULL)
if (result != ISC_R_SUCCESS) {
if (dispsocket != NULL)
return (result);
}
}
if (dispsocket != NULL)
return (ISC_R_SUCCESS);
}
{
}
void
}
void
{
unsigned int bucket;
unsigned int n;
} else {
}
if (disp->recv_pending > 0)
}
}
/*
* We've posted our event, but the caller hasn't gotten it
* yet. Take it back.
*/
/*
* We had better have gotten it back.
*/
INSIST(n == 1);
}
}
}
/*
* Free any buffered requests as well
*/
}
else
if (killit)
}
static void
return;
/*
* Search for the first response handler without packets outstanding
 * unless a specific handler is given.
*/
/* Empty. */)
/*
* No one to send the cancel event to, so nothing to do.
*/
goto unlock;
/*
* Send the shutdown failsafe event to this resp.
*/
"cancel: failsafe event %p -> task %p",
}
}
else
return (NULL);
}
return (ISC_R_SUCCESS);
}
return (ISC_R_NOTIMPLEMENTED);
}
void
return;
}
return;
}
unsigned int
/*
* We don't bother locking disp here; it's the caller's responsibility
 * to use only non-volatile flags.
*/
return (disp->attributes);
}
void
unsigned int attributes, unsigned int mask)
{
/* Exclusive attribute can only be set on creation */
/* Also, a dispatch with randomport specified cannot start listening */
(attributes & DNS_DISPATCHATTR_NOLISTEN) == 0);
/* XXXMLG
* Should check for valid attributes here!
*/
if ((mask & DNS_DISPATCHATTR_NOLISTEN) != 0) {
(attributes & DNS_DISPATCHATTR_NOLISTEN) == 0) {
== 0 &&
(attributes & DNS_DISPATCHATTR_NOLISTEN) != 0) {
if (disp->recv_pending != 0)
}
}
}
void
void *buf;
newsevent = (isc_socketevent_t *)
disp, sizeof(isc_socketevent_t));
return;
return;
}
}
#if 0
void
char foo[1024];
}
}
#endif