client.c revision b35a009df86b4aa3793e87602c95af2a503ec0ee
/*
* Copyright (C) 1999, 2000 Internet Software Consortium.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
* ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
* CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
#include <config.h>
#include <dns/dispatch.h>
#include <dns/rdatalist.h>
#include <dns/rdataset.h>
#include <named/interfacemgr.h>
/***
*** Client
***/
/*
* Important note!
*
* All client state changes, other than that from idle to listening, occur
* as a result of events. This guarantees serialization and avoids the
* need for locking.
*
* If a routine is ever created that allows someone other than the client's
* task to change the client, then the client will have to be locked.
*/
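/*
 * A minimal sketch of the pattern this implies (the routine and the
 * event-argument field here are hypothetical, not taken from this file):
 * every state change runs as an event action in the client's own task,
 * so it may modify the client without locking.
 *
 *	static void
 *	example_action(isc_task_t *task, isc_event_t *event) {
 *		ns_client_t *client = event->arg;	(hypothetical field)
 *		REQUIRE(task == client->task);
 *		... modify client state freely here ...
 *		isc_event_free(&event);
 *	}
 */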
#define NS_CLIENT_TRACE
#ifdef NS_CLIENT_TRACE
#define CTRACE(m)	ns_client_log(client, NS_LOGCATEGORY_CLIENT, \
				      NS_LOGMODULE_CLIENT, \
				      ISC_LOG_DEBUG(3), \
				      "%s", (m))
#define MTRACE(m)	isc_log_write(ns_g_lctx, NS_LOGCATEGORY_CLIENT, \
				      NS_LOGMODULE_CLIENT, \
				      ISC_LOG_DEBUG(3), \
				      "clientmgr @%p: %s", manager, (m))
#else
#define CTRACE(m) ((void)(m))
#define MTRACE(m) ((void)(m))
#endif
#define SEND_BUFFER_SIZE 2048
#define RECV_BUFFER_SIZE 2048
struct ns_clientmgr {
/* Unlocked. */
unsigned int magic;
/* Locked by lock. */
};
#define VALID_MANAGER(m) ((m) != NULL && \
(m)->magic == MANAGER_MAGIC)
/*
* Client object states. Ordering is significant: higher-numbered
* states are generally "more active", meaning that the client can
* have more dynamically allocated data, outstanding events, etc.
* In the list below, any such properties listed for state N
* also apply to any state > N.
*
* To force the client into a less active state, set client->newstate
* to that state and call exit_check(). This will cause any
* activities defined for higher-numbered states to be aborted.
*/
#define NS_CLIENTSTATE_FREED 0
/*
* The client object no longer exists.
*/
#define NS_CLIENTSTATE_INACTIVE 1
/*
* The client object exists and has a task and timer.
* Its "query" struct and sendbuf are initialized.
* It is on the client manager's list of inactive clients.
* It has a message and OPT, both in the reset state.
*/
#define NS_CLIENTSTATE_READY 2
/*
* The client object is either a TCP or a UDP one, and
* it is associated with a network interface. It is on the
* client manager's list of active clients.
*
* If it is a TCP client object, it has a TCP listener socket
* and an outstanding TCP listen request.
*
* If it is a UDP client object, it is associated with a
* dispatch and has an outstanding dispatch request.
*/
#define NS_CLIENTSTATE_READING 3
/*
* The client object is a TCP client object that has received
* a connection. It has a tcpsocket, tcpmsg, TCP quota, and an
* outstanding TCP read request. This state is not used for
* UDP client objects.
*/
#define NS_CLIENTSTATE_WORKING 4
/*
* The client object has received a request and is working
* on it. It has a view, and it may have any of a non-reset OPT,
* recursion quota, and an outstanding write request. If it
* is a UDP client object, it has a dispatch event.
*/
#define NS_CLIENTSTATE_MAX 9
/*
* Sentinel value used to indicate "no state". When client->newstate
* has this value, we are not attempting to exit the current state.
* Must be greater than any valid state.
*/
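/*
 * A minimal sketch of the transition mechanism described above, using
 * the newstate/exit_check() convention stated in the comment:
 *
 *	client->newstate = NS_CLIENTSTATE_READY;
 *	if (exit_check(client))
 *		return;		(the client must not be touched again)
 */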
/*
* Enter the inactive state.
*
* Requires:
* No requests are outstanding.
*/
static void
else
}
client->attributes = 0;
}
/*
* Clean up a client object and free its memory.
* Requires:
* The client is in the inactive state.
*/
static void
/*
* When "shuttingdown" is true, either the task has received
* its shutdown event or no shutdown event has ever been
* set up. Thus, we have no outstanding shutdown
* event at this point.
*/
}
}
CTRACE("free");
}
static void
if (result != ISC_R_SUCCESS) {
"setting timouet: %s",
/* Continue anyway. */
}
}
/*
* Check for a deactivation or shutdown request and take appropriate
* action. Returns ISC_TRUE if either is in progress; in this case
* the caller must no longer use the client object as it may have been
* freed.
*/
static isc_boolean_t
return (ISC_FALSE); /* Business as usual. */
/*
* We need to detach from the view early when shutting down
* the server to break the following vicious circle:
*
* - The resolver will not shut down until the view refcount is zero
* - The view refcount does not go to zero until all clients detach
* - The client does not detach from the view until references is zero
* - references does not go to zero until the resolver has shut down
*
*/
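/*
 * A sketch of that early detach, assuming the client's view reference
 * lives in client->view:
 *
 *	if (client->view != NULL)
 *		dns_view_detach(&client->view);
 */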
/*
* We are trying to abort request processing.
*/
if (TCP_CLIENT(client))
else
socket =
}
/*
* Still waiting for I/O cancel completion
* or for lingering references to be released.
*/
return (ISC_TRUE);
}
/*
* I/O cancel is complete. Burn down all state
* related to the current request.
*/
return (ISC_TRUE); /* We're done. */
}
}
/*
* We are trying to abort the current TCP connection,
* if any.
*/
/* Still waiting for read cancel completion. */
return (ISC_TRUE);
}
if (client->tcpmsg_valid) {
}
CTRACE("closetcp");
}
/*
* Now the client is ready to accept a new TCP connection
* or UDP request, but we may have enough clients doing
* that already. Check whether this client needs to remain
* active and force it to go inactive if not.
*/
if (TCP_CLIENT(client)) {
} else {
/*
* Give the processed dispatch event back to
* the dispatch. This tells the dispatch
* that we are ready to receive the next event.
*/
}
return (ISC_TRUE);
}
}
/*
* We are trying to enter the inactive state.
*/
/* Still waiting for accept cancel completion. */
return (ISC_TRUE);
}
/* Accept cancel is complete. */
return (ISC_TRUE); /* We're done. */
}
}
/*
* We are trying to free the client.
*/
return (ISC_TRUE);
}
return (ISC_TRUE);
}
/*
* The client's task has received a shutdown event.
*/
static void
CTRACE("shutdown");
}
(void)exit_check(client);
}
static void
CTRACE("endrequest");
}
}
}
static void
/*
* This client object should normally go inactive
* at this point, but if we have fewer active client
* objects than desired due to earlier quota exhaustion,
* keep it active to make up for the shortage.
*/
if (TCP_CLIENT(client)) {
} else {
/*
* The UDP client quota is enforced by making
* requests fail rather than by not listening
* for new ones. Therefore, there is always a
* full set of UDP clients listening.
*/
}
if (! need_another_client) {
/*
* We don't need this client object. Recycle it.
*/
}
}
}
void
int newstate;
CTRACE("next");
if (result != ISC_R_SUCCESS)
/*
* An error processing a TCP request may have left
* the connection out of sync. To be safe, we always
* sever the connection when result != ISC_R_SUCCESS.
*/
else
(void) exit_check(client);
}
static void
CTRACE("senddone");
"error sending response: %s",
}
if (exit_check(client))
return;
}
void
unsigned char *data;
isc_region_t r;
struct in6_pktinfo *pktinfo;
unsigned int bufsize = 512;
CTRACE("send");
/*
* XXXRTH The following doesn't deal with TSIGs, TCP buffer resizing,
* or EDNS1 more data packets.
*/
if (TCP_CLIENT(client)) {
/*
* XXXRTH "tcpbuffer" is a hack to get things working.
*/
goto done;
} else {
else
}
if (result != ISC_R_SUCCESS)
goto done;
if (result != ISC_R_SUCCESS)
goto done;
/*
* XXXRTH dns_message_setopt() should probably do this...
*/
}
DNS_SECTION_QUESTION, 0);
if (result != ISC_R_SUCCESS)
goto done;
DNS_SECTION_ANSWER, 0);
if (result == ISC_R_NOSPACE) {
goto renderend;
}
if (result != ISC_R_SUCCESS)
goto done;
if (result == ISC_R_NOSPACE) {
goto renderend;
}
if (result != ISC_R_SUCCESS)
goto done;
goto done;
if (result != ISC_R_SUCCESS)
goto done;
if (TCP_CLIENT(client)) {
isc_buffer_usedregion(&buffer, &r);
isc_buffer_usedregion(&tcpbuffer, &r);
} else {
isc_buffer_usedregion(&buffer, &r);
}
CTRACE("sendto");
else
if (result == ISC_R_SUCCESS) {
return;
}
done:
}
}
void
CTRACE("error");
/*
* message may be an in-progress reply that we had trouble
* with, in which case QR will be set. We need to clear QR before
* calling dns_message_reply() to avoid triggering an assertion.
*/
/*
* AA and AD shouldn't be set.
*/
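/*
 * A sketch of the flag handling just described, assuming the standard
 * DNS_MESSAGEFLAG_* constants:
 *
 *	message->flags &= ~DNS_MESSAGEFLAG_QR;
 *	message->flags &= ~(DNS_MESSAGEFLAG_AA | DNS_MESSAGEFLAG_AD);
 */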
if (result != ISC_R_SUCCESS) {
/*
* It could be that we've got a query with a good header,
* but a bad question section, so we try again with
* want_question_section set to ISC_FALSE.
*/
if (result != ISC_R_SUCCESS) {
return;
}
}
}
static inline isc_result_t
if (result != ISC_R_SUCCESS)
return (result);
if (result != ISC_R_SUCCESS)
return (result);
if (result != ISC_R_SUCCESS)
return (result);
/*
* Set Maximum UDP buffer size.
*/
/*
* Set EXTENDED-RCODE, VERSION, and Z to 0.
*/
/*
* No EDNS options.
*/
return (ISC_R_SUCCESS);
}
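/*
 * A sketch of how the OPT pseudo-RR described above is commonly encoded
 * (RFC 2671; the rdatalist/rdata variable names here are hypothetical):
 *
 *	rdatalist->type = dns_rdatatype_opt;
 *	rdatalist->rdclass = RECV_BUFFER_SIZE;	(UDP payload size we accept)
 *	rdatalist->ttl = 0;			(EXTENDED-RCODE, VERSION, Z)
 *	rdata->data = NULL;			(no EDNS options)
 *	rdata->length = 0;
 */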
/*
* Handle an incoming request event from the dispatch (UDP case)
* or tcpmsg (TCP case).
*/
static void
TCP_CLIENT(client) ?
} else {
}
else
} else {
/*
* client->peeraddr was set when the connection was accepted.
*/
}
"%s request",
if (exit_check(client))
goto cleanup_serverlock;
if (result != ISC_R_SUCCESS) {
if (TCP_CLIENT(client))
else
goto cleanup_serverlock;
}
"multicast request");
#if 0
#endif
}
if (result != ISC_R_SUCCESS) {
goto cleanup_serverlock;
}
/*
* We expect a query, not a response. Unexpected UDP responses
* are discarded early by the dispatcher, but TCP responses
* bypass the dispatcher and must be discarded here.
*/
CTRACE("unexpected response");
goto cleanup_serverlock;
}
/*
* Deal with EDNS.
*/
unsigned int version;
/*
* Set the client's UDP buffer size.
*/
/*
* Create an OPT for our reply.
*/
if (result != ISC_R_SUCCESS) {
goto cleanup_serverlock;
}
/*
* Do we understand this version of EDNS?
*
* XXXRTH need library support for this!
*/
if (version != 0) {
goto cleanup_serverlock;
}
}
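/*
 * A sketch of the usual RFC 2671 decoding applied in the EDNS handling
 * above ("opt" being the rdataset for the OPT record; the variable
 * names are hypothetical):
 *
 *	udpsize = opt->rdclass;			(sender's UDP payload size)
 *	version = (opt->ttl & 0x00ff0000) >> 16;
 */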
/*
* Find a view that matches the client's source address.
*
* XXXRTH View list management code will be moving to its own module
* soon.
*/
{
int match;
match > 0))
{
break;
}
}
}
"no matching view");
goto cleanup_serverlock;
}
/*
* Lock the view's configuration data for reading.
* We must attach a separate view reference for this
* purpose instead of using client->view, because
* client->view may or may not be detached at the point
* when we return from this event handler depending
* on whether the request handler causes ns_client_next()
* to be called or not.
*/
/*
* Check for a signature. We log bad signatures regardless of
* whether they ultimately cause the request to be rejected or
* not. We do not log the lack of a signature unless we are
* debugging.
*/
if (result != ISC_R_SUCCESS) {
goto cleanup_viewlock;
}
if (result == ISC_R_SUCCESS) {
"request has valid signature");
} else if (result == ISC_R_NOTFOUND) {
"request is not signed");
} else if (result == DNS_R_NOIDENTITY) {
"request is signed by a nonauthoritative key");
} else {
/* There is a signature, but it is bad. */
"request has invalid signature: %s",
}
/*
* Decide whether recursive service is available to this client.
* We do this here rather than in the query code so that we can
* set the RA bit correctly on all kinds of responses, not just
* responses to ordinary queries.
*/
/* XXX this will log too much too early */
/*
* Dispatch the request.
*/
switch (client->message->opcode) {
case dns_opcode_query:
CTRACE("query");
break;
case dns_opcode_update:
CTRACE("update");
break;
case dns_opcode_notify:
CTRACE("notify");
break;
case dns_opcode_iquery:
CTRACE("iquery");
break;
default:
CTRACE("unknown opcode");
}
}
static void
CTRACE("timeout");
}
(void) exit_check(client);
}
static isc_result_t
{
/*
* Caller must be holding the manager lock.
*
* Note: creating a client does not add the client to the
* manager's client list or set the client's manager pointer.
* The caller is responsible for that.
*/
return (ISC_R_NOMEMORY);
if (result != ISC_R_SUCCESS)
goto cleanup_client;
if (result != ISC_R_SUCCESS)
goto cleanup_task;
if (result != ISC_R_SUCCESS)
goto cleanup_task;
if (result != ISC_R_SUCCESS)
goto cleanup_timer;
/* XXXRTH Hardwired constants */
goto cleanup_message;
client->references = 0;
client->attributes = 0;
/*
* We call the init routines for the various kinds of client here,
* after we have created an otherwise valid client, because some
* of them call routines that REQUIRE(NS_CLIENT_VALID(client)).
*/
if (result != ISC_R_SUCCESS)
goto cleanup_sendbuf;
CTRACE("create");
return (ISC_R_SUCCESS);
return (result);
}
static void
CTRACE("read");
if (result != ISC_R_SUCCESS)
goto fail;
/*
* Set a timeout to limit the amount of time we will wait
* for a request on this TCP connection.
*/
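/*
 * A sketch of such a timeout, assuming the ISC timer API and a
 * hypothetical timeout value "tcp_idle_seconds":
 *
 *	isc_interval_set(&interval, tcp_idle_seconds, 0);
 *	result = isc_timer_reset(client->timer, isc_timertype_once,
 *				 NULL, &interval, ISC_FALSE);
 */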
return;
fail:
}
static void
/*
* We must take ownership of the new socket before the exit
* check to make sure it gets destroyed if we decide to exit.
*/
"new TCP connection");
} else {
/*
* XXXRTH What should we do? We're trying to accept but
* it didn't work. If we just give up, then TCP
* service may eventually stop.
*
* For now, we just go idle.
*
* Going idle is probably the right thing if the
* I/O was canceled.
*/
"accept failed: %s",
}
if (exit_check(client))
goto freeevent;
/*
* Let a new client take our place immediately, before
* we wait for a request packet. If we don't,
* telnetting to port 53 (once per CPU) will
* deny service to legitimate TCP clients.
*/
if (result == ISC_R_SUCCESS)
if (result != ISC_R_SUCCESS) {
"no more TCP clients: %s",
}
}
}
static void
CTRACE("accept");
if (result != ISC_R_SUCCESS) {
"isc_socket_accept() failed: %s",
/*
* XXXRTH What should we do? We're trying to accept but
* it didn't work. If we just give up, then TCP
* service may eventually stop.
*
* For now, we just go idle.
*/
return;
}
}
void
source->references++;
}
void
client->references--;
(void) exit_check(client);
}
}
CTRACE("replace");
(TCP_CLIENT(client) ?
if (result != ISC_R_SUCCESS)
return (result);
/*
* The responsibility for listening for new requests is hereby
* transferred to the new client. Therefore, the old client
* should refrain from listening for any more requests.
*/
return (ISC_R_SUCCESS);
}
/***
*** Client Manager
***/
static void
MTRACE("clientmgr_destroy");
}
{
return (ISC_R_NOMEMORY);
if (result != ISC_R_SUCCESS)
goto cleanup_manager;
MTRACE("create");
return (ISC_R_SUCCESS);
return (result);
}
void
MTRACE("destroy");
if (need_destroy)
}
{
unsigned int i;
REQUIRE(n > 0);
MTRACE("createclients");
/*
* We MUST lock the manager lock for the entire client creation
* process. If we didn't do this, then a client could get a
* shutdown event and disappear out from under us.
*/
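/*
 * A sketch of that bracketing, assuming the ISC LOCK()/UNLOCK() macros
 * and the manager's "lock" member mentioned above:
 *
 *	LOCK(&manager->lock);
 *	... allocate and register the clients ...
 *	UNLOCK(&manager->lock);
 */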
for (i = 0; i < n; i++) {
/*
* Allocate a client. First try to get a recycled one;
* if that fails, make a new one.
*/
MTRACE("recycle");
} else {
MTRACE("create new");
if (result != ISC_R_SUCCESS)
break;
}
if (tcp) {
&client->tcplistener);
} else {
if (result != ISC_R_SUCCESS) {
ISC_LOG_DEBUG(3),
"dns_dispatch_addrequest() "
"failed: %s",
break;
}
}
}
if (i != 0) {
/*
* We managed to create at least one client, so we
* declare victory.
*/
}
return (result);
}
}
{
int match;
if (default_allow)
goto allow;
else
goto deny;
}
if (result != ISC_R_SUCCESS)
goto deny; /* Internal error, already logged. */
if (match > 0)
goto allow;
goto deny; /* Negative match or no match. */
"%s approved", opname);
return (ISC_R_SUCCESS);
deny:
"%s denied", opname);
return (DNS_R_REFUSED);
}
static void
{
char msgbuf[2048];
char peerbuf[256];
if (client->peeraddr_valid) {
} else {
}
}
void
{
}