/* stats.c revision 10dd5f62f27b050c0e51d85cbd97e2f5925eb9ac */
/*
 * Copyright (C) 2009, 2012-2014 Internet Systems Consortium, Inc. ("ISC")
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/* $Id$ */
/*! \file */
#include <config.h>
#include <string.h>
#include <isc/platform.h>
#ifndef ISC_STATS_USEMULTIFIELDS
#if defined(ISC_RWLOCK_USEATOMIC) && defined(ISC_PLATFORM_HAVEXADD) && !defined(ISC_PLATFORM_HAVEXADDQ)
#define ISC_STATS_USEMULTIFIELDS 1
#else
#define ISC_STATS_USEMULTIFIELDS 0
#endif
#endif /* ISC_STATS_USEMULTIFIELDS */
/*
 * Per-counter storage type.
 *
 * When a native 64-bit atomic add is unavailable but a 32-bit one is
 * (ISC_STATS_USEMULTIFIELDS, computed above), each counter is split into
 * two 32-bit halves updated with 32-bit atomics; otherwise a plain
 * 64-bit integer is used.
 *
 * NOTE(review): the original span had an unmatched "#else" and an empty
 * struct body; the "#if" guard and the hi/lo fields below were restored
 * from the upstream revision named at the top of this file -- confirm
 * against that revision.
 */
#if ISC_STATS_USEMULTIFIELDS
typedef struct {
	isc_uint32_t hi;
	isc_uint32_t lo;
} isc_stat_t;
#else
typedef isc_uint64_t isc_stat_t;
#endif
/*
 * Statistics counter set object (reference counted).
 *
 * NOTE(review): this definition looks truncated.  The comments below
 * mention "lock" and "counterlock" members, and later fragments in this
 * file read stats->copiedcounters and size buffers as
 * sizeof(isc_stat_t) * ncounters / sizeof(isc_uint64_t) * ncounters,
 * yet none of those members (nor a memory-context or counters pointer)
 * is declared here.  Restore the missing members from the upstream
 * revision named at the top of the file before building.
 */
struct isc_stats {
/*% Unlocked */
unsigned int magic; /* NOTE(review): presumably a validity tag -- confirm */
int ncounters; /* number of counters in this set */
unsigned int references; /* locked by lock */
/*%
 * Locked by counterlock or unlocked if efficient rwlock is not
 * available.
 */
#ifdef ISC_RWLOCK_USEATOMIC
/* NOTE(review): the counterlock member itself is missing here. */
#endif
/*%
 * We don't want to lock the counters while we are dumping, so we first
 * copy the current counter values into a local array. This buffer
 * will be used as the copy destination. It's allocated on creation
 * of the stats structure so that the dump operation won't fail due
 * to memory allocation failure.
 * XXX: this approach is weird for non-threaded build because the
 * additional memory and the copy overhead could be avoided. We prefer
 * simplicity here, however, under the assumption that this function
 * should be only rarely called.
 */
/* NOTE(review): the counters/copiedcounters array members are missing. */
};
/*
 * Allocation/initialization fragment (upstream: likely create_stats()).
 *
 * NOTE(review): badly truncated -- the line after "static isc_result_t"
 * should carry the function name and parameter list, and the calls that
 * allocate the structure, init the mutex, and allocate the counter
 * buffers are missing.  What survives is the goto-based cleanup
 * skeleton (clean_stats / clean_mutex / clean_counters /
 * clean_copiedcounters, unwinding in reverse acquisition order).
 * Restore from the upstream revision named at the top of the file; this
 * cannot compile as-is.
 */
static isc_result_t
return (ISC_R_NOMEMORY); /* NOTE(review): allocation + NULL check lost */
if (result != ISC_R_SUCCESS)
goto clean_stats;
goto clean_mutex;
}
/* apparently the tail of an isc_uint64_t-array allocation */
sizeof(isc_uint64_t) * ncounters);
goto clean_counters;
}
#ifdef ISC_RWLOCK_USEATOMIC
/* rwlock init (atomic builds only); unwind everything on failure */
if (result != ISC_R_SUCCESS)
goto clean_copiedcounters;
#endif
return (result); /* success path */
/* NOTE(review): cleanup labels missing; only statement tails remain */
#ifdef ISC_RWLOCK_USEATOMIC
sizeof(isc_stat_t) * ncounters); /* apparently the tail of a free */
#endif
return (result);
}
/*
 * Reference-increment fragment (upstream: likely isc_stats_attach()).
 * NOTE(review): the name/parameter line and any locking around the
 * increment have been lost; only the increment itself remains.
 */
void
stats->references++;
}
/*
 * Reference-decrement fragment (upstream: likely isc_stats_detach()).
 * NOTE(review): truncated.  The surviving shape: drop a reference and,
 * when the count reaches zero, release resources before returning (the
 * empty ISC_RWLOCK_USEATOMIC section presumably tore down the counter
 * rwlock -- confirm against upstream).
 */
void
stats->references--;
if (stats->references == 0) {
#ifdef ISC_RWLOCK_USEATOMIC
/* NOTE(review): rwlock teardown missing here */
#endif
return;
}
}
/*
 * NOTE(review): fragment -- only the return type and closing brace of
 * an int-returning function remain (upstream: likely
 * isc_stats_ncounters(), returning stats->ncounters).
 */
int
}
/*
 * Counter-increment fragment (upstream: likely incrementcounter()).
 * NOTE(review): truncated.  The "#elif defined(ISC_PLATFORM_HAVEXADDQ)"
 * below has no matching "#if" in this extraction (upstream guards this
 * region with "#if ISC_STATS_USEMULTIFIELDS"), and the atomic-add
 * statements themselves are gone.
 */
static inline void
#ifdef ISC_RWLOCK_USEATOMIC
/*
 * We use a "read" lock to prevent other threads from reading the
 * counter while we are "writing" a counter field. The write access
 * itself is protected by the atomic operation.
 */
#endif
/*
 * If the lower 32-bit field overflows, increment the higher field.
 * Note that it's *theoretically* possible that the lower field
 * overflows again before the higher field is incremented. It doesn't
 * matter, however, because we don't read the value until
 * isc_stats_copy() is called where the whole process is protected
 * by the write (exclusive) lock.
 */
#elif defined(ISC_PLATFORM_HAVEXADDQ)
#else
#endif
#ifdef ISC_RWLOCK_USEATOMIC
#endif
}
/*
 * Counter-decrement fragment (upstream: likely decrementcounter()).
 * NOTE(review): truncated.  "prev" must come from a missing atomic
 * fetch-and-add; the surviving "if (prev == 0)" and the stray "-1);"
 * tail suggest a borrow from the high 32-bit word when the low word
 * underflows -- confirm against upstream.  The "#elif"/"#else" chain
 * below lost its opening "#if".
 */
static inline void
#ifdef ISC_RWLOCK_USEATOMIC
#endif
if (prev == 0)
-1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
#else
#endif
#ifdef ISC_RWLOCK_USEATOMIC
#endif
}
/*
 * Snapshot fragment (upstream: likely copy_counters()).
 * NOTE(review): truncated.  Intent per the surviving code: take the
 * counter rwlock exclusively (atomic builds), then copy each live
 * counter into stats->copiedcounters[i].  The loop header and the
 * right-hand side of the assignment are missing, and the
 * "#else" / "UNUSED(i);" arm lost its opening "#if".
 */
static void
int i;
#ifdef ISC_RWLOCK_USEATOMIC
/*
 * We use a "write" lock before "reading" the statistics counters as
 * an exclusive lock.
 */
#endif
stats->copiedcounters[i] =
}
#else
UNUSED(i);
#endif
#ifdef ISC_RWLOCK_USEATOMIC
#endif
}
}
/*
 * NOTE(review): two gutted void functions -- only the return types and
 * closing braces remain (upstream: likely isc_stats_increment() and
 * isc_stats_decrement(), thin wrappers over the static inline helpers
 * earlier in this file).
 */
void
}
void
}
/*
 * Dump fragment (upstream: likely isc_stats_dump()).
 * NOTE(review): truncated.  The surviving condition shows the intent:
 * iterate over the copied counters and, unless ISC_STATSDUMP_VERBOSE
 * was requested in "options", skip counters whose snapshot value is
 * zero.  The signature, the snapshot call, the loop header, and the
 * per-counter callback invocation are missing.
 */
void
{
int i;
if ((options & ISC_STATSDUMP_VERBOSE) == 0 &&
stats->copiedcounters[i] == 0)
continue;
}
}
/*
 * Set fragment (upstream: likely isc_stats_set()).
 * NOTE(review): truncated.  Only the locking scaffolding survives; the
 * signature, the store into the counter (both the multi-field and the
 * 64-bit arms of the missing "#if"), and the unlock call are gone.
 * The bare "#else" below lost its opening "#if".
 */
void
{
#ifdef ISC_RWLOCK_USEATOMIC
/*
 * We use a "write" lock before "reading" the statistics counters as
 * an exclusive lock.
 */
#endif
#else
#endif
#ifdef ISC_RWLOCK_USEATOMIC
#endif
}