/* ddi.c revision 9acbbeaf2a1ffe5c14b244867d427714fab43c5c */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* UNIX Device Driver Interface functions
*
* This file contains functions that are to be added to the kernel
* to put the interface presented to drivers in conformance with
* the DDI standard. Of the functions added to the kernel, 17 are
* function equivalents of existing macros in sysmacros.h,
*
* 17 additional functions -- drv_getparm(), drv_setparm(),
* getrbuf(), freerbuf(),
* getemajor(), geteminor(), etoimajor(), itoemajor(), drv_usectohz(),
* drv_hztousec(), drv_usecwait(), drv_priv(), and kvtoppid() --
* are specified by DDI to exist in the kernel and are implemented here.
*
 * Note that putnext() and put() are not in this file. The C version of
 * these functions might exist for some architectures.
*/
/*
* return internal major number corresponding to device
* number (new format) argument
*/
{
#ifdef _LP64
#else
#endif
}
/*
* return external major number corresponding to device
* number (new format) argument
*/
{
#ifdef _LP64
#else
#endif
}
/*
* return internal minor number corresponding to device
* number (new format) argument
*/
{
#ifdef _LP64
#else
#endif
}
/*
* return external minor number corresponding to device
* number (new format) argument
*/
{
#ifdef _LP64
#else
#endif
}
/*
* return internal major number corresponding to external
* major number.
*/
int
{
#ifdef _LP64
return (-1); /* invalid external major */
#else
return (-1); /* invalid external major */
#endif
return ((int)emajnum);
}
/*
* return external major number corresponding to internal
* major number argument or -1 if no external major number
* can be found after lastemaj that maps to the internal
* major number. Pass a lastemaj val of -1 to start
* the search initially. (Typical use of this function is
* of the form:
*
* lastemaj = -1;
* while ((lastemaj = itoemajor(imag, lastemaj)) != -1)
* { process major number }
*/
int
{
return (-1);
/*
* if lastemaj == -1 then start from beginning of
* the (imaginary) MAJOR table
*/
if (lastemaj < -1)
return (-1);
/*
* given that there's a 1-1 mapping of internal to external
* major numbers, searching is somewhat pointless ... let's
* just go there directly.
*/
return (imajnum);
return (-1);
}
/*
* encode external major and minor number arguments into a
* new format device number
*/
{
#ifdef _LP64
#else
#endif
}
/*
* cmpdev - compress new device format to old device format
*/
{
#ifdef _LP64
#else
#endif
}
{
#ifdef _LP64
#else
#endif
}
/*
* return true (1) if the message type input is a data
* message type, 0 otherwise
*/
int
{
}
/*
* return a pointer to the other queue in the queue pair of qp
*/
queue_t *
{
return (_OTHERQ(q));
}
/*
* return a pointer to the read queue in the queue pair of qp.
*/
queue_t *
{
return (_RD(q));
}
/*
* return a pointer to the write queue in the queue pair of qp.
*/
int
{
return (_SAMESTR(q));
}
/*
* return a pointer to the write queue in the queue pair of qp.
*/
queue_t *
{
return (_WR(q));
}
/*
* store value of kernel parameter associated with parm
*/
int
{
switch (parm) {
case UPROCP:
break;
case PPGRP:
break;
case LBOLT:
break;
case TIME:
if ((now = gethrestime_sec()) == 0) {
} else {
}
break;
case PPID:
break;
case PSID:
mutex_enter(&p->p_splock);
mutex_exit(&p->p_splock);
break;
case UCRED:
break;
default:
return (-1);
}
return (0);
}
/*
* set value of kernel parameter associated with parm
*/
int
{
switch (parm) {
case SYSRINT:
break;
case SYSXINT:
break;
case SYSMINT:
break;
case SYSRAWC:
break;
case SYSCANC:
break;
case SYSOUTC:
break;
default:
return (-1);
}
return (0);
}
/*
* allocate space for buffer header and return pointer to it.
* preferred means of obtaining space for a local buf header.
* returns pointer to buf upon success, NULL for failure
*/
struct buf *
{
return (NULL);
return (bp);
}
/*
* free up space allocated by getrbuf()
*/
void
{
}
/*
* convert byte count input to logical page units
* (byte counts that are not a page-size multiple
* are rounded down)
*/
{
}
/*
* convert byte count input to logical page units
* (byte counts that are not a page-size multiple
* are rounded up)
*/
{
}
/*
* convert size in pages to bytes.
*/
{
}
#define MAXCLOCK_T LONG_MAX
/*
* Convert from system time units (hz) to microseconds.
*
* If ticks <= 0, return 0.
* If converting ticks to usecs would overflow, return MAXCLOCK_T.
* Otherwise, convert ticks to microseconds.
*/
{
if (ticks <= 0)
return (0);
return (MAXCLOCK_T);
return (TICK_TO_USEC(ticks));
}
/*
* Convert from microseconds to system time units (hz), rounded up.
*
* If ticks <= 0, return 0.
* Otherwise, convert microseconds to ticks, rounding up.
*/
{
if (microsecs <= 0)
return (0);
return (USEC_TO_TICK_ROUNDUP(microsecs));
}
#ifdef sun
/*
* drv_usecwait implemented in each architecture's machine
* specific code somewhere. For sparc, it is the alternate entry
* to usec_delay (eventually usec_delay goes away). See
*/
#endif
/*
* bcanputnext, canputnext assume called from timeout, bufcall,
* or esballoc free routines. since these are driven by
* clock interrupts, instead of system calls the appropriate plumbing
* locks have not been acquired.
*/
int
{
int ret;
claimstr(q);
releasestr(q);
return (ret);
}
int
canputnext(queue_t *q)
{
"canputnext?:%p\n", q);
} else
/* get next module forward with a service queue */
/* this is for loopback transports, they should not do a canputnext */
"canputnext:%p %d", q, 1);
return (1);
}
}
/* the above is the most frequently used path */
mutex_enter(QLOCK(q));
mutex_exit(QLOCK(q));
"canputnext:%p %d", q, 0);
return (0);
}
mutex_exit(QLOCK(q));
return (1);
}
/*
*
* "qprocson enables the put and service routines of the driver
* or module... Prior to the call to qprocson, the put and service
* routines of a newly pushed module or newly opened driver are
* disabled. For the module, messages flow around it as if it
* were not present in the stream... qprocson must be called by
* the first open of a module or driver after allocation and
* initialization of any resource on which the put and service
* routines depend."
*
* put or service procedures to be run by using put() or qenable().
*/
void
{
/*
* Do not call insertq() if it is a re-open. But if _QINSERTING
* is set, q_next will not be NULL and we need to call insertq().
*/
(q->q_flag & _QINSERTING))
}
/*
* into the queue.
*
* "qprocsoff disables the put and service routines of the driver
* or module... When the routines are disabled in a module, messages
* flow around the module as if it were not present in the stream.
* qprocsoff must be called by the close routine of a driver or module
* put and service routines depend. qprocsoff will remove the
* queue's service routines from the list of service routines to be
* run and waits until any concurrent put or service routines are
* finished."
*
* put procedures to be run by using put().
*/
void
{
/* Called more than once */
return;
}
disable_svc(q);
removeq(q);
}
/*
* "freezestr() freezes the state of the entire STREAM containing
* the queue pair q. A frozen STREAM blocks any thread
* attempting to enter any open, close, put or service routine
* belonging to any queue instance in the STREAM, and blocks
* any thread currently within the STREAM if it attempts to put
* messages onto or take messages off of any queue within the
* STREAM (with the sole exception of the caller). Threads
* blocked by this mechanism remain so until the STREAM is
* thawed by a call to unfreezestr().
*
* Use strblock to set SQ_FROZEN in all syncqs in the stream (prevents
* further entry into put, service, open, and close procedures) and
* grab (and hold) all the QLOCKs in the stream (to block putq, getq etc.)
*
* Note: this has to be the only code that acquires one QLOCK while holding
*/
void
{
/*
* Increment refcnt to prevent q_next from changing during the strblock
* as well as while the stream is frozen.
*/
strblock(q);
mutex_enter(QLOCK(q));
}
}
/*
* Undo what freezestr did.
* Have to drop the QLOCKs before the strunblock since strunblock will
* potentially call other put procedures.
*/
void
unfreezestr(queue_t *q)
{
}
strunblock(q);
releasestr(RD(q));
}
/*
* Used by open and close procedures to "sleep" waiting for messages to
* arrive. Note: can only be used in open and close procedures.
*
* Lower the gate and let in either messages on the syncq (if there are
*
* If the queue has an outer perimeter this will not prevent entry into this
* syncq (since outer_enter does not set SQ_WRITER on the syncq that gets the
* exclusive access to the outer perimeter.)
*
* Return 0 is the cv_wait_sig was interrupted; otherwise 1.
*
* It only makes sense to grab sq_putlocks for !SQ_CIOC sync queues because
* otherwise put entry points were not blocked in the first place. if this is
* SQ_CIOC then qwait is used to wait for service procedure to run since syncq
* is always SQ_CIPUT if it is SQ_CIOC.
*
* Note that SQ_EXCL is dropped and SQ_WANTEXITWAKEUP set in sq_flags
* atomically under sq_putlocks to make sure putnext will not miss a pending
* wakeup.
*/
int
{
int ret = 1;
int is_sq_cioc;
/*
* Perform the same operations as a leavesq(sq, SQ_OPENCLOSE)
* while detecting all cases where the perimeter is entered
* so that qwait_sig can return to the caller.
*
* Drain the syncq if possible. Otherwise reset SQ_EXCL and
* wait for a thread to leave the syncq.
*/
/*
* XXX this does not work if there is only an outer perimeter.
*/
if (outer)
if (is_sq_cioc == 0) {
}
/*
* Drop SQ_EXCL and sq_count but hold the SQLOCK
* to prevent any undetected entry and exit into the perimeter.
*/
if (is_sq_cioc == 0) {
}
/*
* Unblock any thread blocked in an entersq or outer_enter.
* since that could lead to livelock with two threads in
* qwait for the same (per module) inner perimeter.
*/
if (flags & SQ_WANTWAKEUP) {
flags &= ~SQ_WANTWAKEUP;
}
if (is_sq_cioc == 0) {
}
/* drain_syncq() drops SQLOCK */
return (1);
}
/*
* Sleep on sq_exitwait to only be woken up when threads leave the
* put or service procedures. We can not sleep on sq_wait since an
* outer_exit in a qwait running in the same outer perimeter would
* cause a livelock "ping-pong" between two or more qwait'ers.
*/
do {
if (is_sq_cioc == 0) {
}
if (is_sq_cioc == 0) {
}
if (is_sq_cioc == 0) {
}
/*
* Re-enter the perimeters again
*/
return (ret);
}
/*
* Used by open and close procedures to "sleep" waiting for messages to
* arrive. Note: can only be used in open and close procedures.
*
* Lower the gate and let in either messages on the syncq (if there are
*
* If the queue has an outer perimeter this will not prevent entry into this
* syncq (since outer_enter does not set SQ_WRITER on the syncq that gets the
* exclusive access to the outer perimeter.)
*
* It only makes sense to grab sq_putlocks for !SQ_CIOC sync queues because
* otherwise put entry points were not blocked in the first place. if this is
* SQ_CIOC then qwait is used to wait for service procedure to run since syncq
* is always SQ_CIPUT if it is SQ_CIOC.
*
* Note that SQ_EXCL is dropped and SQ_WANTEXITWAKEUP set in sq_flags
* atomically under sq_putlocks to make sure putnext will not miss a pending
* wakeup.
*/
void
{
int is_sq_cioc;
/*
* Perform the same operations as a leavesq(sq, SQ_OPENCLOSE)
* while detecting all cases where the perimeter is entered
* so that qwait can return to the caller.
*
* Drain the syncq if possible. Otherwise reset SQ_EXCL and
* wait for a thread to leave the syncq.
*/
/*
* XXX this does not work if there is only an outer perimeter.
*/
if (outer)
if (is_sq_cioc == 0) {
}
/*
* Drop SQ_EXCL and sq_count but hold the SQLOCK
* to prevent any undetected entry and exit into the perimeter.
*/
if (is_sq_cioc == 0) {
}
/*
* Unblock any thread blocked in an entersq or outer_enter.
* since that could lead to livelock with two threads in
* qwait for the same (per module) inner perimeter.
*/
if (flags & SQ_WANTWAKEUP) {
flags &= ~SQ_WANTWAKEUP;
}
if (is_sq_cioc == 0) {
}
/* drain_syncq() drops SQLOCK */
return;
}
/*
* Sleep on sq_exitwait to only be woken up when threads leave the
* put or service procedures. We can not sleep on sq_wait since an
* outer_exit in a qwait running in the same outer perimeter would
* cause a livelock "ping-pong" between two or more qwait'ers.
*/
do {
if (is_sq_cioc == 0) {
}
if (is_sq_cioc == 0) {
}
if (is_sq_cioc == 0) {
}
/*
* Re-enter the perimeters again
*/
}
/*
* Used for the synchronous streams entrypoints when sleeping outside
* the perimeters. Must never be called from regular put entrypoint.
*
* There's no need to grab sq_putlocks here (which only exist for CIPUT sync
* queues). If it is CIPUT sync queue put entry points were not blocked in the
* permiter syncronization purposes.
*
* Consolidation private.
*/
{
/*
* Perform the same operations as a leavesq(sq, SQ_PUT)
* while detecting all cases where the perimeter is entered
* so that qwait_rw can return to the caller.
*
* Drain the syncq if possible. Otherwise reset SQ_EXCL and
* wait for a thread to leave the syncq.
*/
/*
* Drop SQ_EXCL and sq_count but hold the SQLOCK until to prevent any
* undetected entry and exit into the perimeter.
*/
}
/*
* Unblock any thread blocked in an entersq or outer_enter.
* since that could lead to livelock with two threads in
* qwait for the same (per module) inner perimeter.
*/
if (flags & SQ_WANTWAKEUP) {
flags &= ~SQ_WANTWAKEUP;
}
/* drain_syncq() drops SQLOCK */
return (B_FALSE);
}
/*
* Sleep on sq_exitwait to only be woken up when threads leave the
* put or service procedures. We can not sleep on sq_wait since an
* outer_exit in a qwait running in the same outer perimeter would
* cause a livelock "ping-pong" between two or more qwait'ers.
*/
do {
break;
}
/*
* Re-enter the perimeters again
*/
return (gotsignal);
}
/*
* Asynchronously upgrade to exclusive access at either the inner or
* outer perimeter.
*/
void
{
if (perim == PERIM_INNER)
else if (perim == PERIM_OUTER)
else
panic("qwriter: wrong \"perimeter\" parameter");
}
/*
* Schedule a synchronous streams timeout
*/
{
/*
* you don't want the timeout firing before its params are set up
* callbparams_alloc() acquires SQLOCK(sq)
* qtimeout() can't fail and can't sleep, so panic if memory is not
* available.
*/
/*
* the callbflags in the sq use the same flags. They get anded
* in the callbwrapper to determine if a qun* of this callback type
* is required. This is not a request to cancel.
*/
/* check new timeout version return codes */
/* use local id because the cbp memory could be free by now */
return (tid);
}
{
/*
* you don't want the timeout firing before its params are set up
* callbparams_alloc() acquires SQLOCK(sq) if successful.
*/
return ((bufcall_id_t)0);
/*
* the callbflags in the sq use the same flags. They get anded
* in the callbwrapper to determine if a qun* of this callback type
* is required. This is not a request to cancel.
*/
/* check new timeout version return codes */
if (bid == 0) {
}
/* use local id because the params memory could be free by now */
return (bid);
}
/*
* cancel a timeout callback which enters the inner perimeter.
* cancelling of all callback types on a given syncq is serialized.
* the SQ_CALLB_BYPASSED flag indicates that the callback fn did
* not execute. The quntimeout return value needs to reflect this.
* As with out existing callback programming model - callbacks must
* be cancelled before a close completes - so ensuring that the sq
* is valid when the callback wrapper is executed.
*/
{
/* callbacks are processed serially on each syncq */
}
}
if (ret != -1) {
/* The wrapper was never called - need to free based on id */
}
ret = 0; /* this was how much time left */
}
sq->sq_callbflags = 0;
}
return (ret);
}
void
{
/* callbacks are processed serially on each syncq */
}
}
/*
* No indication from unbufcall if the callback has already run.
* Always attempt to free it.
*/
sq->sq_callbflags = 0;
}
}
/*
* Associate the stream with an instance of the bottom driver. This
* function is called by APIs that establish or modify the hardware
* association (ppa) of an open stream. Two examples of such
* post-open(9E) APIs are the dlpi(7p) DL_ATTACH_REQ message, and the
* ndd(1M) "instance=" ioctl(2). This interface may be called from a
* stream driver's wput procedure and from within syncq perimeters,
* so it can't block.
*
* The qassociate() "model" is that it should drive attach(9E), yet it
* can't really do that because driving attach(9E) is a blocking
* operation. Instead, the qassociate() implementation has complex
* dependencies on the implementation behavior of other parts of the
* kernel to ensure all appropriate instances (ones that have not been
* made inaccessible by DR) are attached at stream open() time, and
* that they will not autodetach. The code relies on the fact that an
* open() of a stream that ends up using qassociate() always occurs on
* a minor node created with CLONE_DEV. The open() comes through
* clnopen() and since clnopen() calls ddi_hold_installed_driver() we
* attach all instances and mark them DN_NO_AUTODETACH (given
* DN_DRIVER_HELD is maintained correctly).
*
* Since qassociate() can't really drive attach(9E), there are corner
* cases where the compromise described above leads to qassociate()
* returning failure. This can happen when administrative functions
* that cause detach(9E), such as "update_drv" or "modunload -i", are
* performed on the driver between the time the stream was opened and
* the time its hardware association was established. Although this can
* theoretically be an arbitrary amount of time, in practice the window
* is usually quite small, since applications almost always issue their
* hardware association request immediately after opening the stream,
* and do not typically switch association while open. When these
* corner cases occur, and qassociate() finds the requested instance
* detached, it will return failure. This failure should be propagated
* to the requesting administrative application using the appropriate
* post-open(9E) API error mechanism.
*
* All qassociate() callers are expected to check for and gracefully handle
* failure return, propagating errors back to the requesting administrative
* application.
*/
int
{
if (instance == -1) {
return (0);
}
return (-1);
return (0);
}
/*
* This routine is the SVR4MP 'replacement' for
* hat_getkpfnum. The only major difference is
* the return value for illegal addresses - since
* sunm_getkpfnum() and srmmu_getkpfnum() both
* return '-1' for bogus mappings, we can (more or
* less) return the value directly.
*/
{
}
/*
* This is used to set the timeout value for cv_timed_wait() or
* cv_timedwait_sig().
*/
void
{
}