/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
* Copyright 2016 Joyent, Inc.
*/
/*
* Copyright (c) 2009-2010, Intel Corporation.
* All rights reserved.
*/
/*
* ACPI CA OSL for Solaris x86
*/
#include <sys/pci_cfgspace.h>
#include <sys/x86_archext.h>
/* local functions */
static int CompressEisaID(char *np);
static int acpica_query_bbn_problem(void);
static void acpica_devinfo_handler(ACPI_HANDLE, void *);
/*
* Event queue vars
*/
int acpica_eventq_init = 0;
/*
 * Priorities relative to minclsyspri that each taskq
 * runs at; OSL_NOTIFY_HANDLER needs to run at a higher
* priority than OSL_GPE_HANDLER. There's an implicit
* assumption that no priority here results in exceeding
* maxclsyspri.
* Note: these initializations need to match the order of
* ACPI_EXECUTE_TYPE.
*/
static int osl_eventq_pri_delta[OSL_EC_BURST_HANDLER + 1] = {
	0,	/* OSL_GLOBAL_LOCK_HANDLER */
2, /* OSL_NOTIFY_HANDLER */
0, /* OSL_GPE_HANDLER */
0, /* OSL_DEBUGGER_THREAD */
0, /* OSL_EC_POLL_HANDLER */
0 /* OSL_EC_BURST_HANDLER */
};
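/*
 * Event handling taskqs, one per handler type (type and size inferred
 * from the uses in init_event_queues() and AcpiOsWaitEventsComplete()).
 */
static ddi_taskq_t *osl_eventq[OSL_EC_BURST_HANDLER + 1];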
/*
* Note, if you change this path, you need to update
* /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
*/
/* non-zero while scan_d2a_map() is working */
static int scanning_d2a_map = 0;
static int d2a_done = 0;
/* features supported by ACPICA and ACPI device configuration. */
/* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
int acpica_use_safe_delay = 0;
/* CPU mapping data */
struct cpu_map_item {
};
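/*
 * Each cpu_map entry is assumed to associate a cpu id, an ACPI processor
 * id, an APIC id and the ACPI object handle; see the mapping notes on
 * acpica_add_processor_to_map() and acpica_map_cpu() below.
 */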
static int cpu_map_count_max = 0;
static int cpu_map_count = 0;
static int cpu_map_built = 0;
/*
* On systems with the uppc PSM only, acpica_map_cpu() won't be called at all.
* This flag is used to check for uppc-only systems by detecting whether
* acpica_map_cpu() has been called or not.
*/
static int cpu_map_called = 0;
/* buffer for AcpiOsVprintf() */
static int acpi_osl_pr_buflen;
#define	D2ADEBUG
/*
 * Tear down the taskqs used for event handling.
 */
static void
{
int i;
/*
* destroy event queues
*/
for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
if (osl_eventq[i])
}
}
/*
 * Create the taskqs used for event handling, one per handler type.
 */
static ACPI_STATUS
{
int i, error = 0;
/*
* Initialize event queues
*/
/* Always allocate only 1 thread per queue to force FIFO execution */
for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
osl_eventq_pri_delta[i] + minclsyspri, 0);
if (osl_eventq[i] == NULL)
error++;
}
if (error != 0) {
#ifdef DEBUG
#endif
return (AE_ERROR);
}
acpica_eventq_init = 1;
return (AE_OK);
}
/*
* One-time initialization of OSL layer
*/
AcpiOsInitialize(void)
{
/*
* Allocate buffer for AcpiOsVprintf() here to avoid
* kmem_alloc()/kmem_free() at high PIL
*/
if (acpi_osl_pr_buffer != NULL)
return (AE_OK);
}
/*
* One-time shut-down of OSL layer
*/
AcpiOsTerminate(void)
{
if (acpi_osl_pr_buffer != NULL)
return (AE_OK);
}
{
/*
* For EFI firmware, the root pointer is defined in EFI systab.
 * The boot code processes the table and puts the physical address
 * in the acpi-root-tab property.
*/
return (Address);
}
/*ARGSUSED*/
{
*NewVal = 0;
return (AE_OK);
}
static void
{
/*LINTED*/
/* copy the string */;
*dest = '\0';
}
{
int count;
/* File name format is "signature_oemid_oemtableid.dat" */
*NewTable = 0;
return (AE_OK);
} else {
if (count >= MAX_DAT_FILE_SIZE) {
*NewTable = 0;
} else {
}
}
return (AE_OK);
}
{
return (AE_SUPPORT);
}
/*
* ACPI semaphore implementation
*/
typedef struct {
} acpi_sema_t;
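/*
 * A minimal sketch of what acpi_sema_t is assumed to contain: a kmutex_t
 * and kcondvar_t pair plus unit counters, so that the wait path below can
 * block until enough units are available or the timeout expires.
 */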
/*
 * Initialize an ACPI semaphore.
 */
void
{
/* no need to enter mutex here at creation */
}
/*
 * Destroy an ACPI semaphore.
 */
void
{
}
/*
 * Acquire units from an ACPI semaphore, waiting up to wait_time
 * milliseconds if necessary.
 */
{
/*
* Enough units available, no blocking
*/
return (rv);
} else if (wait_time == 0) {
/*
* Not enough units available and timeout
* specifies no blocking
*/
return (rv);
}
/*
* Not enough units available and timeout specifies waiting
*/
if (wait_time != ACPI_WAIT_FOREVER)
deadline = ddi_get_lbolt() +
do {
if (wait_time == ACPI_WAIT_FOREVER)
break;
}
/* if we dropped out of the wait with AE_OK, we got the units */
return (rv);
}
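/*
 * Illustrative sketch (an assumption, not necessarily the code above): the
 * millisecond timeout would typically be converted to an absolute lbolt
 * deadline, e.g.
 *	deadline = ddi_get_lbolt() + (clock_t)drv_usectohz(wait_time * 1000);
 * and the loop would then block with cv_timedwait(&cv, &mutex, deadline).
 */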
/*
 * Return units to an ACPI semaphore, waking any waiters.
 */
void
{
}
{
return (AE_BAD_PARAMETER);
return (AE_OK);
}
{
return (AE_BAD_PARAMETER);
return (AE_OK);
}
{
return (AE_BAD_PARAMETER);
}
{
return (AE_BAD_PARAMETER);
return (AE_OK);
}
{
return (AE_BAD_PARAMETER);
return (AE_OK);
}
void
{
return;
}
{
return (AE_BAD_PARAMETER);
/* spin */;
} else
return (AE_OK);
}
void
{
}
void *
{
return (tmp_ptr);
}
void
{
tmp_ptr -= 1;
}
static void *dummy_ioapicadr;
void
acpica_find_ioapics(void)
{
if (acpi_mapic_dtp != NULL)
return; /* already parsed table */
return;
napics_found = 0;
/*
* Search the MADT for ioapics
*/
madt_seen = sizeof (*acpi_mapic_dtp);
case ACPI_MADT_TYPE_IO_APIC:
if (napics_found < MAX_IO_APIC) {
ioapic_paddr[napics_found++] =
}
break;
default:
break;
}
/* advance to next entry */
}
if (dummy_ioapicadr == NULL)
}
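/*
 * The dummy ioapic is presumably backed by a scratch page allocated once
 * above; the mapping routine below hands that page back whenever ACPI
 * tries to map a real ioapic, so AML accesses to ioapic registers are
 * redirected harmlessly.
 */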
void *
{
int i;
/*
 * If the ioapic address table is populated, check whether the caller is
 * trying to map an ioapic; if so, return a pointer to a dummy ioapic instead.
*/
for (i = 0; i < napics_found; i++) {
return (dummy_ioapicadr);
}
/* FUTUREWORK: test PhysicalAddress for > 32 bits */
}
void
{
/*
* Check if trying to unmap dummy ioapic address.
*/
if (LogicalAddress == dummy_ioapicadr)
return;
}
/*ARGSUSED*/
{
/* UNIMPLEMENTED: not invoked by ACPI CA code */
return (AE_NOT_IMPLEMENTED);
}
void *acpi_isr_context;
{
int status;
if (status == ACPI_INTERRUPT_HANDLED) {
return (DDI_INTR_CLAIMED);
} else {
return (DDI_INTR_UNCLAIMED);
}
}
static int acpi_intr_hooked = 0;
void *Context)
{
int retval;
int sci_vect;
/*
*/
return (AE_ERROR);
}
#ifdef DEBUG
#endif
if (retval) {
acpi_intr_hooked = 1;
return (AE_OK);
} else
return (AE_BAD_PARAMETER);
}
{
#ifdef DEBUG
#endif
if (acpi_intr_hooked) {
acpi_intr_hooked = 0;
}
return (AE_OK);
}
AcpiOsGetThreadId(void)
{
/*
* ACPI CA doesn't care what actual value is returned as long
* as it is non-zero and unique to each existing thread.
* ACPI CA assumes that thread ID is castable to a pointer,
* so we use the current thread pointer.
*/
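	/*
	 * A minimal sketch of the above (the exact cast may differ):
	 *	return ((ACPI_THREAD_ID)(uintptr_t)curthread);
	 */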
}
/*
 * Dispatch a callback to the appropriate event handling taskq.
 */
void *Context)
{
if (!acpica_eventq_init) {
/*
* Create taskqs for event handling
*/
if (init_event_queues() != AE_OK)
return (AE_ERROR);
}
DDI_NOSLEEP) == DDI_FAILURE) {
#ifdef DEBUG
#endif
return (AE_ERROR);
}
return (AE_OK);
}
void
AcpiOsWaitEventsComplete(void)
{
int i;
/*
* Wait for event queues to be empty.
*/
for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
if (osl_eventq[i] != NULL) {
ddi_taskq_wait(osl_eventq[i]);
}
}
}
void
{
/*
* During kernel startup, before the first tick interrupt
 * has taken place, we can't call delay; very late in
 * kernel shutdown, clock interrupts are blocked, so delay
 * doesn't work then either.
* So we busy wait if lbolt == 0 (kernel startup)
* or if acpica_use_safe_delay has been set to a
* non-zero value.
*/
if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay)
else
}
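/*
 * Illustrative sketch (assumption): the busy-wait branch above would use
 * drv_usecwait(), while the normal branch would use
 * delay(drv_usectohz(<microseconds>)) so the CPU can be yielded.
 */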
void
{
}
/*
* Implementation of "Windows 2001" compatible I/O permission map
*
*/
#define OSL_IO_NONE (0)
static struct io_perm {
} osl_io_perm[] = {
};
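/*
 * Each osl_io_perm entry is assumed to describe an I/O port range plus
 * OSL_IO_READ/OSL_IO_WRITE permission bits; osl_io_find_perm() returns
 * the entry covering a given port so the port read/write handlers can
 * veto an access.
 */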
/*
 * Find the permission map entry covering the given I/O port address.
 */
static struct io_perm *
{
struct io_perm *p;
p = osl_io_perm;
while (p != NULL) {
break;
}
return (p);
}
/*
 * I/O port read, subject to the permission map.
 */
{
struct io_perm *p;
/* verify permission */
p = osl_io_find_perm(Address);
if (p && (p->perm & OSL_IO_READ) == 0) {
*Value = 0xffffffff;
return (AE_ERROR);
}
switch (Width) {
case 8:
break;
case 16:
break;
case 32:
break;
default:
return (AE_BAD_PARAMETER);
}
return (AE_OK);
}
{
struct io_perm *p;
/* verify permission */
p = osl_io_find_perm(Address);
if (p && (p->perm & OSL_IO_WRITE) == 0) {
return (AE_ERROR);
}
switch (Width) {
case 8:
break;
case 16:
break;
case 32:
break;
default:
return (AE_BAD_PARAMETER);
}
return (AE_OK);
}
/*
 * Helper for the physical memory read/write handlers; accesses 1, 2, 4
 * or 8 bytes at the mapped address.
 */
static void
{
switch (maplen) {
case 1:
break;
case 2:
break;
case 4:
break;
case 8:
break;
default:
Width);
break;
}
}
{
return (AE_OK);
}
{
return (AE_OK);
}
{
switch (Width) {
case 8:
break;
case 16:
break;
case 32:
break;
case 64:
default:
return (AE_BAD_PARAMETER);
}
return (AE_OK);
}
/*
 * PCI configuration space write; silently dropped until config writes
 * are enabled.
 */
{
if (!acpica_write_pci_config_ok) {
return (AE_OK);
}
switch (Width) {
case 8:
break;
case 16:
break;
case 32:
break;
case 64:
default:
return (AE_BAD_PARAMETER);
}
return (AE_OK);
}
/*
* Called with ACPI_HANDLEs for both a PCI Config Space
* OpRegion and (what ACPI CA thinks is) the PCI device
* to which this ConfigSpace OpRegion belongs.
*
* ACPI CA uses _BBN and _ADR objects to determine the default
* values for bus, segment, device and function; anything ACPI CA
* can't figure out from the ACPI tables will be 0. One very
* old 32-bit x86 system is known to have broken _BBN; this is
* not addressed here.
*
* Some BIOSes implement _BBN() by reading PCI config space
* on bus #0 - which means that we'll recurse when we attempt
* to create the devinfo-to-ACPI map. If Derive is called during
* scan_d2a_map, we don't translate the bus # and return.
*
* We get the parent of the OpRegion, which must be a PCI
* node, fetch the associated devinfo node and snag the
* b/d/f from it.
*/
void
ACPI_PCI_ID **PciId)
{
/*
* See above - avoid recursing during scanning_d2a_map.
*/
if (scanning_d2a_map)
return;
/*
* Get the OpRegion's parent
*/
return;
/*
* If we've mapped the ACPI node to the devinfo
* tree, use the devinfo reg property
*/
}
}
/*ARGSUSED*/
{
/* Always says yes; all mapped memory assumed readable */
return (1);
}
/*ARGSUSED*/
{
/* Always says yes; all mapped memory assumed writable */
return (1);
}
AcpiOsGetTimer(void)
{
/* gethrtime() returns 1nS resolution; convert to 100nS granules */
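	return (gethrtime() / 100);	/* 1ns units -> 100ns granules */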
}
static struct AcpiOSIFeature_s {
const char *feature_name;
} AcpiOSIFeatures[] = {
{ ACPI_FEATURE_OSI_MODULE, "Module Device" },
{ 0, "Processor Device" }
};
/*ARGSUSED*/
{
int i;
for (i = 0; i < sizeof (AcpiOSIFeatures) / sizeof (AcpiOSIFeatures[0]);
i++) {
continue;
}
/* Check whether required core features are available. */
if (AcpiOSIFeatures[i].control_flag != 0 &&
AcpiOSIFeatures[i].control_flag) {
break;
}
/* Feature supported. */
return (AE_OK);
}
return (AE_SUPPORT);
}
/*ARGSUSED*/
{
return (AE_OK);
}
{
/* FUTUREWORK: debugger support */
return (AE_OK);
}
{
}
/*
* When != 0, sends output to console
*/
int acpica_console_out = 0;
int acpica_outbuf_offset;
/*
 * Buffer console output and flush it on newline or when the buffer fills.
 */
static void
{
int out_remaining;
/*
* copy the supplied buffer into the output buffer
* when we hit a '\n' or overflow the output buffer,
* output and reset the output buffer
*/
while (c = *bufp++) {
*outp++ = c;
if (c == '\n' || --out_remaining == 0) {
*outp = '\0';
switch (acpica_console_out) {
case 1:
break;
case 2:
break;
case 0:
default:
(void) strlog(0, 0, 0,
break;
}
acpica_outbuf_offset = 0;
}
}
}
void
{
/*
* If AcpiOsInitialize() failed to allocate a string buffer,
* resort to vprintf().
*/
if (acpi_osl_pr_buffer == NULL) {
return;
}
/*
* It is possible that a very long debug output statement will
* be truncated; this is silently ignored.
*/
}
void
{
/* FUTUREWORK: debugger support */
#ifdef DEBUG
#endif
}
{
/* FUTUREWORK: debugger support */
return (0);
}
/*
* Device tree binding
*/
static ACPI_STATUS
{
/* Check whether device exists. */
/*
* Skip object if device doesn't exist.
* According to ACPI Spec,
 * 1) setting either bit 0 or bit 3 means that the device exists.
 * 2) absence of the _STA method means all status bits are set.
*/
return (AE_CTRL_DEPTH);
}
return (AE_OK);
}
if (acpi_has_broken_bbn) {
/* Decree _BBN == n from PCI<n> */
return (AE_CTRL_TERMINATE);
}
return (AE_CTRL_TERMINATE);
}
return (AE_CTRL_TERMINATE);
}
} else if (busno == 0) {
return (AE_CTRL_TERMINATE);
}
return (AE_CTRL_DEPTH);
}
static int
{
/* initialize static flag by querying ACPI namespace for bug */
if (acpi_has_broken_bbn == -1)
(void **)&busobj);
return (AE_OK);
}
}
return (AE_ERROR);
}
static ACPI_STATUS
{
/* Check whether device exists. */
/*
* Skip object if device doesn't exist.
* According to ACPI Spec,
 * 1) setting either bit 0 or bit 3 means that the device exists.
 * 2) absence of the _STA method means all status bits are set.
*/
return (AE_CTRL_DEPTH);
}
return (AE_OK);
/*
* If we find more than one bus with a 0 _BBN
* we have the problem that BigBear's BIOS shows
*/
return (AE_CTRL_TERMINATE);
} else {
/*
*/
return (AE_CTRL_DEPTH);
}
}
/*
 * Look for the ACPI problem where _BBN is zero for multiple PCI buses.
* This is a clear ACPI bug, but we have a workaround in acpica_find_pcibus()
* below if it exists.
*/
static int
acpica_query_bbn_problem(void)
{
int zerobbncnt;
void *rv;
zerobbncnt = 0;
}
}
static int
hexdig(int c)
{
/*
* Get hex digit:
*
* Returns the 4-bit hex digit named by the input character. Returns
* zero if the input character is not valid hex!
*/
int x = ((c < 'a') || (c > 'z')) ? c : (c - ' ');
int j = sizeof (hextab);
while (--j && (x != hextab[j])) {
}
return (j);
}
static int
{
/*
* Compress an EISA device name:
*
* This routine converts a 7-byte ASCII device name into the 4-byte
* compressed form used by EISA (50 bytes of ROM to save 1 byte of
* NV-RAM!)
*/
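	/*
	 * Sketch of the compressed layout (an assumption): the three letters
	 * are packed as 5-bit values (letter - '@') into the upper bits and
	 * the four hex digits follow as 4-bit nibbles, 32 bits in all.
	 */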
}
{
ACPI_TYPE_INTEGER)) == AE_OK)
return (status);
}
static int
{
AcpiOsFree(rv);
return (AE_OK);
char *stringData;
/* Convert the string into an EISA ID */
AcpiOsFree(rv);
return (AE_ERROR);
}
/*
* If the string is an EisaID, it must be 7
* characters; if it's an ACPI ID, it will be 8
* (and we don't care about ACPI ids here).
*/
AcpiOsFree(rv);
return (AE_ERROR);
}
AcpiOsFree(rv);
return (AE_OK);
} else
AcpiOsFree(rv);
}
return (AE_ERROR);
}
/*
* Create linkage between devinfo nodes and ACPI nodes
*/
{
/*
* Tag the devinfo node with the ACPI name
*/
if (ACPI_FAILURE(status)) {
} else {
/*
* Tag the ACPI node with the dip
*/
}
return (status);
}
/*
* Destroy linkage between devinfo nodes and ACPI nodes
*/
{
(void) acpica_unset_devinfo(acpiobj);
return (AE_OK);
}
/*
* Return the ACPI device node matching the CPU dev_info node.
*/
{
int i;
/*
* if cpu_map itself is NULL, we're a uppc system and
* acpica_build_processor_map() hasn't been called yet.
* So call it here
*/
(void) acpica_build_processor_map();
return (AE_ERROR);
}
if (cpu_id < 0) {
return (AE_ERROR);
}
/*
* search object with cpuid in cpu_map
*/
for (i = 0; i < cpu_map_count; i++) {
break;
}
}
return (AE_OK);
}
/* Handle special case for uppc-only systems. */
if (cpu_map_called == 0) {
if (apicid != UINT32_MAX) {
for (i = 0; i < cpu_map_count; i++) {
break;
}
}
return (AE_OK);
}
}
}
return (AE_ERROR);
}
/*
* Determine if this object is a processor
*/
static ACPI_STATUS
{
unsigned long acpi_id;
return (AE_OK);
if (objtype == ACPI_TYPE_PROCESSOR) {
/* process a Processor */
return (status);
}
} else if (objtype == ACPI_TYPE_DEVICE) {
/* process a processor Device */
"!acpica: error probing Processor Device\n");
return (status);
}
"!acpica: error probing Processor Device _UID\n");
return (AE_ERROR);
}
}
return (AE_OK);
}
void
scan_d2a_map(void)
{
char *device_type_prop;
int bus;
static int map_error = 0;
return;
scanning_d2a_map = 1;
/*
* Find all child-of-root PCI buses, and find their corresponding
* ACPI child-of-root PCI nodes. For each one, add to the
* d2a table.
*/
/* prune non-PCI nodes */
continue;
continue;
}
/*
 * To get the bus number of dip, get its first child and get that child's
* bus number. If NULL, just continue, because we don't
* care about bus nodes with no children anyway.
*/
continue;
#ifdef D2ADEBUG
#endif
map_error = 1;
scanning_d2a_map = 0;
d2a_done = 1;
return;
}
#ifdef D2ADEBUG
#endif
map_error = 1;
continue;
}
/* call recursively to enumerate subtrees */
}
scanning_d2a_map = 0;
d2a_done = 1;
}
/*
 * For all acpi child devices of acpiobj, find their matching devinfo
 * children under dip. The bus is assumed to already be a match from the
 * caller, and is
* used here only to record in the d2a entry. Recurse if necessary.
*/
static void
{
char *device_type_prop;
== AE_OK) {
continue;
/* look through all the immediate children of dip */
continue;
/* dev must match; function must match or wildcard */
continue;
/* found a match, record it */
/* if we find a bridge, recurse from here */
DDI_PROP_DONTPASS, "device_type",
&device_type_prop) == DDI_PROP_SUCCESS) {
}
/* done finding a match, so break now */
break;
}
}
}
/*
 * Extract the PCI bus/device/function from a devinfo node's "reg" property.
 */
int
{
int len;
return (-1);
if (len < (sizeof (pci_regspec_t) / sizeof (int))) {
return (-1);
}
return (0);
}
/*
* Return the ACPI device node matching this dev_info node, if it
* exists in the ACPI tree.
*/
{
char *acpiname;
#ifdef DEBUG
if (d2a_done == 0)
#endif
return (AE_ERROR);
}
ddi_prop_free((void *)acpiname);
return (status);
}
/*
* Manage OS data attachment to ACPI nodes
*/
/*
* Return the (dev_info_t *) associated with the ACPI node.
*/
{
void *ptr;
return (status);
}
/*
* Set the dev_info_t associated with the ACPI node.
*/
static ACPI_STATUS
{
return (status);
}
/*
* Unset the dev_info_t associated with the ACPI node.
*/
static ACPI_STATUS
{
}
/*
*
*/
void
{
/* no-op */
}
{
void *rv;
/*
* shouldn't be called more than once anyway
*/
if (cpu_map_built)
return (AE_OK);
/*
 * The ACPI device configuration driver has built mapping information
 * between processor ids and object handles; no need to probe again.
*/
cpu_map_built = 1;
return (AE_OK);
}
/*
* Look for Processor objects
*/
4,
NULL,
NULL,
&rv);
/*
* Look for processor Device objects
*/
NULL,
&rv);
cpu_map_built = 1;
return (status);
}
/*
* Grow cpu map table on demand.
*/
static void
acpica_grow_cpu_map(void)
{
if (cpu_map_count == cpu_map_count_max) {
KM_SLEEP);
if (cpu_map_count != 0) {
}
}
}
/*
* Maintain mapping information among (cpu id, ACPI processor id, APIC id,
 * ACPI handle). The mapping table will be set up in two steps:
* 1) acpica_add_processor_to_map() builds mapping among APIC id, ACPI
* processor id and ACPI object handle.
* 2) acpica_map_cpu() builds mapping among cpu id and ACPI processor id.
 * On systems which have ACPI device configuration for CPUs enabled,
* acpica_map_cpu() will be called after acpica_add_processor_to_map(),
* otherwise acpica_map_cpu() will be called before
* acpica_add_processor_to_map().
*/
{
int i;
return (AE_ERROR);
}
/*
* Special case for uppc
* If we're a uppc system and ACPI device configuration for CPU has
* been disabled, there won't be a CPU map yet because uppc psm doesn't
* call acpica_map_cpu(). So create one and use the passed-in processor
* as CPU 0
* Assumption: the first CPU returned by
* AcpiGetDevices/AcpiWalkNamespace will be the BSP.
* Unfortunately there appears to be no good way to ASSERT this.
*/
cpu_map_count = 1;
return (AE_OK);
}
for (i = 0; i < cpu_map_count; i++) {
break;
}
}
/*
 * ACPI alias objects may cause more than one object to have
 * the same ACPI processor id; only remember the
 * first object encountered.
*/
} else {
}
rc = AE_NO_MEMORY;
} else {
}
}
return (rc);
}
{
int i;
for (i = 0; i < cpu_map_count; i++) {
continue;
}
/* Free item if no more reference to it. */
if (i != cpu_map_count) {
}
}
break;
}
return (rc);
}
{
int i;
if (cpuid == -1) {
return (AE_ERROR);
}
cpu_map_called = 1;
for (i = 0; i < cpu_map_count; i++) {
break;
}
}
} else {
}
rc = AE_NO_MEMORY;
} else {
}
}
return (rc);
}
{
int i;
if (cpuid == -1) {
return (rc);
}
for (i = 0; i < cpu_map_count; i++) {
continue;
}
/* Free item if no more reference. */
if (i != cpu_map_count) {
}
}
break;
}
return (rc);
}
{
int i;
if (cpuid == -1) {
return (rc);
}
for (i = 0; i < cpu_map_count; i++) {
break;
}
}
return (rc);
}
{
int i;
for (i = 0; i < cpu_map_count; i++) {
break;
}
}
return (rc);
}
{
int i;
if (apicid == UINT32_MAX) {
return (rc);
}
for (i = 0; i < cpu_map_count; i++) {
break;
}
}
return (rc);
}
{
int i;
return (rc);
}
*cpuidp = -1;
for (i = 0; i < cpu_map_count; i++) {
break;
}
}
return (rc);
}
{
int i;
return (rc);
}
*rp = UINT32_MAX;
for (i = 0; i < cpu_map_count; i++) {
break;
}
}
return (rc);
}
{
int i;
return (rc);
}
*rp = UINT32_MAX;
for (i = 0; i < cpu_map_count; i++) {
break;
}
}
return (rc);
}
void
{
}
void
{
}
{
return (acpica_core_features & features);
}
void
{
}
void
{
}
{
return (acpica_devcfg_features & features);
}
void
{
*gbl_FADT = &AcpiGbl_FADT;
}
void
{
}
{
ulong_t v;
return (ACPI_UINT32_MAX);
}
return ((uint32_t)v);
}