pcie.c revision 83e6495b5bcdb3fbe09948670b92d3e265047dcc
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
#include <sys/sysmacros.h>
#include <sys/pci_impl.h>
#include <sys/pcie_impl.h>
#include <sys/pci_cfgacc.h>
/* Local function prototypes */
static void pcie_init_pfd(dev_info_t *);
static void pcie_fini_pfd(dev_info_t *);
#if defined(__i386) || defined(__amd64)
static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *,
    boolean_t *);
#endif /* defined(__i386) || defined(__amd64) */
#ifdef DEBUG
uint_t pcie_debug_flags = 0;
#endif /* DEBUG */
/* Variable to control default PCI-Express config settings */
/* xxx_fw are bits that are controlled by FW and should not be modified */
0xF800; /* Reserved Bits */
0xF000; /* Reserved Bits */
/* PCI-Express Base error defaults */
/* PCI-Express Device Control Register */
/* PCI-Express AER Root Control Register */
#define	PCIE_ROOT_SYS_ERR	(PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
				PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)
/* PCI-Express Root Error Command Register */
/* ECRC settings in the PCIe AER Control Register */
/*
* Instead of using #defines, have the platform's PCIe Root Complex driver set
* these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions. For
* x86 the closest thing to a PCIe root complex driver is NPE. For SPARC the
* closest PCIe root complex driver is PX.
*
* pcie_serr_disable_flag: disable SERR only (in RCR and command reg). x86
* systems may want to disable SERR in general. For root ports, enabling SERR
* causes NMIs which are not handled and results in a watchdog timeout error.
*/
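/*
 * Illustrative sketch (not part of the original source): a platform root
 * complex driver such as npe or px could adjust the default masks from its
 * attach(9E) path roughly as below, assuming the pcie_get_aer_uce_mask()/
 * pcie_set_aer_uce_mask() accessors defined later in this file. The flag
 * chosen here is an example only.
 */
#if 0
static void
example_rc_tune_error_masks(void)
{
	uint32_t uce_mask = pcie_get_aer_uce_mask();

	/* Additionally mask Unsupported Request errors on this platform. */
	pcie_set_aer_uce_mask(uce_mask | PCIE_AER_UCE_UR);
}
#endif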
/* Default severities needed for eversholt. Error handling doesn't care */
int pcie_disable_ari = 0;
static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
    int *max_supported);
/*
* modload support
*/
static struct modlmisc modlmisc = {
	&mod_miscops,	/* Type of module */
	"PCI Express Framework Module"
};
static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};
/*
* Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
* Currently used to send the pci.fabric ereports whose payload depends on the
* type of PCI device it is being sent for.
*/
char *pcie_nv_buf;
int
_init(void)
{
	int rval;

	rval = mod_install(&modlinkage);
	return (rval);
}
int
_fini()
{
	int rval;

	rval = mod_remove(&modlinkage);
	return (rval);
}
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
/* ARGSUSED */
int
pcie_init(dev_info_t *dip, caddr_t arg)
{
int ret = DDI_SUCCESS;
/*
* Create a "devctl" minor node to support DEVCTL_DEVICE_*
* and DEVCTL_BUS_* ioctls to this bus.
*/
DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
PCIE_DBG("Failed to create devctl minor node for %s%d\n",
return (ret);
}
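	/*
	 * Sketch of the minor-node creation described above (assumption, for
	 * illustration only; the PCI_MINOR_NUM()/PCI_DEVCTL_MINOR minor-number
	 * encoding used here is hypothetical).
	 */
#if 0
	if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR),
	    DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to create devctl minor node for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));
		return (ret);
	}
#endif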
/*
* On some x86 platforms, we observed unexpected hotplug
* initialization failures in recent years. The known cause
* is a hardware issue: while the affected PCI bridges have
* the Hotplug Capable bit set, the machine does not actually
* implement the expected ACPI object.
*
* We don't want to stop PCI driver attach and system boot
* just because of this hotplug initialization failure.
* Continue with a debug message printed.
*/
PCIE_DBG("%s%d: Failed setting hotplug framework\n",
#if defined(__sparc)
return (ret);
#endif /* defined(__sparc) */
}
return (DDI_SUCCESS);
}
/* ARGSUSED */
int
pcie_uninit(dev_info_t *dip)
{
int ret = DDI_SUCCESS;
(void) pcie_ari_disable(dip);
PCIE_DBG("Failed to uninitialize hotplug for %s%d\n",
return (ret);
}
return (ret);
}
/*
* PCIe module interface for enabling hotplug interrupt.
*
* It should be called after pcie_init() is done and bus driver's
* interrupt handlers have been attached.
*/
int
pcie_hpintr_enable(dev_info_t *dip)
{
if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
} else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
(void) pcishpc_enable_irqs(ctrl_p);
}
return (DDI_SUCCESS);
}
/*
* PCIe module interface for disabling hotplug interrupt.
*
* It should be called before pcie_uninit() is called and bus driver's
* interrupt handlers are detached.
*/
int
pcie_hpintr_disable(dev_info_t *dip)
{
if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
} else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
(void) pcishpc_disable_irqs(ctrl_p);
}
return (DDI_SUCCESS);
}
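/*
 * Usage sketch (assumption, not original code): a nexus driver such as pcieb
 * enables the hotplug interrupt only after its interrupt handlers are in
 * place, and disables it before removing them. The example_* functions below
 * are hypothetical.
 */
#if 0
static int
example_nexus_intr_attach(dev_info_t *dip)
{
	/* ... ddi_intr_alloc()/ddi_intr_add_handler()/ddi_intr_enable() ... */
	return (pcie_hpintr_enable(dip));
}

static void
example_nexus_intr_detach(dev_info_t *dip)
{
	(void) pcie_hpintr_disable(dip);
	/* ... ddi_intr_disable()/ddi_intr_remove_handler()/ddi_intr_free() ... */
}
#endif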
/* ARGSUSED */
int
pcie_intr(dev_info_t *dip)
{
return (pcie_hp_intr(dip));
}
/* ARGSUSED */
int
{
/*
* Make sure the open is for the right file type.
*/
return (EINVAL);
/*
* Handle the open by tracking the device state.
*/
return (EBUSY);
}
else
return (0);
}
/* ARGSUSED */
int
{
return (EINVAL);
return (0);
}
/* ARGSUSED */
int
{
struct devctl_iocdata *dcp;
int rv = DDI_SUCCESS;
/*
* We can use the generic implementation for devctl ioctl
*/
switch (cmd) {
case DEVCTL_DEVICE_GETSTATE:
case DEVCTL_DEVICE_ONLINE:
case DEVCTL_DEVICE_OFFLINE:
case DEVCTL_BUS_GETSTATE:
default:
break;
}
/*
* read devctl ioctl data
*/
return (EFAULT);
switch (cmd) {
case DEVCTL_BUS_QUIESCE:
if (bus_state == BUS_QUIESCED)
break;
break;
case DEVCTL_BUS_UNQUIESCE:
if (bus_state == BUS_ACTIVE)
break;
break;
case DEVCTL_BUS_RESET:
case DEVCTL_BUS_RESETALL:
case DEVCTL_DEVICE_RESET:
break;
default:
}
return (rv);
}
/* ARGSUSED */
int
{
if (dev == DDI_DEV_T_ANY)
goto skip;
if (PCIE_IS_HOTPLUG_CAPABLE(dip) &&
}
skip:
}
int
{
return (DDI_FAILURE);
/* Create a config access handle dedicated to error handling */
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
void
{
}
/*
* PCI-Express child device initialization.
* This function enables generic pci-express interrupts and error
* handling.
*
* @param pdip root dip (root nexus's dip)
* @param cdip child's dip (device's dip)
* @return DDI_SUCCESS or DDI_FAILURE
*/
/* ARGSUSED */
int
pcie_initchild(dev_info_t *cdip)
{
PCIE_DBG("%s: BUS not found.\n",
return (DDI_FAILURE);
}
return (DDI_FAILURE);
/*
* Update pcie_bus_t with real Vendor Id Device Id.
*
* For assigned devices in IOV environment, the OBP will return
* the properties of the root domain. The translate_devid() function
* translates them on such platforms, so that we can utilize the
* properties here to get the real Vendor Id and Device Id.
*
* For unassigned devices or devices in non-IOV environment, the
* operation below won't make a difference.
*
* The IOV implementation only supports assignment of PCIE
* endpoint devices. Devices under pci-pci bridges don't need
* operation like this.
*/
"device-id", -1);
"vendor-id", -1);
/* Clear the device's status register */
/* Setup the device's command register */
#if defined(__i386) || defined(__amd64)
/*
* If the device has an empty I/O or memory range, disable that type of
* access as it can cause a hang if enabled.
*/
if ((empty_io_range == B_TRUE) &&
(pcie_command_default & PCI_COMM_IO)) {
tmp16 &= ~PCI_COMM_IO;
PCIE_DBG("No I/O range found for %s, bdf 0x%x\n",
}
if ((empty_mem_range == B_TRUE) &&
(pcie_command_default & PCI_COMM_MAE)) {
tmp16 &= ~PCI_COMM_MAE;
PCIE_DBG("No Mem range found for %s, bdf 0x%x\n",
}
#endif /* defined(__i386) || defined(__amd64) */
/*
* If the device has a bus control register then program it
* based on the settings in the command register.
*/
if (PCIE_IS_BDG(bus_p)) {
/* Clear the device's secondary status register */
/* Setup the device's secondary command register */
/*
* Workaround for this Nvidia bridge. Don't enable the SERR
* enable bit in the bridge control register as it could lead to
* bogus NMIs.
*/
/*
* Enable Master Abort Mode only if URs have not been masked.
* For PCI and PCIe-PCI bridges, enabling this bit causes a
* Master Abort/UR to be forwarded as a UR/TA or SERR. If this
* bit is masked, posted requests are dropped and non-posted
* requests are returned with -1.
*/
if (pcie_aer_uce_mask & PCIE_AER_UCE_UR)
else
reg16);
}
if (PCIE_IS_PCIE(bus_p)) {
/* Setup PCIe device control register */
/* Enable PCIe errors */
}
== PCIE_ARI_DEVICE)) {
}
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
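/*
 * Usage sketch (assumption, not original code): a PCIe nexus driver would
 * normally call pcie_initchild() while handling the DDI_CTLOPS_INITCHILD
 * bus_ctl(9E) operation, once the child devinfo node has been set up, and
 * fail the INITCHILD if it returns DDI_FAILURE. The example_* function is
 * hypothetical and assumes pcie_initchild() takes the child dip.
 */
#if 0
static int
example_nexus_initchild(dev_info_t *cdip)
{
	if (pcie_initchild(cdip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}
#endif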
static void
{
pfd_p->pe_severity_flags = 0;
pfd_p->pe_orig_severity_flags = 0;
/* Allocate the root fault struct for both RC and RP */
if (PCIE_IS_ROOT(bus_p)) {
}
if (PCIE_IS_BDG(bus_p))
if (PCIE_IS_PCIE(bus_p)) {
if (PCIE_IS_RP(bus_p))
PCIE_RP_REG(pfd_p) =
if (PCIE_IS_RP(bus_p)) {
} else if (PCIE_IS_PCIE_BDG(bus_p)) {
}
if (PCIX_ECC_VERSION_CHECK(bus_p)) {
PCIX_BDG_ECC_REG(pfd_p, 0) =
}
}
} else if (PCIE_IS_PCIX(bus_p)) {
if (PCIE_IS_BDG(bus_p)) {
if (PCIX_ECC_VERSION_CHECK(bus_p)) {
PCIX_BDG_ECC_REG(pfd_p, 0) =
}
} else {
if (PCIX_ECC_VERSION_CHECK(bus_p))
}
}
}
static void
{
if (PCIE_IS_PCIE(bus_p)) {
if (PCIX_ECC_VERSION_CHECK(bus_p)) {
sizeof (pf_pcix_ecc_regs_t));
sizeof (pf_pcix_ecc_regs_t));
}
sizeof (pf_pcix_bdg_err_regs_t));
}
if (PCIE_IS_RP(bus_p))
sizeof (pf_pcie_adv_rp_err_regs_t));
else if (PCIE_IS_PCIE_BDG(bus_p))
sizeof (pf_pcie_adv_bdg_err_regs_t));
sizeof (pf_pcie_adv_err_regs_t));
if (PCIE_IS_RP(bus_p))
sizeof (pf_pcie_rp_err_regs_t));
} else if (PCIE_IS_PCIX(bus_p)) {
if (PCIE_IS_BDG(bus_p)) {
if (PCIX_ECC_VERSION_CHECK(bus_p)) {
sizeof (pf_pcix_ecc_regs_t));
sizeof (pf_pcix_ecc_regs_t));
}
sizeof (pf_pcix_bdg_err_regs_t));
} else {
if (PCIX_ECC_VERSION_CHECK(bus_p))
sizeof (pf_pcix_ecc_regs_t));
sizeof (pf_pcix_err_regs_t));
}
}
if (PCIE_IS_BDG(bus_p))
sizeof (pf_pci_bdg_err_regs_t));
if (PCIE_IS_ROOT(bus_p)) {
}
}
/*
* Special functions to allocate pf_data_t's for PCIe root complexes.
* Note: Root Complex not Root Port
*/
void
{
pfd_p->pe_severity_flags = 0;
pfd_p->pe_orig_severity_flags = 0;
}
void
{
}
/*
* init pcie_bus_t for root complex
*
* Only a few of the fields in bus_t are valid for root complex.
* The fields that are bracketed are initialized in this routine:
*
* dev_info_t * <bus_dip>
* dev_info_t * bus_rp_dip
* ddi_acc_handle_t bus_cfg_hdl
* uint_t <bus_fm_flags>
* pcie_req_id_t bus_bdf
* pcie_req_id_t bus_rp_bdf
* uint32_t bus_dev_ven_id
* uint8_t bus_rev_id
* uint8_t <bus_hdr_type>
* uint16_t <bus_dev_type>
* uint8_t bus_bdg_secbus
* uint16_t bus_pcie_off
* uint16_t <bus_aer_off>
* uint16_t bus_pcix_off
* uint16_t bus_ecc_ver
* pci_bus_range_t bus_bus_range
* ppb_ranges_t * bus_addr_ranges
* int bus_addr_entries
* pci_regspec_t * bus_assigned_addr
* int bus_assigned_entries
* pf_data_t * bus_pfd
* pcie_domain_t * <bus_dom>
* int bus_mps
* uint64_t bus_cfgacc_base
* void * bus_plat_private
*/
void
{
/* Fake that there are AER logs */
/* Needed only for handle lookup */
}
void
{
}
/*
* partially init pcie_bus_t for device (dip,bdf) for accessing pci
* config space
*
* This routine is invoked during boot, either after creating a devinfo node
* (x86 case) or during px driver attach (sparc case); it is also invoked
* in hotplug context after a devinfo node is created.
*
* The fields that are bracketed are initialized if flag PCIE_BUS_INITIAL
* is set:
*
* dev_info_t * <bus_dip>
* dev_info_t * <bus_rp_dip>
* ddi_acc_handle_t bus_cfg_hdl
* uint_t bus_fm_flags
* pcie_req_id_t <bus_bdf>
* pcie_req_id_t <bus_rp_bdf>
* uint32_t <bus_dev_ven_id>
* uint8_t <bus_rev_id>
* uint8_t <bus_hdr_type>
* uint16_t <bus_dev_type>
* uint8_t <bus_bdg_secbus>
* uint16_t <bus_pcie_off>
* uint16_t <bus_aer_off>
* uint16_t <bus_pcix_off>
* uint16_t <bus_ecc_ver>
* pci_bus_range_t bus_bus_range
* ppb_ranges_t * bus_addr_ranges
* int bus_addr_entries
* pci_regspec_t * bus_assigned_addr
* int bus_assigned_entries
* pf_data_t * bus_pfd
* pcie_domain_t * bus_dom
* int bus_mps
* uint64_t bus_cfgacc_base
* void * bus_plat_private
*
* The fields that are bracketed are initialized if flag PCIE_BUS_FINAL
* is set:
*
* dev_info_t * bus_dip
* dev_info_t * bus_rp_dip
* ddi_acc_handle_t bus_cfg_hdl
* uint_t bus_fm_flags
* pcie_req_id_t bus_bdf
* pcie_req_id_t bus_rp_bdf
* uint32_t bus_dev_ven_id
* uint8_t bus_rev_id
* uint8_t bus_hdr_type
* uint16_t bus_dev_type
* uint8_t <bus_bdg_secbus>
* uint16_t bus_pcie_off
* uint16_t bus_aer_off
* uint16_t bus_pcix_off
* uint16_t bus_ecc_ver
* pci_bus_range_t <bus_bus_range>
* ppb_ranges_t * <bus_addr_ranges>
* int <bus_addr_entries>
* pci_regspec_t * <bus_assigned_addr>
* int <bus_assigned_entries>
* pf_data_t * <bus_pfd>
* pcie_domain_t * bus_dom
* int bus_mps
* uint64_t bus_cfgacc_base
* void * <bus_plat_private>
*/
{
int range_size;
if (!(flags & PCIE_BUS_INITIAL))
goto initial_done;
/* Save the Vendor ID, Device ID and revision ID */
/* Save the Header Type */
/*
* Figure out the device type and all the relevant capability offsets
*/
/* set default value */
goto caps_done; /* capability not supported */
/* Relevant conventional capabilities first */
/* Conventional caps: PCI_CAP_ID_PCI_E, PCI_CAP_ID_PCIX */
num_cap = 2;
switch (bus_p->bus_hdr_type) {
case PCI_HEADER_ZERO:
break;
case PCI_HEADER_PPB:
break;
case PCI_HEADER_CARDBUS:
break;
default:
goto caps_done;
}
switch (capid) {
case PCI_CAP_ID_PCI_E:
/* Check and save PCIe hotplug capability information */
& PCIE_PCIECAP_SLOT_IMPL) &&
num_cap--;
break;
case PCI_CAP_ID_PCIX:
if (PCIE_IS_BDG(bus_p))
bus_p->bus_ecc_ver =
else
bus_p->bus_ecc_ver =
num_cap--;
break;
default:
break;
}
}
/* Check and save PCI hotplug (SHPC) capability information */
if (PCIE_IS_BDG(bus_p)) {
base + PCI_CAP_NEXT_PTR)) {
if (capid == PCI_CAP_ID_PCI_HOTPLUG) {
break;
}
}
}
/* Then, relevant extended capabilities */
if (!PCIE_IS_PCIE(bus_p))
goto caps_done;
/* Extended caps: PCIE_EXT_CAP_ID_AER */
if (capid == PCI_CAP_EINVAL32)
break;
== PCIE_EXT_CAP_ID_AER) {
break;
}
}
/* save RP dip and RP bdf */
if (PCIE_IS_RP(bus_p)) {
} else {
/*
* If RP dip and RP bdf in parent's bus_t have
* been initialized, simply use these instead of
* continuing up to the RC.
*/
break;
}
/*
* When debugging be aware that some NVIDIA x86
* architectures have 2 nodes for each RP, One at Bus
* 0x0 and one at Bus 0x80. The requester is from Bus
* 0x80
*/
if (PCIE_IS_ROOT(parent_bus_p)) {
break;
}
}
}
bus_p->bus_fm_flags = 0;
if (PCIE_IS_HOTPLUG_CAPABLE(dip))
"hotplug-capable");
if (!(flags & PCIE_BUS_FINAL))
goto final_done;
/* already initialized? */
if (PCIE_IS_BDG(bus_p)) {
/* get "bus_range" property */
range_size = sizeof (pci_bus_range_t);
!= DDI_PROP_SUCCESS) {
errstr = "Cannot find \"bus-range\" property";
"PCIE init err info failed BDF 0x%x:%s\n",
}
/* get secondary bus number */
/* Get "ranges" property */
bus_p->bus_addr_entries = 0;
}
/* save "assigned-addresses" property array, ignore failues */
else
bus_p->bus_assigned_entries = 0;
PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
#ifdef DEBUG
#endif
return (bus_p);
}
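/*
 * Usage sketch (assumption, not original code): callers perform the two-phase
 * initialization described above, creating the bus_t with PCIE_BUS_INITIAL as
 * soon as the devinfo node exists and completing it with PCIE_BUS_FINAL once
 * the "ranges"/"assigned-addresses" properties are final. The signature
 * pcie_init_bus(dev_info_t *, pcie_req_id_t, uint8_t) is assumed here.
 */
#if 0
static void
example_init_bus_two_phase(dev_info_t *dip, pcie_req_id_t bdf)
{
	pcie_bus_t *bus_p;

	/* early: right after the devinfo node is created */
	bus_p = pcie_init_bus(dip, bdf, PCIE_BUS_INITIAL);
	if (bus_p == NULL)
		return;

	/* later: once the device's properties have been finalized */
	(void) pcie_init_bus(dip, bdf, PCIE_BUS_FINAL);
}
#endif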
/*
* Invoked before destroying devinfo node, mostly during hotplug
* operation to free pcie_bus_t data structure
*/
/* ARGSUSED */
void
{
if (flags & PCIE_BUS_INITIAL) {
/* zero out the fields that have been destroyed */
bus_p->bus_assigned_entries = 0;
bus_p->bus_addr_entries = 0;
}
if (flags & PCIE_BUS_FINAL) {
if (PCIE_IS_HOTPLUG_CAPABLE(dip)) {
"hotplug-capable");
}
}
}
int
{
if (!bus_p)
return (DDI_FAILURE);
return (pcie_enable_ce(cdip));
}
/*
* PCI-Express child device de-initialization.
* This function disables generic pci-express interrupts and error
* handling.
*/
void
{
}
/*
* find the root complex dip
*/
{
break;
}
return (rcdip);
}
static boolean_t
{
char *device_type;
return (B_FALSE);
return (B_FALSE);
}
return (B_TRUE);
}
typedef struct {
/*ARGSUSED*/
static int
{
if (!pcie_is_pci_device(dip))
goto out;
goto out;
} else {
}
return (DDI_WALK_CONTINUE);
out:
return (DDI_WALK_PRUNECHILD);
}
void
{
int circular_count;
}
void
{
int circular_count;
}
void
pcie_enable_errors(dev_info_t *dip)
{
/*
* Clear any pending errors
*/
if (!PCIE_IS_PCIE(bus_p))
return;
/*
* Enable Baseline Error Handling but leave CE reporting off (poweron
* default).
*/
}
/* Enable Root Port Baseline Error Receiving */
if (PCIE_IS_ROOT(bus_p) &&
reg16);
}
/*
* Enable PCI-Express Advanced Error Handling if Exists
*/
if (!PCIE_HAS_AER(bus_p))
return;
/* Set Uncorrectable Severity */
reg32);
}
/* Enable Uncorrectable errors */
reg32);
}
/* Enable ECRC generation and checking */
}
/* Enable Secondary Uncorrectable errors if this is a bridge */
if (!PCIE_IS_PCIE_BDG(bus_p))
goto root;
/* Set Uncorrectable Severity */
reg32);
}
}
root:
/*
* Enable Root Control this is a Root device
*/
if (!PCIE_IS_ROOT(bus_p))
return;
}
}
/*
* This function is used for enabling CE reporting and setting the AER CE mask.
* When called from outside the pcie module it should always be preceded by
* a call to pcie_enable_errors.
*/
int
pcie_enable_ce(dev_info_t *dip)
{
if (!PCIE_IS_PCIE(bus_p))
return (DDI_SUCCESS);
/*
* The "pcie_ce_mask" property is used to control both the CE reporting
* enable field in the device control register and the AER CE mask. We
* leave CE reporting disabled if pcie_ce_mask is set to -1.
*/
/*
* Nothing to do since CE reporting has already been disabled.
*/
return (DDI_SUCCESS);
}
if (PCIE_HAS_AER(bus_p)) {
/* Enable AER CE */
0);
/* Clear any pending AER CE errors */
}
/* clear any pending CE errors */
/* Enable CE reporting */
return (DDI_SUCCESS);
}
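/*
 * Usage sketch (assumption, not original code): as noted above, callers
 * outside this module enable baseline/AER error handling first and only then
 * turn on CE reporting. The example_* function is hypothetical.
 */
#if 0
static void
example_enable_child_errors(dev_info_t *cdip)
{
	pcie_enable_errors(cdip);
	(void) pcie_enable_ce(cdip);
}
#endif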
/* ARGSUSED */
void
{
if (!PCIE_IS_PCIE(bus_p))
return;
/*
* Disable PCI-Express Baseline Error Handling
*/
/*
* Disable PCI-Express Advanced Error Handling if Exists
*/
if (!PCIE_HAS_AER(bus_p))
goto root;
/* Disable Uncorrectable errors */
/* Disable Correctable errors */
/* Disable ECRC generation and checking */
aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA |
}
/*
* Disable Secondary Uncorrectable errors if this is a bridge
*/
if (!PCIE_IS_PCIE_BDG(bus_p))
goto root;
root:
/*
* disable Root Control this is a Root device
*/
if (!PCIE_IS_ROOT(bus_p))
return;
if (!pcie_serr_disable_flag) {
}
if (!PCIE_HAS_AER(bus_p))
return;
}
}
/*
* Extract bdf from "reg" property.
*/
int
pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf)
{
int reglen;
return (DDI_FAILURE);
if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) {
return (DDI_FAILURE);
}
/* Get phys_hi from first element. All have same bdf. */
return (DDI_SUCCESS);
}
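/*
 * Sketch (assumption, for illustration): the bus/device/function numbers live
 * in the phys_hi cell of each "reg" entry, so the BDF can be recovered with
 * the standard PCI reg-property macros from <sys/pci.h>. The example_*
 * function is hypothetical.
 */
#if 0
static void
example_phys_hi_to_bdf(uint32_t phys_hi, pcie_req_id_t *bdf)
{
	*bdf = (PCI_REG_BUS_G(phys_hi) << 8) |
	    (PCI_REG_DEV_G(phys_hi) << 3) |
	    PCI_REG_FUNC_G(phys_hi);
}
#endif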
{
;
return (cdip);
}
{
/*
* As part of the probing, the PCI fcode interpreter may setup a DMA
* request if a given card has a fcode on it using dip and rdip of the
* attachment point, i.e. dip and rdip of the px/pcieb driver. In this
* case, return an invalid value for the bdf since we cannot get to the
* bdf value of the actual device which will be initiating this DMA.
*/
return (PCIE_INVALID_BDF);
/*
* For a given rdip, return the bdf value of dip's (px or pcieb)
* immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
*
* XXX - For now, return an invalid bdf value for all PCI and PCI-X
* devices since this needs more work.
*/
return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
}
return (pcie_aer_uce_mask);
}
return (pcie_aer_ce_mask);
}
return (pcie_aer_suce_mask);
}
return (pcie_serr_disable_flag);
}
void
if (mask & PCIE_AER_UCE_UR)
else
if (mask & PCIE_AER_UCE_ECRC)
pcie_ecrc_value = 0;
}
void
}
void
}
void
}
/*
* Is the rdip a child of dip? Used for checking certain CTLOPS from bubbling
* up erroneously. Ex. ISA ctlops to a PCI-PCI Bridge.
*/
{
break;
}
{
if (PCIE_IS_PCIE(bus_p)) {
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Initialize the MPS for a root port.
*
* dip - dip of root port device.
*/
void
{
if (rp_cap < max_supported)
(void) pcie_initchild_mps(dip);
}
/*
* Initialize the Maximum Payload Size of a device.
*
* cdip - dip of device.
*
* returns - DDI_SUCCESS or DDI_FAILURE
*/
int
pcie_initchild_mps(dev_info_t *cdip)
{
PCIE_DBG("%s: BUS not found.\n",
return (DDI_FAILURE);
}
/*
* For ARI Devices, only function zero's MPS needs to be set.
*/
if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
return (DDI_FAILURE);
if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
return (DDI_SUCCESS);
}
if (PCIE_IS_PCIE(bus_p)) {
int suggested_mrrs, fabric_mps;
return (DDI_SUCCESS);
}
if (device_mps_cap < fabric_mps)
else
if ((device_mps == fabric_mps) ||
/*
* Replace MPS and MRRS settings.
*/
}
return (DDI_SUCCESS);
}
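/*
 * Sketch (assumption, not original code): replacing the MPS and MRRS settings
 * amounts to rewriting the corresponding fields of the PCIe Device Control
 * register. The PCIE_CAP_GET/PCIE_CAP_PUT accessors and DEVCTL field macros
 * from the PCIe headers are assumed; device_mps and suggested_mrrs are the
 * encoded (power-of-two exponent) values computed above.
 */
#if 0
static void
example_set_mps_mrrs(pcie_bus_t *bus_p, uint16_t device_mps,
    uint16_t suggested_mrrs)
{
	uint16_t dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);

	dev_ctrl &= ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
	    PCIE_DEVCTL_MAX_PAYLOAD_MASK);
	dev_ctrl |= (device_mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT) |
	    (suggested_mrrs << PCIE_DEVCTL_MAX_READ_REQ_SHIFT);
	PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);
}
#endif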
/*
*
* rc_dip - dip of Root Complex.
* dip - dip of device where scan will begin.
* max_supported (IN) - maximum allowable MPS.
* max_supported (OUT) - maximum payload size capability of fabric.
*/
void
{
return;
/*
* Perform a fabric scan to obtain Maximum Payload Capabilities
*/
}
/*
* Scans fabric and determines Maximum Payload Size based on
* highest common denominator algorithm
*/
static void
pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
{
int circular_count;
(void *)&max_pay_load_supported);
}
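/*
 * Illustrative sketch (not original code) of the highest-common-denominator
 * reduction performed by the walk callback: each PCIe device's Max Payload
 * Size capability lowers the fabric-wide maximum if it is smaller than what
 * has been seen so far. The example_* callback is hypothetical.
 */
#if 0
static int
example_mps_reduce_cb(dev_info_t *dip, void *arg)
{
	int *max_supported = (int *)arg;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	int dev_mps_cap;

	if (bus_p == NULL || !PCIE_IS_PCIE(bus_p))
		return (DDI_WALK_PRUNECHILD);

	dev_mps_cap = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP) &
	    PCIE_DEVCAP_MAX_PAYLOAD_MASK;
	if (dev_mps_cap < *max_supported)
		*max_supported = dev_mps_cap;

	return (DDI_WALK_CONTINUE);
}
#endif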
/*
* Called as part of the Maximum Payload Size scan.
*/
static int
{
int rlen;
goto fail1;
}
PCIE_DBG("MPS: pcie_get_max_supported: %s: "
goto fail1;
}
/*
* If the suggested-mrrs property exists, then don't include this
* device in the MPS capabilities scan.
*/
"suggested-mrrs") != 0)
goto fail1;
PCIE_DBG("MPS: pcie_get_max_supported: %s: "
goto fail1;
}
&config_handle) != DDI_SUCCESS) {
PCIE_DBG("MPS: pcie_get_max_supported: %s: pcie_map_phys "
goto fail2;
}
DDI_FAILURE) {
goto fail3;
}
return (DDI_WALK_CONTINUE);
}
/*
* Determines if there are any root ports attached to a root complex.
*
* dip - dip of root complex
*
* Returns - DDI_SUCCESS if there is at least one root port otherwise
* DDI_FAILURE.
*/
int
{
int port_type;
/*
* Determine if any of the children of the passed in dip
* are root ports.
*/
continue;
&cap_ptr)) == DDI_FAILURE) {
continue;
}
if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT)
return (DDI_SUCCESS);
}
/* No root ports were found */
return (DDI_FAILURE);
}
/*
* Function that determines if a device is a PCIe device.
*
* dip - dip of device.
*
* returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE.
*/
int
{
/* get parent device's device_type property */
char *device_type;
int rc = DDI_FAILURE;
!= DDI_PROP_SUCCESS) {
return (DDI_FAILURE);
}
rc = DDI_SUCCESS;
else
rc = DDI_FAILURE;
return (rc);
}
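/*
 * Sketch (assumption, not original code): the check described above reduces to
 * looking up the parent's "device_type" property and comparing it against
 * "pciex". The example_* function is hypothetical.
 */
#if 0
static int
example_parent_is_pciex(dev_info_t *dip)
{
	char *device_type;
	int rc = DDI_FAILURE;

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_get_parent(dip),
	    DDI_PROP_DONTPASS, "device_type", &device_type) != DDI_PROP_SUCCESS)
		return (DDI_FAILURE);

	if (strcmp(device_type, "pciex") == 0)
		rc = DDI_SUCCESS;

	ddi_prop_free(device_type);
	return (rc);
}
#endif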
/*
* Function to map in a device's memory space.
*/
static int
{
int result;
hp->ah_rnumber = 0;
if (result != DDI_SUCCESS) {
} else {
}
return (result);
}
/*
* Map out memory that was mapped in with pcie_map_phys();
*/
static void
{
}
void
{
}
/*
* Return parent Root Port's pe_rber_fatal value.
*/
{
}
int
{
return (PCIE_ARI_FORW_NOT_SUPPORTED);
if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
return (PCIE_ARI_FORW_NOT_SUPPORTED);
if (pcie_disable_ari) {
return (PCIE_ARI_FORW_NOT_SUPPORTED);
}
return (PCIE_ARI_FORW_NOT_SUPPORTED);
}
PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
PCIE_DBG("pcie_ari_supported: "
"dip=%p: ARI Forwarding is supported\n", dip);
return (PCIE_ARI_FORW_SUPPORTED);
}
return (PCIE_ARI_FORW_NOT_SUPPORTED);
}
int
{
return (DDI_FAILURE);
PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n",
return (DDI_SUCCESS);
}
int
{
return (DDI_FAILURE);
PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n",
return (DDI_SUCCESS);
}
int
{
return (PCIE_ARI_FORW_DISABLED);
PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n",
if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) {
PCIE_DBG("pcie_ari_is_enabled: "
"dip=%p: ARI Forwarding is enabled\n", dip);
return (PCIE_ARI_FORW_ENABLED);
}
return (PCIE_ARI_FORW_DISABLED);
}
int
{
/*
* XXX - This function may be called before the bus_p structure
* has been populated. This code can be changed to remove
* pci_config_setup()/pci_config_teardown() when the RFE
* to populate the bus_p structures early in boot is putback.
*/
/* First make sure it is a PCIe device */
return (PCIE_NOT_ARI_DEVICE);
!= DDI_SUCCESS) {
return (PCIE_NOT_ARI_DEVICE);
}
/* Locate the ARI Capability */
&cap_ptr)) == DDI_FAILURE) {
return (PCIE_NOT_ARI_DEVICE);
}
/* ARI Capability was found so it must be an ARI device */
return (PCIE_ARI_DEVICE);
}
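/*
 * Usage sketch (assumption, not original code): ARI forwarding is only turned
 * on when the downstream port supports it and its child is itself an ARI
 * device, mirroring the kind of check made during child initialization above.
 * The example_* function is hypothetical.
 */
#if 0
static void
example_maybe_enable_ari(dev_info_t *dip, dev_info_t *cdip)
{
	if ((pcie_ari_supported(dip) == PCIE_ARI_FORW_SUPPORTED) &&
	    (pcie_ari_device(cdip) == PCIE_ARI_DEVICE))
		(void) pcie_ari_enable(dip);
}
#endif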
int
{
/*
* XXX - This function may be called before the bus_p structure
* has been populated. This code can be changed to remove
* pci_config_setup()/pci_config_teardown() when the RFE
* to populate the bus_p structures early in boot is putback.
*/
return (DDI_FAILURE);
if ((PCI_CAP_LOCATE(handle,
return (DDI_FAILURE);
}
*func = next_function;
return (DDI_SUCCESS);
}
{
return (NULL);
return (cdip);
}
return (NULL);
}
#ifdef DEBUG
static void
{
}
/*
* For debugging purposes set pcie_dbg_print != 0 to see printf messages
* during interrupt.
*
* When a proper solution is in place this code will disappear.
* Potential solutions are:
* o circular buffers
* o taskq to print at lower pil
*/
int pcie_dbg_print = 0;
void
{
if (!pcie_debug_flags) {
return;
}
if (servicing_interrupt()) {
if (pcie_dbg_print) {
}
} else {
}
}
#endif /* DEBUG */
static void
{
PCI_BCNF_IO_MASK) << 8);
/*
* Assuming that a zero based io_range[0] implies an
* invalid I/O range. Likewise for mem_range[0].
*/
if (val == 0)
*empty_io_range = B_TRUE;
PCI_BCNF_MEM_MASK) << 16);
if (val == 0)
*empty_mem_range = B_TRUE;
}
}
#endif /* defined(__i386) || defined(__amd64) */