/* axq.c revision 193974072f41a843678abf5f61979c748687e66b */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/ddi_impldefs.h>
#include <sys/sysmacros.h>
#include <sys/autoconf.h>
/*
* Function prototypes
*/
/* autoconfig entry point function definitions */
/* internal axq definitions */
static void axq_init(struct axq_soft_state *);
static void axq_init_local(struct axq_local_regs *);
/* axq kstats */
static void axq_add_kstats(struct axq_soft_state *);
static int axq_counters_kstat_update(kstat_t *, int);
/*
* Configuration data structures
*/
/*
 * Character/block entry points.  All open/close/read/write/ioctl slots
 * are nulldev/nodev stubs; property lookups go through ddi_prop_op.
 * NOTE(review): struct cb_ops normally carries a cb_flag field (e.g.
 * D_MP) between the streamtab and CB_REV entries — that initializer
 * line does not appear in this view; confirm it was not lost.
 */
static struct cb_ops axq_cb_ops = {
nulldev, /* open */
nulldev, /* close */
nulldev, /* strategy */
nulldev, /* print */
nodev, /* dump */
nulldev, /* read */
nulldev, /* write */
nulldev, /* ioctl */
nodev, /* devmap */
nodev, /* mmap */
nodev, /* segmap */
nochpoll, /* poll */
ddi_prop_op, /* cb_prop_op */
0, /* streamtab */
CB_REV, /* rev */
nodev, /* cb_aread */
nodev /* cb_awrite */
};
/*
 * Device operations vector for the AXQ driver (referenced by modldrv
 * below as &axq_ops).
 * NOTE(review): the opening "static struct dev_ops axq_ops = {" line
 * does not appear in this view — confirm it was not lost.
 */
DEVO_REV, /* rev */
0, /* refcnt */
axq_getinfo, /* getinfo */
nulldev, /* identify */
nulldev, /* probe */
axq_attach, /* attach */
axq_detach, /* detach */
nulldev, /* reset */
&axq_cb_ops, /* cb_ops */
(struct bus_ops *)0, /* bus_ops */
nulldev, /* power */
ddi_quiesce_not_supported, /* devo_quiesce */
};
/*
* AXQ globals
*/
/*
 * Local-space register mapping.  Per the attach-path comments below,
 * this is global across all AXQ instances: the first instance to attach
 * maps it for everyone and the mapping is never removed.
 */
struct axq_local_regs axq_local;
/*
* If non-zero, iopause will be asserted during DDI_SUSPEND.
* Clients using the axq_iopause_*_all interfaces should set this to zero.
*/
int axq_suspend_iopause = 1;
/*
* loadable module support
*/
extern struct mod_ops mod_driverops;
/*
 * NOTE(review): the opening "static struct modldrv modldrv = {" line
 * for this initializer does not appear in this view — confirm.
 */
&mod_driverops, /* Type of module. This one is a driver */
"AXQ driver", /* name of module */
&axq_ops, /* driver ops */
};
/*
 * NOTE(review): a modlinkage initializer normally starts with MODREV_1
 * and ends with a NULL terminator; only the modldrv pointer is visible
 * here — confirm the surrounding lines were not lost.
 */
static struct modlinkage modlinkage = {
(void *)&modldrv,
};
/* Soft-state anchor — presumably used with ddi_soft_state_*; confirm. */
static void *axq_softp;
/*
* AXQ Performance counters
* We statically declare a array of the known
* AXQ event-names and event masks. The number
* of events in this array is AXQ_NUM_EVENTS.
*/
/*
 * Static table of AXQ performance-counter events: each entry pairs the
 * kstat-visible event name with its PCR event mask (see the comment
 * above; the table length is AXQ_NUM_EVENTS).
 * NOTE(review): the array declaration line itself (e.g.
 * "static struct ... axq_events[AXQ_NUM_EVENTS] = {") does not appear
 * in this view — confirm it was not lost.
 */
{"ha_adr_fifo_lk2", HA_ADR_FIFO_LK2},
{"ha_adr_fifo_lk1", HA_ADR_FIFO_LK1},
{"ha_adr_fifo_lk0", HA_ADR_FIFO_LK0},
{"ha_dump_q", HA_DUMP_Q},
{"ha_rd_f_stb_q", HA_RD_F_STB_Q},
{"ha_dp_wr_q", HA_DP_WR_Q},
{"ha_int_q", HA_INT_Q},
{"ha_wrb_q", HA_WRB_Q},
{"ha_wr_mp_q", HA_WR_MP_Q},
{"ha_wrtag_q", HA_WRTAG_Q},
{"ha_wt_wait_fifo", HA_WT_WAIT_FIFO},
{"ha_wrb_stb_fifo", HA_WRB_STB_FIFO},
{"ha_ap0_q", HA_AP0_Q},
{"ha_ap1_q", HA_AP1_Q},
{"ha_new_wr_q", HA_NEW_WR_Q},
{"ha_dp_rd_q", HA_DP_RD_Q},
{"ha_unlock_q", HA_UNLOCK_Q},
{"ha_cdc_upd_q", HA_CDC_UPD_Q},
{"ha_ds_q", HA_DS_Q},
{"ha_unlk_wait_q", HA_UNLK_WAIT_Q},
{"ha_rd_mp_q", HA_RD_MP_Q},
{"l2_io_q", L2_IO_Q},
{"l2_sb_q", L2_SB_Q},
{"l2_ra_q", L2_RA_Q},
{"l2_ha_q", L2_HA_Q},
{"l2_sa_q", L2_SA_Q},
{"ra_wait_fifo", RA_WAIT_FIFO},
{"ra_wrb_inv_fifo", RA_WRB_INV_FIFO},
{"ra_wrb_fifo", RA_WRB_FIFO},
{"ra_cc_ptr_fifo", RA_CC_PTR_FIFO},
{"ra_io_ptr_fifo", RA_IO_PTR_FIFO},
{"ra_int_ptr_fifo", RA_INT_PTR_FIFO},
{"ra_rp_q", RA_RP_Q},
{"ra_wrb_rp_q", RA_WRB_RP_Q},
{"ra_dp_q", RA_DP_Q},
{"ra_dp_stb_q", RA_DP_STB_Q},
{"ra_gtarg_q", RA_GTARG_Q},
{"sdc_recv_q", SDC_RECV_Q},
{"sdc_redir_io_q", SDC_REDIR_IO_Q},
{"sdc_redir_sb_q", SDC_REDIR_SB_Q},
{"sdc_outb_io_q", SDC_OUTB_IO_Q},
{"sdc_outb_sb_q", SDC_OUTB_SB_Q},
{"sa_add1_input_q", SA_ADD1_INPUT_Q},
{"sa_add2_input_q", SA_ADD2_INPUT_Q},
{"sa_inv_q", SA_INV_Q},
{"sa_no_inv_q", SA_NO_INV_Q},
{"sa_int_dp_q", SA_INT_DP_Q},
{"sa_dp_q", SA_DP_Q},
{"sl_wrtag_q", SL_WRTAG_Q},
{"sl_rto_dp_q", SL_RTO_DP_Q},
/* NOTE(review): name reads "syreg" while the mask is SYSREG_ — possible typo in the event name; confirm against consumers before changing. */
{"syreg_input_q", SYSREG_INPUT_Q},
{"sdi_sys_status1", SDI_SYS_STATUS1},
{"sdi_sys_status0", SDI_SYS_STATUS0},
{"cdc_hits", CDC_HITS},
{"total_cdc_read", TOTAL_CDC_READ},
{"ha_watranid_sd", HA_WATRANID_SD},
{"ha_stb_sd", HA_STB_SD},
{"ha_l2_irq_sd", HA_L2_IRQ_SD},
{"ha_sl_wrtag_sd", HA_SL_WRTAG_SD},
{"aa_home_cc_full", AA_HOME_CC_FULL},
{"aa_home_io_full", AA_HOME_IO_FULL},
{"aa_slave_full", AA_SLAVE_FULL},
{"aa_rp_full", AA_RP_FULL}
};
/*
 * Attach bookkeeping and forward declarations.  axq_attachcnt counts
 * attached instances (per the kstat comments below it is manipulated
 * under a lock); the first attach creates the shared picN kstats and
 * the last detach removes them.
 */
static int axq_attachcnt = 0; /* # of instances attached */
static void axq_unmap_phys(ddi_acc_handle_t *);
int starcat_axq_pio_workaround(dev_info_t *);
static int axq_slot1_idle(struct axq_soft_state *);
static boolean_t axq_panic_callb(void *, int);
static callb_id_t axq_panic_cb_id;
/*
* These are the module initialization routines.
*/
/*
 * Loadable-module entry: initializes the soft-state list (the visible
 * "sizeof (struct axq_soft_state), 1" fragment suggests
 * ddi_soft_state_init), installs the module, and registers the panic
 * callback ("axq_panic" in class CB_CL_PANIC) — TODO confirm; several
 * interior lines appear to be missing from this view.
 */
int
_init(void)
{
int error;
sizeof (struct axq_soft_state), 1)) != 0)
return (error);
return (error);
}
CB_CL_PANIC, "axq_panic");
return (0);
}
/*
 * Loadable-module exit: on successful removal, deletes the panic
 * callback registered in _init.
 * NOTE(review): the mod_remove/soft-state-fini lines appear to be
 * missing from this view — confirm against the full source.
 */
int
_fini(void)
{
int error;
return (error);
(void) callb_delete(axq_panic_cb_id);
return (0);
}
/*
 * NOTE(review): presumably _info(struct modinfo *) returning
 * mod_info(&modlinkage, ...); the signature and body lines are missing
 * from this view — confirm.
 */
int
{
}
/*
 * attach(9E) entry (axq_attach per the dev_ops table above).  Handles
 * DDI_ATTACH (map registers, read "portid", derive the slot number,
 * save the config-space physical address for DR copy-rename, map the
 * shared local space once) and DDI_RESUME (re-enable iopause if in
 * use).
 * NOTE(review): the signature line and many interior statements are
 * missing from this view — confirm against the full source.
 */
static int
{
int instance;
struct axq_soft_state *softsp;
switch (cmd) {
case DDI_ATTACH:
break;
case DDI_RESUME:
/*
* Reenable the axq io pause if it is
* employed. See the DDI_SUSPEND comments
*/
}
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
return (DDI_FAILURE);
/* Set the dip in the soft state */
/* Get the "portid" property */
"property.");
goto bad;
}
/*
* derive the slot # from the portid - for starcat, it is
* either 0 or 1 based on the lsb of the axq portid.
*/
/*
* map in the regs. There are two regspecs - one
* in safari config space and the other in local space.
*/
goto bad;
}
/*
* This is a hack for support DR copy rename scripting
* Get the physical address of the start of the
* AXQ config space and save it.
*/
/*
* Map in the regs for local space access
* This is global for all axq instances.
* Make sure that some axq instance does
* it for the rest of the gang..
* Note that this mapping is never removed.
*/
/* initialize and map in the local space */
goto bad;
}
}
/* update the axq array for this new instance */
return (DDI_SUCCESS);
bad:
return (DDI_FAILURE);
}
/*
 * axq_init (prototype at top of file): computes per-instance register
 * pointers.  Offsets depend on the slot type (0 vs 1); sets up the
 * CASM slot pointers for all AXQ_MAX_EXP expanders, the SDI timeout
 * register, and records the POST-programmed CDC enable state.
 * NOTE(review): the signature line and the register-pointer
 * assignments are missing from this view — confirm.
 */
static void
{
int i;
/*
* Setup the AXQ registers
* Some offsets and availability are dependent on the slot type
*/
/* This is a slot type 0 AXQ */
} else {
/* slot type 1 AXQ */
}
/* setup CASM slots */
for (i = 0; i < AXQ_MAX_EXP; i++) {
(AXQ_CASM_SLOT_START + AXQ_REGOFF(i)));
}
/* setup SDI timeout register accesses */
/*
* Save the CDC state (enabled or disabled)
* as originally setup by Post.
*/
}
#ifndef _AXQ_LOCAL_ACCESS_SUPPORTED
/*
* Setup cpu2ssc intr register in explicit expander
* space. Local space addressing for this is broken,
* we'll use explicit addressing for now.
*/
#endif /* _AXQ_LOCAL_ACCESS_SUPPORTED */
}
/*
 * axq_init_local (prototype at top of file): initializes the shared
 * local-space register view, in particular the cpu2ssc interrupt
 * register, which per the comment is the only local-space access
 * expected to work on the next AXQ ASIC revision.
 * NOTE(review): signature and body statements missing from this view.
 */
static void
{
/*
* local access to cpu2ssc intr register will
* be the only one that may work properly in the
* next revision of the AXQ asics.
* Set it up here for now.
*/
}
/*
 * detach(9E) entry (axq_detach per the dev_ops table).  DDI_SUSPEND:
 * optionally asserts slot-1 iopause as a safety net (skipped when CPUs
 * are present on the slot-1/MCPU board) after verifying no outstanding
 * I/O via the domain control register.  DDI_DETACH: removes this
 * instance's counter kstats and, when the last instance detaches,
 * deletes the shared picN kstats.
 * NOTE(review): the signature line and many interior statements are
 * missing from this view — confirm against the full source.
 */
/* ARGSUSED */
static int
{
int instance;
int i;
struct axq_soft_state *softsp;
/* get the instance of this devi */
/* get the soft state pointer for this device node */
switch (cmd) {
case DDI_SUSPEND:
/*
* Depending on the variable "use_axq_iopause"
* we set the axq iopause bit as a paranoid
* safety net. This is assuming all the devices
* associated with the slot are already suspended.
* Care must be taken to not set iopause when CPUs
* are known to be present on the slot 1 board,
* i.e. MCPU board type.
* This io pause bit only applies to slot 1 axq,
*/
/*
* Do not enable AXQ_DOMCTRL_PAUSE if CPUs are
* known to be present in slot 1.
*/
for (i = 0; i < STARCAT_SLOT1_CPU_MAX; i++) {
return (DDI_SUCCESS);
}
}
/*
* Make sure that there is no outstanding
* I/O activity by reading the domain ctrl reg.
* A non-zero lsb indicates no I/O activity.
*/
return (DDI_FAILURE);
}
}
return (DDI_SUCCESS);
case DDI_DETACH:
!= NULL);
/*
* remove counter kstats for this device
*/
}
/*
* See if we are the last instance to detach.
* If so, we need to remove the picN kstats
*/
if (--axq_attachcnt == 0) {
for (i = 0; i < AXQ_NUM_PICS; i++) {
kstat_delete(axq_picN_ksp[i]);
axq_picN_ksp[i] = NULL;
}
}
}
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
}
/*
 * getinfo(9E) entry (axq_getinfo per the dev_ops table): maps a dev_t
 * to its dev_info pointer (DDI_INFO_DEVT2DEVINFO, via the soft state)
 * or its instance number (DDI_INFO_DEVT2INSTANCE).
 * NOTE(review): the signature line, the "ret" declaration, and the
 * result-pointer assignments are missing from this view — confirm.
 */
/* ARGSUSED0 */
static int
{
struct axq_soft_state *softsp;
switch (infocmd) {
case DDI_INFO_DEVT2DEVINFO:
softsp = (struct axq_soft_state *)
ret = DDI_FAILURE;
} else {
ret = DDI_SUCCESS;
}
break;
case DDI_INFO_DEVT2INSTANCE:
ret = DDI_SUCCESS;
break;
default:
ret = DDI_FAILURE;
break;
}
return (ret);
}
/*
* Flush the CDC Sram of the slot0 axq
* indicated by the expid argument
*/
/*
 * Flush the CDC SRAM of the slot-0 AXQ for the given expander (see the
 * comment above): saves the control-test register, disables the SRAM
 * (AXQ_CDC_DIS), enables CDC test mode, clears the data-write
 * registers, programs the flush size, then polls the CDC counter for
 * AXQ_CDC_CNT_TEST_DONE up to AXQ_CDC_FLUSH_WAIT iterations.  On exit,
 * test mode is cleared and the CDC is left disabled only when the
 * "disabled" argument requests it.  The "held" argument skips the
 * internal lock acquisition/release when the caller already holds it.
 * NOTE(review): the signature line and several register accesses are
 * missing from this view — confirm against the full source.
 */
int
{
struct axq_soft_state *softsp;
int retval = 0;
int i;
if (!held)
/* save the value of the ctrl test reg */
/* disable sram and setup the ctrl test reg for flushing */
| AXQ_CDC_DIS;
/* Enable CDC test in the CDC Address test reg */
/* clear the CDC Data write regs */
/*
* write in the size of the sram to clear
* into the CDC Counter test reg
*/
/* wait for flush to complete */
for (i = 0; i < AXQ_CDC_FLUSH_WAIT; i++) {
if (((*softsp->axq_cdc_counter) &
AXQ_CDC_CNT_TEST_DONE) != 0) {
break;
}
}
if (i >= AXQ_CDC_FLUSH_WAIT) {
expid);
}
/*
* Disable test mode in CDC address test reg
*/
*softsp->axq_cdc_addrtest = 0;
/*
* If "disabled" option is requested, leave
* the CDC disabled.
*/
if (disabled) {
} else {
}
if (!held)
return (retval);
}
/*
* Flush all the CDC srams for all the AXQs in
* the local domain.
*/
/*
 * Flush the CDC SRAMs of every AXQ in the local domain (see comment
 * above), stopping at the first expander whose flush fails.
 * NOTE(review): the signature, the per-expander flush call, and the
 * loop-interior guard are missing from this view — confirm.
 */
int
{
int retval;
int i;
for (i = 0; i < AXQ_MAX_EXP; i++) {
if (retval != DDI_SUCCESS) break;
}
}
return (retval);
}
/*
* Disable and flush all CDC srams for all the AXQs
* in the local domain.
*/
/*
 * Disable and flush the CDC SRAMs of every AXQ in the local domain
 * (see comment above); on failure, presumably re-enables what was
 * disabled — the recovery statements are not visible here.
 * NOTE(review): signature and interior calls missing from this view.
 */
int
{
int retval;
int i;
/*
* Disable and flush all the CDC srams
*/
for (i = 0; i < AXQ_MAX_EXP; i++) {
if (retval != DDI_SUCCESS) break;
}
}
if (retval != DDI_SUCCESS) {
}
return (retval);
}
/*
* Enable the CDC srams for all the AXQs in the
* the local domain. This routine is used in
* conjunction with axq_cdc_disable_flush_all().
*/
/*
 * Re-enable the CDC SRAMs on every AXQ in the local domain; the
 * companion of axq_cdc_disable_flush_all() (see comment above).
 * NOTE(review): signature and per-expander enable statements are
 * missing from this view — confirm.
 */
void
{
struct axq_soft_state *softsp;
int i;
/*
* Enable all the CDC sram
*/
for (i = 0; i < AXQ_MAX_EXP; i++) {
}
}
}
}
/*
* Interface for DR to enable slot1 iopause after cpus have been idled.
* Precondition is for all devices to have been suspended (including axq).
* This routine avoids locks as it is called by DR with cpus paused.
*/
/*
 * DR interface: assert slot-1 iopause on all expanders after CPUs have
 * been idled (see comment above — deliberately lock-free because DR
 * calls it with CPUs paused).  Expanders with CPUs configured in slot
 * 1 are skipped; on failure, the failing expander index is returned
 * through *errexp.
 * NOTE(review): the signature line and the statements inside both
 * loops are missing from this view — confirm.
 */
int
{
int i, j;
int retval = DDI_SUCCESS;
struct axq_soft_state *softsp;
DELAY(1000);
for (i = 0; i < AXQ_MAX_EXP; i++) {
/*
* Do not enable if cpus configured in slot1.
* Unconfigured cpus should be idle in nc space.
*/
for (j = 0; j < STARCAT_SLOT1_CPU_MAX; j++) {
break;
}
}
if (j < STARCAT_SLOT1_CPU_MAX) {
continue;
}
if (retval == DDI_FAILURE) {
break;
}
}
}
if (retval != DDI_SUCCESS) {
*errexp = i;
}
return (retval);
}
/*
* De-assert axq iopause on all slot1 boards. This routine avoids locks
* as it is called by DR with cpus paused.
*/
/*
 * DR interface: de-assert iopause on all slot-1 boards (see comment
 * above — lock-free because DR calls it with CPUs paused).
 * NOTE(review): signature and loop-body statements missing from this
 * view — confirm.
 */
void
{
int i;
struct axq_soft_state *softsp;
for (i = 0; i < AXQ_MAX_EXP; i++) {
}
}
}
/*
* Attempt to wait for slot1 activity to go idle.
*/
/*
 * axq_slot1_idle (prototype at top of file): polls up to 10 times,
 * with a 50-usec DELAY between attempts, for slot-1 activity to go
 * idle; DDI_SUCCESS once idle, DDI_FAILURE on timeout.
 * NOTE(review): the signature line and the idle-test condition are
 * missing from this view — confirm.
 */
static int
{
int i;
for (i = 0; i < 10; i++) {
return (DDI_SUCCESS);
}
DELAY(50);
}
return (DDI_FAILURE);
}
/*
* Read a particular NASM entry
*/
/*
 * Read one NASM entry (see comment above) after range-checking the
 * slot, expander, and entry index arguments.
 * NOTE(review): the bounds tests use ">" — if AXQ_MAX_SLOT_PER_EXP /
 * AXQ_MAX_EXP / AXQ_NASM_SIZE are counts rather than maximum valid
 * indices these look off-by-one; the write-side function below uses
 * the identical comparisons, so any fix must change both.  Signature
 * line and register access missing from this view — confirm.
 */
int
{
struct axq_soft_state *softsp;
if (slot > AXQ_MAX_SLOT_PER_EXP ||
expid > AXQ_MAX_EXP ||
nasm_entry > AXQ_NASM_SIZE) {
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
return (DDI_FAILURE);
}
/*
* Write a particular NASM entry
*/
/*
 * Write one NASM entry (see comment above).  Caller must hold
 * axq_array_lock first (per the note below) to avoid deadlock with a
 * paused thread holding softsp->axq_lock.  Bounds checks mirror the
 * read-side function above.
 * NOTE(review): signature line and register access missing from this
 * view — confirm.
 */
static int
{
struct axq_soft_state *softsp;
/*
* Note: need to make sure axq_array_lock held first, so that a
* paused thread is not holding softsp->axq_lock, which could
* result in deadlock.
*/
if (slot > AXQ_MAX_SLOT_PER_EXP ||
expid > AXQ_MAX_EXP ||
nasm_entry > AXQ_NASM_SIZE) {
return (DDI_FAILURE);
}
return (DDI_FAILURE);
}
return (DDI_SUCCESS);
}
/*
 * NOTE(review): presumably the public NASM-write wrapper that takes
 * the array lock and calls the static writer above; the signature and
 * call statements are missing from this view — confirm.
 */
int
{
int rc;
return (rc);
}
/*
* Write a particular NASM entry for all the
* axqs in the domain
* Note: other CPUs are paused when this function called.
*/
/*
 * Write the same NASM entry on every AXQ in the domain (see comment
 * above; called with other CPUs paused).  Bails out with DDI_FAILURE
 * on the first per-AXQ write that fails.
 * NOTE(review): the signature and the per-slot write calls (only their
 * trailing "data);" argument lines remain) are missing from this
 * view — confirm.
 */
int
{
int i;
int rc;
for (i = 0; i < AXQ_MAX_EXP; i++) {
data);
if (rc != DDI_SUCCESS) {
return (DDI_FAILURE);
}
}
data);
if (rc != DDI_SUCCESS) {
return (DDI_FAILURE);
}
}
}
return (DDI_SUCCESS);
}
/*
* Take write lock for axq_nasm_write_all() outside
* critical section where other CPUs are paused.
*/
/*
 * Take the write lock used by axq_nasm_write_all() (see comment
 * above), outside the CPU-paused critical section.
 * NOTE(review): the body is empty in this view although the contract
 * requires acquiring a lock — the rw_enter call appears to be missing;
 * confirm against the full source.
 */
void
axq_array_rw_enter(void)
{
}
/*
* Release write lock for axq_nasm_write_all() outside
* critical section where other CPUs are paused.
*/
/*
 * Release the write lock taken by axq_array_rw_enter() (see comment
 * above), outside the CPU-paused critical section.
 * NOTE(review): the body is empty in this view although the contract
 * requires releasing a lock — the rw_exit call appears to be missing;
 * confirm against the full source.
 */
void
axq_array_rw_exit(void)
{
}
/*
* Read a particular CASM entry
*/
/*
 * NOTE(review): body fragment of the CASM-entry read routine described
 * by the comment above; its return-type/name line, the retval
 * declaration, and the register read are missing from this view —
 * confirm.
 */
{
struct axq_soft_state *softsp;
return (retval);
}
/*
* Write a particular CASM entry
*/
/*
 * Write one CASM entry (see comment above).  First reads the slot:
 * a non-zero value proves write permission — writing without it can
 * take an exception (per the interior comment).
 * NOTE(review): the signature, the read-back test, and the write
 * itself are missing from this view — confirm.
 */
int
{
struct axq_soft_state *softsp;
int retval;
/*
* first read the casm slot in question
* it should be non-zero to indicate that
* we have write permission to update it.
* Note that if we write it without valid
* permission, we can get an exception.
*/
} else {
}
return (retval);
}
/*
* Write a particular CASM entry for all the
* axqs in the domain
*/
/*
 * Write one CASM entry on every AXQ in the domain (see comment above).
 * Takes RW_WRITER access to the axq_array for exclusive access, then
 * does a paranoid pre-pass verifying each AXQ's slot reads non-zero
 * (i.e. writable); only if all pass does it perform the updates.
 * NOTE(review): the signature, the lock calls, and the read/write
 * statements inside both loops are missing from this view — confirm.
 */
int
{
int i;
struct axq_soft_state *softsp;
/*
* Since we are updating all the AXQs,
* it will be easier to simply grab
* exclusive access to the AXQs by obtaining
* the RW_WRITER access to the axq_array.
*/
/*
* Paranoid check: run thru all the avail AXQs
* and make sure we can write into that slot in question
* We check it by reading the slot and it should be
* non-zero.
*/
for (i = 0; i < AXQ_MAX_EXP; i++) {
== 0) {
break;
}
}
== 0) {
break;
}
}
}
if (i < AXQ_MAX_EXP) {
/*
* We have no write permission for some AXQ
* for the CASM slot in question. Flag it
* as an error
*/
return (DDI_FAILURE);
}
/*
* everything looks good - do the update
*/
for (i = 0; i < AXQ_MAX_EXP; i++) {
}
}
}
return (DDI_SUCCESS);
}
/*
* Construct a script of <physicaladdr, data> tuple pairs that
* reprogram the all the AXQs in the local domain to swap the
* contents of casmslot0 with casmslot1.
*/
/*
 * Build a DR copy-rename script of <physicaladdr, data> tuple pairs
 * that reprogram every AXQ in the local domain to swap the contents of
 * casmslot0 and casmslot1 (see comment above).  Takes the RW_WRITER
 * array lock for the duration; verifies both slots read non-zero on
 * every AXQ before emitting tuples.  On success the script element
 * pointer is returned through *script_elm.
 * NOTE(review): most of the signature, the tuple-emission statements,
 * and the success test are missing from this view — confirm.
 */
int
int casmslot1)
{
struct axq_soft_state *softsp;
int i, slot;
/*
* There should be some global locking at the
* DR level to do this - since this is one of
* the sequence of steps in copyrename.
* For now, we grab the RW_WRITER lock for
* script construction.
*/
/*
* Construct the <physicaladdr, data> tuple pairs
* for reprogramming the AXQs so that the value in
* casmslot0 is swapped with the content in casmslot1.
* Paranoid check: We make sure that we can write to
* both slots in all the AXQs by reading the slots and
* they should be non-zero.
*/
for (i = 0; i < AXQ_MAX_EXP; i++) {
} else {
/*
* Somehow we can't access one of
* the casm slot - quit.
*/
break;
}
}
}
if (i < AXQ_MAX_EXP) break;
}
/* successful */
*script_elm = s_elm;
return (DDI_SUCCESS);
} else {
return (DDI_FAILURE);
}
}
/*
* Send an interrupt to the SSC passing
* a 8 bit cookie value
*/
/*
 * Send an interrupt to the SSC carrying an 8-bit cookie (see comment
 * above).  Without _AXQ_LOCAL_SPACE_SUPPORTED it pins the current CPU,
 * derives its expander/slot to pick the right cpu2ssc interrupt
 * register in explicit expander space; otherwise it uses local space.
 * Polls the register for AXQ_CPU2SSC_INTR_PEND to clear, up to
 * AXQ_INTR_PEND_WAIT iterations with 200-usec delays.
 * NOTE(review): the signature, intr_reg setup, the write of the
 * cookie, and the retval assignment are missing from this view —
 * confirm.
 */
int
{
int retval, i;
#ifndef _AXQ_LOCAL_SPACE_SUPPORTED
/* Local space access not available */
/* Make sure the current cpu is not switched out */
/*
* Compute the exp# and slot# of the current cpu
* so that we know which AXQ cpu2ssc intr reg to
* use.
*/
#else
/* use local space */
#endif /* _AXQ_LOCAL_SPACE_SUPPORTED */
for (i = 0; i < AXQ_INTR_PEND_WAIT; i++) {
if (!(*intr_reg & AXQ_CPU2SSC_INTR_PEND)) {
break;
}
DELAY(200);
}
#ifndef _AXQ_LOCAL_SPACE_SUPPORTED
#endif
return (retval);
}
/*
* Read the SDI timeout register (SRD use)
* This routine accepts a clear flag to indicate
* whether the register should be cleared after
* the read.
*/
/*
 * NOTE(review): body fragment of the SDI timeout register read routine
 * described by the comment above (clearflag selects read-and-clear vs
 * plain read); the return-type/name line, retval declaration, and the
 * register accesses are missing from this view — confirm.
 */
{
struct axq_soft_state *softsp;
if (clearflag) {
/* read and then clear register */
} else {
}
return (retval);
}
/*
* Routine to create a kstat for each %pic that
* the AXQ has (there are 3 of them). These read-only
* kstats export event names that the respective %pic
* supports. Pic0 and Pic1 are similar and they both have
* a 128-input mux. Pic2 counts the clock and can set up
* to count or freeze.
* Note that all AXQ instances use the same events, we only
* need to create one set of the picN kstats.
*/
/*
 * Create the read-only picN kstats described by the comment above:
 * one per %pic, each exporting the supported event names with their
 * PCR masks, plus a final "clear_pic" record.  On a kstat_create
 * failure, any picN kstats created earlier are deleted again.
 * NOTE(review): the signature, the kstat_create call, and the
 * event-population loop header are missing from this view — confirm.
 */
static void
{
struct kstat_named *axq_pic_named_data;
int pic_shift = 0;
/*
* Create the picN kstat for Pic0 and Pic1
* Both have similar set of events. Add one
* extra event for the clear_event mask.
*/
char pic_name[20];
int num_events, i;
pic_name);
/* remove pic kstats that was created earlier */
for (i = 0; i < pic; i++) {
kstat_delete(axq_picN_ksp[i]);
axq_picN_ksp[i] = NULL;
}
return;
}
/*
* for each picN event, write a kstat record of
* name = EVENT & value.ui64 = PCR_MASK.
*/
/* pcr_mask */
/* event name */
}
/*
* Add the clear pic event and mask as the last
* record in the kstat.
*/
"clear_pic", KSTAT_DATA_UINT64);
}
}
/*
 * axq_add_kstats (prototype at top of file): the first instance to
 * attach (axq_attachcnt++ under a lock, per the comment) creates the
 * shared picN kstats; every instance then gets its own writable
 * "counter" kstat exposing %pcr plus the AXQ_NUM_PICS %pic registers.
 * NOTE(review): the signature, kstat_create call, and the named-kstat
 * initialization calls (only their trailing argument lines remain)
 * are missing from this view — confirm.
 */
static void
{
struct kstat *axq_counters_ksp;
struct kstat_named *axq_counters_named_data;
/*
* Create the picN kstats if we are the first instance
* to attach. We use axq_attachcnt as a count of how
* many instances have attached. This is protected by
* a lock.
*/
if (axq_attachcnt++ == 0)
/*
* A "counter" kstat is created for each axq
* instance that provides access to the %pcr and %pic
* registers for that instance.
*
* The size of this kstat is AXQ_NUM_PICS + 1 for %pcr
*/
KSTAT_FLAG_WRITABLE)) == NULL) {
return;
}
/* initialize the named kstats */
"pcr", KSTAT_DATA_UINT32);
"pic0", KSTAT_DATA_UINT32);
"pic1", KSTAT_DATA_UINT32);
"pic2", KSTAT_DATA_UINT32);
/* update the softstate */
}
/*
 * axq_counters_kstat_update (prototype at top of file): ks_update
 * handler for the per-instance "counter" kstat.  KSTAT_WRITE stores
 * the pcr value into the soft state (the pic registers are read-only);
 * KSTAT_READ copies %pcr and the three %pic registers into the named
 * kstat records.  Returns 0.
 * NOTE(review): the signature and the actual register copy statements
 * are missing from this view — confirm.
 */
static int
{
struct kstat_named *axq_counters_data;
struct axq_soft_state *softsp;
if (rw == KSTAT_WRITE) {
/*
* Write the pcr value to the softsp->axq_pcr.
* The pic register is read-only so we don't
* attempt to write to it.
*/
} else {
/*
* Read %pcr and %pic register values and write them
* into counters kstat.
*
*/
/* pcr */
/* pic0 */
/* pic1 */
/* pic2 */
}
return (0);
}
/*
 * NOTE(review): struct gptwo_phys_spec appears empty in this view —
 * its member declarations (a GPTwo "reg" property physical address
 * spec, presumably; used below by the PIO workaround) seem to be
 * missing; confirm against the full source.
 */
struct gptwo_phys_spec {
};
/* Tunables: set axq_pio_workaround_disable non-zero to turn the PIO
 * limit workaround off; axq_pio_limit is the limit returned when the
 * workaround applies. */
int axq_pio_workaround_disable = 0;
int axq_pio_limit = 3;
/*
 * starcat_axq_pio_workaround (prototype at top of file): given a PCI
 * (Schizo) dev_info, computes the slot-1 AXQ portid (Schizo portid
 * OR'ed with 3 — see the encoding comment below), walks the device
 * tree from the root for the AXQ node with that portid, and reads its
 * IO Domain Control Register.  Returns 0 when no workaround is needed
 * (workaround disabled, lookups fail, or bit 6 of the register is set,
 * meaning this AXQ version lacks the PIO-limit problem); otherwise
 * returns axq_pio_limit.
 * NOTE(review): the tree-walk loop header, property lookups, and
 * register-read calls are largely missing from this view — confirm.
 */
int
{
int portid, axq_portid;
char *name;
struct gptwo_phys_spec *gptwo_spec;
return (0);
/*
* Get the portid for the PCI (Schizo) device).
*/
return (0);
}
/*
* Calculate the portid for the Slot 1 AXQ. The portid for
* Schizo 0 EEEEE11100
* Schizo 1 EEEEE11101
* AXQ 0 EEEEE11110
* AXQ 1 EEEEE11111
* where EEEEE is the 5 bit expander number. So the portid for
* AXQ 1 can be easily calculated by oring a 3 to the portid of
* Schizo 0 or 1.
*/
/*
* Look for AXQ nodes that have the portid we calculated.
*/
pdip = ddi_root_node();
!= DDI_PROP_SUCCESS) {
continue;
}
continue;
}
/*
* Found an AXQ node.
*/
if (portid == axq_portid) {
/*
* We found the correct AXQ node.
*/
break;
}
}
return (0);
}
return (0);
}
return (0);
}
(int32_t *)&io_domain_control)) {
return (0);
}
/*
* If bit 6 of the IO Domain Control Register is a one,
* then this AXQ version does not have the PIO Limit problem.
*/
return (0);
return (axq_pio_limit);
}
/*
 * NOTE(review): presumably the physical-address mapping helper paired
 * with axq_unmap_phys() below; it zeroes the access handle's rnumber
 * and returns the DDI result of a mapping call whose line is missing
 * from this view — confirm.
 */
static int
{
int result;
hp->ah_rnumber = 0;
if (result != DDI_SUCCESS) {
} else {
}
return (result);
}
/*
 * axq_unmap_phys (prototype at top of file, taking a
 * ddi_acc_handle_t *).
 * NOTE(review): the body is empty in this view — the unmap/free-handle
 * statements appear to be missing; confirm against the full source.
 */
static void
{
}
/*
 * axq_panic_callb (registered in _init via the CB_CL_PANIC class):
 * panic callback; always reports B_TRUE (callback complete).
 * NOTE(review): the signature line and any panic-time work are missing
 * from this view — confirm.
 */
/* ARGSUSED */
static boolean_t
{
return (B_TRUE);
}