/* mem_cache.c revision 34a79eb7e68ed2b7c23a4dcc4851b4f437bf59b6 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
*/
#include <sys/cheetahregs.h>
#include <sys/mem_cache.h>
#include <sys/mem_cache_ioctl.h>
/* Macro for putting 64-bit onto stack as two 32-bit ints */
/*
 * NOTE(review): the macro this comment refers to is not present —
 * this copy of the file appears to have lost lines during extraction.
 */
/* Debug flag: non-zero enables extra diagnostic output (checked with & 0x1 below). */
int mem_cache_debug = 0x0;
/* Running count of cache-line retire attempts that failed. */
uint32_t retire_failures = 0;
/* dev_ops and cb_ops entry point function declarations */
/* NOTE(review): fragment — the start of this prototype (getinfo-style, ends in "void **") is missing. */
void **);
static int mem_cache_ioctl_ops(int, int, cache_info_t *);
/*
 * Character/block device entry points for this driver.
 * NOTE(review): initializer is incomplete — the leading cb_ops members
 * (open/close/read/write/ioctl/strategy, and the trailing flag and rev
 * fields) are missing from this copy; fragments kept as found.
 */
struct cb_ops mem_cache_cb_ops = {
nodev, /* dump */
nodev, /* devmap */
ddi_segmap, /* segmap */
NULL, /* for STREAMS drivers */
};
/*
 * Device operations table.
 * NOTE(review): incomplete — getinfo/attach/detach/cb_ops pointers that
 * normally sit between these members are missing from this copy.
 */
static struct dev_ops mem_cache_dev_ops = {
DEVO_REV, /* driver build version */
0, /* device reference count */
nulldev, /* probe */
nulldev, /* reset */
nulldev, /* power */
ddi_quiesce_not_needed, /* quiesce */
};
/*
 * Soft state
 */
/*
 * NOTE(review): the softc struct body, the instance macro that
 * "(inst)))" below is the tail of, and the modldrv initializer header
 * are missing from this copy; fragments kept as found.
 */
struct mem_cache_softc {
};
(inst)))
/* module configuration stuff */
/* Anchor for ddi_soft_state_zalloc()/ddi_get_soft_state() per-instance data. */
static void *statep;
extern struct mod_ops mod_driverops;
"mem_cache_driver (08/01/30) ",
};
static struct modlinkage modlinkage = {
&modldrv,
0
};
extern const int _ncpu; /* Pull the kernel's global _ncpu definition */
/*
 * _init(9E): loadable-module load entry point.
 * NOTE(review): body is garbled — the ddi_soft_state_init() call whose
 * failure path the first "return (e)" belongs to is missing, and the
 * mod_install() lines sit outside the braces; kept as found.
 */
int
_init(void)
{
int e;
return (e);
}
if ((e = mod_install(&modlinkage)) != 0)
return (e);
}
int
_fini(void)
{
int e;
if ((e = mod_remove(&modlinkage)) != 0)
return (e);
return (DDI_SUCCESS);
}
/*
 * NOTE(review): fragment — by position this is the _info(9E) entry
 * point; its parameter list and mod_info() call are missing.
 */
int
{
}
/*ARGSUSED*/
/*
 * getinfo(9E)-style entry point: maps a dev_t to its dev_info pointer
 * (DDI_INFO_DEVT2DEVINFO) or its instance number (DDI_INFO_DEVT2INSTANCE).
 * NOTE(review): the signature line and the per-case bodies are missing
 * from this copy; fragments kept as found.
 */
static int
{
int inst;
int retval = DDI_SUCCESS;
struct mem_cache_softc *softc;
switch (cmd) {
case DDI_INFO_DEVT2DEVINFO:
} else
break;
case DDI_INFO_DEVT2INSTANCE:
break;
default:
}
return (retval);
}
/*
 * attach(9E)-style entry point: on DDI_ATTACH it bounds-checks the
 * instance against MAX_MEM_CACHE_INSTANCES, allocates per-instance soft
 * state, and creates the device minor node; DDI_RESUME is a no-op
 * success.  On failure it frees any soft state already allocated.
 * NOTE(review): the signature, the ddi_get_instance()/minor-node-create
 * calls, and most statement heads are missing; fragments kept as found.
 */
static int
{
int inst;
char name[80];
switch (cmd) {
case DDI_ATTACH:
if (inst >= MAX_MEM_CACHE_INSTANCES) {
return (DDI_FAILURE);
}
inst,
DDI_FAILURE) {
return (DDI_FAILURE);
}
/* Allocate a soft state structure for this instance */
"for inst %d\n", inst);
goto attach_failed;
}
/* Setup soft state */
/* Create main environmental node */
return (DDI_SUCCESS);
case DDI_RESUME:
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
/* Free soft state, if allocated. remove minor node if added earlier */
if (softc)
return (DDI_FAILURE);
}
/*
 * detach(9E)-style entry point: DDI_DETACH frees the instance soft
 * state and removes the minor node; DDI_SUSPEND is a no-op success.
 * NOTE(review): the signature and the actual free/remove calls are
 * missing from this copy; fragments kept as found.
 */
static int
{
int inst;
struct mem_cache_softc *softc;
switch (cmd) {
case DDI_DETACH:
return (ENXIO);
/* Free the soft state and remove minor node added earlier */
return (DDI_SUCCESS);
case DDI_SUSPEND:
return (DDI_SUCCESS);
default:
return (DDI_FAILURE);
}
}
/*ARGSUSED*/
/*
 * NOTE(review): two back-to-back fragments — by position these are the
 * driver's open(9E) and close(9E) stubs; their signatures and bodies
 * are missing from this copy.
 */
static int
{
}
/*ARGSUSED*/
static int
{
}
/*
 * Human-readable names for the Panther E$ tag state field, indexed by
 * the 3-bit PN_ECSTATE_* encoding (0-7).  The table must hold exactly
 * eight entries so every encodable state maps to the right string.
 */
static char *tstate_to_desc[] = {
	"Invalid",	/* 0 */
	"Shared",	/* 1 */
	"Exclusive",	/* 2 */
	"Owner",	/* 3 */
	"Modified",	/* 4 */
	"NA",		/* 5 */
	"Owner/Shared",	/* 6 - PN_ECSTATE_OS; was missing, shifting entry 7 down */
	"Reserved(7)",	/* 7 */
};
/*
 * NOTE(review): fragment — a helper returning a string; by position it
 * maps a tag-state value into the tstate_to_desc[] table above.  Its
 * signature and body are missing from this copy.
 */
static char *
{
}
/*
 * NOTE(review): fragment — a diagnostic printer; only the format string
 * (PA, E$ tag, E$ state) survives.  Signature, the cmn_err-style call
 * head, and its arguments are missing from this copy.
 */
void
{
"PA=0x%08x.%08x E$tag 0x%08x.%08x E$state %s\n",
}
/*
 * Diagnostic dump of a captured L2 cache line: prints way/index/PA/tag/
 * state, then walks the line in 16-byte chunks comparing computed vs.
 * captured ECC.
 * NOTE(review): the signature, the print-call heads, the data-pointer
 * assignment, and the if/else chunk-selection bodies are missing from
 * this copy; fragments kept as found.
 */
void
{
int i, offset;
"\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
"E$tag 0x%08x.%08x E$state %s",
/*
* Dump out Ecache subblock data captured.
* For Cheetah, we need to compute the ECC for each 16-byte
* chunk and compare it with the captured chunk ECC to figure
* out which chunk is bad.
*/
for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
int l2_data_idx = (i/2);
offset = i * 16;
[l2_data_idx];
if ((i & 1) == 0) {
} else {
}
"\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
" ECC 0x%03x",
}
} /* end of for way loop */
}
/*
 * Diagnostic dump of a captured E$ (L3) cache line, parallel to the L2
 * printer above: way/index/PA/tag/state header, then 16-byte-chunk ECC
 * comparison.
 * NOTE(review): the signature, print-call heads, and the head of the
 * "ecdptr =" assignment's right-hand side are missing from this copy;
 * fragments kept as found.
 */
void
{
int i, offset;
"\nWAY = %d index = 0x%08x PA=0x%08x.%08x\n"
"E$tag 0x%08x.%08x E$state %s",
/*
* Dump out Ecache subblock data captured.
* For Cheetah, we need to compute the ECC for each 16-byte
* chunk and compare it with the captured chunk ECC to figure
* out which chunk is bad.
*/
for (i = 0; i < (CH_ECACHE_SUBBLK_SIZE/16); i++) {
int ec_data_idx = (i/2);
offset = i * 16;
ecdptr =
[ec_data_idx];
if ((i & 1) == 0) {
} else {
}
"\nE$Data (0x%02x) 0x%08x.%08x 0x%08x.%08x"
" ECC 0x%03x",
}
}
}
/*
 * Predicate: reports (B_TRUE/B_FALSE) whether a tag address collides
 * with a physical-address range, for either the L2 or L3 cache; when
 * mem_cache_debug bit 0 is set it also logs the collision.
 * NOTE(review): the signature, the collision comparison itself, and the
 * cmn_err call head are missing from this copy; fragments kept as found.
 */
static boolean_t
{
char *type_str;
switch (type) {
case L2_CACHE_TAG:
case L2_CACHE_DATA:
type_str = "L2:";
break;
case L3_CACHE_TAG:
case L3_CACHE_DATA:
type_str = "L3:";
break;
default:
/*
* Should never reach here.
*/
ASSERT(0);
return (B_FALSE);
}
if (mem_cache_debug & 0x1)
"%s collision detected tag_addr = 0x%08x"
" start_paddr = 0x%08x end_paddr = 0x%08x\n",
return (B_TRUE);
}
else
return (B_FALSE);
}
/*
 * Computes the diagnostic tag address for the cache named in
 * cache_info->cache (L2 vs. L3), returning 0 for an unknown cache type.
 * NOTE(review): the signature and the per-cache tag_addr computations
 * are missing from this copy; fragments kept as found.
 */
static uint64_t
{
switch (cache_info->cache) {
case L2_CACHE_TAG:
case L2_CACHE_DATA:
break;
case L3_CACHE_TAG:
case L3_CACHE_DATA:
break;
default:
/*
* Should never reach here.
*/
ASSERT(0);
return (uint64_t)(0);
}
return (tag_addr);
}
/*
 * Worker for the driver ioctls: validates the way/index in cache_info,
 * then dispatches on cmd — MEM_CACHE_RETIRE marks a line NA (with up to
 * two retries for L2), MEM_CACHE_UNRETIRE restores a retired line, and
 * MEM_CACHE_READ_TAGS copies the tags of all ways at an AFAR out to the
 * caller.  Errors: EINVAL for bad way/index/cpu or un-retired unretire,
 * ENOTSUP for unknown cache/cmd, EIO on retire/unretire failure, EFAULT
 * on copyout failure.
 * NOTE(review): this copy is heavily truncated — the signature, the
 * way/index bound expressions, the retire_l2()/retire_l3()/unretire
 * call sites, the xt_one() cross-call argument lists, and the copyout()
 * head are all missing; fragments kept exactly as found.
 */
static int
{
int ret_val = 0;
int i, retire_retry_count;
switch (cache_info->cache) {
case L2_CACHE_TAG:
case L2_CACHE_DATA:
return (EINVAL);
if (cache_info->index >=
return (EINVAL);
break;
case L3_CACHE_TAG:
case L3_CACHE_DATA:
return (EINVAL);
if (cache_info->index >=
return (EINVAL);
break;
default:
return (ENOTSUP);
}
/*
* Check if we have a valid cpu ID and that
* CPU is ONLINE.
*/
return (EINVAL);
}
switch (cmd) {
case MEM_CACHE_RETIRE:
MSB_BIT_MASK) {
} else {
pattern = 0;
}
pattern |= PN_ECSTATE_NA;
retire_retry_count = 0;
switch (cache_info->cache) {
case L2_CACHE_DATA:
case L2_CACHE_TAG:
/* NOTE(review): retire_l2()/retire_l2_alternate() call heads missing here. */
ret_val =
else
pattern);
if (ret_val == 1) {
/*
* cacheline was in retired
* STATE already.
* so return success.
*/
ret_val = 0;
}
if (ret_val < 0) {
"retire_l2() failed. index = 0x%x way %d. Retrying...\n",
cache_info->way);
if (retire_retry_count >= 2) {
return (EIO);
}
goto retry_l2_retire;
}
if (ret_val == 2)
/*
* We bind ourself to a CPU and send cross trap to
* ourself. On return from xt_one we can rely on the
* data in tag_data being filled in. Normally one would
* do a xt_sync to make sure that the CPU has completed
* the cross trap call xt_one.
*/
(xcfunc_t *)(get_l2_tag_tl1),
if (state != PN_ECSTATE_NA) {
tag_data);
"L2 RETIRE:failed for index 0x%x way %d. Retrying...\n",
cache_info->way);
if (retire_retry_count >= 2) {
return (EIO);
}
goto retry_l2_retire;
}
break;
case L3_CACHE_TAG:
case L3_CACHE_DATA:
/* NOTE(review): retire_l3()/retire_l3_alternate() call heads missing here. */
ret_val =
else
pattern);
if (ret_val == 1) {
/*
* cacheline was in retired
* STATE already.
* so return success.
*/
ret_val = 0;
}
if (ret_val < 0) {
"retire_l3() failed. ret_val = %d index = 0x%x\n",
cache_info->index);
return (EIO);
}
/*
* We bind ourself to a CPU and send cross trap to
* ourself. On return from xt_one we can rely on the
* data in tag_data being filled in. Normally one would
* do a xt_sync to make sure that the CPU has completed
* the cross trap call xt_one.
*/
(xcfunc_t *)(get_l3_tag_tl1),
if (state != PN_ECSTATE_NA) {
"L3 RETIRE failed for index 0x%x\n",
cache_info->index);
return (EIO);
}
break;
}
break;
case MEM_CACHE_UNRETIRE:
switch (cache_info->cache) {
case L2_CACHE_DATA:
case L2_CACHE_TAG:
/*
*/
/*
* We bind ourself to a CPU and send cross trap to
* ourself. On return from xt_one we can rely on the
* data in tag_data being filled in. Normally one would
* do a xt_sync to make sure that the CPU has completed
* the cross trap call xt_one.
*/
(xcfunc_t *)(get_l2_tag_tl1),
if (state != PN_ECSTATE_NA) {
return (EINVAL);
}
ret_val =
else
ret_val =
pattern);
if (ret_val != 0) {
"unretire_l2() failed. ret_val = %d index = 0x%x\n",
cache_info->index);
return (EIO);
}
break;
case L3_CACHE_TAG:
case L3_CACHE_DATA:
/*
*/
/*
* We bind ourself to a CPU and send cross trap to
* ourself. On return from xt_one we can rely on the
* data in tag_data being filled in. Normally one would
* do a xt_sync to make sure that the CPU has completed
* the cross trap call xt_one.
*/
(xcfunc_t *)(get_l3_tag_tl1),
if (state != PN_ECSTATE_NA) {
return (EINVAL);
}
ret_val =
else
ret_val =
pattern);
if (ret_val != 0) {
"unretire_l3() failed. ret_val = %d index = 0x%x\n",
cache_info->index);
return (EIO);
}
break;
}
break;
case MEM_CACHE_ISRETIRED:
case MEM_CACHE_STATE:
return (ENOTSUP);
case MEM_CACHE_READ_TAGS:
#ifdef DEBUG
#endif
/*
* Read tag and data for all the ways at a given afar
*/
<< PN_CACHE_LINE_SHIFT);
/*
* We bind ourself to a CPU and send cross trap to
* ourself. On return from xt_one we can rely on the
* data in clop being filled in. Normally one would
* do a xt_sync to make sure that the CPU has completed
* the cross trap call xt_one.
*/
(xcfunc_t *)(get_ecache_dtags_tl1),
switch (cache_info->cache) {
case L2_CACHE_TAG:
for (i = 0; i < PN_CACHE_NWAYS; i++) {
Lxcache_tag_data[i] =
[i].ec_tag;
}
break;
case L3_CACHE_TAG:
for (i = 0; i < PN_CACHE_NWAYS; i++) {
Lxcache_tag_data[i] =
[i].ec_tag;
}
break;
default:
return (ENOTSUP);
} /* end if switch(cache) */
#ifdef DEBUG
if (cmd == MEM_CACHE_READ_ERROR_INJECTED_TAGS) {
/*
* If error bit is ECC we need to make sure
* ECC on all all WAYS are corrupted.
*/
if ((last_error_injected_bit >= 6) &&
(last_error_injected_bit <= 14)) {
for (i = 0; i < PN_CACHE_NWAYS; i++)
Lxcache_tag_data[i] ^=
} else
}
#endif
sizeof (Lxcache_tag_data), mode)
!= DDI_SUCCESS) {
return (EFAULT);
}
break; /* end of READ_TAGS */
default:
return (ENOTSUP);
} /* end if switch(cmd) */
return (ret_val);
}
/*ARGSUSED*/
/*
 * ioctl(9E) entry point: copies the cache_info argument in from user
 * space (with a 32-bit compatibility path under _MULTI_DATAMODEL),
 * rejects non-Panther CPUs with ENOTSUP, then forwards supported cmds
 * to mem_cache_ioctl_ops().
 * NOTE(review): the signature's first line, the soft-state lookup, the
 * ddi_copyin calls, the is_panther computation, and the per-cmd bodies
 * are missing from this copy; fragments kept as found.
 */
static int
int *rvalp)
{
int inst;
struct mem_cache_softc *softc;
int ret_val;
int is_panther;
return (ENXIO);
#ifdef _MULTI_DATAMODEL
return (EFAULT);
}
} else
#endif
return (EFAULT);
}
return (EINVAL);
}
if (!is_panther) {
return (ENOTSUP);
}
switch (cmd) {
case MEM_CACHE_RETIRE:
case MEM_CACHE_UNRETIRE:
break;
}
/*FALLTHROUGH*/
case MEM_CACHE_ISRETIRED:
case MEM_CACHE_STATE:
case MEM_CACHE_READ_TAGS:
#ifdef DEBUG
#endif
break;
default:
break;
}
return (ret_val);
}