Lines Matching defs:cpi

374 #define CPI_FAMILY_XTD(cpi) BITX((cpi)->cpi_std[1].cp_eax, 27, 20)
375 #define CPI_MODEL_XTD(cpi) BITX((cpi)->cpi_std[1].cp_eax, 19, 16)
376 #define CPI_TYPE(cpi) BITX((cpi)->cpi_std[1].cp_eax, 13, 12)
377 #define CPI_FAMILY(cpi) BITX((cpi)->cpi_std[1].cp_eax, 11, 8)
378 #define CPI_STEP(cpi) BITX((cpi)->cpi_std[1].cp_eax, 3, 0)
379 #define CPI_MODEL(cpi) BITX((cpi)->cpi_std[1].cp_eax, 7, 4)
381 #define CPI_FEATURES_EDX(cpi) ((cpi)->cpi_std[1].cp_edx)
382 #define CPI_FEATURES_ECX(cpi) ((cpi)->cpi_std[1].cp_ecx)
383 #define CPI_FEATURES_XTD_EDX(cpi) ((cpi)->cpi_extd[1].cp_edx)
384 #define CPI_FEATURES_XTD_ECX(cpi) ((cpi)->cpi_extd[1].cp_ecx)
385 #define CPI_FEATURES_7_0_EBX(cpi) ((cpi)->cpi_std[7].cp_ebx)
387 #define CPI_BRANDID(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 7, 0)
388 #define CPI_CHUNKS(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 15, 7)
389 #define CPI_CPU_COUNT(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 23, 16)
390 #define CPI_APIC_ID(cpi) BITX((cpi)->cpi_std[1].cp_ebx, 31, 24)
425 #define IS_LEGACY_P6(cpi) ( \
426 cpi->cpi_family == 6 && \
427 (cpi->cpi_model == 1 || \
428 cpi->cpi_model == 3 || \
429 cpi->cpi_model == 5 || \
430 cpi->cpi_model == 6 || \
431 cpi->cpi_model == 7 || \
432 cpi->cpi_model == 8 || \
433 cpi->cpi_model == 0xA || \
434 cpi->cpi_model == 0xB) \
438 #define IS_NEW_F6(cpi) ((cpi->cpi_family == 6) && !IS_LEGACY_P6(cpi))
441 #define IS_EXTENDED_MODEL_INTEL(cpi) (cpi->cpi_family == 0x6 || \
442 cpi->cpi_family >= 0xf)
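The CPI_* macros above only pull raw bit fields out of CPUID leaf 1; cpuid_pass1() (lines 998-1021 below) is what combines them into the effective family and model that IS_LEGACY_P6/IS_NEW_F6 then test. A minimal standalone sketch of that composition follows; BITX() here is an assumed stand-in for the kernel's bit-extraction macro (inclusive range [high:low]) and decode_fms() is purely illustrative, not a function in this file.

#include <stdint.h>

#define BITX(u, h, l)   (((u) >> (l)) & ((1u << ((h) - (l) + 1)) - 1))

/* Illustrative only: decode CPUID.1:EAX the way cpuid_pass1() does. */
static void
decode_fms(uint32_t eax, int is_intel, uint32_t *family, uint32_t *model,
    uint32_t *step)
{
        *family = BITX(eax, 11, 8);     /* CPI_FAMILY */
        *model = BITX(eax, 7, 4);       /* CPI_MODEL */
        *step = BITX(eax, 3, 0);        /* CPI_STEP */

        /* The extended family only counts when the base family is 0xf. */
        if (*family == 0xf)
                *family += BITX(eax, 27, 20);   /* CPI_FAMILY_XTD */

        /*
         * Intel folds in the extended model for family 6 and >= 0xf
         * (IS_EXTENDED_MODEL_INTEL); AMD only when the base family
         * field itself is 0xf.
         */
        if ((is_intel && (*family == 0x6 || *family >= 0xf)) ||
            (!is_intel && BITX(eax, 11, 8) == 0xf))
                *model += BITX(eax, 19, 16) << 4;       /* CPI_MODEL_XTD */
}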
456 #define MWAIT_SUPPORTED(cpi) ((cpi)->cpi_std[1].cp_ecx & CPUID_INTC_ECX_MON)
457 #define MWAIT_INT_ENABLE(cpi) ((cpi)->cpi_std[5].cp_ecx & 0x2)
458 #define MWAIT_EXTENSION(cpi) ((cpi)->cpi_std[5].cp_ecx & 0x1)
459 #define MWAIT_SIZE_MIN(cpi) BITX((cpi)->cpi_std[5].cp_eax, 15, 0)
460 #define MWAIT_SIZE_MAX(cpi) BITX((cpi)->cpi_std[5].cp_ebx, 15, 0)
464 #define MWAIT_NUM_SUBC_STATES(cpi, c_state) \
465 BITX((cpi)->cpi_std[5].cp_edx, c_state + 3, c_state)
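The MWAIT_* macros decode CPUID leaf 5 out of the cached cpi_std[5] registers: EAX[15:0] and EBX[15:0] are the smallest and largest monitor-line sizes, ECX bit 0 advertises the MWAIT extensions and bit 1 the interrupt break-event capability, and EDX packs the number of sub C-states, one nibble per C-state. For comparison, the same leaf can be read from userland; this sketch assumes GCC's <cpuid.h> and is not part of this file.

#include <stdio.h>
#include <cpuid.h>

int
main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* __get_cpuid() fails if leaf 5 is above the CPU's maximum leaf. */
        if (!__get_cpuid(5, &eax, &ebx, &ecx, &edx))
                return (1);

        printf("monitor line size: min %u, max %u bytes\n",
            eax & 0xffff, ebx & 0xffff);        /* MWAIT_SIZE_MIN/MAX */
        printf("extensions %u, interrupt break-event %u\n",
            ecx & 0x1, (ecx >> 1) & 0x1);       /* MWAIT_EXTENSION/INT_ENABLE */

        /* One nibble of sub C-state count per C-state, as in line 465. */
        for (int c = 0; c < 8; c++)
                printf("C%d sub-states: %u\n", c, (edx >> (c * 4)) & 0xf);
        return (0);
}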
591 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
594 ASSERT(cpi != NULL);
595 ASSERT(cpi != &cpuid_info0);
600 for (i = 1; i < cpi->cpi_std_4_size; i++)
601 kmem_free(cpi->cpi_std_4[i], sizeof (struct cpuid_regs));
602 if (cpi->cpi_std_4_size > 0)
603 kmem_free(cpi->cpi_std_4,
604 cpi->cpi_std_4_size * sizeof (struct cpuid_regs *));
606 kmem_free(cpi, sizeof (*cpi));
740 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
742 for (i = 1; i < cpi->cpi_ncpu_per_chip; i <<= 1)
745 cpi->cpi_chipid = cpi->cpi_apicid >> chipid_shift;
746 cpi->cpi_clogid = cpi->cpi_apicid & ((1 << chipid_shift) - 1);
754 if (cpi->cpi_ncore_per_chip == 1)
755 ncpu_per_core = cpi->cpi_ncpu_per_chip;
756 else if (cpi->cpi_ncore_per_chip > 1)
757 ncpu_per_core = cpi->cpi_ncpu_per_chip /
758 cpi->cpi_ncore_per_chip;
775 * store the value of cpi->cpi_ncpu_per_chip.
778 * cpi->cpi_ncore_per_chip.
782 cpi->cpi_coreid = cpi->cpi_apicid >> coreid_shift;
783 cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
788 cpi->cpi_coreid = cpi->cpi_chipid;
789 cpi->cpi_pkgcoreid = 0;
791 cpi->cpi_procnodeid = cpi->cpi_chipid;
792 cpi->cpi_compunitid = cpi->cpi_coreid;
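The chipid_shift computed by the loop at line 742 is just log2 of the logical CPU count per chip, so the chip ID and per-chip logical ID fall straight out of the initial APIC ID (lines 745-746), and the core IDs repeat the trick with coreid_shift (lines 782-783). A small worked sketch with hypothetical values, assuming a power-of-two cpi_ncpu_per_chip:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        uint32_t apicid = 0x1a;         /* example initial APIC ID (26) */
        uint32_t ncpu_per_chip = 8;     /* CPI_CPU_COUNT: CPUID.1:EBX[23:16] */
        uint32_t chipid_shift = 0;

        /* Same loop shape as line 742: ends with chipid_shift == log2(8) == 3. */
        for (uint32_t i = 1; i < ncpu_per_chip; i <<= 1)
                chipid_shift++;

        printf("chipid %u, clogid %u\n",
            apicid >> chipid_shift,                     /* 26 >> 3 == 3 */
            apicid & ((1u << chipid_shift) - 1));       /* 26 & 0x7 == 2 */
        return (0);
}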
801 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
827 cpi->cpi_coreid = cpu->cpu_id;
828 cpi->cpi_compunitid = cpu->cpu_id;
830 if (cpi->cpi_xmaxeax >= 0x80000008) {
832 coreidsz = BITX((cpi)->cpi_extd[8].cp_ecx, 15, 12);
838 cpi->cpi_ncore_per_chip =
839 BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
842 for (i = 1; i < cpi->cpi_ncore_per_chip; i <<= 1)
849 cpi->cpi_ncore_per_chip = 1;
853 cpi->cpi_clogid = cpi->cpi_pkgcoreid =
854 cpi->cpi_apicid & ((1<<coreidsz) - 1);
855 cpi->cpi_ncpu_per_chip = cpi->cpi_ncore_per_chip;
859 cpi->cpi_xmaxeax >= 0x8000001e) {
860 cp = &cpi->cpi_extd[0x1e];
864 cpi->cpi_procnodes_per_pkg = BITX(cp->cp_ecx, 10, 8) + 1;
865 cpi->cpi_procnodeid = BITX(cp->cp_ecx, 7, 0);
866 cpi->cpi_cores_per_compunit = BITX(cp->cp_ebx, 15, 8) + 1;
867 cpi->cpi_compunitid = BITX(cp->cp_ebx, 7, 0)
868 + (cpi->cpi_ncore_per_chip / cpi->cpi_cores_per_compunit)
869 * (cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg);
870 } else if (cpi->cpi_family == 0xf || cpi->cpi_family >= 0x11) {
871 cpi->cpi_procnodeid = (cpi->cpi_apicid >> coreidsz) & 7;
872 } else if (cpi->cpi_family == 0x10) {
878 if ((cpi->cpi_model < 8) || BITX(nb_caps_reg, 29, 29) == 0) {
880 cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 5,
888 cpi->cpi_procnodes_per_pkg = 2;
890 first_half = (cpi->cpi_pkgcoreid <=
891 (cpi->cpi_ncore_per_chip/2 - 1));
893 if (cpi->cpi_apicid == cpi->cpi_pkgcoreid) {
895 cpi->cpi_procnodeid = (first_half ? 0 : 1);
900 node2_1 = BITX(cpi->cpi_apicid, 5, 4) << 1;
910 cpi->cpi_procnodeid = node2_1 +
913 cpi->cpi_procnodeid = node2_1 +
918 cpi->cpi_procnodeid = 0;
921 cpi->cpi_chipid =
922 cpi->cpi_procnodeid / cpi->cpi_procnodes_per_pkg;
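As an illustrative example of that last step: with cpi_procnodes_per_pkg == 2 (taken from leaf 0x8000001e at line 864, or hard-coded for the family 0x10 multi-chip-module case at line 888), node IDs 4 and 5 both divide down to chipid 2, so the two dies of one package are reported as a single chip.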
950 struct cpuid_info *cpi;
967 cpi = cpu->cpu_m.mcpu_cpi;
968 ASSERT(cpi != NULL);
969 cp = &cpi->cpi_std[0];
971 cpi->cpi_maxeax = __cpuid_insn(cp);
973 uint32_t *iptr = (uint32_t *)cpi->cpi_vendorstr;
977 *(char *)&cpi->cpi_vendorstr[12] = '\0';
980 cpi->cpi_vendor = _cpuid_vendorstr_to_vendorcode(cpi->cpi_vendorstr);
981 x86_vendor = cpi->cpi_vendor; /* for compatibility */
986 if (cpi->cpi_maxeax > CPI_MAXEAX_MAX)
987 cpi->cpi_maxeax = CPI_MAXEAX_MAX;
988 if (cpi->cpi_maxeax < 1)
991 cp = &cpi->cpi_std[1];
998 cpi->cpi_model = CPI_MODEL(cpi);
999 cpi->cpi_family = CPI_FAMILY(cpi);
1001 if (cpi->cpi_family == 0xf)
1002 cpi->cpi_family += CPI_FAMILY_XTD(cpi);
1010 switch (cpi->cpi_vendor) {
1012 if (IS_EXTENDED_MODEL_INTEL(cpi))
1013 cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
1016 if (CPI_FAMILY(cpi) == 0xf)
1017 cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
1020 if (cpi->cpi_model == 0xf)
1021 cpi->cpi_model += CPI_MODEL_XTD(cpi) << 4;
1025 cpi->cpi_step = CPI_STEP(cpi);
1026 cpi->cpi_brandid = CPI_BRANDID(cpi);
1037 cpi->cpi_pabits = cpi->cpi_vabits = 32;
1039 switch (cpi->cpi_vendor) {
1041 if (cpi->cpi_family == 5)
1043 else if (IS_LEGACY_P6(cpi)) {
1049 if (cpi->cpi_model < 3 && cpi->cpi_step < 3)
1051 } else if (IS_NEW_F6(cpi) || cpi->cpi_family == 0xf) {
1060 } else if (cpi->cpi_family > 0xf)
1066 if (cpi->cpi_maxeax < 5)
1074 if (cpi->cpi_family == 0xf && cpi->cpi_model == 0xe) {
1076 cpi->cpi_model = 0xc;
1079 if (cpi->cpi_family == 5) {
1092 if (cpi->cpi_model == 0) {
1102 if (cpi->cpi_model < 6)
1110 if (cpi->cpi_family >= 0xf)
1116 if (cpi->cpi_maxeax < 5)
1135 if (cpi->cpi_family == 5 && cpi->cpi_model == 4 &&
1136 (cpi->cpi_step == 2 || cpi->cpi_step == 3))
1143 if (cpi->cpi_family == 6)
1232 platform_cpuid_mangle(cpi->cpi_vendor, 1, cp);
1238 if (cpi->cpi_vendor == X86_VENDOR_Intel && cpi->cpi_maxeax >= 7) {
1240 ecp = &cpi->cpi_std[7];
1376 if (cpi->cpi_std[7].cp_ebx &
1381 if (cpi->cpi_std[7].cp_ebx &
1386 if (cpi->cpi_std[7].cp_ebx &
1407 cpi->cpi_mwait.support |= MWAIT_SUPPORT;
1440 cpi->cpi_pabits = 36;
1452 cpi->cpi_ncpu_per_chip = CPI_CPU_COUNT(cpi);
1453 if (cpi->cpi_ncpu_per_chip > 1)
1456 cpi->cpi_ncpu_per_chip = 1;
1464 switch (cpi->cpi_vendor) {
1466 if (IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf)
1470 if (cpi->cpi_family > 5 ||
1471 (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
1491 cp = &cpi->cpi_extd[0];
1493 cpi->cpi_xmaxeax = __cpuid_insn(cp);
1496 if (cpi->cpi_xmaxeax & 0x80000000) {
1498 if (cpi->cpi_xmaxeax > CPI_XMAXEAX_MAX)
1499 cpi->cpi_xmaxeax = CPI_XMAXEAX_MAX;
1501 switch (cpi->cpi_vendor) {
1504 if (cpi->cpi_xmaxeax < 0x80000001)
1506 cp = &cpi->cpi_extd[1];
1510 if (cpi->cpi_vendor == X86_VENDOR_AMD &&
1511 cpi->cpi_family == 5 &&
1512 cpi->cpi_model == 6 &&
1513 cpi->cpi_step == 6) {
1524 platform_cpuid_mangle(cpi->cpi_vendor, 0x80000001, cp);
1549 if ((cpi->cpi_vendor == X86_VENDOR_AMD) &&
1550 (cpi->cpi_std[1].cp_edx & CPUID_INTC_EDX_FXSR) &&
1560 if (cpi->cpi_vendor == X86_VENDOR_AMD &&
1605 switch (cpi->cpi_vendor) {
1607 if (cpi->cpi_maxeax >= 4) {
1608 cp = &cpi->cpi_std[4];
1612 platform_cpuid_mangle(cpi->cpi_vendor, 4, cp);
1616 if (cpi->cpi_xmaxeax < 0x80000008)
1618 cp = &cpi->cpi_extd[8];
1621 platform_cpuid_mangle(cpi->cpi_vendor, 0x80000008, cp);
1627 cpi->cpi_pabits = BITX(cp->cp_eax, 7, 0);
1628 cpi->cpi_vabits = BITX(cp->cp_eax, 15, 8);
1637 switch (cpi->cpi_vendor) {
1639 if (cpi->cpi_maxeax < 4) {
1640 cpi->cpi_ncore_per_chip = 1;
1643 cpi->cpi_ncore_per_chip =
1644 BITX((cpi)->cpi_std[4].cp_eax, 31, 26) + 1;
1648 if (cpi->cpi_xmaxeax < 0x80000008) {
1649 cpi->cpi_ncore_per_chip = 1;
1660 cpi->cpi_ncore_per_chip =
1661 BITX((cpi)->cpi_extd[8].cp_ecx, 7, 0) + 1;
1665 cpi->cpi_ncore_per_chip = 1;
1672 switch (cpi->cpi_vendor) {
1674 if (cpi->cpi_maxeax >= 7) {
1675 cp = &cpi->cpi_extd[7];
1685 cpi->cpi_ncore_per_chip = 1;
1691 if (cpi->cpi_ncore_per_chip > 1) {
1699 if (cpi->cpi_ncpu_per_chip == cpi->cpi_ncore_per_chip) {
1703 cpi->cpi_apicid = CPI_APIC_ID(cpi);
1704 cpi->cpi_procnodes_per_pkg = 1;
1705 cpi->cpi_cores_per_compunit = 1;
1711 cpi->cpi_chipid = -1;
1712 cpi->cpi_clogid = 0;
1713 cpi->cpi_coreid = cpu->cpu_id;
1714 cpi->cpi_pkgcoreid = 0;
1715 if (cpi->cpi_vendor == X86_VENDOR_AMD)
1716 cpi->cpi_procnodeid = BITX(cpi->cpi_apicid, 3, 0);
1718 cpi->cpi_procnodeid = cpi->cpi_chipid;
1719 } else if (cpi->cpi_ncpu_per_chip > 1) {
1720 if (cpi->cpi_vendor == X86_VENDOR_Intel)
1722 else if (cpi->cpi_vendor == X86_VENDOR_AMD)
1729 cpi->cpi_coreid = cpi->cpi_chipid;
1730 cpi->cpi_pkgcoreid = 0;
1731 cpi->cpi_procnodeid = cpi->cpi_chipid;
1732 cpi->cpi_compunitid = cpi->cpi_chipid;
1739 cpi->cpi_chiprev = _cpuid_chiprev(cpi->cpi_vendor, cpi->cpi_family,
1740 cpi->cpi_model, cpi->cpi_step);
1741 cpi->cpi_chiprevstr = _cpuid_chiprevstr(cpi->cpi_vendor,
1742 cpi->cpi_family, cpi->cpi_model, cpi->cpi_step);
1743 cpi->cpi_socket = _cpuid_skt(cpi->cpi_vendor, cpi->cpi_family,
1744 cpi->cpi_model, cpi->cpi_step);
1747 cpi->cpi_pass = 1;
1767 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
1769 ASSERT(cpi->cpi_pass == 1);
1771 if (cpi->cpi_maxeax < 1)
1774 if ((nmax = cpi->cpi_maxeax + 1) > NMAX_CPI_STD)
1779 for (n = 2, cp = &cpi->cpi_std[2]; n < nmax; n++, cp++) {
1801 platform_cpuid_mangle(cpi->cpi_vendor, n, cp);
1813 cpi->cpi_ncache = sizeof (*cp) *
1815 if (cpi->cpi_ncache == 0)
1817 cpi->cpi_ncache--; /* skip count byte */
1824 if (cpi->cpi_ncache > (sizeof (*cp) - 1))
1825 cpi->cpi_ncache = sizeof (*cp) - 1;
1827 dp = cpi->cpi_cacheinfo;
1867 if (!(cpi->cpi_mwait.support & MWAIT_SUPPORT))
1874 mwait_size = (size_t)MWAIT_SIZE_MAX(cpi);
1884 cpi->cpi_mwait.mon_min = (size_t)MWAIT_SIZE_MIN(cpi);
1885 cpi->cpi_mwait.mon_max = mwait_size;
1886 if (MWAIT_EXTENSION(cpi)) {
1887 cpi->cpi_mwait.support |= MWAIT_EXTENSIONS;
1888 if (MWAIT_INT_ENABLE(cpi))
1889 cpi->cpi_mwait.support |=
1899 if (cpi->cpi_maxeax >= 0xB && cpi->cpi_vendor == X86_VENDOR_Intel) {
1940 cpi->cpi_apicid = x2apic_id;
1941 cpi->cpi_ncpu_per_chip = ncpu_per_chip;
1942 cpi->cpi_ncore_per_chip = ncpu_per_chip /
1944 cpi->cpi_chipid = x2apic_id >> chipid_shift;
1945 cpi->cpi_clogid = x2apic_id & ((1 << chipid_shift) - 1);
1946 cpi->cpi_coreid = x2apic_id >> coreid_shift;
1947 cpi->cpi_pkgcoreid = cpi->cpi_clogid >> coreid_shift;
1957 if (cpi->cpi_maxeax >= 0xD) {
1975 cpi->cpi_xsave.xsav_hw_features_low = cp->cp_eax;
1976 cpi->cpi_xsave.xsav_hw_features_high = cp->cp_edx;
1977 cpi->cpi_xsave.xsav_max_size = cp->cp_ecx;
1983 if (cpi->cpi_xsave.xsav_hw_features_low & XFEATURE_AVX) {
1995 cpi->cpi_xsave.ymm_size = cp->cp_eax;
1996 cpi->cpi_xsave.ymm_offset = cp->cp_ebx;
2002 xsave_state_size = cpi->cpi_xsave.xsav_max_size;
2008 cpu->cpu_id, cpi->cpi_xsave.xsav_hw_features_low,
2009 cpi->cpi_xsave.xsav_hw_features_high,
2010 (int)cpi->cpi_xsave.xsav_max_size,
2011 (int)cpi->cpi_xsave.ymm_size,
2012 (int)cpi->cpi_xsave.ymm_offset);
2047 CPI_FEATURES_ECX(cpi) &=
2049 CPI_FEATURES_ECX(cpi) &=
2051 CPI_FEATURES_ECX(cpi) &=
2053 CPI_FEATURES_ECX(cpi) &=
2055 CPI_FEATURES_7_0_EBX(cpi) &=
2057 CPI_FEATURES_7_0_EBX(cpi) &=
2059 CPI_FEATURES_7_0_EBX(cpi) &=
2071 if ((cpi->cpi_xmaxeax & 0x80000000) == 0)
2074 if ((nmax = cpi->cpi_xmaxeax - 0x80000000 + 1) > NMAX_CPI_EXTD)
2080 iptr = (void *)cpi->cpi_brandstr;
2081 for (n = 2, cp = &cpi->cpi_extd[2]; n < nmax; cp++, n++) {
2084 platform_cpuid_mangle(cpi->cpi_vendor, 0x80000000 + n, cp);
2098 switch (cpi->cpi_vendor) {
2106 if (cpi->cpi_family < 6 ||
2107 (cpi->cpi_family == 6 &&
2108 cpi->cpi_model < 1))
2116 switch (cpi->cpi_vendor) {
2123 if (cpi->cpi_family < 6 ||
2124 cpi->cpi_family == 6 &&
2125 cpi->cpi_model < 1)
2132 if (cpi->cpi_family == 6 &&
2133 cpi->cpi_model == 3 &&
2134 cpi->cpi_step == 0) {
2144 if (cpi->cpi_family != 6)
2151 if (cpi->cpi_model == 7 ||
2152 cpi->cpi_model == 8)
2161 if (cpi->cpi_model == 9 && cpi->cpi_step == 1)
2179 cpi->cpi_pass = 2;
2183 intel_cpubrand(const struct cpuid_info *cpi)
2188 cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
2191 switch (cpi->cpi_family) {
2195 switch (cpi->cpi_model) {
2210 cp = &cpi->cpi_std[2]; /* cache info */
2255 return (cpi->cpi_model == 5 ?
2258 return (cpi->cpi_model == 5 ?
2269 if (cpi->cpi_brandid != 0) {
2298 sgn = (cpi->cpi_family << 8) |
2299 (cpi->cpi_model << 4) | cpi->cpi_step;
2302 if (brand_tbl[i].bt_bid == cpi->cpi_brandid)
2305 if (sgn == 0x6b1 && cpi->cpi_brandid == 3)
2307 if (sgn < 0xf13 && cpi->cpi_brandid == 0xb)
2309 if (sgn < 0xf13 && cpi->cpi_brandid == 0xe)
2319 amd_cpubrand(const struct cpuid_info *cpi)
2322 cpi->cpi_maxeax < 1 || cpi->cpi_family < 5)
2325 switch (cpi->cpi_family) {
2327 switch (cpi->cpi_model) {
2346 switch (cpi->cpi_model) {
2362 return ((cpi->cpi_extd[6].cp_ecx >> 16) >= 256 ?
2371 if (cpi->cpi_family == 0xf && cpi->cpi_model == 5 &&
2372 cpi->cpi_brandid != 0) {
2373 switch (BITX(cpi->cpi_brandid, 7, 5)) {
2389 cyrix_cpubrand(struct cpuid_info *cpi, uint_t type)
2392 cpi->cpi_maxeax < 1 || cpi->cpi_family < 5 ||
2415 if (cpi->cpi_family == 4 && cpi->cpi_model == 9)
2417 else if (cpi->cpi_family == 5) {
2418 switch (cpi->cpi_model) {
2426 } else if (cpi->cpi_family == 6) {
2427 switch (cpi->cpi_model) {
2451 fabricate_brandstr(struct cpuid_info *cpi)
2455 switch (cpi->cpi_vendor) {
2457 brand = intel_cpubrand(cpi);
2460 brand = amd_cpubrand(cpi);
2463 brand = cyrix_cpubrand(cpi, x86_type);
2466 if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
2470 if (cpi->cpi_family == 5)
2471 switch (cpi->cpi_model) {
2486 if (cpi->cpi_family == 5 &&
2487 (cpi->cpi_model == 0 || cpi->cpi_model == 2))
2491 if (cpi->cpi_family == 5 && cpi->cpi_model == 0)
2495 if (cpi->cpi_family == 5 && cpi->cpi_model == 4)
2504 (void) strcpy((char *)cpi->cpi_brandstr, brand);
2511 (void) snprintf(cpi->cpi_brandstr, sizeof (cpi->cpi_brandstr),
2512 "%s %d.%d.%d", cpi->cpi_vendorstr, cpi->cpi_family,
2513 cpi->cpi_model, cpi->cpi_step);
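This fallback means a CPU whose brand tables produced nothing still gets a synthetic, human-readable string of the form "<vendor> family.model.stepping", e.g. "AuthenticAMD 15.4.2" (hypothetical values).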
2531 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
2533 ASSERT(cpi->cpi_pass == 2);
2543 cpi->cpi_ncpu_shr_last_cache = 1;
2544 cpi->cpi_last_lvl_cacheid = cpu->cpu_id;
2546 if (cpi->cpi_maxeax >= 4 && cpi->cpi_vendor == X86_VENDOR_Intel) {
2565 cpi->cpi_ncpu_shr_last_cache =
2569 cpi->cpi_std_4_size = size = i;
2574 * cpuid_pass2() stashed in cpi->cpi_std[4].
2577 cpi->cpi_std_4 =
2579 cpi->cpi_std_4[0] = &cpi->cpi_std[4];
2589 cp = cpi->cpi_std_4[i] =
2605 for (i = 1; i < cpi->cpi_ncpu_shr_last_cache; i <<= 1)
2607 cpi->cpi_last_lvl_cacheid = cpi->cpi_apicid >> shft;
2613 if ((cpi->cpi_xmaxeax & 0x80000000) == 0) {
2614 fabricate_brandstr(cpi);
2622 if (cpi->cpi_brandstr[0]) {
2623 size_t maxlen = sizeof (cpi->cpi_brandstr);
2626 dst = src = (char *)cpi->cpi_brandstr;
2669 while (--dst > cpi->cpi_brandstr)
2675 fabricate_brandstr(cpi);
2677 cpi->cpi_pass = 3;
2689 struct cpuid_info *cpi;
2694 cpi = cpu->cpu_m.mcpu_cpi;
2696 ASSERT(cpi->cpi_pass == 3);
2698 if (cpi->cpi_maxeax >= 1) {
2699 uint32_t *edx = &cpi->cpi_support[STD_EDX_FEATURES];
2700 uint32_t *ecx = &cpi->cpi_support[STD_ECX_FEATURES];
2701 uint32_t *ebx = &cpi->cpi_support[STD_EBX_FEATURES];
2703 *edx = CPI_FEATURES_EDX(cpi);
2704 *ecx = CPI_FEATURES_ECX(cpi);
2705 *ebx = CPI_FEATURES_7_0_EBX(cpi);
2829 if (cpi->cpi_xmaxeax < 0x80000001)
2832 switch (cpi->cpi_vendor) {
2846 edx = &cpi->cpi_support[AMD_EDX_FEATURES];
2847 ecx = &cpi->cpi_support[AMD_ECX_FEATURES];
2849 *edx = CPI_FEATURES_XTD_EDX(cpi);
2850 *ecx = CPI_FEATURES_XTD_ECX(cpi);
2855 switch (cpi->cpi_vendor) {
2902 switch (cpi->cpi_vendor) {
2933 cpi->cpi_support[TM_EDX_FEATURES] = cp.cp_edx;
2941 cpi->cpi_pass = 4;
2957 struct cpuid_info *cpi;
2962 cpi = cpu->cpu_m.mcpu_cpi;
2970 if (cp->cp_eax <= cpi->cpi_maxeax && cp->cp_eax < NMAX_CPI_STD)
2971 xcp = &cpi->cpi_std[cp->cp_eax];
2972 else if (cp->cp_eax >= 0x80000000 && cp->cp_eax <= cpi->cpi_xmaxeax &&
2974 xcp = &cpi->cpi_extd[cp->cp_eax - 0x80000000];
3040 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
3042 if (cpi->cpi_vendor == X86_VENDOR_AMD &&
3043 cpi->cpi_xmaxeax >= 0x80000001 &&
3044 (CPI_FEATURES_XTD_EDX(cpi) & CPUID_AMD_EDX_SYSC))
3054 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
3064 return (snprintf(s, n, fmt_ht, cpi->cpi_chipid,
3065 cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
3066 cpi->cpi_family, cpi->cpi_model,
3067 cpi->cpi_step, cpu->cpu_type_info.pi_clock));
3069 cpi->cpi_vendorstr, cpi->cpi_std[1].cp_eax,
3070 cpi->cpi_family, cpi->cpi_model,
3071 cpi->cpi_step, cpu->cpu_type_info.pi_clock));
3169 struct cpuid_info *cpi;
3172 cpi = cpu->cpu_m.mcpu_cpi;
3176 socketstr = _cpuid_sktstr(cpi->cpi_vendor, cpi->cpi_family,
3177 cpi->cpi_model, cpi->cpi_step);
3256 struct cpuid_info *cpi;
3259 cpi = cpu->cpu_m.mcpu_cpi;
3260 if (cpi->cpi_vendor == X86_VENDOR_AMD && cpi->cpi_maxeax >= 1 &&
3261 (CPI_FEATURES_XTD_ECX(cpi) & CPUID_AMD_ECX_CR8D) != 0)
3281 struct cpuid_info *cpi;
3285 cpi = cpu->cpu_m.mcpu_cpi;
3290 *pabits = cpi->cpi_pabits;
3292 *vabits = cpi->cpi_vabits;
3305 struct cpuid_info *cpi;
3310 cpi = cpu->cpu_m.mcpu_cpi;
3317 if (cpi->cpi_xmaxeax >= 0x80000006) {
3318 struct cpuid_regs *cp = &cpi->cpi_extd[6];
3352 if (cpi->cpi_xmaxeax >= 0x80000005) {
3353 struct cpuid_regs *cp = &cpi->cpi_extd[5];
3381 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
3388 if (cpi->cpi_vendor != X86_VENDOR_AMD ||
3389 cpi->cpi_family == 4 || cpi->cpi_family == 5 ||
3390 cpi->cpi_family == 6)
3394 eax = cpi->cpi_std[1].cp_eax;
3434 return (cpi->cpi_family < 0x10);
3440 return (cpi->cpi_family <= 0x11);
3444 return (cpi->cpi_family <= 0x11);
3461 return (cpi->cpi_family < 0x10);
3465 return (cpi->cpi_family <= 0x11);
3477 return (cpi->cpi_family < 0x10);
3487 return (cpi->cpi_family < 0x10);
3547 return (cpi->cpi_family < 0x10 || cpi->cpi_family == 0x11);
3551 return (cpi->cpi_family < 0x10);
3557 if (CPI_FAMILY(cpi) == 0xf) {
3573 return (cpi->cpi_family == 0x10 && cpi->cpi_model < 4);
3581 return (cpi->cpi_family == 0x10 || cpi->cpi_family == 0x12);
3599 struct cpuid_info *cpi;
3605 cpi = cpu->cpu_m.mcpu_cpi;
3609 osvwfeature = cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW;
3613 (cpi->cpi_extd[1].cp_ecx & CPUID_AMD_ECX_OSVW));
3838 intel_cpuid_4_cache_info(struct cachetab *ct, struct cpuid_info *cpi)
3843 for (i = 0; i < cpi->cpi_std_4_size; i++) {
3844 level = CPI_CACHE_LVL(cpi->cpi_std_4[i]);
3847 ct->ct_assoc = CPI_CACHE_WAYS(cpi->cpi_std_4[i]) + 1;
3849 CPI_CACHE_COH_LN_SZ(cpi->cpi_std_4[i]) + 1;
3851 (CPI_CACHE_PARTS(cpi->cpi_std_4[i]) + 1) *
3853 (cpi->cpi_std_4[i]->cp_ecx + 1);
3872 intel_walk_cacheinfo(struct cpuid_info *cpi,
3880 if ((dp = cpi->cpi_cacheinfo) == NULL)
3882 for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
3890 if (*dp == 0x49 && cpi->cpi_maxeax >= 0x4 &&
3891 intel_cpuid_4_cache_info(&des_49_ct, cpi) == 1) {
3921 cyrix_walk_cacheinfo(struct cpuid_info *cpi,
3928 if ((dp = cpi->cpi_cacheinfo) == NULL)
3930 for (i = 0; i < cpi->cpi_ncache; i++, dp++) {
4067 amd_cache_info(struct cpuid_info *cpi, dev_info_t *devi)
4071 if (cpi->cpi_xmaxeax < 0x80000005)
4073 cp = &cpi->cpi_extd[5];
4090 switch (cpi->cpi_vendor) {
4093 if (cpi->cpi_family >= 5) {
4133 if (cpi->cpi_xmaxeax < 0x80000006)
4135 cp = &cpi->cpi_extd[6];
4173 x86_which_cacheinfo(struct cpuid_info *cpi)
4175 switch (cpi->cpi_vendor) {
4177 if (cpi->cpi_maxeax >= 2)
4185 if (cpi->cpi_family > 5 ||
4186 (cpi->cpi_family == 5 && cpi->cpi_model >= 1))
4190 if (cpi->cpi_family >= 5)
4205 if (cpi->cpi_xmaxeax >= 0x80000005)
4207 else if (cpi->cpi_vendor == X86_VENDOR_Cyrix)
4209 else if (cpi->cpi_maxeax >= 2)
4218 struct cpuid_info *cpi)
4250 "vendor-id", cpi->cpi_vendorstr);
4252 if (cpi->cpi_maxeax == 0) {
4260 "family", CPI_FAMILY(cpi));
4262 "cpu-model", CPI_MODEL(cpi));
4264 "stepping-id", CPI_STEP(cpi));
4267 switch (cpi->cpi_vendor) {
4277 "type", CPI_TYPE(cpi));
4280 switch (cpi->cpi_vendor) {
4283 create = cpi->cpi_family >= 0xf;
4291 "ext-family", CPI_FAMILY_XTD(cpi));
4294 switch (cpi->cpi_vendor) {
4296 create = IS_EXTENDED_MODEL_INTEL(cpi);
4299 create = CPI_FAMILY(cpi) == 0xf;
4307 "ext-model", CPI_MODEL_XTD(cpi));
4310 switch (cpi->cpi_vendor) {
4315 create = cpi->cpi_xmaxeax >= 0x80000001;
4323 "generation", BITX((cpi)->cpi_extd[1].cp_eax, 11, 8));
4326 switch (cpi->cpi_vendor) {
4332 create = cpi->cpi_family > 6 ||
4333 (cpi->cpi_family == 6 && cpi->cpi_model >= 8);
4336 create = cpi->cpi_family >= 0xf;
4342 if (create && cpi->cpi_brandid != 0) {
4344 "brand-id", cpi->cpi_brandid);
4348 switch (cpi->cpi_vendor) {
4353 create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
4356 create = cpi->cpi_family >= 0xf;
4364 "chunks", CPI_CHUNKS(cpi));
4366 "apic-id", cpi->cpi_apicid);
4367 if (cpi->cpi_chipid >= 0) {
4369 "chip#", cpi->cpi_chipid);
4371 "clog#", cpi->cpi_clogid);
4377 "cpuid-features", CPI_FEATURES_EDX(cpi));
4381 switch (cpi->cpi_vendor) {
4383 create = IS_NEW_F6(cpi) || cpi->cpi_family >= 0xf;
4386 create = cpi->cpi_family >= 0xf;
4394 "cpuid-features-ecx", CPI_FEATURES_ECX(cpi));
4397 switch (cpi->cpi_vendor) {
4403 create = cpi->cpi_xmaxeax >= 0x80000001;
4411 "ext-cpuid-features", CPI_FEATURES_XTD_EDX(cpi));
4413 "ext-cpuid-features-ecx", CPI_FEATURES_XTD_ECX(cpi));
4423 "brand-string", cpi->cpi_brandstr);
4428 switch (x86_which_cacheinfo(cpi)) {
4430 intel_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
4433 cyrix_walk_cacheinfo(cpi, cpu_devi, add_cacheent_props);
4436 amd_cache_info(cpi, cpu_devi);
4488 amd_l2cacheinfo(struct cpuid_info *cpi, struct l2info *l2i)
4495 if (cpi->cpi_xmaxeax < 0x80000006)
4497 cp = &cpi->cpi_extd[6];
4519 struct cpuid_info *cpi = cpu->cpu_m.mcpu_cpi;
4527 switch (x86_which_cacheinfo(cpi)) {
4529 intel_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
4532 cyrix_walk_cacheinfo(cpi, l2i, intel_l2cinfo);
4535 amd_l2cacheinfo(cpi, l2i);
4636 struct cpuid_info *cpi;
4641 cpi = CPU->cpu_m.mcpu_cpi;
4646 switch (cpi->cpi_vendor) {
4648 if (cpi->cpi_xmaxeax < 0x80000007)
4726 struct cpuid_info *cpi;
4732 cpi = CPU->cpu_m.mcpu_cpi;
4734 switch (cpi->cpi_vendor) {
4740 if (cpi->cpi_maxeax >= 6) {
4758 struct cpuid_info *cpi = cp->cpu_m.mcpu_cpi;
4772 if ((cpi->cpi_vendor != X86_VENDOR_Intel) || (cpi->cpi_maxeax < 6))
4792 struct cpuid_info *cpi = CPU->cpu_m.mcpu_cpi;
4798 switch (cpi->cpi_vendor) {
4800 if (cpi->cpi_maxeax >= 1) {