Lines Matching defs:node

35 * one or more CPUs and some local memory.  The CPUs in each node can access
37 * local memory. Typically, a system with only one node has Uniform Memory
38 * Access (UMA), but it may be possible to have a one-node system that has
39 * some global memory outside the node with higher latency.
44 * which memory (and how much) is in a NUMA node and how far each node is from
47 * in its memory node (memnode) array with the physical address range spanned
48 * by each NUMA node to know which memory belongs to which node, so it can
49 * build and manage a physical page free list for each NUMA node and allocate
50 * local memory from each node as needed. The common lgroup framework uses the
52 * to each node (leaf lgroup) and how far each node is from the others, so it
57 * etc. are in each NUMA node, how far each node is from the others, and to use
58 * a unique lgroup platform handle to refer to each node through the interface.
65 * CPUs and memory are local to a given proximity domain (NUMA node). The SLIT
67 * a NUMA node and should correspond to proximity domains in the SRAT). For
73 * in the system and which CPUs and memory are in each node.
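
The SLIT body is just a locality count followed by an n x n byte matrix of relative distances (10 means local), so the distance from domain i to domain j is a single index computation. A minimal sketch in C, assuming that layout (the struct below is illustrative, not the kernel's ACPI definition):

#include <stdint.h>

/* Illustrative SLIT body: locality count plus n x n distance matrix */
typedef struct slit {
	uint64_t	locality_cnt;	/* number of system localities (n) */
	uint8_t		entry[];	/* n * n relative distances */
} slit_t;

/* Relative distance from proximity domain "from" to domain "to" */
static uint8_t
slit_distance(const slit_t *sp, uint64_t from, uint64_t to)
{
	return (sp->entry[from * sp->locality_cnt + to]);
}
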
88 * - lgrp_plat_cpu_node[] CPU to node ID mapping table indexed by
92 * different nodes indexed by node ID
100 * table indexed by node ID (only used
104 * each memory node indexed by memory node
109 * lgroup platform handle == node ID == memnode ID
114 * from 0 to <number of nodes - 1>. Then proximity ID N will hash into node ID
116 * and be assigned node ID N. If the proximity domain IDs aren't numbered
119 * to node IDs. However, a proximity domain ID may not map to the
120 * equivalent node ID since we want to keep the node IDs numbered from 0 to
125 * be contiguous with other memory connected to the same lgrp node. In other
128 * relationship between lgrp node and memory node from a 1:1 to a 1:N map,
129 * which means there may be multiple memory nodes associated with an lgrp node
135 * node ID and memnode ID is still kept as:
136 * lgroup platform handle == node ID == memnode ID
139 * lgroup platform handle == node ID == memnode ID
145 * 4) All boot code having the assumption "node ID == memnode ID" can live as
146 * is because node ID is always equal to memnode ID at boot time.
151 * "node ID == memnode ID" and may be called at run time, is disabled if
210 * Hash proximity domain ID into node to domain mapping table "mod" number of
212 * proximity domain be node 0
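
Assuming firmware numbers its proximity domains 0 through n-1 (the common case called out above), the hash can be a plain modulo, which maps domain N to node N and makes the lowest domain node 0; colliding IDs are resolved later by linear probing. A sketch of such a hash (the kernel's actual macro may also bias by the minimum domain ID seen):

/* Hash a proximity domain ID into the node-to-domain mapping table */
#define	NODE_DOMAIN_HASH(domain, node_cnt)	((domain) % (node_cnt))
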
219 * CPU to node ID mapping structure (only used with SRAT)
223 uint_t node;
241 size_t probe_memsize; /* how much memory to probe per node */
269 * Node ID and starting and ending page for physical memory in memory node
286 * CPU to node ID mapping table (only used for SRAT) and its max number of
308 * Physical address range for memory in each node
344 * Maximum memory node ID in use.
376 * node
395 * found to be crossing memory node boundaries. The workaround will eliminate
396 * a base-size page at the end of each memory node to ensure that
397 * a large page with constituent pages that span more than 1 memory node
525 * Configure memory nodes for machines with more than one node (i.e., NUMA)
540 int node;
570 node = plat_pfn_to_mem_node(cur_start);
574 * memory in this node doesn't exist or the address from the
575 * boot installed memory list entry isn't in this node.
579 if (node < 0 || node >= lgrp_plat_max_mem_node ||
580 !lgrp_plat_memnode_info[node].exists ||
581 cur_start < lgrp_plat_memnode_info[node].start ||
582 cur_start > lgrp_plat_memnode_info[node].end) {
593 if (lgrp_plat_memnode_info[node].exists &&
594 cur_end > lgrp_plat_memnode_info[node].end) {
595 cur_end = lgrp_plat_memnode_info[node].end;
599 * node to eliminate large pages
600 * that span more than 1 memory node.
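
Pulling the fragments together, the workaround amounts to clamping each boot memory list entry at its memory node's end and then giving back one base-size page there, so a large page can never have constituent pages on both sides of the boundary. A minimal sketch, assuming the names used in the fragments above:

#include <stdint.h>

typedef uint64_t pfn_t;	/* stand-in for the kernel's pfn_t */

/*
 * Clamp an entry ending at cur_end to the node ending at node_end; with
 * the workaround enabled, also drop the node's last base-size page.
 */
static pfn_t
clamp_to_memnode(pfn_t cur_end, pfn_t node_end, int do_workaround)
{
	if (cur_end > node_end)
		cur_end = node_end;	/* stop at the node boundary */
	if (do_workaround && cur_end == node_end)
		cur_end--;		/* eliminate the last page */
	return (cur_end);
}
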
623 * plat_mnode_xcheck: checks the node memory ranges to see if there is a pfncnt
624 * range of pages aligned on pfncnt that crosses a node boundary. Returns 1 if
630 int node, prevnode = -1, basenode;
633 for (node = 0; node < lgrp_plat_max_mem_node; node++) {
635 if (lgrp_plat_memnode_info[node].exists == 0)
639 prevnode = node;
640 basenode = node;
644 /* assume x86 node pfn ranges are in increasing order */
645 ASSERT(lgrp_plat_memnode_info[node].start >
649 * continue if the starting address of node is not contiguous
650 * with the previous node.
653 if (lgrp_plat_memnode_info[node].start !=
655 basenode = node;
656 prevnode = node;
660 /* check if the starting address of node is pfncnt aligned */
661 if ((lgrp_plat_memnode_info[node].start & (pfncnt - 1)) != 0) {
664 * at this point, node starts at an unaligned boundary
665 * and is contiguous with the previous node(s) to
672 ea = P2ROUNDUP((lgrp_plat_memnode_info[node].start),
677 ea <= (lgrp_plat_memnode_info[node].end + 1)) {
687 prevnode = node;
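
Pieced together, the check walks the node ranges in increasing PFN order and looks for a pfncnt-aligned run that would straddle the point where one node's range ends and a contiguous one begins. A simplified sketch that tracks only the immediately previous node rather than the base of the whole contiguous run:

#include <stdint.h>

typedef uint64_t pfn_t;

typedef struct memnode_info {
	pfn_t	start;
	pfn_t	end;
	int	exists;
} memnode_info_t;

/* Return 1 if an aligned pfncnt run crosses a node boundary, else 0 */
static int
mnode_xcheck(const memnode_info_t *mi, int max_node, pfn_t pfncnt)
{
	int node, prevnode = -1;
	pfn_t ea;

	for (node = 0; node < max_node; node++) {
		if (!mi[node].exists)
			continue;
		if (prevnode >= 0 &&
		    mi[node].start == mi[prevnode].end + 1 &&
		    (mi[node].start & (pfncnt - 1)) != 0) {
			/*
			 * This node starts unaligned and is contiguous
			 * with its predecessor: round its start up to
			 * pfncnt and see if that large page would begin
			 * in the previous node yet end in this one.
			 */
			ea = (mi[node].start + pfncnt - 1) & ~(pfncnt - 1);
			if (ea - pfncnt >= mi[prevnode].start &&
			    ea <= mi[node].end + 1)
				return (1);
		}
		prevnode = node;
	}
	return (0);
}
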
707 int node;
712 for (node = 0; node < lgrp_plat_max_mem_node; node++) {
716 if (!lgrp_plat_memnode_info[node].exists)
720 if (pfn >= lgrp_plat_memnode_info[node].start &&
721 pfn <= lgrp_plat_memnode_info[node].end)
722 return (node);
728 ASSERT(node < lgrp_plat_max_mem_node);
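
The lookup itself is a linear scan of the per-node physical ranges; a minimal sketch using the same illustrative types as above:

#include <stdint.h>

typedef uint64_t pfn_t;

typedef struct memnode_info {
	pfn_t	start;
	pfn_t	end;
	int	exists;
} memnode_info_t;

/* Return the memory node whose PFN range contains pfn, or -1 */
static int
pfn_to_mem_node(const memnode_info_t *mi, int max_node, pfn_t pfn)
{
	int node;

	for (node = 0; node < max_node; node++) {
		if (!mi[node].exists)
			continue;
		if (pfn >= mi[node].start && pfn <= mi[node].end)
			return (node);
	}
	return (-1);	/* the fragments above ASSERT this can't happen */
}
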
784 int rc, node;
797 * Don't bother here if we're still booting or only one lgrp node
829 /* Update node to proximity domain mapping */
830 node = lgrp_plat_domain_to_node(lgrp_plat_node_domain,
832 if (node == -1) {
833 node = lgrp_plat_node_domain_update(
835 ASSERT(node != -1);
836 if (node == -1) {
855 /* Update CPU to node mapping. */
857 lgrp_plat_cpu_node[cp->cpu_id].node = node;
896 lgrp_plat_cpu_node[cp->cpu_id].node = UINT_MAX;
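
The CPU hot-plug paths keep the table consistent by writing the node ID derived from the CPU's proximity domain on config and poisoning the slot on unconfig so stale lookups fail loudly. A minimal sketch (the real table entry also records the APIC ID, omitted here):

#include <limits.h>

typedef struct cpu_node_map {
	unsigned int	node;	/* node ID, or UINT_MAX if unmapped */
} cpu_node_map_t;

static void
cpu_map_config(cpu_node_map_t *tbl, int cpuid, unsigned int node)
{
	tbl[cpuid].node = node;		/* CPU now belongs to this node */
}

static void
cpu_map_unconfig(cpu_node_map_t *tbl, int cpuid)
{
	tbl[cpuid].node = UINT_MAX;	/* mark mapping invalid */
}
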
1016 * Each lgrp node needs MAX_MEM_NODES_PER_LGROUP memnodes
1056 int node;
1078 * and we are trying to get latency from current CPU to some node.
1094 node = lgrp_plat_cpu_to_node(CPU, lgrp_plat_cpu_node,
1096 ASSERT(node >= 0 && node < lgrp_plat_node_cnt);
1097 if (node == src)
1170 /* Count memory nodes present at boot. */
1211 * Probe memory in each node from current CPU to determine latency topology
1217 * nodes probing from each node to each of the other nodes some number of
1218 * times. Furthermore, each node is probed some number of times before moving
1230 * how far each node is from the others.
1251 * Determine ID of node containing current CPU
1268 * in each node from current CPU and remember how long it takes,
1270 * This should approximate the memory latency between each node.
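
The probing approach boils down to: from the current CPU, time repeated reads of a page known to live in each target node and treat the elapsed time as the relative latency. A hedged user-land analogue using clock_gettime (the kernel uses its own high-resolution time source, and a real probe must also defeat the caches, e.g. by striding far beyond one page):

#include <stdint.h>
#include <time.h>

/* Time nreads touches of a probe page and return elapsed nanoseconds */
static uint64_t
probe_latency(volatile uint32_t *probe_page, int nreads)
{
	struct timespec ts, te;
	int i;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	for (i = 0; i < nreads; i++)
		(void) probe_page[i % 1024];	/* stay within one 4K page */
	clock_gettime(CLOCK_MONOTONIC, &te);

	return ((uint64_t)(te.tv_sec - ts.tv_sec) * 1000000000ULL +
	    (uint64_t)(te.tv_nsec - ts.tv_nsec));
}

Probing each node several times and keeping the minimum reading filters out interrupt and preemption noise, which matches the repeated-probe scheme described above.
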
1348 * Update CPU to node mapping for given CPU and proximity domain.
1359 int node;
1362 * Get node number for proximity domain
1364 node = lgrp_plat_domain_to_node(node_domain, node_cnt, domain);
1365 if (node == -1) {
1366 node = lgrp_plat_node_domain_update(node_domain, node_cnt,
1368 if (node == -1)
1373 * Search for entry with given APIC ID and fill in its node and
1388 cpu_node[i].node == node)
1395 if (cpu_node[i].node != UINT_MAX)
1399 * Fill in node and proximity domain IDs
1402 cpu_node[i].node = node;
1417 * Get node ID for given CPU
1434 * it, so return node ID for Opteron and -1 otherwise.
1444 * Return -1 when CPU to node ID mapping entry doesn't exist for given
1450 return (cpu_node[cpuid].node);
1455 * Return node number for given proximity domain/system locality
1461 uint_t node;
1465 * Hash proximity domain ID into node to domain mapping table (array),
1467 * of matching entry as node ID.
1469 node = start = NODE_DOMAIN_HASH(domain, node_cnt);
1471 if (node_domain[node].exists) {
1473 if (node_domain[node].prox_domain == domain)
1474 return (node);
1476 node = (node + 1) % node_cnt;
1477 } while (node != start);
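
Reassembled, the lookup hashes the domain and probes linearly until it finds the matching entry (whose index is the node ID) or wraps around. A sketch with illustrative types:

#include <stdint.h>

typedef struct node_domain_map {
	int		exists;
	uint32_t	prox_domain;
} node_domain_map_t;

#define	NODE_DOMAIN_HASH(domain, node_cnt)	((domain) % (node_cnt))

/* Return node ID for a proximity domain, or -1 if unmapped */
static int
domain_to_node(const node_domain_map_t *nd, uint32_t node_cnt,
    uint32_t domain)
{
	uint32_t node, start;

	node = start = NODE_DOMAIN_HASH(domain, node_cnt);
	do {
		if (nd[node].exists && nd[node].prox_domain == domain)
			return ((int)node);
		node = (node + 1) % node_cnt;
	} while (node != start);
	return (-1);
}
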
1510 * Temporarily allocate boot memory to use for CPU to node
1526 * Fill in CPU to node ID mapping table with APIC ID for each
1555 * memory node when memory is interleaved between any of the nodes or there is
1556 * only one NUMA node
1575 * each node if memory DR is disabled.
1586 * Determine how far each NUMA node is from the others by
2033 * node, or the height of the lgroup topology is less than or equal to 2
2073 * Map memory in each node needed for probing to determine latency
2080 * Skip this node and leave its probe page NULL
2101 * Get PFN for first page in each node
2107 * Map virtual page to first page in node
2161 * Update node to proximity domain mappings for given domain and return node ID
2167 uint_t node;
2171 * Hash proximity domain ID into node to domain mapping table (array)
2174 node = start = NODE_DOMAIN_HASH(domain, node_cnt);
2178 * domain and return node ID which is index into mapping table.
2180 if (!node_domain[node].exists) {
2181 node_domain[node].prox_domain = domain;
2183 node_domain[node].exists = 1;
2184 return (node);
2189 * return node ID (index into table).
2191 if (node_domain[node].prox_domain == domain)
2192 return (node);
2193 node = NODE_DOMAIN_HASH(node + 1, node_cnt);
2194 } while (node != start);
2199 ASSERT(node != start);
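
The update side is the insert-or-find counterpart: probe from the hash, claim the first empty slot for the domain (its index becomes the node ID), or return the slot that already maps it. A sketch reusing the same illustrative node_domain_map_t and hash:

#include <stdint.h>

typedef struct node_domain_map {
	int		exists;
	uint32_t	prox_domain;
} node_domain_map_t;

#define	NODE_DOMAIN_HASH(domain, node_cnt)	((domain) % (node_cnt))

/* Insert or find the mapping for domain; return node ID or -1 if full */
static int
node_domain_update(node_domain_map_t *nd, uint32_t node_cnt, uint32_t domain)
{
	uint32_t node, start;

	node = start = NODE_DOMAIN_HASH(domain, node_cnt);
	do {
		if (!nd[node].exists) {
			nd[node].prox_domain = domain;
			nd[node].exists = 1;	/* claim this slot */
			return ((int)node);
		}
		if (nd[node].prox_domain == domain)
			return ((int)node);	/* already mapped */
		node = NODE_DOMAIN_HASH(node + 1, node_cnt);
	} while (node != start);
	return (-1);	/* table full */
}
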
2204 * Update node memory information for given proximity domain with specified
2213 int node, mnode;
2216 * Get node number for proximity domain
2218 node = lgrp_plat_domain_to_node(node_domain, node_cnt, domain);
2219 if (node == -1) {
2220 node = lgrp_plat_node_domain_update(node_domain, node_cnt,
2222 if (node == -1)
2255 memnode_info[mnode].lgrphand = node;
2263 * Create entry in table for node if it doesn't exist
2265 ASSERT(node < memnode_cnt);
2266 if (!memnode_info[node].exists) {
2267 memnode_info[node].start = btop(start);
2268 memnode_info[node].end = btop(end);
2269 memnode_info[node].prox_domain = domain;
2270 memnode_info[node].device_id = device_id;
2271 memnode_info[node].lgrphand = node;
2273 memnode_info[node].exists = 1;
2281 * need to update existing start or end address for the node.
2283 if (memnode_info[node].prox_domain == domain) {
2284 if (btop(start) < memnode_info[node].start)
2285 memnode_info[node].start = btop(start);
2286 if (btop(end) > memnode_info[node].end)
2287 memnode_info[node].end = btop(end);
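
In short: the first range seen for a domain creates the node's entry; later ranges for the same domain only widen the existing [start, end] span. A minimal sketch (device IDs and lgrp handles omitted; BTOP below mimics btop(), bytes to pages):

#include <stdint.h>

typedef uint64_t pfn_t;

typedef struct memnode_info {
	pfn_t		start;
	pfn_t		end;
	uint32_t	prox_domain;
	int		exists;
} memnode_info_t;

#define	BTOP(x)	((x) >> 12)	/* bytes to 4K pages, like btop() */

static void
memnode_info_update(memnode_info_t *mi, int node, uint32_t domain,
    uint64_t start, uint64_t end)
{
	if (!mi[node].exists) {
		/* first range for this node: create its entry */
		mi[node].start = BTOP(start);
		mi[node].end = BTOP(end);
		mi[node].prox_domain = domain;
		mi[node].exists = 1;
		return;
	}
	if (mi[node].prox_domain == domain) {
		/* widen the existing span to cover the new range */
		if (BTOP(start) < mi[node].start)
			mi[node].start = BTOP(start);
		if (BTOP(end) > mi[node].end)
			mi[node].end = BTOP(end);
	}
}
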
2395 * Swap node to proximity domain ID assignments
2405 * Swap node to physical memory assignments
2419 * Check to make sure that CPUs are assigned to the correct node IDs now since
2420 * node to proximity domain ID assignments may have been changed above
2425 int node;
2427 node = lgrp_plat_domain_to_node(node_domain, node_cnt,
2429 if (cpu_node[i].node != node)
2430 cpu_node[i].node = node;
2437 * Return time needed to probe from current CPU to memory in given node
2456 * Determine ID of node containing current CPU
2552 * Read boot property with CPU to APIC ID array, fill in CPU to node ID
2597 * Just return number of CPU APIC IDs if CPU to node mapping table is
2609 * Fill in CPU to node ID mapping table with APIC ID for each CPU
2618 cpu_node[i].node = UINT_MAX;
2630 * NUMA node is from the others
2801 * and memory are local to each other in the same NUMA node and return number
2845 * which CPUs and memory belong to which node.
2867 * Calculate domain (node) ID and fill in APIC ID to
2868 * domain/node mapping table
2894 * Get domain (node) ID and fill in domain/node
2957 * Calculate domain (node) ID and fill in APIC ID to
2958 * domain/node mapping table
3300 * machine are the node ID register for the number of NUMA nodes and the DRAM
3301 * address map registers for the physical address range of each node.
3344 #define OPT_DRAMLIMIT_LO_MASK_DSTNODE 0x7 /* destination node */
3357 #define OPT_NODE_MASK_ID 0x7 /* node ID */
3358 #define OPT_NODE_MASK_CNT 0x70 /* node count */
3359 #define OPT_NODE_MASK_IONODE 0x700 /* HyperTransport I/O hub node ID */
3360 #define OPT_NODE_MASK_LCKNODE 0x7000 /* lock controller node ID */
3366 #define OPT_NODE_RSHIFT_CNT 0x4 /* shift right for node count value */
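
Decoding the node ID register is plain mask-and-shift work; for example, the node count field on these parts holds (number of nodes - 1), an assumption worth checking against the BKDG for the exact family:

#include <stdint.h>

#define	OPT_NODE_MASK_CNT	0x70	/* node count */
#define	OPT_NODE_RSHIFT_CNT	0x4	/* shift right for node count value */

/* Number of NUMA nodes from a raw node ID register value */
static uint32_t
opt_node_cnt(uint32_t node_info)
{
	return (((node_info & OPT_NODE_MASK_CNT) >>
	    OPT_NODE_RSHIFT_CNT) + 1);
}
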
3394 * info to determine its node topology
3411 #define OPT_PCS_OFF_DRAMBASE_HI 0x140 /* DRAM Base register (node 0) */
3412 #define OPT_PCS_OFF_DRAMBASE_LO 0x40 /* DRAM Base register (node 0) */
3418 #define OPT_PCS_DEV_NODE0 24 /* device number for node 0 */
3422 * Opteron DRAM address map gives base and limit for physical memory in a node
3451 * node count and DRAM address map registers may have different format or
3481 uint_t node;
3489 * determine node information, which memory is in each node, etc.
3501 * Read node ID register for node 0 to get node count
3508 * If the number of nodes is more than the maximum supported, then set node
3527 for (node = 0; node < *node_cnt; node++) {
3534 * Read node ID register (except for node 0 which we just read)
3536 if (node > 0) {
3537 node_info[node] = pci_getl_func(bus, dev,
3543 * physical memory range of each node
3550 base_hi = dram_map[node].base_hi =
3553 base_lo = dram_map[node].base_lo = pci_getl_func(bus, dev,
3556 if ((dram_map[node].base_lo & OPT_DRAMBASE_LO_MASK_INTRLVEN) &&
3566 limit_hi = dram_map[node].limit_hi =
3571 limit_lo = dram_map[node].limit_lo = pci_getl_func(bus,
3575 * Increment device number to next node and register offsets
3576 * for DRAM base register of next node
3585 * node
3590 * Mark node memory as non-existent and set start and
3593 memnode_info[node].exists = 0;
3594 memnode_info[node].start = memnode_info[node].end =
3600 * Mark node memory as existing and remember physical address
3601 * range of each node for use later
3603 memnode_info[node].exists = 1;
3605 memnode_info[node].start = btop(OPT_DRAMADDR(base_hi, base_lo));
3607 memnode_info[node].end = btop(OPT_DRAMADDR(limit_hi, limit_lo) |
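
The walk over the DRAM address map follows from the fragments: node 0's registers live at PCI device 24 (OPT_PCS_DEV_NODE0), and both the device number and the register offsets advance for each subsequent node. A hedged sketch; pci_cfg_getl() stands in for the kernel's pci_getl_func(), and the function number, offsets, and stride are placeholders to check against the BKDG:

#include <stdint.h>

#define	OPT_PCS_DEV_NODE0	24	/* PCI device number for node 0 */

/* hypothetical PCI config space read, standing in for pci_getl_func() */
extern uint32_t pci_cfg_getl(int bus, int dev, int func, int off);

/* Read each node's DRAM base/limit low registers into base[]/limit[] */
static void
opt_read_dram_map(uint32_t node_cnt, uint32_t *base, uint32_t *limit)
{
	uint32_t node;
	int dev = OPT_PCS_DEV_NODE0;
	int off_base = 0x40;		/* DRAM Base register (node 0) */
	int off_limit = 0x44;		/* placeholder limit offset */

	for (node = 0; node < node_cnt; node++) {
		base[node] = pci_cfg_getl(0, dev, 1, off_base);
		limit[node] = pci_cfg_getl(0, dev, 1, off_limit);
		dev++;			/* next node's northbridge device */
		off_base += 8;		/* per-node register stride */
		off_limit += 8;
	}
}

Each decoded base/limit pair then becomes the start and end PFNs recorded in memnode_info[], as the fragments above show.
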
3623 * N times on specified destination node from current CPU