Lines Matching defs:from

44 * which memory (and how much) are in a NUMA node and how far each node is from
50 * local memory from each node as needed. The common lgroup framework uses the
52 * to each node (leaf lgroup) and how far each node is from each other, so it
57 * etc. are in each NUMA node, how far each node is from each other, and to use
112 * above as long as the proximity domain IDs are numbered from 0 to <number of
114 * from 0 to <number of nodes - 1>. Then proximity ID N will hash into node ID
117 * from 0 to <number of nodes - 1>, then hashing the proximity domain IDs into
120 * equivalent node ID since we want to keep the node IDs numbered from 0 to
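The fragments above (lines 112-120) describe hashing SRAT proximity domain IDs into node IDs: when firmware numbers the domains 0 to <number of nodes - 1>, domain N hashes straight to node N. A minimal sketch of that mod-N hash with linear reprobing on collision; the table layout and names here are illustrative, not lifted from the kernel:

    #include <stdint.h>

    typedef struct {
        int      exists;       /* slot in use? */
        uint32_t prox_domain;  /* proximity domain ID from the ACPI SRAT */
    } node_domain_map_t;

    #define NODE_DOMAIN_HASH(domain, node_cnt) ((uint32_t)(domain) % (node_cnt))

    /* Return the node ID for a proximity domain, or -1 if it isn't mapped. */
    static int
    domain_to_node(node_domain_map_t *node_domain, unsigned node_cnt,
        uint32_t domain)
    {
        unsigned start = NODE_DOMAIN_HASH(domain, node_cnt);
        unsigned node = start;

        do {
            if (node_domain[node].exists &&
                node_domain[node].prox_domain == domain)
                return ((int)node);
            node = (node + 1) % node_cnt;  /* linear reprobe on collision */
        } while (node != start);

        return (-1);
    }
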
128 * relationship between lgrp node and memory node from 1:1 map to 1:N map,
141 * been changed from 1:1 map to 1:N map. Memnode IDs [0 - lgrp_plat_node_cnt)
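Lines 128 and 141 refer to the lgrp-node-to-memnode relationship changing from a 1:1 map to a 1:N map, with memnode IDs [0 - lgrp_plat_node_cnt) keeping their old meaning. A rough sketch of that reserved-range scheme; the array and function names are made up for illustration:

    #include <assert.h>

    #define MAX_MEMNODES 64

    /* Owning lgrp node for each memnode; layout is illustrative only. */
    static int memnode_to_lgrp[MAX_MEMNODES];
    static int next_memnode;

    static void
    memnode_init(int lgrp_plat_node_cnt)
    {
        int i;

        /* IDs [0, lgrp_plat_node_cnt) keep the old 1:1 mapping. */
        for (i = 0; i < lgrp_plat_node_cnt; i++)
            memnode_to_lgrp[i] = i;
        next_memnode = lgrp_plat_node_cnt;
    }

    /* Extra memnodes (say, a second memory range in one node) go above. */
    static int
    memnode_alloc_extra(int owning_lgrp)
    {
        assert(next_memnode < MAX_MEMNODES);
        memnode_to_lgrp[next_memnode] = owning_lgrp;
        return (next_memnode++);
    }
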
187 /* from fakebop.c */
313 * Statistics gathered from probing
328 * Error code from processing ACPI SRAT
333 * Error code from processing ACPI SLIT
377 * - lgrp_plat_probe_nreads Number of times to read vendor ID from
431 int lgrp_plat_latency(lgrp_handle_t from, lgrp_handle_t to);
574 * memory in node doesn't exist or address from
1044 * Return latency between "from" and "to" lgroups
1053 lgrp_plat_latency(lgrp_handle_t from, lgrp_handle_t to)
1064 if (from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE)
1067 src = from;
1077 * Probe from current CPU if its lgroup latencies haven't been set yet
1078 * and we are trying to get latency from current CPU to some node.
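Lines 1044-1078 outline what lgrp_plat_latency() does: short-circuit the default handle, then probe lazily when the current CPU's own latencies haven't been measured yet. A hedged user-level sketch of that control flow, treating handles as node IDs (an assumption here) and using a made-up probe hook:

    #include <stdint.h>

    typedef uintptr_t lgrp_handle_t;

    #define LGRP_DEFAULT_HANDLE ((lgrp_handle_t)-1)  /* illustrative value */
    #define MAX_NODES 8

    /* latencies[src][dst] == 0 means "not probed yet". */
    static int latencies[MAX_NODES][MAX_NODES];

    extern void probe_from_current_cpu(int src);  /* assumed probe hook */

    int
    lgrp_plat_latency_sketch(lgrp_handle_t from, lgrp_handle_t to, int cur_node)
    {
        int src, dst;

        /* No meaningful single distance for the default (root) handle. */
        if (from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE)
            return (0);

        src = (int)from;
        dst = (int)to;

        /*
         * Probe lazily: if the current CPU's own row is still empty and
         * the caller is asking about latency from this CPU, measure now.
         */
        if (src == cur_node && latencies[src][src] == 0)
            probe_from_current_cpu(src);

        return (latencies[src][dst]);
    }
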
1211 * Probe memory in each node from current CPU to determine latency topology
1217 * nodes probing from each node to each of the other nodes some number of
1230 * how far each node is from each other.
1235 int from;
1253 from = lgrp_plat_cpu_to_node(CPU, lgrp_plat_cpu_node,
1255 ASSERT(from >= 0 && from < lgrp_plat_node_cnt);
1257 ASSERT(lgrp_plat_node_domain[from].exists);
1263 if (lat_stats->latencies[from][from] != 0)
1268 * in each node from current CPU and remember how long it takes,
1291 if (lat_stats->latencies[from][to] == 0 ||
1292 probe_time < lat_stats->latencies[from][to])
1293 lat_stats->latencies[from][to] = probe_time;
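Lines 1211-1293 show the probing pass keeping the minimum observed time per node pair as the latency estimate. A compact sketch of that keep-the-minimum loop; probe_node() stands in for whatever timed memory access the platform actually performs:

    #define MAX_NODES 8

    extern int probe_node(int to);  /* assumed: one timed probe, in ns */

    static void
    probe_all_nodes(int from, int node_cnt, int nrounds,
        int latencies[MAX_NODES][MAX_NODES])
    {
        int round, to, probe_time;

        for (round = 0; round < nrounds; round++) {
            for (to = 0; to < node_cnt; to++) {
                probe_time = probe_node(to);
                if (probe_time <= 0)
                    continue;  /* probe failed; leave entry alone */
                /* Keep the fastest time seen for this node pair. */
                if (latencies[from][to] == 0 ||
                    probe_time < latencies[from][to])
                    latencies[from][to] = probe_time;
            }
        }
    }
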
1308 * Bail out if we weren't able to probe any nodes from current CPU
1586 * Determine how far each NUMA node is from each other by
1651 * in hopes of avoiding any anomalies in probing from
2050 * Should have been able to probe from CPU 0 when it was added
2117 * Probe from current CPU
2437 * Return time needed to probe from current CPU to memory in given node
2447 int from;
2458 from = lgrp_plat_cpu_to_node(CPU, cpu_node, cpu_node_nentries);
2459 ASSERT(from >= 0 && from < lgrp_plat_node_cnt);
2470 lat_stats->latencies[from][to] = 0;
2486 * Probe from current CPU to given memory using specified operation
2502 * Measure how long it takes to read vendor ID from
2540 if (min < probe_stats->probe_min[from][to] ||
2541 probe_stats->probe_min[from][to] == 0)
2542 probe_stats->probe_min[from][to] = min;
2544 if (max > probe_stats->probe_max[from][to])
2545 probe_stats->probe_max[from][to] = max;
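Lines 2437-2545 cover the measurement itself: time each read (the kernel reads a device vendor ID, repeated N times per destination node as line 3623 notes) and fold the results into per-pair minima and maxima. A user-level approximation, with clock_gettime() standing in for the kernel's timestamping and a dummy volatile read standing in for the vendor-ID load:

    #include <stdint.h>
    #include <time.h>

    #define MAX_NODES 8

    static uint64_t probe_min[MAX_NODES][MAX_NODES];
    static uint64_t probe_max[MAX_NODES][MAX_NODES];

    static uint64_t
    now_ns(void)
    {
        struct timespec ts;

        (void) clock_gettime(CLOCK_MONOTONIC, &ts);
        return ((uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec);
    }

    /* Time nreads reads of *target and fold them into per-pair extremes. */
    static void
    record_probe(int from, int to, volatile uint32_t *target, int nreads)
    {
        uint64_t start, elapsed, min = 0, max = 0;
        int i;

        for (i = 0; i < nreads; i++) {
            start = now_ns();
            (void) *target;  /* the timed read */
            elapsed = now_ns() - start;
            if (min == 0 || elapsed < min)
                min = elapsed;
            if (elapsed > max)
                max = elapsed;
        }

        if (probe_min[from][to] == 0 || min < probe_min[from][to])
            probe_min[from][to] = min;
        if (max > probe_max[from][to])
            probe_max[from][to] = max;
    }
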
2630 * NUMA node is from each other
2823 * Try to get domain information from the MSCT table.
2999 * initialized and copy everything from temporary to permanent memory since
3294 * information that may be obtained from the ACPI System Resource Affinity Table
3321 * Macros to derive addresses from Opteron DRAM Address Map registers
3369 * Macros to get values from Opteron Node ID register
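Lines 3321-3369 point at bit-extraction macros for the Opteron DRAM Address Map and Node ID registers. A sketch of the flavor of macro involved, using the classic K8 layout in which DRAM base/limit registers carry address bits 39:24 in register bits 31:16; treat the field positions as illustrative rather than as a datasheet:

    #include <stdint.h>

    #define OPT_DRAMADDR_MASK  0xffff0000u
    #define OPT_DRAMADDR_SHIFT 8  /* reg bits 31:16 -> address bits 39:24 */

    /* Base register: extract the start of the node's DRAM range. */
    #define OPT_DRAMBASE(reg) \
        (((uint64_t)((reg) & OPT_DRAMADDR_MASK)) << OPT_DRAMADDR_SHIFT)

    /* Limit register addresses the top of the range, so fill the low bits. */
    #define OPT_DRAMLIMIT(reg) \
        (OPT_DRAMBASE(reg) | 0xffffffu)
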
3471 * Determine NUMA configuration for Opteron from registers that live in PCI
3488 * Read configuration registers from PCI configuration space to
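Lines 3471-3488 read those Opteron registers out of PCI configuration space. A hedged user-space sketch of the classic type-1 config mechanism (ports 0xCF8/0xCFC); on K8 the node N memory controller appears at bus 0, device 0x18 + N. The Linux-specific sys/io.h and the iopl(3) privilege it requires are assumptions here:

    #include <stdint.h>
    #include <sys/io.h>  /* outl()/inl(); requires iopl(3) privileges */

    #define PCI_CONFADD  0xcf8
    #define PCI_CONFDATA 0xcfc

    /* Type-1 config address: enable bit | bus | device | function | reg. */
    static uint32_t
    pci_cfg_addr(uint8_t bus, uint8_t dev, uint8_t func, uint8_t reg)
    {
        return (0x80000000u | ((uint32_t)bus << 16) | ((uint32_t)dev << 11) |
            ((uint32_t)func << 8) | (reg & 0xfc));
    }

    static uint32_t
    pci_cfg_read(uint8_t bus, uint8_t dev, uint8_t func, uint8_t reg)
    {
        outl(pci_cfg_addr(bus, dev, func, reg), PCI_CONFADD);
        return (inl(PCI_CONFDATA));
    }
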
3623 * N times on specified destination node from current CPU