- wcpu_map.total_procs =
- GetActiveProcessorCount(ALL_PROCESSOR_GROUPS);
-
- LOGICAL_PROCESSOR_RELATIONSHIP lprocRel;
- DWORD lprocInfoSize = 0;
- BOOL ht_enabled = FALSE;
-
- /* First get the processor package information */
- lprocRel = RelationProcessorPackage;
- /* Determine the size of buffer we need (pass NULL) */
- GetLogicalProcessorInformationEx(lprocRel, NULL, &lprocInfoSize);
- wcpu_map.proc_sockets = lprocInfoSize / 48;
-
- lprocInfoSize = 0;
- /* Next get the processor core information */
- lprocRel = RelationProcessorCore;
- GetLogicalProcessorInformationEx(lprocRel, NULL, &lprocInfoSize);
- wcpu_map.proc_cores = lprocInfoSize / 48;
-
- if (wcpu_map.total_procs > wcpu_map.proc_cores)
- ht_enabled = TRUE;
-
- /* Distribute the socket and core ids appropriately
- * across the logical cores. For now, split the cores
- * equally across the sockets.
- */
- unsigned int lcore = 0;
- for (unsigned int socket = 0; socket <
- wcpu_map.proc_sockets; ++socket) {
- for (unsigned int core = 0;
- core < (wcpu_map.proc_cores / wcpu_map.proc_sockets);
- ++core) {
- wcpu_map.wlcore_map[lcore]
- .socket_id = socket;
- wcpu_map.wlcore_map[lcore]
- .core_id = core;
- lcore++;
- if (ht_enabled) {
- wcpu_map.wlcore_map[lcore]
- .socket_id = socket;
- wcpu_map.wlcore_map[lcore]
- .core_id = core;
- lcore++;
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *infos, *info;
+ DWORD infos_size;
+ bool full = false;
+
+ infos_size = 0;
+ if (!GetLogicalProcessorInformationEx(
+ RelationNumaNode, NULL, &infos_size)) {
+ DWORD error = GetLastError();
+ if (error != ERROR_INSUFFICIENT_BUFFER) {
+ log_early("Cannot get NUMA node info size, error %lu\n",
+ GetLastError());
+ rte_errno = ENOMEM;
+ return -1;
+ }
+ }
+
+ infos = malloc(infos_size);
+ if (infos == NULL) {
+ log_early("Cannot allocate memory for NUMA node information\n");
+ rte_errno = ENOMEM;
+ return -1;
+ }
+
+ if (!GetLogicalProcessorInformationEx(
+ RelationNumaNode, infos, &infos_size)) {
+ log_early("Cannot get NUMA node information, error %lu\n",
+ GetLastError());
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ info = infos;
+ while ((uint8_t *)info - (uint8_t *)infos < infos_size) {
+ unsigned int node_id = info->NumaNode.NodeNumber;
+ GROUP_AFFINITY *cores = &info->NumaNode.GroupMask;
+ struct lcore_map *lcore;
+ unsigned int i, socket_id;
+
+ /* NUMA node may be reported multiple times if it includes
+ * cores from different processor groups, e.g. 80 cores
+ * of a physical processor comprise one NUMA node, but two
+ * processor groups, because group size is limited by 32/64.
+ */
+ for (socket_id = 0; socket_id < cpu_map.socket_count;
+ socket_id++) {
+ if (cpu_map.sockets[socket_id].node_id == node_id)
+ break;
+ }
+
+ if (socket_id == cpu_map.socket_count) {
+ if (socket_id == RTE_DIM(cpu_map.sockets)) {
+ full = true;
+ goto exit;
+ }
+
+ cpu_map.sockets[socket_id].node_id = node_id;
+ cpu_map.socket_count++;
+ }
+
+ for (i = 0; i < EAL_PROCESSOR_GROUP_SIZE; i++) {
+ if ((cores->Mask & ((KAFFINITY)1 << i)) == 0)
+ continue;
+
+ if (cpu_map.lcore_count == RTE_DIM(cpu_map.lcores)) {
+ full = true;
+ goto exit;