/* EAX = 0000 0003 */
/* Print the Processor Serial Number (PSN), if the CPU supports and
 * exposes one. Only Intel and Transmeta ever implemented leaf 0x03.
 *
 * NOTE(fix): the function's closing brace was missing in the original
 * text; restored here.
 */
void handle_std_psn(struct cpu_regs_t *regs, struct cpuid_state_t *state)
{
	if ((state->vendor & (VENDOR_INTEL | VENDOR_TRANSMETA)) == 0)
		return;

	/* PSN support is advertised via leaf 1, EDX bit 18. */
	ZERO_REGS(regs);
	regs->eax = 0x01;
	state->cpuid_call(regs, state);
	if ((regs->edx & 0x00040000) == 0) {
		printf("Processor serial number: disabled (or not supported)\n\n");
		return;
	}

	if (state->vendor & VENDOR_TRANSMETA) {
		ZERO_REGS(regs);
		regs->eax = 0x03;
		state->cpuid_call(regs, state);
		printf("Processor serial number (Transmeta encoding): %08X-%08X-%08X-%08X\n\n",
		       regs->eax, regs->ebx, regs->ecx, regs->edx);
	}

	if (state->vendor & VENDOR_INTEL) {
		/* Leaf 1 EAX (the CPU signature, still held in regs->eax at
		 * this point) forms the top 32 bits of the 96-bit PSN; leaf 3
		 * EDX:ECX supplies the lower 64 bits. */
		uint32_t ser_eax = regs->eax;
		ZERO_REGS(regs);
		regs->eax = 0x03;
		state->cpuid_call(regs, state);
		printf("Processor serial number (Intel encoding): %04X-%04X-%04X-%04X-%04X-%04X\n\n",
		       ser_eax >> 16, ser_eax & 0xFFFF,
		       regs->edx >> 16, regs->edx & 0xFFFF,
		       regs->ecx >> 16, regs->ecx & 0xFFFF);
	}
}
/* Return the initial (legacy, 8-bit) APIC ID of the executing logical CPU.
 *
 * NOTE(fix): the original text had '&regs' corrupted into '®s' by an
 * HTML-entity mangling ('&reg;'); restored here.
 */
static uint8_t get_apicid(struct cpuid_state_t *state)
{
	struct cpu_regs_t regs;

	ZERO_REGS(&regs);
	regs.eax = 1;
	state->cpuid_call(&regs, state);

	/* Initial APIC ID lives in bits 31:24 of leaf 1 EBX. */
	regs.ebx = regs.ebx >> 24;
	return (uint8_t)(regs.ebx & 0xFF);
}
/* EAX = 0000 0002 */ void handle_std_cache02(struct cpu_regs_t *regs, struct cpuid_state_t *state) { uint8_t i, m = regs->eax & 0xFF; struct cpu_regs_t *rvec = NULL; uint8_t *cdesc; if ((state->vendor & (VENDOR_INTEL | VENDOR_CYRIX)) == 0) return; /* I don't think this ever happens, but just in case... */ if (m < 1) return; rvec = malloc(sizeof(struct cpu_regs_t) * m); if (!rvec) return; /* We have the first result already, copy it over. */ memcpy(&rvec[0], regs, sizeof(struct cpu_regs_t)); /* Now we can reuse 'regs' as an offset. */ regs = &rvec[1]; for (i = 1; i < m; i++) { ZERO_REGS(regs); regs->eax = 2; state->cpuid_call(regs, state); regs++; } /* Scan for 0xFF descriptor, which says to ignore leaf 0x02 */ cdesc = (uint8_t *)rvec; while (cdesc <= (uint8_t *)rvec + (sizeof(struct cpu_regs_t) * m)) if (*cdesc++ == 0xFF) goto err; /* Printout time. */ printf("Cache descriptors:\n"); regs = rvec; for (i = 0; i < m; i++) { print_intel_caches(regs, &state->sig); regs++; } printf("\n"); err: free(rvec); }
/* Sanity check: walk the deterministic cache parameter leaves (0x04) and
 * verify that no L3 cache claims to be shared by only a single thread.
 * Returns 1 if sane, 0 on failure.
 *
 * NOTE(fix): the original text had '&regs' corrupted into '®s'
 * (HTML-entity mangling) in three places; restored here. The success
 * path also returned 0 — identical to the failure path, making the
 * result meaningless — it now returns 1.
 */
static int sane_l3_sharing(struct cpuid_state_t *state)
{
	/* Field layout of leaf 0x04 EAX. Bitfield ordering is
	 * implementation-defined in C; this matches the layout the rest of
	 * the tool relies on for x86 compilers. */
	struct eax_cache04_t {
		unsigned type:5;
		unsigned level:3;
		unsigned self_initializing:1;
		unsigned fully_associative:1;
		unsigned reserved:4;
		unsigned max_threads_sharing:12; /* +1 encoded */
		unsigned apics_reserved:6; /* +1 encoded */
	};
	uint32_t i = 0;
	struct cpu_regs_t regs;

	printf("Verifying L3 thread sharing sanity... ");
	while (1) {
		struct eax_cache04_t *eax;
		ZERO_REGS(&regs);
		regs.eax = 4;
		regs.ecx = i;
		state->cpuid_call(&regs, state);

		/* Cache type 0 (low nibble of EAX) terminates the list. */
		if (!(regs.eax & 0xF))
			break;

		eax = (struct eax_cache04_t *)&regs.eax;
		/* max_threads_sharing is +1 encoded: raw 0 means one thread,
		 * which is nonsensical for an L3. */
		if (eax->level == 3 && eax->max_threads_sharing + 1 == 1) {
			printf("fail (L3 cache shared by too few threads)\n");
			return 0;
		}
		i++;
	}
	printf("ok\n");
	return 1;
}
/* Walk every CPUID leaf range (0x00000000, 0x00010000, ... 0xFFFF0000),
 * probing each index within a range up to the range's reported maximum,
 * and dispatch each leaf to its registered handler.
 *
 * state: CPUID call/print vtable plus per-range maximum tracking.
 * dump:  nonzero = raw-dump every leaf (dump_handlers + cpuid_print);
 *        zero    = only scan ranges that have a decode handler.
 *
 * Relies on the file-scope 'scan_to' override for the per-range upper
 * bound — presumably a user-specified scan limit; confirm at its
 * definition.
 */
static void run_cpuid(struct cpuid_state_t *state, int dump)
{
	uint32_t i, j;
	uint32_t r;
	struct cpu_regs_t cr_tmp, ignore[2];
	const struct cpuid_leaf_handler_index_t *h;

	/* Capture the CPU's "bogus leaf" responses up front, so leaves that
	 * merely echo them can be recognized and skipped below. */

	/* Arbitrary leaf that's probably never ever used. */
	ZERO_REGS(&ignore[0]);
	ignore[0].eax = 0x5FFF0000;
	state->cpuid_call(&ignore[0], state);

	/* Another arbitrary leaf. On KVM, there are two invalid returns, and
	 * they're split by the 0x80000000 boundary. */
	ZERO_REGS(&ignore[1]);
	ignore[1].eax = 0x8FFF0000;
	state->cpuid_call(&ignore[1], state);

	for (r = 0x00000000;; r += 0x00010000) {
		/* If we're not doing a dump, we don't need to scan ranges
		 * which we don't actually have special handlers for. */
		if (!dump) {
			for (h = decode_handlers; h->handler; h++) {
				if ((h->leaf_id & 0xFFFF0000) == r)
					break;
			}
			if (!h->handler)
				goto invalid_leaf;
		}

		/* curmax is refreshed by leaf handlers as the range's base
		 * leaf reports its true maximum; start it at the base. */
		state->curmax = r;
		for (i = r; i <= (scan_to ? r + scan_to : state->curmax); i++) {
			/* If a particular range is unsupported, the processor can report
			 * a really wacky upper boundary. This is a quick sanity check,
			 * since it's very unlikely that any range would have more than
			 * 0xFFFF indices. */
			if ((state->curmax & 0xFFFF0000) != (i & 0xFFFF0000))
				break;

			ZERO_REGS(&cr_tmp);

			/* ECX isn't populated here. It's the job of any leaf handler to
			 * re-call CPUID with the appropriate ECX values. */
			cr_tmp.eax = i;
			state->cpuid_call(&cr_tmp, state);

			/* Typically, if the range is invalid, the CPU gives an obvious
			 * "bogus" result. We try to catch that here.
			 *
			 * We don't compare the last byte of EDX (size - 1) because on
			 * certain very broken OSes (i.e. Mac OS X) there are no APIs to
			 * force threads to be affinitized to one core. This makes the
			 * value of EDX a bit nondeterministic when CPUID is executed. */
			for (j = 0; j < sizeof(ignore) / sizeof(struct cpu_regs_t); j++) {
				/* The BHYVE hypervisor maps any 4000xxxx leaf to 0x40000000,
				 * which causes the ignore list to exclude leaf 0x40000000
				 * itself. Special exception here to ensure that the base
				 * hypervisor leaf doesn't get excluded. */
				if (i == 0x40000000)
					break;
				/* Only the base leaf of a range is compared; an interior
				 * leaf matching the bogus pattern is left alone. */
				if (i == r && 0 == memcmp(&ignore[j], &cr_tmp, sizeof(struct cpu_regs_t) - 4))
					goto invalid_leaf;
			}

			/* Dispatch to the leaf-specific handler if one exists;
			 * otherwise, in dump mode, print the raw registers. */
			for (h = dump ? dump_handlers : decode_handlers; h->handler; h++) {
				if (h->leaf_id == i)
					break;
			}
			if (h->handler)
				h->handler(&cr_tmp, state);
			else if (dump)
				state->cpuid_print(&cr_tmp, state, FALSE);
		}
invalid_leaf:
		/* Terminating condition.
		 * This is an awkward way to terminate the loop, but if we used
		 * r != 0xFFFF0000 as the terminating condition in the outer loop,
		 * then we would effectively skip probing of 0xFFFF0000. So we
		 * turn this into an awkward do/while+for combination. */
		if (r == 0xFFFF0000)
			break;
	}
}