void OPENSSL_cpuid_setup(void) { static int trigger = 0; IA32CAP OPENSSL_ia32_cpuid(unsigned int *); IA32CAP vec; char *env; if (trigger) return; trigger = 1; if ((env = getenv("OPENSSL_ia32cap"))) { int off = (env[0] == '~') ? 1 : 0; # if defined(_WIN32) if (!sscanf(env + off, "%I64i", &vec)) vec = strtoul(env + off, NULL, 0); # else if (!sscanf(env + off, "%lli", (long long *)&vec)) vec = strtoul(env + off, NULL, 0); # endif if (off) { IA32CAP mask = vec; vec = OPENSSL_ia32_cpuid(OPENSSL_ia32cap_P) & ~mask; if (mask & (1<<24)) { /* * User disables FXSR bit, mask even other capabilities * that operate exclusively on XMM, so we don't have to * double-check all the time. We mask PCLMULQDQ, AMD XOP, * AES-NI and AVX. Formally speaking we don't have to * do it in x86_64 case, but we can safely assume that * x86_64 users won't actually flip this flag. */ vec &= ~((IA32CAP)(1<<1|1<<11|1<<25|1<<28) << 32); } } else if (env[0] == ':') { vec = OPENSSL_ia32_cpuid(OPENSSL_ia32cap_P); } if ((env = strchr(env, ':'))) { IA32CAP vecx; env++; off = (env[0] == '~') ? 1 : 0; # if defined(_WIN32) if (!sscanf(env + off, "%I64i", &vecx)) vecx = strtoul(env + off, NULL, 0); # else if (!sscanf(env + off, "%lli", (long long *)&vecx)) vecx = strtoul(env + off, NULL, 0); # endif if (off) { OPENSSL_ia32cap_P[2] &= ~(unsigned int)vecx; OPENSSL_ia32cap_P[3] &= ~(unsigned int)(vecx >> 32); } else {
/*
 * Probe CPU capabilities exactly once and cache the result in
 * OPENSSL_ia32cap_P.  The OPENSSL_ia32cap environment variable, when
 * present, overrides the hardware-reported value (parsed by strtoul
 * with automatic base detection, so "0x..." hex works).
 */
void OPENSSL_cpuid_setup(void)
{
    static int trigger = 0;
    unsigned long OPENSSL_ia32_cpuid(void);
    char *env;

    /* Run at most once, even if called from several init paths. */
    if (trigger)
        return;
    trigger = 1;

    env = TINYCLR_SSL_GETENV("OPENSSL_ia32cap");
    if (env != NULL)
        OPENSSL_ia32cap_P = strtoul(env, NULL, 0) | (1 << 10);
    else
        OPENSSL_ia32cap_P = OPENSSL_ia32_cpuid() | (1 << 10);
    /*
     * |(1<<10) sets a reserved bit to signal that the variable was
     * initialized already...  This is to avoid interference with
     * cpuid snippets in the ELF .init segment.
     */
}