bool acpi_device_always_present(struct acpi_device *adev) { bool ret = false; unsigned int i; for (i = 0; i < ARRAY_SIZE(always_present_ids); i++) { if (acpi_match_device_ids(adev, always_present_ids[i].hid)) continue; if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, always_present_ids[i].uid)) continue; if (!x86_match_cpu(always_present_ids[i].cpu_ids)) continue; if (always_present_ids[i].dmi_ids[0].matches[0].slot && !dmi_check_system(always_present_ids[i].dmi_ids)) continue; ret = true; break; } return ret; }
/*
 * amd_power_pmu_init - register the AMD accumulated-power PMU.
 *
 * Returns 0 when the CPU is not covered by @cpu_match (nothing to do),
 * -ENODEV when the accumulated-power feature or its MSR is unusable,
 * otherwise the result of the hotplug-state / PMU registration.
 *
 * Fixes over the previous version: the cpuhp_setup_state() return value
 * was ignored, and the hotplug state was leaked when perf_pmu_register()
 * failed.
 */
static int __init amd_power_pmu_init(void)
{
	int ret;

	if (!x86_match_cpu(cpu_match))
		return 0;

	if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
		return -ENODEV;

	/* CPUID 0x80000007 ECX: sample ratio for the power accumulator */
	cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);

	if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
		pr_err("Failed to read max compute unit power accumulator MSR\n");
		return -ENODEV;
	}

	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
				"perf/x86/amd/power:online",
				power_cpu_init, power_cpu_exit);
	if (ret)
		return ret;

	ret = perf_pmu_register(&pmu_class, "power", -1);
	if (WARN_ON(ret)) {
		pr_warn("AMD Power PMU registration failed\n");
		/* Undo the hotplug state set up above */
		cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE);
		return ret;
	}

	pr_info("AMD Power PMU detected\n");
	return ret;
}
/* Register the PCLMULQDQ-accelerated CRC-T10DIF shash when supported. */
static int __init crct10dif_intel_mod_init(void)
{
	if (x86_match_cpu(crct10dif_cpu_id))
		return crypto_register_shash(&alg);

	return -ENODEV;
}
/**
 * speedstep_init - initializes the SpeedStep CPUFreq driver
 *
 * Initializes the SpeedStep support. Returns -ENODEV on unsupported
 * devices, -EINVAL on problems during initiatization, and zero on
 * success.
 */
static int __init speedstep_init(void)
{
	/* Gate on the CPU model table first; everything else is probing. */
	if (!x86_match_cpu(ss_smi_ids))
		return -ENODEV;

	/* detect processor */
	speedstep_processor = speedstep_detect_processor();
	if (!speedstep_processor) {
		pr_debug("Intel(R) SpeedStep(TM) capable processor "
				"not found\n");
		return -ENODEV;
	}

	/* detect chipset */
	if (!speedstep_detect_chipset()) {
		pr_debug("Intel(R) SpeedStep(TM) for this chipset not "
				"(yet) available.\n");
		return -ENODEV;
	}

	/* activate speedstep support */
	if (speedstep_activate()) {
		/* Activation failed: drop the chipset PCI device reference
		 * taken during chipset detection. */
		pci_dev_put(speedstep_chipset_dev);
		return -EINVAL;
	}

	/* Locate the transition-latency measurement register. */
	if (speedstep_find_register())
		return -ENODEV;

	return cpufreq_register_driver(&speedstep_driver);
}
/*
 * Module init: select the fastest available GCM helper on x86-64
 * (AVX2 > AVX > SSE), then register the AES-NI algorithms.
 */
static int __init aesni_init(void)
{
	int err;

	/* Require the AES-NI instruction set. */
	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
	} else
#endif
	{
		/* Fallback when neither AVX2 nor AVX is usable. */
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc;
		aesni_gcm_dec_tfm = aesni_gcm_dec;
	}
#endif
	/* FPU save/restore wrapper must be ready before registration. */
	err = crypto_fpu_init();
	if (err)
		return err;

	return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}
/*
 * Register the AES-NI AEGIS-256 AEAD implementations.
 * NOTE(review): the gate reuses aesni_cpu_id; confirm that table also
 * covers any SIMD prerequisites of this implementation.
 */
static int __init crypto_aegis256_aesni_module_init(void)
{
	if (x86_match_cpu(aesni_cpu_id))
		return crypto_register_aeads(crypto_aegis256_aesni_alg,
					     ARRAY_SIZE(crypto_aegis256_aesni_alg));

	return -ENODEV;
}
/* Register the PCLMULQDQ-based CRC32 shash; log and bail when absent. */
static int __init crc32_pclmul_mod_init(void)
{
	if (x86_match_cpu(crc32pclmul_cpu_id))
		return crypto_register_shash(&alg);

	pr_info("PCLMULQDQ-NI instructions are not detected.\n");
	return -ENODEV;
}
static bool sdhci_acpi_on_byt(void) { static const struct x86_cpu_id byt[] = { { X86_VENDOR_INTEL, 6, 0x37 }, {} }; return x86_match_cpu(byt); }
/*
 * Install the Intel MID platform PM callbacks on matching CPUs.
 * Always returns 0: on other CPUs this initcall is simply a no-op.
 */
static int __init mid_pci_init(void)
{
	if (x86_match_cpu(lpss_cpu_ids))
		pci_set_platform_pm(&mid_pci_platform_pm);

	return 0;
}
/*
 * Module init: pick the fastest GCM and CTR helpers available on this
 * CPU (x86-64 only), then register the plain algorithms followed by the
 * AEADs, unwinding in reverse order on failure.
 */
static int __init aesni_init(void)
{
	int err;

	/* Require the AES-NI instruction set. */
	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
	} else
#endif
	{
		/* Fallback when neither AVX2 nor AVX is usable. */
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc;
		aesni_gcm_dec_tfm = aesni_gcm_dec;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif
	/* FPU save/restore wrapper must be ready before registration. */
	err = crypto_fpu_init();
	if (err)
		return err;

	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	if (err)
		goto fpu_exit;

	err = crypto_register_aeads(aesni_aead_algs,
				    ARRAY_SIZE(aesni_aead_algs));
	if (err)
		goto unregister_algs;

	return err;

	/* Error unwind: undo registrations in reverse order. */
unregister_algs:
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
	crypto_fpu_exit();
	return err;
}
static bool is_valleyview(void) { static const struct x86_cpu_id cpu_ids[] = { { X86_VENDOR_INTEL, 6, 55 }, /* Valleyview, Bay Trail */ {} }; if (!x86_match_cpu(cpu_ids)) return false; return true; }
/*
 * Module init: verify AES-NI support, set up the FPU wrapper, then
 * register the algorithm table.
 */
static int __init aesni_init(void)
{
	int err;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;

	err = crypto_fpu_init();
	if (!err)
		err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));

	return err;
}
/*
 * Register the SSE4.2 CRC32C shash.  On x86-64 with PCLMULQDQ the
 * faster carry-less-multiply routines are patched in first.
 */
static int __init crc32c_intel_mod_init(void)
{
	if (!x86_match_cpu(crc32c_cpu_id))
		return -ENODEV;

#ifdef CONFIG_X86_64
	if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
		/* Swap in the PCLMULQDQ-accelerated callbacks. */
		alg.digest = crc32c_pcl_intel_digest;
		alg.finup = crc32c_pcl_intel_finup;
		alg.update = crc32c_pcl_intel_update;
	}
#endif

	return crypto_register_shash(&alg);
}
/**
 * speedstep_init - initializes the SpeedStep CPUFreq driver
 *
 * Initializes the SpeedStep support. Returns -ENODEV on unsupported
 * BIOS, -EINVAL on problems during initiatization, and zero on
 * success.
 */
static int __init speedstep_init(void)
{
	if (!x86_match_cpu(ss_smi_ids))
		return -ENODEV;

	speedstep_processor = speedstep_detect_processor();

	/* Only the SpeedStep-capable Pentium III variants are driven via
	 * the SMI interface; every other detection result is rejected. */
	switch (speedstep_processor) {
	case SPEEDSTEP_CPU_PIII_T:
	case SPEEDSTEP_CPU_PIII_C:
	case SPEEDSTEP_CPU_PIII_C_EARLY:
		break;
	default:
		speedstep_processor = 0;
	}

	if (!speedstep_processor) {
		pr_debug("No supported Intel CPU detected.\n");
		return -ENODEV;
	}

	/* Dump the IST table the BIOS handed us, for debugging. */
	pr_debug("signature:0x%.8x, command:0x%.8x, "
		"event:0x%.8x, perf_level:0x%.8x.\n",
		ist_info.signature, ist_info.command,
		ist_info.event, ist_info.perf_level);

	/* Error if no IST-SMI BIOS or no PARM
		 sig= 'ISGE' aka 'Intel Speedstep Gate E' */
	/* NOTE(review): 0x47534943 decodes to ASCII "CISG" (little-endian),
	 * not "ISGE" as the comment claims -- confirm against the IST BIOS
	 * spec before changing either. Module parameters smi_port/smi_cmd
	 * can override a missing signature. */
	if ((ist_info.signature != 0x47534943) &&
	    ((smi_port == 0) || (smi_cmd == 0)))
		return -ENODEV;

	/* smi_sig=1 (module param) forces the canonical signature;
	 * otherwise trust whatever the BIOS reported. */
	if (smi_sig == 1)
		smi_sig = 0x47534943;
	else
		smi_sig = ist_info.signature;

	/* setup smi_port from MODLULE_PARM or BIOS */
	if ((smi_port > 0xff) || (smi_port < 0))
		return -EINVAL;
	else if (smi_port == 0)
		smi_port = ist_info.command & 0xff;

	/* Same for the SMI command byte (bits 23:16 of ist_info.command). */
	if ((smi_cmd > 0xff) || (smi_cmd < 0))
		return -EINVAL;
	else if (smi_cmd == 0)
		smi_cmd = (ist_info.command >> 16) & 0xff;

	return cpufreq_register_driver(&speedstep_driver);
}
static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct device *ptr_dev = &dev->dev; struct pmc_dev *pmcdev = &pmc; const struct x86_cpu_id *cpu_id; const struct pmc_reg_map *map = (struct pmc_reg_map *)id->driver_data; int err; cpu_id = x86_match_cpu(intel_pmc_core_ids); if (!cpu_id) { dev_dbg(&dev->dev, "PMC Core: cpuid mismatch.\n"); return -EINVAL; } err = pcim_enable_device(dev); if (err < 0) { dev_dbg(&dev->dev, "PMC Core: failed to enable Power Management Controller.\n"); return err; } err = pci_read_config_dword(dev, SPT_PMC_BASE_ADDR_OFFSET, &pmcdev->base_addr); if (err < 0) { dev_dbg(&dev->dev, "PMC Core: failed to read PCI config space.\n"); return err; } pmcdev->base_addr &= PMC_BASE_ADDR_MASK; dev_dbg(&dev->dev, "PMC Core: PWRMBASE is %#x\n", pmcdev->base_addr); pmcdev->regbase = devm_ioremap_nocache(ptr_dev, pmcdev->base_addr, SPT_PMC_MMIO_REG_LEN); if (!pmcdev->regbase) { dev_dbg(&dev->dev, "PMC Core: ioremap failed.\n"); return -ENOMEM; } mutex_init(&pmcdev->lock); pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(); pmcdev->map = map; err = pmc_core_dbgfs_register(pmcdev); if (err < 0) dev_warn(&dev->dev, "PMC Core: debugfs register failed.\n"); pmc.has_slp_s0_res = true; return 0; }
/*
 * Module init: verify AES-NI, set up the FPU wrapper, reset each
 * algorithm's list node, then register the whole table.
 */
static int __init aesni_init(void)
{
	int err, idx;

	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;

	err = crypto_fpu_init();
	if (err)
		return err;

	for (idx = 0; idx < ARRAY_SIZE(aesni_algs); idx++)
		INIT_LIST_HEAD(&aesni_algs[idx].cra_list);

	return crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}
/*
 * Module init for the Intel cstate residency PMU: skip virtualized
 * environments, match the CPU model, probe the model-specific counter
 * set, then register the PMUs.
 */
static int __init cstate_pmu_init(void)
{
	const struct x86_cpu_id *match;
	int ret;

	/* Residency MSRs are generally not exposed under a hypervisor. */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	match = x86_match_cpu(intel_cstates_match);
	if (!match)
		return -ENODEV;

	ret = cstate_probe((const struct cstate_model *)match->driver_data);
	if (ret)
		return ret;

	return cstate_init();
}
/*
 * pmc_core_probe - initcall variant of the PMC Core probe (no PCI dev).
 *
 * Selects the register map from the matched CPU id, derives the MMIO
 * base from the ACPI LPIT table (falling back to a fixed default), maps
 * it and registers the debugfs interface.
 */
static int __init pmc_core_probe(void)
{
	struct pmc_dev *pmcdev = &pmc;
	const struct x86_cpu_id *cpu_id;
	u64 slp_s0_addr;
	int err;

	cpu_id = x86_match_cpu(intel_pmc_core_ids);
	if (!cpu_id)
		return -ENODEV;

	pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data;

	/*
	 * Coffeelake has CPU ID of Kabylake and Cannonlake PCH. So here
	 * Sunrisepoint PCH regmap can't be used. Use Cannonlake PCH regmap
	 * in this case.
	 */
	if (!pci_dev_present(pmc_pci_ids))
		pmcdev->map = &cnp_reg_map;

	/* Prefer the SLP_S0 residency address from the ACPI LPIT table;
	 * fall back to the hard-coded default when it is unavailable. */
	if (lpit_read_residency_count_address(&slp_s0_addr))
		pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;
	else
		pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;

	pmcdev->regbase = ioremap(pmcdev->base_addr,
				  pmcdev->map->regmap_length);
	if (!pmcdev->regbase)
		return -ENOMEM;

	mutex_init(&pmcdev->lock);
	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();

	/* Unlike the PCI probe path, debugfs failure is fatal here. */
	err = pmc_core_dbgfs_register(pmcdev);
	if (err < 0) {
		pr_warn(" debugfs register failed.\n");
		iounmap(pmcdev->regbase);
		return err;
	}

	pr_info(" initialized\n");
	return 0;
}
/*
 * Module init for the Elan SC520 cpufreq driver: map the single CPUCTL
 * byte in the MMCR block and register the driver, unmapping on failure.
 */
static int __init sc520_freq_init(void)
{
	int ret;

	if (!x86_match_cpu(sc520_ids))
		return -ENODEV;

	cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
	if (!cpuctl) {
		printk(KERN_ERR "sc520_freq: error: failed to remap memory\n");
		return -ENOMEM;
	}

	ret = cpufreq_register_driver(&sc520_freq_driver);
	if (ret)
		iounmap(cpuctl);

	return ret;
}
/**
 * imr_init - entry point for IMR driver.
 *
 * return: -ENODEV for no IMR support 0 if good to go.
 */
static int __init imr_init(void)
{
	struct imr_device *idev = &imr_dev;

	/* Quark X1000 only, and the IOSF sideband must be reachable. */
	if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
		return -ENODEV;

	idev->max_imr = QUARK_X1000_IMR_MAX;
	idev->reg_base = QUARK_X1000_IMR_REGBASE;
	idev->init = true;

	mutex_init(&idev->lock);
	/* debugfs is best-effort: log the failure and carry on. */
	if (imr_debugfs_register(idev) != 0)
		pr_warn("debugfs register failed!\n");
	imr_fixup_memmap(idev);
	return 0;
}
/*
 * Module init for the VIA PadLock AES driver: verify the xcrypt unit is
 * present and enabled, register the plain/ECB/CBC algorithms, and apply
 * the VIA Nano stepping-2 prefetch workaround when that CPU is detected.
 */
static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!x86_match_cpu(padlock_cpu_id))
		return -ENODEV;

	/* The ACE unit exists but the BIOS left it disabled. */
	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

	/* VIA Nano stepping 2: use larger fetch sizes as a workaround. */
	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

	/* Error unwind: unregister in reverse order of registration. */
cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}
/*
 * Register the PCLMULQDQ GHASH shash and its async wrapper; on wrapper
 * failure the shash is unregistered again.
 */
static int __init ghash_pclmulqdqni_mod_init(void)
{
	int ret;

	if (!x86_match_cpu(pcmul_cpu_id))
		return -ENODEV;

	ret = crypto_register_shash(&ghash_alg);
	if (ret)
		return ret;

	ret = crypto_register_ahash(&ghash_async_alg);
	if (ret)
		crypto_unregister_shash(&ghash_alg);

	return ret;
}
/**
 * imr_self_test_init - entry point for the IMR self test.
 *
 * Runs the self test only when IMR-capable hardware is present.
 *
 * return: always 0, so the module loads regardless of hardware support.
 */
static int __init imr_self_test_init(void)
{
	if (x86_match_cpu(imr_ids) != NULL)
		imr_self_test();

	return 0;
}
/*
 * snd_byt_rt5651_mc_probe - machine-driver probe for Bay Trail /
 * Cherry Trail boards with an RT5651 codec.
 *
 * Fixes up DAI/codec names from ACPI, detects Bay Trail CR routing via
 * the BIOS CHAN package, applies DMI quirks, and registers the card.
 * Returns 0 on success, -EPROBE_DEFER while the codec device is not yet
 * available, or a negative errno.
 */
static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
{
	const char * const mic_name[] = { "dmic", "in1", "in2", "in12" };
	struct byt_rt5651_private *priv;
	struct snd_soc_acpi_mach *mach;
	struct device *codec_dev;
	const char *i2c_name = NULL;
	const char *hp_swapped;
	bool is_bytcr = false;
	int ret_val = 0;
	int dai_index = 0;
	int i;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* register the soc card */
	byt_rt5651_card.dev = &pdev->dev;
	mach = byt_rt5651_card.dev->platform_data;
	snd_soc_card_set_drvdata(&byt_rt5651_card, priv);

	/* fix index of codec dai */
	for (i = 0; i < ARRAY_SIZE(byt_rt5651_dais); i++) {
		if (!strcmp(byt_rt5651_dais[i].codec_name, "i2c-10EC5651:00")) {
			dai_index = i;
			break;
		}
	}

	/* fixup codec name based on HID */
	i2c_name = acpi_dev_get_first_match_name(mach->id, NULL, -1);
	if (!i2c_name) {
		dev_err(&pdev->dev, "Error cannot find '%s' dev\n", mach->id);
		return -ENODEV;
	}
	snprintf(byt_rt5651_codec_name, sizeof(byt_rt5651_codec_name),
		"%s%s", "i2c-", i2c_name);
	byt_rt5651_dais[dai_index].codec_name = byt_rt5651_codec_name;

	/* Defer until the I2C codec device has been instantiated. */
	codec_dev = bus_find_device_by_name(&i2c_bus_type, NULL,
					    byt_rt5651_codec_name);
	if (!codec_dev)
		return -EPROBE_DEFER;

	/*
	 * swap SSP0 if bytcr is detected
	 * (will be overridden if DMI quirk is detected)
	 */
	if (x86_match_cpu(baytrail_cpu_ids)) {
		struct sst_platform_info *p_info = mach->pdata;
		const struct sst_res_info *res_info = p_info->res_info;

		/* IPC IRQ index 0 identifies a Bay Trail CR board. */
		if (res_info->acpi_ipc_irq_index == 0)
			is_bytcr = true;
	}

	if (is_bytcr) {
		/*
		 * Baytrail CR platforms may have CHAN package in BIOS, try
		 * to find relevant routing quirk based as done on Windows
		 * platforms. We have to read the information directly from
		 * the BIOS, at this stage the card is not created and the
		 * links with the codec driver/pdata are non-existent
		 */
		struct acpi_chan_package chan_package;

		/* format specified: 2 64-bit integers */
		struct acpi_buffer format = {sizeof("NN"), "NN"};
		struct acpi_buffer state = {0, NULL};
		struct snd_soc_acpi_package_context pkg_ctx;
		bool pkg_found = false;

		state.length = sizeof(chan_package);
		state.pointer = &chan_package;

		pkg_ctx.name = "CHAN";
		pkg_ctx.length = 2;
		pkg_ctx.format = &format;
		pkg_ctx.state = &state;
		pkg_ctx.data_valid = false;

		pkg_found = snd_soc_acpi_find_package_from_hid(mach->id,
							       &pkg_ctx);
		if (pkg_found) {
			if (chan_package.aif_value == 1) {
				dev_info(&pdev->dev, "BIOS Routing: AIF1 connected\n");
				byt_rt5651_quirk |= BYT_RT5651_SSP0_AIF1;
			} else if (chan_package.aif_value == 2) {
				dev_info(&pdev->dev, "BIOS Routing: AIF2 connected\n");
				byt_rt5651_quirk |= BYT_RT5651_SSP0_AIF2;
			} else {
				dev_info(&pdev->dev, "BIOS Routing isn't valid, ignored\n");
				pkg_found = false;
			}
		}

		if (!pkg_found) {
			/* no BIOS indications, assume SSP0-AIF2 connection */
			byt_rt5651_quirk |= BYT_RT5651_SSP0_AIF2;
		}
	}

	/* check quirks before creating card */
	dmi_check_system(byt_rt5651_quirk_table);

	/* Must be called before register_card, also see declaration comment. */
	ret_val = byt_rt5651_add_codec_device_props(codec_dev);
	if (ret_val) {
		put_device(codec_dev);
		return ret_val;
	}

	/* Cherry Trail devices use an external amplifier enable gpio */
	if (x86_match_cpu(cherrytrail_cpu_ids)) {
		snd_byt_rt5651_mc_add_amp_en_gpio_mapping(codec_dev);
		priv->ext_amp_gpio = devm_fwnode_get_index_gpiod_from_child(
						&pdev->dev, "ext-amp-enable", 0,
						codec_dev->fwnode,
						GPIOD_OUT_LOW, "speaker-amp");
		if (IS_ERR(priv->ext_amp_gpio)) {
			ret_val = PTR_ERR(priv->ext_amp_gpio);
			switch (ret_val) {
			case -ENOENT:
				/* GPIO simply absent: not an error. */
				priv->ext_amp_gpio = NULL;
				break;
			default:
				dev_err(&pdev->dev, "Failed to get ext-amp-enable GPIO: %d\n",
					ret_val);
				/* fall through */
			case -EPROBE_DEFER:
				put_device(codec_dev);
				return ret_val;
			}
		}
	}
	put_device(codec_dev);

	log_quirks(&pdev->dev);

	if ((byt_rt5651_quirk & BYT_RT5651_SSP2_AIF2) ||
	    (byt_rt5651_quirk & BYT_RT5651_SSP0_AIF2)) {
		/* fixup codec aif name */
		snprintf(byt_rt5651_codec_aif_name,
			sizeof(byt_rt5651_codec_aif_name),
			"%s", "rt5651-aif2");
		byt_rt5651_dais[dai_index].codec_dai_name =
			byt_rt5651_codec_aif_name;
	}

	if ((byt_rt5651_quirk & BYT_RT5651_SSP0_AIF1) ||
	    (byt_rt5651_quirk & BYT_RT5651_SSP0_AIF2)) {
		/* fixup cpu dai name name */
		snprintf(byt_rt5651_cpu_dai_name,
			sizeof(byt_rt5651_cpu_dai_name),
			"%s", "ssp0-port");
		byt_rt5651_dais[dai_index].cpu_dai_name =
			byt_rt5651_cpu_dai_name;
	}

	if (byt_rt5651_quirk & BYT_RT5651_MCLK_EN) {
		priv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
		if (IS_ERR(priv->mclk)) {
			ret_val = PTR_ERR(priv->mclk);
			dev_err(&pdev->dev,
				"Failed to get MCLK from pmc_plt_clk_3: %d\n",
				ret_val);
			/*
			 * Fall back to bit clock usage for -ENOENT (clock not
			 * available likely due to missing dependencies), bail
			 * for all other errors, including -EPROBE_DEFER
			 */
			if (ret_val != -ENOENT)
				return ret_val;
			byt_rt5651_quirk &= ~BYT_RT5651_MCLK_EN;
		}
	}

	if (byt_rt5651_quirk & BYT_RT5651_HP_LR_SWAPPED)
		hp_swapped = "-hp-swapped";
	else
		hp_swapped = "";

	/* Build a quirk-describing long name for userspace (UCM etc.). */
	snprintf(byt_rt5651_long_name, sizeof(byt_rt5651_long_name),
		 "bytcr-rt5651-%s-spk-%s-mic%s",
		 (byt_rt5651_quirk & BYT_RT5651_MONO_SPEAKER) ?
			"mono" : "stereo",
		 mic_name[BYT_RT5651_MAP(byt_rt5651_quirk)],
		 hp_swapped);
	byt_rt5651_card.long_name = byt_rt5651_long_name;

	ret_val = devm_snd_soc_register_card(&pdev->dev, &byt_rt5651_card);
	if (ret_val) {
		dev_err(&pdev->dev, "devm_snd_soc_register_card failed %d\n",
			ret_val);
		return ret_val;
	}
	platform_set_drvdata(pdev, &byt_rt5651_card);
	return ret_val;
}
/*
 * int0002_probe - platform probe for the INT0002 virtual GPIO device.
 *
 * Exposes the GPE0A PME_B0 wakeup event as a single virtual GPIO so
 * that ACPI event handling can consume it.  Returns 0 on success or a
 * negative errno.
 */
static int int0002_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct x86_cpu_id *cpu_id;
	struct gpio_chip *chip;
	int irq, ret;

	/* Menlow has a different INT0002 device? <sigh> */
	cpu_id = x86_match_cpu(int0002_cpu_ids);
	if (!cpu_id)
		return -ENODEV;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "Error getting IRQ: %d\n", irq);
		return irq;
	}

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->label = DRV_NAME;
	chip->parent = dev;
	chip->owner = THIS_MODULE;
	chip->get = int0002_gpio_get;
	chip->set = int0002_gpio_set;
	/* NOTE(review): direction_input reuses the get callback -- looks
	 * intentional for this virtual pin, but confirm. */
	chip->direction_input = int0002_gpio_get;
	chip->direction_output = int0002_gpio_direction_output;
	chip->base = -1;
	chip->ngpio = GPE0A_PME_B0_VIRT_GPIO_PIN + 1;
	chip->irq.need_valid_mask = true;

	ret = devm_gpiochip_add_data(&pdev->dev, chip, NULL);
	if (ret) {
		dev_err(dev, "Error adding gpio chip: %d\n", ret);
		return ret;
	}

	/* Only the last pin (PME_B0) is a valid irq source. */
	bitmap_clear(chip->irq.valid_mask, 0, GPE0A_PME_B0_VIRT_GPIO_PIN);

	/*
	 * We manually request the irq here instead of passing a flow-handler
	 * to gpiochip_set_chained_irqchip, because the irq is shared.
	 */
	ret = devm_request_irq(dev, irq, int0002_irq,
			       IRQF_SHARED | IRQF_NO_THREAD, "INT0002", chip);
	if (ret) {
		dev_err(dev, "Error requesting IRQ %d: %d\n", irq, ret);
		return ret;
	}

	ret = gpiochip_irqchip_add(chip, &int0002_irqchip, 0, handle_edge_irq,
				   IRQ_TYPE_NONE);
	if (ret) {
		dev_err(dev, "Error adding irqchip: %d\n", ret);
		return ret;
	}

	gpiochip_set_chained_irqchip(chip, &int0002_irqchip, irq, NULL);

	return 0;
}
/*
 * Module init: select the fastest GCM/CTR helpers, register algorithms,
 * skciphers and AEADs, then create the SIMD skcipher compat wrappers.
 * Unwinds every prior registration on failure.
 */
static int __init aesni_init(void)
{
	struct simd_skcipher_alg *simd;
	const char *basename;
	const char *algname;
	const char *drvname;
	int err;
	int i;

	/* Require the AES-NI instruction set. */
	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
	if (boot_cpu_has(X86_FEATURE_AVX2)) {
		pr_info("AVX2 version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
	} else
#endif
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		pr_info("AVX version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
		aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
	} else
#endif
	{
		/* Fallback when neither AVX2 nor AVX is usable. */
		pr_info("SSE version of gcm_enc/dec engaged.\n");
		aesni_gcm_enc_tfm = aesni_gcm_enc;
		aesni_gcm_dec_tfm = aesni_gcm_dec;
	}
	aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
	if (boot_cpu_has(X86_FEATURE_AVX)) {
		/* optimize performance of ctr mode encryption transform */
		aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
		pr_info("AES CTR mode by8 optimization enabled\n");
	}
#endif
#endif
	/* FPU save/restore wrapper must be ready before registration. */
	err = crypto_fpu_init();
	if (err)
		return err;

	err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
	if (err)
		goto fpu_exit;

	err = crypto_register_skciphers(aesni_skciphers,
					ARRAY_SIZE(aesni_skciphers));
	if (err)
		goto unregister_algs;

	err = crypto_register_aeads(aesni_aead_algs,
				    ARRAY_SIZE(aesni_aead_algs));
	if (err)
		goto unregister_skciphers;

	/* SIMD wrappers for the internal ("__xxx") skciphers: the visible
	 * name/driver name are derived by dropping the "__" prefix. */
	for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
		algname = aesni_skciphers[i].base.cra_name + 2;
		drvname = aesni_skciphers[i].base.cra_driver_name + 2;
		basename = aesni_skciphers[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto unregister_simds;

		aesni_simd_skciphers[i] = simd;
	}

	/* Second table: failure here is deliberately non-fatal (continue),
	 * presumably because the underlying mode may be built as a module
	 * that is not loaded -- the error is discarded. */
	for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
		algname = aesni_simd_skciphers2[i].algname;
		drvname = aesni_simd_skciphers2[i].drvname;
		basename = aesni_simd_skciphers2[i].basename;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			continue;

		aesni_simd_skciphers2[i].simd = simd;
	}

	return 0;

	/* Error unwind: undo everything in reverse order. */
unregister_simds:
	aesni_free_simds();
	crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
unregister_skciphers:
	crypto_unregister_skciphers(aesni_skciphers,
				    ARRAY_SIZE(aesni_skciphers));
unregister_algs:
	crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
fpu_exit:
	crypto_fpu_exit();
	return err;
}
/**
 * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver
 *
 * Initializes the LongRun support.
 */
static int __init longrun_init(void)
{
	if (x86_match_cpu(longrun_ids))
		return cpufreq_register_driver(&longrun_driver);

	return -ENODEV;
}
/*
 * Module init: register the full set of AES-NI algorithm variants,
 * unwinding through the mirrored goto chain on any failure so that
 * everything registered so far is unregistered in reverse order.
 *
 * NOTE(review): the unwind path never calls a crypto_fpu exit routine
 * after a successful crypto_fpu_init() (fpu_err / aes_err fall straight
 * through to the return) -- confirm whether the FPU wrapper needs
 * explicit teardown in this version.
 */
static int __init aesni_init(void)
{
	int err;

	/* Require the AES-NI instruction set. */
	if (!x86_match_cpu(aesni_cpu_id))
		return -ENODEV;

	if ((err = crypto_fpu_init()))
		goto fpu_err;
	if ((err = crypto_register_alg(&aesni_alg)))
		goto aes_err;
	if ((err = crypto_register_alg(&__aesni_alg)))
		goto __aes_err;
	if ((err = crypto_register_alg(&blk_ecb_alg)))
		goto blk_ecb_err;
	if ((err = crypto_register_alg(&blk_cbc_alg)))
		goto blk_cbc_err;
	if ((err = crypto_register_alg(&ablk_ecb_alg)))
		goto ablk_ecb_err;
	if ((err = crypto_register_alg(&ablk_cbc_alg)))
		goto ablk_cbc_err;
#ifdef CONFIG_X86_64
	if ((err = crypto_register_alg(&blk_ctr_alg)))
		goto blk_ctr_err;
	if ((err = crypto_register_alg(&ablk_ctr_alg)))
		goto ablk_ctr_err;
	if ((err = crypto_register_alg(&__rfc4106_alg)))
		goto __aead_gcm_err;
	if ((err = crypto_register_alg(&rfc4106_alg)))
		goto aead_gcm_err;
#ifdef HAS_CTR
	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
		goto ablk_rfc3686_ctr_err;
#endif
#endif
#ifdef HAS_LRW
	if ((err = crypto_register_alg(&ablk_lrw_alg)))
		goto ablk_lrw_err;
#endif
#ifdef HAS_PCBC
	if ((err = crypto_register_alg(&ablk_pcbc_alg)))
		goto ablk_pcbc_err;
#endif
#ifdef HAS_XTS
	if ((err = crypto_register_alg(&ablk_xts_alg)))
		goto ablk_xts_err;
#endif
	/* err == 0 here: everything registered. */
	return err;

	/*
	 * Error unwind: each label undoes the registrations that succeeded
	 * before the failing one, falling through label by label in the
	 * exact reverse order of the registrations above.
	 */
#ifdef HAS_XTS
ablk_xts_err:
#endif
#ifdef HAS_PCBC
	crypto_unregister_alg(&ablk_pcbc_alg);
ablk_pcbc_err:
#endif
#ifdef HAS_LRW
	crypto_unregister_alg(&ablk_lrw_alg);
ablk_lrw_err:
#endif
#ifdef CONFIG_X86_64
#ifdef HAS_CTR
	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
ablk_rfc3686_ctr_err:
#endif
	crypto_unregister_alg(&rfc4106_alg);
aead_gcm_err:
	crypto_unregister_alg(&__rfc4106_alg);
__aead_gcm_err:
	crypto_unregister_alg(&ablk_ctr_alg);
ablk_ctr_err:
	crypto_unregister_alg(&blk_ctr_alg);
blk_ctr_err:
#endif
	crypto_unregister_alg(&ablk_cbc_alg);
ablk_cbc_err:
	crypto_unregister_alg(&ablk_ecb_alg);
ablk_ecb_err:
	crypto_unregister_alg(&blk_cbc_alg);
blk_cbc_err:
	crypto_unregister_alg(&blk_ecb_alg);
blk_ecb_err:
	crypto_unregister_alg(&__aesni_alg);
__aes_err:
	crypto_unregister_alg(&aesni_alg);
aes_err:
fpu_err:
	return err;
}