static int null_init(void) { dprintk("%s(%p)\n", __FUNCTION__, null_init); memset(&nulldev, 0, sizeof(nulldev)); softc_device_init(&nulldev, "ocfnull", 0, null_methods); null_id = crypto_get_driverid(softc_get_device(&nulldev), CRYPTOCAP_F_HARDWARE); if (null_id < 0) panic("ocfnull: crypto device cannot initialize!"); #define REGISTER(alg) \ crypto_register(null_id,alg,0,0) REGISTER(CRYPTO_DES_CBC); REGISTER(CRYPTO_3DES_CBC); REGISTER(CRYPTO_RIJNDAEL128_CBC); REGISTER(CRYPTO_MD5); REGISTER(CRYPTO_SHA1); REGISTER(CRYPTO_MD5_HMAC); REGISTER(CRYPTO_SHA1_HMAC); #undef REGISTER return 0; }
/*
 * Attach an interface that successfully probed.
 */
static int
xlr_sec_attach(device_t dev)
{
	/* Algorithm ids paired with their printable names for diagnostics. */
	static const struct {
		int alg;
		const char *name;
	} xlr_algs[] = {
		{ CRYPTO_DES_CBC,	"CRYPTO_DES_CBC" },
		{ CRYPTO_3DES_CBC,	"CRYPTO_3DES_CBC" },
		{ CRYPTO_AES_CBC,	"CRYPTO_AES_CBC" },
		{ CRYPTO_ARC4,		"CRYPTO_ARC4" },
		{ CRYPTO_MD5,		"CRYPTO_MD5" },
		{ CRYPTO_SHA1,		"CRYPTO_SHA1" },
		{ CRYPTO_MD5_HMAC,	"CRYPTO_MD5_HMAC" },
		{ CRYPTO_SHA1_HMAC,	"CRYPTO_SHA1_HMAC" },
	};
	struct xlr_sec_softc *sc;
	int i;

	sc = device_get_softc(dev);
	bzero(sc, sizeof(*sc));
	sc->sc_dev = dev;
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev),
	    "rmi crypto driver", MTX_DEF);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		printf("xlr_sec - error : could not get the driver id\n");
		goto error_exit;
	}

	/* Register every supported algorithm; a failure is only reported. */
	for (i = 0; i < (int)(sizeof(xlr_algs) / sizeof(xlr_algs[0])); i++) {
		if (crypto_register(sc->sc_cid, xlr_algs[i].alg, 0, 0,
		    xlr_sec_newsession, xlr_sec_freesession,
		    xlr_sec_process, sc) != 0)
			printf("register failed for %s\n", xlr_algs[i].name);
	}

	xlr_sec_init(sc);
	return (0);

error_exit:
	return (ENXIO);
}
/*
 * Attach an interface that successfully probed.
 */
static int
xlr_sec_attach(device_t dev)
{
	/* Algorithm ids paired with their printable names for diagnostics. */
	static const struct {
		int alg;
		const char *name;
	} xlr_algs[] = {
		{ CRYPTO_DES_CBC,	"CRYPTO_DES_CBC" },
		{ CRYPTO_3DES_CBC,	"CRYPTO_3DES_CBC" },
		{ CRYPTO_AES_CBC,	"CRYPTO_AES_CBC" },
		{ CRYPTO_ARC4,		"CRYPTO_ARC4" },
		{ CRYPTO_MD5,		"CRYPTO_MD5" },
		{ CRYPTO_SHA1,		"CRYPTO_SHA1" },
		{ CRYPTO_MD5_HMAC,	"CRYPTO_MD5_HMAC" },
		{ CRYPTO_SHA1_HMAC,	"CRYPTO_SHA1_HMAC" },
	};
	struct xlr_sec_softc *sc;
	int i;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev),
	    "rmi crypto driver", MTX_DEF);

	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		printf("xlr_sec - error : could not get the driver id\n");
		goto error_exit;
	}

	/* Register every supported algorithm; a failure is only reported. */
	for (i = 0; i < (int)(sizeof(xlr_algs) / sizeof(xlr_algs[0])); i++) {
		if (crypto_register(sc->sc_cid, xlr_algs[i].alg, 0, 0) != 0)
			printf("register failed for %s\n", xlr_algs[i].name);
	}

	xlr_sec_init(sc);
	device_printf(dev, "Initialization complete!\n");
	return (0);

error_exit:
	return (ENXIO);
}
void via_padlock_attach(void) { #define VIA_ACE (CPUID_VIA_HAS_ACE|CPUID_VIA_DO_ACE) if ((cpu_feature_padlock & VIA_ACE) != VIA_ACE) return; struct via_padlock_softc *vp_sc; if ((vp_sc = malloc(sizeof(*vp_sc), M_DEVBUF, M_NOWAIT)) == NULL) return; memset(vp_sc, 0, sizeof(*vp_sc)); vp_sc->sc_cid = crypto_get_driverid(0); if (vp_sc->sc_cid < 0) { printf("PadLock: Could not get a crypto driver ID\n"); free(vp_sc, M_DEVBUF); return; } /* * Ask the opencrypto subsystem to register ourselves. Although * we don't support hardware offloading for various HMAC algorithms, * we will handle them, because opencrypto prefers drivers that * support all requested algorithms. */ #define REGISTER(alg) \ crypto_register(vp_sc->sc_cid, alg, 0, 0, \ via_padlock_crypto_newsession, via_padlock_crypto_freesession, \ via_padlock_crypto_process, vp_sc); REGISTER(CRYPTO_AES_CBC); REGISTER(CRYPTO_MD5_HMAC_96); REGISTER(CRYPTO_MD5_HMAC); REGISTER(CRYPTO_SHA1_HMAC_96); REGISTER(CRYPTO_SHA1_HMAC); REGISTER(CRYPTO_RIPEMD160_HMAC_96); REGISTER(CRYPTO_RIPEMD160_HMAC); REGISTER(CRYPTO_SHA2_HMAC); printf("PadLock: registered support for AES_CBC\n"); }
/*
 * Attach: obtain an OCF driver id for the Octeon crypto unit and
 * advertise the ciphers and HMACs this driver implements.
 */
static int
cryptocteon_attach(device_t dev)
{
	/* Registration order matches the original attach sequence. */
	static const int octeon_algs[] = {
		CRYPTO_MD5_HMAC,
		CRYPTO_SHA1_HMAC,
		CRYPTO_DES_CBC,
		CRYPTO_3DES_CBC,
		CRYPTO_AES_CBC,
	};
	struct cryptocteon_softc *sc;
	int i;

	sc = device_get_softc(dev);

	sc->sc_cid = crypto_get_driverid(dev,
	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC);
	if (sc->sc_cid < 0) {
		device_printf(dev, "crypto_get_driverid ret %d\n",
		    sc->sc_cid);
		return (ENXIO);
	}

	for (i = 0; i < (int)(sizeof(octeon_algs) / sizeof(octeon_algs[0])); i++)
		crypto_register(sc->sc_cid, octeon_algs[i], 0, 0);

	return (0);
}
/*
 * Attach an interface that successfully probed.
 */
static int
xlp_rsa_attach(device_t dev)
{
	struct xlp_rsa_softc *sc;
	uint64_t pcibase;
	int qid0, nq;
	int mhz, nodeid;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	nodeid = nlm_get_device_node(pci_get_slot(dev));
	mhz = nlm_set_device_frequency(nodeid, DFS_DEVICE_RSA, 250);
	if (bootverbose)
		device_printf(dev, "RSA Freq: %dMHz\n", mhz);

	if (pci_get_device(dev) == PCI_DEVICE_ID_NLM_RSA) {
		device_set_desc(dev, "XLP RSA/ECC Accelerator");

		sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
		if (sc->sc_cid < 0) {
			printf("xlp_rsaecc-err:couldn't get the driver id\n");
			goto error_exit;
		}
		if (crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0) != 0)
			goto error_exit;

		/* Record the hardware queue range owned by this unit. */
		pcibase = nlm_get_rsa_pcibase(nodeid);
		qid0 = nlm_qidstart(pcibase);
		nq = nlm_qnum(pcibase);
		sc->rsaecc_vc_start = qid0;
		sc->rsaecc_vc_end = qid0 + nq - 1;
	}

	if (xlp_rsa_init(sc, nodeid) != 0)
		goto error_exit;
	device_printf(dev, "RSA Initialization complete!\n");
	return (0);

error_exit:
	return (ENXIO);
}
static int null_init(void) { dprintk("%s(%p)\n", __FUNCTION__, null_init); null_id = crypto_get_driverid(0); if (null_id < 0) panic("ocfnull: crypto device cannot initialize!"); crypto_register(null_id, CRYPTO_DES_CBC, 0, 0, null_newsession, null_freesession, null_process, NULL); #define REGISTER(alg) \ crypto_register(null_id,alg,0,0,NULL,NULL,NULL,NULL) REGISTER(CRYPTO_3DES_CBC); REGISTER(CRYPTO_RIJNDAEL128_CBC); REGISTER(CRYPTO_MD5); REGISTER(CRYPTO_SHA1); REGISTER(CRYPTO_MD5_HMAC); REGISTER(CRYPTO_SHA1_HMAC); #undef REGISTER return 0; }
/*
 * PCI probe for the PA Semi DMA engine's crypto function (FNU).
 * Allocates the softc and initial session table, maps the DMA and IOB
 * register windows, enables the DMA common config and tx command,
 * sets up the per-channel tx rings, and registers the supported
 * algorithms with OCF.  Returns 0 on success or a negative errno;
 * all failure paths funnel through pasemi_dma_remove() for teardown.
 */
static int __devinit pasemi_dma_probe(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct pasemi_softc *sc;
	int ret, i;

	DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);

	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return -ENOMEM;

	softc_device_init(sc, DRV_NAME, 1, pasemi_methods);

	pci_set_drvdata(pdev, sc);

	spin_lock_init(&sc->sc_chnlock);

	/* Initial session pointer table; starts out all-NULL (kzalloc). */
	sc->sc_sessions = (struct pasemi_session **)
		kzalloc(PASEMI_INITIAL_SESSIONS *
			sizeof(struct pasemi_session *), GFP_ATOMIC);
	if (sc->sc_sessions == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
	sc->sc_lastchn = 0;
	/* The crypto function's irqs/channels start at offset 6. */
	sc->base_irq = pdev->irq + 6;
	sc->base_chan = 6;
	sc->sc_cid = -1;	/* not yet registered with OCF */
	sc->dma_pdev = pdev;

	/* The I/O bridge is a separate PCI device (PA Semi id 0xa001). */
	sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!sc->iob_pdev) {
		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
		ret = -ENODEV;
		goto out;
	}

	/* This is hardcoded and ugly, but we have some firmware versions
	 * who don't provide the register space in the device tree. Luckily
	 * they are at well-known locations so we can just do the math here.
	 */
	sc->dma_regs =
		ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
	sc->iob_regs =
		ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
	if (!sc->dma_regs || !sc->iob_regs) {
		dev_err(&pdev->dev, "Can't map registers\n");
		ret = -ENODEV;
		goto out;
	}

	/* NOTE(review): dma_status is a file-scope pointer (not in sc);
	 * 0xfd800000 is presumably the well-known DMA status window for
	 * this SoC — confirm against the hardware manual. */
	dma_status = __ioremap(0xfd800000, 0x1000, 0);
	if (!dma_status) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "Can't map dmastatus space\n");
		goto out;
	}

	/* One tx ring descriptor per possible FNU channel (8 slots). */
	sc->tx = (struct pasemi_fnu_txring *)
		kzalloc(sizeof(struct pasemi_fnu_txring) * 8, GFP_KERNEL);
	if (!sc->tx) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the h/w */
	out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
		 (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
		  PAS_DMA_COM_CFG_FWF));
	out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	/* Bring up each channel's tx resources, counting as we go. */
	for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
		sc->sc_num_channels++;
		ret = pasemi_dma_setup_tx_resources(sc, i);
		if (ret)
			goto out;
	}

	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
					 CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
		ret = -ENXIO;
		goto out;
	}

	/* register algorithms with the framework */
	printk(DRV_NAME ":");

	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);

	return 0;

out:
	/* Common teardown for every partially-initialized state above. */
	pasemi_dma_remove(pdev);
	return ret;
}
/*
 * Probe/attach for the Freescale SEC (talitos) engine: allocate the
 * softc, hook the interrupt, map the register window, size channels
 * and execution units (from the device tree on CONFIG_PPC_MERGE
 * kernels, from the chip id register otherwise), allocate per-channel
 * FIFOs, reset/init the hardware, and register algorithms with OCF.
 */
static int talitos_probe(struct platform_device *pdev)
#endif	/* closes the conditional (opened above this view) selecting the
	 * OF vs. platform_device probe prototype */
{
	struct talitos_softc *sc = NULL;
	struct resource *r;
#ifdef CONFIG_PPC_MERGE
	struct device *device = &ofdev->dev;
	struct device_node *np = ofdev->node;
	const unsigned int *prop;
	int err;
	struct resource res;
#endif
	static int num_chips = 0;	/* instance counter across probes */
	int rc;
	int i;

	DPRINTF("%s()\n", __FUNCTION__);

	sc = (struct talitos_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return -ENOMEM;
	memset(sc, 0, sizeof(*sc));

	softc_device_init(sc, DRV_NAME, num_chips, talitos_methods);

	/* Sentinels so the error path knows what was never acquired. */
	sc->sc_irq = -1;
	sc->sc_cid = -1;
#ifndef CONFIG_PPC_MERGE
	sc->sc_dev = pdev;
#endif
	sc->sc_num = num_chips++;
#ifdef CONFIG_PPC_MERGE
	dev_set_drvdata(device, sc);
#else
	platform_set_drvdata(sc->sc_dev, sc);
#endif

	/* get the irq line */
#ifdef CONFIG_PPC_MERGE
	err = of_address_to_resource(np, 0, &res);
	if (err)
		return -EINVAL;
	r = &res;
	sc->sc_irq = irq_of_parse_and_map(np, 0);
#else
	/* get a pointer to the register memory */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sc->sc_irq = platform_get_irq(pdev, 0);
#endif
	rc = request_irq(sc->sc_irq, talitos_intr, 0,
			device_get_nameunit(sc->sc_cdev), sc);
	if (rc) {
		printk(KERN_ERR "%s: failed to hook irq %d\n",
			device_get_nameunit(sc->sc_cdev), sc->sc_irq);
		sc->sc_irq = -1;	/* mark as not held for cleanup */
		goto out;
	}

	sc->sc_base_addr = (ocf_iomem_t) ioremap(r->start,
			(r->end - r->start));
	if (!sc->sc_base_addr) {
		printk(KERN_ERR "%s: failed to ioremap\n",
			device_get_nameunit(sc->sc_cdev));
		goto out;
	}

	/* figure out our SEC's properties and capabilities */
	sc->sc_chiprev =
		(u64)talitos_read(sc->sc_base_addr + TALITOS_ID) << 32
		| talitos_read(sc->sc_base_addr + TALITOS_ID_HI);
	DPRINTF("sec id 0x%llx\n", sc->sc_chiprev);

#ifdef CONFIG_PPC_MERGE
	/* get SEC properties from device tree, defaulting to SEC 2.0 */
	prop = of_get_property(np, "num-channels", NULL);
	sc->sc_num_channels = prop ? *prop : TALITOS_NCHANNELS_SEC_2_0;
	prop = of_get_property(np, "channel-fifo-len", NULL);
	sc->sc_chfifo_len = prop ? *prop : TALITOS_CHFIFOLEN_SEC_2_0;
	prop = of_get_property(np, "exec-units-mask", NULL);
	sc->sc_exec_units = prop ? *prop : TALITOS_HAS_EUS_SEC_2_0;
	prop = of_get_property(np, "descriptor-types-mask", NULL);
	sc->sc_desc_types = prop ? *prop : TALITOS_HAS_DESCTYPES_SEC_2_0;
#else
	/* bulk should go away with openfirmware flat device tree support */
	if (sc->sc_chiprev & TALITOS_ID_SEC_2_0) {
		sc->sc_num_channels = TALITOS_NCHANNELS_SEC_2_0;
		sc->sc_chfifo_len = TALITOS_CHFIFOLEN_SEC_2_0;
		sc->sc_exec_units = TALITOS_HAS_EUS_SEC_2_0;
		sc->sc_desc_types = TALITOS_HAS_DESCTYPES_SEC_2_0;
	} else {
		printk(KERN_ERR "%s: failed to id device\n",
			device_get_nameunit(sc->sc_cdev));
		goto out;
	}
#endif

	/* + 1 is for the meta-channel lock used by the channel scheduler */
	sc->sc_chnfifolock = (spinlock_t *) kmalloc(
		(sc->sc_num_channels + 1) * sizeof(spinlock_t), GFP_KERNEL);
	if (!sc->sc_chnfifolock)
		goto out;
	for (i = 0; i < sc->sc_num_channels + 1; i++) {
		spin_lock_init(&sc->sc_chnfifolock[i]);
	}

	/* Last algorithm dispatched per channel (round-robin state). */
	sc->sc_chnlastalg = (int *) kmalloc(
		sc->sc_num_channels * sizeof(int), GFP_KERNEL);
	if (!sc->sc_chnlastalg)
		goto out;
	memset(sc->sc_chnlastalg, 0, sc->sc_num_channels * sizeof(int));

	/* One descriptor/cryptop FIFO of sc_chfifo_len slots per channel. */
	sc->sc_chnfifo = (struct desc_cryptop_pair **) kmalloc(
		sc->sc_num_channels * sizeof(struct desc_cryptop_pair *),
		GFP_KERNEL);
	if (!sc->sc_chnfifo)
		goto out;
	for (i = 0; i < sc->sc_num_channels; i++) {
		sc->sc_chnfifo[i] = (struct desc_cryptop_pair *) kmalloc(
			sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair),
			GFP_KERNEL);
		if (!sc->sc_chnfifo[i])
			goto out;
		memset(sc->sc_chnfifo[i], 0,
			sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair));
	}

	/* reset and initialize the SEC h/w device */
	talitos_reset_device(sc);
	talitos_init_device(sc);

	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
		CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		printk(KERN_ERR "%s: could not get crypto driver id\n",
			device_get_nameunit(sc->sc_cdev));
		goto out;
	}

	/* register algorithms with the framework, keyed on the execution
	 * units this chip revision actually has */
	printk("%s:", device_get_nameunit(sc->sc_cdev));

	if (sc->sc_exec_units & TALITOS_HAS_EU_RNG) {
		printk(" rng");
#ifdef CONFIG_OCF_RANDOMHARVEST
		talitos_rng_init(sc);
		crypto_rregister(sc->sc_cid, talitos_read_random, sc);
#endif
	}
	if (sc->sc_exec_units & TALITOS_HAS_EU_DEU) {
		printk(" des/3des");
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	}
	if (sc->sc_exec_units & TALITOS_HAS_EU_AESU) {
		printk(" aes");
		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	}
	if (sc->sc_exec_units & TALITOS_HAS_EU_MDEU) {
		printk(" md5");
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
		/* HMAC support only with IPsec for now */
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
		printk(" sha1");
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
		/* HMAC support only with IPsec for now */
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	}
	printk("\n");
	return 0;

out:
#ifndef CONFIG_PPC_MERGE
	/* talitos_remove() frees whatever the paths above allocated. */
	talitos_remove(pdev);
#endif
	return -ENOMEM;
}
/* * our driver startup and shutdown routines */ static int mv_cesa_ocf_init(struct platform_device *pdev) { #if defined(CONFIG_MV78200) || defined(CONFIG_MV632X) if (MV_FALSE == mvSocUnitIsMappedToThisCpu(CESA)) { dprintk("CESA is not mapped to this CPU\n"); return -ENODEV; } #endif dprintk("%s\n", __FUNCTION__); memset(&mv_cesa_dev, 0, sizeof(mv_cesa_dev)); softc_device_init(&mv_cesa_dev, "MV CESA", 0, mv_cesa_methods); cesa_ocf_id = crypto_get_driverid(softc_get_device(&mv_cesa_dev),CRYPTOCAP_F_HARDWARE); if (cesa_ocf_id < 0) panic("MV CESA crypto device cannot initialize!"); dprintk("%s,%d: cesa ocf device id is %d \n", __FILE__, __LINE__, cesa_ocf_id); /* CESA unit is auto power on off */ #if 0 if (MV_FALSE == mvCtrlPwrClckGet(CESA_UNIT_ID,0)) { printk("\nWarning CESA %d is Powered Off\n",0); return EINVAL; } #endif memset(&cesa_device, 0, sizeof(struct cesa_dev)); /* Get the IRQ, and crypto memory regions */ { struct resource *res; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); if (!res) return -ENXIO; cesa_device.sram = ioremap(res->start, res->end - res->start + 1); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); if (!res) { iounmap(cesa_device.sram); return -ENXIO; } cesa_device.reg = ioremap(res->start, res->end - res->start + 1); cesa_device.irq = platform_get_irq(pdev, 0); cesa_device.plat_data = pdev->dev.platform_data; setup_tdma_mbus_windows(&cesa_device); } if( MV_OK != mvCesaInit(CESA_OCF_MAX_SES*5, CESA_Q_SIZE, cesa_device.reg, NULL) ) { printk("%s,%d: mvCesaInit Failed. 
\n", __FILE__, __LINE__); return EINVAL; } /* clear and unmask Int */ MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG, 0); #ifndef CESA_OCF_POLLING MV_REG_WRITE( MV_CESA_ISR_MASK_REG, MV_CESA_CAUSE_ACC_DMA_MASK); #endif #ifdef CESA_OCF_TASKLET tasklet_init(&cesa_ocf_tasklet, cesa_callback, (unsigned int) 0); #endif /* register interrupt */ if( request_irq( cesa_device.irq, cesa_interrupt_handler, (IRQF_DISABLED) , "cesa", &cesa_ocf_id) < 0) { printk("%s,%d: cannot assign irq %x\n", __FILE__, __LINE__, cesa_device.reg); return EINVAL; } memset(cesa_ocf_sessions, 0, sizeof(struct cesa_ocf_data *) * CESA_OCF_MAX_SES); #define REGISTER(alg) \ crypto_register(cesa_ocf_id, alg, 0,0) REGISTER(CRYPTO_AES_CBC); REGISTER(CRYPTO_DES_CBC); REGISTER(CRYPTO_3DES_CBC); REGISTER(CRYPTO_MD5); REGISTER(CRYPTO_MD5_HMAC); REGISTER(CRYPTO_SHA1); REGISTER(CRYPTO_SHA1_HMAC); #undef REGISTER return 0; }
/*
 * Autoconf attach for Bluesteel/Broadcom uBsec crypto accelerators:
 * set per-chip capability flags from the PCI vendor/product id, map
 * the register BAR, establish the interrupt, pre-allocate the DMA
 * queue entries, register supported algorithms with opencrypto, and
 * bring up the board (plus its RNG where supported).
 */
void
ubsec_attach(struct device *parent, struct device *self, void *aux)
{
	struct ubsec_softc *sc = (struct ubsec_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	pcireg_t memtype;
	const char *intrstr = NULL;
	struct ubsec_dma *dmap;
	bus_size_t iosize;
	u_int32_t i;
	int algs[CRYPTO_ALGORITHM_MAX + 1];

	/* Job queues: pending, on-chip, and the MCR2/MCR4 variants. */
	SIMPLEQ_INIT(&sc->sc_queue);
	SIMPLEQ_INIT(&sc->sc_qchip);
	SIMPLEQ_INIT(&sc->sc_queue2);
	SIMPLEQ_INIT(&sc->sc_qchip2);
	SIMPLEQ_INIT(&sc->sc_queue4);
	SIMPLEQ_INIT(&sc->sc_qchip4);

	sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR;
	sc->sc_maxaggr = UBS_MIN_AGGR;

	/*
	 * Capability flags keyed on the exact chip: key ops, RNG, long
	 * context, hardware normalization, big keys, AES, multiple MCRs.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BLUESTEEL &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BLUESTEEL_5601)
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5802 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5805))
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5820 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5822))
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;

	if ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5821) ||
	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SUN &&
	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_SCA1K ||
	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_5821))) {
		sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY |
		    BS_STAT_MCR2_ALLEMPTY;
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;
	}

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5823 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5825))
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY |
		    UBS_FLAGS_AES;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5860 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5861 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5862)) {
		sc->sc_maxaggr = UBS_MAX_AGGR;
		sc->sc_statmask |=
		    BS_STAT_MCR1_ALLEMPTY | BS_STAT_MCR2_ALLEMPTY |
		    BS_STAT_MCR3_ALLEMPTY | BS_STAT_MCR4_ALLEMPTY;
		sc->sc_flags |= UBS_FLAGS_MULTIMCR | UBS_FLAGS_HWNORM |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_AES |
		    UBS_FLAGS_KEY | UBS_FLAGS_BIGKEY;
#if 0
		/* The RNG is not yet supported */
		sc->sc_flags |= UBS_FLAGS_RNG | UBS_FLAGS_RNG4;
#endif
	}

	/* Map the register BAR. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BS_BAR);
	if (pci_mapreg_map(pa, BS_BAR, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &iosize, 0)) {
		printf(": can't find mem space\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ubsec_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
		return;
	}

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		pci_intr_disestablish(pc, sc->sc_ih);
		bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
		return;
	}

	/*
	 * Pre-allocate queue entries with their DMA chunks; a partial
	 * allocation just breaks out with fewer entries on the freelist.
	 */
	SIMPLEQ_INIT(&sc->sc_freequeue);
	dmap = sc->sc_dmaa;
	for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
		struct ubsec_q *q;

		q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q),
		    M_DEVBUF, M_NOWAIT);
		if (q == NULL) {
			printf(": can't allocate queue buffers\n");
			break;
		}

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk),
		    &dmap->d_alloc, 0)) {
			printf(": can't allocate dma buffers\n");
			free(q, M_DEVBUF, 0);
			break;
		}
		dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;

		q->q_dma = dmap;
		sc->sc_queuea[i] = q;

		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	}

	/* Advertise algorithms; AES only when the chip supports it. */
	bzero(algs, sizeof(algs));
	algs[CRYPTO_3DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	if (sc->sc_flags & UBS_FLAGS_AES)
		algs[CRYPTO_AES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	crypto_register(sc->sc_cid, algs, ubsec_newsession,
	    ubsec_freesession, ubsec_process);

	/*
	 * Reset Broadcom chip
	 */
	ubsec_reset_board(sc);

	/*
	 * Init Broadcom specific PCI settings
	 */
	ubsec_init_pciregs(pa);

	/*
	 * Init Broadcom chip
	 */
	ubsec_init_board(sc);

	printf(": 3DES MD5 SHA1");
	if (sc->sc_flags & UBS_FLAGS_AES)
		printf(" AES");

#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		if (sc->sc_flags & UBS_FLAGS_RNG4)
			sc->sc_statmask |= BS_STAT_MCR4_DONE;
		else
			sc->sc_statmask |= BS_STAT_MCR2_DONE;

		/* Allocate the RNG's MCR, bypass context, and buffer;
		 * back out what was obtained on any failure. */
		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
		    &sc->sc_rng.rng_q.q_mcr, 0))
			goto skip_rng;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass),
		    &sc->sc_rng.rng_q.q_ctx, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (ubsec_dma_malloc(sc, sizeof(u_int32_t) *
		    UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		/* Harvest entropy roughly every 10ms (hz/100 ticks). */
		timeout_set(&sc->sc_rngto, ubsec_rng, sc);
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
		printf(" RNG");
skip_rng:
	;
	}
#endif /* UBSEC_NO_RNG */

	if (sc->sc_flags & UBS_FLAGS_KEY) {
		sc->sc_statmask |= BS_STAT_MCR2_DONE;
	}

	printf(", %s\n", intrstr);
}
/*
 * Attach an interface that successfully probed.
 */
static int
xlp_sec_attach(device_t dev)
{
	/* Algorithm ids paired with their printable names for diagnostics. */
	static const struct {
		int alg;
		const char *name;
	} xlp_algs[] = {
		{ CRYPTO_DES_CBC,	"CRYPTO_DES_CBC" },
		{ CRYPTO_3DES_CBC,	"CRYPTO_3DES_CBC" },
		{ CRYPTO_AES_CBC,	"CRYPTO_AES_CBC" },
		{ CRYPTO_ARC4,		"CRYPTO_ARC4" },
		{ CRYPTO_MD5,		"CRYPTO_MD5" },
		{ CRYPTO_SHA1,		"CRYPTO_SHA1" },
		{ CRYPTO_MD5_HMAC,	"CRYPTO_MD5_HMAC" },
		{ CRYPTO_SHA1_HMAC,	"CRYPTO_SHA1_HMAC" },
	};
	struct xlp_sec_softc *sc;
	uint64_t pcibase;
	int qid0, nq;
	int mhz, nodeid, i;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	nodeid = nlm_get_device_node(pci_get_slot(dev));
	mhz = nlm_set_device_frequency(nodeid, DFS_DEVICE_SAE, 250);
	if (bootverbose)
		device_printf(dev, "SAE Freq: %dMHz\n", mhz);

	if (pci_get_device(dev) == PCI_DEVICE_ID_NLM_SAE) {
		device_set_desc(dev, "XLP Security Accelerator");

		sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
		if (sc->sc_cid < 0) {
			printf("xlp_sec - error : could not get the driver"
			    " id\n");
			goto error_exit;
		}

		/* Register every supported algorithm; failures are only
		 * reported, not fatal. */
		for (i = 0; i < (int)(sizeof(xlp_algs) / sizeof(xlp_algs[0]));
		    i++) {
			if (crypto_register(sc->sc_cid, xlp_algs[i].alg,
			    0, 0) != 0)
				printf("register failed for %s\n",
				    xlp_algs[i].name);
		}

		/* Record the hardware queue range owned by this unit. */
		pcibase = nlm_get_sec_pcibase(nodeid);
		qid0 = nlm_qidstart(pcibase);
		nq = nlm_qnum(pcibase);
		sc->sec_vc_start = qid0;
		sc->sec_vc_end = qid0 + nq - 1;
	}

	if (xlp_sec_init(sc) != 0)
		goto error_exit;
	if (bootverbose)
		device_printf(dev, "SEC Initialization complete!\n");
	return (0);

error_exit:
	return (ENXIO);
}