int adf_ae_init(struct adf_accel_dev *accel_dev)
{
	struct adf_fw_loader_data *loader_data;
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;

	if (!hw_device->fw_name)
		return 0;

	loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
	if (!loader_data)
		return -ENOMEM;

	accel_dev->fw_loader = loader_data;
	if (qat_hal_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to init the AEs\n");
		kfree(loader_data);
		return -EFAULT;
	}
	if (adf_ae_reset(accel_dev, 0)) {
		dev_err(&GET_DEV(accel_dev), "Failed to reset the AEs\n");
		qat_hal_deinit(loader_data->fw_loader);
		kfree(loader_data);
		return -EFAULT;
	}
	return 0;
}
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
	struct adf_admin_comms *admin;
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
	void __iomem *csr = pmisc->virt_addr;
	void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
	uint64_t reg_val;

	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
			     dev_to_node(&GET_DEV(accel_dev)));
	if (!admin)
		return -ENOMEM;
	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
					       &admin->phy_addr, GFP_KERNEL);
	if (!admin->virt_addr) {
		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
		kfree(admin);
		return -ENOMEM;
	}
	reg_val = (uint64_t)admin->phy_addr;
	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
	mutex_init(&admin->lock);
	admin->mailbox_addr = mailbox;
	accel_dev->admin = admin;
	return 0;
}
static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
				  const char *section,
				  const struct adf_user_cfg_key_val *key_val)
{
	if (key_val->type == ADF_HEX) {
		long *ptr = (long *)key_val->val;
		long val = *ptr;

		if (adf_cfg_add_key_value_param(accel_dev, section,
						key_val->key, (void *)val,
						key_val->type)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add hex keyvalue.\n");
			return -EFAULT;
		}
	} else {
		if (adf_cfg_add_key_value_param(accel_dev, section,
						key_val->key, key_val->val,
						key_val->type)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add keyvalue.\n");
			return -EFAULT;
		}
	}
	return 0;
}
int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
{
	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	void *uof_addr;
	uint32_t uof_size;

	if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
			     &accel_dev->accel_pci_dev.pci_dev->dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to load firmware %s\n",
			hw_device->fw_name);
		return -EFAULT;
	}

	uof_size = loader_data->uof_fw->size;
	uof_addr = (void *)loader_data->uof_fw->data;
	if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
		dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
		goto out_err;
	}
	if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
		dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
		goto out_err;
	}
	return 0;

out_err:
	adf_ae_fw_release(accel_dev);
	return -EFAULT;
}
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_aead_init_sessions(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       uint8_t ivlen,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kmalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		if (!sg->length)
			continue;
		bufl->bufers[bufs].addr = dma_map_single(dev, sg_virt(sg),
							 sg->length,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}
	bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
						 DMA_BIDIRECTIONAL);
	bufl->bufers[bufs].len = ivlen;
	if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
		goto err;
	bufs++;

	for_each_sg(sgl, sg, n, i) {
		int y = i + bufs;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
	}
int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ctx->key_sz = vlen;
	ret = -EINVAL;
	/* invalid key size provided */
	if (!qat_rsa_enc_fn_id(ctx->key_sz))
		goto err;

	ret = -ENOMEM;
	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
	if (!ctx->n)
		goto err;

	memcpy(ctx->n, ptr, ctx->key_sz);
	return 0;
err:
	ctx->key_sz = 0;
	ctx->n = NULL;
	return ret;
}
static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			     unsigned int len)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	qat_dh_clear_ctx(dev, ctx);

	ret = qat_dh_set_params(ctx, &params);
	if (ret < 0)
		return ret;

	ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
				      GFP_KERNEL);
	if (!ctx->xa) {
		qat_dh_clear_ctx(dev, ctx);
		return -ENOMEM;
	}
	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
	       params.key_size);

	return 0;
}
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}
int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value, size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	ret = -ENOMEM;
	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}
static void adf_device_reset_worker(struct work_struct *work)
{
	struct adf_reset_dev_data *reset_data =
		container_of(work, struct adf_reset_dev_data, reset_work);
	struct adf_accel_dev *accel_dev = reset_data->accel_dev;

	adf_dev_restarting_notify(accel_dev);
	adf_dev_stop(accel_dev);
	adf_dev_restore(accel_dev);
	if (adf_dev_start(accel_dev)) {
		/* The device hanged and we can't restart it so stop here */
		dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
		/* In sync mode the caller still owns reset_data and waits on
		 * the completion, so only free it here for async resets. */
		if (reset_data->mode == ADF_DEV_RESET_ASYNC)
			kfree(reset_data);
		WARN(1, "QAT: device restart failed. Device is unusable\n");
		return;
	}
	adf_dev_restarted_notify(accel_dev);
	clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);

	/* The dev is back alive. Notify the caller if in sync mode */
	if (reset_data->mode == ADF_DEV_RESET_SYNC)
		complete(&reset_data->compl);
	else
		kfree(reset_data);
}
int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
		  const void *value, size_t vlen)
{
	struct qat_rsa_ctx *ctx = context;
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	/* In FIPS mode only allow key size 2K & 3K */
	if (fips_enabled && (vlen != 256 && vlen != 384)) {
		pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
		goto err;
	}

	ret = -ENOMEM;
	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}
static QUERY(sniff_status_show) {
	char *uid = *(va_arg(ap, char **));
	session_t *s = session_find(uid);
	struct pcap_stat stats;

	if (!s)
		return -1;

	if (!s->connected)
		return 0;

	if (!s->priv) {
		debug_error("sniff_status_show() s->priv NULL\n");
		return -1;
	}

	/* Device: DEVICE (PROMISC?) */

	/* some stats */
	memset(&stats, 0, sizeof(struct pcap_stat));
	if (pcap_stats(GET_DEV(s), &stats) == -1) {
		debug_error("sniff_status_show() pcap_stats() failed\n");
		return -1;
	}

	debug("pcap_stats() recv: %d drop: %d ifdrop: %d\n",
	      stats.ps_recv, stats.ps_drop, stats.ps_ifdrop);

	print("sniff_pkt_rcv", session_name(s), ekg_itoa(stats.ps_recv));
	print("sniff_pkt_drop", session_name(s), ekg_itoa(stats.ps_drop));
	print("sniff_conn_db", session_name(s),
	      ekg_itoa(list_count(tcp_connections)));

	return 0;
}
int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
		  const void *value, size_t vlen)
{
	struct qat_rsa_ctx *ctx = context;
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
		ctx->e = NULL;
		return -EINVAL;
	}

	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
	if (!ctx->e)
		return -ENOMEM;

	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
}
static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);

	if (unlikely(!params->p || !params->g))
		return -EINVAL;

	if (qat_dh_check_params_length(params->p_size << 3))
		return -EINVAL;

	ctx->p_size = params->p_size;
	ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		return -ENOMEM;
	memcpy(ctx->p, params->p, ctx->p_size);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == 0x02) {
		ctx->g2 = true;
		return 0;
	}

	ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
	if (!ctx->g) {
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		ctx->p = NULL;
		return -ENOMEM;
	}
	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
	       params->g_size);

	return 0;
}
static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	int i;
	char **names;
	struct msix_entry *entries;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	uint32_t msix_num_entries = hw_data->num_banks + 1;

	entries = kzalloc_node(msix_num_entries * sizeof(*entries),
			       GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
	if (!entries)
		return -ENOMEM;

	names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
	if (!names) {
		kfree(entries);
		return -ENOMEM;
	}
	for (i = 0; i < msix_num_entries; i++) {
		*(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
		if (!(*(names + i)))
			goto err;
	}
	accel_dev->accel_pci_dev.msix_entries.entries = entries;
	accel_dev->accel_pci_dev.msix_entries.names = names;
	return 0;
err:
	for (i = 0; i < msix_num_entries; i++)
		kfree(*(names + i));
	kfree(entries);
	kfree(names);
	return -ENOMEM;
}
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
{
	struct adf_admin_comms *admin = accel_dev->admin;

	if (!admin)
		return;

	if (admin->virt_addr)
		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
				  admin->virt_addr, admin->phy_addr);

	dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024,
			 DMA_TO_DEVICE);
	mutex_destroy(&admin->lock);
	kfree(admin);
	accel_dev->admin = NULL;
}
static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	qat_dh_clear_ctx(dev, ctx);
	qat_crypto_put_instance(ctx->inst);
}
static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
				   struct adf_user_cfg_ctl_data *ctl_data)
{
	struct adf_user_cfg_key_val key_val;
	struct adf_user_cfg_key_val *params_head;
	struct adf_user_cfg_section section, *section_head;

	section_head = ctl_data->config_section;

	while (section_head) {
		if (copy_from_user(&section, (void __user *)section_head,
				   sizeof(*section_head))) {
			dev_err(&GET_DEV(accel_dev),
				"failed to copy section info\n");
			goto out_err;
		}

		if (adf_cfg_section_add(accel_dev, section.name)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add section.\n");
			goto out_err;
		}

		params_head = section_head->params;

		while (params_head) {
			if (copy_from_user(&key_val,
					   (void __user *)params_head,
					   sizeof(key_val))) {
				dev_err(&GET_DEV(accel_dev),
					"Failed to copy keyvalue.\n");
				goto out_err;
			}
			if (adf_add_key_value_data(accel_dev, section.name,
						   &key_val)) {
				goto out_err;
			}
			params_head = key_val.next;
		}
		section_head = section.next;
	}
	return 0;
out_err:
	adf_cfg_del_all(accel_dev);
	return -EFAULT;
}
static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
{
	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
		   (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));

	if (adf_iov_putmsg(accel_dev, msg, 0))
		dev_err(&GET_DEV(accel_dev),
			"Failed to send Shutdown event to PF\n");
}
static COMMAND(sniff_command_disconnect) {
	if (!session_connected_get(session)) {
		printq("not_connected", session_name(session));
		return -1;
	}

	protocol_disconnected_emit(session, NULL, EKG_DISCONNECT_USER);

	if (!GET_DEV(session)) {
		debug_error("sniff_command_disconnect() not dev?!\n");
		return -1;
	}

	pcap_close(GET_DEV(session));
	session->priv = NULL;
	return 0;
}
static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
{
	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
		   (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));

	if (adf_iov_putmsg(accel_dev, msg, 0)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to send Init event to PF\n");
		return -EFAULT;
	}
	return 0;
}
static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
				   unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (!accel_dev) {
		pr_err("QAT: Device %d not found\n", ctl_data->device_id);
		ret = -ENODEV;
		goto out;
	}

	if (!adf_dev_started(accel_dev)) {
		dev_info(&GET_DEV(accel_dev),
			 "Starting acceleration device qat_dev%d.\n",
			 ctl_data->device_id);
		ret = adf_dev_init(accel_dev);
		if (!ret)
			ret = adf_dev_start(accel_dev);
	} else {
		dev_info(&GET_DEV(accel_dev),
			 "Acceleration device qat_dev%d already started.\n",
			 ctl_data->device_id);
	}
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
			ctl_data->device_id);
		adf_dev_stop(accel_dev);
		adf_dev_shutdown(accel_dev);
	}
out:
	kfree(ctl_data);
	return ret;
}
static void adf_remove(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	if (!accel_dev) {
		pr_err("QAT: Driver removal failed\n");
		return;
	}
	if (adf_dev_stop(accel_dev))
		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");

	adf_disable_aer(accel_dev);
	adf_cleanup_accel(accel_dev);
}
void adf_dev_restore(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

	if (hw_device->reset_device) {
		dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
			 accel_dev->accel_id);
		hw_device->reset_device(accel_dev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
	}
}
static QUERY(sniff_session_deinit) {
	char *session = *(va_arg(ap, char **));
	session_t *s = session_find(session);

	if (!s || !s->priv || s->plugin != &sniff_plugin)
		return 1;

	debug("sniff closing pcap dev: 0x%x\n", s->priv);

	pcap_close(GET_DEV(s));
	s->priv = NULL;
	return 0;
}
void adf_reset_sbr(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	struct pci_dev *parent = pdev->bus->self;
	uint16_t bridge_ctl = 0;

	if (!parent)
		parent = pdev;

	if (!pci_wait_for_pending_transaction(pdev))
		dev_info(&GET_DEV(accel_dev),
			 "Transaction still in progress. Proceeding\n");

	dev_info(&GET_DEV(accel_dev), "Secondary bus reset\n");

	pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
	bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
	msleep(100);
	bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
	msleep(100);
}
void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
{
	struct adf_admin_comms *admin = accel_dev->admin;

	if (!admin)
		return;

	if (admin->virt_addr)
		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
				  admin->virt_addr, admin->phy_addr);

	mutex_destroy(&admin->lock);
	kfree(admin);
	accel_dev->admin = NULL;
}
static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int ret;

	/* Free the old key if any */
	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}

	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
	ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen);
	if (ret < 0)
		goto free;

	if (!ctx->n || !ctx->e) {
		/* invalid key provided */
		ret = -EINVAL;
		goto free;
	}
	return 0;

free:
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
		ctx->d = NULL;
	}
	if (ctx->e) {
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
		ctx->e = NULL;
	}
	if (ctx->n) {
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
		ctx->n = NULL;
		ctx->key_sz = 0;
	}
	return ret;
}
int adf_ae_start(struct adf_accel_dev *accel_dev)
{
	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);

	for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
		if (hw_data->ae_mask & (1 << ae)) {
			qat_hal_start(loader_data->fw_loader, ae, 0xFF);
			ae_ctr++;
		}
	}
	dev_info(&GET_DEV(accel_dev),
		 "qat_dev%d started %d acceleration engines\n",
		 accel_dev->accel_id, ae_ctr);
	return 0;
}