int olTraceInit(OLTraceCtx **pctx, OLTraceParams *params)
{
	OLTraceCtx *ctx = malloc(sizeof(OLTraceCtx));
	if (!ctx)	/* report allocation failure instead of crashing below */
		return -1;

	ctx->p = *params;

	alloc_bufs(ctx);
	init_blur(ctx);

	*pctx = ctx;
	return 0;
}
static int run_test(void)
{
	int ret;

	if (hints->ep_attr->type == FI_EP_MSG)
		ret = ft_init_fabric_cm();
	else
		ret = ft_init_fabric();
	if (ret)
		return ret;

	alloc_bufs();

	ret = run_test_loop();

	return ret;
}
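/*
 * A minimal sketch of the setup that typically precedes run_test() in a
 * fabtests-style program.  It assumes the fabtests globals and helpers
 * (hints, ft_init_fabric*, run_test_loop) declared elsewhere; only
 * fi_allocinfo() and the fi_info fields are standard libfabric API, and
 * setup_and_run() itself is a hypothetical name.
 */
#include <errno.h>
#include <rdma/fabric.h>

extern struct fi_info *hints;	/* fabtests global, defined in shared.c */

static int setup_and_run(void)
{
	hints = fi_allocinfo();
	if (!hints)
		return -ENOMEM;

	/* Connection-oriented endpoints take the CM init path in
	 * run_test(); unconnected endpoints use ft_init_fabric(). */
	hints->ep_attr->type = FI_EP_MSG;
	hints->caps = FI_MSG;

	return run_test();
}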
int olTraceReInit(OLTraceCtx *ctx, OLTraceParams *params)
{
	/* Blur kernel spans ~6 sigma; force the size odd for a center tap. */
	unsigned int new_ksize = ((unsigned int)round(params->sigma * 6 + 1)) | 1;

	if (ctx->p.mode != params->mode ||
	    ctx->p.width != params->width ||
	    ctx->p.height != params->height ||
	    ctx->ksize != new_ksize) {
		/* Mode, geometry, or kernel size changed: reallocate. */
		free_bufs(ctx);
		ctx->p = *params;
		alloc_bufs(ctx);
	} else {
		ctx->p = *params;
	}

	init_blur(ctx);
	return 0;
}
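/*
 * A minimal usage sketch for the two entry points above.  It assumes the
 * companion libol calls olTrace(), olTraceFree() and olTraceDeinit(), and
 * the OLTraceParams/OLTraceResult fields shown here (mode, width, height,
 * sigma, threshold; result.objects); verify against the library's trace.h.
 * grab_frame() is a hypothetical 8-bit grayscale frame source.
 */
extern uint8_t *grab_frame(void);

int trace_demo(int nframes)
{
	OLTraceParams params = {
		.mode = OL_TRACE_THRESHOLD,
		.width = 640,
		.height = 480,
		.threshold = 128,
	};
	OLTraceCtx *ctx;
	OLTraceResult result;
	int i;

	if (olTraceInit(&ctx, &params))
		return -1;

	for (i = 0; i < nframes; i++) {
		/* stride equals width for a tightly packed buffer */
		olTrace(ctx, grab_frame(), params.width, &result);
		/* ... consume result.objects ... */
		olTraceFree(&result);
	}

	/* Switch modes in place; olTraceReInit() reallocates only when
	 * the geometry, mode, or derived kernel size actually changes. */
	params.mode = OL_TRACE_CANNY;
	params.sigma = 1.0f;
	olTraceReInit(ctx, &params);

	olTraceDeinit(ctx);
	return 0;
}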
/*
 * NAME:	resync_comp
 *
 * DESCRIPTION: Resync the component. Iterate through the raid unit a line at
 *		a time, read from the good device(s) and write the resync
 *		device.
 *
 * PARAMETERS:	minor_t	    mnum - minor number identity of metadevice
 *		md_raidcs_t *cs  - child save struct
 *
 * RETURN:	none (void); success or failure is recorded in the unit
 *		state and reported via sysevents.
 *
 * LOCKS:	Expects Unit Reader Lock to be held across call. Acquires and
 *		releases Line Reader Lock for per-line I/O.
 */
static void
resync_comp(
	minor_t		mnum,
	md_raidcs_t	*cs
)
{
	mdi_unit_t	*ui;
	mr_unit_t	*un;
	mddb_recid_t	recids[2];
	rcs_state_t	state;
	md_dev64_t	dev_to_write;
	diskaddr_t	write_pwstart;
	diskaddr_t	write_devstart;
	md_dev64_t	dev;
	int		resync;
	int		i;
	int		single_read = 0;
	int		err = RAID_RESYNC_OKAY;	/* covers a zero-line unit */
	int		err_cnt;
	int		last_err;
	diskaddr_t	line;
	diskaddr_t	segsincolumn;
	size_t		bsize;
	uint_t		line_count;

	/*
	 * hs_state is the state of the hotspare on the column being resynced
	 * dev_state is the state of the resync target
	 */
	hs_cmds_t	hs_state;
	int		err_col = -1;
	diskaddr_t	resync_end_pos;

	ui = MDI_UNIT(mnum);
	ASSERT(ui != NULL);

	un = cs->cs_un;

	md_unit_readerexit(ui);
	un = (mr_unit_t *)md_io_writerlock(ui);
	un = (mr_unit_t *)md_unit_writerlock(ui);
	resync = un->un_resync_index;
	state = un->un_column[resync].un_devstate;
	line_count = un->un_maxio / un->un_segsize;
	if (line_count == 0) { /* handle the case of segsize > maxio */
		line_count = 1;
		bsize = un->un_maxio;
	} else
		bsize = line_count * un->un_segsize;

	un->un_resync_copysize = (uint_t)bsize;

	ASSERT(un->c.un_status & MD_UN_RESYNC_ACTIVE);
	ASSERT(un->un_column[resync].un_devflags &
	    (MD_RAID_COPY_RESYNC | MD_RAID_REGEN_RESYNC));

	/*
	 * if the column is not in resync then just bail out.
	 */
	if (! (un->un_column[resync].un_devstate & RCS_RESYNC)) {
		md_unit_writerexit(ui);
		md_io_writerexit(ui);
		un = (mr_unit_t *)md_unit_readerlock(ui);
		return;
	}
	SE_NOTIFY(EC_SVM_STATE, ESC_SVM_RESYNC_START, SVM_TAG_METADEVICE,
	    MD_UN2SET(un), MD_SID(un));

	/* identify device to write and its start block */

	if (un->un_column[resync].un_alt_dev != NODEV64) {
		if (raid_open_alt(un, resync)) {
			raid_set_state(un, resync, state, 0);
			md_unit_writerexit(ui);
			md_io_writerexit(ui);
			un = (mr_unit_t *)md_unit_readerlock(ui);
			cmn_err(CE_WARN, "md: %s: %s open failed, replace "
			    "terminated", md_shortname(MD_SID(un)),
			    md_devname(MD_UN2SET(un),
			    un->un_column[resync].un_alt_dev,
			    NULL, 0));
			SE_NOTIFY(EC_SVM_STATE, ESC_SVM_RESYNC_FAILED,
			    SVM_TAG_METADEVICE, MD_UN2SET(un), MD_SID(un));
			return;
		}
		ASSERT(un->un_column[resync].un_devflags &
		    MD_RAID_COPY_RESYNC);
		dev_to_write = un->un_column[resync].un_alt_dev;
		write_devstart = un->un_column[resync].un_alt_devstart;
		write_pwstart = un->un_column[resync].un_alt_pwstart;
		if (un->un_column[resync].un_devflags & MD_RAID_DEV_ERRED) {
			single_read = 0;
			hs_state = HS_BAD;
		} else {
			hs_state = HS_FREE;
			single_read = 1;
		}
		un->un_column[resync].un_devflags |= MD_RAID_WRITE_ALT;
	} else {
		dev_to_write = un->un_column[resync].un_dev;
		write_devstart = un->un_column[resync].un_devstart;
		write_pwstart = un->un_column[resync].un_pwstart;
		single_read = 0;
		hs_state = HS_FREE;
		ASSERT(un->un_column[resync].un_devflags &
		    MD_RAID_REGEN_RESYNC);
	}

	alloc_bufs(cs, dbtob(bsize));
	/* initialize pre-write area */
	if (init_pw_area(un, dev_to_write, write_pwstart, resync)) {
		un->un_column[resync].un_devflags &= ~MD_RAID_WRITE_ALT;
		if (un->un_column[resync].un_alt_dev != NODEV64) {
			raid_close_alt(un, resync);
		}
		md_unit_writerexit(ui);
		md_io_writerexit(ui);
		if (dev_to_write == un->un_column[resync].un_dev)
			hs_state = HS_BAD;
		err = RAID_RESYNC_WRERROR;
		goto resync_comp_error;
	}

	un->c.un_status &= ~MD_UN_RESYNC_CANCEL;
	segsincolumn = un->un_segsincolumn;
	err_cnt = raid_state_cnt(un, RCS_ERRED | RCS_LAST_ERRED);

	/* commit the record */

	md_unit_writerexit(ui);
	md_io_writerexit(ui);

	/* resync each line of the unit */
	for (line = 0; line < segsincolumn; line += line_count) {
		/*
		 * Update address range in child struct and lock the line.
		 *
		 * The reader version of the line lock is used since only
		 * resync will use data beyond un_resync_line_index on the
		 * resync device.
		 */
		un = (mr_unit_t *)md_io_readerlock(ui);
		if (line + line_count > segsincolumn)
			line_count = segsincolumn - line;
		resync_end_pos = raid_resync_fillin_cs(line, line_count, cs);
		(void) md_unit_readerlock(ui);
		ASSERT(un->un_resync_line_index == resync_end_pos);
		err = raid_resync_region(cs, line, (int)line_count,
		    &single_read, &hs_state, &err_col, dev_to_write,
		    write_devstart);

		/*
		 * if the column failed to resync then stop writing directly
		 * to the column.
		 */
		if (err)
			un->un_resync_line_index = 0;

		md_unit_readerexit(ui);
		raid_line_exit(cs);
		md_io_readerexit(ui);

		if (err)
			break;

		un = (mr_unit_t *)md_unit_writerlock(ui);

		if (raid_state_cnt(un, RCS_ERRED | RCS_LAST_ERRED) !=
		    err_cnt) {
			err = RAID_RESYNC_STATE;
			md_unit_writerexit(ui);
			break;
		}
		md_unit_writerexit(ui);
	} /* for */

resync_comp_error:
	un = (mr_unit_t *)md_io_writerlock(ui);
	(void) md_unit_writerlock(ui);
	un->un_column[resync].un_devflags &= ~MD_RAID_WRITE_ALT;

	recids[0] = 0;
	recids[1] = 0;
	switch (err) {
	/*
	 * successful resync
	 */
	case RAID_RESYNC_OKAY:
		/* initialize pre-write area */
		if ((un->un_column[resync].un_orig_dev != NODEV64) &&
		    (un->un_column[resync].un_orig_dev ==
		    un->un_column[resync].un_alt_dev)) {
			/*
			 * replacing a hot spare
			 * release the hot spare, which will close the hotspare
			 * and mark it closed.
			 */
			raid_hs_release(hs_state, un, &recids[0], resync);
			/*
			 * make the resync target the main device and
			 * mark open
			 */
			un->un_column[resync].un_hs_id = 0;
			un->un_column[resync].un_dev =
			    un->un_column[resync].un_orig_dev;
			un->un_column[resync].un_devstart =
			    un->un_column[resync].un_orig_devstart;
			un->un_column[resync].un_pwstart =
			    un->un_column[resync].un_orig_pwstart;
			un->un_column[resync].un_devflags |=
			    MD_RAID_DEV_ISOPEN;
			/* alt becomes the device so don't close it */
			un->un_column[resync].un_devflags &=
			    ~MD_RAID_WRITE_ALT;
			un->un_column[resync].un_devflags &=
			    ~MD_RAID_ALT_ISOPEN;
			un->un_column[resync].un_alt_dev = NODEV64;
		}
		raid_set_state(un, resync, RCS_OKAY, 0);
		break;

	case RAID_RESYNC_WRERROR:
		if (HOTSPARED(un, resync) && single_read &&
		    (un->un_column[resync].un_devflags &
		    MD_RAID_COPY_RESYNC)) {
			/*
			 * this is the case where the resync target is
			 * bad but there is a good hotspare.  In this
			 * case keep the hotspare, and go back to okay.
			 */
			raid_set_state(un, resync, RCS_OKAY, 0);
			cmn_err(CE_WARN, "md: %s: %s write error, replace "
			    "terminated", md_shortname(MD_SID(un)),
			    md_devname(MD_UN2SET(un),
			    un->un_column[resync].un_orig_dev,
			    NULL, 0));
			break;
		}
		if (HOTSPARED(un, resync)) {
			raid_hs_release(hs_state, un, &recids[0], resync);
			un->un_column[resync].un_dev =
			    un->un_column[resync].un_orig_dev;
			un->un_column[resync].un_devstart =
			    un->un_column[resync].un_orig_devstart;
			un->un_column[resync].un_pwstart =
			    un->un_column[resync].un_orig_pwstart;
		}
		raid_set_state(un, resync, RCS_ERRED, 0);
		if (un->un_column[resync].un_devflags & MD_RAID_REGEN_RESYNC)
			dev = un->un_column[resync].un_dev;
		else
			dev = un->un_column[resync].un_alt_dev;
		cmn_err(CE_WARN, "md: %s: %s write error, replace terminated",
		    md_shortname(MD_SID(un)), md_devname(MD_UN2SET(un), dev,
		    NULL, 0));
		break;

	case RAID_RESYNC_STATE:
		if (HOTSPARED(un, resync) && single_read &&
		    (un->un_column[resync].un_devflags &
		    MD_RAID_COPY_RESYNC)) {
			/*
			 * this is the case where the resync target is
			 * bad but there is a good hotspare.  In this
			 * case keep the hotspare, and go back to okay.
			 */
			raid_set_state(un, resync, RCS_OKAY, 0);
			cmn_err(CE_WARN, "md: %s: needs maintenance, replace "
			    "terminated", md_shortname(MD_SID(un)));
			break;
		}
		if (HOTSPARED(un, resync)) {
			raid_hs_release(hs_state, un, &recids[0], resync);
			un->un_column[resync].un_dev =
			    un->un_column[resync].un_orig_dev;
			un->un_column[resync].un_devstart =
			    un->un_column[resync].un_orig_devstart;
			un->un_column[resync].un_pwstart =
			    un->un_column[resync].un_orig_pwstart;
		}
		break;

	case RAID_RESYNC_RDERROR:
		if (HOTSPARED(un, resync)) {
			raid_hs_release(hs_state, un, &recids[0], resync);
			un->un_column[resync].un_dev =
			    un->un_column[resync].un_orig_dev;
			un->un_column[resync].un_devstart =
			    un->un_column[resync].un_orig_devstart;
			un->un_column[resync].un_pwstart =
			    un->un_column[resync].un_orig_pwstart;
		}

		if ((resync != err_col) && (err_col != NOCOLUMN))
			raid_set_state(un, err_col, RCS_ERRED, 0);
		break;

	default:
		ASSERT(0);
	}
	if (un->un_column[resync].un_alt_dev != NODEV64) {
		raid_close_alt(un, resync);
	}

	/*
	 * an io operation may have gotten an error and placed a
	 * column in erred state.  This will abort the resync, which
	 * will end up in last erred.  This is ugly so go through
	 * the columns and do cleanup
	 */
	err_cnt = 0;
	last_err = 0;
	for (i = 0; i < un->un_totalcolumncnt; i++) {
		if (un->un_column[i].un_devstate & RCS_OKAY)
			continue;
		if (i == resync) {
			raid_set_state(un, i, RCS_ERRED, 1);
			err_cnt++;
		} else if (err == RAID_RESYNC_OKAY) {
			err_cnt++;
		} else {
			raid_set_state(un, i, RCS_LAST_ERRED, 1);
			last_err++;
		}
	}
	if ((err_cnt == 0) && (last_err == 0))
		un->un_state = RUS_OKAY;
	else if (last_err == 0) {
		un->un_state = RUS_ERRED;
		ASSERT(err_cnt == 1);
	} else if (last_err > 0) {
		un->un_state = RUS_LAST_ERRED;
	}

	uniqtime32(&un->un_column[resync].un_devtimestamp);
	un->un_resync_copysize = 0;
	un->un_column[resync].un_devflags &=
	    ~(MD_RAID_REGEN_RESYNC | MD_RAID_COPY_RESYNC);
	raid_commit(un, recids);
	/* release unit writer lock and acquire unit reader lock */
	md_unit_writerexit(ui);
	md_io_writerexit(ui);
	(void) md_unit_readerlock(ui);

	if (err == RAID_RESYNC_OKAY) {
		SE_NOTIFY(EC_SVM_STATE, ESC_SVM_RESYNC_DONE,
		    SVM_TAG_METADEVICE, MD_UN2SET(un), MD_SID(un));
	} else {
		SE_NOTIFY(EC_SVM_STATE, ESC_SVM_RESYNC_FAILED,
		    SVM_TAG_METADEVICE, MD_UN2SET(un), MD_SID(un));
		if (raid_state_cnt(un, RCS_ERRED | RCS_LAST_ERRED) > 1) {
			SE_NOTIFY(EC_SVM_STATE, ESC_SVM_LASTERRED,
			    SVM_TAG_METADEVICE, MD_UN2SET(un), MD_SID(un));
		} else {
			SE_NOTIFY(EC_SVM_STATE, ESC_SVM_ERRED,
			    SVM_TAG_METADEVICE, MD_UN2SET(un), MD_SID(un));
		}
	}

	free_bufs(dbtob(bsize), cs);
}
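/*
 * resync_comp() is entered with the unit reader lock held, promotes to
 * the writer locks, and restores the reader lock before returning.  A
 * self-contained user-space analogy of that promote-then-demote pattern,
 * using POSIX rwlocks rather than the md locking API (rwlocks cannot be
 * upgraded in place, hence the explicit drop before the write acquire):
 */
#include <pthread.h>

static pthread_rwlock_t unit_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Called with unit_lock read-held; returns with it read-held again,
 * mirroring resync_comp()'s md_unit_readerexit()/md_unit_writerlock()
 * entry sequence and the final md_unit_readerlock(). */
static void resync_like_worker(void)
{
	pthread_rwlock_unlock(&unit_lock);	/* drop reader side */
	pthread_rwlock_wrlock(&unit_lock);	/* take writer side */

	/* ... mutate shared unit state ... */

	pthread_rwlock_unlock(&unit_lock);	/* release writer side */
	pthread_rwlock_rdlock(&unit_lock);	/* restore caller's reader */
}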
static int tegra_crypto_sha(struct tegra_sha_req *sha_req)
{
	struct crypto_ahash *tfm;
	struct scatterlist sg[1];
	char result[64];
	struct ahash_request *req;
	struct tegra_crypto_completion sha_complete;
	void *hash_buff;
	unsigned long *xbuf[XBUFSIZE];
	int ret = -ENOMEM;

	tfm = crypto_alloc_ahash(sha_req->algo, 0, 0);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
		       "%ld\n", sha_req->algo, PTR_ERR(tfm));
		goto out_alloc;
	}

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "alg: hash: Failed to allocate request for "
		       "%s\n", sha_req->algo);
		goto out_noreq;
	}

	ret = alloc_bufs(xbuf);
	if (ret < 0) {
		pr_err("alloc_bufs failed");
		goto out_buf;
	}

	init_completion(&sha_complete.restart);

	memset(result, 0, sizeof(result));

	hash_buff = xbuf[0];
	memcpy(hash_buff, sha_req->plaintext, sha_req->plaintext_sz);
	sg_init_one(&sg[0], hash_buff, sha_req->plaintext_sz);

	if (sha_req->keylen) {
		crypto_ahash_clear_flags(tfm, ~0);
		ret = crypto_ahash_setkey(tfm, sha_req->key,
					  sha_req->keylen);
		if (ret) {
			printk(KERN_ERR "alg: hash: setkey failed on %s: "
			       "ret=%d\n", sha_req->algo, -ret);
			goto out;
		}
	}

	ahash_request_set_crypt(req, sg, result, sha_req->plaintext_sz);

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_init(req));
	if (ret) {
		pr_err("alg: hash: init failed for %s: ret=%d\n",
		       sha_req->algo, -ret);
		goto out;
	}

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_update(req));
	if (ret) {
		pr_err("alg: hash: update failed for %s: ret=%d\n",
		       sha_req->algo, -ret);
		goto out;
	}

	ret = sha_async_hash_op(req, &sha_complete, crypto_ahash_final(req));
	if (ret) {
		pr_err("alg: hash: final failed for %s: ret=%d\n",
		       sha_req->algo, -ret);
		goto out;
	}

	ret = copy_to_user((void __user *)sha_req->result,
			   (const void *)result,
			   crypto_ahash_digestsize(tfm));
	if (ret) {
		ret = -EFAULT;
		pr_err("alg: hash: copy_to_user failed (%d) for %s\n",
		       ret, sha_req->algo);
	}

out:
	free_bufs(xbuf);
out_buf:
	ahash_request_free(req);
out_noreq:
	crypto_free_ahash(tfm);
out_alloc:
	return ret;
}
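/*
 * sha_async_hash_op() is called above but not shown.  A plausible
 * definition, inferred from the identical wait-and-collect sequence
 * written inline in tegra_crypt_rsa() below; not a verified copy of the
 * driver's helper.
 */
static int sha_async_hash_op(struct ahash_request *req,
			     struct tegra_crypto_completion *tr,
			     int ret)
{
	/* -EINPROGRESS/-EBUSY mean the request was queued: wait for the
	 * completion callback and pick up the real status from req_err. */
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&tr->restart);
		if (!ret)
			ret = tr->req_err;
		INIT_COMPLETION(tr->restart);
	}
	return ret;
}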
static int tegra_crypt_rsa(struct tegra_crypto_ctx *ctx,
			   struct tegra_rsa_req *rsa_req)
{
	struct crypto_ahash *tfm = NULL;
	struct ahash_request *req = NULL;
	struct scatterlist sg[1];
	char *result = NULL;
	void *hash_buff;
	int ret = 0;
	unsigned long *xbuf[XBUFSIZE];
	struct tegra_crypto_completion rsa_complete;

	switch (rsa_req->algo) {
	case TEGRA_RSA512:
		req = ahash_request_alloc(ctx->rsa512_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa512\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa512_tfm;
		break;

	case TEGRA_RSA1024:
		req = ahash_request_alloc(ctx->rsa1024_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa1024\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa1024_tfm;
		break;

	case TEGRA_RSA1536:
		req = ahash_request_alloc(ctx->rsa1536_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa1536\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa1536_tfm;
		break;

	case TEGRA_RSA2048:
		req = ahash_request_alloc(ctx->rsa2048_tfm, GFP_KERNEL);
		if (!req) {
			pr_err("alg: hash: Failed to allocate request for rsa2048\n");
			ret = -ENOMEM;
			goto req_fail;
		}
		tfm = ctx->rsa2048_tfm;
		break;

	default:
		ret = -EINVAL;
		goto req_fail;
	}

	ret = alloc_bufs(xbuf);
	if (ret < 0) {
		pr_err("alloc_bufs failed");
		goto buf_fail;
	}

	init_completion(&rsa_complete.restart);

	/* the upper 16 bits of keylen carry the result (modulus) size */
	result = kzalloc(rsa_req->keylen >> 16, GFP_KERNEL);
	if (!result) {
		pr_err("result alloc fail\n");
		ret = -ENOMEM;
		goto result_fail;
	}

	hash_buff = xbuf[0];
	memcpy(hash_buff, rsa_req->message, rsa_req->msg_len);
	sg_init_one(&sg[0], hash_buff, rsa_req->msg_len);

	if (!(rsa_req->keylen))
		goto rsa_fail;

	if (!rsa_req->skip_key) {
		ret = crypto_ahash_setkey(tfm, rsa_req->key, rsa_req->keylen);
		if (ret) {
			pr_err("alg: hash: setkey failed\n");
			goto rsa_fail;
		}
	}

	ahash_request_set_crypt(req, sg, result, rsa_req->msg_len);

	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&rsa_complete.restart);
		if (!ret)
			ret = rsa_complete.req_err;
		INIT_COMPLETION(rsa_complete.restart);
	}

	if (ret) {
		pr_err("alg: hash: digest failed\n");
		goto rsa_fail;
	}

	ret = copy_to_user((void __user *)rsa_req->result,
			   (const void *)result,
			   crypto_ahash_digestsize(tfm));
	if (ret) {
		ret = -EFAULT;
		pr_err("alg: hash: copy_to_user failed (%d)\n", ret);
	}

rsa_fail:
	kfree(result);
result_fail:
	free_bufs(xbuf);
buf_fail:
	ahash_request_free(req);
req_fail:
	return ret;
}
static int process_crypt_req(struct tegra_crypto_ctx *ctx,
			     struct tegra_crypt_req *crypt_req)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req = NULL;
	struct scatterlist in_sg;
	struct scatterlist out_sg;
	unsigned long *xbuf[NBUFS];
	int ret = 0, size = 0;
	unsigned long total = 0;
	const u8 *key = NULL;
	struct tegra_crypto_completion tcrypt_complete;

	if (crypt_req->op & TEGRA_CRYPTO_ECB) {
		req = ablkcipher_request_alloc(ctx->ecb_tfm, GFP_KERNEL);
		tfm = ctx->ecb_tfm;
	} else if (crypt_req->op & TEGRA_CRYPTO_CBC) {
		req = ablkcipher_request_alloc(ctx->cbc_tfm, GFP_KERNEL);
		tfm = ctx->cbc_tfm;
	} else if ((crypt_req->op & TEGRA_CRYPTO_OFB) &&
		   (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2)) {
		req = ablkcipher_request_alloc(ctx->ofb_tfm, GFP_KERNEL);
		tfm = ctx->ofb_tfm;
	} else if ((crypt_req->op & TEGRA_CRYPTO_CTR) &&
		   (tegra_get_chipid() != TEGRA_CHIPID_TEGRA2)) {
		req = ablkcipher_request_alloc(ctx->ctr_tfm, GFP_KERNEL);
		tfm = ctx->ctr_tfm;
	}

	if (!req) {
		pr_err("%s: Failed to allocate request\n", __func__);
		return -ENOMEM;
	}

	if ((crypt_req->keylen < 0) ||
	    (crypt_req->keylen > AES_MAX_KEY_SIZE)) {
		ret = -EINVAL;
		pr_err("crypt_req keylen invalid");
		goto process_req_out;
	}

	crypto_ablkcipher_clear_flags(tfm, ~0);

	if (!ctx->use_ssk)
		key = crypt_req->key;

	if (!crypt_req->skip_key) {
		ret = crypto_ablkcipher_setkey(tfm, key, crypt_req->keylen);
		if (ret < 0) {
			pr_err("setkey failed");
			goto process_req_out;
		}
	}

	ret = alloc_bufs(xbuf);
	if (ret < 0) {
		pr_err("alloc_bufs failed");
		goto process_req_out;
	}

	init_completion(&tcrypt_complete.restart);

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					tegra_crypt_complete,
					&tcrypt_complete);

	total = crypt_req->plaintext_sz;
	while (total > 0) {
		size = min(total, PAGE_SIZE);
		ret = copy_from_user((void *)xbuf[0],
				     (void __user *)crypt_req->plaintext,
				     size);
		if (ret) {
			ret = -EFAULT;
			pr_debug("%s: copy_from_user failed (%d)\n",
				 __func__, ret);
			goto process_req_buf_out;
		}
		sg_init_one(&in_sg, xbuf[0], size);
		sg_init_one(&out_sg, xbuf[1], size);

		if (!crypt_req->skip_iv)
			ablkcipher_request_set_crypt(req, &in_sg,
						     &out_sg, size,
						     crypt_req->iv);
		else
			ablkcipher_request_set_crypt(req, &in_sg,
						     &out_sg, size, NULL);

		INIT_COMPLETION(tcrypt_complete.restart);

		tcrypt_complete.req_err = 0;
		ret = crypt_req->encrypt ?
		    crypto_ablkcipher_encrypt(req) :
		    crypto_ablkcipher_decrypt(req);

		if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
			/* crypto driver is asynchronous */
			ret = wait_for_completion_interruptible(
			    &tcrypt_complete.restart);
			if (ret < 0)
				goto process_req_buf_out;

			if (tcrypt_complete.req_err < 0) {
				ret = tcrypt_complete.req_err;
				goto process_req_buf_out;
			}
		} else if (ret < 0) {
			pr_debug("%scrypt failed (%d)\n",
				 crypt_req->encrypt ? "en" : "de", ret);
			goto process_req_buf_out;
		}

		ret = copy_to_user((void __user *)crypt_req->result,
				   (const void *)xbuf[1], size);
		if (ret) {
			ret = -EFAULT;
			pr_debug("%s: copy_to_user failed (%d)\n",
				 __func__, ret);
			goto process_req_buf_out;
		}

		total -= size;
		crypt_req->result += size;
		crypt_req->plaintext += size;
	}

process_req_buf_out:
	free_bufs(xbuf);
process_req_out:
	ablkcipher_request_free(req);
	return ret;
}
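/*
 * tegra_crypt_complete(), registered via ablkcipher_request_set_callback()
 * above, is not shown.  A minimal sketch consistent with how req_err and
 * restart are consumed in the loop; treat it as an assumption, not the
 * driver's verbatim callback.
 */
static void tegra_crypt_complete(struct crypto_async_request *req, int err)
{
	struct tegra_crypto_completion *done = req->data;

	/* -EINPROGRESS only reports that a backlogged request started;
	 * anything else is the final status, so record it and wake the
	 * waiter blocked in wait_for_completion_interruptible(). */
	if (err != -EINPROGRESS) {
		done->req_err = err;
		complete(&done->restart);
	}
}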