/* PIC32 supports only 32bit read operation */
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
	void __iomem *fifo = hw_ep->fifo;
	u32 partial = len & 0x03;	/* bytes beyond the last full word */
	u32 word;

	/*
	 * USB stack ensures dst is always 32bit aligned, so the bulk of the
	 * packet can be drained as whole 32-bit words.
	 */
	readsl(fifo, dst, len >> 2);

	if (partial) {
		/* One final 32-bit read; keep only the valid tail bytes. */
		word = musb_readl(fifo, 0);
		memcpy(dst + (len & ~0x03), &word, partial);
	}
}
/*
 * Read NAND chip to buf. Attempt DMA read for 'large' bufs. Fall-back to
 * readsl for small bufs and if FDMA fails to initialise.
 *
 * Note: within each branch 'buf' and 'len' are adjusted in lock-step while
 * aligning, so the trailing-byte loops index relative to the aligned start.
 */
static void nand_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd->priv;
	struct stm_nand_emi *data = chip->priv;
	/* Mask of the low bits that must be clear for a DMA-aligned address */
	unsigned long dma_align = dma_get_cache_alignment() - 1UL;
	int i;

	/*
	 * DMA only pays off for 'large' transfers, and only if an FDMA
	 * channel is already held or can (rate-limited) be initialised.
	 */
	if (len >= 512 && (data->dma_chan >= 0 ||
			   init_fdma_nand_ratelimit(data) == 0)) {
		/* Read up to cache-line boundary */
		while ((unsigned long)buf & dma_align) {
			*buf++ = readb(chip->IO_ADDR_R);
			len--;
		}

		/* Do DMA transfer, fall-back to readsl if fail */
		if (nand_read_dma(mtd, buf, len & ~dma_align, NULL, 0) != 0)
			readsl(chip->IO_ADDR_R, buf, (len & ~dma_align)/4);

		/* Mop up trailing bytes */
		for (i = (len & ~dma_align); i < len; i++)
			buf[i] = readb(chip->IO_ADDR_R);
	} else {
		/* Read buf up to 4-byte boundary */
		while ((unsigned int)buf & 0x3) {
			*buf++ = readb(chip->IO_ADDR_R);
			len--;
		}

		/* Bulk of the data as 32-bit reads */
		readsl(chip->IO_ADDR_R, buf, len/4);

		/* Mop up trailing bytes */
		for (i = (len & ~0x3); i < len; i++)
			buf[i] = readb(chip->IO_ADDR_R);
	}
}
/*
 * PIO read of 'len' bytes from the NAND data register into buf: byte reads
 * until buf is word-aligned, 32-bit burst for the middle, byte reads for
 * the tail.
 */
static void nand_readsl_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd->priv;
	int tail;

	/* Byte reads until buf reaches a 4-byte boundary */
	while ((unsigned int)buf & 0x3) {
		*buf++ = readb(chip->IO_ADDR_R);
		len--;
	}

	/* Whole 32-bit words for the aligned middle section */
	readsl(chip->IO_ADDR_R, buf, len / 4);

	/* Remaining 0-3 bytes */
	for (tail = len & ~0x3; tail < len; tail++)
		buf[tail] = readb(chip->IO_ADDR_R);
}
/*
 * sun4i_ss_prng_generate() - fill @dst with @dlen bytes from the SS PRNG.
 *
 * The engine is (re)seeded from ss->seed[] before every data read, and the
 * seed registers are read back afterwards so the next call continues the
 * stream. 'src'/'slen' (the crypto_rng API's extra input) are unused by
 * this hardware. Returns dlen.
 *
 * Fix: ss->seed[] is an array of u32 accessed with writel/readl, so the
 * number of seed words is SS_SEED_LEN (bits) / 32. The previous divisor
 * BITS_PER_LONG would program/refresh only half the seed on a 64-bit
 * build (identical behavior on 32-bit, where BITS_PER_LONG == 32).
 *
 * NOTE(review): todo is rounded down to a multiple of 4, so the last
 * dlen % 4 bytes of dst are never written even though dlen is returned —
 * looks intentional for this 32-bit-only FIFO, but confirm with callers.
 */
int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
			   unsigned int slen, u8 *dst, unsigned int dlen)
{
	struct sun4i_ss_alg_template *algt;
	struct rng_alg *alg = crypto_rng_alg(tfm);
	int i;
	u32 v;
	u32 *data = (u32 *)dst;
	const u32 mode = SS_OP_PRNG | SS_PRNG_CONTINUE | SS_ENABLED;
	size_t len;
	struct sun4i_ss_ctx *ss;
	unsigned int todo = (dlen / 4) * 4;	/* whole 32-bit words only */

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
	ss = algt->ss;

	spin_lock(&ss->slock);
	writel(mode, ss->base + SS_CTL);

	while (todo > 0) {
		/* write the seed: SS_SEED_LEN is in bits, registers are u32 */
		for (i = 0; i < SS_SEED_LEN / 32; i++)
			writel(ss->seed[i], ss->base + SS_KEY0 + i * 4);

		/* Read the random data */
		len = min_t(size_t, SS_DATA_LEN / BITS_PER_BYTE, todo);
		readsl(ss->base + SS_TXFIFO, data, len / 4);
		data += len / 4;
		todo -= len;

		/* Update the seed from the engine for the next round */
		for (i = 0; i < SS_SEED_LEN / 32; i++) {
			v = readl(ss->base + SS_KEY0 + i * 4);
			ss->seed[i] = v;
		}
	}

	writel(0, ss->base + SS_CTL);
	spin_unlock(&ss->slock);
	return dlen;
}
/*
 * Drain 'count' bytes from the EMAC RX register as whole 32-bit words;
 * count is rounded up, so up to 3 extra bytes may be read into data.
 */
static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
{
	int words = round_up(count, 4) / 4;

	readsl(reg, data, words);
}
static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data, unsigned long reg, int len) { readsl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len); }
/*
 * Optimized polling cipher path: requires every src/dst SG entry length to
 * be a multiple of 4 (the caller checks this). Feeds the RX FIFO and drains
 * the TX FIFO in 32-bit words under the SS spinlock until all of
 * areq->cryptlen has been processed. Returns 0 or -EINVAL.
 */
static int sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->iv) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&ss->slock, flags);

	/* Program the key one 32-bit word at a time */
	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	/* Load the IV (at most 4 words) */
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	/* Writing the mode starts the engine */
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	/* From here ileft/oleft count 32-bit words, not bytes */
	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
	do {
		/* Push as many input words as FIFO space and SG allow */
		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
		if (todo) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		/* Re-read the FIFO occupancy before the output pass */
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		/* Drain as many output words as are available */
		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft);

	/* Read back the IV so chained requests can continue the stream */
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	/* Stop the engine before releasing the lock */
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
/* Generic function that support SG with size not multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
	char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
	unsigned int ob = 0; /* offset in buf */
	unsigned int obo = 0; /* offset in bufo*/
	unsigned int obl = 0; /* length of data in bufo */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->iv) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * if we have only SGs with size multiple of 4,
	 * we can use the SS optimized function
	 */
	while (in_sg && no_chunk == 1) {
		if (in_sg->length % 4)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if (out_sg->length % 4)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1)
		return sun4i_ss_opti_poll(areq);

	spin_lock_irqsave(&ss->slock, flags);

	/* Program the key one 32-bit word at a time */
	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	/* Load the IV (at most 4 words) */
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	/* Writing the mode starts the engine */
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	/* Unlike the optimized path, ileft/oleft here count bytes */
	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;

	while (oleft) {
		if (ileft) {
			/*
			 * todo is the number of consecutive 4byte word that we
			 * can read from current SG
			 */
			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * not enough consecutive bytes, so we need to
				 * linearize in buf. todo is in bytes
				 * After that copy, if we have a multiple of 4
				 * we need to be able to write all buf in one
				 * pass, so it is why we min() with rx_cnt
				 */
				todo = min3(rx_cnt * 4 - ob, ileft,
					    mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		/* Check FIFO occupancy before attempting the output pass */
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev,
			"%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->cryptlen, rx_cnt,
			oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

		if (!tx_cnt)
			continue;
		/* todo in 4bytes word */
		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			/*
			 * read obl bytes in bufo, we read at maximum for
			 * emptying the device
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * how many bytes we can copy ?
				 * no more than remaining SG size
				 * no more than remaining buffer
				 * no need to test against oleft
				 */
				todo = min(mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}

	/* Read back the IV so chained requests can continue the stream */
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	/* Stop the engine before releasing the lock */
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
/* Burst-read 'count' 32-bit words from the IDE data port into addr. */
static void ide_itdm320_insl(unsigned long port, void *addr, u32 count)
{
	readsl(port, addr, count);
}