static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
				       struct ttm_mem_reg *old_mem,
				       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;

	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	atomic64_add(size / PAGE_SIZE,
		     (atomic64_t *) &zdev->fmb->allocated_pages);
	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
				 size, DMA_BIDIRECTIONAL, NULL);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}
int sce_put4pop(sce_hndl_t scehndl, sce_poptask_t *poptask, int failed)
{
	unsigned long flags;
	sce_t *sce;
	lun_t *lun;
	int lunidx;
	int ret;

	if ((!scehndl) || (!poptask))
		return SCE_ERROR;

	sce = (sce_t *)scehndl;

	lunidx = _lun_search(sce, poptask->lunctx);
	if (lunidx < 0)
		return SCE_ERROR;

	lun = &sce->luntbl[lunidx];

	spin_lock_irqsave(&lun->lock, flags);
	if (!failed) {
		ret = _complete_population(lun, poptask->lun_fragnum);
		atomic64_inc(&lun->stats.populations);
		atomic64_inc(&lun->stats.alloc_sctrs);
		atomic64_add(SCE_SCTRPERFRAG, &lun->stats.valid_sctrs);
	} else {
		ret = _cancel_population(lun, poptask->lun_fragnum);
	}
	spin_unlock_irqrestore(&lun->lock, flags);

	return ret;
}
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
{
	struct request_queue *q = pblk->dev->q;
	struct pblk_w_ctx w_ctx;
	sector_t lba = pblk_get_lba(bio);
	unsigned long start_time = jiffies;
	unsigned int bpos, pos;
	int nr_entries = pblk_get_secs(bio);
	int i, ret;

	generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0);

	/* Update the write buffer head (mem) with the entries that we can
	 * write. The write in itself cannot fail, so there is no need to
	 * rollback from here on.
	 */
retry:
	ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
	switch (ret) {
	case NVM_IO_REQUEUE:
		io_schedule();
		goto retry;
	case NVM_IO_ERR:
		pblk_pipeline_stop(pblk);
		goto out;
	}

	if (unlikely(!bio_has_data(bio)))
		goto out;

	pblk_ppa_set_empty(&w_ctx.ppa);
	w_ctx.flags = flags;
	if (bio->bi_opf & REQ_PREFLUSH)
		w_ctx.flags |= PBLK_FLUSH_ENTRY;

	for (i = 0; i < nr_entries; i++) {
		void *data = bio_data(bio);

		w_ctx.lba = lba + i;

		pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + i);
		pblk_rb_write_entry_user(&pblk->rwb, data, w_ctx, pos);

		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	atomic64_add(nr_entries, &pblk->user_wa);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_entries, &pblk->inflight_writes);
	atomic_long_add(nr_entries, &pblk->req_writes);
#endif

	pblk_rl_inserted(&pblk->rl, nr_entries);

out:
	generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time);
	pblk_write_should_kick(pblk);
	return ret;
}
static inline void *__brick_block_alloc(gfp_t gfp, int order, int cline)
{
	void *res;

#ifdef CONFIG_MARS_MEM_RETRY
	for (;;) {
#endif
#ifdef USE_KERNEL_PAGES
		res = (void *)__get_free_pages(gfp, order);
#else
		res = __vmalloc(PAGE_SIZE << order, gfp, PAGE_KERNEL_IO);
#endif
#ifdef CONFIG_MARS_MEM_RETRY
		if (likely(res))
			break;
		msleep(1000);
	}
#endif

	if (likely(res)) {
#ifdef CONFIG_MARS_DEBUG_MEM_STRONG
		_new_block_info(res, PAGE_SIZE << order, cline);
#endif
#ifdef BRICK_DEBUG_MEM
		atomic_inc(&phys_block_alloc);
		atomic_inc(&raw_count[order]);
#endif
		atomic64_add((PAGE_SIZE/1024) << order, &brick_global_block_used);
	}

	return res;
}
static int merge_dup(struct tracing_map_sort_entry **sort_entries,
		     unsigned int target, unsigned int dup)
{
	struct tracing_map_elt *target_elt, *elt;
	bool first_dup = (target - dup) == 1;
	int i;

	if (first_dup) {
		elt = sort_entries[target]->elt;
		target_elt = copy_elt(elt);
		if (!target_elt)
			return -ENOMEM;
		sort_entries[target]->elt = target_elt;
		sort_entries[target]->elt_copied = true;
	} else
		target_elt = sort_entries[target]->elt;

	elt = sort_entries[dup]->elt;

	for (i = 0; i < elt->map->n_fields; i++)
		atomic64_add(atomic64_read(&elt->fields[i].sum),
			     &target_elt->fields[i].sum);

	sort_entries[dup]->dup = true;

	return 0;
}
void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}
static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src,
			    unsigned int nbytes,
			    int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	u32 max_sg_len;
	int rc;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	do {
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);

		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
				       processed, NULL);
		if (rc)
			goto out;

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}
static netdev_tx_t ccat_eth_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct ccat_eth_priv *const priv = netdev_priv(dev);
	struct ccat_eth_dma_fifo *const fifo = &priv->tx_fifo;
	u32 addr_and_length;

	if (skb_is_nonlinear(skb)) {
		pr_warn("Non linear skb not supported -> drop frame.\n");
		atomic64_inc(&priv->tx_dropped);
		priv->kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len > sizeof(fifo->next->data)) {
		pr_warn("skb.len %llu exceeds dma buffer %llu -> drop frame.\n",
			(u64) skb->len, (u64) sizeof(fifo->next->data));
		atomic64_inc(&priv->tx_dropped);
		priv->kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (!ccat_eth_frame_sent(fifo->next)) {
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		priv->stop_queue(priv->netdev);
		return NETDEV_TX_BUSY;
	}

	/* prepare frame in DMA memory */
	fifo->next->tx_flags = cpu_to_le32(0);
	fifo->next->length = cpu_to_le16(skb->len);
	memcpy(fifo->next->data, skb->data, skb->len);

	/* Queue frame into CCAT TX-FIFO, CCAT ignores the first 8 bytes
	 * of the tx descriptor */
	addr_and_length = offsetof(struct ccat_eth_frame, length);
	addr_and_length += ((void *)fifo->next - fifo->dma.virt);
	addr_and_length += ((skb->len + CCAT_ETH_FRAME_HEAD_LEN) / 8) << 24;
	iowrite32(addr_and_length, priv->reg.tx_fifo);

	/* update stats */
	atomic64_add(skb->len, &priv->tx_bytes);

	priv->kfree_skb_any(skb);

	ccat_eth_fifo_inc(fifo);
	/* stop queue if tx ring is full */
	if (!ccat_eth_frame_sent(fifo->next)) {
		priv->stop_queue(priv->netdev);
	}
	return NETDEV_TX_OK;
}
/*
 * On GC the incoming lbas are not necessarily sequential. Also, some of the
 * lbas might not be valid entries, which are marked as empty by the GC thread
 */
int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct pblk_w_ctx w_ctx;
	unsigned int bpos, pos;
	void *data = gc_rq->data;
	int i, valid_entries;

	/* Update the write buffer head (mem) with the entries that we can
	 * write. The write in itself cannot fail, so there is no need to
	 * rollback from here on.
	 */
retry:
	if (!pblk_rb_may_write_gc(&pblk->rwb, gc_rq->secs_to_gc, &bpos)) {
		io_schedule();
		goto retry;
	}

	w_ctx.flags = PBLK_IOTYPE_GC;
	pblk_ppa_set_empty(&w_ctx.ppa);

	for (i = 0, valid_entries = 0; i < gc_rq->nr_secs; i++) {
		if (gc_rq->lba_list[i] == ADDR_EMPTY)
			continue;

		w_ctx.lba = gc_rq->lba_list[i];

		pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + valid_entries);
		pblk_rb_write_entry_gc(&pblk->rwb, data, w_ctx, gc_rq->line,
				       gc_rq->paddr_list[i], pos);

		data += PBLK_EXPOSED_PAGE_SIZE;
		valid_entries++;
	}

	WARN_ONCE(gc_rq->secs_to_gc != valid_entries,
		  "pblk: inconsistent GC write\n");

	atomic64_add(valid_entries, &pblk->gc_wa);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(valid_entries, &pblk->inflight_writes);
	atomic_long_add(valid_entries, &pblk->recov_gc_writes);
#endif

	pblk_write_should_kick(pblk);
	return NVM_IO_OK;
}
int fetch_node_callback(void *tree, NID nid, struct node **n)
{
	int r;
	struct timespec t1, t2;
	struct tree *t = (struct tree *)tree;

	gettime(&t1);
	r = deserialize_node_from_disk(t->fd, t->block, t->hdr, nid, n);
	gettime(&t2);

	atomic64_add(&t->status->tree_node_fetch_costs,
		     (uint64_t)time_diff_ms(t1, t2));
	atomic64_increment(&t->status->tree_node_fetch_nums);

	if (r != NESS_OK)
		__PANIC("fetch node from disk error, errno [%d]", r);

	return r;
}
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long iommu_page_index;
	int npages;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
		dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);

	atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(zdev, iommu_page_index, npages);
}
static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src,
			    unsigned int nbytes)
{
	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	do {
		to_process = nbytes - processed;

		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
				       processed, csbcpb->cpb.aes_ctr.iv);
		if (rc)
			goto out;

		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
static netdev_tx_t ccat_eth_start_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct ccat_eth_priv *const priv = netdev_priv(dev);
	struct ccat_eth_fifo *const fifo = &priv->tx_fifo;

	if (skb_is_nonlinear(skb)) {
		pr_warn("Non linear skb not supported -> drop frame.\n");
		atomic64_inc(&fifo->dropped);
		priv->kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len > MAX_PAYLOAD_SIZE) {
		pr_warn("skb.len %llu exceeds dma buffer %llu -> drop frame.\n",
			(u64) skb->len, (u64) MAX_PAYLOAD_SIZE);
		atomic64_inc(&fifo->dropped);
		priv->kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (!fifo->ops->ready(fifo)) {
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		priv->stop_queue(priv->netdev);
		return NETDEV_TX_BUSY;
	}

	/* prepare frame in DMA memory */
	fifo->ops->queue.skb(fifo, skb);

	/* update stats */
	atomic64_add(skb->len, &fifo->bytes);

	priv->kfree_skb_any(skb);

	ccat_eth_fifo_inc(fifo);
	/* stop queue if tx ring is full */
	if (!fifo->ops->ready(fifo)) {
		priv->stop_queue(priv->netdev);
	}
	return NETDEV_TX_OK;
}
static void ccat_eth_receive(struct ccat_eth_priv *const priv, const size_t len)
{
	struct sk_buff *const skb = dev_alloc_skb(len + NET_IP_ALIGN);
	struct ccat_eth_fifo *const fifo = &priv->rx_fifo;
	struct net_device *const dev = priv->netdev;

	if (!skb) {
		pr_info("%s() out of memory :-(\n", __FUNCTION__);
		atomic64_inc(&fifo->dropped);
		return;
	}
	skb->dev = dev;
	skb_reserve(skb, NET_IP_ALIGN);
	fifo->ops->queue.copy_to_skb(fifo, skb, len);
	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, dev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	atomic64_add(len, &fifo->bytes);
	netif_rx(skb);
}
static void ccat_eth_receive(struct net_device *const dev,
			     const void *const data, const size_t len)
{
	struct sk_buff *const skb = dev_alloc_skb(len + NET_IP_ALIGN);
	struct ccat_eth_priv *const priv = netdev_priv(dev);

	if (!skb) {
		pr_info("%s() out of memory :-(\n", __FUNCTION__);
		atomic64_inc(&priv->rx_dropped);
		return;
	}
	skb->dev = dev;
	skb_reserve(skb, NET_IP_ALIGN);
	skb_copy_to_linear_data(skb, data, len);
	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, dev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	atomic64_add(len, &priv->rx_bytes);
	netif_rx(skb);
}
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
	if (iommu_page_index == -1)
		goto out_err;

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma) {
		dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
			dma_addr, size, zdev->end_dma);
		goto out_free;
	}

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
		atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
		return dma_addr + (offset & ~PAGE_MASK);
	}

out_free:
	dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
	dev_err(dev, "Failed to map addr: %lx\n", pa);
	return DMA_ERROR_CODE;
}
/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:	Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

		/*
		 * The drop counter must be incremented atomically
		 * since the RX tasklet also increments it.
		 */
#ifdef CONFIG_64BIT
		atomic64_add(rx_status.dropped_packets,
			     (atomic64_t *)&priv->stats.rx_dropped);
#else
		atomic_add(rx_status.dropped_packets,
			   (atomic_t *)&priv->stats.rx_dropped);
#endif
	}

	return &priv->stats;
}
static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src,
			    unsigned int nbytes,
			    int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	int rc;

	if (nbytes > nx_ctx->ap->databytelen)
		return -EINVAL;

	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, NULL);
	if (rc)
		goto out;

	if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));
	atomic64_add(csbcpb->csb.processed_byte_count,
		     &(nx_ctx->stats->aes_bytes));
out:
	return rc;
}
static int nx_gca(struct nx_crypto_ctx *nx_ctx,
		  struct aead_request *req,
		  u8 *out)
{
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	int rc = -EINVAL;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;

	if (req->assoclen > nx_ctx->ap->databytelen)
		goto out;

	if (req->assoclen <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->assoc);
		scatterwalk_copychunks(out, &walk, req->assoclen,
				       SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);

		rc = 0;
		goto out;
	}

	nx_sg = nx_walk_and_build(nx_sg, nx_ctx->ap->sglen, req->assoc, 0,
				  req->assoclen);
	nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));
	atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);
out:
	return rc;
}
static int nx_sha512_final(struct shash_desc *desc, u8 *out)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	u64 count0;
	unsigned long irq_flags;
	int rc;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
		       SHA512_DIGEST_SIZE);
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	}

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	count0 = sctx->count[0] * 8;

	csbcpb->cpb.sha512.message_bit_length_lo = count0;

	len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, &nx_ctx->op.inlen,
				  &len, (u8 *)sctx->buf, NX_DS_SHA512);

	if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
		goto out;

	len = SHA512_DIGEST_SIZE;
	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg, &nx_ctx->op.outlen,
				  &len, out, NX_DS_SHA512);

	if (rc)
		goto out;

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha512_ops));
	atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));

	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
static __init int test_atomic64(void)
{
	long long v0 = 0xaaa31337c001d00dLL;
	long long v1 = 0xdeadbeefdeafcafeLL;
	long long v2 = 0xfaceabadf00df001LL;
	long long onestwos = 0x1111111122222222LL;
	long long one = 1LL;

	atomic64_t v = ATOMIC64_INIT(v0);
	long long r = v0;
	BUG_ON(v.counter != r);

	atomic64_set(&v, v1);
	r = v1;
	BUG_ON(v.counter != r);
	BUG_ON(atomic64_read(&v) != r);

	INIT(v0);
	atomic64_add(onestwos, &v);
	r += onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_add(-one, &v);
	r += -one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r += onestwos;
	BUG_ON(atomic64_add_return(onestwos, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	r += -one;
	BUG_ON(atomic64_add_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_sub(onestwos, &v);
	r -= onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_sub(-one, &v);
	r -= -one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= onestwos;
	BUG_ON(atomic64_sub_return(onestwos, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= -one;
	BUG_ON(atomic64_sub_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_inc(&v);
	r += one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r += one;
	BUG_ON(atomic64_inc_return(&v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_dec(&v);
	r -= one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= one;
	BUG_ON(atomic64_dec_return(&v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_xchg(&v, v1) != v0);
	r = v1;
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
	r = v1;
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_add_unless(&v, one, v0));
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(!atomic64_add_unless(&v, one, v1));
	r += one;
	BUG_ON(v.counter != r);

#ifdef CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
	INIT(onestwos);
	BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
	r -= one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_dec_if_positive(&v) != -one);
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
	BUG_ON(v.counter != r);
#else
#warning Please implement atomic64_dec_if_positive for your architecture and select the above Kconfig symbol
#endif

	INIT(onestwos);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_inc_not_zero(&v));
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

#ifdef CONFIG_X86
	pr_info("passed for %s platform %s CX8 and %s SSE\n",
#ifdef CONFIG_X86_64
		"x86-64",
#elif defined(CONFIG_X86_CMPXCHG64)
		"i586+",
#else
		"i386+",
#endif
		boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
		boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
#else
	pr_info("passed\n");
#endif

	return 0;
}
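/*
 * Note: the INIT() helper used throughout test_atomic64() above (and in the
 * variant further below) is not part of this snippet; it is defined elsewhere
 * in lib/atomic64_test.c. A sketch of what it is assumed to expand to, resetting
 * both the atomic under test and the reference value:
 *
 *	#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
 */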
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	int rc = -EINVAL;

	if (nbytes > nx_ctx->ap->databytelen)
		goto out;

	desc.info = nx_ctx->priv.gcm.iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */
	if (nbytes == 0) {
		char src[AES_BLOCK_SIZE] = {};
		struct scatterlist sg;

		desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0);
		if (IS_ERR(desc.tfm)) {
			rc = -ENOMEM;
			goto out;
		}

		crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key,
			NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 :
			NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32);

		sg_init_one(&sg, src, AES_BLOCK_SIZE);
		if (enc)
			crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg,
						    AES_BLOCK_SIZE);
		else
			crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg,
						    AES_BLOCK_SIZE);
		crypto_free_blkcipher(desc.tfm);

		rc = 0;
		goto out;
	}

	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;

	if (req->assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
		if (rc)
			goto out;
	}

	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));

	csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;

	rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes,
			       csbcpb->cpb.aes_gcm.iv_or_cnt);
	if (rc)
		goto out;

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));
	atomic64_add(csbcpb->csb.processed_byte_count,
		     &(nx_ctx->stats->aes_bytes));

	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
				req->dst, nbytes,
				crypto_aead_authsize(crypto_aead_reqtfm(req)),
				SCATTERWALK_TO_SG);
	} else if (req->assoclen) {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(itag, req->dst, nbytes,
				crypto_aead_authsize(crypto_aead_reqtfm(req)),
				SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	return rc;
}
static int ccm_nx_encrypt(struct aead_request *req,
			  struct blkcipher_desc *desc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
	unsigned long irq_flags;
	unsigned int processed = 0, to_process;
	int rc = -1;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
	if (rc)
		goto out;

	do {
		/* to process: the AES_BLOCK_SIZE data chunk to process in this
		 * update. This value is bound by sg list limits.
		 */
		to_process = nbytes - processed;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
				       &to_process, processed,
				       csbcpb->cpb.aes_ccm.iv_or_ctr);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* for partial completion, copy following for next
		 * entry into loop...
		 */
		memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
		       csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_ccm.in_s0,
		       csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		/* update stats */
		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(csbcpb->csb.processed_byte_count,
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	/* copy out the auth tag */
	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
				 req->dst, nbytes, authsize,
				 SCATTERWALK_TO_SG);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
static __init int test_atomic64(void)
{
	long long v0 = 0xaaa31337c001d00dLL;
	long long v1 = 0xdeadbeefdeafcafeLL;
	long long v2 = 0xfaceabadf00df001LL;
	long long onestwos = 0x1111111122222222LL;
	long long one = 1LL;

	atomic64_t v = ATOMIC64_INIT(v0);
	long long r = v0;
	BUG_ON(v.counter != r);

	atomic64_set(&v, v1);
	r = v1;
	BUG_ON(v.counter != r);
	BUG_ON(atomic64_read(&v) != r);

	INIT(v0);
	atomic64_add(onestwos, &v);
	r += onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_add(-one, &v);
	r += -one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r += onestwos;
	BUG_ON(atomic64_add_return(onestwos, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	r += -one;
	BUG_ON(atomic64_add_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_sub(onestwos, &v);
	r -= onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_sub(-one, &v);
	r -= -one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= onestwos;
	BUG_ON(atomic64_sub_return(onestwos, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= -one;
	BUG_ON(atomic64_sub_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_inc(&v);
	r += one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r += one;
	BUG_ON(atomic64_inc_return(&v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_dec(&v);
	r -= one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= one;
	BUG_ON(atomic64_dec_return(&v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_xchg(&v, v1) != v0);
	r = v1;
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
	r = v1;
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_add_unless(&v, one, v0));
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(!atomic64_add_unless(&v, one, v1));
	r += one;
	BUG_ON(v.counter != r);

#if defined(CONFIG_X86) || defined(CONFIG_MIPS) || defined(CONFIG_PPC) || \
    defined(CONFIG_S390) || defined(_ASM_GENERIC_ATOMIC64_H) || defined(CONFIG_ARM)
	INIT(onestwos);
	BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
	r -= one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_dec_if_positive(&v) != -one);
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
	BUG_ON(v.counter != r);
#else
#warning Please implement atomic64_dec_if_positive for your architecture, and add it to the IF above
#endif

	INIT(onestwos);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_inc_not_zero(&v));
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

#ifdef CONFIG_X86
#ifdef CONFIG_DEBUG_PRINTK
	printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n",
#ifdef CONFIG_X86_64
	       "x86-64",
#elif defined(CONFIG_X86_CMPXCHG64)
	       "i586+",
#else
	       "i386+",
#endif
	       boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
	       boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
#else
	;
#endif
#else
#ifdef CONFIG_DEBUG_PRINTK
	printk(KERN_INFO "atomic64 test passed\n");
#else
	;
#endif
#endif

	return 0;
}
static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	u32 max_sg_len;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	if (sctx->count >= SHA256_BLOCK_SIZE) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state,
		       SHA256_DIGEST_SIZE);
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	}

	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);

	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
				 &len, max_sg_len);

	if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
		rc = -EINVAL;
		goto out;
	}

	len = SHA256_DIGEST_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);

	if (len != SHA256_DIGEST_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha256_ops));
	atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));

	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
static int pscnv_vram_to_host(struct pscnv_chunk *vram)
{
	struct pscnv_bo *bo = vram->bo;
	struct drm_device *dev = bo->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pscnv_chunk sysram; /* temporarily on stack */
	struct pscnv_mm_node *primary_node = bo->primary_node;
	struct pscnv_vspace *vs = NULL;
	int res;

	if (!dev_priv->dma) {
		pscnv_dma_init(dev);
	}

	if (!dev_priv->dma) {
		NV_ERROR(dev, "pscnv_vram_to_host: no DMA available\n");
		return -EINVAL;
	}

	if (pscnv_chunk_expect_alloc_type(vram, PSCNV_CHUNK_VRAM,
					  "pscnv_vram_to_host")) {
		return -EINVAL;
	}

	if (!primary_node && pscnv_swapping_debug >= 1) {
		NV_INFO(dev, "pscnv_swapping_replace: BO %08x/%d-%u has no "
			"primary node attached, Strange.\n",
			bo->cookie, bo->serial, vram->idx);
	}

	if (primary_node)
		vs = primary_node->vspace;

	memset(&sysram, 0, sizeof(struct pscnv_chunk));
	sysram.flags = vram->flags | PSCNV_CHUNK_SWAPPED;
	sysram.bo = bo;
	sysram.idx = vram->idx;

	/* increases vram_swapped */
	res = pscnv_sysram_alloc_chunk(&sysram);
	if (res) {
		NV_ERROR(dev, "pscnv_vram_to_host: pscnv_sysram_alloc_chunk "
			"failed on %08x/%d-%u\n",
			bo->cookie, bo->serial, sysram.idx);
		goto fail_sysram_alloc;
	}

	res = pscnv_dma_chunk_to_chunk(vram, &sysram, PSCNV_DMA_ASYNC);
	if (res) {
		NV_INFO(dev, "pscnv_vram_to_host: failed to DMA- Transfer!\n");
		goto fail_dma;
	}

	//pscnv_swapping_memdump(sysram);

	/* this overwrites existing PTE */
	if (vs) {
		dev_priv->vm->do_unmap(vs,
			primary_node->start + vram->idx * dev_priv->chunk_size,
			pscnv_chunk_size(vram));

		res = dev_priv->vm->do_map_chunk(vs, &sysram,
			primary_node->start + sysram.idx * dev_priv->chunk_size);

		if (res) {
			NV_INFO(dev, "pscnv_vram_to_host: failed to replace mapping\n");
			goto fail_map_chunk;
		}
	}

	pscnv_vram_free_chunk(vram);

	/* vram chunk is unallocated now, replace its values with the sysram
	 * chunk */
	vram->alloc_type = sysram.alloc_type;
	vram->flags = sysram.flags;
	vram->pages = sysram.pages;
	/* refcnt of sysram now belongs to the vram bo, it will unref it,
	 * when it gets free'd itself */

	return 0;

fail_map_chunk:
	/* reset PTEs to old value, just to be safe */
	if (vs) {
		dev_priv->vm->do_unmap(vs,
			primary_node->start + sysram.idx * dev_priv->chunk_size,
			pscnv_chunk_size(&sysram));

		dev_priv->vm->do_map_chunk(vs, vram,
			primary_node->start + vram->idx * dev_priv->chunk_size);
	}

fail_dma:
	pscnv_sysram_free_chunk(&sysram);

fail_sysram_alloc:
	if (vram->bo->client)
		atomic64_add(pscnv_chunk_size(vram),
			     &vram->bo->client->vram_demand);

	return res;
}
/*
 * Track sum of all purrs across all processors. This is used to further
 * calculate usage values by different applications
 */
static void cpu_get_purr(void *arg)
{
	atomic64_t *sum = arg;

	atomic64_add(mfspr(SPRN_PURR), sum);
}
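/*
 * A minimal usage sketch, not taken from the surrounding file: the caller is
 * assumed to run cpu_get_purr() on every online CPU via on_each_cpu() and then
 * read back the accumulated PURR total. The function name below is illustrative;
 * only cpu_get_purr(), on_each_cpu() and the atomic64_* helpers come from the
 * code above or the kernel API.
 */
static u64 sample_total_purr(void)
{
	atomic64_t purr_sum = ATOMIC64_INIT(0);

	/* run the accumulator on each CPU and wait for completion */
	on_each_cpu(cpu_get_purr, &purr_sum, 1);

	return atomic64_read(&purr_sum);
}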
static int generate_pat(u8 *iv,
			struct aead_request *req,
			struct nx_crypto_ctx *nx_ctx,
			unsigned int authsize,
			unsigned int nbytes,
			u8 *out)
{
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int iauth_len = 0;
	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
	int rc;
	unsigned int max_sg_len;

	/* zero the ctr value */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	/* page 78 of nx_wb.pdf has,
	 * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes
	 * in length. If a full message is used, the AES CCA implementation
	 * restricts the maximum AAD length to 2^32 -1 bytes.
	 * If partial messages are used, the implementation supports
	 * 2^64 -1 bytes maximum AAD length.
	 *
	 * However, in the cryptoapi's aead_request structure,
	 * assoclen is an unsigned int, thus it cannot hold a length
	 * value greater than 2^32 - 1.
	 * Thus the AAD is further constrained by this and is never
	 * greater than 2^32.
	 */

	if (!req->assoclen) {
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
	} else if (req->assoclen <= 14) {
		/* if associated data is 14 bytes or less, we do 1 GCM
		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
		 * which is fed in through the source buffers here */
		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
		b1 = nx_ctx->priv.ccm.iauth_tag;
		iauth_len = req->assoclen;
	} else if (req->assoclen <= 65280) {
		/* if associated data is less than (2^16 - 2^8), we construct
		 * B1 differently and feed in the associated data to a CCA
		 * operation */
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 14;
	} else {
		b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
		b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
		iauth_len = 10;
	}

	/* generate B0 */
	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
	if (rc)
		return rc;

	/* generate B1:
	 * add control info for associated data
	 * RFC 3610 and NIST Special Publication 800-38C
	 */
	if (b1) {
		memset(b1, 0, 16);
		if (req->assoclen <= 65280) {
			*(u16 *)b1 = (u16)req->assoclen;
			scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
						 iauth_len,
						 SCATTERWALK_FROM_SG);
		} else {
			*(u16 *)b1 = (u16)(0xfffe);
			*(u32 *)&b1[2] = (u32)req->assoclen;
			scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
						 iauth_len,
						 SCATTERWALK_FROM_SG);
		}
	}

	/* now copy any remaining AAD to scatterlist and call nx... */
	if (!req->assoclen) {
		return rc;
	} else if (req->assoclen <= 14) {
		unsigned int len = 16;

		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
					    nx_ctx->ap->sglen);

		if (len != 16)
			return -EINVAL;

		/* inlen should be negative, indicating to phyp that its a
		 * pointer to an sg list */
		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
					sizeof(struct nx_sg);
		nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) *
					sizeof(struct nx_sg);

		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
		NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;

		result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));

	} else {
		unsigned int processed = 0, to_process;

		processed += iauth_len;

		/* page_limit: number of sg entries that fit on one page */
		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
				   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
		max_sg_len = min_t(u64, max_sg_len,
				   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

		do {
			to_process = min_t(u32, req->assoclen - processed,
					   nx_ctx->ap->databytelen);

			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
						    nx_ctx->ap->sglen,
						    req->assoc, processed,
						    &to_process);

			if ((to_process + processed) < req->assoclen) {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
					NX_FDM_INTERMEDIATE;
			} else {
				NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
					~NX_FDM_INTERMEDIATE;
			}

			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
						sizeof(struct nx_sg);

			result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;

			rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
			if (rc)
				return rc;

			memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
			       nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
			       AES_BLOCK_SIZE);

			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;

			atomic_inc(&(nx_ctx->stats->aes_ops));
			atomic64_add(req->assoclen,
				     &(nx_ctx->stats->aes_bytes));

			processed += to_process;
		} while (processed < req->assoclen);

		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
	}

	memcpy(out, result, AES_BLOCK_SIZE);

	return rc;
}