static int ccp5_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, qim, i;
	u64 status;
	u32 status_lo, status_hi;
	int ret;

	/* Find available queues */
	qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
			 ccp->name, i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;
		mutex_init(&cmd_q->q_mutex);

		/* Page alignment satisfies our needs for N <= 128 */
		BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
		cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
						   &cmd_q->qbase_dma,
						   GFP_KERNEL);
		if (!cmd_q->qbase) {
			dev_err(dev, "unable to allocate command queue\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q->qidx = 0;

		/* Preset some register values and masks that are
		 * queue number dependent
		 */
		cmd_q->reg_control = ccp->io_regs +
				     CMD5_Q_STATUS_INCR * (i + 1);
		cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE;
		cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE;
		cmd_q->reg_int_enable = cmd_q->reg_control +
					CMD5_Q_INT_ENABLE_BASE;
		cmd_q->reg_interrupt_status = cmd_q->reg_control +
					      CMD5_Q_INTERRUPT_STATUS_BASE;
		cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE;
		cmd_q->reg_int_status = cmd_q->reg_control +
					CMD5_Q_INT_STATUS_BASE;
		cmd_q->reg_dma_status = cmd_q->reg_control +
					CMD5_Q_DMA_STATUS_BASE;
		cmd_q->reg_dma_read_status = cmd_q->reg_control +
					     CMD5_Q_DMA_READ_STATUS_BASE;
		cmd_q->reg_dma_write_status = cmd_q->reg_control +
					      CMD5_Q_DMA_WRITE_STATUS_BASE;

		init_waitqueue_head(&cmd_q->int_queue);

		dev_dbg(dev, "queue #%u available\n", i);
	}

	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Turn off the queues and disable interrupts until ready */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		cmd_q->qcontrol = 0; /* Start with nothing */
		iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

		/* Disable the interrupts */
		iowrite32(0x00, cmd_q->reg_int_enable);
		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);

		/* Clear the interrupts */
		iowrite32(ALL_INTERRUPTS, cmd_q->reg_interrupt_status);
	}

	dev_dbg(dev, "Requesting an IRQ...\n");
	/* Request an irq */
	ret = ccp->get_irq(ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	dev_dbg(dev, "Loading LSB map...\n");
	/* Copy the private LSB mask to the public registers */
	status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
	status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
	iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
	iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
	status = ((u64)status_hi<<30) | (u64)status_lo;

	dev_dbg(dev, "Configuring virtual queues...\n");
	/* Configure size of each virtual queue accessible to host */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		u32 dma_addr_lo;
		u32 dma_addr_hi;

		cmd_q = &ccp->cmd_q[i];

		cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;

		cmd_q->qdma_tail = cmd_q->qbase_dma;
		dma_addr_lo = low_address(cmd_q->qdma_tail);
		iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo);
		iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo);

		dma_addr_hi = high_address(cmd_q->qdma_tail);
		cmd_q->qcontrol |= (dma_addr_hi << 16);
		iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

		/* Find the LSB regions accessible to the queue */
		ccp_find_lsb_regions(cmd_q, status);

		cmd_q->lsb = -1; /* Unassigned value */
	}

	dev_dbg(dev, "Assigning LSBs...\n");
	ret = ccp_assign_lsbs(ccp);
	if (ret) {
		dev_err(dev, "Unable to assign LSBs (%d)\n", ret);
		goto e_irq;
	}

	/* Optimization: pre-allocate LSB slots for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
		ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
	}

	dev_dbg(dev, "Starting threads...\n");
	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "%s-q%u", ccp->name, cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
		wake_up_process(kthread);
	}

	dev_dbg(dev, "Enabling interrupts...\n");
	/* Enable interrupts */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];
		iowrite32(ALL_INTERRUPTS, cmd_q->reg_int_enable);
	}

	dev_dbg(dev, "Registering device...\n");
	/* Put this on the unit list to make it available */
	ccp_add_device(ccp);

	ret = ccp_register_rng(ccp);
	if (ret)
		goto e_kthread;

	/* Register the DMA engine support */
	ret = ccp_dmaengine_register(ccp);
	if (ret)
		goto e_hwrng;

	return 0;

e_hwrng:
	ccp_unregister_rng(ccp);

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

e_irq:
	ccp->free_irq(ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}
static int ccp_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, qim, i;
	int ret;

	/* Find available queues */
	qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
			 ccp->name, i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;

		/* Reserve 2 KSB regions for the queue */
		cmd_q->sb_key = KSB_START + ccp->sb_start++;
		cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
		ccp->sb_count -= 2;

		/* Preset some register values and masks that are
		 * queue number dependent
		 */
		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
				    (CMD_Q_STATUS_INCR * i);
		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
					(CMD_Q_STATUS_INCR * i);
		cmd_q->int_ok = 1 << (i * 2);
		cmd_q->int_err = 1 << ((i * 2) + 1);

		cmd_q->free_slots = ccp_get_free_slots(cmd_q);

		init_waitqueue_head(&cmd_q->int_queue);

		/* Build queue interrupt mask (two interrupts per queue) */
		qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
		/* For arm64 set the recommended queue cache settings */
		iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
			  (CMD_Q_CACHE_INC * i));
#endif

		dev_dbg(dev, "queue #%u available\n", i);
	}
	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Disable and clear interrupts until ready */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Request an irq */
	ret = ccp->get_irq(ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	/* Initialize the queues used to wait for KSB space and suspend */
	init_waitqueue_head(&ccp->sb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	dev_dbg(dev, "Starting threads...\n");
	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "%s-q%u", ccp->name, cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
		wake_up_process(kthread);
	}

	dev_dbg(dev, "Enabling interrupts...\n");
	/* Enable interrupts */
	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);

	dev_dbg(dev, "Registering device...\n");
	ccp_add_device(ccp);

	ret = ccp_register_rng(ccp);
	if (ret)
		goto e_kthread;

	/* Register the DMA engine support */
	ret = ccp_dmaengine_register(ccp);
	if (ret)
		goto e_hwrng;

	return 0;

e_hwrng:
	ccp_unregister_rng(ccp);

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	ccp->free_irq(ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}