void
otx_cpt_stop_device(void *dev)
{
	int rc;
	uint32_t pending, retries = 5;
	struct cpt_vf *cptvf = (struct cpt_vf *)dev;

	/* Wait for pending entries to complete */
	pending = otx_cpt_read_vq_doorbell(cptvf);
	while (pending) {
		CPT_LOG_DP_DEBUG("%s: Waiting for pending %u cmds to complete",
				 cptvf->dev_name, pending);
		sleep(1);
		pending = otx_cpt_read_vq_doorbell(cptvf);
		retries--;
		if (!retries)
			break;
	}

	if (!retries && pending) {
		CPT_LOG_ERR("%s: Timeout waiting for commands(%u)",
			    cptvf->dev_name, pending);
		return;
	}

	rc = otx_cpt_send_vf_down(cptvf);
	if (rc) {
		CPT_LOG_ERR("Failed to bring down vf %s, rc %d",
			    cptvf->dev_name, rc);
		return;
	}
}
static int
cpt_vq_init(struct cpt_vf *cptvf, uint8_t group)
{
	int err;

	/* Convey VQ LEN to PF */
	err = otx_cpt_send_vq_size_msg(cptvf);
	if (err) {
		CPT_LOG_ERR("%s: PF not responding to QLEN msg",
			    cptvf->dev_name);
		err = -EBUSY;
		goto cleanup;
	}

	/* CPT VF device initialization */
	otx_cpt_vfvq_init(cptvf);

	/* Send msg to PF to assign current Q to required group */
	cptvf->vfgrp = group;
	err = otx_cpt_send_vf_grp_msg(cptvf, group);
	if (err) {
		CPT_LOG_ERR("%s: PF not responding to VF_GRP msg",
			    cptvf->dev_name);
		err = -EBUSY;
		goto cleanup;
	}

	CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__);
	return 0;

cleanup:
	return err;
}
int
otx_cpt_start_device(void *dev)
{
	int rc;
	struct cpt_vf *cptvf = (struct cpt_vf *)dev;

	rc = otx_cpt_send_vf_up(cptvf);
	if (rc) {
		CPT_LOG_ERR("Failed to mark CPT VF device %s UP, rc = %d",
			    cptvf->dev_name, rc);
		return -EFAULT;
	}

	if ((cptvf->vftype != SE_TYPE) && (cptvf->vftype != AE_TYPE)) {
		CPT_LOG_ERR("Fatal error, unexpected vf type %u, for CPT VF "
			    "device %s", cptvf->vftype, cptvf->dev_name);
		return -ENOENT;
	}

	return 0;
}
int
otx_cpt_put_resource(struct cpt_instance *instance)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct rte_memzone *rz;

	if (!cptvf) {
		CPT_LOG_ERR("Invalid CPTVF handle");
		return -EINVAL;
	}

	CPT_LOG_DP_DEBUG("Releasing cpt device %s", cptvf->dev_name);

	rz = (struct rte_memzone *)instance->rsvd;
	rte_memzone_free(rz);

	return 0;
}
static int
otx_cpt_pci_probe(struct rte_pci_driver *pci_drv,
		  struct rte_pci_device *pci_dev)
{
	struct rte_cryptodev *cryptodev;
	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	if (pci_drv == NULL)
		return -ENODEV;

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	cryptodev = rte_cryptodev_pmd_allocate(name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	cryptodev->device = &pci_dev->device;
	cryptodev->device->driver = &pci_drv->driver;
	cryptodev->driver_id = otx_cryptodev_driver_id;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* init logtype used in common */
	otx_cpt_logtype_init();

	/* Invoke PMD device initialization function */
	retval = otx_cpt_dev_create(cryptodev);
	if (retval == 0)
		return 0;

	CPT_LOG_ERR("[DRV %s]: Failed to create device "
		    "(vendor_id: 0x%x device_id: 0x%x)",
		    pci_drv->driver.name,
		    (unsigned int)pci_dev->id.vendor_id,
		    (unsigned int)pci_dev->id.device_id);

	cryptodev->attached = RTE_CRYPTODEV_DETACHED;

	return -ENXIO;
}
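/*
 * A minimal sketch (compiled out) of how a probe callback like the one
 * above is typically wired into the DPDK PCI bus. The PCI device ID
 * values and the otx_cpt_pci_remove() teardown counterpart are
 * assumptions for illustration; the real ID table and remove handler
 * live elsewhere in the driver.
 */
#ifdef OTX_CPT_REGISTRATION_SKETCH
static const struct rte_pci_id pci_id_cpt_table[] = {
	/* 0x177d is Cavium's vendor ID; 0xa041 is an assumed CPT VF ID */
	{ RTE_PCI_DEVICE(0x177d, 0xa041) },
	{ .vendor_id = 0 }, /* sentinel */
};

static struct rte_pci_driver otx_cryptodev_pmd = {
	.id_table = pci_id_cpt_table,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = otx_cpt_pci_probe,
	.remove = otx_cpt_pci_remove, /* assumed teardown counterpart */
};

RTE_PMD_REGISTER_PCI(crypto_octeontx, otx_cryptodev_pmd);
#endif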
static int
otx_cpt_vf_init(struct cpt_vf *cptvf)
{
	int ret = 0;

	/* Check ready with PF; gets chip ID / device ID from PF if ready */
	ret = otx_cpt_check_pf_ready(cptvf);
	if (ret) {
		CPT_LOG_ERR("%s: PF not responding to READY msg",
			    cptvf->dev_name);
		ret = -EBUSY;
		goto exit;
	}

	CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__);

exit:
	return ret;
}
int
otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name)
{
	memset(cptvf, 0, sizeof(struct cpt_vf));

	/* Bar0 base address */
	cptvf->reg_base = reg_base;

	/*
	 * Copy at most 31 bytes; the memset above guarantees the name
	 * stays NUL-terminated even if the source is longer.
	 */
	strncpy(cptvf->dev_name, name, 31);

	cptvf->pdev = pdev;

	/* To clear if there are any pending mbox msgs */
	otx_cpt_poll_misc(cptvf);

	if (otx_cpt_vf_init(cptvf)) {
		CPT_LOG_ERR("Failed to initialize CPT VF device");
		return -1;
	}

	return 0;
}
int
otx_cpt_get_resource(void *dev, uint8_t group, struct cpt_instance **instance)
{
	int ret = -ENOENT, len, qlen, i;
	int chunk_len, chunks, chunk_size;
	struct cpt_vf *cptvf = (struct cpt_vf *)dev;
	struct cpt_instance *cpt_instance;
	struct command_chunk *chunk_head = NULL, *chunk_prev = NULL;
	struct command_chunk *chunk = NULL;
	uint8_t *mem;
	const struct rte_memzone *rz;
	uint64_t dma_addr = 0, alloc_len, used_len;
	uint64_t *next_ptr;
	uint64_t pg_sz = sysconf(_SC_PAGESIZE);

	CPT_LOG_DP_DEBUG("Initializing cpt resource %s", cptvf->dev_name);

	cpt_instance = &cptvf->instance;

	memset(&cptvf->cqueue, 0, sizeof(cptvf->cqueue));
	memset(&cptvf->pqueue, 0, sizeof(cptvf->pqueue));

	/* Chunks are of fixed size buffers */
	chunks = DEFAULT_CMD_QCHUNKS;
	chunk_len = DEFAULT_CMD_QCHUNK_SIZE;

	qlen = chunks * chunk_len;
	/* Chunk size includes 8 bytes of next chunk ptr */
	chunk_size = chunk_len * CPT_INST_SIZE + CPT_NEXT_CHUNK_PTR_SIZE;

	/* For command chunk structures */
	len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8);

	/* For pending queue */
	len += qlen * RTE_ALIGN(sizeof(struct rid), 8);

	/* So that instruction queues start as pg size aligned */
	len = RTE_ALIGN(len, pg_sz);

	/* For Instruction queues */
	len += chunks * RTE_ALIGN(chunk_size, 128);

	/* Wastage after instruction queues */
	len = RTE_ALIGN(len, pg_sz);

	rz = rte_memzone_reserve_aligned(cptvf->dev_name, len, cptvf->node,
					 RTE_MEMZONE_SIZE_HINT_ONLY |
					 RTE_MEMZONE_256MB,
					 RTE_CACHE_LINE_SIZE);
	if (!rz) {
		ret = rte_errno;
		goto cleanup;
	}

	mem = rz->addr;
	dma_addr = rz->phys_addr;
	alloc_len = len;

	memset(mem, 0, len);

	cpt_instance->rsvd = (uintptr_t)rz;

	/* Pending queue setup */
	cptvf->pqueue.rid_queue = (struct rid *)mem;
	cptvf->pqueue.enq_tail = 0;
	cptvf->pqueue.deq_head = 0;
	cptvf->pqueue.pending_count = 0;

	mem += qlen * RTE_ALIGN(sizeof(struct rid), 8);
	len -= qlen * RTE_ALIGN(sizeof(struct rid), 8);
	dma_addr += qlen * RTE_ALIGN(sizeof(struct rid), 8);

	/* Alignment wastage */
	used_len = alloc_len - len;
	mem += RTE_ALIGN(used_len, pg_sz) - used_len;
	len -= RTE_ALIGN(used_len, pg_sz) - used_len;
	dma_addr += RTE_ALIGN(used_len, pg_sz) - used_len;

	/* Init instruction queues */
	chunk_head = &cptvf->cqueue.chead[0];

	chunk_prev = NULL;
	for (i = 0; i < DEFAULT_CMD_QCHUNKS; i++) {
		int csize;

		chunk = &cptvf->cqueue.chead[i];
		chunk->head = mem;
		chunk->dma_addr = dma_addr;

		csize = RTE_ALIGN(chunk_size, 128);
		mem += csize;
		dma_addr += csize;
		len -= csize;

		if (chunk_prev) {
			next_ptr = (uint64_t *)(chunk_prev->head +
						chunk_size - 8);
			*next_ptr = (uint64_t)chunk->dma_addr;
		}
		chunk_prev = chunk;
	}
	/* Circular loop: last chunk points back to the first */
	next_ptr = (uint64_t *)(chunk_prev->head + chunk_size - 8);
	*next_ptr = (uint64_t)chunk_head->dma_addr;

	assert(!len);

	/* This is used for CPT(0)_PF_Q(0..15)_CTL.size config */
	cptvf->qsize = chunk_size / 8;
	cptvf->cqueue.qhead = chunk_head->head;
	cptvf->cqueue.idx = 0;
	cptvf->cqueue.cchunk = 0;

	if (cpt_vq_init(cptvf, group)) {
		CPT_LOG_ERR("Failed to initialize CPT VQ of device %s",
			    cptvf->dev_name);
		ret = -EBUSY;
		goto cleanup;
	}

	*instance = cpt_instance;

	CPT_LOG_DP_DEBUG("Crypto device (%s) initialized", cptvf->dev_name);

	return 0;

cleanup:
	rte_memzone_free(rz);
	*instance = NULL;
	return ret;
}
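/*
 * A minimal usage sketch (compiled out, not part of the driver) showing
 * the expected bring-up/teardown order of the helpers above:
 * otx_cpt_hw_init() first, then otx_cpt_get_resource() to carve out the
 * pending and instruction queues, otx_cpt_start_device() to signal the
 * PF, and the reverse sequence on shutdown. The rte_zmalloc()-based
 * allocation and the group value 0 are assumptions for illustration.
 */
#ifdef OTX_CPT_USAGE_SKETCH
#include <rte_malloc.h>

static int
otx_cpt_bringup_example(void *pdev, void *reg_base, char *name)
{
	struct cpt_instance *instance = NULL;
	struct cpt_vf *cptvf;
	int ret;

	cptvf = rte_zmalloc("otx_cptvf", sizeof(*cptvf),
			    RTE_CACHE_LINE_SIZE);
	if (cptvf == NULL)
		return -ENOMEM;

	if (otx_cpt_hw_init(cptvf, pdev, reg_base, name)) {
		ret = -EIO;
		goto fail;
	}

	/* Group 0 is an arbitrary choice for this sketch */
	ret = otx_cpt_get_resource(cptvf, 0, &instance);
	if (ret)
		goto fail;

	ret = otx_cpt_start_device(cptvf);
	if (ret) {
		otx_cpt_put_resource(instance);
		goto fail;
	}

	/* ... enqueue/dequeue crypto ops via the instance ... */

	otx_cpt_stop_device(cptvf);
	otx_cpt_put_resource(instance);
	ret = 0;
fail:
	rte_free(cptvf);
	return ret;
}
#endif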