/** Get multi buffer session */
static struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
    struct aesni_mb_session *sess = NULL;

    if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
        if (unlikely(op->sym->session->dev_type !=
                RTE_CRYPTODEV_AESNI_MB_PMD))
            return NULL;

        sess = (struct aesni_mb_session *)op->sym->session->_private;
    } else {
        void *_sess = NULL;

        if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
            return NULL;

        sess = (struct aesni_mb_session *)
            ((struct rte_cryptodev_sym_session *)_sess)->_private;

        if (unlikely(aesni_mb_set_session_parameters(qp->ops, sess,
                op->sym->xform) != 0)) {
            rte_mempool_put(qp->sess_mp, _sess);
            sess = NULL;
        }
    }

    return sess;
}
static void
submit_single_io(struct ns_entry *entry)
{
    struct perf_task *task = NULL;
    uint64_t offset_in_ios;
    int rc;

    /* Bail out if the task pool is exhausted; task would be NULL below. */
    if (rte_mempool_get(task_pool, (void **)&task) != 0) {
        fprintf(stderr, "task_pool rte_mempool_get failed\n");
        exit(1);
    }

    task->entry = entry;

    if (g_is_random) {
        offset_in_ios = rand_r(&seed) % entry->size_in_ios;
    } else {
        offset_in_ios = entry->offset_in_ios++;
        if (entry->offset_in_ios == entry->size_in_ios) {
            entry->offset_in_ios = 0;
        }
    }

    if ((g_rw_percentage == 100) ||
        (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
        rc = nvme_ns_cmd_read(entry->ns, task->buf,
                              offset_in_ios * entry->io_size_blocks,
                              entry->io_size_blocks, io_complete, task);
    } else {
        rc = nvme_ns_cmd_write(entry->ns, task->buf,
                               offset_in_ios * entry->io_size_blocks,
                               entry->io_size_blocks, io_complete, task);
    }

    if (rc != 0) {
        fprintf(stderr, "starting I/O failed\n");
    }

    entry->current_queue_depth++;
}
int anscli_ring_send(void *buff, int buff_len)
{
    void *msg;

    if (buff_len > ANS_RING_MSG_SIZE) {
        printf("Too long message size, max is %d \n", ANS_RING_MSG_SIZE);
        return ANS_EMSGPOOL;
    }

    if (rte_mempool_get(anscli_message_pool, &msg) < 0) {
        printf("Getting message failed \n");
        return ANS_EMSGPOOL;
    }

    rte_memcpy(msg, buff, buff_len);

    if (rte_ring_enqueue(anscli_ring_tx, msg) < 0) {
        printf("Sending message to ANS stack failed \n");
        rte_mempool_put(anscli_message_pool, msg);
        return ANS_EMSGPOOL;
    }

    return 0;
}
/** Get multi buffer session */
static struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *crypto_op)
{
    struct aesni_mb_session *sess;

    if (crypto_op->type == RTE_CRYPTO_OP_WITH_SESSION) {
        if (unlikely(crypto_op->session->type !=
                RTE_CRYPTODEV_AESNI_MB_PMD))
            return NULL;

        sess = (struct aesni_mb_session *)crypto_op->session->_private;
    } else {
        struct rte_cryptodev_session *c_sess = NULL;

        if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
            return NULL;

        sess = (struct aesni_mb_session *)c_sess->_private;

        if (unlikely(aesni_mb_set_session_parameters(qp->ops, sess,
                crypto_op->xform) != 0)) {
            /* Parameter setup failed; hand the session back to the pool. */
            rte_mempool_put(qp->sess_mp, c_sess);
            return NULL;
        }
    }

    return sess;
}
int netdpcmd_ring_send(void *buff, int buff_len)
{
    void *msg;

    if (buff_len > NETDP_RING_MSG_SIZE) {
        printf("Too long message size, max is %d \n", NETDP_RING_MSG_SIZE);
        return NETDP_EMSGPOOL;
    }

    if (rte_mempool_get(netdpcmd_message_pool, &msg) < 0) {
        printf("Getting message failed \n");
        return NETDP_EMSGPOOL;
    }

    rte_memcpy(msg, buff, buff_len);

    if (rte_ring_enqueue(netdpcmd_ring_tx, msg) < 0) {
        printf("Sending message to NETDP stack failed \n");
        rte_mempool_put(netdpcmd_message_pool, msg);
        return NETDP_EMSGPOOL;
    }

    return 0;
}
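The two ring-send helpers above (anscli_ring_send and netdpcmd_ring_send) are the same pattern: take a buffer from a mempool, copy the payload in, and hand ownership across an rte_ring. The consumer on the far side must return the buffer to the same pool. A minimal sketch of that receive side, assuming a hypothetical netdpcmd_ring_rx ring (the pool name is the one used by the sender above):

/* Sketch of the receive side of the ring-send pattern above. The ring name
 * netdpcmd_ring_rx is an assumption; the caller must provide a buffer of
 * at least NETDP_RING_MSG_SIZE bytes. */
int netdpcmd_ring_recv(void *buff, int buff_len)
{
    void *msg;

    if (rte_ring_dequeue(netdpcmd_ring_rx, &msg) < 0)
        return NETDP_EMSGPOOL;  /* nothing queued */

    rte_memcpy(buff, msg, buff_len);

    /* Ownership came across the ring; give the buffer back to the pool. */
    rte_mempool_put(netdpcmd_message_pool, msg);
    return 0;
}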
static void
submit_single_io(struct ns_worker_ctx *ns_ctx)
{
    struct perf_task *task = NULL;
    uint64_t offset_in_ios;
    int rc;
    struct ns_entry *entry = ns_ctx->entry;

    if (rte_mempool_get(task_pool, (void **)&task) != 0) {
        fprintf(stderr, "task_pool rte_mempool_get failed\n");
        exit(1);
    }

    task->ns_ctx = ns_ctx;

    if (g_is_random) {
        offset_in_ios = rand_r(&seed) % entry->size_in_ios;
    } else {
        offset_in_ios = ns_ctx->offset_in_ios++;
        if (ns_ctx->offset_in_ios == entry->size_in_ios) {
            ns_ctx->offset_in_ios = 0;
        }
    }

    task->submit_tsc = rte_get_timer_cycles();

    if ((g_rw_percentage == 100) ||
        (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
#if HAVE_LIBAIO
        if (entry->type == ENTRY_TYPE_AIO_FILE) {
            rc = aio_submit(ns_ctx->u.aio.ctx, &task->iocb, entry->u.aio.fd,
                            IO_CMD_PREAD, task->buf, g_io_size_bytes,
                            offset_in_ios * g_io_size_bytes, task);
        } else
#endif
        {
            rc = spdk_nvme_ns_cmd_read(entry->u.nvme.ns, ns_ctx->u.nvme.qpair,
                                       task->buf,
                                       offset_in_ios * entry->io_size_blocks,
                                       entry->io_size_blocks,
                                       io_complete, task, 0);
        }
    } else {
#if HAVE_LIBAIO
        if (entry->type == ENTRY_TYPE_AIO_FILE) {
            rc = aio_submit(ns_ctx->u.aio.ctx, &task->iocb, entry->u.aio.fd,
                            IO_CMD_PWRITE, task->buf, g_io_size_bytes,
                            offset_in_ios * g_io_size_bytes, task);
        } else
#endif
        {
            rc = spdk_nvme_ns_cmd_write(entry->u.nvme.ns, ns_ctx->u.nvme.qpair,
                                        task->buf,
                                        offset_in_ios * entry->io_size_blocks,
                                        entry->io_size_blocks,
                                        io_complete, task, 0);
        }
    }

    if (rc != 0) {
        fprintf(stderr, "starting I/O failed\n");
    }

    ns_ctx->current_queue_depth++;
}
/**
 * Create driver private_xform data.
 *
 * @param dev
 *   Compressdev device
 * @param xform
 *   xform data from application
 * @param private_xform
 *   ptr where handle of pmd's private_xform data should be stored
 * @return
 *   - 0 on success, with a valid private_xform handle stored
 *   - -EINVAL if input parameters are invalid
 *   - -ENOTSUP if the comp device does not support the comp transform
 *   - -ENOMEM if the private_xform could not be allocated
 */
int
qat_comp_private_xform_create(struct rte_compressdev *dev,
        const struct rte_comp_xform *xform,
        void **private_xform)
{
    struct qat_comp_dev_private *qat = dev->data->dev_private;

    if (unlikely(private_xform == NULL)) {
        QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
        return -EINVAL;
    }
    if (unlikely(qat->xformpool == NULL)) {
        QAT_LOG(ERR, "QAT device has no private_xform mempool");
        return -ENOMEM;
    }
    if (rte_mempool_get(qat->xformpool, private_xform)) {
        QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
        return -ENOMEM;
    }

    struct qat_comp_xform *qat_xform =
            (struct qat_comp_xform *)*private_xform;

    if (xform->type == RTE_COMP_COMPRESS) {
        if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
            ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT) &&
                qat->interm_buff_mz == NULL))
            qat_xform->qat_comp_request_type =
                    QAT_COMP_REQUEST_FIXED_COMP_STATELESS;
        else if ((xform->compress.deflate.huffman ==
                        RTE_COMP_HUFFMAN_DYNAMIC ||
                  xform->compress.deflate.huffman ==
                        RTE_COMP_HUFFMAN_DEFAULT) &&
                 qat->interm_buff_mz != NULL)
            qat_xform->qat_comp_request_type =
                    QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS;
        else {
            QAT_LOG(ERR,
                "IM buffers needed for dynamic deflate. Set size in config file");
            /* Return the unused xform to the pool before failing. */
            rte_mempool_put(qat->xformpool, *private_xform);
            return -EINVAL;
        }

        qat_xform->checksum_type = xform->compress.chksum;
    } else {
        qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
        qat_xform->checksum_type = xform->decompress.chksum;
    }

    if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
        QAT_LOG(ERR, "QAT: Problem with setting compression");
        rte_mempool_put(qat->xformpool, *private_xform);
        return -EINVAL;
    }

    return 0;
}
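The create path above only ever takes objects from qat->xformpool, so a matching free callback has to return them. A sketch of that counterpart, assuming rte_mempool_from_obj() is available to recover the owning pool from the object pointer:

/* Sketch of the matching free path: recover the owning mempool from the
 * object itself and hand the xform back. Assumes private_xform was
 * allocated by qat_comp_private_xform_create() above. */
int
qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
        void *private_xform)
{
    struct qat_comp_xform *qat_xform = private_xform;

    if (qat_xform == NULL)
        return -EINVAL;

    memset(qat_xform, 0, sizeof(*qat_xform));
    rte_mempool_put(rte_mempool_from_obj(qat_xform), qat_xform);
    return 0;
}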
void *
spdk_mempool_get(struct spdk_mempool *mp)
{
    void *ele = NULL;

    rte_mempool_get((struct rte_mempool *)mp, &ele);

    return ele;
}
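The put side of this SPDK env wrapper is equally thin. A minimal sketch of the counterpart, mirroring the same cast-through pattern (treat the body as illustrative rather than the canonical SPDK source):

void
spdk_mempool_put(struct spdk_mempool *mp, void *ele)
{
    rte_mempool_put((struct rte_mempool *)mp, ele);
}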
/** Get multi buffer session */
static inline struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
    struct aesni_mb_session *sess = NULL;

    if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
        if (likely(op->sym->session != NULL))
            sess = (struct aesni_mb_session *)
                    get_sym_session_private_data(
                            op->sym->session,
                            cryptodev_driver_id);
    } else {
        void *_sess = NULL;
        void *_sess_private_data = NULL;

        if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
            return NULL;

        if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data)) {
            /* Don't leak the session object on a partial allocation. */
            rte_mempool_put(qp->sess_mp, _sess);
            return NULL;
        }

        sess = (struct aesni_mb_session *)_sess_private_data;

        if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
                sess, op->sym->xform) != 0)) {
            rte_mempool_put(qp->sess_mp, _sess);
            rte_mempool_put(qp->sess_mp, _sess_private_data);
            sess = NULL;
        } else {
            /* Only install the session once it is fully set up. */
            op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
            set_sym_session_private_data(op->sym->session,
                    cryptodev_driver_id, _sess_private_data);
        }
    }

    if (unlikely(sess == NULL))
        op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;

    return sess;
}
int
onvm_nf_send_msg(uint16_t dest, uint8_t msg_type, void *msg_data)
{
    int ret;
    struct onvm_nf_msg *msg;

    ret = rte_mempool_get(nf_msg_pool, (void **)(&msg));
    if (ret != 0) {
        RTE_LOG(INFO, APP,
            "Oh the huge manatee! Unable to allocate msg from pool :(\n");
        return ret;
    }

    msg->msg_type = msg_type;
    msg->msg_data = msg_data;

    ret = rte_ring_sp_enqueue(nfs[dest].msg_q, (void *)msg);
    if (ret != 0) {
        /* The ring never took ownership; return the message to the pool. */
        rte_mempool_put(nf_msg_pool, msg);
    }

    return ret;
}
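onvm_nf_send_msg() allocates from nf_msg_pool but enqueues onto the destination NF's ring, so ownership of the message travels with the pointer. A hypothetical drain loop for the receiving NF (the function name and single-consumer assumption are mine, not from the snippet):

/* Hypothetical receiver: drain this NF's message ring and return each
 * message to the shared pool once handled. Assumes one consumer per ring;
 * handle_msg() is a placeholder for application logic. */
static void
onvm_nf_drain_msgs(struct rte_ring *msg_q, struct rte_mempool *nf_msg_pool)
{
    struct onvm_nf_msg *msg;

    while (rte_ring_sc_dequeue(msg_q, (void **)&msg) == 0) {
        /* handle_msg(msg); */
        rte_mempool_put(nf_msg_pool, (void *)msg);
    }
}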
static inline int
perf_producer(void *arg)
{
    struct prod_data *p = arg;
    struct test_perf *t = p->t;
    struct evt_options *opt = t->opt;
    const uint8_t dev_id = p->dev_id;
    const uint8_t port = p->port_id;
    struct rte_mempool *pool = t->pool;
    const uint64_t nb_pkts = t->nb_pkts;
    const uint32_t nb_flows = t->nb_flows;
    uint32_t flow_counter = 0;
    uint64_t count = 0;
    struct perf_elt *m;
    struct rte_event ev;

    if (opt->verbose_level > 1)
        printf("%s(): lcore %d dev_id %d port=%d queue %d\n",
               __func__, rte_lcore_id(), dev_id, port, p->queue_id);

    ev.event = 0;
    ev.op = RTE_EVENT_OP_NEW;
    ev.queue_id = p->queue_id;
    ev.sched_type = t->opt->sched_type_list[0];
    ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
    ev.event_type = RTE_EVENT_TYPE_CPU;
    ev.sub_event_type = 0; /* stage 0 */

    while (count < nb_pkts && t->done == false) {
        if (rte_mempool_get(pool, (void **)&m) < 0)
            continue;

        ev.flow_id = flow_counter++ % nb_flows;
        ev.event_ptr = m;
        m->timestamp = rte_get_timer_cycles();
        while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
            if (t->done)
                break;
            rte_pause();
            m->timestamp = rte_get_timer_cycles();
        }
        count++;
    }

    return 0;
}
int socket_connect(int identifier, struct sock_addr *client_addr)
{
    /* Using a static destination IP for now; in the future it should come
     * from configuration. */
    int i;
    uint8_t ip[4];
    uint32_t DestIp = 0;
    static uint16_t SrcPorts = 0;
    Socket_Send_Msg *Msg = NULL;
    struct tcb *ptcb = get_tcb_by_identifier(identifier);

    ip[0] = 192;
    ip[1] = 168;
    ip[2] = 78;
    ip[3] = 2;

    if (SrcPorts == 0) {
        SrcPorts = 10000;
    }
    SrcPorts++;

    for (i = 0; i < 4; i++) {
        DestIp |= ip[i] << i * 8;
    }

    printf("opening connection connect call\n");

    if (rte_mempool_get(buffer_message_pool, (void **)&Msg) < 0) {
        printf("Failed to get message buffer\n");
        return -1;    /* pool exhausted; nothing was sent */
    }

    Msg->m_Identifier = identifier;
    Msg->m_Msg_Type = CONNECTION_OPEN;

    if (rte_ring_enqueue(ptcb->tcb_socket_ring_send, Msg) < 0) {
        printf("Failed to send message - message discarded\n");
        rte_mempool_put(buffer_message_pool, Msg);
    }

    ptcb->ipv4_src = htonl(client_addr->ip);
    ptcb->sport = client_addr->port;
    ptcb->ipv4_dst = DestIp;
    ptcb->dport = SrcPorts;
    ptcb->next_seq = 1;

    /* Block until the SYN-ACK handler signals the condition variable. */
    pthread_mutex_lock(&(ptcb->mutex));
    ptcb->WaitingOnConnect = 1;
    pthread_cond_wait(&(ptcb->condAccept), &(ptcb->mutex));
    ptcb->WaitingOnConnect = 0;
    pthread_mutex_unlock(&(ptcb->mutex));

    // remove_tcb(identifier);
    return 0;
}
int socket_close(int identifier)
{
    Socket_Send_Msg *Msg = NULL;
    struct tcb *ptcb = get_tcb_by_identifier(identifier);

    printf("closing tcb\n");

    if (rte_mempool_get(buffer_message_pool, (void **)&Msg) < 0) {
        printf("Failed to get message buffer\n");
        return -1;    /* pool exhausted; nothing was sent */
    }

    Msg->m_Identifier = identifier;
    Msg->m_Msg_Type = SOCKET_CLOSE;

    if (rte_ring_enqueue(ptcb->tcb_socket_ring_send, Msg) < 0) {
        printf("Failed to send message - message discarded\n");
        rte_mempool_put(buffer_message_pool, Msg);
    }

    // remove_tcb(identifier);
    return 0;
}
struct spdk_iscsi_task *
spdk_iscsi_task_get(uint32_t *owner_task_ctr, struct spdk_iscsi_task *parent)
{
    struct spdk_iscsi_task *task;
    int rc;

    rc = rte_mempool_get(g_spdk_iscsi.task_pool, (void **)&task);
    if ((rc < 0) || !task) {
        SPDK_ERRLOG("Unable to get task\n");
        abort();
    }

    memset(task, 0, sizeof(*task));
    spdk_scsi_task_construct((struct spdk_scsi_task *)task,
                             owner_task_ctr,
                             (struct spdk_scsi_task *)parent);
    task->scsi.free_fn = spdk_iscsi_task_free;

    return task;
}
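The task registers spdk_iscsi_task_free as its free_fn, which is where the object eventually goes back to g_spdk_iscsi.task_pool. A sketch of the pool-return part of that callback (the real SPDK version also releases parent and PDU references first):

/* Sketch of the registered free_fn: return the task to the pool it came
 * from. The full SPDK implementation does additional teardown first. */
static void
spdk_iscsi_task_free(struct spdk_scsi_task *scsi_task)
{
    struct spdk_iscsi_task *task = (struct spdk_iscsi_task *)scsi_task;

    rte_mempool_put(g_spdk_iscsi.task_pool, (void *)task);
}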
int EnqueueMBuf(struct rte_mbuf *mbuf)
{
    struct rte_mbuf **Msg;

    if (rte_mempool_get(buffer_message_pool, (void **)&Msg) < 0) {
        printf("Failed to get rte_mbuf message buffer\n");
        return -1;
    }

    *Msg = mbuf;

    if (rte_ring_enqueue(ip_to_ether_ring_send, Msg) < 0) {
        printf("Failed to send rte_mbuf message - message discarded\n");
        rte_mempool_put(buffer_message_pool, Msg);
    } else {
        printf("mbuf enqueue = %p\n", mbuf);
    }

    return 0;
}
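EnqueueMBuf() stores a pointer-to-mbuf inside the pool object, so whoever dequeues must unwrap the mbuf and free the carrier object, not the mbuf itself. A hypothetical counterpart, with names assumed to match the sender:

/* Hypothetical counterpart: pop one carrier object, extract the mbuf, and
 * return the carrier to the pool. The mbuf itself stays with the caller. */
struct rte_mbuf *DequeueMBuf(void)
{
    struct rte_mbuf **Msg;
    struct rte_mbuf *mbuf;

    if (rte_ring_dequeue(ip_to_ether_ring_send, (void **)&Msg) < 0)
        return NULL;    /* ring empty */

    mbuf = *Msg;
    rte_mempool_put(buffer_message_pool, Msg);
    return mbuf;
}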
/** Configure the session from a crypto xform chain (PMD ops callback).
 *
 * @param dev Pointer to the device structure.
 * @param xform Pointer to the crypto configuration structure.
 * @param sess Pointer to the empty session structure.
 * @param mp Session mempool to allocate private data from.
 * @returns 0 upon success, negative value otherwise.
 */
static int
mrvl_crypto_pmd_session_configure(struct rte_cryptodev *dev,
        struct rte_crypto_sym_xform *xform,
        struct rte_cryptodev_sym_session *sess,
        struct rte_mempool *mp)
{
    struct mrvl_crypto_session *mrvl_sess;
    void *sess_private_data;
    int ret;

    if (sess == NULL) {
        MRVL_CRYPTO_LOG_ERR("Invalid session struct.");
        return -EINVAL;
    }

    if (rte_mempool_get(mp, &sess_private_data)) {
        CDEV_LOG_ERR("Couldn't get object from session mempool.");
        return -ENOMEM;
    }

    ret = mrvl_crypto_set_session_parameters(sess_private_data, xform);
    if (ret != 0) {
        MRVL_CRYPTO_LOG_ERR("Failed to configure session parameters.");
        /* Return session to mempool */
        rte_mempool_put(mp, sess_private_data);
        return ret;
    }

    set_session_private_data(sess, dev->driver_id, sess_private_data);

    mrvl_sess = (struct mrvl_crypto_session *)sess_private_data;
    if (sam_session_create(&mrvl_sess->sam_sess_params,
            &mrvl_sess->sam_sess) < 0) {
        MRVL_CRYPTO_LOG_DBG("Failed to create session!");
        /* Return session to mempool */
        rte_mempool_put(mp, sess_private_data);
        return -EIO;
    }

    return 0;
}
static void
submit_single_io(struct ns_worker_ctx *ns_ctx)
{
    struct reset_task *task = NULL;
    uint64_t offset_in_ios;
    int rc;
    struct ns_entry *entry = ns_ctx->entry;

    if (rte_mempool_get(task_pool, (void **)&task) != 0) {
        fprintf(stderr, "task_pool rte_mempool_get failed\n");
        exit(1);
    }

    task->ns_ctx = ns_ctx;
    task->ns_ctx->io_submitted++;

    if (g_is_random) {
        offset_in_ios = rand_r(&seed) % entry->size_in_ios;
    } else {
        offset_in_ios = ns_ctx->offset_in_ios++;
        if (ns_ctx->offset_in_ios == entry->size_in_ios) {
            ns_ctx->offset_in_ios = 0;
        }
    }

    if ((g_rw_percentage == 100) ||
        (g_rw_percentage != 0 && ((rand_r(&seed) % 100) < g_rw_percentage))) {
        rc = spdk_nvme_ns_cmd_read(entry->ns, task->buf,
                                   offset_in_ios * entry->io_size_blocks,
                                   entry->io_size_blocks,
                                   io_complete, task, 0);
    } else {
        rc = spdk_nvme_ns_cmd_write(entry->ns, task->buf,
                                    offset_in_ios * entry->io_size_blocks,
                                    entry->io_size_blocks,
                                    io_complete, task, 0);
    }

    if (rc != 0) {
        fprintf(stderr, "starting I/O failed\n");
    }

    ns_ctx->current_queue_depth++;
}
/**
 * CALLED BY NF:
 * Create a new nf_info struct for this NF.
 * Pass a unique tag for this NF.
 */
static struct onvm_nf_info *
onvm_nf_info_init(const char *tag)
{
    void *mempool_data;
    struct onvm_nf_info *info;

    if (rte_mempool_get(nf_info_mp, &mempool_data) < 0) {
        rte_exit(EXIT_FAILURE, "Failed to get client info memory");
    }

    if (mempool_data == NULL) {
        rte_exit(EXIT_FAILURE, "Client Info struct not allocated");
    }

    info = (struct onvm_nf_info *)mempool_data;
    info->instance_id = initial_instance_id;
    info->service_id = service_id;
    info->status = NF_WAITING_FOR_ID;
    info->tag = tag;

    return info;
}
int socket_send(int ser_id, const unsigned char *message, int len)
{
    Socket_Send_Msg *Msg = NULL;
    struct tcb *ptcb = get_tcb_by_identifier(ser_id);

    if (rte_mempool_get(buffer_message_pool, (void **)&Msg) < 0) {
        printf("Failed to get message buffer\n");
        return -1;    /* pool exhausted; nothing was queued */
    }

    Msg->m_Identifier = ser_id;
    Msg->m_Len = len;
    Msg->m_Msg_Type = SEND_DATA;
    memcpy(Msg->m_Data, message, len);

    if (rte_ring_enqueue(ptcb->tcb_socket_ring_send, Msg) < 0) {
        printf("Failed to send message - message discarded\n");
        rte_mempool_put(buffer_message_pool, Msg);
        return -1;    /* Msg went back to the pool; don't touch it below */
    }

    printf("****** Enqueued for %s and len %d and identifier %d\n",
           (char *)Msg->m_Data, Msg->m_Len, Msg->m_Identifier);

    // sendtcppacket(ptcb, mbuf, message, len);
    // ptcb->send_data(message, len);
    return 0;
}
struct rte_security_session *
rte_security_session_create(struct rte_security_ctx *instance,
        struct rte_security_session_conf *conf,
        struct rte_mempool *mp)
{
    struct rte_security_session *sess = NULL;

    if (conf == NULL)
        return NULL;

    RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->session_create, NULL);

    if (rte_mempool_get(mp, (void **)&sess))
        return NULL;

    if (instance->ops->session_create(instance->device, conf, sess, mp)) {
        rte_mempool_put(mp, (void *)sess);
        return NULL;
    }
    instance->sess_cnt++;

    return sess;
}
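Since the session is drawn from a caller-supplied mempool, the destroy path must both run the driver hook and return the object. A sketch of that counterpart, assuming rte_mempool_from_obj() can recover the pool; it mirrors the shape of the create path above, but treat it as illustrative:

/* Sketch of the matching destroy path: let the driver tear down its state,
 * then hand the session back to the mempool it was allocated from. */
int
rte_security_session_destroy(struct rte_security_ctx *instance,
        struct rte_security_session *sess)
{
    int ret;

    RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->session_destroy, -ENOTSUP);

    ret = instance->ops->session_destroy(instance->device, sess);
    if (ret == 0) {
        rte_mempool_put(rte_mempool_from_obj(sess), (void *)sess);
        instance->sess_cnt--;
    }

    return ret;
}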