static int
rte_event_eth_rx_adapter_init(void)
{
	const char *name = "rte_event_eth_rx_adapter_array";
	const struct rte_memzone *mz;
	unsigned int sz;

	sz = sizeof(*event_eth_rx_adapter) *
	    RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

	mz = rte_memzone_lookup(name);
	if (mz == NULL) {
		mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
						 RTE_CACHE_LINE_SIZE);
		if (mz == NULL) {
			RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
					 PRId32, rte_errno);
			return -rte_errno;
		}
	}

	event_eth_rx_adapter = mz->addr;
	return 0;
}
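/*
 * Side note on the snippet above (not part of the original source):
 * RTE_ALIGN() is DPDK's round-up macro (an alias for RTE_ALIGN_CEIL),
 * so the memzone size becomes the next multiple of the cache line. A
 * minimal, self-contained sketch of the same bit trick, assuming the
 * alignment is a power of two:
 */
#include <assert.h>
#include <stdint.h>

static inline uint32_t
align_ceil(uint32_t val, uint32_t align)
{
	/* valid only when align is a power of two */
	return (val + align - 1) & ~(align - 1);
}

int
main(void)
{
	assert(align_ceil(100, 64) == 128);	/* rounded up to a cache line */
	assert(align_ceil(128, 64) == 128);	/* already aligned: unchanged */
	return 0;
}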
static int
acl_calc_counts_indices(struct acl_node_counters *counts,
	struct rte_acl_indices *indices,
	struct rte_acl_trie *trie,
	struct rte_acl_bld_trie *node_bld_trie,
	uint32_t num_tries,
	int match_num)
{
	uint32_t n;

	memset(indices, 0, sizeof(*indices));
	memset(counts, 0, sizeof(*counts));

	/* Get stats on nodes */
	for (n = 0; n < num_tries; n++) {
		counts->smallest_match = INT32_MAX;
		match_num = acl_count_trie_types(counts, node_bld_trie[n].trie,
			match_num, 1);
		trie[n].smallest = counts->smallest_match;
	}

	indices->dfa_index = RTE_ACL_DFA_SIZE + 1;
	indices->quad_index = indices->dfa_index +
		counts->dfa * RTE_ACL_DFA_SIZE;
	indices->single_index = indices->quad_index + counts->quad_vectors;
	indices->match_index = indices->single_index + counts->single + 1;
	indices->match_index = RTE_ALIGN(indices->match_index,
		(XMM_SIZE / sizeof(uint64_t)));

	return match_num;
}
static void *
rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
{
	struct rte_table_lpm_params *p = (struct rte_table_lpm_params *) params;
	struct rte_table_lpm *lpm;
	uint32_t total_size, nht_size;

	/* Check input parameters */
	if (p == NULL) {
		RTE_LOG(ERR, TABLE, "%s: NULL input parameters\n", __func__);
		return NULL;
	}
	if (p->n_rules == 0) {
		RTE_LOG(ERR, TABLE, "%s: Invalid n_rules\n", __func__);
		return NULL;
	}
	if (p->entry_unique_size == 0) {
		RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
			__func__);
		return NULL;
	}
	if (p->entry_unique_size > entry_size) {
		RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
			__func__);
		return NULL;
	}

	entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));

	/* Memory allocation */
	nht_size = RTE_TABLE_LPM_MAX_NEXT_HOPS * entry_size;
	total_size = sizeof(struct rte_table_lpm) + nht_size;
	lpm = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
		socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %u bytes for LPM table\n",
			__func__, total_size);
		return NULL;
	}

	/* LPM low-level table creation */
	lpm->lpm = rte_lpm_create("LPM", socket_id, p->n_rules, 0);
	if (lpm->lpm == NULL) {
		rte_free(lpm);
		RTE_LOG(ERR, TABLE, "Unable to create low-level LPM table\n");
		return NULL;
	}

	/* Memory initialization */
	lpm->entry_size = entry_size;
	lpm->entry_unique_size = p->entry_unique_size;
	lpm->n_rules = p->n_rules;
	lpm->offset = p->offset;

	return lpm;
}
/* return the size of memory occupied by a ring */
ssize_t
rte_ring_get_memsize(unsigned count)
{
	ssize_t sz;

	/* count must be a power of 2 */
	if ((!POWEROF2(count)) || (count > RTE_RING_SZ_MASK)) {
		RTE_LOG(ERR, RING,
			"Requested size is invalid, must be a power of 2 and "
			"must not exceed the size limit %u\n",
			RTE_RING_SZ_MASK);
		return -EINVAL;
	}

	sz = sizeof(struct rte_ring) + count * sizeof(void *);
	sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
	return sz;
}
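/*
 * Side note: the POWEROF2() guard above relies on the standard bit
 * trick. A self-contained sketch (is_power_of_2 here is illustrative,
 * not the DPDK macro; unlike DPDK's POWEROF2() it also rejects zero):
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool
is_power_of_2(uint32_t n)
{
	/* a power of two has exactly one bit set; n & (n - 1) clears it */
	return n != 0 && (n & (n - 1)) == 0;
}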
/* Do a scatter/gather send where the descriptor points to data. */
int
rte_vmbus_chan_send_sglist(struct vmbus_channel *chan,
			   struct vmbus_gpa sg[], uint32_t sglen,
			   void *data, uint32_t dlen,
			   uint64_t xactid, bool *need_sig)
{
	struct vmbus_chanpkt_sglist pkt;
	unsigned int pktlen, pad_pktlen, hlen;
	bool send_evt = false;
	struct iovec iov[4];
	uint64_t pad = 0;
	int error;

	hlen = offsetof(struct vmbus_chanpkt_sglist, gpa[sglen]);
	pktlen = hlen + dlen;
	pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t));

	pkt.hdr.type = VMBUS_CHANPKT_TYPE_GPA;
	pkt.hdr.flags = VMBUS_CHANPKT_FLAG_RC;
	pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.hdr.xactid = xactid;
	pkt.rsvd = 0;
	pkt.gpa_cnt = sglen;

	iov[0].iov_base = &pkt;
	iov[0].iov_len = sizeof(pkt);
	iov[1].iov_base = sg;
	iov[1].iov_len = sizeof(struct vmbus_gpa) * sglen;
	iov[2].iov_base = data;
	iov[2].iov_len = dlen;
	iov[3].iov_base = &pad;
	iov[3].iov_len = pad_pktlen - pktlen;

	error = vmbus_txbr_write(&chan->txbr, iov, 4, &send_evt);

	/* if caller is batching, just propagate the status */
	if (need_sig)
		*need_sig |= send_evt;
	else if (error == 0 && send_evt)
		rte_vmbus_chan_signal_tx(chan);

	return error;
}
/* Do a simple send directly using transmit ring. */
int
rte_vmbus_chan_send(struct vmbus_channel *chan, uint16_t type,
		    void *data, uint32_t dlen,
		    uint64_t xactid, uint32_t flags, bool *need_sig)
{
	struct vmbus_chanpkt pkt;
	unsigned int pktlen, pad_pktlen;
	const uint32_t hlen = sizeof(pkt);
	bool send_evt = false;
	uint64_t pad = 0;
	struct iovec iov[3];
	int error;

	pktlen = hlen + dlen;
	pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t));

	pkt.hdr.type = type;
	pkt.hdr.flags = flags;
	pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
	pkt.hdr.xactid = xactid;

	iov[0].iov_base = &pkt;
	iov[0].iov_len = hlen;
	iov[1].iov_base = data;
	iov[1].iov_len = dlen;
	iov[2].iov_base = &pad;
	iov[2].iov_len = pad_pktlen - pktlen;

	error = vmbus_txbr_write(&chan->txbr, iov, 3, &send_evt);

	/*
	 * The caller passes a non-NULL need_sig if it will handle
	 * signaling later when required; if need_sig is NULL,
	 * signal now when needed.
	 */
	if (need_sig)
		*need_sig |= send_evt;
	else if (error == 0 && send_evt)
		rte_vmbus_chan_signal_tx(chan);

	return error;
}
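/*
 * Side note: both vmbus send paths above round the packet length up to
 * 8 bytes because the transmit ring operates in uint64_t units; the
 * last iovec supplies the 0..7 zero bytes of padding. A hedged,
 * self-contained sketch of that arithmetic (round_up_u64 is an
 * illustrative helper, not DPDK API):
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t
round_up_u64(uint32_t len)
{
	return (len + 7u) & ~7u;	/* next multiple of sizeof(uint64_t) */
}

int
main(void)
{
	uint32_t pktlen = 22;	/* e.g. header plus payload */
	uint32_t pad_pktlen = round_up_u64(pktlen);

	/* prints "on-ring bytes: 24, pad bytes: 2" */
	printf("on-ring bytes: %u, pad bytes: %u\n",
	       pad_pktlen, pad_pktlen - pktlen);
	return 0;
}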
static int
test_align(void)
{
#define FAIL_ALIGN(x, i, p)\
	{printf(x "() test failed: %u %u\n", i, p);\
	return -1;}
#define ERROR_FLOOR(res, i, pow) \
	(res % pow) ||			/* check if not aligned */ \
	((res / pow) != (i / pow))	/* check if correct alignment */
#define ERROR_CEIL(res, i, pow) \
	(res % pow) ||			/* check if not aligned */ \
		((i % pow) == 0 ?	/* check if ceiling is invoked */ \
		val / pow != i / pow :	/* if aligned */ \
		val / pow != (i / pow) + 1) /* if not aligned, hence +1 */

	uint32_t i, p, val;

	for (i = 1, p = 1; i <= MAX_NUM; i++) {
		if (rte_align32pow2(i) != p)
			FAIL_ALIGN("rte_align32pow2", i, p);
		if (i == p)
			p <<= 1;
	}

	for (p = 2; p <= MAX_NUM; p <<= 1) {

		if (!rte_is_power_of_2(p))
			FAIL("rte_is_power_of_2");

		for (i = 1; i <= MAX_NUM; i++) {
			/* align floor */
			if (RTE_ALIGN_FLOOR((uintptr_t)i, p) % p)
				FAIL_ALIGN("RTE_ALIGN_FLOOR", i, p);

			val = RTE_PTR_ALIGN_FLOOR((uintptr_t) i, p);
			if (ERROR_FLOOR(val, i, p))
				FAIL_ALIGN("RTE_PTR_ALIGN_FLOOR", i, p);

			val = RTE_ALIGN_FLOOR(i, p);
			if (ERROR_FLOOR(val, i, p))
				FAIL_ALIGN("RTE_ALIGN_FLOOR", i, p);

			/* align ceiling */
			val = RTE_PTR_ALIGN((uintptr_t) i, p);
			if (ERROR_CEIL(val, i, p))
				FAIL_ALIGN("RTE_PTR_ALIGN", i, p);

			val = RTE_ALIGN(i, p);
			if (ERROR_CEIL(val, i, p))
				FAIL_ALIGN("RTE_ALIGN", i, p);

			val = RTE_ALIGN_CEIL(i, p);
			if (ERROR_CEIL(val, i, p))
				FAIL_ALIGN("RTE_ALIGN_CEIL", i, p);

			val = RTE_PTR_ALIGN_CEIL((uintptr_t)i, p);
			if (ERROR_CEIL(val, i, p))
				FAIL_ALIGN("RTE_PTR_ALIGN_CEIL", i, p);

			/* by this point we know that val is aligned to p */
			if (!rte_is_aligned((void *)(uintptr_t) val, p))
				FAIL("rte_is_aligned");
		}
	}
	return 0;
}
int
otx_cpt_get_resource(void *dev, uint8_t group, struct cpt_instance **instance)
{
	int ret = -ENOENT, len, qlen, i;
	int chunk_len, chunks, chunk_size;
	struct cpt_vf *cptvf = (struct cpt_vf *)dev;
	struct cpt_instance *cpt_instance;
	struct command_chunk *chunk_head = NULL, *chunk_prev = NULL;
	struct command_chunk *chunk = NULL;
	uint8_t *mem;
	const struct rte_memzone *rz;
	uint64_t dma_addr = 0, alloc_len, used_len;
	uint64_t *next_ptr;
	uint64_t pg_sz = sysconf(_SC_PAGESIZE);

	CPT_LOG_DP_DEBUG("Initializing cpt resource %s", cptvf->dev_name);

	cpt_instance = &cptvf->instance;

	memset(&cptvf->cqueue, 0, sizeof(cptvf->cqueue));
	memset(&cptvf->pqueue, 0, sizeof(cptvf->pqueue));

	/* Chunks are of fixed size buffers */
	chunks = DEFAULT_CMD_QCHUNKS;
	chunk_len = DEFAULT_CMD_QCHUNK_SIZE;

	qlen = chunks * chunk_len;
	/* Chunk size includes 8 bytes of next chunk ptr */
	chunk_size = chunk_len * CPT_INST_SIZE + CPT_NEXT_CHUNK_PTR_SIZE;

	/* For command chunk structures */
	len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8);

	/* For pending queue */
	len += qlen * RTE_ALIGN(sizeof(struct rid), 8);

	/* So that instruction queues start as pg size aligned */
	len = RTE_ALIGN(len, pg_sz);

	/* For Instruction queues */
	len += chunks * RTE_ALIGN(chunk_size, 128);

	/* Wastage after instruction queues */
	len = RTE_ALIGN(len, pg_sz);

	rz = rte_memzone_reserve_aligned(cptvf->dev_name, len, cptvf->node,
					 RTE_MEMZONE_SIZE_HINT_ONLY |
					 RTE_MEMZONE_256MB,
					 RTE_CACHE_LINE_SIZE);
	if (!rz) {
		ret = rte_errno;
		goto cleanup;
	}

	mem = rz->addr;
	dma_addr = rz->phys_addr;
	alloc_len = len;

	memset(mem, 0, len);

	cpt_instance->rsvd = (uintptr_t)rz;

	/* Pending queue setup */
	cptvf->pqueue.rid_queue = (struct rid *)mem;
	cptvf->pqueue.enq_tail = 0;
	cptvf->pqueue.deq_head = 0;
	cptvf->pqueue.pending_count = 0;

	mem += qlen * RTE_ALIGN(sizeof(struct rid), 8);
	len -= qlen * RTE_ALIGN(sizeof(struct rid), 8);
	dma_addr += qlen * RTE_ALIGN(sizeof(struct rid), 8);

	/* Alignment wastage */
	used_len = alloc_len - len;
	mem += RTE_ALIGN(used_len, pg_sz) - used_len;
	len -= RTE_ALIGN(used_len, pg_sz) - used_len;
	dma_addr += RTE_ALIGN(used_len, pg_sz) - used_len;

	/* Init instruction queues */
	chunk_head = &cptvf->cqueue.chead[0];

	chunk_prev = NULL;
	for (i = 0; i < DEFAULT_CMD_QCHUNKS; i++) {
		int csize;

		chunk = &cptvf->cqueue.chead[i];
		chunk->head = mem;
		chunk->dma_addr = dma_addr;

		csize = RTE_ALIGN(chunk_size, 128);
		mem += csize;
		dma_addr += csize;
		len -= csize;

		if (chunk_prev) {
			next_ptr = (uint64_t *)(chunk_prev->head +
						chunk_size - 8);
			*next_ptr = (uint64_t)chunk->dma_addr;
		}
		chunk_prev = chunk;
	}
	/* Circular loop: last chunk points back to the first */
	next_ptr = (uint64_t *)(chunk_prev->head + chunk_size - 8);
	*next_ptr = (uint64_t)chunk_head->dma_addr;

	assert(!len);

	/* This is used for CPT(0)_PF_Q(0..15)_CTL.size config */
	cptvf->qsize = chunk_size / 8;
	cptvf->cqueue.qhead = chunk_head->head;
	cptvf->cqueue.idx = 0;
	cptvf->cqueue.cchunk = 0;

	if (cpt_vq_init(cptvf, group)) {
		CPT_LOG_ERR("Failed to initialize CPT VQ of device %s",
			    cptvf->dev_name);
		ret = -EBUSY;
		goto cleanup;
	}

	*instance = cpt_instance;

	CPT_LOG_DP_DEBUG("Crypto device (%s) initialized", cptvf->dev_name);

	return 0;

cleanup:
	rte_memzone_free(rz);
	*instance = NULL;
	return ret;
}
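/*
 * Side note: the function above advances mem, dma_addr and len in
 * lockstep, rounding the consumed length up to a page boundary before
 * placing the instruction queues. A hedged sketch of that carving
 * pattern (carve() is an illustrative helper, not driver API):
 */
#include <stddef.h>
#include <stdint.h>

static uint8_t *
carve(uint8_t *base, size_t *offset, size_t size, size_t align)
{
	/* align must be a power of two, as pg_sz is */
	*offset = (*offset + align - 1) & ~(align - 1);

	uint8_t *region = base + *offset;

	*offset += size;
	return region;
}
/*
 * Provided base is itself suitably aligned, each returned region
 * starts on an align boundary, mirroring the "alignment wastage"
 * step above.
 */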
/* Precalculate WRR polling sequence for all queues in rx_adapter */
static int
eth_poll_wrr_calc(struct rte_event_eth_rx_adapter *rx_adapter)
{
	uint8_t d;
	uint16_t q;
	unsigned int i;

	/* Initialize variables for calculation of wrr schedule */
	uint16_t max_wrr_pos = 0;
	unsigned int poll_q = 0;
	uint16_t max_wt = 0;
	uint16_t gcd = 0;

	struct eth_rx_poll_entry *rx_poll = NULL;
	uint32_t *rx_wrr = NULL;

	if (rx_adapter->num_rx_polled) {
		size_t len = RTE_ALIGN(rx_adapter->num_rx_polled *
				sizeof(*rx_adapter->eth_rx_poll),
				RTE_CACHE_LINE_SIZE);
		rx_poll = rte_zmalloc_socket(rx_adapter->mem_name,
					     len,
					     RTE_CACHE_LINE_SIZE,
					     rx_adapter->socket_id);
		if (rx_poll == NULL)
			return -ENOMEM;

		/* Generate array of all queues to poll, the size of this
		 * array is poll_q
		 */
		for (d = 0; d < rte_eth_dev_count(); d++) {
			uint16_t nb_rx_queues;
			struct eth_device_info *dev_info =
					&rx_adapter->eth_devices[d];
			nb_rx_queues = dev_info->dev->data->nb_rx_queues;
			if (dev_info->rx_queue == NULL)
				continue;
			for (q = 0; q < nb_rx_queues; q++) {
				struct eth_rx_queue_info *queue_info =
					&dev_info->rx_queue[q];
				if (queue_info->queue_enabled == 0)
					continue;

				uint16_t wt = queue_info->wt;
				rx_poll[poll_q].eth_dev_id = d;
				rx_poll[poll_q].eth_rx_qid = q;
				max_wrr_pos += wt;
				max_wt = RTE_MAX(max_wt, wt);
				gcd = (gcd) ? gcd_u16(gcd, wt) : wt;
				poll_q++;
			}
		}

		len = RTE_ALIGN(max_wrr_pos * sizeof(*rx_wrr),
				RTE_CACHE_LINE_SIZE);
		rx_wrr = rte_zmalloc_socket(rx_adapter->mem_name,
					    len,
					    RTE_CACHE_LINE_SIZE,
					    rx_adapter->socket_id);
		if (rx_wrr == NULL) {
			rte_free(rx_poll);
			return -ENOMEM;
		}

		/* Generate polling sequence based on weights */
		int prev = -1;
		int cw = -1;
		for (i = 0; i < max_wrr_pos; i++) {
			rx_wrr[i] = wrr_next(rx_adapter, poll_q, &cw,
					     rx_poll, max_wt, gcd, prev);
			prev = rx_wrr[i];
		}
	}

	rte_free(rx_adapter->eth_rx_poll);
	rte_free(rx_adapter->wrr_sched);

	rx_adapter->eth_rx_poll = rx_poll;
	rx_adapter->wrr_sched = rx_wrr;
	rx_adapter->wrr_len = max_wrr_pos;

	return 0;
}
void *
mg_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
{
	struct rte_table_lpm_params *p = (struct rte_table_lpm_params *) params;
	struct rte_table_lpm *lpm;
	uint32_t total_size, nht_size;

	/* Check input parameters */
	if (p == NULL) {
		RTE_LOG(ERR, TABLE, "%s: NULL input parameters\n", __func__);
		return NULL;
	}
	if (p->n_rules == 0) {
		RTE_LOG(ERR, TABLE, "%s: Invalid n_rules\n", __func__);
		return NULL;
	}
	if (p->entry_unique_size == 0) {
		RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
			__func__);
		return NULL;
	}
	if (p->entry_unique_size > entry_size) {
		RTE_LOG(ERR, TABLE, "%s: Invalid entry_unique_size\n",
			__func__);
		return NULL;
	}
	// XXX ASK: does a 32 bit aligned offset make any sense here?
	// this prevents me from accessing ip address in payload
	//if ((p->offset & 0x3) != 0) {
	//	RTE_LOG(ERR, TABLE, "%s: Invalid offset\n", __func__);
	//	return NULL;
	//}

	entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));

	/* Memory allocation */
	nht_size = RTE_TABLE_LPM_MAX_NEXT_HOPS * entry_size;
	total_size = sizeof(struct rte_table_lpm) + nht_size;
	lpm = rte_zmalloc_socket("TABLE", total_size, CACHE_LINE_SIZE,
		socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %u bytes for LPM table\n",
			__func__, total_size);
		return NULL;
	}

	/* LPM low-level table creation */
	lpm->lpm = rte_lpm_create("LPM", socket_id, p->n_rules, 0);
	if (lpm->lpm == NULL) {
		rte_free(lpm);
		RTE_LOG(ERR, TABLE, "Unable to create low-level LPM table\n");
		return NULL;
	}

	/* Memory initialization */
	lpm->entry_size = entry_size;
	lpm->entry_unique_size = p->entry_unique_size;
	lpm->n_rules = p->n_rules;
	lpm->offset = p->offset;

	return lpm;
}
static void *
rte_table_acl_create(
	void *params,
	int socket_id,
	uint32_t entry_size)
{
	struct rte_table_acl_params *p = (struct rte_table_acl_params *) params;
	struct rte_table_acl *acl;
	uint32_t action_table_size, acl_rule_list_size, acl_rule_memory_size;
	uint32_t total_size;

	RTE_BUILD_BUG_ON(((sizeof(struct rte_table_acl) % RTE_CACHE_LINE_SIZE)
		!= 0));

	/* Check input parameters */
	if (p == NULL) {
		RTE_LOG(ERR, TABLE, "%s: Invalid value for params\n",
			__func__);
		return NULL;
	}
	if (p->name == NULL) {
		RTE_LOG(ERR, TABLE, "%s: Invalid value for name\n", __func__);
		return NULL;
	}
	if (p->n_rules == 0) {
		RTE_LOG(ERR, TABLE, "%s: Invalid value for n_rules\n",
			__func__);
		return NULL;
	}
	if ((p->n_rule_fields == 0) ||
	    (p->n_rule_fields > RTE_ACL_MAX_FIELDS)) {
		RTE_LOG(ERR, TABLE, "%s: Invalid value for n_rule_fields\n",
			__func__);
		return NULL;
	}

	entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));

	/* Memory allocation */
	action_table_size = RTE_CACHE_LINE_ROUNDUP(p->n_rules * entry_size);
	acl_rule_list_size =
		RTE_CACHE_LINE_ROUNDUP(p->n_rules *
			sizeof(struct rte_acl_rule *));
	acl_rule_memory_size = RTE_CACHE_LINE_ROUNDUP(p->n_rules *
		RTE_ACL_RULE_SZ(p->n_rule_fields));
	total_size = sizeof(struct rte_table_acl) + action_table_size +
		acl_rule_list_size + acl_rule_memory_size;

	acl = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
		socket_id);
	if (acl == NULL) {
		RTE_LOG(ERR, TABLE,
			"%s: Cannot allocate %u bytes for ACL table\n",
			__func__, total_size);
		return NULL;
	}

	acl->action_table = &acl->memory[0];
	acl->acl_rule_list =
		(struct rte_acl_rule **) &acl->memory[action_table_size];
	acl->acl_rule_memory = (uint8_t *)
		&acl->memory[action_table_size + acl_rule_list_size];

	/* Initialization of internal fields */
	snprintf(acl->name[0], RTE_ACL_NAMESIZE, "%s_a", p->name);
	snprintf(acl->name[1], RTE_ACL_NAMESIZE, "%s_b", p->name);
	acl->name_id = 1;

	acl->acl_params.name = acl->name[acl->name_id];
	acl->acl_params.socket_id = socket_id;
	acl->acl_params.rule_size = RTE_ACL_RULE_SZ(p->n_rule_fields);
	acl->acl_params.max_rule_num = p->n_rules;

	acl->cfg.num_categories = 1;
	acl->cfg.num_fields = p->n_rule_fields;
	memcpy(&acl->cfg.defs[0], &p->field_format[0],
		p->n_rule_fields * sizeof(struct rte_acl_field_def));

	acl->ctx = NULL;

	acl->n_rules = p->n_rules;
	acl->entry_size = entry_size;

	return acl;
}
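/*
 * Side note: rounding each sub-array size with RTE_CACHE_LINE_ROUNDUP
 * means every offset into the single allocation is a sum of cache-line
 * multiples, so action_table, acl_rule_list and acl_rule_memory all
 * start cache-line aligned. A hedged sketch of that offset arithmetic
 * (names mirror the snippet; roundup_cl and the 64-byte line size are
 * illustrative assumptions):
 */
#include <stddef.h>

#define CACHE_LINE 64u	/* assumed cache-line size */

static inline size_t
roundup_cl(size_t sz)
{
	return (sz + CACHE_LINE - 1) & ~(size_t)(CACHE_LINE - 1);
}

static void
acl_layout(size_t action_table_size, size_t acl_rule_list_size,
	   size_t *list_off, size_t *rule_mem_off)
{
	*list_off = roundup_cl(action_table_size);
	*rule_mem_off = *list_off + roundup_cl(acl_rule_list_size);
	/* both offsets are cache-line multiples, so each region is
	 * cache-line aligned whenever the base pointer is */
}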
/*
 * Generate the runtime structure using build structure
 */
int
rte_acl_gen(struct rte_acl_ctx *ctx, struct rte_acl_trie *trie,
	struct rte_acl_bld_trie *node_bld_trie, uint32_t num_tries,
	uint32_t num_categories, uint32_t data_index_sz, int match_num)
{
	void *mem;
	size_t total_size;
	uint64_t *node_array, no_match;
	uint32_t n, match_index;
	struct rte_acl_match_results *match;
	struct acl_node_counters counts;
	struct rte_acl_indices indices;

	/* Fill counts and indices arrays from the nodes. */
	match_num = acl_calc_counts_indices(&counts, &indices, trie,
		node_bld_trie, num_tries, match_num);

	/* Allocate runtime memory (align to cache boundary) */
	total_size = RTE_ALIGN(data_index_sz, RTE_CACHE_LINE_SIZE) +
		indices.match_index * sizeof(uint64_t) +
		(match_num + 2) * sizeof(struct rte_acl_match_results) +
		XMM_SIZE;

	mem = rte_zmalloc_socket(ctx->name, total_size, RTE_CACHE_LINE_SIZE,
			ctx->socket_id);
	if (mem == NULL) {
		RTE_LOG(ERR, ACL,
			"allocation of %zu bytes on socket %d for %s failed\n",
			total_size, ctx->socket_id, ctx->name);
		return -ENOMEM;
	}

	/* Fill the runtime structure */
	match_index = indices.match_index;
	node_array = (uint64_t *)((uintptr_t)mem +
		RTE_ALIGN(data_index_sz, RTE_CACHE_LINE_SIZE));

	/*
	 * Setup the NOMATCH node (a SINGLE at the
	 * highest index, that points to itself)
	 */
	node_array[RTE_ACL_DFA_SIZE] = RTE_ACL_DFA_SIZE | RTE_ACL_NODE_SINGLE;

	no_match = RTE_ACL_NODE_MATCH;

	for (n = 0; n < RTE_ACL_DFA_SIZE; n++)
		node_array[n] = no_match;

	match = ((struct rte_acl_match_results *)(node_array + match_index));
	memset(match, 0, sizeof(*match));

	for (n = 0; n < num_tries; n++) {
		acl_gen_node(node_bld_trie[n].trie, node_array, no_match,
			&indices, num_categories);
		if (node_bld_trie[n].trie->node_index == no_match)
			trie[n].root_index = 0;
		else
			trie[n].root_index = node_bld_trie[n].trie->node_index;
	}

	ctx->mem = mem;
	ctx->mem_sz = total_size;
	ctx->data_indexes = mem;
	ctx->num_tries = num_tries;
	ctx->num_categories = num_categories;
	ctx->match_index = match_index;
	ctx->no_match = no_match;
	ctx->idle = node_array[RTE_ACL_DFA_SIZE];
	ctx->trans_table = node_array;
	memcpy(ctx->trie, trie, sizeof(ctx->trie));

	acl_gen_log_stats(ctx, &counts);
	return 0;
}