int
vr_mirror_meta_entry_set(struct vrouter *router, unsigned int index,
        unsigned int mir_sip, unsigned short mir_sport,
        void *meta_data, unsigned int meta_data_len,
        unsigned short mirror_vrf)
{
    char *buf;
    struct vr_mirror_meta_entry *me, *me_old;

    me = vr_malloc(sizeof(*me), VR_MIRROR_META_OBJECT);
    if (!me)
        return -ENOMEM;

    buf = vr_malloc(meta_data_len, VR_MIRROR_META_OBJECT);
    if (!buf) {
        vr_free(me, VR_MIRROR_META_OBJECT);
        return -ENOMEM;
    }

    memcpy(buf, meta_data, meta_data_len);
    me->mirror_router = router;
    me->mirror_md = buf;
    me->mirror_md_len = meta_data_len;
    me->mirror_sip = mir_sip;
    me->mirror_sport = mir_sport;
    me->mirror_vrf = mirror_vrf;

    me_old = vr_itable_set(router->vr_mirror_md, index, me);
    if (me_old && me_old != VR_ITABLE_ERR_PTR)
        vr_mirror_meta_entry_destroy(index, (void *)me_old);

    return 0;
}
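/*
 * For illustration only: a minimal sketch of what the destroy callback
 * invoked above could look like. It assumes destruction simply releases
 * the metadata buffer and then the entry itself; a production
 * implementation may instead defer the frees until concurrent readers
 * of the itable are done.
 */
static void
vr_mirror_meta_entry_destroy_sketch(unsigned int index, void *arg)
{
    struct vr_mirror_meta_entry *me = (struct vr_mirror_meta_entry *)arg;

    (void)index; /* unused in this sketch */

    if (!me || me == (struct vr_mirror_meta_entry *)VR_ITABLE_ERR_PTR)
        return;

    if (me->mirror_md)
        vr_free(me->mirror_md, VR_MIRROR_META_OBJECT);
    vr_free(me, VR_MIRROR_META_OBJECT);
}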
static int
vr_interface_service_enable(struct vr_interface *vif)
{
    int i;

    /*
     * handle the case of an existing vrf assign table that was not
     * freed because it was not empty. note that for an existing table
     * we do not reset the entries or the table users count, as per
     * requirements from agent
     */
    if (!vif->vif_vrf_table) {
        /* VR_INTERFACE_VRF_TABLE_OBJECT is the assumed object tag;
         * vr_malloc() takes a tag as in the other call sites here */
        vif->vif_vrf_table = vr_malloc(sizeof(short) *
                VIF_VRF_TABLE_ENTRIES, VR_INTERFACE_VRF_TABLE_OBJECT);
        if (!vif->vif_vrf_table)
            return -ENOMEM;

        for (i = 0; i < VIF_VRF_TABLE_ENTRIES; i++)
            vif->vif_vrf_table[i] = -1;

        /* for the new table, there are no users */
        vif->vif_vrf_table_users = 0;
    }

    vif->vif_rx = eth_srx;

    return 0;
}
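/*
 * Hypothetical helper, not part of the original source: it illustrates
 * how the table entries and the vif_vrf_table_users count described
 * above might be kept in sync. An entry of -1 means "unassigned"; the
 * users count tracks how many entries are currently assigned, which is
 * what decides whether the table can be freed on service disable.
 */
static int
vif_vrf_table_set_sketch(struct vr_interface *vif, unsigned int index,
        short vrf)
{
    if (!vif->vif_vrf_table || index >= VIF_VRF_TABLE_ENTRIES)
        return -EINVAL;

    /* a new assignment takes a reference; clearing one drops it */
    if (vif->vif_vrf_table[index] < 0 && vrf >= 0)
        vif->vif_vrf_table_users++;
    else if (vif->vif_vrf_table[index] >= 0 && vrf < 0)
        vif->vif_vrf_table_users--;

    vif->vif_vrf_table[index] = vrf;

    return 0;
}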
static char *
dpdk_nl_trans_alloc(unsigned int size)
{
    char *buf;

    /* reserve HDR_LEN bytes in front of the pointer handed to the caller */
    buf = vr_malloc(size + HDR_LEN, VR_MESSAGE_OBJECT);
    if (!buf)
        return NULL;

    return buf + HDR_LEN;
}
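/*
 * Because the allocator above hides HDR_LEN bytes in front of the
 * pointer it returns, the matching free path must rewind by HDR_LEN
 * before releasing. A minimal sketch, assuming the same
 * VR_MESSAGE_OBJECT tag (this helper is not part of the excerpt):
 */
static void
dpdk_nl_trans_free_sketch(char *buf)
{
    if (buf)
        vr_free(buf - HDR_LEN, VR_MESSAGE_OBJECT);
}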
static struct vr_timer *
fragment_table_scanner_init(struct vrouter *router, struct vr_btable *table)
{
    unsigned int num_entries;
    struct vr_timer *vtimer = NULL;
    struct scanner_params *scanner;

    if (!table)
        return NULL;

    num_entries = vr_btable_entries(table);

    scanner = vr_zalloc(sizeof(*scanner), VR_FRAGMENT_SCANNER_OBJECT);
    if (!scanner) {
        vr_module_error(-ENOMEM, __FUNCTION__, __LINE__, num_entries);
        return NULL;
    }

    scanner->sp_router = router;
    scanner->sp_fragment_table = table;
    scanner->sp_num_entries = num_entries;
    scanner->sp_last_scanned_entry = -1;

    vtimer = vr_malloc(sizeof(*vtimer), VR_TIMER_OBJECT);
    if (!vtimer) {
        vr_module_error(-ENOMEM, __FUNCTION__, __LINE__, num_entries);
        goto fail_init;
    }

    vtimer->vt_timer = fragment_table_scanner;
    vtimer->vt_vr_arg = scanner;
    vtimer->vt_msecs = 1000;
    if (vr_create_timer(vtimer)) {
        vr_module_error(-ENOMEM, __FUNCTION__, __LINE__, num_entries);
        goto fail_init;
    }

    return vtimer;

fail_init:
    /* release both allocations on any failure */
    if (vtimer)
        vr_free(vtimer, VR_TIMER_OBJECT);
    if (scanner)
        vr_free(scanner, VR_FRAGMENT_SCANNER_OBJECT);
    return NULL;
}
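/*
 * Hypothetical sketch of the periodic callback wired into vtimer above;
 * the real fragment_table_scanner is not part of this excerpt. The
 * assumption: each 1000ms tick scans a bounded slice of the table (64
 * entries here, an arbitrary budget) and resumes from
 * sp_last_scanned_entry on the next tick, wrapping at the end.
 */
static void
fragment_table_scanner_sketch(void *arg)
{
    unsigned int i, start, scanned = 0;
    struct scanner_params *scanner = (struct scanner_params *)arg;

    start = scanner->sp_last_scanned_entry + 1;
    for (i = start; (i < scanner->sp_num_entries) && (scanned < 64);
            i++, scanned++) {
        /* look up entry i in scanner->sp_fragment_table and expire it
         * if it has been sitting in the table for too long */
    }

    /* remember where we stopped; restart from the top once done */
    scanner->sp_last_scanned_entry =
            (i >= scanner->sp_num_entries) ? -1 : (int)(i - 1);
}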
static int
vr_flow_schedule_transition(struct vrouter *router, vr_flow_req *req,
        struct vr_flow_entry *fe)
{
    struct vr_flow_md *flmd = NULL;

    /* VR_FLOW_METADATA_OBJECT is assumed here; vr_malloc() takes an
     * object tag as at every other call site in this code */
    flmd = (struct vr_flow_md *)vr_malloc(sizeof(*flmd),
            VR_FLOW_METADATA_OBJECT);
    if (!flmd)
        return -ENOMEM;

    flmd->flmd_router = router;
    flmd->flmd_index = req->fr_index;
    flmd->flmd_action = req->fr_action;
    flmd->flmd_flags = req->fr_flags;

    vr_schedule_work(vr_get_cpu(), vr_flow_flush, (void *)flmd);

    return 0;
}
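/*
 * For context, a sketch of the deferred-work side; the actual
 * vr_flow_flush is not part of this excerpt. The shape assumed here:
 * the callback receives the vr_flow_md allocated above, acts on the
 * recorded flow index, and owns the metadata, so it must free it.
 */
static void
flow_flush_sketch(void *arg)
{
    struct vr_flow_md *flmd = (struct vr_flow_md *)arg;

    /* ... look up flmd->flmd_index in flmd->flmd_router and apply
     * flmd->flmd_action / flmd->flmd_flags to the flow entry ... */

    vr_free(flmd, VR_FLOW_METADATA_OBJECT);
}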
static int
usock_clone(struct vr_usocket *parent, int cfd)
{
    struct vr_usocket *child;

    RTE_LOG(DEBUG, USOCK, "%s[%lx]: parent FD %d cfd %d\n", __func__,
            pthread_self(), parent->usock_fd, cfd);

    child = vr_zalloc(sizeof(struct vr_usocket), VR_USOCK_OBJECT);
    if (!child) {
        usock_set_error(parent, -ENOMEM);
        goto error_return;
    }

    child->usock_rx_buf = vr_malloc(USOCK_RX_BUF_LEN, VR_USOCK_BUF_OBJECT);
    if (!child->usock_rx_buf) {
        usock_set_error(parent, -ENOMEM);
        goto error_return;
    }

    child->usock_buf_len = USOCK_RX_BUF_LEN;
    child->usock_type = parent->usock_type;
    child->usock_proto = parent->usock_proto;
    child->usock_fd = cfd;

    if (usock_bind_usockets(parent, child))
        goto error_return;

    return 0;

error_return:
    if (child) {
        if (child->usock_rx_buf)
            vr_free(child->usock_rx_buf, VR_USOCK_BUF_OBJECT);
        vr_free(child, VR_USOCK_OBJECT);
    }

    return parent->usock_error;
}
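/*
 * Usage sketch (hypothetical, not from the original source): on an
 * accept path, each new connection is wrapped in a child usocket bound
 * to the listening parent. Note that usock_clone() frees the child but
 * does not close cfd on failure, so the caller closes it.
 */
static void
usock_accept_sketch(struct vr_usocket *parent)
{
    int cfd;

    cfd = accept(parent->usock_fd, NULL, NULL);
    if (cfd < 0)
        return;

    if (usock_clone(parent, cfd))
        close(cfd);
}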
static int
__usock_read(struct vr_usocket *usockp)
{
    int ret;
    unsigned int offset = usockp->usock_read_offset;
    unsigned int len = usockp->usock_read_len;
    unsigned int toread = len - offset;
    struct nlmsghdr *nlh;
    unsigned int proto = usockp->usock_proto;
    char *buf = usockp->usock_rx_buf;

    if (toread > usockp->usock_buf_len)
        toread = usockp->usock_buf_len - offset;

retry_read:
    if (usockp->usock_owner != pthread_self()) {
        if (usockp->usock_owner)
            RTE_LOG(WARNING, USOCK, "WARNING: thread %lx is trying to read"
                    " usocket FD %d owned by thread %lx\n",
                    pthread_self(), usockp->usock_fd, usockp->usock_owner);
        usockp->usock_owner = pthread_self();
    }

    ret = read(usockp->usock_fd, buf + offset, toread);
#ifdef VR_DPDK_USOCK_DUMP
    if (ret > 0) {
        RTE_LOG(DEBUG, USOCK, "%s[%lx]: FD %d read %d bytes\n", __func__,
                pthread_self(), usockp->usock_fd, ret);
        rte_hexdump(stdout, "usock buffer dump:", buf + offset, ret);
    } else if (ret < 0) {
        RTE_LOG(DEBUG, USOCK, "%s[%lx]: FD %d read returned error %d: %s (%d)\n",
                __func__, pthread_self(), usockp->usock_fd, ret,
                rte_strerror(errno), errno);
    }
#endif
    if (ret <= 0) {
        if (!ret)
            return -1;

        if (errno == EINTR)
            goto retry_read;

        if ((errno == EAGAIN) || (errno == EWOULDBLOCK))
            return 0;

        RTE_LOG(ERR, USOCK, "Error reading FD %d: %s (%d)\n",
                usockp->usock_fd, rte_strerror(errno), errno);
        return ret;
    }

    offset += ret;
    usockp->usock_read_offset = offset;

    if (proto == NETLINK) {
        if (usockp->usock_state == READING_HEADER) {
            if (usockp->usock_read_offset == usockp->usock_read_len) {
                /* the header is in; it tells us the full message length */
                usockp->usock_state = READING_DATA;
                nlh = (struct nlmsghdr *)(usockp->usock_rx_buf);
                usockp->usock_read_len = nlh->nlmsg_len;
            }
        }

        if (usockp->usock_buf_len < usockp->usock_read_len) {
            /* grow the buffer so the whole netlink message fits */
            usockp->usock_rx_buf = vr_malloc(usockp->usock_read_len,
                    VR_USOCK_BUF_OBJECT);
            if (!usockp->usock_rx_buf) {
                /* bad, but let's recover */
                usockp->usock_rx_buf = buf;
                usockp->usock_read_len -= usockp->usock_read_offset;
                usockp->usock_read_offset = 0;
                usockp->usock_state = READING_FAULTY_DATA;
            } else {
                memcpy(usockp->usock_rx_buf, buf, usockp->usock_read_offset);
                vr_free(buf, VR_USOCK_BUF_OBJECT);
                usockp->usock_buf_len = usockp->usock_read_len;
                buf = usockp->usock_rx_buf;
            }
        }
    } else if (proto == PACKET) {
        usockp->usock_read_len = ret;
    }

    return ret;
}
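/*
 * Hypothetical driver loop showing how the two-phase netlink read above
 * is meant to be used: first read exactly one nlmsghdr so the
 * READING_HEADER branch can learn the total message length from
 * nlmsg_len, then keep reading until the whole message is in. The
 * priming of the state and length fields is the assumption here.
 */
static int
usock_read_msg_sketch(struct vr_usocket *usockp)
{
    int ret;

    usockp->usock_read_offset = 0;
    usockp->usock_read_len = sizeof(struct nlmsghdr);
    usockp->usock_state = READING_HEADER;

    while (usockp->usock_read_offset < usockp->usock_read_len) {
        ret = __usock_read(usockp);
        if (ret < 0)
            return ret;
        if (ret == 0)
            break; /* would block; the poll loop calls us again later */
    }

    return 0;
}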
int
vr_fragment_enqueue(struct vrouter *router, struct vr_fragment_queue *vfq,
        struct vr_packet *pkt, struct vr_forwarding_md *fmd)
{
    bool swapped = false;
    unsigned int i;
    struct vr_packet_node *pnode;
    struct vr_fragment_queue_element *fqe = NULL, *tail, **tailp;

    tailp = &vfq->vfq_tail;
    if (*tailp == NULL) {
        vfq->vfq_length = 0;
    } else {
        if ((vfq->vfq_length + 1) > VR_MAX_FRAGMENTS_PER_CPU_QUEUE)
            goto fail;
    }

    /* check whether the total number of fragmented packets is exceeded */
    if (vrouter_host->hos_is_frag_limit_exceeded &&
            vrouter_host->hos_is_frag_limit_exceeded())
        goto fail;

    fqe = vr_malloc(sizeof(*fqe), VR_FRAGMENT_QUEUE_ELEMENT_OBJECT);
    if (!fqe)
        goto fail;

    fqe->fqe_router = router;
    fqe->fqe_next = NULL;
    pkt->vp_flags &= ~VP_FLAG_FLOW_SET;
    pnode = &fqe->fqe_pnode;
    vr_flow_fill_pnode(pnode, pkt, fmd);

    /*
     * we are competing with the assembler, which may be dequeueing the
     * list from the per-cpu queue right now. we retry the enqueue up to
     * VR_FRAG_ENQUEUE_ATTEMPTS times; it is unlikely to fail more than
     * once.
     *
     * the calculation of vfq_length can be momentarily wrong, but by at
     * most 1, which is fine.
     */
    for (i = 0; i < VR_FRAG_ENQUEUE_ATTEMPTS; i++) {
        tail = *tailp;
        fqe->fqe_next = tail;
        vfq->vfq_length++;
        swapped = __sync_bool_compare_and_swap(tailp, tail, fqe);
        if (swapped) {
            if (tail == NULL)
                vfq->vfq_length = 1;
            break;
        } else {
            vfq->vfq_length--;
            if (i == (VR_FRAG_ENQUEUE_ATTEMPTS - 1))
                goto fail;
        }
    }

    return 0;

fail:
    if (fqe)
        vr_free(fqe, VR_FRAGMENT_QUEUE_ELEMENT_OBJECT);
    vr_pfree(pkt, VP_DROP_FRAGMENTS);
    return -1;
}
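/*
 * The enqueue above builds a lock-free LIFO: vfq_tail always points at
 * the most recently queued element. A sketch of the consumer side,
 * assuming a single assembler that detaches the whole list atomically;
 * it uses the same family of GCC __sync builtins as the enqueue. Not
 * part of the original excerpt.
 */
static struct vr_fragment_queue_element *
fragment_queue_detach_sketch(struct vr_fragment_queue *vfq)
{
    struct vr_fragment_queue_element *head;

    /* atomically take everything; producers then start a fresh list */
    head = __sync_lock_test_and_set(&vfq->vfq_tail, NULL);
    vfq->vfq_length = 0;

    /* head is in reverse (newest-first) order; the caller walks
     * fqe_next, processes each fqe_pnode, and frees the elements */
    return head;
}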
static char *
vr_message_default_malloc(unsigned int size)
{
    return vr_malloc(size, VR_MESSAGE_OBJECT);
}