/*
 * (Re)size the per-queue configuration array of an event device.
 *
 * Three cases, selected by current state and the requested count:
 *  - first-time setup (no array yet, nb_queues != 0): zero-allocate it;
 *  - re-configuration (array exists, nb_queues != 0): release queues
 *    beyond the new count, then realloc the array and zero new entries;
 *  - teardown (array exists, nb_queues == 0): release all queues.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENOTSUP if
 * the driver provides no queue_release op when one is required
 * (via RTE_FUNC_PTR_OR_ERR_RET, which returns from this function).
 */
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
		/* Allocate memory to store queue configuration */
		dev->data->queues_cfg = rte_zmalloc_socket(
				"eventdev->data->queues_cfg",
				sizeof(dev->data->queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->queues_cfg == NULL) {
			dev->data->nb_queues = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
					"nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
	/* Re-configure */
	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		/* When shrinking, release queues that fall beyond the
		 * new count before the backing array is resized. */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		/* Re allocate memory to store queue configuration */
		queues_cfg = dev->data->queues_cfg;
		queues_cfg = rte_realloc(queues_cfg,
				sizeof(queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		/* On failure rte_realloc() leaves the original block
		 * intact, so dev->data->queues_cfg stays valid here. */
		if (queues_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
					" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
		dev->data->queues_cfg = queues_cfg;

		/* Zero-initialize entries added by a grow; realloc does
		 * not clear the newly extended tail. */
		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		/* Teardown: release every previously configured queue;
		 * the config array itself is kept for possible reuse. */
		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}
	dev->data->nb_queues = nb_queues;
	return 0;
}
/*
 * Resize a DPDK-allocated buffer via rte_realloc().
 *
 * On success, optionally reports the physical address of the new buffer
 * through *phys_addr (skipped when phys_addr is NULL).  Returns the new
 * buffer, or NULL on failure (in which case *phys_addr is untouched and,
 * per rte_realloc() semantics, the original buffer remains valid).
 */
void *
spdk_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
{
	void *p;

	p = rte_realloc(buf, size, align);
	if (p == NULL) {
		return NULL;
	}

	if (phys_addr != NULL) {
		*phys_addr = rte_malloc_virt2phy(p);
	}

	return p;
}
/*
 * Query the NIC for the list of supported filter match flags and cache
 * it in sa->filter.  The required entry count is not known up front, so
 * retry once after growing the buffer to the size reported via ENOSPC.
 *
 * Returns 0 on success, positive errno-style code on failure.  On
 * failure the cache is released and reset to empty.
 */
static int
sfc_filter_cache_match_supported(struct sfc_adapter *sa)
{
	struct sfc_filter *filter = &sa->filter;
	size_t num = filter->supported_match_num;
	uint32_t *buf = filter->supported_match;
	uint32_t *new_buf;
	unsigned int retry;
	int rc;

	/* Just a guess of possibly sufficient entries */
	if (num == 0)
		num = 16;

	for (retry = 0; retry < 2; ++retry) {
		if (num != filter->supported_match_num) {
			/*
			 * Keep the rte_realloc() result in a temporary:
			 * assigning it straight to 'buf' would lose the
			 * only reference to the original buffer on failure
			 * and leak it (rte_realloc() does not free the
			 * original block when it fails).
			 */
			new_buf = rte_realloc(buf, num * sizeof(*buf), 0);
			if (new_buf == NULL) {
				rc = ENOMEM;
				goto fail_realloc;
			}
			buf = new_buf;
		}

		rc = efx_filter_supported_filters(sa->nic, buf, num, &num);
		if (rc == 0) {
			filter->supported_match_num = num;
			filter->supported_match = buf;
			return 0;
		} else if (rc != ENOSPC) {
			goto fail_efx_filter_supported_filters;
		}
		/* ENOSPC: 'num' now holds the required count; retry once */
	}

	/* Two attempts were insufficient only if the count keeps growing */
	SFC_ASSERT(rc == ENOSPC);

fail_efx_filter_supported_filters:
fail_realloc:
	/* Original pointer is not freed by rte_realloc() on failure */
	rte_free(buf);
	filter->supported_match = NULL;
	filter->supported_match_num = 0;
	return rc;
}
int sfc_tx_configure(struct sfc_adapter *sa) { const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic); const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf; const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues; int rc = 0; sfc_log_init(sa, "nb_tx_queues=%u (old %u)", nb_tx_queues, sa->txq_count); /* * The datapath implementation assumes absence of boundary * limits on Tx DMA descriptors. Addition of these checks on * datapath would simply make the datapath slower. */ if (encp->enc_tx_dma_desc_boundary != 0) { rc = ENOTSUP; goto fail_tx_dma_desc_boundary; } rc = sfc_tx_check_mode(sa, &dev_conf->txmode); if (rc != 0) goto fail_check_mode; if (nb_tx_queues == sa->txq_count) goto done; if (sa->txq_info == NULL) { sa->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues, sizeof(sa->txq_info[0]), 0, sa->socket_id); if (sa->txq_info == NULL) goto fail_txqs_alloc; } else { struct sfc_txq_info *new_txq_info; if (nb_tx_queues < sa->txq_count) sfc_tx_fini_queues(sa, nb_tx_queues); new_txq_info = rte_realloc(sa->txq_info, nb_tx_queues * sizeof(sa->txq_info[0]), 0); if (new_txq_info == NULL && nb_tx_queues > 0) goto fail_txqs_realloc; sa->txq_info = new_txq_info; if (nb_tx_queues > sa->txq_count) memset(&sa->txq_info[sa->txq_count], 0, (nb_tx_queues - sa->txq_count) * sizeof(sa->txq_info[0])); } while (sa->txq_count < nb_tx_queues) { rc = sfc_tx_qinit_info(sa, sa->txq_count); if (rc != 0) goto fail_tx_qinit_info; sa->txq_count++; } done: return 0; fail_tx_qinit_info: fail_txqs_realloc: fail_txqs_alloc: sfc_tx_close(sa); fail_check_mode: fail_tx_dma_desc_boundary: sfc_log_init(sa, "failed (rc = %d)", rc); return rc; }
static inline int rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports) { uint8_t old_nb_ports = dev->data->nb_ports; void **ports; uint16_t *links_map; struct rte_event_port_conf *ports_cfg; unsigned int i; RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports, dev->data->dev_id); /* First time configuration */ if (dev->data->ports == NULL && nb_ports != 0) { dev->data->ports = rte_zmalloc_socket("eventdev->data->ports", sizeof(dev->data->ports[0]) * nb_ports, RTE_CACHE_LINE_SIZE, dev->data->socket_id); if (dev->data->ports == NULL) { dev->data->nb_ports = 0; RTE_EDEV_LOG_ERR("failed to get mem for port meta data," "nb_ports %u", nb_ports); return -(ENOMEM); } /* Allocate memory to store port configurations */ dev->data->ports_cfg = rte_zmalloc_socket("eventdev->ports_cfg", sizeof(dev->data->ports_cfg[0]) * nb_ports, RTE_CACHE_LINE_SIZE, dev->data->socket_id); if (dev->data->ports_cfg == NULL) { dev->data->nb_ports = 0; RTE_EDEV_LOG_ERR("failed to get mem for port cfg," "nb_ports %u", nb_ports); return -(ENOMEM); } /* Allocate memory to store queue to port link connection */ dev->data->links_map = rte_zmalloc_socket("eventdev->links_map", sizeof(dev->data->links_map[0]) * nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV, RTE_CACHE_LINE_SIZE, dev->data->socket_id); if (dev->data->links_map == NULL) { dev->data->nb_ports = 0; RTE_EDEV_LOG_ERR("failed to get mem for port_map area," "nb_ports %u", nb_ports); return -(ENOMEM); } for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++) dev->data->links_map[i] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID; } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP); ports = dev->data->ports; ports_cfg = dev->data->ports_cfg; links_map = dev->data->links_map; for (i = nb_ports; i < old_nb_ports; i++) (*dev->dev_ops->port_release)(ports[i]); /* Realloc memory for ports */ ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports, 
RTE_CACHE_LINE_SIZE); if (ports == NULL) { RTE_EDEV_LOG_ERR("failed to realloc port meta data," " nb_ports %u", nb_ports); return -(ENOMEM); } /* Realloc memory for ports_cfg */ ports_cfg = rte_realloc(ports_cfg, sizeof(ports_cfg[0]) * nb_ports, RTE_CACHE_LINE_SIZE); if (ports_cfg == NULL) { RTE_EDEV_LOG_ERR("failed to realloc port cfg mem," " nb_ports %u", nb_ports); return -(ENOMEM); } /* Realloc memory to store queue to port link connection */ links_map = rte_realloc(links_map, sizeof(dev->data->links_map[0]) * nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV, RTE_CACHE_LINE_SIZE); if (links_map == NULL) { dev->data->nb_ports = 0; RTE_EDEV_LOG_ERR("failed to realloc mem for port_map," "nb_ports %u", nb_ports); return -(ENOMEM); } if (nb_ports > old_nb_ports) { uint8_t new_ps = nb_ports - old_nb_ports; unsigned int old_links_map_end = old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; unsigned int links_map_end = nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; memset(ports + old_nb_ports, 0, sizeof(ports[0]) * new_ps); memset(ports_cfg + old_nb_ports, 0, sizeof(ports_cfg[0]) * new_ps); for (i = old_links_map_end; i < links_map_end; i++) links_map[i] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID; } dev->data->ports = ports; dev->data->ports_cfg = ports_cfg; dev->data->links_map = links_map; } else if (dev->data->ports != NULL && nb_ports == 0) { RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP); ports = dev->data->ports; for (i = nb_ports; i < old_nb_ports; i++) (*dev->dev_ops->port_release)(ports[i]); } dev->data->nb_ports = nb_ports; return 0; }