Example #1
static int
create_table(unsigned with_data, unsigned table_index)
{
	char name[RTE_HASH_NAMESIZE];

	if (with_data)
		/* Table will store 8-byte data */
		sprintf(name, "test_hash%d_data", hashtest_key_lens[table_index]);
	else
		sprintf(name, "test_hash%d", hashtest_key_lens[table_index]);

	ut_params.name = name;
	ut_params.key_len = hashtest_key_lens[table_index];
	ut_params.socket_id = rte_socket_id();
	h[table_index] = rte_hash_find_existing(name);
	if (h[table_index] != NULL)
		/*
	 * If the table was already created, free it and create it again,
	 * so we force it to be empty
		 */
		rte_hash_free(h[table_index]);
	h[table_index] = rte_hash_create(&ut_params);
	if (h[table_index] == NULL) {
		printf("Error creating table\n");
		return -1;
	}
	return 0;
}
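
create_table() relies on globals defined elsewhere in the test harness (hashtest_key_lens[], ut_params and the handle array h[]). A minimal sketch of plausible declarations, with assumed key lengths, table size and hash function:

#include <rte_hash.h>
#include <rte_jhash.h>

/* Assumed key lengths and table count; the real test defines its own. */
static uint32_t hashtest_key_lens[] = {4, 8, 16, 32, 48, 64};
#define NUM_KEYSIZES (sizeof(hashtest_key_lens) / sizeof(hashtest_key_lens[0]))

static struct rte_hash *h[NUM_KEYSIZES];

static struct rte_hash_parameters ut_params = {
	.entries = 1 << 16,	/* assumed table size */
	.hash_func = rte_jhash,	/* any rte_hash_function works here */
	.hash_func_init_val = 0,
	/* .name, .key_len and .socket_id are filled in by create_table() */
};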
Example #2
void enic_clsf_destroy(struct enic *enic)
{
	u32 index;
	struct enic_fdir_node *key;
	/* delete classifier entries */
	for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
		key = enic->fdir.nodes[index];
		if (key) {
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL);
			rte_free(key);
		}
	}

	if (enic->fdir.hash) {
		rte_hash_free(enic->fdir.hash);
		enic->fdir.hash = NULL;
	}
}
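
The matching setup path is not shown above; a hypothetical sketch of how enic->fdir.hash could be created with rte_hash_create() (the table name, key length and SOCKET_ID_ANY placement are assumptions, not the real enic driver values; only ENICPMD_FDIR_MAX is taken from the code above):

#include <errno.h>
#include <rte_hash.h>

int enic_clsf_init(struct enic *enic)
{
	struct rte_hash_parameters hash_params = {
		.name = "enicpmd_clsf_hash",			/* assumed name */
		.entries = ENICPMD_FDIR_MAX,
		.key_len = sizeof(struct rte_eth_fdir_filter),	/* assumed key */
		.hash_func = NULL,				/* default hash function */
		.hash_func_init_val = 0,
		.socket_id = SOCKET_ID_ANY,
	};

	enic->fdir.hash = rte_hash_create(&hash_params);
	return (enic->fdir.hash == NULL) ? -ENOMEM : 0;
}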
Example #3
/* Create a new flow table made of an rte_hash table and a fixed size
 * data array for storing values. Only supports IPv4 5-tuple lookups. */
struct onvm_ft*
onvm_ft_create(int cnt, int entry_size) {
        struct rte_hash* hash;
        struct onvm_ft* ft;
        struct rte_hash_parameters ipv4_hash_params = {
            .name = NULL,
            .entries = cnt,
            .key_len = sizeof(struct onvm_ft_ipv4_5tuple),
            .hash_func = NULL,
            .hash_func_init_val = 0,
        };

        char s[64];
        /* Create the IPv4 hash table. Use the lcore id and cycle counter to get a unique name. */
        ipv4_hash_params.name = s;
        ipv4_hash_params.socket_id = rte_socket_id();
        snprintf(s, sizeof(s), "onvm_ft_%d-%"PRIu64, rte_lcore_id(), rte_get_tsc_cycles());
        hash = rte_hash_create(&ipv4_hash_params);
        if (hash == NULL) {
                return NULL;
        }
        ft = (struct onvm_ft*)rte_calloc("table", 1, sizeof(struct onvm_ft), 0);
        if (ft == NULL) {
                rte_hash_free(hash);
                return NULL;
        }
        ft->hash = hash;
        ft->cnt = cnt;
        ft->entry_size = entry_size;
        /* Create data array for storing values */
        ft->data = rte_calloc("entry", cnt, entry_size, 0);
        if (ft->data == NULL) {
                rte_hash_free(hash);
                rte_free(ft);
                return NULL;
        }
        return ft;
}

/* Add an entry to the flow table and set *data to point to the new value.
 * Returns:
 *  the index in the array on success
 *  -EPROTONOSUPPORT if the packet is not IPv4
 *  -EINVAL if the parameters are invalid
 *  -ENOSPC if there is no space in the hash for this key
 */
int
onvm_ft_add_pkt(struct onvm_ft* table, struct rte_mbuf *pkt, char** data) {
        int32_t tbl_index;
        struct onvm_ft_ipv4_5tuple key;
        int err;

        err = onvm_ft_fill_key(&key, pkt);
        if (err < 0) {
                return err;
        }
        tbl_index = rte_hash_add_key_with_hash(table->hash, (const void *)&key, pkt->hash.rss);
        if (tbl_index >= 0) {
                *data = &table->data[tbl_index * table->entry_size];
        }
        return tbl_index;
}
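
A minimal usage sketch for the two functions above. The flow_entry struct, flow_table variable and handle_packet() helper are hypothetical, not part of the flow-table code itself; the point is that the index returned by onvm_ft_add_pkt() selects an entry_size-sized slot in the data array, so the caller can treat *data as its own per-flow state:

#include <rte_mbuf.h>

/* Hypothetical per-flow state kept in the table's data array. */
struct flow_entry {
        uint64_t packet_count;
        uint64_t byte_count;
};

/* e.g. flow_table = onvm_ft_create(1024, sizeof(struct flow_entry)); */
static struct onvm_ft *flow_table;

static void
handle_packet(struct rte_mbuf *pkt) {
        struct flow_entry *entry;
        char *data;
        int ret;

        /* Adds the 5-tuple if it is new, or returns the existing slot.
         * Requires a valid RSS hash in pkt->hash.rss (RSS enabled on the port). */
        ret = onvm_ft_add_pkt(flow_table, pkt, &data);
        if (ret < 0)
                return;                            /* not IPv4, or table full */

        entry = (struct flow_entry *)data;         /* slot was zeroed by rte_calloc() */
        entry->packet_count++;
        entry->byte_count += rte_pktmbuf_pkt_len(pkt);
}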
Example #4
/* Clears a flow table and frees associated memory */
void
onvm_ft_free(struct onvm_ft *table)
{
        rte_hash_reset(table->hash);
        rte_hash_free(table->hash);
}
Example #5
static void
free_table(unsigned table_index)
{
	rte_hash_free(h[table_index]);
}
Example #6
/*
 * Looks for random keys which can ALL fit
 * in the hash table (no errors)
 */
static int
get_input_keys(unsigned with_pushes, unsigned table_index)
{
	unsigned i, j;
	unsigned bucket_idx, incr, success = 1;
	uint8_t k = 0;
	int32_t ret;
	const uint32_t bucket_bitmask = NUM_BUCKETS - 1;

	/* Reset all arrays */
	for (i = 0; i < MAX_ENTRIES; i++)
		slot_taken[i] = 0;

	for (i = 0; i < NUM_BUCKETS; i++)
		buckets[i] = 0;

	for (j = 0; j < hashtest_key_lens[table_index]; j++)
		keys[0][j] = 0;

	/*
	 * Add only entries that are not duplicated and that fit in the table
	 * (cannot store more than BUCKET_SIZE entries in a bucket).
	 * Regardless of whether a key has been added successfully or not
	 * (success), the next one to try is incremented by 1.
	 */
	for (i = 0; i < KEYS_TO_ADD;) {
		incr = 0;
		if (i != 0) {
			keys[i][0] = ++k;
			/* Overflow, need to increment the next byte */
			if (keys[i][0] == 0)
				incr = 1;
			for (j = 1; j < hashtest_key_lens[table_index]; j++) {
				/* Do not increase next byte */
				if (incr == 0)
					if (success == 1)
						keys[i][j] = keys[i - 1][j];
					else
						keys[i][j] = keys[i][j];
				/* Increase next byte by one */
				else {
					if (success == 1)
						keys[i][j] = keys[i-1][j] + 1;
					else
						keys[i][j] = keys[i][j] + 1;
					if (keys[i][j] == 0)
						incr = 1;
					else
						incr = 0;
				}
			}
		}
		success = 0;
		signatures[i] = rte_hash_hash(h[table_index], keys[i]);
		bucket_idx = signatures[i] & bucket_bitmask;
		/*
		 * If we are not inserting keys in secondary locations,
		 * do not try to insert the key when the bucket is full
		 */
		if (with_pushes == 0)
			if (buckets[bucket_idx] == BUCKET_SIZE)
				continue;

		/* If the key can be added, keep it in the successful key array "keys" */
		ret = rte_hash_add_key_with_hash(h[table_index], keys[i],
						signatures[i]);
		if (ret >= 0) {
			/* If the key was already added, ignore the entry and do not store it */
			if (slot_taken[ret])
				continue;
			else {
				/* Store the returned position and mark slot as taken */
				slot_taken[ret] = 1;
				positions[i] = ret;
				buckets[bucket_idx]++;
				success = 1;
				i++;
			}
		}
	}

	/* Reset the table, so we can measure the time to add all the entries */
	rte_hash_free(h[table_index]);
	h[table_index] = rte_hash_create(&ut_params);

	return 0;
}
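
get_input_keys() also relies on test-wide constants and arrays defined elsewhere. A sketch of plausible definitions (the sizes are assumptions, not the test's real values); NUM_BUCKETS must be a power of two so that (signature & bucket_bitmask) selects a valid bucket:

#define MAX_KEYSIZE	64			/* assumed largest entry in hashtest_key_lens[] */
#define MAX_ENTRIES	(1 << 16)		/* assumed table capacity */
#define BUCKET_SIZE	4			/* entries per bucket */
#define NUM_BUCKETS	(MAX_ENTRIES / BUCKET_SIZE)
#define KEYS_TO_ADD	(MAX_ENTRIES * 3 / 4)	/* leave some headroom */

static uint8_t keys[KEYS_TO_ADD][MAX_KEYSIZE];
static hash_sig_t signatures[KEYS_TO_ADD];
static int32_t positions[KEYS_TO_ADD];
static uint8_t slot_taken[MAX_ENTRIES];
static uint16_t buckets[NUM_BUCKETS];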
Example #7
/*
 * Initialize a given port using default settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 * FIXME: Starting with assumption of one thread/core per port
 */
static inline int uhd_dpdk_port_init(struct uhd_dpdk_port *port,
                                     struct rte_mempool *rx_mbuf_pool,
                                     unsigned int mtu)
{
    int retval;

    /* Check for a valid port */
    if (port->id >= rte_eth_dev_count())
        return -ENODEV;

    /* Set up Ethernet device with defaults (1 RX ring, 1 TX ring) */
    /* FIXME: Check if hw_ip_checksum is possible */
    struct rte_eth_conf port_conf = {
        .rxmode = {
            .max_rx_pkt_len = mtu,
            .jumbo_frame = 1,
            .hw_ip_checksum = 1,
        }
    };
    retval = rte_eth_dev_configure(port->id, 1, 1, &port_conf);
    if (retval != 0)
        return retval;

    retval = rte_eth_rx_queue_setup(port->id, 0, DEFAULT_RING_SIZE,
                 rte_eth_dev_socket_id(port->id), NULL, rx_mbuf_pool);
    if (retval < 0)
        return retval;

    retval = rte_eth_tx_queue_setup(port->id, 0, DEFAULT_RING_SIZE,
                 rte_eth_dev_socket_id(port->id), NULL);
    if (retval < 0)
        goto port_init_fail;

    /* Create the hash table for the RX sockets */
    char name[32];
    snprintf(name, sizeof(name), "rx_table_%u", port->id);
    struct rte_hash_parameters hash_params = {
        .name = name,
        .entries = UHD_DPDK_MAX_SOCKET_CNT,
        .key_len = sizeof(struct uhd_dpdk_ipv4_5tuple),
        .hash_func = NULL,
        .hash_func_init_val = 0,
    };
    port->rx_table = rte_hash_create(&hash_params);
    if (port->rx_table == NULL) {
        retval = rte_errno;
        goto port_init_fail;
    }

    /* Create ARP table */
    snprintf(name, sizeof(name), "arp_table_%u", port->id);
    hash_params.name = name;
    hash_params.entries = UHD_DPDK_MAX_SOCKET_CNT;
    hash_params.key_len = sizeof(uint32_t);
    hash_params.hash_func = NULL;
    hash_params.hash_func_init_val = 0;
    port->arp_table = rte_hash_create(&hash_params);
    if (port->arp_table == NULL) {
        retval = rte_errno;
        goto free_rx_table;
    }

    /* Set up list for TX queues */
    LIST_INIT(&port->txq_list);

    /* Start the Ethernet port. */
    retval = rte_eth_dev_start(port->id);
    if (retval < 0) {
        goto free_arp_table;
    }

    /* Display the port MAC address. */
    rte_eth_macaddr_get(port->id, &port->mac_addr);
    RTE_LOG(INFO, EAL, "Port %u MAC: %02x %02x %02x %02x %02x %02x\n",
                (unsigned)port->id,
                port->mac_addr.addr_bytes[0], port->mac_addr.addr_bytes[1],
                port->mac_addr.addr_bytes[2], port->mac_addr.addr_bytes[3],
                port->mac_addr.addr_bytes[4], port->mac_addr.addr_bytes[5]);

    struct rte_eth_link link;
    rte_eth_link_get(port->id, &link);
    RTE_LOG(INFO, EAL, "Port %u UP: %d\n", port->id, link.link_status);

    return 0;

free_arp_table:
    rte_hash_free(port->arp_table);
free_rx_table:
    rte_hash_free(port->rx_table);
port_init_fail:
    return rte_errno;
}

static int uhd_dpdk_thread_init(struct uhd_dpdk_thread *thread, unsigned int id)
{
    if (!ctx || !thread)
        return -EINVAL;

    unsigned int socket_id = rte_lcore_to_socket_id(id);
    thread->id = id;
    thread->rx_pktbuf_pool = ctx->rx_pktbuf_pools[socket_id];
    thread->tx_pktbuf_pool = ctx->tx_pktbuf_pools[socket_id];
    LIST_INIT(&thread->port_list);

    char name[32];
    snprintf(name, sizeof(name), "sockreq_ring_%u", id);
    thread->sock_req_ring = rte_ring_create(
                               name,
                               UHD_DPDK_MAX_PENDING_SOCK_REQS,
                               socket_id,
                               RING_F_SC_DEQ
                            );
    if (!thread->sock_req_ring)
        return -ENOMEM;
    return 0;
}


int uhd_dpdk_init(int argc, char **argv, unsigned int num_ports,
                  int *port_thread_mapping, int num_mbufs, int mbuf_cache_size,
                  int mtu)
{
    /* Init context only once */
    if (ctx)
        return 1;

    if ((num_ports == 0) || (port_thread_mapping == NULL)) {
        return -EINVAL;
    }

    /* Grabs arguments intended for DPDK's EAL */
    int ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

    ctx = (struct uhd_dpdk_ctx *) rte_zmalloc("uhd_dpdk_ctx", sizeof(*ctx), rte_socket_id());
    if (!ctx)
        return -ENOMEM;

    ctx->num_threads = rte_lcore_count();
    if (ctx->num_threads <= 1)
        rte_exit(EXIT_FAILURE, "Error: No worker threads enabled\n");

    /* Check that we have ports to send/receive on */
    ctx->num_ports = rte_eth_dev_count();
    if (ctx->num_ports < 1)
        rte_exit(EXIT_FAILURE, "Error: Found no ports\n");
    if (ctx->num_ports < num_ports)
        rte_exit(EXIT_FAILURE, "Error: User requested more ports than available\n");

    /* Get memory for thread and port data structures */
    ctx->threads = rte_zmalloc("uhd_dpdk_thread", RTE_MAX_LCORE*sizeof(struct uhd_dpdk_thread), 0);
    if (!ctx->threads)
        rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for thread data\n");
    ctx->ports = rte_zmalloc("uhd_dpdk_port", ctx->num_ports*sizeof(struct uhd_dpdk_port), 0);
    if (!ctx->ports)
        rte_exit(EXIT_FAILURE, "Error: Could not allocate memory for port data\n");

    /* Initialize the thread data structures */
    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        /* Create one RX and one TX mempool per socket */
        unsigned int socket_id = rte_lcore_to_socket_id(i);
        /* FIXME Probably want to take into account actual number of ports per socket */
        if (ctx->tx_pktbuf_pools[socket_id] == NULL) {
            /* Creates a new mempool in memory to hold the mbufs.
             * This is done for each CPU socket
             */
            const int mbuf_size = mtu + 2048 + RTE_PKTMBUF_HEADROOM;
            char name[32];
            snprintf(name, sizeof(name), "rx_mbuf_pool_%u", socket_id);
            ctx->rx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create(
                                               name,
                                               ctx->num_ports*num_mbufs,
                                               mbuf_cache_size,
                                               0,
                                               mbuf_size,
                                               socket_id
                                           );
            snprintf(name, sizeof(name), "tx_mbuf_pool_%u", socket_id);
            ctx->tx_pktbuf_pools[socket_id] = rte_pktmbuf_pool_create(
                                               name,
                                               ctx->num_ports*num_mbufs,
                                               mbuf_cache_size,
                                               0,
                                               mbuf_size,
                                               socket_id
                                           );
            if ((ctx->rx_pktbuf_pools[socket_id]== NULL) ||
                (ctx->tx_pktbuf_pools[socket_id]== NULL))
                rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
        }

        if (uhd_dpdk_thread_init(&ctx->threads[i], i) < 0)
            rte_exit(EXIT_FAILURE, "Error initializing thread %i\n", i);
    }

    unsigned master_lcore = rte_get_master_lcore();

    /* Assign ports to threads and initialize the port data structures */
    for (unsigned int i = 0; i < num_ports; i++) {
        int thread_id = port_thread_mapping[i];
        if (thread_id < 0)
            continue;
        if (((unsigned int) thread_id) == master_lcore)
            RTE_LOG(WARNING, EAL, "User requested master lcore for port %u\n", i);
        if (ctx->threads[thread_id].id != (unsigned int) thread_id)
            rte_exit(EXIT_FAILURE, "Requested inactive lcore %u for port %u\n", (unsigned int) thread_id, i);

        struct uhd_dpdk_port *port = &ctx->ports[i];
        port->id = i;
        port->parent = &ctx->threads[thread_id];
        ctx->threads[thread_id].num_ports++;
        LIST_INSERT_HEAD(&ctx->threads[thread_id].port_list, port, port_entry);

        /* Initialize port. */
        if (uhd_dpdk_port_init(port, port->parent->rx_pktbuf_pool, mtu) != 0)
            rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
                    i);
    }

    RTE_LOG(INFO, EAL, "Init DONE!\n");

    /* FIXME: Create functions to do this */
    RTE_LOG(INFO, EAL, "Starting I/O threads!\n");

    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        struct uhd_dpdk_thread *t = &ctx->threads[i];
        if (!LIST_EMPTY(&t->port_list)) {
            rte_eal_remote_launch(_uhd_dpdk_driver_main, NULL, ctx->threads[i].id);
        }
    }
    return 0;
}

/* FIXME: This will be changed once we have functions to handle the threads */
int uhd_dpdk_destroy(void)
{
    if (!ctx)
        return -ENODEV;

    struct uhd_dpdk_config_req *req = (struct uhd_dpdk_config_req *) rte_zmalloc(NULL, sizeof(*req), 0);
    if (!req)
        return -ENOMEM;

    req->req_type = UHD_DPDK_LCORE_TERM;

    for (int i = rte_get_next_lcore(-1, 1, 0);
        (i < RTE_MAX_LCORE);
        i = rte_get_next_lcore(i, 1, 0))
    {
        struct uhd_dpdk_thread *t = &ctx->threads[i];

        if (LIST_EMPTY(&t->port_list))
            continue;

        if (rte_eal_get_lcore_state(t->id) == FINISHED)
            continue;

        pthread_mutex_init(&req->mutex, NULL);
        pthread_cond_init(&req->cond, NULL);
        pthread_mutex_lock(&req->mutex);
        if (rte_ring_enqueue(t->sock_req_ring, req)) {
            pthread_mutex_unlock(&req->mutex);
            RTE_LOG(ERR, USER2, "Failed to terminate thread %d\n", i);
            rte_free(req);
            return -ENOSPC;
        }
        /* pthread_cond_timedwait() takes an absolute time, not a duration */
        struct timespec timeout;
        clock_gettime(CLOCK_REALTIME, &timeout);
        timeout.tv_sec += 1;
        pthread_cond_timedwait(&req->cond, &req->mutex, &timeout);
        pthread_mutex_unlock(&req->mutex);
    }

    rte_free(req);
    return 0;
}
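
uhd_dpdk_port_init() above frees the per-port hash tables only on its error paths. A hypothetical teardown counterpart (uhd_dpdk_port_cleanup() is not part of the code shown; it assumes the port was started and uses the fact that rte_hash_free() ignores NULL handles):

static void uhd_dpdk_port_cleanup(struct uhd_dpdk_port *port)
{
    /* Stop RX/TX before tearing down per-port state */
    rte_eth_dev_stop(port->id);
    rte_eth_dev_close(port->id);

    rte_hash_free(port->arp_table);
    port->arp_table = NULL;
    rte_hash_free(port->rx_table);
    port->rx_table = NULL;
}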
Example #8
/*
 * Run a hash table performance test based on params.
 */
static int
run_tbl_perf_test(struct tbl_perf_test_params *params)
{
	static unsigned calledCount = 5;
	struct rte_hash_parameters hash_params = {
		.entries = params->entries,
		.bucket_entries = params->bucket_entries,
		.key_len = params->key_len,
		.hash_func = params->hash_func,
		.hash_func_init_val = params->hash_func_init_val,
		.socket_id = rte_socket_id(),
	};
	struct rte_hash *handle;
	double avg_occupancy = 0, ticks = 0;
	uint32_t num_iterations, invalid_pos;
	char name[RTE_HASH_NAMESIZE];
	char hashname[RTE_HASH_NAMESIZE];

	snprintf(name, sizeof(name), "test%u", calledCount++);
	hash_params.name = name;

	handle = rte_hash_create(&hash_params);
	RETURN_IF_ERROR(handle == NULL, "hash creation failed");

	switch (params->test_type){
	case ADD_ON_EMPTY:
		ticks = run_single_tbl_perf_test(handle, rte_hash_add_key,
				params, &avg_occupancy, &invalid_pos);
		break;
	case DELETE_ON_EMPTY:
		ticks = run_single_tbl_perf_test(handle, rte_hash_del_key,
				params, &avg_occupancy, &invalid_pos);
		break;
	case LOOKUP_ON_EMPTY:
		ticks = run_single_tbl_perf_test(handle, rte_hash_lookup,
				params, &avg_occupancy, &invalid_pos);
		break;
	case ADD_UPDATE:
		num_iterations = params->num_iterations;
		params->num_iterations = params->entries;
		run_single_tbl_perf_test(handle, rte_hash_add_key, params,
				&avg_occupancy, &invalid_pos);
		params->num_iterations = num_iterations;
		ticks = run_single_tbl_perf_test(handle, rte_hash_add_key,
				params, &avg_occupancy, &invalid_pos);
		break;
	case DELETE:
		num_iterations = params->num_iterations;
		params->num_iterations = params->entries;
		run_single_tbl_perf_test(handle, rte_hash_add_key, params,
				&avg_occupancy, &invalid_pos);

		params->num_iterations = num_iterations;
		ticks = run_single_tbl_perf_test(handle, rte_hash_del_key,
				params, &avg_occupancy, &invalid_pos);
		break;
	case LOOKUP:
		num_iterations = params->num_iterations;
		params->num_iterations = params->entries;
		run_single_tbl_perf_test(handle, rte_hash_add_key, params,
				&avg_occupancy, &invalid_pos);

		params->num_iterations = num_iterations;
		ticks = run_single_tbl_perf_test(handle, rte_hash_lookup,
				params, &avg_occupancy, &invalid_pos);
		break;
	default: return -1;
	}

	snprintf(hashname, RTE_HASH_NAMESIZE, "%s", get_hash_name(params->hash_func));

	printf("%-12s, %-15s, %-16u, %-7u, %-18u, %-8u, %-19.2f, %.2f\n",
		hashname,
		get_tbl_perf_test_desc(params->test_type),
		(unsigned) params->key_len,
		(unsigned) params->entries,
		(unsigned) params->bucket_entries,
		(unsigned) invalid_pos,
		avg_occupancy,
		ticks
	);

	/* Free */
	rte_hash_free(handle);
	return 0;
}
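
/*
 * A sketch of the parameter struct consumed by run_tbl_perf_test() above.
 * The field names are taken from their uses in that function; the enum type
 * name and exact field types are assumptions, not necessarily the test's
 * real definitions.
 */
enum hash_test_t {
	ADD_ON_EMPTY,
	DELETE_ON_EMPTY,
	LOOKUP_ON_EMPTY,
	ADD_UPDATE,
	DELETE,
	LOOKUP,
};

struct tbl_perf_test_params {
	enum hash_test_t test_type;
	uint32_t entries;
	uint32_t bucket_entries;
	uint32_t key_len;
	uint32_t num_iterations;
	rte_hash_function hash_func;	/* from <rte_hash.h> */
	uint32_t hash_func_init_val;
};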

/*
 * Run all hash table performance tests.
 */
static int run_all_tbl_perf_tests(void)
{
	unsigned i;

	printf(" *** Hash table performance test results ***\n");
	printf("Hash Func.  , Operation      , Key size (bytes), Entries, "
	       "Entries per bucket, Errors  , Avg. bucket entries, Ticks/Op.\n");

	/* Loop through every combination of test parameters */
	for (i = 0;
	     i < sizeof(tbl_perf_params) / sizeof(struct tbl_perf_test_params);
	     i++) {

		/* Perform test */
		if (run_tbl_perf_test(&tbl_perf_params[i]) < 0)
			return -1;
	}
	return 0;
}