void benchmark(struct benchmark_config *config) { int i; struct worker_info *producers; struct worker_info *consumers; struct work_queue queue_to_producer; struct work_queue queue_to_consumer; struct work_queue trash_queue; unsigned long long start, elapsed; work_queue_init(&queue_to_producer); work_queue_init(&queue_to_consumer); work_queue_init(&trash_queue); producers = create_workers(config, config->producer_thnum, config->producer, &queue_to_producer, &queue_to_consumer); consumers = create_workers(config, config->consumer_thnum, config->consumer, &queue_to_consumer, &trash_queue); start = stopwatch_start(); for (i = 0; i < config->num_works; i++) { struct work *work = xmalloc(sizeof(*work)); memset(work, 0, sizeof(*work)); work->seed = config->seed_offset + i; work_queue_push(&queue_to_producer, work); } work_queue_close(&queue_to_producer); join_workers(producers, config->producer_thnum); work_queue_close(&queue_to_consumer); join_workers(consumers, config->consumer_thnum); work_queue_close(&trash_queue); elapsed = stopwatch_stop(start); collect_results(config, &trash_queue, start, elapsed); destroy_workers(consumers, config->consumer_thnum); destroy_workers(producers, config->producer_thnum); work_queue_destroy(&queue_to_producer); work_queue_destroy(&queue_to_consumer); work_queue_destroy(&trash_queue); }
/*
 * Set up a thread's information.
 *
 * Lazily creates the thread's libevent base, registers the notify-pipe
 * read event that wakes the thread, and allocates the per-thread
 * connection queue, work queue, stats mutex, suffix cache and connection
 * hash.  Any failure here is fatal: the process exits.
 */
static void setup_thread(LIBEVENT_THREAD *me) {
    if (! me->base) {
        me->base = event_init();
        if (! me->base) {
            moxi_log_write("Can't allocate event base\n");
            exit(EXIT_FAILURE);
        }
    }

    /* Listen for notifications from other threads */
    event_set(&me->notify_event, me->notify_receive_fd,
              EV_READ | EV_PERSIST, thread_libevent_process, me);
    event_base_set(me->base, &me->notify_event);

    /* NULL timeout: event stays armed (EV_PERSIST) until removed. */
    if (event_add(&me->notify_event, NULL) == -1) {
        moxi_log_write("Can't monitor libevent notify pipe\n");
        exit(EXIT_FAILURE);
    }

    me->new_conn_queue = malloc(sizeof(struct conn_queue));
    if (me->new_conn_queue == NULL) {
        perror("Failed to allocate memory for connection queue");
        exit(EXIT_FAILURE);
    }
    cq_init(me->new_conn_queue);

    // TODO: Merge new_conn_queue with work_queue.
    //
    me->work_queue = calloc(1, sizeof(work_queue));
    if (me->work_queue == NULL) {
        perror("Failed to allocate memory for work queue");
        exit(EXIT_FAILURE);
    }
    work_queue_init(me->work_queue, me->base);

    if (pthread_mutex_init(&me->stats.mutex, NULL) != 0) {
        perror("Failed to initialize mutex");
        exit(EXIT_FAILURE);
    }

    me->suffix_cache = cache_create("suffix", SUFFIX_SIZE, sizeof(char*),
                                    NULL, NULL);
    if (me->suffix_cache == NULL) {
        moxi_log_write("Failed to create suffix cache\n");
        exit(EXIT_FAILURE);
    }

    me->conn_hash = genhash_init(512, strhash_ops);
    if (me->conn_hash == NULL) {
        moxi_log_write("Failed to create connection hash\n");
        exit(EXIT_FAILURE);
    }
}
/* * nvme_set_irq will set the new interrupt scheme for this device regardless * of the current irq scheme that is active for this device. It also validates * if the inputs given for setting up new scheme are within bounds. * NOTE: The controller should be disabled before setting up new scheme. */ int nvme_set_irq(struct metrics_device_list *pmetrics_device_elem, struct interrupts *irq_new) { int err = SUCCESS; struct msix_info msix_tbl_info; /* Info for MSI-X tables */ struct nvme_device *pnvme_dev = pmetrics_device_elem->metrics_device; struct interrupts *user_data = NULL; /* Allocating memory for user struct in kernel space */ user_data = kmalloc(sizeof(struct interrupts), GFP_KERNEL); if (user_data == NULL) { LOG_ERR("Unable to alloc kernel memory to copy user data"); err = -ENOMEM; goto fail_out; } if (copy_from_user(user_data, irq_new, sizeof(struct interrupts))) { LOG_ERR("Unable to copy from user space"); err = -EFAULT; goto fail_out; } LOG_DBG("IRQ Scheme = %d", user_data->irq_type); /* First validate if the inputs given are correct */ err = validate_irq_inputs(pmetrics_device_elem, user_data, &msix_tbl_info); if (err < 0) { LOG_ERR("Invalid inputs set or device is not disabled"); return err; } /* lock onto IRQ linked list mutex as we would access the IRQ list */ mutex_lock(&pmetrics_device_elem->irq_process.irq_track_mtx); /* disable the current IRQ scheme */ err = disable_active_irq(pmetrics_device_elem, pnvme_dev-> public_dev.irq_active.irq_type); if (err < 0) { LOG_ERR("Reset of IRQ to INT_NONE failed..."); goto mutex_unlck; } /* initialize work queue */ err = work_queue_init(&pmetrics_device_elem->irq_process); if (err < 0) { LOG_ERR("Failed to initialize resources for work queue/items"); goto mutex_unlck; } /* Switch based on new irq type desired */ switch (user_data->irq_type) { case INT_MSI_SINGLE: /* MSI Single interrupt settings */ err = set_msi_single(pmetrics_device_elem); break; case INT_MSI_MULTI: /* MSI Multi interrupt settings */ err = 
set_msi_multi(pmetrics_device_elem, user_data->num_irqs); break; case INT_MSIX: /* MSI-X interrupt settings */ err = set_msix(pmetrics_device_elem, user_data->num_irqs, &msix_tbl_info); break; case INT_NONE: /* Set IRQ type to NONE */ /* If here then already the IRQ scheme is none */ break; default: LOG_ERR("Invalid Interrupt Type specified."); err = -EBADRQC; break; } /* Return value can be +ve, 0(SUCCESS) or -ve */ if (err == SUCCESS) { /* Set to the new irq scheme */ pnvme_dev->public_dev.irq_active.irq_type = user_data->irq_type; pnvme_dev->public_dev.irq_active.num_irqs = user_data->num_irqs; /* Following will only be read by ISR */ pmetrics_device_elem->irq_process.irq_type = user_data->irq_type; } /* Fall through is intended */ mutex_unlck: mutex_unlock(&pmetrics_device_elem->irq_process.irq_track_mtx); fail_out: if (user_data != NULL) { kfree(user_data); } return err; }