static void rwlock_test(pthread_t *p, int d, uint64_t *latency, void *(*f)(void *), const char *label) { int t; ck_pr_store_int(&barrier, 0); ck_pr_store_uint(&flag, 0); affinity.delta = d; affinity.request = 0; fprintf(stderr, "Creating threads (%s)...", label); for (t = 0; t < threads; t++) { if (pthread_create(&p[t], NULL, f, latency + t) != 0) { ck_error("ERROR: Could not create thread %d\n", t); } } fprintf(stderr, "done\n"); common_sleep(10); ck_pr_store_uint(&flag, 1); fprintf(stderr, "Waiting for threads to finish acquisition regression..."); for (t = 0; t < threads; t++) pthread_join(p[t], NULL); fprintf(stderr, "done\n\n"); for (t = 1; t <= threads; t++) printf("%10u %20" PRIu64 "\n", t, latency[t - 1]); fprintf(stderr, "\n"); return; }
/*
 * Resubmit a message on the device's (possibly new) channel after a
 * service failover. On send failure the error is recorded in the
 * message; synchronous requests are marked response-ready, while
 * asynchronous ones are completed through the channel callback.
 */
static void
iio_msg_resubmit(struct iio_device *device, struct qnio_msg *msg)
{
	struct channel *ch = device->channel;
	int32_t rc = ch->cd->chdrv_msg_send(ch, msg);

	if (rc == 0)
		return;

	/* Send failed: complete the message with the error in hand. */
	msg->hinfo.err = rc;
	if (msg->hinfo.flags & QNIO_FLAG_SYNC_REQ)
		ck_pr_store_int(&msg->resp_ready, 1);
	else
		ch->cd->chdrv_msg_cb(msg);
}
static void * iio_device_failover_thread(void *args) { struct iio_device *device = (struct iio_device *)args; struct iio_vdisk_hostinfo *hostinfo = device->hostinfo; struct iio_vdisk_hostinfo *new_hostinfo; struct channel *new_channel; struct qnio_msg *msg; time_t start_t, end_t; double diff_t; time(&start_t); nioDbg("Starting failover on device %s", device->devid); read_hostinfo: new_hostinfo = iio_read_hostinfo(device->devid); if (new_hostinfo) { free(hostinfo); device->hostinfo = new_hostinfo; hostinfo = new_hostinfo; } hostinfo->failover_idx = -1; retry_nexthost: /* * Find next host */ hostinfo->failover_idx ++; if (hostinfo->failover_idx == hostinfo->nhosts) { goto read_hostinfo; } /* * Open channel to the new host */ new_channel = iio_channel_open(hostinfo->hosts[hostinfo->failover_idx], device->channel->cacert, device->channel->client_key, device->channel->client_cert); if (new_channel == NULL) { time(&end_t); diff_t = difftime(end_t, start_t); if (diff_t > FAILOVER_TIMEOUT) { nioDbg("Failover timedout"); goto err; } usleep(FAILOVER_RETRY_WAIT); goto retry_nexthost; } /* * Close the old channel. */ device->channel->cd->chdrv_close(device->channel); device->channel = new_channel; if (!iio_check_failover_ready(device)) { goto retry_nexthost; } /* * Restart messages */ ck_spinlock_lock(&device->slock); device->state = IIO_DEVICE_ACTIVE; while (!LIST_EMPTY(&device->retryq)) { msg = LIST_ENTRY(device->retryq.next, struct qnio_msg, lnode); LIST_DEL(&msg->lnode); device->retry_msg_count --; ck_spinlock_unlock(&device->slock); nioDbg("Restarting message, msgid=%ld %p", msg->hinfo.cookie, msg); iio_msg_resubmit(device, msg); ck_spinlock_lock(&device->slock); } ck_spinlock_unlock(&device->slock); pthread_exit(0); return NULL; err: /* * Fail all messages. 
*/ ck_spinlock_lock(&device->slock); device->state = IIO_DEVICE_FAILED; while (!LIST_EMPTY(&device->retryq)) { msg = LIST_ENTRY(device->retryq.next, struct qnio_msg, lnode); LIST_DEL(&msg->lnode); nioDbg("No host found failing message, msgid=%ld %p", msg->hinfo.cookie, msg); device->retry_msg_count --; ck_spinlock_unlock(&device->slock); msg->hinfo.err = QNIOERROR_NOCONN; if (msg->hinfo.flags & QNIO_FLAG_SYNC_REQ) { ck_pr_store_int(&msg->resp_ready, 1); } else { client_callback(msg); } } ck_spinlock_unlock(&device->slock); pthread_exit(0); return NULL; }
int main(int argc, char **argv) { pthread_t *readers; pthread_t writer; unsigned int i, curr; void *curr_ptr; ck_bag_iterator_t bag_it; size_t b = CK_BAG_DEFAULT; if (argc >= 2) { int r = atoi(argv[1]); if (r <= 0) { fprintf(stderr, "# entries in block must be > 0\n"); exit(EXIT_FAILURE); } b = (size_t)r; } if (argc >= 3) { int r = atoi(argv[2]); if (r < 16) { fprintf(stderr, "# entries must be >= 16\n"); exit(EXIT_FAILURE); } writer_max = (unsigned int)r; } ck_epoch_init(&epoch_bag, 100); ck_epoch_register(&epoch_bag, &epoch_wr); ck_bag_allocator_set(&allocator, sizeof(struct bag_epoch)); if (ck_bag_init(&bag, b, CK_BAG_ALLOCATE_GEOMETRIC) == false) { fprintf(stderr, "Error: failed ck_bag_init()."); exit(EXIT_FAILURE); } fprintf(stderr, "Configuration: %u entries, %zu bytes/block, %zu entries/block\n", writer_max, bag.info.bytes, bag.info.max); i = 1; /* Basic Test */ ck_bag_put_spmc(&bag, (void *)(uintptr_t)i); ck_bag_remove_spmc(&bag, (void *)(uintptr_t)i); ck_bag_put_spmc(&bag, (void *)(uintptr_t)i); /* Sequential test */ for (i = 1; i <= 10; i++) ck_bag_put_spmc(&bag, (void *)(uintptr_t)i); for (i = 1; i <= 10; i++) ck_bag_remove_spmc(&bag, (void *)(uintptr_t)i); for (i = 10; i > 0; i--) ck_bag_remove_spmc(&bag, (void *)(uintptr_t)i); for (i = 1; i <= 10; i++) ck_bag_put_spmc(&bag, (void *)(uintptr_t)i); ck_bag_iterator_init(&bag_it, &bag); while (ck_bag_next(&bag_it, &curr_ptr)) { curr = (uintptr_t)(curr_ptr); if (curr > (uintptr_t)i) fprintf(stderr, "ERROR: %ju != %u\n", (uintmax_t)curr, i); ck_bag_remove_spmc(&bag, curr_ptr); } /* Concurrent test */ pthread_create(&writer, NULL, writer_thread, NULL); readers = malloc(sizeof(pthread_t) * NUM_READER_THREADS); for (i = 0; i < NUM_READER_THREADS; i++) { pthread_create(&readers[i], NULL, reader, NULL); } sleep(120); ck_pr_store_int(&leave, 1); for (i = 0; i < NUM_READER_THREADS; i++) pthread_join(readers[i], NULL); pthread_join(writer, NULL); fprintf(stderr, "Current entries: %u\n", ck_bag_count(&bag)); 
ck_bag_destroy(&bag); return 0; }