Example #1
static void *
thread(void *group)
{
	ck_barrier_combining_state_t state = CK_BARRIER_COMBINING_STATE_INITIALIZER;
	int j, counter;
	int i = 0;

	aff_iterate(&a);

	ck_pr_inc_int(&barrier_wait);
	while (ck_pr_load_int(&barrier_wait) != (nthr * ngroups))
		ck_pr_stall();

	for (j = 0; j < ITERATE; j++) {
		/* j also advances here, so it increases by two per pass. */
		i = j++ & (ENTRIES - 1);
		ck_pr_inc_int(&counters[i]);
		ck_barrier_combining(&barrier, group, &state);
		counter = ck_pr_load_int(&counters[i]);
		if (counter != nthr * ngroups * (j / ENTRIES + 1)) {
			fprintf(stderr, "FAILED [%d:%d]: %d != %d\n", i, j - 1,
			    counter, nthr * ngroups * (j / ENTRIES + 1));
			exit(EXIT_FAILURE);
		}
	}

	return (NULL);
}
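The ck_pr_inc_int()/ck_pr_load_int() spin at the top of thread() is the start-line idiom used throughout the Concurrency Kit regression tests: each thread announces itself, then spins until every participant has arrived. A minimal self-contained sketch of just that idiom (the NTHREADS constant and worker() name are illustrative, not from the example above):

#include <pthread.h>
#include <stdio.h>
#include <ck_pr.h>

#define NTHREADS 4

static int ready;	/* Threads that have reached the start line. */

static void *
worker(void *arg)
{
	(void)arg;

	/* Announce arrival, then spin until every thread has arrived. */
	ck_pr_inc_int(&ready);
	while (ck_pr_load_int(&ready) != NTHREADS)
		ck_pr_stall();

	/* All threads pass this point together. */
	return NULL;
}

int
main(void)
{
	pthread_t t[NTHREADS];
	int i;

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);

	puts("all threads released");
	return 0;
}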
Example #2
static void *
thread(void *b)
{
	ck_barrier_mcs_t *barrier = b;
	ck_barrier_mcs_state_t state;
	int j, counter;
	int i = 0;

	aff_iterate(&a);

	ck_barrier_mcs_subscribe(barrier, &state);

	ck_pr_inc_int(&barrier_wait);
	while (ck_pr_load_int(&barrier_wait) != nthr)
		ck_pr_stall();

	for (j = 0; j < ITERATE; j++) {
		/* j also advances here, so it increases by two per pass. */
		i = j++ & (ENTRIES - 1);
		ck_pr_inc_int(&counters[i]);
		ck_barrier_mcs(barrier, &state);
		counter = ck_pr_load_int(&counters[i]);
		if (counter != nthr * (j / ENTRIES + 1)) {
			fprintf(stderr, "FAILED [%d:%d]: %d != %d\n", i, j - 1,
			    counter, nthr * (j / ENTRIES + 1));
			exit(EXIT_FAILURE);
		}
	}

	return (NULL);
}
Example #3
/*
 * This request is sent while failover is in progress, so we must
 * bypass the failover handling; do not use the iio_msg_submit()
 * and iio_msg_wait() calls here.
 *
 * Returns:
 *  1 -> Success
 *  0 -> Failure
 */
int
iio_check_failover_ready(struct iio_device *device)
{
    struct qnio_msg *msg = NULL;
    struct channel *channel;
    int err;

    msg = iio_message_alloc(&apictx->msg_pool);
    if (msg == NULL) {
        return 0;
    }
    msg->hinfo.opcode = IRP_VDISK_CHECK_IO_FAILOVER_READY;
    msg->hinfo.data_type = DATA_TYPE_RAW;
    msg->hinfo.payload_size = 0;
    msg->recv = NULL;
    safe_strncpy(msg->hinfo.target, device->devid, NAME_SZ64);
    msg->user_ctx = NULL;
    msg->hinfo.flags = QNIO_FLAG_REQ_NEED_RESP | QNIO_FLAG_SYNC_REQ;
    msg->reserved = device;
    channel = device->channel;
    err = channel->cd->chdrv_msg_send(channel, msg);
    if (!err) {
        while (ck_pr_load_int(&msg->resp_ready) == 0) {
            usleep(SEND_RECV_SLEEP);
        }
        err = msg->hinfo.err;
    }
    iio_message_free(msg);
    if (err) {
        return 0;
    }
    return 1;
}
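A caller that needs to wait out a failover might poll this routine before resubmitting queued requests. The sketch below is hypothetical: wait_for_failover_ready() and FAILOVER_POLL_USEC are illustrative names, and only iio_check_failover_ready() comes from the code above.

#include <unistd.h>

/* Hypothetical caller: poll until the device reports it is ready to
 * accept I/O again after a failover. */
static void
wait_for_failover_ready(struct iio_device *device)
{
    const useconds_t FAILOVER_POLL_USEC = 10000; /* 10 ms between probes. */

    while (iio_check_failover_ready(device) != 1)
        usleep(FAILOVER_POLL_USEC);
}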
Example #4
File: order.c Project: binque/ck
static void *
writer_thread(void *unused)
{
	unsigned int i;
	unsigned int iteration = 0;

	(void)unused;

	for (;;) {
		iteration++;
		ck_epoch_write_begin(&epoch_wr);
		for (i = 1; i <= writer_max; i++) {
			if (ck_bag_put_spmc(&bag, (void *)(uintptr_t)i) == false) {
				perror("ck_bag_put_spmc");
				exit(EXIT_FAILURE);
			}

			if (ck_bag_member_spmc(&bag, (void *)(uintptr_t)i) == false) {
				fprintf(stderr, "ck_bag_member_spmc [%u]: %u\n", iteration, i);
				exit(EXIT_FAILURE);
			}
		}

		if (ck_pr_load_int(&leave) == 1) {
			ck_epoch_write_end(&epoch_wr);
			break;
		}

		for (i = 1; i < writer_max; i++) {
			void *replace = (void *)(uintptr_t)i;
			if (ck_bag_set_spmc(&bag, (void *)(uintptr_t)i, replace) == false) {
				fprintf(stderr, "ERROR: set %ju != %ju",
						(uintmax_t)(uintptr_t)replace, (uintmax_t)i);
				exit(EXIT_FAILURE);
			}
		}

		for (i = writer_max; i > 0; i--) {
			if (ck_bag_member_spmc(&bag, (void *)(uintptr_t)i) == false) {
				fprintf(stderr, "ck_bag_member_spmc [%u]: %u\n", iteration, i);
				exit(EXIT_FAILURE);
			}

			if (ck_bag_remove_spmc(&bag, (void *)(uintptr_t)i) == false) {
				fprintf(stderr, "ck_bag_remove_spmc [%u]: %u\n", iteration, i);
				exit(EXIT_FAILURE);
			}
		}

		ck_epoch_write_end(&epoch_wr);
	}

	fprintf(stderr, "Writer %u iterations, %u writes per iteration.\n", iteration, writer_max);
	while (ck_pr_load_uint(&barrier) != NUM_READER_THREADS)
		ck_pr_stall();

	ck_pr_inc_uint(&barrier);
	return NULL;
}
Example #5
int
thr_demarshal_set_buffer(cf_socket *sock, buffer_type type, int size)
{
	static int rcv_max = -1;
	static int snd_max = -1;

	const char *proc;
	int *max;

	switch (type) {
	case BUFFER_TYPE_RECEIVE:
		proc = "/proc/sys/net/core/rmem_max";
		max = &rcv_max;
		break;

	case BUFFER_TYPE_SEND:
		proc = "/proc/sys/net/core/wmem_max";
		max = &snd_max;
		break;

	default:
		cf_crash(AS_DEMARSHAL, "Invalid buffer type: %d", (int32_t)type);
		return -1; // cf_crash() should have a "noreturn" attribute, but is a macro
	}

	int tmp = ck_pr_load_int(max);

	if (tmp < 0) {
		if (thr_demarshal_read_integer(proc, &tmp) < 0) {
			cf_warning(AS_DEMARSHAL, "Failed to read %s; should be at least %d. Please verify.", proc, size);
			tmp = size;
		}
	}

	if (tmp < size) {
		cf_warning(AS_DEMARSHAL, "Buffer limit is %d, should be at least %d. Please set %s accordingly.",
				tmp, size, proc);
		return -1;
	}

	/* Publish the limit we read; only the first CAS succeeds, and
	 * later callers reuse the cached value. */
	ck_pr_cas_int(max, -1, tmp);

	switch (type) {
	case BUFFER_TYPE_RECEIVE:
		cf_socket_set_receive_buffer(sock, size);
		break;

	case BUFFER_TYPE_SEND:
		cf_socket_set_send_buffer(sock, size);
		break;
	}

	return 0;
}
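thr_demarshal_read_integer() is referenced but not shown. A plausible sketch, assuming the /proc files above contain a single decimal integer (the real Aerospike helper may differ):

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

/* Sketch of the unshown helper: read a decimal integer from a file
 * such as /proc/sys/net/core/rmem_max. Returns 0 on success, -1 on
 * failure. */
static int
thr_demarshal_read_integer(const char *path, int *value)
{
	char buf[32];
	ssize_t len;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		return -1;
	}

	len = read(fd, buf, sizeof(buf) - 1);
	close(fd);

	if (len <= 0) {
		return -1;
	}

	buf[len] = '\0';
	*value = atoi(buf);
	return 0;
}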
Example #6
int
thr_demarshal_set_buffer(int fd, int option, int size)
{
	static int rcv_max = -1;
	static int snd_max = -1;

	const char *proc;
	int *max;

	switch (option) {
	case SO_RCVBUF:
		proc = "/proc/sys/net/core/rmem_max";
		max = &rcv_max;
		break;

	case SO_SNDBUF:
		proc = "/proc/sys/net/core/wmem_max";
		max = &snd_max;
		break;

	default:
		cf_crash(AS_DEMARSHAL, "Invalid option: %d", option);
		return -1; // cf_crash() should have a "noreturn" attribute, but is a macro
	}

	int tmp = ck_pr_load_int(max);

	if (tmp < 0) {
		if (thr_demarshal_read_integer(proc, &tmp) < 0) {
			cf_warning(AS_DEMARSHAL, "Failed to read %s; should be at least %d. Please verify.", proc, size);
			tmp = size;
		}
	}

	if (tmp < size) {
		cf_warning(AS_DEMARSHAL, "Buffer limit is %d, should be at least %d. Please set %s accordingly.",
				tmp, size, proc);
		return -1;
	}

	ck_pr_cas_int(max, -1, tmp);

	if (setsockopt(fd, SOL_SOCKET, option, &size, sizeof size) < 0) {
		cf_crash(AS_DEMARSHAL, "Failed to set socket buffer for FD %d, size %d, error %d (%s)",
				fd, size, errno, strerror(errno));
	}

	return 0;
}
Example #7
static int32_t
iio_msg_wait(struct qnio_msg *msg)
{
    int retry;

retry:

    while (ck_pr_load_int(&msg->resp_ready) == 0) {
        usleep(SEND_RECV_SLEEP);
    }

    retry = iio_msg_done(msg);
    if (retry) {
        /*
         * If the request was resubmitted due to failover,
         * wait for the response again.
         */
        goto retry;
    }
    return msg->hinfo.err;
}
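If an unbounded spin is undesirable, one alternative, not taken from the qnio source, is to bound the poll. In the sketch below only msg->resp_ready, iio_msg_done(), SEND_RECV_SLEEP and msg->hinfo.err come from the code above; the timeout handling is an assumption.

#include <errno.h>

/* Hypothetical bounded variant of iio_msg_wait(): give up after
 * roughly timeout_usec microseconds instead of spinning forever. */
static int32_t
iio_msg_wait_bounded(struct qnio_msg *msg, long timeout_usec)
{
    long waited = 0;

    for (;;) {
        while (ck_pr_load_int(&msg->resp_ready) == 0) {
            if (waited >= timeout_usec) {
                return -ETIMEDOUT;
            }
            usleep(SEND_RECV_SLEEP);
            waited += SEND_RECV_SLEEP;
        }

        /* A failover may have resubmitted the request; if so, wait
         * for the new response as well. */
        if (!iio_msg_done(msg)) {
            break;
        }
    }
    return msg->hinfo.err;
}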
Example #8
static void *god_loop(void *arg)
{
    (void)arg;
    int32_t rtrn = 0;
    struct output_writter *output = get_console_writter();

    /* genesis() initializes all the needed data structures and creates the first population. */
    rtrn = genesis(output, get_default_allocator());
    if(rtrn < 0)
    {
        output->write(ERROR, "Can't init ga\n");
        return (NULL);
    }

    /* Main loop for the genetic algorithm; each pass is meant to
       create a new generation. */
    while(ck_pr_load_int(stop) != TRUE)
    {
        ck_pr_stall(); /* Avoid a hot spin while waiting for the stop flag. */
    }

    return (NULL);
}
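The stop flag is read with ck_pr_load_int(), so the thread that ends the run should publish it with the matching atomic store. A minimal sketch (the shutdown_ga() name is an assumption; ck_pr_store_int() is the real ck primitive):

/* Hypothetical shutdown path: publish the stop flag with the store
 * that pairs with the ck_pr_load_int() in god_loop(). */
static void
shutdown_ga(int *stop_flag)
{
    ck_pr_store_int(stop_flag, TRUE);
}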
Example #9
void ph_nbio_emitter_run(struct ph_nbio_emitter *emitter, ph_thread_t *thread)
{
  int n, i;
  int max_chunk;

  max_chunk = ph_config_query_int("$.nbio.max_per_wakeup", 1024);

  while (ck_pr_load_int(&_ph_run_loop)) {
    n = kevent(emitter->io_fd, emitter->kqset.events, emitter->kqset.used,
          emitter->kqset.events, MIN(emitter->kqset.size, max_chunk), NULL);

    if (n < 0 && errno != EINTR) {
      ph_panic("kevent: `Pe%d", errno);
    }

    if (n <= 0) {
      continue;
    }

    ph_thread_epoch_begin();
    for (i = 0; i < n; i++) {
      dispatch_kevent(emitter, thread, &emitter->kqset.events[i]);
    }

    if (n + 1 >= emitter->kqset.size) {
      grow_kq_set(&emitter->kqset);
    }
    emitter->kqset.used = 0;

    if (ph_job_have_deferred_items(thread)) {
      ph_job_pool_apply_deferred_items(thread);
    }
    ph_thread_epoch_end();
    ph_thread_epoch_poll();
  }

  dispose_kq_set(&emitter->kqset);
}
Example #10
static void *
thread_brlock(void *pun)
{
	uint64_t s_b, e_b, a, i;
	ck_brlock_reader_t r;
	uint64_t *value = pun;

	if (aff_iterate(&affinity) != 0) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	ck_brlock_read_register(&brlock, &r);
	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads)
		ck_pr_stall();

	for (i = 1, a = 0;; i++) {
		/* Time sixteen unrolled read lock/unlock pairs. */
		s_b = rdtsc();
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		ck_brlock_read_lock(&brlock, &r);
		ck_brlock_read_unlock(&r);
		e_b = rdtsc();

		a += (e_b - s_b) >> 4;	/* Average over the 16 pairs. */

		if (ck_pr_load_uint(&flag) == 1)
			break;
	}

	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads * 2)
		ck_pr_stall();

	*value = (a / i);
	return NULL;
}
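The benchmark relies on an rdtsc() helper that is not shown; in the ck regression harness it wraps the CPU timestamp counter. A sketch for x86-64, offered under the assumption that is the target (other architectures need a different cycle counter):

#include <stdint.h>

/* Read the x86-64 timestamp counter. The >> 4 in the loop above then
 * averages the accumulated cycles over the 16 unrolled pairs. */
static inline uint64_t
rdtsc(void)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}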
Example #11
static void *
thread_lock_rtm(void *pun)
{
	uint64_t s_b, e_b, a, i;
	uint64_t *value = pun;

	if (aff_iterate(&affinity) != 0) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads)
		ck_pr_stall();

	for (i = 1, a = 0;; i++) {
		/* Time sixteen unrolled elided read lock/unlock pairs. */
		s_b = rdtsc();
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_LOCK(ck_rwlock_read, &rw.lock);
		CK_ELIDE_UNLOCK(ck_rwlock_read, &rw.lock);
		e_b = rdtsc();

		a += (e_b - s_b) >> 4;	/* Average over the 16 pairs. */

		if (ck_pr_load_uint(&flag) == 1)
			break;
	}

	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads * 2)
		ck_pr_stall();

	*value = (a / i);
	return NULL;
}
Example #12
void ph_nbio_emitter_run(struct ph_nbio_emitter *emitter, ph_thread_t *thread)
{
  port_event_t *event;
  uint_t n, i, max_chunk, max_sleep;
  ph_job_t *job;
  ph_iomask_t mask;
  struct timespec ts;

  max_chunk = ph_config_query_int("$.nbio.max_per_wakeup", 1024);
  max_sleep = ph_config_query_int("$.nbio.max_sleep", 5000);
  ts.tv_sec = max_sleep / 1000;
  ts.tv_nsec = (max_sleep - (ts.tv_sec * 1000)) * 1000000;
  event = malloc(max_chunk * sizeof(port_event_t));

  while (ck_pr_load_int(&_ph_run_loop)) {
    n = 1;
    memset(event, 0, sizeof(*event));

    if (port_getn(emitter->io_fd, event, max_chunk, &n, &ts)) {
      if (errno != EINTR && errno != ETIME) {
        ph_panic("port_getn: `Pe%d", errno);
      }
      n = 0;
    }

    if (!n) {
      ph_thread_epoch_poll();
      continue;
    }

    for (i = 0; i < n; i++) {
      ph_thread_epoch_begin();

      switch (event[i].portev_source) {
        case PORT_SOURCE_TIMER:
          gettimeofday(&thread->now, NULL);
          thread->refresh_time = false;
          ph_nbio_emitter_timer_tick(emitter);
          break;

        case PORT_SOURCE_USER:
          break;

        case PORT_SOURCE_FD:
          thread->refresh_time = true;
          job = event[i].portev_user;

          switch (event[i].portev_events & (POLLIN|POLLOUT|POLLERR|POLLHUP)) {
            case POLLIN:
              mask = PH_IOMASK_READ;
              break;
            case POLLOUT:
              mask = PH_IOMASK_WRITE;
              break;
            case POLLIN|POLLOUT:
              mask = PH_IOMASK_READ|PH_IOMASK_WRITE;
              break;
            default:
              mask = PH_IOMASK_ERR;
          }
          job->kmask = 0;
          ph_nbio_emitter_dispatch_immediate(emitter, job, mask);
          break;
      }

      if (ph_job_have_deferred_items(thread)) {
        ph_job_pool_apply_deferred_items(thread);
      }
      ph_thread_epoch_end();
      ph_thread_epoch_poll();
    }
  }

  free(event);
}
Example #13
File: order.c Project: binque/ck
static void *
reader(void *arg)
{
	void *curr_ptr;
	intptr_t curr, prev, curr_max, prev_max;
	unsigned long long n_entries = 0, iterations = 0;
	ck_epoch_record_t epoch_record;
	ck_bag_iterator_t iterator;
	struct ck_bag_block *block = NULL;

	(void)arg;

	ck_epoch_register(&epoch_bag, &epoch_record);

	/*
	 * Check if entries within a block are sequential. Since ck_bag inserts
	 * newly occupied blocks at the beginning of the list, there is no ordering
	 * guarantee across the bag.
	 */
	for (;;) {
		ck_epoch_read_begin(&epoch_record);
		ck_bag_iterator_init(&iterator, &bag);
		curr_max = prev_max = prev = -1;
		block = NULL;

		while (ck_bag_next(&iterator, &curr_ptr) == true) {
			if (block != iterator.block) {
				prev = -1;
				curr = 0;
				prev_max = curr_max;
				curr_max = 0;
				block = iterator.block;
			}

			curr = (uintptr_t)(curr_ptr);
			if (curr < prev) {
				/* Ascending order within block violated */
				fprintf(stderr, "%p: %p: %ju\n", (void *)&epoch_record, (void *)iterator.block, (uintmax_t)curr);
				fprintf(stderr, "ERROR: %ju < %ju \n",
				    (uintmax_t)curr, (uintmax_t)prev);
				exit(EXIT_FAILURE);
			} else if (prev_max != -1 && curr > prev_max) {
				/* Max of prev block > max of current block */
				fprintf(stderr, "%p: %p: %ju\n", (void *)&epoch_record, (void *)iterator.block, (uintmax_t)curr);
				fprintf(stderr, "ERROR: %ju > prev_max: %ju\n",
				    (uintmax_t)curr, (uintmax_t)prev_max);
				exit(EXIT_FAILURE);
			}

			curr_max = curr;

			prev = curr;
			n_entries++;
		}

		ck_epoch_read_end(&epoch_record);

		iterations++;
		if (ck_pr_load_int(&leave) == 1)
			break;
	}

	fprintf(stderr, "Read %llu entries in %llu iterations.\n", n_entries, iterations);

	ck_pr_inc_uint(&barrier);
	while (ck_pr_load_uint(&barrier) != NUM_READER_THREADS + 1)
		ck_pr_stall();

	return NULL;
}
Example #14
static void *
test_spmc(void *c)
{
	unsigned int observed = 0;
	unsigned long previous = 0;
	unsigned int seed = 0;	/* PRNG state for the random backoff below. */
	int i, k, j, tid;
	struct context *context = c;
	ck_ring_buffer_t *buffer;

	buffer = context->buffer;
	if (aff_iterate(&a)) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	tid = ck_pr_faa_int(&eb, 1);
	ck_pr_fence_memory();
	while (ck_pr_load_int(&eb) != nthr - 1)
		ck_pr_stall();

	for (i = 0; i < ITERATIONS; i++) {
		for (j = 0; j < size; j++) {
			struct entry *o;
			int spin;

			/* Keep trying until we encounter at least one node. */
			if (j & 1) {
				while (ck_ring_dequeue_spmc(&ring_spmc, buffer,
				    &o) == false);
			} else {
				while (ck_ring_trydequeue_spmc(&ring_spmc, buffer,
				    &o) == false);
			}

			observed++;
			if (o->value < 0
			    || o->value != o->tid
			    || o->magic != 0xdead
			    || (previous != 0 && previous >= o->value_long)) {
				ck_error("[0x%p] (%x) (%d, %d) >< (0, %d)\n",
					(void *)o, o->magic, o->tid, o->value, size);
			}

			o->magic = 0xbeef;
			o->value = -31337;
			o->tid = -31338;
			previous = o->value_long;

			if (ck_pr_faa_uint(&o->ref, 1) != 0) {
				ck_error("[%p] We dequeued twice.\n", (void *)o);
			}

			if ((i % 4) == 0) {
				spin = common_rand_r(&seed) % 16384;
				for (k = 0; k < spin; k++) {
					ck_pr_stall();
				}
			}

			free(o);
		}
	}

	fprintf(stderr, "[%d] Observed %u\n", tid, observed);
	return NULL;
}
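For context, the producer side of this test pushes entries with ck_ring_enqueue_spmc(). The sketch below is hedged: the struct entry layout is inferred from the fields the consumer reads, and produce_one() is an illustrative name, not the original producer.

#include <stdlib.h>

/* Hypothetical producer for the consumer above; entry fields are
 * inferred from what test_spmc() checks. */
static void
produce_one(ck_ring_t *ring, ck_ring_buffer_t *buffer,
    int tid, unsigned long sequence)
{
	struct entry *o = malloc(sizeof(*o));

	if (o == NULL)
		abort();

	o->magic = 0xdead;
	o->tid = tid;
	o->value = tid;
	o->value_long = sequence;	/* Strictly increasing per producer. */
	o->ref = 0;

	/* Retry until there is room in the ring. */
	while (ck_ring_enqueue_spmc(ring, buffer, o) == false)
		ck_pr_stall();
}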
Example #15
void ph_nbio_emitter_run(struct ph_nbio_emitter *emitter, ph_thread_t *thread)
{
    struct epoll_event *event;
    int n, i;
    int max_chunk, max_sleep;

    max_chunk = ph_config_query_int("$.nbio.max_per_wakeup", 1024);
    max_sleep = ph_config_query_int("$.nbio.max_sleep", 5000);
    event = malloc(max_chunk * sizeof(struct epoll_event));

    while (ck_pr_load_int(&_ph_run_loop)) {
        n = epoll_wait(emitter->io_fd, event, max_chunk, max_sleep);
        thread->refresh_time = true;

        if (n < 0) {
            if (errno != EINTR) {
                ph_log(PH_LOG_ERR, "epoll_wait: `Pe%d", errno);
            }
            ph_job_collector_emitter_call(emitter);
            ph_thread_epoch_poll();
            continue;
        }

        if (n == 0) {
            continue;
        }

        ph_thread_epoch_begin();
        for (i = 0; i < n; i++) {
            ph_iomask_t mask = 0;
            ph_job_t *job = event[i].data.ptr;

            if (job->mask == 0) {
                // Ignore: disabled for now
                continue;
            }

            switch (event[i].events & (EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP)) {
            case EPOLLIN:
                mask = PH_IOMASK_READ;
                break;
            case EPOLLOUT:
                mask = PH_IOMASK_WRITE;
                break;
            case EPOLLIN|EPOLLOUT:
                mask = PH_IOMASK_READ|PH_IOMASK_WRITE;
                break;
            default:
                mask = PH_IOMASK_ERR;
            }
            // We can't just clear kmask completely because ONESHOT retains
            // the existence of the item; we need to know it is there so that
            // we can MOD it instead of ADD it later.
            job->kmask = DEFAULT_POLL_MASK;
            ph_nbio_emitter_dispatch_immediate(emitter, job, mask);
            if (ph_job_have_deferred_items(thread)) {
                ph_job_pool_apply_deferred_items(thread);
            }
        }
        ph_thread_epoch_end();
        ph_job_collector_emitter_call(emitter);
        ph_thread_epoch_poll();
    }

    free(event);
}