/* Mark 'len' bytes of the current data block as consumed: advance the block
 * pointer, shrink its length, and wrap back to the start of the ring when the
 * pointer reaches one past the last written byte (unless the writer itself is
 * sitting at that position).
 */
SXE_RETURN
sxe_ring_buffer_consumed(void * base, SXE_RING_BUFFER_CONTEXT * context, unsigned len)
{
    SXE_RETURN ret = SXE_RETURN_OK;

    SXEE81("sxe_ring_buffer_consumed(len:%u)", len);

    /* Bail out early if the writer has lapped this reader. */
    if ((ret = sxe_ring_buffer_check_over_run(base, context)) == SXE_RETURN_ERROR_INTERNAL) {
        goto SXE_ERROR_OUT;
    }

    SXEL92("data_block was: %p, moving to %p", context->data_block, context->data_block + len);
    context->data_block     += len;
    context->data_block_len -= len;

    SXEA10(context->data_block <= SXE_RING_BUFFER_WRITEN_END + 1, "the data block did not go off the end of the array");

    /* Wrap to the array base once everything up to the written end has been
     * consumed; bump the iteration counter so readers can detect the wrap. */
    if ((context->data_block == SXE_RING_BUFFER_WRITEN_END + 1) && (context->data_block != SXE_RING_BUFFER_CURRENT)) {
        SXEA10(context->data_block_len == 0, "If this consume brought us to the end of the array, the remaining length must be zero");
        SXEL90("Wrapping data_block to the start of the ring");
        context->data_block = SXE_RING_BUFFER_ARRAY_BASE;
        context->itteration++;
    }

SXE_EARLY_OR_ERROR_OUT:
    SXEL93("context data_block: %p, data_block_len: %u itteration: %u", context->data_block, context->data_block_len,
           context->itteration);
    SXER81("return %s", sxe_return_to_string(ret));
    return ret;
}
/* Release a sync object back to the pool (state 1 -> state 0).
 * Always returns NULL so callers can clear their pointer in one expression.
 */
void *
sxe_sync_ev_delete(void * sync_point)
{
    unsigned id;

    SXEE81("sxe_sync_ev_delete(sync_point=%p)", sync_point);
    id = (SXE_SYNC_EV *)sync_point - sxe_sync_ev_pool;    /* pool index of this sync object */
    sxe_pool_set_indexed_element_state(sxe_sync_ev_pool, id, 1, 0);
    SXER80("return NULL");
    return NULL;
}
/* Post a sync point by sending its pointer value as a datagram to the
 * sync listener address; aborts (SXEA11) if the send fails or is short.
 */
void
sxe_sync_ev_post(void * sync_point)
{
    SXE_SYNC_EV * ev = (SXE_SYNC_EV *)sync_point;

    SXEE81("sxe_sync_ev_post(sync_point=%p)", sync_point);
    SXEA11(sendto(ev->sock, (MOCK_SOCKET_VOID *)&sync_point, sizeof(sync_point), 0,
                  (struct sockaddr *)&sxe_sync_ev_addr, sizeof(sxe_sync_ev_addr)) == sizeof(sync_point),
           "Can't send to sync_point listener port: %s", sxe_socket_get_last_error_as_str());
    SXER80("return");
}
/* Get the next writable block, guaranteeing at least 'size' contiguous bytes.
 *
 * @param base    Ring buffer base pointer
 * @param context Ring buffer context whose writable_block/writable_block_len are updated
 * @param size    Minimum contiguous bytes required; must not exceed the ring size
 *
 * If the space left before the end of the array is too small, the ring is
 * forced to wrap and the writable block is re-fetched from the array base.
 */
void
sxe_ring_buffer_next_writable_block_size(void * base, SXE_RING_BUFFER_CONTEXT * context, unsigned size)
{
    SXEE81("sxe_ring_buffer_next_writable_block_size(size=%u)", size);
    /* Fixed assertion diagnostic: was "equall or smaller then" */
    SXEA10(size <= SXE_RING_BUFFER_SIZE, "The requested size is equal to or smaller than the ring");
    sxe_ring_buffer_next_writable_block(base, context);

    if (context->writable_block_len < size) {
        /* Not enough contiguous room before the end of the array: wrap the
         * writer back to the base and ask again. */
        sxe_ring_buffer_force_ring_wrap(base);
        sxe_ring_buffer_next_writable_block(base, context);
    }

    SXER80("return");
}
void * sxe_sync_ev_new(void * user_data) { unsigned id; SXEE81("sxe_sync_ev_new(user_data=%p)", user_data); SXEA10((id = sxe_pool_set_oldest_element_state(sxe_sync_ev_pool, 0, 1)) != SXE_POOL_NO_INDEX, "Could not allocate a sync object"); sxe_sync_ev_pool[id].sock = sxe_sync_ev_socket(); sxe_sync_ev_pool[id].user_data = user_data; SXER81("return sync=%p", &sxe_sync_ev_pool[id]); return &sxe_sync_ev_pool[id]; }
void * sxe_ring_buffer_new(unsigned size) { void * base; SXEE81("sxe_ring_buffer_new(size=%u)", size); SXEA10((base = malloc(size + SXE_RING_BUFFER_SIZE_OF_INTERNALS)) != NULL, "Error allocating sxe-ring-buffer"); SXE_RING_BUFFER_SIZE = size; SXE_RING_BUFFER_ITERATION = 0; SXE_RING_BUFFER_CURRENT = SXE_RING_BUFFER_ARRAY_BASE; SXE_RING_BUFFER_WRITEN_END = SXE_RING_BUFFER_CURRENT; SXEL92("Array base=%p, End of Array=%p", SXE_RING_BUFFER_ARRAY_BASE, SXE_RING_BUFFER_END); SXER81("return base=%p", base); return base; }
/* Test thread entry point: verifies it received the ping lock, takes both
 * spinlocks, pongs the main thread, then parks forever.
 */
static SXE_THREAD_RETURN SXE_STDCALL
test_thread_main(void * lock)
{
    SXEE81("test_thread_main(lock=%p)", lock);
    SXEA10(lock == &ping, "Ping lock not passed to the thread");
    SXEA10(sxe_spinlock_take(&pong) == SXE_SPINLOCK_STATUS_TAKEN, "Pong lock not taken by thread");
    SXEA10(sxe_spinlock_take(&ping) == SXE_SPINLOCK_STATUS_TAKEN, "Ping lock not taken by thread");
    SXEL10("thread: about to pong the main thread");
    sxe_spinlock_give(&pong);

    for (;;) {
        sleep(1);    /* park forever; this thread is not expected to return */
    }

    SXER80("return NULL");      /* unreachable: kept so the function has a well-formed tail */
    return (SXE_THREAD_RETURN)0;
}
/* Commit 'len' bytes written into the block previously handed out to this
 * writer: advance the current pointer, and either wrap it to the array base
 * (bumping the iteration count and pinning the written end to the array end)
 * or extend the written-end high-water mark.
 */
void
sxe_ring_buffer_wrote_block(void * base, SXE_RING_BUFFER_CONTEXT * context, unsigned len)
{
    SXEE81("sxe_ring_buffer_wrote_block(len=%u)", len);
    SXEA10(context->writable_block == SXE_RING_BUFFER_CURRENT, "The current pointer is still where it was when you asked for it");
    SXEA10(len <= (unsigned)(SXE_RING_BUFFER_END - SXE_RING_BUFFER_CURRENT + 1), "Did not overwrite end of buffer ring");

    SXE_RING_BUFFER_CURRENT = SXE_RING_BUFFER_CURRENT + len;

    if (SXE_RING_BUFFER_CURRENT == (SXE_RING_BUFFER_END + 1)) {
        /* Write landed exactly on the last byte: wrap the writer. */
        SXEL90("Wrote to the last byte in the array, wrapping pointer");
        SXE_RING_BUFFER_ITERATION++;
        SXE_RING_BUFFER_CURRENT    = SXE_RING_BUFFER_ARRAY_BASE;
        SXE_RING_BUFFER_WRITEN_END = SXE_RING_BUFFER_END;
    }
    else if (SXE_RING_BUFFER_WRITEN_END < SXE_RING_BUFFER_CURRENT) {
        /* Push the written-end marker up to the last byte just written. */
        SXE_RING_BUFFER_WRITEN_END = SXE_RING_BUFFER_CURRENT - 1;
    }

    SXEL93("Current: %p, Writen End %p, Itteration: %u", SXE_RING_BUFFER_CURRENT, SXE_RING_BUFFER_WRITEN_END,
           SXE_RING_BUFFER_ITERATION);
    SXER80("return");
}
/**
 * Step to the next object in a pool state
 *
 * @param walker Pointer to the pool state walker
 *
 * @return Index of the next object or SXE_POOL_NO_INDEX if the end of the state queue has been reached.
 *
 * @note Thread safety is implemented by verifying that the last node stepped to is still in the same state queue. If it is not,
 *       the state queue is rewalked to find a node with a time or count greater than or equal to the time that the last stepped
 *       to node had when it was stepped to.
 */
unsigned
sxe_pool_walker_step(SXE_POOL_WALKER * walker)
{
    SXE_POOL_NODE * node;
    SXE_POOL_IMPL * pool = walker->pool;
    unsigned        result;

    SXEE81("sxe_pool_walker_step(walker=%p)", walker);

    /* On lock failure, result is the lock status and is returned as-is. */
    if ((result = sxe_pool_lock(pool)) == SXE_POOL_LOCK_NOT_TAKEN) {
        goto SXE_ERROR_OUT;
    }

    /* If not at the head of the state queue and the current object has been moved to another state. */
    if (((node = sxe_list_walker_find(&walker->list_walker)) != NULL)
     && (SXE_LIST_NODE_GET_ID(&node->list_node) != walker->state))    /* TODO: Check for touching */
    {
        SXEL83("sxe_pool_walker_step: node %u moved from state %s to state %s by another thread",
               node - SXE_POOL_NODES(pool), (*pool->state_to_string)(walker->state),
               (*pool->state_to_string)(SXE_LIST_NODE_GET_ID(&node->list_node)));

        /* If there is a previous object and it has not been moved, get the new next one. */
        if (((node = sxe_list_walker_back(&walker->list_walker)) != NULL)
         && (SXE_LIST_NODE_GET_ID(&node->list_node) == walker->state))    /* TODO: Check for touching */
        {
            node = sxe_list_walker_step(&walker->list_walker);
        }
        else {
            /* Otherwise, rewalk the state queue from its head, stopping at the
             * first node whose time (timed pools) or count is >= the value the
             * last stepped-to node had when we visited it. */
            sxe_list_walker_construct(&walker->list_walker, &SXE_POOL_QUEUE(pool)[walker->state]);

            while ((node = sxe_list_walker_step(&walker->list_walker)) != NULL) {
                if (pool->options & SXE_POOL_OPTION_TIMED) {
                    if (node->last.time >= walker->last.time) {    /* Coverage Exclusion: TODO refactor SXE_POOL_TIME */
                        break;                                     /* Coverage Exclusion: TODO refactor SXE_POOL_TIME */
                    }
                }
                else {
                    if (node->last.count >= walker->last.count) {
                        break;
                    }
                }
            }
        }
    }
    else {
        node = sxe_list_walker_step(&walker->list_walker);
    }

    result = SXE_POOL_NO_INDEX;

    if (node != NULL) {
        result = node - SXE_POOL_NODES(pool);

        /* Remember the node's time/count so a concurrent move can be recovered
         * from on the next step (see @note above). */
        if (pool->options & SXE_POOL_OPTION_TIMED) {
            walker->last.time = node->last.time;
        }
        else {
            walker->last.count = node->last.count;
        }
    }

    sxe_pool_unlock(pool);

SXE_ERROR_OUT:
    SXER81("return %s", sxe_pool_return_to_string(result));
    return result;
}
/* Poll for I/O readiness using select(2) for up to 'timeout' seconds, then
 * dispatch fd events.  This is libev's select backend instrumented with SXE
 * logging; the three select variants (Windows, fd_set, raw fd_mask vectors)
 * are chosen at compile time.
 */
static void
select_poll (EV_P_ ev_tstamp timeout)
{
  struct timeval tv;
  int res;
  int fd_setsize;

  EV_RELEASE_CB;
  /* Split the fractional timeout into whole seconds and microseconds. */
  tv.tv_sec  = (long)timeout;
  tv.tv_usec = (long)((timeout - (ev_tstamp)tv.tv_sec) * 1e6);

#if EV_SELECT_USE_FD_SET
  fd_setsize = sizeof (fd_set);
#else
  fd_setsize = vec_max * NFDBYTES;
#endif

  SXEE81("select_poll(timeout=%f)", timeout);
  /* Copy the registered-interest sets to scratch sets, since select() mutates
   * its arguments in place. */
  memcpy (vec_ro, vec_ri, fd_setsize);
  memcpy (vec_wo, vec_wi, fd_setsize);

#ifdef _WIN32
  SXEL80("Using select() on Windows");
  /* pass in the write set as except set.
   * the idea behind this is to work around a windows bug that causes
   * errors to be reported as an exception and not by setting
   * the writable bit. this is so uncontrollably lame.
   */
  memcpy (vec_eo, vec_wi, fd_setsize);
  res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, (fd_set *)vec_eo, &tv);
#elif EV_SELECT_USE_FD_SET
  SXEL80("Using select() with fd_set...");
  /* NOTE(review): fd_setsize is reused here as select()'s nfds argument,
   * clamped to FD_SETSIZE — distinct from its byte-count role above. */
  fd_setsize = anfdmax < FD_SETSIZE ? anfdmax : FD_SETSIZE;
  res = select (fd_setsize, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv);
#else
  SXEL80("Using select() without fd_set...");
  res = select (vec_max * NFDBITS, (fd_set *)vec_ro, (fd_set *)vec_wo, 0, &tv);
#endif
  EV_ACQUIRE_CB;

  if (expect_false (res < 0))
    {
      SXEL81("expect_false (res < 0) // res=%d", res);
#if EV_SELECT_IS_WINSOCKET
      errno = WSAGetLastError ();
      SXEL81("errno=%d", errno);
#endif
#ifdef WSABASEERR
      /* on windows, select returns incompatible error codes, fix this */
      if (errno >= WSABASEERR && errno < WSABASEERR + 1000)
        if (errno == WSAENOTSOCK)
          errno = EBADF;
        else
          errno -= WSABASEERR;
      SXEL81("errno=%d // after fixing error code", errno);
#endif
#ifdef _WIN32
      /* select on windows errornously returns EINVAL when no fd sets have been
       * provided (this is documented). what microsoft doesn't tell you that this bug
       * exists even when the fd sets _are_ provided, so we have to check for this bug
       * here and emulate by sleeping manually.
       * we also get EINVAL when the timeout is invalid, but we ignore this case here
       * and assume that EINVAL always means: you have to wait manually.
       */
      if (errno == EINVAL)
        {
          SXEL81("ev_sleep(timeout=%f)", timeout);
          ev_sleep (timeout);
          SXER80("return // errno == EINVAL");
          return;
        }
#endif
      /* EBADF/ENOMEM get libev's recovery handlers; EINTR is silently retried
       * on the next loop iteration; anything else is fatal. */
      if (errno == EBADF)
        fd_ebadf (EV_A);
      else if (errno == ENOMEM && !syserr_cb)
        fd_enomem (EV_A);
      else if (errno != EINTR)
        ev_syserr ("(libev) select");
      SXER80("return // expect_false (res < 0)");
      return;
    }

#if EV_SELECT_USE_FD_SET
  {
    int fd;
    SXEL80("#if EV_SELECT_USE_FD_SET");
    /* Scan every watched fd and dispatch the readiness events select() set. */
    for (fd = 0; fd < anfdmax; ++fd)
      if (anfds [fd].events)
        {
          int events = 0;
#if EV_SELECT_IS_WINSOCKET
          SOCKET handle = anfds [fd].handle;
#else
          int handle = fd;
#endif
          if (FD_ISSET (handle, (fd_set *)vec_ro)) events |= EV_READ;
          if (FD_ISSET (handle, (fd_set *)vec_wo)) events |= EV_WRITE;
#ifdef _WIN32
          /* Exception set doubles as the write set on Windows (see above). */
          if (FD_ISSET (handle, (fd_set *)vec_eo)) events |= EV_WRITE;
#endif
          if (expect_true (events))
            fd_event (EV_A_ fd, events);
        }
    SXEL80("#endif // EV_SELECT_USE_FD_SET");
  }
#else
  {
    int word, bit;
    SXEL80("#if *not* EV_SELECT_USE_FD_SET");
    /* Walk the raw fd_mask vectors word by word, then bit by bit, decoding
     * each set bit back into an fd number. */
    for (word = vec_max; word--; )
      {
        fd_mask word_r = ((fd_mask *)vec_ro) [word];
        fd_mask word_w = ((fd_mask *)vec_wo) [word];
#ifdef _WIN32
        word_w |= ((fd_mask *)vec_eo) [word];
#endif
        if (word_r || word_w)
          for (bit = NFDBITS; bit--; )
            {
              fd_mask mask = 1UL << bit;
              int events = 0;
              events |= word_r & mask ? EV_READ : 0;
              events |= word_w & mask ? EV_WRITE : 0;
              if (expect_true (events))
                fd_event (EV_A_ word * NFDBITS + bit, events);
            }
      }
    SXEL80("#endif // *not* EV_SELECT_USE_FD_SET");
  }
#endif
  SXER80("return");
}