// Poll epoll and aio and handle any events. If can_sleep is true // and no aio events are expected, epoll will block. static void do_poll(bool can_sleep, struct timespec next_wakeup) { struct epoll_event epoll_events[MAX_EVENTS]; #if ENABLE_AIO struct io_event aio_events[MAX_EVENTS]; // We need to be careful to not let either aio or epoll events // starve the other. We do this by always doing nonblocking polls // of aio and only having epoll block if we aren't expecting aio // events. When both types of events are coming in, we switch // between processing the two types. // Poll for aio events. Don't block. int aio_cnt = io_getevents(state.aio_ctx, 0, MAX_EVENTS, aio_events, NULL); if (aio_cnt < 0) { fail2(1, -aio_cnt, "io_getevents"); } bool expect_aio = aio_cnt == MAX_EVENTS; for (int i = 0; i < aio_cnt; i++) { handle_aio_event(&aio_events[i]); } #else #define expect_aio 0 #endif // If we aren't expecting aio, block indefinitely, otherwise // just poll. int epoll_timeout = expect_aio || !can_sleep ? 0 : compute_timeout(next_wakeup); int epoll_cnt = epoll_wait(state.epoll_fd, epoll_events, MAX_EVENTS, epoll_timeout); for (int i = 0; i < epoll_cnt; i++) { #if ENABLE_AIO if (epoll_events[i].data.ptr == state.aio_dummy_event) { uint64_t eventfd_val; if (read(state.aio_eventfd, &eventfd_val, sizeof(eventfd_val)) < 0) fail(1, "eventfd read"); } else #endif { handle_epoll_event(&epoll_events[i]); } } }
/*
 * Allocate and initialize a string-keyed LRU cache entry holding
 * private copies of `key` and `value`.
 *
 * A timeout of 0 means "never expires"; otherwise expire_time is an
 * absolute time computed from compute_timeout(timeout) + get_now().
 * Returns NULL on allocation failure (callers already check for NULL).
 */
struct ndpi_LruCacheEntry* lru_allocCacheStringNode(struct ndpi_LruCache *cache, char *key, char *value, u_int32_t timeout) {
  struct ndpi_LruCacheEntry *node = (struct ndpi_LruCacheEntry*)ndpi_calloc(1, sizeof(struct ndpi_LruCacheEntry));

  if(unlikely(traceLRU))
    printf("%s(key=%s, value=%s)", __FUNCTION__, key, value);

  if(node == NULL)
    printf("ERROR: Not enough memory?");
  else {
    node->numeric_node = 0;
    node->u.str.key = ndpi_strdup(key);
    node->u.str.value = ndpi_strdup(value);

    /*
     * FIX: the strdup results were previously unchecked; a NULL key
     * would crash the caller's strcmp() on the next lookup. Release
     * the partially built node and report failure instead.
     */
    if((node->u.str.key == NULL) || (node->u.str.value == NULL)) {
      if(node->u.str.key != NULL)   ndpi_free(node->u.str.key);
      if(node->u.str.value != NULL) ndpi_free(node->u.str.value);
      ndpi_free(node);
      printf("ERROR: Not enough memory?");
      return(NULL);
    }

    node->u.str.expire_time = (timeout == 0) ? 0 : (compute_timeout(timeout) + get_now());

#ifdef FULL_STATS
    /* Account for the node plus both duplicated strings */
    cache->mem_size += sizeof(struct ndpi_LruCacheEntry) + strlen(key) + strlen(value);
    //printf("%s(key=%s, value=%s) [memory: %u]", __FUNCTION__, key, value, cache->mem_size);
#endif
  }

  return(node);
}
/*
 * Insert (or overwrite) a string key/value pair in the LRU cache.
 *
 * If the key already exists in its bucket, only the value and expiry
 * are replaced. A timeout of 0 means "never expires".
 *
 * Returns 0 on success (including when the cache is disabled, i.e.
 * hash_size == 0), -1/-2 when node allocation fails.
 */
int ndpi_add_to_lru_cache_str_timeout(struct ndpi_LruCache *cache, char *key, char *value, u_int32_t timeout) {
  if(cache->hash_size == 0)
    return(0); /* Caching disabled: silently accept and drop */
  else {
    u_int32_t hash_val = lru_hash_string(key);
    u_int32_t hash_id = hash_val % cache->hash_size;
    struct ndpi_LruCacheEntry *node;
    u_int8_t node_already_existing = 0;
    int rc = 0;

    if(unlikely(traceLRU)) printf("%s(key=%s, value=%s)", __FUNCTION__, key, value);

    // validate_unit_len(cache, hash_id);
    cache->num_cache_add++;

    /* [1] Add to hash */
    if(cache->hash[hash_id] == NULL) {
      /* Empty bucket: new node becomes the bucket head */
      if((node = lru_allocCacheStringNode(cache, key, value, timeout)) == NULL) {
        rc = -1;
        goto ret_add_to_lru_cache;
      }

      cache->hash[hash_id] = node;
      cache->current_hash_size[hash_id]++;
    } else {
      /* Check if the element exists */
      struct ndpi_LruCacheEntry *head = cache->hash[hash_id];

      while(head != NULL) {
        if(strcmp(head->u.str.key, key) == 0) {
          /* Duplicated key found */
          node = head;

          /* Release the old value before overwriting it */
          if(node->u.str.value) {
#ifdef FULL_STATS
            cache->mem_size -= strlen(node->u.str.value);
#endif
            ndpi_free(node->u.str.value);
          }

          /* NOTE(review): ndpi_strdup result is unchecked here; on OOM
             the entry is left with a NULL value — confirm callers of
             the lookup path tolerate that. */
          node->u.str.value = ndpi_strdup(value); /* Overwrite old value */
#ifdef FULL_STATS
          cache->mem_size += strlen(value);
#endif
          /* Refresh the expiry on overwrite as well */
          node->u.str.expire_time = (timeout == 0) ? 0 : (compute_timeout(timeout) + get_now());
          node_already_existing = 1;
          break;
        } else
          head = head->next;
      }

      if(!node_already_existing) {
        /* Key not found: prepend a new node to the bucket chain */
        if((node = lru_allocCacheStringNode(cache, key, value, timeout)) == NULL) {
          rc = -2;
          goto ret_add_to_lru_cache;
        }

        node->next = cache->hash[hash_id];
        cache->hash[hash_id] = node;
        cache->current_hash_size[hash_id]++;
      }
    }

    /* Keep the bucket within its size bound (may evict entries) */
    trim_subhash(cache, hash_id);
    // validate_unit_len(cache, hash_id);

  ret_add_to_lru_cache:
    return(rc);
  }
}
/*
 * Read up to `len` bytes from a connection, honoring the global
 * fetchTimeout deadline. Buffered (pushed-back) data in conn->next_buf
 * is consumed first without touching the socket. Returns the number of
 * bytes read (0 at EOF), or -1 on error/timeout with errno set.
 */
ssize_t
fetch_read(conn_t *conn, char *buf, size_t len)
{
	struct timeval timeout_end;
	struct pollfd pfd;
	int timeout_cur;
	ssize_t rlen;
	int r;

	if (len == 0)
		return 0;

	/* Serve previously pushed-back bytes before reading the socket */
	if (conn->next_len != 0) {
		if (conn->next_len < len)
			len = conn->next_len;
		memmove(buf, conn->next_buf, len);
		conn->next_len -= len;
		conn->next_buf += len;
		return len;
	}

	/* Absolute deadline: fetchTimeout seconds from now (0 = no timeout) */
	if (fetchTimeout) {
		gettimeofday(&timeout_end, NULL);
		timeout_end.tv_sec += fetchTimeout;
	}

	pfd.fd = conn->sd;
	for (;;) {
		/*
		 * buf_events says what to wait for: POLLIN normally, POLLOUT
		 * when an SSL renegotiation wants to write, 0 when the SSL
		 * layer may have buffered data (skip poll entirely).
		 */
		pfd.events = conn->buf_events;
		if (fetchTimeout && pfd.events) {
			do {
				/* Remaining milliseconds until the deadline */
				timeout_cur = compute_timeout(&timeout_end);
				if (timeout_cur < 0) {
					errno = ETIMEDOUT;
					fetch_syserr();
					return (-1);
				}
				/* Clear errno so a stale EINTR can't trigger
				 * the restart check after a successful poll */
				errno = 0;
				r = poll(&pfd, 1, timeout_cur);
				if (r == -1) {
					if (errno == EINTR && fetchRestartCalls)
						continue;
					fetch_syserr();
					return (-1);
				}
			} while (pfd.revents == 0);
		}
#ifdef WITH_SSL
		if (conn->ssl != NULL) {
			rlen = SSL_read(conn->ssl, buf, len);
			if (rlen == -1) {
				switch (SSL_get_error(conn->ssl, rlen)) {
				case SSL_ERROR_WANT_READ:
					conn->buf_events = POLLIN;
					break;
				case SSL_ERROR_WANT_WRITE:
					/* Renegotiation: next poll waits for writability */
					conn->buf_events = POLLOUT;
					break;
				default:
					errno = EIO;
					fetch_syserr();
					return -1;
				}
				/* NOTE(review): after WANT_READ/WANT_WRITE we fall
				 * through to the errno check below; whether the loop
				 * restarts depends on errno left by the SSL layer —
				 * confirm against upstream libfetch behavior. */
			} else {
				/* Assume buffering on the SSL layer. */
				conn->buf_events = 0;
			}
		} else
#endif
			rlen = read(conn->sd, buf, len);
		if (rlen >= 0)
			break;

		/* Retry only on EINTR when restartable calls are enabled */
		if (errno != EINTR || !fetchRestartCalls)
			return (-1);
	}
	return (rlen);
}