void ws_connection::callback(async_fd &async) {
  if (async.ready_read()) {
    read_cb();
  }

  if (tcp_connection_.close_connection) {
    VLOG(3) << "close_connection";
    return;
  }

  if (async.ready_write()) {
    write_cb();
  }
}
void curlapi_http_cb(gchar *inputurl, gchar *inputparams, GSList *args,
                     gboolean oauth)
{
    gchar *url = g_strdup(inputurl);
    gchar *params = g_strdup(inputparams);
    struct curl_slist *oauthheader = NULL;
    CURL *curlapi = curl_easy_init();
    GString *buffer = g_string_new(NULL);
    GSList *threadargs = g_slist_copy(args);
    CURLcode returncode;

    threadargs = g_slist_prepend(threadargs, buffer);
    curl_easy_setopt(curlapi, CURLOPT_WRITEFUNCTION, curlapi_http_write_cb);
    curl_easy_setopt(curlapi, CURLOPT_WRITEDATA, threadargs);
    oauthheader = curlapi_get_oauthheader(&url, &params, oauth);
    curl_easy_setopt(curlapi, CURLOPT_URL, url);
    curl_easy_setopt(curlapi, CURLOPT_HTTPHEADER, oauthheader);
    if(params)
        curl_easy_setopt(curlapi, CURLOPT_POSTFIELDS, params);
    //curl_easy_setopt(curlapi, CURLOPT_VERBOSE, 1);
    curl_easy_setopt(curlapi, CURLOPT_FAILONERROR, 1);

    returncode = curl_easy_perform(curlapi);
    if(returncode != CURLE_OK && returncode != CURLE_WRITE_ERROR) {
        gchar *httperror = NULL;
        void (*write_cb)(GSList *args) = NULL;

        /* On HTTP failure, fetch the error text and hand it to the
         * caller's callback in place of a data line. */
        httperror = curlapi_http(url, params, oauth);
        threadargs = g_slist_remove(threadargs, buffer);
        write_cb = g_slist_nth_data(threadargs, 0);
        threadargs = g_slist_append(threadargs, g_strdup(httperror));
        write_cb(g_slist_nth(threadargs, 1));
        g_free(httperror);
    }

    g_free(url);
    g_free(params);
    g_string_free(buffer, TRUE);
    if(oauthheader)
        curl_slist_free_all(oauthheader);
    curl_easy_cleanup(curlapi);
}
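A hedged sketch of how this entry point might be driven. It assumes the args list carries the line callback at position 0 followed by whatever context that callback expects, which matches the layout that curlapi_http_write_cb (below) reads back out; my_line_cb, some_user_context, and the URL are placeholders, not part of the original code:

    /* Hypothetical setup: callback first, then its context arguments. */
    GSList *args = NULL;
    args = g_slist_append(args, (gpointer) my_line_cb);
    args = g_slist_append(args, some_user_context);

    /* NULL params means no POST body (a GET); TRUE enables the OAuth header. */
    curlapi_http_cb("https://example.com/stream", NULL, args, TRUE);
    g_slist_free(args);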
void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *format, va_list ap)
{
    char buf[MALLOC_PRINTF_BUFSIZE];

    if (write_cb == NULL) {
        /*
         * The caller did not provide an alternate write_cb callback
         * function, so use the default one.  malloc_write() is an
         * inline function, so use malloc_message() directly here.
         */
        write_cb = je_malloc_message;
        cbopaque = NULL;
    }

    malloc_vsnprintf(buf, sizeof(buf), format, ap);
    write_cb(cbopaque, buf);
}
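The write_cb/cbopaque pair here is the same contract jemalloc exposes publicly through malloc_stats_print(). A minimal sketch of a caller-supplied callback that captures the report into a buffer instead of stderr, assuming an unprefixed jemalloc build; the struct sink type and names are illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <jemalloc/jemalloc.h>

    struct sink { char data[65536]; size_t len; };

    /* Append each chunk jemalloc emits to our buffer. */
    static void capture_cb(void *cbopaque, const char *s)
    {
        struct sink *out = cbopaque;
        size_t n = strlen(s);

        if (n > sizeof(out->data) - out->len - 1)
            n = sizeof(out->data) - out->len - 1;  /* truncate on overflow */
        memcpy(out->data + out->len, s, n);
        out->len += n;
        out->data[out->len] = '\0';
    }

    int main(void)
    {
        struct sink out = { {0}, 0 };

        malloc_stats_print(capture_cb, &out, NULL);  /* NULL opts: full report */
        printf("%zu bytes of statistics captured\n", out.len);
        return 0;
    }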
void ndev_worker::run()
{
    // TODO: sockbuf size configuration instead of meaningless values
    sockbuf skb_in(mtu_, 256);
    sockbuf skb_out(1, 0);
    ndev_pool::write_callback_t write_cb;

    while (!pool_.is_stop()) {
        // Inbound: read a frame from the device and push it up the stack.
        if (skb_in.read(*this) > 0) {
            std::get<tack::ethernet>(layers_).process_packet(skb_in);
        }

        // Outbound: if the pool has a pending frame, write it out and
        // report the result through the task's completion callback.
        if (pool_.pick_task(skb_out, write_cb)) {
            int64_t ret = skb_out.write(*this);
            if (write_cb) {
                write_cb(ret);
            }
        }
    }
}
static size_t curlapi_http_write_cb(char *ptr, size_t size, size_t nmemb,
                                    void *userdata)
{
    gsize length = 0;
    GString *buffer = NULL;
    GSList *args = NULL;
    GSList *threadargs = NULL;
    gboolean (*write_cb)(GSList *args) = NULL;
    gchar *string = NULL;
    gchar *fullstring = NULL;

    length = size * nmemb;
    args = (GSList *) userdata;
    buffer = g_slist_nth_data(args, 0);
    write_cb = g_slist_nth_data(args, 1);
    string = g_strndup(ptr, length);

    /* A chunk ending in CRLF completes a line: flush the accumulated
     * buffer to the callback. Guard against chunks shorter than two
     * bytes before peeking at the terminator. */
    if(length >= 2 && g_strcmp0("\r\n", &string[length - 2]) == 0) {
        string[length - 2] = '\0';
        g_string_append(buffer, string);
        g_free(string);
        fullstring = g_strdup(buffer->str);
        g_string_set_size(buffer, 0);
        threadargs = g_slist_copy(g_slist_nth(args, 2));
        threadargs = g_slist_append(threadargs, fullstring);
        /* Make sure write_cb returns TRUE, else die: returning less
         * than length makes libcurl abort with CURLE_WRITE_ERROR. */
        if(write_cb(threadargs) == FALSE)
            length = 0;
    } else {
        g_string_append(buffer, string);
        g_free(string);
    }
    return(length);
}
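For reference, a sketch of the callback shape this write function expects at position 1 of its args list; the name my_line_cb and the printing are hypothetical. The callback receives a copy of the caller's trailing arguments with the completed line appended as the last element:

    /* Hypothetical line consumer: the completed line arrives as the last
     * element of the list; anything before it is caller-supplied context. */
    static gboolean my_line_cb(GSList *args)
    {
        gchar *line = g_slist_last(args)->data;

        g_print("line: %s\n", line);
        return TRUE;  /* FALSE makes the write callback abort the transfer */
    }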
static _IO_ssize_t
_IO_cookie_write (_IO_FILE *fp, const void *buf, _IO_ssize_t size)
{
  struct _IO_cookie_file *cfile = (struct _IO_cookie_file *) fp;
  cookie_write_function_t *write_cb = cfile->__io_functions.write;
#ifdef PTR_DEMANGLE
  PTR_DEMANGLE (write_cb);
#endif

  if (write_cb == NULL)
    {
      fp->_flags |= _IO_ERR_SEEN;
      return 0;
    }

  _IO_ssize_t n = write_cb (cfile->__cookie, buf, size);
  if (n < size)
    fp->_flags |= _IO_ERR_SEEN;

  return n;
}
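This is the stdio-side plumbing behind GNU fopencookie(). A small, self-contained sketch of how user code ends up having its write function invoked through the path above; count_write and the byte-counting behavior are illustrative:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/types.h>

    /* Cookie write function: count the bytes written instead of storing them. */
    static ssize_t count_write(void *cookie, const char *buf, size_t size)
    {
        (void) buf;
        *(size_t *) cookie += size;
        return size;  /* returning less than size marks the stream in error */
    }

    int main(void)
    {
        size_t written = 0;
        cookie_io_functions_t io = { .write = count_write };  /* read/seek/close stay NULL */
        FILE *fp = fopencookie(&written, "w", io);

        if (fp == NULL)
            return 1;
        fprintf(fp, "hello, cookie\n");
        fclose(fp);  /* flushes buffered data through count_write */
        printf("%zu bytes passed to the cookie\n", written);
        return 0;
    }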
int select_work(int listenfd)
{
    int fdcount = 0;
    int sock[FD_SETSIZE] = {0};

    sock[fdcount++] = listenfd;

    while (1) {
        fd_set read_set, write_set, error_set;
        FD_ZERO(&read_set);
        FD_ZERO(&write_set);
        FD_ZERO(&error_set);

        struct timeval tval;
        tval.tv_sec = 0;
        tval.tv_usec = 100 * 1000; // 100ms

        int ready = 0;
        int i = 0;
        int nfds = 0;

        // Rebuild all three fd sets every iteration; select() mutates them.
        for (i = 0; i < fdcount; i++) {
            if (sock[i] > 0) {
                FD_SET(sock[i], &read_set);
                FD_SET(sock[i], &write_set);
                FD_SET(sock[i], &error_set);
            }
            if (sock[i] > nfds) {
                nfds = sock[i];
            }
        }

        ready = select(nfds + 1, &read_set, &write_set, &error_set, &tval);
        if (ready < 0) {
            perror("select error");
            return -1;
        } else if (ready == 0) {
            //printf("no select events\n");
            usleep(100);
        } else {
            for (i = 0; i < fdcount; i++) {
                if (FD_ISSET(sock[i], &write_set) && sock[i] > 0) {
                    if (write_cb(sock[i]) < 0) {
                        sock[i] = 0; // drop the slot on write failure
                    }
                }
                if (FD_ISSET(sock[i], &read_set) && sock[i] > 0) {
                    if (sock[i] == listenfd) {
                        // accept events
                        struct sockaddr_in client_addr;
                        bzero(&client_addr, sizeof(client_addr));
                        socklen_t socklen = sizeof(struct sockaddr_in);
                        int fd = accept(listenfd, (struct sockaddr *)&client_addr, &socklen);
                        if (fd < 0) {
                            perror("accept error");
                            return -1;
                        }
                        printf("accept client from[%s]\n", inet_ntoa(client_addr.sin_addr));
                        if (fdcount < FD_SETSIZE) {
                            sock[fdcount++] = fd; // picked up by the next rebuild
                        } else {
                            close(fd); // table full: refuse the connection
                        }
                    } else {
                        if (read_cb(sock[i]) < 0) {
                            sock[i] = 0; // drop the slot on read failure/EOF
                        }
                    }
                }
            }
        }
    }
    return 0;
}
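select_work() leaves read_cb() and write_cb() undefined; a minimal sketch of callbacks satisfying the contract the loop assumes (a negative return means "retire this descriptor"). The bodies here are illustrative, not the original implementations:

    #include <unistd.h>

    /* Drain whatever arrived; <0 tells the loop to drop the descriptor. */
    static int read_cb(int fd)
    {
        char buf[4096];
        ssize_t n = read(fd, buf, sizeof(buf));

        if (n < 0)
            return -1;   /* read error */
        if (n == 0) {
            close(fd);   /* peer closed the connection */
            return -1;
        }
        return (int) n;
    }

    /* Nothing queued in this sketch; a real server would flush pending output. */
    static int write_cb(int fd)
    {
        (void) fd;
        return 0;
    }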
void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{
    int err;
    uint64_t epoch;
    size_t u64sz;
    bool general = true;
    bool merged = true;
    bool unmerged = true;
    bool bins = true;
    bool large = true;

    /*
     * Refresh stats, in case mallctl() was called by the application.
     *
     * Check for OOM here, since refreshing the ctl cache can trigger
     * allocation.  In practice, none of the subsequent mallctl()-related
     * calls in this function will cause OOM if this one succeeds.
     */
    epoch = 1;
    u64sz = sizeof(uint64_t);
    err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
    if (err != 0) {
        if (err == EAGAIN) {
            malloc_write("<jemalloc>: Memory allocation failure in "
                "mallctl(\"epoch\", ...)\n");
            return;
        }
        malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
            "...)\n");
        abort();
    }

    if (write_cb == NULL) {
        /*
         * The caller did not provide an alternate write_cb callback
         * function, so use the default one.  malloc_write() is an
         * inline function, so use malloc_message() directly here.
         */
        write_cb = je_malloc_message;
        cbopaque = NULL;
    }

    if (opts != NULL) {
        unsigned i;

        for (i = 0; opts[i] != '\0'; i++) {
            switch (opts[i]) {
            case 'g': general = false; break;
            case 'm': merged = false; break;
            case 'a': unmerged = false; break;
            case 'b': bins = false; break;
            case 'l': large = false; break;
            default:;
            }
        }
    }

    write_cb(cbopaque, "___ Begin jemalloc statistics ___\n");
    if (general) {
        int err;
        const char *cpv;
        bool bv;
        unsigned uv;
        ssize_t ssv;
        size_t sv, bsz, ssz, sssz, cpsz;

        bsz = sizeof(bool);
        ssz = sizeof(size_t);
        sssz = sizeof(ssize_t);
        cpsz = sizeof(const char *);

        CTL_GET("version", &cpv, const char *);
        malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
        CTL_GET("config.debug", &bv, bool);
        malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
            bv ? "enabled" : "disabled");

#define OPT_WRITE_BOOL(n)                                       \
        if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0))    \
            == 0) {                                             \
                malloc_cprintf(write_cb, cbopaque,              \
                    " opt."#n": %s\n", bv ? "true" : "false");  \
        }
#define OPT_WRITE_SIZE_T(n)                                     \
        if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0))    \
            == 0) {                                             \
                malloc_cprintf(write_cb, cbopaque,              \
                    " opt."#n": %zu\n", sv);                    \
        }
#define OPT_WRITE_SSIZE_T(n)                                    \
        if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0))  \
            == 0) {                                             \
                malloc_cprintf(write_cb, cbopaque,              \
                    " opt."#n": %zd\n", ssv);                   \
        }
#define OPT_WRITE_CHAR_P(n)                                     \
        if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0))  \
            == 0) {                                             \
                malloc_cprintf(write_cb, cbopaque,              \
                    " opt."#n": \"%s\"\n", cpv);                \
        }

        write_cb(cbopaque, "Run-time option settings:\n");
        OPT_WRITE_BOOL(abort)
        OPT_WRITE_SIZE_T(lg_chunk)
        OPT_WRITE_SIZE_T(narenas)
        OPT_WRITE_SSIZE_T(lg_dirty_mult)
        OPT_WRITE_BOOL(stats_print)
        OPT_WRITE_BOOL(junk)
        OPT_WRITE_SIZE_T(quarantine)
        OPT_WRITE_BOOL(redzone)
        OPT_WRITE_BOOL(zero)
        OPT_WRITE_BOOL(utrace)
        OPT_WRITE_BOOL(valgrind)
        OPT_WRITE_BOOL(xmalloc)
        OPT_WRITE_BOOL(tcache)
        OPT_WRITE_SSIZE_T(lg_tcache_max)
        OPT_WRITE_BOOL(prof)
        OPT_WRITE_CHAR_P(prof_prefix)
        OPT_WRITE_BOOL(prof_active)
        OPT_WRITE_SSIZE_T(lg_prof_sample)
        OPT_WRITE_BOOL(prof_accum)
        OPT_WRITE_SSIZE_T(lg_prof_interval)
        OPT_WRITE_BOOL(prof_gdump)
        OPT_WRITE_BOOL(prof_final)
        OPT_WRITE_BOOL(prof_leak)

#undef OPT_WRITE_BOOL
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P

        malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);

        CTL_GET("arenas.narenas", &uv, unsigned);
        malloc_cprintf(write_cb, cbopaque, "Max arenas: %u\n", uv);

        malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
            sizeof(void *));

        CTL_GET("arenas.quantum", &sv, size_t);
        malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);

        CTL_GET("arenas.page", &sv, size_t);
        malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);

        CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
        if (ssv >= 0) {
            malloc_cprintf(write_cb, cbopaque,
                "Min active:dirty page ratio per arena: %u:1\n",
                (1U << ssv));
        } else {
            write_cb(cbopaque,
                "Min active:dirty page ratio per arena: N/A\n");
        }
        if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
            == 0) {
            malloc_cprintf(write_cb, cbopaque,
                "Maximum thread-cached size class: %zu\n", sv);
        }
        if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
            bv) {
            CTL_GET("opt.lg_prof_sample", &sv, size_t);
            malloc_cprintf(write_cb, cbopaque,
                "Average profile sample interval: %"PRIu64
                " (2^%zu)\n", (((uint64_t)1U) << sv), sv);

            CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
            if (ssv >= 0) {
                malloc_cprintf(write_cb, cbopaque,
                    "Average profile dump interval: %"PRIu64
                    " (2^%zd)\n", (((uint64_t)1U) << ssv), ssv);
            } else {
                write_cb(cbopaque,
                    "Average profile dump interval: N/A\n");
            }
        }
        CTL_GET("opt.lg_chunk", &sv, size_t);
        malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
            (ZU(1) << sv), sv);
    }

    if (config_stats) {
        size_t *cactive;
        size_t allocated, active, mapped;
        size_t chunks_current, chunks_high;
        uint64_t chunks_total;
        size_t huge_allocated;
        uint64_t huge_nmalloc, huge_ndalloc;

        CTL_GET("stats.cactive", &cactive, size_t *);
        CTL_GET("stats.allocated", &allocated, size_t);
        CTL_GET("stats.active", &active, size_t);
        CTL_GET("stats.mapped", &mapped, size_t);
        malloc_cprintf(write_cb, cbopaque,
            "Allocated: %zu, active: %zu, mapped: %zu\n",
            allocated, active, mapped);
        malloc_cprintf(write_cb, cbopaque,
            "Current active ceiling: %zu\n", atomic_read_z(cactive));

        /* Print chunk stats. */
        CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
        CTL_GET("stats.chunks.high", &chunks_high, size_t);
        CTL_GET("stats.chunks.current", &chunks_current, size_t);
        malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
            "highchunks curchunks\n");
        malloc_cprintf(write_cb, cbopaque,
            " %13"PRIu64"%13zu%13zu\n",
            chunks_total, chunks_high, chunks_current);

        /* Print huge stats. */
        CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
        CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
        CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
        malloc_cprintf(write_cb, cbopaque,
            "huge: nmalloc ndalloc allocated\n");
        malloc_cprintf(write_cb, cbopaque,
            " %12"PRIu64" %12"PRIu64" %12zu\n",
            huge_nmalloc, huge_ndalloc, huge_allocated);

        if (merged) {
            unsigned narenas;

            CTL_GET("arenas.narenas", &narenas, unsigned);
            {
                bool initialized[narenas];
                size_t isz;
                unsigned i, ninitialized;

                isz = sizeof(initialized);
                xmallctl("arenas.initialized", initialized, &isz,
                    NULL, 0);
                for (i = ninitialized = 0; i < narenas; i++) {
                    if (initialized[i])
                        ninitialized++;
                }

                if (ninitialized > 1 || unmerged == false) {
                    /* Print merged arena stats. */
                    malloc_cprintf(write_cb, cbopaque,
                        "\nMerged arenas stats:\n");
                    stats_arena_print(write_cb, cbopaque,
                        narenas, bins, large);
                }
            }
        }

        if (unmerged) {
            unsigned narenas;

            /* Print stats for each arena. */
            CTL_GET("arenas.narenas", &narenas, unsigned);
            {
                bool initialized[narenas];
                size_t isz;
                unsigned i;

                isz = sizeof(initialized);
                xmallctl("arenas.initialized", initialized, &isz,
                    NULL, 0);

                for (i = 0; i < narenas; i++) {
                    if (initialized[i]) {
                        malloc_cprintf(write_cb, cbopaque,
                            "\narenas[%u]:\n", i);
                        stats_arena_print(write_cb,
                            cbopaque, i, bins, large);
                    }
                }
            }
        }
    }
    write_cb(cbopaque, "--- End jemalloc statistics ---\n");
}
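CTL_GET and xmallctl are internal jemalloc helpers: xmallctl is a mallctl wrapper that aborts on failure, and CTL_GET asks it for a value of a known type. A rough sketch of the macro's shape, shown for orientation only (the exact definition lives in jemalloc's stats.c and may differ between versions):

    /* Approximate shape of the internal helper, not a verbatim copy. */
    #define CTL_GET(n, v, t) do {           \
        size_t sz = sizeof(t);              \
        xmallctl(n, v, &sz, NULL, 0);       \
    } while (0)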
/*
 * NET_Monitor
 * Monitors the given sockets with the given timeout in milliseconds.
 * Closed and loopback sockets are ignored.
 * Calls read_cb(socket_t *, void *) with the socket and its private data
 * when incoming data is detected on it, write_cb(...) when the socket is
 * ready to accept outgoing data, and exception_cb(...) when a socket
 * exception is detected on it.
 * NULL may be passed for any of the callbacks; when NULL is passed for
 * exception_cb, no exception detection is performed. Incoming data is
 * always detected, even if read_cb is NULL.
 */
int NET_Monitor( int msec, socket_t *sockets[],
    void (*read_cb)(socket_t *, void*),
    void (*write_cb)(socket_t *, void*),
    void (*exception_cb)(socket_t *, void*),
    void *privatep[] )
{
    struct timeval timeout;
    fd_set fdsetr, fdsetw, fdsete;
    fd_set *p_fdsetw = NULL, *p_fdsete = NULL;
    int i, ret;
    int fdmax = 0;

    if( !sockets || !sockets[0] )
        return 0;

    FD_ZERO( &fdsetr );
    if( write_cb ) {
        FD_ZERO( &fdsetw );
        p_fdsetw = &fdsetw;
    }
    if( exception_cb ) {
        FD_ZERO( &fdsete );
        p_fdsete = &fdsete;
    }

    for( i = 0; sockets[i]; i++ ) {
        if( !sockets[i]->open )
            continue;

        switch( sockets[i]->type ) {
            case SOCKET_UDP:
#ifdef TCP_SUPPORT
            case SOCKET_TCP:
#endif
                assert( sockets[i]->handle > 0 );
                fdmax = max( (int)sockets[i]->handle, fdmax );
                FD_SET( sockets[i]->handle, &fdsetr ); // network socket
                if( p_fdsetw )
                    FD_SET( sockets[i]->handle, p_fdsetw );
                if( p_fdsete )
                    FD_SET( sockets[i]->handle, p_fdsete );
                break;
            case SOCKET_LOOPBACK:
            default:
                continue;
        }
    }

    timeout.tv_sec = msec / 1000;
    timeout.tv_usec = ( msec % 1000 ) * 1000;
    ret = select( fdmax + 1, &fdsetr, p_fdsetw, p_fdsete, &timeout );
    if( ret > 0 && ( read_cb || write_cb || exception_cb ) ) {
        // Launch callbacks
        for( i = 0; sockets[i]; i++ ) {
            if( !sockets[i]->open )
                continue;

            switch( sockets[i]->type ) {
                case SOCKET_UDP:
#ifdef TCP_SUPPORT
                case SOCKET_TCP:
#endif
                    if( exception_cb && FD_ISSET( sockets[i]->handle, p_fdsete ) )
                        exception_cb( sockets[i], privatep ? privatep[i] : NULL );
                    if( read_cb && FD_ISSET( sockets[i]->handle, &fdsetr ) )
                        read_cb( sockets[i], privatep ? privatep[i] : NULL );
                    if( write_cb && FD_ISSET( sockets[i]->handle, p_fdsetw ) )
                        write_cb( sockets[i], privatep ? privatep[i] : NULL );
                    break;
                case SOCKET_LOOPBACK:
                default:
                    continue;
            }
        }
    }

    return ret;
}
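A hedged usage sketch, assuming the socket_t layout implied above; on_readable, network_pump, and the single-socket list are illustrative, not part of the original code:

    /* Hypothetical callback: pull whatever arrived on this socket. */
    static void on_readable( socket_t *s, void *privdata )
    {
        (void) privdata;
        /* ... read from s->handle and dispatch ... */
    }

    void network_pump( socket_t *udp_socket )
    {
        socket_t *sockets[] = { udp_socket, NULL };  /* NULL-terminated list */

        /* Wait up to 50 ms; no write-readiness or exception callbacks. */
        NET_Monitor( 50, sockets, on_readable, NULL, NULL, NULL );
    }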