int async_add_fd_read(struct wsgi_request *wsgi_req, int fd, int timeout) {

	struct uwsgi_async_fd *last_uad = NULL, *uad = wsgi_req->waiting_fds;

	if (fd < 0) return -1;

	// find the last slot (append to the tail of the waiting_fds list)
	while (uad) {
		last_uad = uad;
		uad = uad->next;
	}

	uad = uwsgi_malloc(sizeof(struct uwsgi_async_fd));
	uad->fd = fd;
	uad->event = event_queue_read();
	uad->prev = last_uad;
	uad->next = NULL;

	if (last_uad) {
		last_uad->next = uad;
	}
	else {
		wsgi_req->waiting_fds = uad;
	}

	if (timeout > 0) {
		async_add_timeout(wsgi_req, timeout);
	}

	uwsgi.async_waiting_fd_table[fd] = wsgi_req;
	wsgi_req->async_force_again = 1;

	return event_queue_add_fd_read(uwsgi.async_queue, fd);
}
static int uwsgi_hook_appendn(char *arg) {
	char *space = strchr(arg, ' ');
	if (!space) {
		uwsgi_log("invalid hook appendn syntax, must be: <file> <string>\n");
		return -1;
	}
	*space = 0;

	int fd = open(arg, O_WRONLY | O_CREAT | O_APPEND, 0666);
	if (fd < 0) {
		uwsgi_error_open(arg);
		*space = ' ';
		return -1;
	}
	*space = ' ';

	size_t l = strlen(space + 1);
	char *buf = uwsgi_malloc(l + 1);
	memcpy(buf, space + 1, l);
	buf[l] = '\n';

	if (write(fd, buf, l + 1) != (ssize_t) (l + 1)) {
		uwsgi_error("uwsgi_hook_appendn()/write()");
		free(buf);
		close(fd);
		return -1;
	}

	free(buf);
	close(fd);
	return 0;
}
static void runqueue_push(struct wsgi_request *wsgi_req) {

	// do not push the same request in the runqueue
	struct uwsgi_async_request *uar = uwsgi.async_runqueue;
	while (uar) {
		if (uar->wsgi_req == wsgi_req) return;
		uar = uar->next;
	}

	uar = uwsgi_malloc(sizeof(struct uwsgi_async_request));
	uar->prev = NULL;
	uar->next = NULL;
	uar->wsgi_req = wsgi_req;

	if (uwsgi.async_runqueue == NULL) {
		uwsgi.async_runqueue = uar;
	}
	else {
		uar->prev = uwsgi.async_runqueue_last;
	}

	if (uwsgi.async_runqueue_last) {
		uwsgi.async_runqueue_last->next = uar;
	}

	uwsgi.async_runqueue_last = uar;
}
void uwsgi_async_init() {

	int i;

	uwsgi.async_queue = event_queue_init();

	if (uwsgi.async_queue < 0) {
		exit(1);
	}

	uwsgi_add_sockets_to_queue(uwsgi.async_queue, -1);

	uwsgi.rb_async_timeouts = uwsgi_init_rb_timer();

	// a stack of unused cores
	uwsgi.async_queue_unused = uwsgi_malloc(sizeof(struct wsgi_request *) * uwsgi.async);

	// fill it with default values
	for (i = 0; i < uwsgi.async; i++) {
		uwsgi.async_queue_unused[i] = &uwsgi.workers[uwsgi.mywid].cores[i].req;
	}

	// the first available core is the last one
	uwsgi.async_queue_unused_ptr = uwsgi.async - 1;
}
void uwsgi_python_fixup() {
	// set hacky modifier 30
	uwsgi.p[30] = uwsgi_malloc(sizeof(struct uwsgi_plugin));
	memcpy(uwsgi.p[30], uwsgi.p[0], sizeof(struct uwsgi_plugin));
	uwsgi.p[30]->init_thread = NULL;
	uwsgi.p[30]->atexit = NULL;
}
int hr_https_add_vars(struct http_session *hr, struct uwsgi_buffer *out) {

	// HTTPS (adapted from nginx)
	if (hr->session.ugs->mode == UWSGI_HTTP_SSL) {
		if (uwsgi_buffer_append_keyval(out, "HTTPS", 5, "on", 2)) return -1;
		hr->ssl_client_cert = SSL_get_peer_certificate(hr->ssl);
		if (hr->ssl_client_cert) {
			X509_NAME *name = X509_get_subject_name(hr->ssl_client_cert);
			if (name) {
				hr->ssl_client_dn = X509_NAME_oneline(name, NULL, 0);
				if (uwsgi_buffer_append_keyval(out, "HTTPS_DN", 8, hr->ssl_client_dn, strlen(hr->ssl_client_dn))) return -1;
			}
			if (uhttp.https_export_cert) {
				hr->ssl_bio = BIO_new(BIO_s_mem());
				if (hr->ssl_bio) {
					if (PEM_write_bio_X509(hr->ssl_bio, hr->ssl_client_cert) > 0) {
						size_t cc_len = BIO_pending(hr->ssl_bio);
						hr->ssl_cc = uwsgi_malloc(cc_len);
						BIO_read(hr->ssl_bio, hr->ssl_cc, cc_len);
						if (uwsgi_buffer_append_keyval(out, "HTTPS_CC", 8, hr->ssl_cc, cc_len)) return -1;
					}
				}
			}
		}
	}
	else if (hr->session.ugs->mode == UWSGI_HTTP_FORCE_SSL) {
		hr->force_ssl = 1;
	}

	return 0;
}
void *uwsgi_libffi_get_value(char *what, ffi_type *t) {
	if (t == &ffi_type_sint32) {
		int32_t *num = uwsgi_malloc(sizeof(int32_t));
		*num = atoi(what);
		return num;
	}
	return NULL;
}
ssize_t uwsgi_lf_ctime(struct wsgi_request *wsgi_req, char **buf) {
	*buf = uwsgi_malloc(26);
#ifdef __sun__
	ctime_r((const time_t *) &wsgi_req->start_of_request.tv_sec, *buf, 26);
#else
	ctime_r((const time_t *) &wsgi_req->start_of_request.tv_sec, *buf);
#endif
	return 24;
}
static char *uwsgi_cgi_get_docroot(char *path_info, uint16_t path_info_len, int *need_free, int *is_a_file, int *discard_base, char **script_name) {

	struct uwsgi_dyn_dict *udd = uc.mountpoint, *choosen_udd = NULL;
	int best_found = 0;
	struct stat st;
	char *path = NULL;

	if (uc.has_mountpoints) {
		while (udd) {
			if (udd->vallen) {
				if (!uwsgi_starts_with(path_info, path_info_len, udd->key, udd->keylen) && udd->keylen > best_found) {
					best_found = udd->keylen;
					choosen_udd = udd;
					path = udd->value;
					*script_name = udd->key;
					*discard_base = udd->keylen;
					if (udd->key[udd->keylen - 1] == '/') {
						*discard_base = *discard_base - 1;
					}
				}
			}
			udd = udd->next;
		}
	}

	if (choosen_udd == NULL) {
		choosen_udd = uc.default_cgi;
		if (!choosen_udd) return NULL;
		path = choosen_udd->key;
	}

	if (choosen_udd->status == 0) {
		char *tmp_udd = uwsgi_malloc(PATH_MAX + 1);
		if (!realpath(path, tmp_udd)) {
			free(tmp_udd);
			return NULL;
		}

		if (stat(tmp_udd, &st)) {
			uwsgi_error("stat()");
			free(tmp_udd);
			return NULL;
		}

		if (!S_ISDIR(st.st_mode)) {
			*is_a_file = 1;
		}

		*need_free = 1;
		return tmp_udd;
	}

	if (choosen_udd->status == 2) *is_a_file = 1;

	return path;
}
static int uwsgi_libffi_hook(char *arg) {
	size_t argc = 0;
	size_t i;
	char **argv = uwsgi_split_quoted(arg, strlen(arg), " \t", &argc);
	if (!argc) goto end;

	void *func = dlsym(RTLD_DEFAULT, argv[0]);
	if (!func) goto destroy;

	ffi_cif cif;
	ffi_type **args_type = (ffi_type **) uwsgi_malloc(sizeof(ffi_type *) * (argc - 1));
	void **values = uwsgi_malloc(sizeof(void *) * (argc - 1));

	for (i = 1; i < argc; i++) {
		size_t skip = 0;
		args_type[i - 1] = uwsgi_libffi_get_type(argv[i], &skip);
		void *v = uwsgi_libffi_get_value(argv[i] + skip, args_type[i - 1]);
		values[i - 1] = v ? v : &argv[i];
		uwsgi_log("%d = %s %p\n", (int) i, argv[i], values[i - 1]);
	}

	if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, argc - 1, &ffi_type_sint64, args_type) == FFI_OK) {
		int64_t rc = 0;
		uwsgi_log("ready to call\n");
		ffi_call(&cif, func, &rc, values);
	}
	uwsgi_log("ready to call2\n");

	// free only the values allocated by uwsgi_libffi_get_value()
	for (i = 0; i < (argc - 1); i++) {
		char **ptr = (char **) values[i];
		if (*ptr != argv[i + 1]) {
			free(values[i]);
		}
	}

	free(args_type);
	free(values);

destroy:
	for (i = 0; i < argc; i++) {
		free(argv[i]);
	}
end:
	free(argv);
	return -1;
}
static void uwsgi_crypto_logger_setup_encryption(struct uwsgi_crypto_logger_conf *uclc) {

	if (!uwsgi.ssl_initialized) {
		uwsgi_ssl_init();
	}

	uclc->encrypt_ctx = uwsgi_malloc(sizeof(EVP_CIPHER_CTX));
	EVP_CIPHER_CTX_init(uclc->encrypt_ctx);

	const EVP_CIPHER *cipher = EVP_get_cipherbyname(uclc->algo);
	if (!cipher) {
		uwsgi_log_safe("[uwsgi-logcrypto] unable to find algorithm/cipher\n");
		exit(1);
	}

	int cipher_len = EVP_CIPHER_key_length(cipher);
	size_t s_len = strlen(uclc->secret);
	if ((unsigned int) cipher_len > s_len) {
		char *secret_tmp = uwsgi_malloc(cipher_len);
		memcpy(secret_tmp, uclc->secret, s_len);
		memset(secret_tmp + s_len, 0, cipher_len - s_len);
		uclc->secret = secret_tmp;
	}

	int iv_len = EVP_CIPHER_iv_length(cipher);
	size_t s_iv_len = 0;
	if (uclc->iv) {
		s_iv_len = strlen(uclc->iv);
	}
	if ((unsigned int) iv_len > s_iv_len) {
		char *secret_tmp = uwsgi_malloc(iv_len);
		memcpy(secret_tmp, uclc->iv, s_iv_len);
		memset(secret_tmp + s_iv_len, '0', iv_len - s_iv_len);
		uclc->iv = secret_tmp;
	}

	if (EVP_EncryptInit_ex(uclc->encrypt_ctx, cipher, NULL, (const unsigned char *) uclc->secret, (const unsigned char *) uclc->iv) <= 0) {
		uwsgi_error_safe("uwsgi_crypto_logger_setup_encryption()/EVP_EncryptInit_ex()");
		exit(1);
	}
}
struct uwsgi_fmon *event_queue_ack_file_monitor(int eq, int id) {

	ssize_t rlen = 0;
	struct inotify_event ie, *bie, *iie;
	int i, j;
	int items = 0;
	unsigned int isize = sizeof(struct inotify_event);
	struct uwsgi_fmon *uf = NULL;

	if (ioctl(id, FIONREAD, &isize) < 0) {
		uwsgi_error("ioctl()");
		return NULL;
	}

	if (isize > sizeof(struct inotify_event)) {
		bie = uwsgi_malloc(isize);
		rlen = read(id, bie, isize);
	}
	else {
		rlen = read(id, &ie, sizeof(struct inotify_event));
		bie = &ie;
	}

	if (rlen < 0) {
		uwsgi_error("read()");
	}
	else {
		items = isize / (sizeof(struct inotify_event));
#ifdef UWSGI_DEBUG
		uwsgi_log("inotify returned %d items\n", items);
#endif
		for (j = 0; j < items; j++) {
			iie = &bie[j];
			for (i = 0; i < ushared->files_monitored_cnt; i++) {
				if (ushared->files_monitored[i].registered) {
					if (ushared->files_monitored[i].fd == id && ushared->files_monitored[i].id == iie->wd) {
						uf = &ushared->files_monitored[i];
					}
				}
			}
		}
		if (items > 1) {
			free(bie);
		}
		return uf;
	}

	return NULL;
}
static int u_offload_pipe_do(struct uwsgi_thread *ut, struct uwsgi_offload_request *uor, int fd) {

	ssize_t rlen;

	// setup
	if (fd == -1) {
		event_queue_add_fd_read(ut->queue, uor->fd);
		return 0;
	}

	switch (uor->status) {
	// read event from fd
	case 0:
		if (!uor->buf) {
			uor->buf = uwsgi_malloc(4096);
		}
		rlen = read(uor->fd, uor->buf, 4096);
		if (rlen > 0) {
			uor->to_write = rlen;
			uor->pos = 0;
			if (event_queue_del_fd(ut->queue, uor->fd, event_queue_read())) return -1;
			if (event_queue_add_fd_write(ut->queue, uor->s)) return -1;
			uor->status = 1;
			return 0;
		}
		if (rlen < 0) {
			uwsgi_offload_retry
			uwsgi_error("u_offload_pipe_do() -> read()");
		}
		return -1;
	// write event on s
	case 1:
		rlen = write(uor->s, uor->buf + uor->pos, uor->to_write);
		if (rlen > 0) {
			uor->to_write -= rlen;
			uor->pos += rlen;
			if (uor->to_write == 0) {
				if (event_queue_del_fd(ut->queue, uor->s, event_queue_write())) return -1;
				if (event_queue_add_fd_read(ut->queue, uor->fd)) return -1;
				uor->status = 0;
			}
			return 0;
		}
		else if (rlen < 0) {
			uwsgi_offload_retry
			uwsgi_error("u_offload_pipe_do() -> write()");
		}
		return -1;
	default:
		break;
	}

	return -1;
}
static void uwsgi_alarm_thread_loop(struct uwsgi_thread *ut) {
	// add uwsgi_alarm_fd;
	struct uwsgi_alarm_fd *uafd = uwsgi.alarm_fds;
	while (uafd) {
		event_queue_add_fd_read(ut->queue, uafd->fd);
		uafd = uafd->next;
	}

	char *buf = uwsgi_malloc(uwsgi.alarm_msg_size + sizeof(long));
	for (;;) {
		int interesting_fd = -1;
		int ret = event_queue_wait(ut->queue, -1, &interesting_fd);
		if (ret > 0) {
			if (interesting_fd == ut->pipe[1]) {
				ssize_t len = read(ut->pipe[1], buf, uwsgi.alarm_msg_size + sizeof(long));
				if (len > (ssize_t) (sizeof(long) + 1)) {
					size_t msg_size = len - sizeof(long);
					char *msg = buf + sizeof(long);
					long ptr = 0;
					memcpy(&ptr, buf, sizeof(long));
					struct uwsgi_alarm_instance *uai = (struct uwsgi_alarm_instance *) ptr;
					if (!uai) break;
					uwsgi_alarm_run(uai, msg, msg_size);
				}
			}
			// check for alarm_fd
			else {
				uafd = uwsgi.alarm_fds;
				int fd_read = 0;
				while (uafd) {
					if (interesting_fd == uafd->fd) {
						if (fd_read) goto raise;
						size_t remains = uafd->buf_len;
						while (remains) {
							ssize_t len = read(uafd->fd, uafd->buf + (uafd->buf_len - remains), remains);
							if (len <= 0) {
								uwsgi_error("[uwsgi-alarm-fd]/read()");
								uwsgi_log("[uwsgi-alarm-fd] i will stop monitoring fd %d\n", uafd->fd);
								event_queue_del_fd(ut->queue, uafd->fd, event_queue_read());
								break;
							}
							remains -= len;
						}
						fd_read = 1;
raise:
						uwsgi_alarm_run(uafd->alarm, uafd->msg, uafd->msg_len);
					}
					uafd = uafd->next;
				}
			}
		}
	}
	free(buf);
}
static int uwsgi_lua_init() {

	uwsgi_log("Initializing Lua environment... (%d lua_States)\n", uwsgi.cores);

	ulua.L = uwsgi_malloc(sizeof(lua_State *) * uwsgi.cores);

	// ok the lua engine is ready
	return 0;
}
char *uwsgi_strncopy(char *s, int len) {

	char *buf;

	buf = uwsgi_malloc(len + 1);
	buf[len] = 0;

	memcpy(buf, s, len);

	return buf;
}
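/*
 * A minimal usage sketch for uwsgi_strncopy(): it turns a length-delimited
 * (non NUL-terminated) buffer into a heap-allocated C string owned by the
 * caller. The request fields used below (wsgi_req->uri / wsgi_req->uri_len)
 * and the helper name are illustrative assumptions, not code taken from the
 * project.
 */
static void example_log_uri(struct wsgi_request *wsgi_req) {
	// copy the length-delimited URI into a NUL-terminated string
	char *uri = uwsgi_strncopy(wsgi_req->uri, wsgi_req->uri_len);
	uwsgi_log("requested uri: %s\n", uri);
	// the caller owns the buffer returned by uwsgi_strncopy()
	free(uri);
}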
// use this instead of fprintf to avoid buffering mess with udp logging
void uwsgi_log(const char *fmt, ...) {
	va_list ap;
	char logpkt[4096];
	int rlen = 0;
	int ret;

	struct timeval tv;
	char sftime[64];
	char ctime_storage[26];
	time_t now;

	if (uwsgi.logdate) {
		if (uwsgi.log_strftime) {
			now = uwsgi_now();
			rlen = strftime(sftime, 64, uwsgi.log_strftime, localtime(&now));
			memcpy(logpkt, sftime, rlen);
			memcpy(logpkt + rlen, " - ", 3);
			rlen += 3;
		}
		else {
			gettimeofday(&tv, NULL);
#ifdef __sun__
			ctime_r((const time_t *) &tv.tv_sec, ctime_storage, 26);
#else
			ctime_r((const time_t *) &tv.tv_sec, ctime_storage);
#endif
			memcpy(logpkt, ctime_storage, 24);
			memcpy(logpkt + 24, " - ", 3);
			rlen = 24 + 3;
		}
	}

	va_start(ap, fmt);
	ret = vsnprintf(logpkt + rlen, 4096 - rlen, fmt, ap);
	va_end(ap);

	if (ret >= 4096) {
		char *tmp_buf = uwsgi_malloc(rlen + ret + 1);
		memcpy(tmp_buf, logpkt, rlen);
		va_start(ap, fmt);
		ret = vsnprintf(tmp_buf + rlen, ret + 1, fmt, ap);
		va_end(ap);
		rlen = write(2, tmp_buf, rlen + ret);
		free(tmp_buf);
		return;
	}

	rlen += ret;
	// do not check for errors
	rlen = write(2, logpkt, rlen);
}
int u_green_init() {

	static int i;

	if (!ug.ugreen) {
		return 0;
	}

	ug.u_stack_size = UGREEN_DEFAULT_STACKSIZE;

	if (ug.stackpages > 0) {
		ug.u_stack_size = ug.stackpages * uwsgi.page_size;
	}

	uwsgi_log("initializing %d uGreen threads with stack size of %lu (%lu KB)\n", uwsgi.async, (unsigned long) ug.u_stack_size, (unsigned long) ug.u_stack_size / 1024);

	ug.contexts = uwsgi_malloc(sizeof(ucontext_t) * uwsgi.async);

	for (i = 0; i < uwsgi.async; i++) {
		getcontext(&ug.contexts[i]);
		ug.contexts[i].uc_stack.ss_sp = mmap(NULL, ug.u_stack_size + (uwsgi.page_size * 2), PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANON | MAP_PRIVATE, -1, 0) + uwsgi.page_size;

		if (!ug.contexts[i].uc_stack.ss_sp) {
			uwsgi_error("mmap()");
			exit(1);
		}
		// set guard pages for stack
		if (mprotect(ug.contexts[i].uc_stack.ss_sp - uwsgi.page_size, uwsgi.page_size, PROT_NONE)) {
			uwsgi_error("mprotect()");
			exit(1);
		}
		if (mprotect(ug.contexts[i].uc_stack.ss_sp + ug.u_stack_size, uwsgi.page_size, PROT_NONE)) {
			uwsgi_error("mprotect()");
			exit(1);
		}

		ug.contexts[i].uc_stack.ss_size = ug.u_stack_size;
	}

	uwsgi.schedule_to_main = u_green_schedule_to_main;
	uwsgi.schedule_to_req = u_green_schedule_to_req;

	return 0;
}
static struct uwsgi_fastrouter_socket *uwsgi_fastrouter_new_socket(char *name) {

	struct uwsgi_fastrouter_socket *uwsgi_sock = ufr.sockets, *old_uwsgi_sock;

	if (!uwsgi_sock) {
		ufr.sockets = uwsgi_malloc(sizeof(struct uwsgi_fastrouter_socket));
		uwsgi_sock = ufr.sockets;
	}
	else {
		while (uwsgi_sock) {
			old_uwsgi_sock = uwsgi_sock;
			uwsgi_sock = uwsgi_sock->next;
		}

		uwsgi_sock = uwsgi_malloc(sizeof(struct uwsgi_fastrouter_socket));
		old_uwsgi_sock->next = uwsgi_sock;
	}

	memset(uwsgi_sock, 0, sizeof(struct uwsgi_fastrouter_socket));
	uwsgi_sock->name = name;

	return uwsgi_sock;
}
struct uwsgi_mule_farm *uwsgi_mule_farm_new(struct uwsgi_mule_farm **umf, struct uwsgi_mule *um) {

	struct uwsgi_mule_farm *uwsgi_mf = *umf, *old_umf;

	if (!uwsgi_mf) {
		*umf = uwsgi_malloc(sizeof(struct uwsgi_mule_farm));
		uwsgi_mf = *umf;
	}
	else {
		while (uwsgi_mf) {
			old_umf = uwsgi_mf;
			uwsgi_mf = uwsgi_mf->next;
		}

		uwsgi_mf = uwsgi_malloc(sizeof(struct uwsgi_mule_farm));
		old_umf->next = uwsgi_mf;
	}

	uwsgi_mf->mule = um;
	uwsgi_mf->next = NULL;

	return uwsgi_mf;
}
char *uwsgi_concat2(char *one, char *two) {

	char *buf;
	size_t len = strlen(one) + strlen(two) + 1;

	buf = uwsgi_malloc(len);
	buf[len - 1] = 0;

	memcpy(buf, one, strlen(one));
	memcpy(buf + strlen(one), two, strlen(two));

	return buf;
}
char *uwsgi_concat2nn(char *one, int s1, char *two, int s2, int *len) {

	char *buf;
	*len = s1 + s2 + 1;

	buf = uwsgi_malloc(*len);
	buf[*len - 1] = 0;

	memcpy(buf, one, s1);
	memcpy(buf + s1, two, s2);

	return buf;
}
void runqueue_push(struct wsgi_request *wsgi_req) {

	struct uwsgi_async_request *uar;

	if (uwsgi.async_runqueue == NULL) {
		// empty runqueue, create a new one
		uwsgi.async_runqueue = uwsgi_malloc(sizeof(struct uwsgi_async_request));
		uwsgi.async_runqueue->next = NULL;
		uwsgi.async_runqueue->prev = NULL;
		uwsgi.async_runqueue->wsgi_req = wsgi_req;
		uwsgi.async_runqueue_last = uwsgi.async_runqueue;
	}
	else {
		uar = uwsgi_malloc(sizeof(struct uwsgi_async_request));
		uar->prev = uwsgi.async_runqueue_last;
		uar->next = NULL;
		uar->wsgi_req = wsgi_req;
		uwsgi.async_runqueue_last->next = uar;
		uwsgi.async_runqueue_last = uar;
	}

	uwsgi.async_runqueue_cnt++;
}
struct uwsgi_socket *uwsgi_new_shared_socket(char *name) {

	struct uwsgi_socket *uwsgi_sock = uwsgi.shared_sockets, *old_uwsgi_sock;

	if (!uwsgi_sock) {
		uwsgi.shared_sockets = uwsgi_malloc(sizeof(struct uwsgi_socket));
		uwsgi_sock = uwsgi.shared_sockets;
	}
	else {
		while (uwsgi_sock) {
			old_uwsgi_sock = uwsgi_sock;
			uwsgi_sock = uwsgi_sock->next;
		}

		uwsgi_sock = uwsgi_malloc(sizeof(struct uwsgi_socket));
		old_uwsgi_sock->next = uwsgi_sock;
	}

	memset(uwsgi_sock, 0, sizeof(struct uwsgi_socket));
	uwsgi_sock->name = name;
	uwsgi_sock->fd = -1;

	return uwsgi_sock;
}
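/*
 * Usage sketch (an assumption for illustration, not taken from the option
 * parser): uwsgi_new_shared_socket() and uwsgi_fastrouter_new_socket() both
 * append a zeroed node to the tail of a singly linked list and only store
 * the name pointer, so the caller must pass memory that stays valid for the
 * lifetime of the socket, e.g. a uwsgi_str() copy of the option value.
 */
static void example_register_shared_socket(char *value) {
	// duplicate the option value so the list node does not point into a transient buffer
	struct uwsgi_socket *us = uwsgi_new_shared_socket(uwsgi_str(value));
	uwsgi_log("registered shared socket %s (fd not yet bound: %d)\n", us->name, us->fd);
}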
static int uwsgi_rados_read_sync(struct wsgi_request *wsgi_req, rados_ioctx_t ctx, const char *key, uint64_t off, uint64_t remains, size_t bufsize) {
	char *buf = uwsgi_malloc(UMIN(remains, bufsize));
	while (remains > 0) {
		int rlen = rados_read(ctx, key, buf, UMIN(remains, bufsize), off);
		if (rlen <= 0) goto end;
		if (uwsgi_response_write_body_do(wsgi_req, buf, rlen)) goto end;
		remains -= rlen;
		off += rlen;
	}
	free(buf);
	return 0;
end:
	free(buf);
	return -1;
}
char *uwsgi_concat2n(char *one, int s1, char *two, int s2) {

	char *buf;
	size_t len = s1 + s2 + 1;

	buf = uwsgi_malloc(len);
	buf[len - 1] = 0;

	memcpy(buf, one, s1);
	memcpy(buf + s1, two, s2);

	return buf;
}
static int uwsgi_rados_delete(struct wsgi_request *wsgi_req, rados_ioctx_t ctx, char *key, int timeout) {
	if (uwsgi.async < 1) {
		return rados_remove(ctx, key);
	}

	struct uwsgi_rados_io *urio = &urados.urio[wsgi_req->async_id];
	int ret = -1;

	// increase request counter
	pthread_mutex_lock(&urio->mutex);
	urio->rid++;
	pthread_mutex_unlock(&urio->mutex);

	struct uwsgi_rados_cb *urcb = uwsgi_malloc(sizeof(struct uwsgi_rados_cb));
	// map the current request id to the callback
	urcb->rid = urio->rid;
	// map urio to the callback
	urcb->urio = urio;

	rados_completion_t comp;
	// we use the safe cb here
	if (rados_aio_create_completion(urcb, NULL, uwsgi_rados_read_async_cb, &comp) < 0) {
		free(urcb);
		goto end;
	}
	if (rados_aio_remove(ctx, key, comp) < 0) {
		free(urcb);
		rados_aio_release(comp);
		goto end;
	}

	// wait for the callback to be executed
	if (uwsgi.wait_read_hook(urio->fds[0], timeout) <= 0) {
		rados_aio_release(comp);
		goto end;
	}
	char ack = 1;
	if (read(urio->fds[0], &ack, 1) != 1) {
		rados_aio_release(comp);
		uwsgi_error("uwsgi_rados_delete()/read()");
		goto end;
	}

	if (rados_aio_is_safe_and_cb(comp)) {
		ret = rados_aio_get_return_value(comp);
	}
	rados_aio_release(comp);
end:
	return ret;
}
char *uwsgi_concat4n(char *one, int s1, char *two, int s2, char *three, int s3, char *four, int s4) {

	char *buf;
	size_t len = s1 + s2 + s3 + s4 + 1;

	buf = uwsgi_malloc(len);
	buf[len - 1] = 0;

	memcpy(buf, one, s1);
	memcpy(buf + s1, two, s2);
	memcpy(buf + s1 + s2, three, s3);
	memcpy(buf + s1 + s2 + s3, four, s4);

	return buf;
}
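/*
 * A minimal sketch of the uwsgi_concat* helpers shown above. They allocate
 * with uwsgi_malloc() (assumed here, as elsewhere in this code, to abort on
 * allocation failure rather than return NULL), always NUL-terminate the
 * result, and leave ownership with the caller. The strings and the function
 * name are made up for the example.
 */
static void example_concat_usage(void) {
	// NUL-terminated inputs
	char *greeting = uwsgi_concat2("hello ", "world");
	// length-delimited inputs: the byte counts are passed explicitly
	char *path = uwsgi_concat4n("/var", 4, "/www", 4, "/app", 4, "/index.html", 11);
	uwsgi_log("%s -> %s\n", greeting, path);
	free(greeting);
	free(path);
}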
static void uwsgi_alarm_curl_loop(struct uwsgi_thread *ut) {
	int interesting_fd;
	ut->buf = uwsgi_malloc(uwsgi.log_master_bufsize);

	CURL *curl = curl_easy_init();
	// ARGH !!!
	if (!curl) return;

	curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]);
	curl_easy_setopt(curl, CURLOPT_TIMEOUT, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]);
	curl_easy_setopt(curl, CURLOPT_READFUNCTION, uwsgi_alarm_curl_read_callback);
	curl_easy_setopt(curl, CURLOPT_READDATA, ut);
	curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);
	curl_easy_setopt(curl, CURLOPT_POST, 1L);

	struct curl_slist *expect = NULL;
	expect = curl_slist_append(expect, "Expect:");
	curl_easy_setopt(curl, CURLOPT_HTTPHEADER, expect);

	struct uwsgi_alarm_curl_config *uacc = (struct uwsgi_alarm_curl_config *) ut->data;
	char *opts = uwsgi_str(uacc->arg);

	// fill curl options
	char *ctx = NULL;
	char *p = strtok_r(opts, ";", &ctx);
	while (p) {
		uwsgi_alarm_curl_setopt(curl, uwsgi_str(p), uacc);
		p = strtok_r(NULL, ";", &ctx);
	}

	for (;;) {
		int ret = event_queue_wait(ut->queue, -1, &interesting_fd);
		if (ret < 0) return;
		if (ret == 0) continue;
		if (interesting_fd != ut->pipe[1]) continue;

		ssize_t rlen = read(ut->pipe[1], ut->buf, uwsgi.log_master_bufsize);
		if (rlen <= 0) continue;

		ut->pos = 0;
		ut->len = (size_t) rlen;
		ut->custom0 = 0;
		curl_easy_setopt(curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t) ut->len);
		CURLcode res = curl_easy_perform(curl);
		if (res != CURLE_OK) {
			uwsgi_log_alarm("-curl] curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
		}
	}
}
static void fiber_init_apps(void) {

	if (!ufiber.enabled) return;

	if (uwsgi.async <= 1) {
		uwsgi_log("the fiber loop engine requires async mode\n");
		exit(1);
	}

	ufiber.fib = uwsgi_malloc(sizeof(VALUE) * uwsgi.async);

	uwsgi.schedule_to_main = fiber_schedule_to_main;
	uwsgi.schedule_to_req = fiber_schedule_to_req;

	ur.unprotected = 1;

	uwsgi_log("*** fiber suspend engine enabled ***\n");
}