void uwsgi_setup_workers() { int i, j; // allocate shared memory for workers + master uwsgi.workers = (struct uwsgi_worker *) uwsgi_calloc_shared(sizeof(struct uwsgi_worker) * (uwsgi.numproc + 1 + uwsgi.grunt)); for (i = 0; i <= uwsgi.numproc; i++) { // allocate memory for apps uwsgi.workers[i].apps = (struct uwsgi_app *) uwsgi_calloc_shared(sizeof(struct uwsgi_app) * uwsgi.max_apps); // allocate memory for cores uwsgi.workers[i].cores = (struct uwsgi_core *) uwsgi_calloc_shared(sizeof(struct uwsgi_core) * uwsgi.cores); // this is a trick for avoiding too much memory areas void *ts = uwsgi_calloc_shared(sizeof(void *) * uwsgi.max_apps * uwsgi.cores); // add 4 bytes for uwsgi header void *buffers = uwsgi_malloc_shared((uwsgi.buffer_size+4) * uwsgi.cores); void *hvec = uwsgi_malloc_shared(sizeof(struct iovec) * uwsgi.vec_size * uwsgi.cores); void *post_buf = NULL; if (uwsgi.post_buffering > 0) post_buf = uwsgi_malloc_shared(uwsgi.post_buffering_bufsize * uwsgi.cores); for (j = 0; j < uwsgi.cores; j++) { // allocate shared memory for thread states (required for some language, like python) uwsgi.workers[i].cores[j].ts = ts + ((sizeof(void *) * uwsgi.max_apps) * j); // raw per-request buffer (+4 bytes for uwsgi header) uwsgi.workers[i].cores[j].buffer = buffers + ((uwsgi.buffer_size+4) * j); // iovec for uwsgi vars uwsgi.workers[i].cores[j].hvec = hvec + ((sizeof(struct iovec) * uwsgi.vec_size) * j); if (post_buf) uwsgi.workers[i].cores[j].post_buf = post_buf + (uwsgi.post_buffering_bufsize * j); } // master does not need to following steps... 
if (i == 0) continue; uwsgi.workers[i].signal_pipe[0] = -1; uwsgi.workers[i].signal_pipe[1] = -1; snprintf(uwsgi.workers[i].name, 0xff, "uWSGI worker %d", i); snprintf(uwsgi.workers[i].snapshot_name, 0xff, "uWSGI snapshot %d", i); } uint64_t total_memory = (sizeof(struct uwsgi_app) * uwsgi.max_apps) + (sizeof(struct uwsgi_core) * uwsgi.cores) + (sizeof(void *) * uwsgi.max_apps * uwsgi.cores) + (uwsgi.buffer_size * uwsgi.cores) + (sizeof(struct iovec) * uwsgi.vec_size * uwsgi.cores); if (uwsgi.post_buffering > 0) { total_memory += (uwsgi.post_buffering_bufsize * uwsgi.cores); } total_memory *= (uwsgi.numproc + uwsgi.master_process); if (uwsgi.numproc > 0) uwsgi_log("mapped %llu bytes (%llu KB) for %d cores\n", (unsigned long long) total_memory, (unsigned long long) (total_memory / 1024), uwsgi.cores * uwsgi.numproc); }
// Append a new lock descriptor to the shared registered-locks list and
// return it. The item and its lock area live in shared memory so they are
// visible across fork(). `rw` selects a read-write lock area
// (uwsgi.rwlock_size) over a plain one (uwsgi.lock_size). The `id` pointer
// is stored as-is (not copied). uwsgi_malloc_shared() never returns NULL
// (it aborts on failure), so registration cannot fail.
static struct uwsgi_lock_item *uwsgi_register_lock(char *id, int rw) {

	// build the node once; both the empty-list and append cases share it
	struct uwsgi_lock_item *item = uwsgi_malloc_shared(sizeof(struct uwsgi_lock_item));
	item->id = id;
	item->pid = 0;
	item->rw = rw;
	if (rw) {
		item->lock_ptr = uwsgi_malloc_shared(uwsgi.rwlock_size);
	}
	else {
		item->lock_ptr = uwsgi_malloc_shared(uwsgi.lock_size);
	}
	item->next = NULL;

	// empty registry: the new item becomes the head
	if (!uwsgi.registered_locks) {
		uwsgi.registered_locks = item;
		return item;
	}

	// otherwise walk to the tail and link it there
	struct uwsgi_lock_item *tail = uwsgi.registered_locks;
	while (tail->next) {
		tail = tail->next;
	}
	tail->next = item;
	return item;
}