static inline void *_mk_event_loop_create(int size) { mk_event_ctx_t *ctx; /* Main event context */ ctx = mk_mem_malloc_z(sizeof(mk_event_ctx_t)); if (!ctx) { return NULL; } /* Create the epoll instance */ ctx->efd = epoll_create1(EPOLL_CLOEXEC); if (ctx->efd == -1) { mk_libc_error("epoll_create"); mk_mem_free(ctx); return NULL; } /* Allocate space for events queue */ ctx->events = mk_mem_malloc_z(sizeof(struct epoll_event) * size); if (!ctx->events) { close(ctx->efd); mk_mem_free(ctx); return NULL; } ctx->queue_size = size; return ctx; }
/*
 * Build the kqueue(2) backend context for a new event loop: allocate the
 * context structure, create the kernel queue and reserve the array of
 * 'size' kevent slots. Returns the context, or NULL on failure with all
 * partial resources released.
 */
static inline void *_mk_event_loop_create(int size)
{
    struct mk_event_ctx *evc;

    /* Main event context */
    evc = mk_mem_malloc_z(sizeof(struct mk_event_ctx));
    if (evc == NULL) {
        return NULL;
    }

    /* Create the kqueue instance */
    evc->kfd = kqueue();
    if (evc->kfd == -1) {
        mk_libc_error("kqueue");
        mk_mem_free(evc);
        return NULL;
    }

    /* Allocate space for events queue */
    evc->events = mk_mem_malloc_z(sizeof(struct kevent) * size);
    if (evc->events == NULL) {
        close(evc->kfd);
        mk_mem_free(evc);
        return NULL;
    }
    evc->queue_size = size;

    return evc;
}
/* This function is called when a thread is created */ void mk_cache_worker_init() { char *cache_error; mk_ptr_t *p_tmp; /* Cache header request -> last modified */ p_tmp = mk_mem_malloc_z(sizeof(mk_ptr_t)); p_tmp->data = mk_mem_malloc_z(32); p_tmp->len = -1; MK_TLS_SET(mk_tls_cache_header_lm, p_tmp); /* Cache header request -> content length */ p_tmp = mk_mem_malloc_z(sizeof(mk_ptr_t)); p_tmp->data = mk_mem_malloc_z(MK_UTILS_INT2MKP_BUFFER_LEN); p_tmp->len = -1; MK_TLS_SET(mk_tls_cache_header_cl, p_tmp); /* Cache gmtime buffer */ MK_TLS_SET(mk_tls_cache_gmtime, mk_mem_malloc(sizeof(struct tm))); /* Cache the most used text representations of utime2gmt */ MK_TLS_SET(mk_tls_cache_gmtext, mk_mem_malloc_z(sizeof(struct mk_gmt_cache) * MK_GMT_CACHES)); /* Cache buffer for strerror_r(2) */ cache_error = mk_mem_malloc(MK_UTILS_ERROR_SIZE); pthread_setspecific(mk_utils_error_key, (void *) cache_error); /* Virtual hosts: initialize per thread-vhost data */ mk_vhost_fdt_worker_init(); }
static inline void *_mk_event_loop_create(int size) { struct mk_event_ctx *ctx; /* Override caller 'size', we always use FD_SETSIZE */ size = FD_SETSIZE; /* Main event context */ ctx = mk_mem_malloc_z(sizeof(struct mk_event_ctx)); if (!ctx) { return NULL; } FD_ZERO(&ctx->rfds); FD_ZERO(&ctx->wfds); /* Allocate space for events queue, re-use the struct mk_event */ ctx->events = mk_mem_malloc_z(sizeof(struct mk_event *) * size); if (!ctx->events) { mk_mem_free(ctx); return NULL; } /* Fired events (upon select(2) return) */ ctx->fired = mk_mem_malloc_z(sizeof(struct mk_event) * size); if (!ctx->fired) { mk_mem_free(ctx->events); mk_mem_free(ctx); return NULL; } ctx->queue_size = size; return ctx; }
/* Create a new loop */ mk_event_loop_t *mk_event_loop_create(int size) { void *backend; mk_event_loop_t *loop; backend = _mk_event_loop_create(size); if (!backend) { return NULL; } loop = mk_mem_malloc_z(sizeof(mk_event_loop_t)); if (!loop) { return NULL; } loop->events = mk_mem_malloc_z(sizeof(mk_event_t) * size); if (!loop->events) { mk_mem_free(loop); return NULL; } loop->size = size; loop->data = backend; return loop; }
/*
 * Create a mk_iov holding up to 'n' iovec entries; 'offset' reserves and
 * zero-initializes the first entries so they can be patched in later.
 *
 * Returns NULL on allocation failure (fix: the original dereferenced the
 * results of mk_mem_malloc_z/mk_mem_malloc without checking them, unlike
 * the single-allocation variant of this function).
 */
struct mk_iov *mk_iov_create(int n, int offset)
{
    int i;
    struct mk_iov *iov;

    iov = mk_mem_malloc_z(sizeof(struct mk_iov));
    if (!iov) {
        return NULL;
    }

    iov->io = mk_mem_malloc(n * sizeof(struct iovec));
    if (!iov->io) {
        mk_mem_free(iov);
        return NULL;
    }

    iov->buf_to_free = mk_mem_malloc(n * sizeof(void *));
    if (!iov->buf_to_free) {
        mk_mem_free(iov->io);
        mk_mem_free(iov);
        return NULL;
    }

    iov->iov_idx = offset;
    iov->buf_idx = 0;
    iov->total_len = 0;
    iov->size = n;

    /*
     * Make sure to set to zero initial entries when an offset
     * is specified
     */
    if (offset > 0) {
        for (i = 0; i < offset; i++) {
            iov->io[i].iov_base = NULL;
            iov->io[i].iov_len = 0;
        }
    }

    return iov;
}
/*
 * Create a mk_iov with a single allocation: the main struct, the iovec
 * array and the free-buffer pointer array live in one contiguous block
 * (one malloc, one free, better locality).
 *
 * Returns NULL on allocation failure.
 *
 * Fix: interior pointers are now computed through 'char *'. Arithmetic on
 * 'void *' is a GNU extension and undefined in ISO C.
 */
struct mk_iov *mk_iov_create(int n, int offset)
{
    int s_all;
    int s_iovec;
    int s_free_buf;
    void *p;
    struct mk_iov *iov;

    s_all      = sizeof(struct mk_iov);      /* main mk_iov structure */
    s_iovec    = (n * sizeof(struct iovec)); /* iovec array size      */
    s_free_buf = (n * sizeof(void *));       /* free buf array        */

    p = mk_mem_malloc_z(s_all + s_iovec + s_free_buf);
    if (!p) {
        return NULL;
    }

    /* Set pointer addresses inside the single block */
    iov = p;
    iov->io = (struct iovec *) ((char *) p + s_all);
    iov->buf_to_free = (void *) ((char *) p + s_all + s_iovec);

    mk_iov_init(iov, n, offset);
    return iov;
}
/* This function is called when a thread is created */ void mk_cache_thread_init() { mk_pointer *cache_header_lm; mk_pointer *cache_header_cl; mk_pointer *cache_header_ka; mk_pointer *cache_header_ka_max; struct tm *cache_utils_gmtime; struct mk_iov *cache_iov_header; struct mk_gmt_cache *cache_utils_gmt_text; /* Cache header request -> last modified */ cache_header_lm = mk_mem_malloc_z(sizeof(mk_pointer)); cache_header_lm->data = mk_mem_malloc_z(32); cache_header_lm->len = -1; pthread_setspecific(mk_cache_header_lm, (void *) cache_header_lm); /* Cache header request -> content length */ cache_header_cl = mk_mem_malloc_z(sizeof(mk_pointer)); cache_header_cl->data = mk_mem_malloc_z(MK_UTILS_INT2MKP_BUFFER_LEN); cache_header_cl->len = -1; pthread_setspecific(mk_cache_header_cl, (void *) cache_header_cl); /* Cache header response -> keep-alive */ cache_header_ka = mk_mem_malloc_z(sizeof(mk_pointer)); mk_string_build(&cache_header_ka->data, &cache_header_ka->len, "Keep-Alive: timeout=%i, max=", config->keep_alive_timeout); pthread_setspecific(mk_cache_header_ka, (void *) cache_header_ka); /* Cache header response -> max=%i */ cache_header_ka_max = mk_mem_malloc_z(sizeof(mk_pointer)); cache_header_ka_max->data = mk_mem_malloc_z(64); cache_header_ka_max->len = 0; pthread_setspecific(mk_cache_header_ka_max, (void *) cache_header_ka_max); /* Cache iov header struct */ cache_iov_header = mk_iov_create(32, 0); pthread_setspecific(mk_cache_iov_header, (void *) cache_iov_header); /* Cache gmtime buffer */ cache_utils_gmtime = mk_mem_malloc(sizeof(struct tm)); pthread_setspecific(mk_cache_utils_gmtime, (void *) cache_utils_gmtime); /* Cache the most used text representations of utime2gmt */ cache_utils_gmt_text = mk_mem_malloc_z(sizeof(struct mk_gmt_cache) * MK_GMT_CACHES); pthread_setspecific(mk_cache_utils_gmt_text, (void *) cache_utils_gmt_text); }
/*
 * Decode the URL-encoded (%XX hexadecimal escapes) content of 'uri'.
 *
 * Returns a newly allocated NUL-terminated string (caller frees), or
 * NULL when the URI contains no '%' at all or an escape carries invalid
 * hexadecimal digits.
 *
 * Fix: the output buffer is allocated with uri.len + 1 bytes. The decoded
 * string can be exactly as long as the input (e.g. a trailing '%' that is
 * copied verbatim), so the previous uri.len-byte allocation made the
 * final NUL write one byte out of bounds.
 */
char *mk_utils_url_decode(mk_ptr_t uri)
{
    int tmp, hex_result;
    unsigned int i;
    int buf_idx = 0;
    char *buf;
    char hex[3];

    /* No percent escape at all: nothing to decode */
    if ((tmp = mk_string_char_search(uri.data, '%', uri.len)) < 0) {
        return NULL;
    }

    i = tmp;

    /* +1 for the trailing NUL terminator */
    buf = mk_mem_malloc_z(uri.len + 1);

    if (i > 0) {
        /* Copy the verbatim prefix that precedes the first '%' */
        strncpy(buf, uri.data, i);
        buf_idx = i;
    }

    while (i < uri.len) {
        /* A '%' is decoded only when two more bytes follow it */
        if (uri.data[i] == '%' && i + 2 < uri.len) {
            memset(hex, '\0', sizeof(hex));
            strncpy(hex, uri.data + i + 1, 2);
            hex[2] = '\0';

            hex_result = mk_utils_hex2int(hex, 2);
            if (hex_result != -1) {
                buf[buf_idx] = hex_result;
            }
            else {
                /* Invalid escape sequence: abort and report failure */
                mk_mem_free(buf);
                return NULL;
            }
            i += 2;
        }
        else {
            buf[buf_idx] = uri.data[i];
        }
        i++;
        buf_idx++;
    }
    buf[buf_idx] = '\0';

    return buf;
}
/*
 * Worker event loop for the epoll backend: waits on 'efd' and dispatches
 * each ready descriptor to the read/write/error callbacks in 'handler',
 * invoking the close callback whenever a handler reports failure. Also
 * drives connection-timeout sweeps at config->timeout intervals.
 *
 * This function never returns (infinite while(1) loop); the void* return
 * type exists only to satisfy the thread-entry signature — NOTE(review):
 * confirm callers use it via pthread_create or similar.
 */
void *mk_epoll_init(int efd, mk_epoll_handlers * handler, int max_events)
{
    int i, fd, ret = -1;
    int num_fds;
    int fds_timeout;
    struct epoll_event *events;
    struct sched_list_node *sched;

    /* Get thread conf */
    sched = mk_sched_get_thread_conf();

    /* Absolute time (utime) at which the next timeout sweep is due */
    fds_timeout = log_current_utime + config->timeout;

    /* Event array reused across every epoll_wait() call; freed never,
     * which is fine only because this loop does not exit */
    events = mk_mem_malloc_z(max_events*sizeof(struct epoll_event));

    while (1) {
        ret = -1;
        num_fds = epoll_wait(efd, events, max_events, MK_EPOLL_WAIT_TIMEOUT);

        for (i = 0; i < num_fds; i++) {
            fd = events[i].data.fd;

            /* Dispatch by event type; READ takes precedence, so an fd
             * carrying EPOLLIN|EPOLLHUP goes to the read handler first */
            if (events[i].events & EPOLLIN) {
                MK_TRACE("[FD %i] EPoll Event READ", fd);
                ret = (*handler->read) (fd);
            }
            else if (events[i].events & EPOLLOUT) {
                MK_TRACE("[FD %i] EPoll Event WRITE", fd);
                ret = (*handler->write) (fd);
            }
            else if (events[i].events & (EPOLLHUP | EPOLLERR | EPOLLRDHUP)) {
                MK_TRACE("[FD %i] EPoll Event EPOLLHUP/EPOLLER", fd);
                ret = (*handler->error) (fd);
            }

            /* Any negative handler status forces the connection closed.
             * NOTE(review): if none of the branches above matched, 'ret'
             * still holds the previous iteration's value — confirm this
             * cannot close the wrong fd */
            if (ret < 0) {
                MK_TRACE("[FD %i] Epoll Event FORCE CLOSE | ret = %i", fd, ret);
                (*handler->close) (fd);
            }
        }

        /* Check timeouts and update next one */
        if (log_current_utime >= fds_timeout) {
            mk_sched_check_timeouts(sched);
            fds_timeout = log_current_utime + config->timeout;
        }
    }
}
static int mk_request_parse(struct client_session *cs) { int i, end; int blocks = 0; struct session_request *sr_node; struct mk_list *sr_list, *sr_head; for (i = 0; i <= cs->body_pos_end; i++) { /* * Pipelining can just exists in a persistent connection or * well known as KeepAlive, so if we are in keepalive mode * we should check if we have multiple request in our body buffer */ end = mk_string_search(cs->body + i, mk_endblock.data, MK_STR_SENSITIVE) + i; if (end < 0) { return -1; } /* Allocating request block */ if (blocks == 0) { sr_node = &cs->sr_fixed; memset(sr_node, '\0', sizeof(struct session_request)); } else { sr_node = mk_mem_malloc_z(sizeof(struct session_request)); } mk_request_init(sr_node); /* We point the block with a mk_ptr_t */ sr_node->body.data = cs->body + i; sr_node->body.len = end - i; /* Method, previous catch in mk_http_pending_request */ if (i == 0) { sr_node->method = cs->first_method; } else { sr_node->method = mk_http_method_get(sr_node->body.data); } /* Looking for POST data */ if (sr_node->method == MK_HTTP_METHOD_POST) { int offset; offset = end + mk_endblock.len; sr_node->data = mk_method_get_data(cs->body + offset, cs->body_length - offset); } /* Increase index to the end of the current block */ i = (end + mk_endblock.len) - 1; /* Link block */ mk_list_add(&sr_node->_head, &cs->request_list); /* Update counter */ blocks++; } /* DEBUG BLOCKS struct mk_list *head; struct session_request *entry; printf("\n*******************\n"); mk_list_foreach(head, &cs->request_list) { entry = mk_list_entry(head, struct session_request, _head); mk_ptr_print(entry->body); fflush(stdout); } */ /* Checking pipelining connection */ if (blocks > 1) { sr_list = &cs->request_list; mk_list_foreach(sr_head, sr_list) { sr_node = mk_list_entry(sr_head, struct session_request, _head); /* Pipelining request must use GET or HEAD methods */ if (sr_node->method != MK_HTTP_METHOD_GET && sr_node->method != MK_HTTP_METHOD_HEAD) { return -1; } }
/*
 * MAIN: parse command line options, load the server configuration,
 * initialize every subsystem (signals, scheduler, plugins, listening
 * socket, workers) and finally enter the server accept loop, which only
 * returns on shutdown.
 */
int main(int argc, char **argv)
{
    int opt, run_daemon = 0;
    char *file_config = NULL;

    static const struct option long_opts[] = {
        { "configdir", required_argument, NULL, 'c' },
        { "daemon", no_argument, NULL, 'D' },
        { "version", no_argument, NULL, 'v' },
        { "help", no_argument, NULL, 'h' },
        { NULL, 0, NULL, 0 }
    };

    while ((opt = getopt_long(argc, argv, "DSvhc:", long_opts, NULL)) != -1) {
        switch (opt) {
        case 'v':
            mk_version();
            exit(EXIT_SUCCESS);
        case 'h':
            /* NOTE(review): no break — assumes mk_help() exits the
             * process; confirm, otherwise 'h' falls through to 'D' */
            mk_help(EXIT_SUCCESS);
        case 'D':
            run_daemon = 1;
            break;
        case 'c':
            file_config = optarg;
            break;
        case '?':
            printf("Monkey: Invalid option or option needs an argument.\n");
            mk_help(EXIT_FAILURE);
        }
    }

    /* setup basic configurations */
    config = mk_mem_malloc_z(sizeof(struct server_config));

    /* configuration directory: command-line override or compiled default */
    if (!file_config)
        config->file_config = MONKEY_PATH_CONF;
    else
        config->file_config = file_config;

    if (run_daemon)
        config->is_daemon = MK_TRUE;
    else
        config->is_daemon = MK_FALSE;

#ifdef TRACE
    monkey_init_time = time(NULL);
    MK_TRACE("Monkey TRACE is enabled");
    env_trace_filter = getenv("MK_TRACE_FILTER");
    pthread_mutex_init(&mutex_trace, (pthread_mutexattr_t *) NULL);
#endif

    mk_version();
    mk_signal_init();
    mk_config_start_configure();
    mk_sched_init();
    mk_plugin_init();

    /* Server listening socket */
    config->server_fd = mk_socket_server(config->serverport, config->listen_addr);

    /* Running Monkey as daemon */
    if (config->is_daemon == MK_TRUE) {
        mk_utils_set_daemon();
    }

    /* Register PID of Monkey */
    mk_utils_register_pid();

    /* Workers: logger and clock */
    mk_utils_worker_spawn((void *) mk_clock_worker_init);

    /* Init mk pointers */
    mk_mem_pointers_init();

    /* Init thread keys */
    mk_thread_keys_init();

    /* Change process owner */
    mk_user_set_uidgid();

    /* Configuration sanity check */
    mk_config_sanity_check();

    /* Print server details */
    mk_details();

    /* Invoke Plugin PRCTX hooks */
    mk_plugin_core_process();

    /* Launch monkey http workers */
    mk_server_launch_workers();

    /* Server loop, let's listen for incoming clients */
    mk_server_loop(config->server_fd);

    mk_mem_free(config);
    return 0;
}
/*
 * MAIN: parse command line options (including port/workers overrides and
 * an alternate server configuration file), load the configuration, bring
 * up all subsystems, wait for every scheduler worker to report ready and
 * then enter the server accept loop, which only returns on shutdown.
 */
int main(int argc, char **argv)
{
    int opt;
    int port_override = -1;      /* -p: TCP port override, -1 = unset */
    int workers_override = -1;   /* -w: worker count override, -1 = unset */
    int run_daemon = 0;
    char *path_config = NULL;
    char *server_config = NULL;

    static const struct option long_opts[] = {
        { "configdir", required_argument, NULL, 'c' },
        { "serverconf",required_argument, NULL, 's' },
        { "build", no_argument, NULL, 'b' },
        { "daemon", no_argument, NULL, 'D' },
        { "port", required_argument, NULL, 'p' },
        { "workers", required_argument, NULL, 'w' },
        { "version", no_argument, NULL, 'v' },
        { "help", no_argument, NULL, 'h' },
        { NULL, 0, NULL, 0 }
    };

    while ((opt = getopt_long(argc, argv, "bDSvhp:w:c:s:", long_opts, NULL)) != -1) {
        switch (opt) {
        case 'b':
            mk_build_info();
            exit(EXIT_SUCCESS);
        case 'v':
            mk_version();
            exit(EXIT_SUCCESS);
        case 'h':
            /* NOTE(review): no break — assumes mk_help() exits the
             * process; confirm, otherwise 'h' falls through to 'D' */
            mk_help(EXIT_SUCCESS);
        case 'D':
            run_daemon = 1;
            break;
        case 'p':
            port_override = atoi(optarg);
            break;
        case 'w':
            workers_override = atoi(optarg);
            break;
        case 'c':
            path_config = optarg;
            break;
        case 's':
            server_config = optarg;
            break;
        case '?':
            mk_help(EXIT_FAILURE);
        }
    }

    /* setup basic configurations */
    config = mk_mem_malloc_z(sizeof(struct server_config));

    /* set configuration path */
    if (!path_config) {
        config->path_config = MONKEY_PATH_CONF;
    }
    else {
        config->path_config = path_config;
    }

    /* set target configuration file for the server */
    if (!server_config) {
        config->server_config = M_DEFAULT_CONFIG_FILE;
    }
    else {
        config->server_config = server_config;
    }

    if (run_daemon)
        config->is_daemon = MK_TRUE;
    else
        config->is_daemon = MK_FALSE;

#ifdef TRACE
    monkey_init_time = time(NULL);
    MK_TRACE("Monkey TRACE is enabled");
    env_trace_filter = getenv("MK_TRACE_FILTER");
    pthread_mutex_init(&mutex_trace, (pthread_mutexattr_t *) NULL);
#endif

    mk_version();
    mk_signal_init();

#ifdef LINUX_TRACE
    mk_info("Linux Trace enabled");
#endif

    /* Override number of thread workers (-1 means: use config default) */
    if (workers_override >= 0) {
        config->workers = workers_override;
    }
    else {
        config->workers = -1;
    }

    /* Core and Scheduler setup */
    mk_config_start_configure();
    mk_sched_init();

    /* Clock init that must happen before starting threads */
    mk_clock_sequential_init();

    /* Load plugins */
    mk_plugin_init();
    mk_plugin_read_config();

    /* Override TCP port if it was set in the command line */
    if (port_override > 0) {
        config->serverport = port_override;
    }

    /* Server listening socket */
    config->server_fd = mk_socket_server(config->serverport, config->listen_addr);

    /* Running Monkey as daemon */
    if (config->is_daemon == MK_TRUE) {
        mk_utils_set_daemon();
    }

    /* Register PID of Monkey */
    mk_utils_register_pid();

    /* Workers: logger and clock */
    mk_utils_worker_spawn((void *) mk_clock_worker_init, NULL);

    /* Init mk pointers */
    mk_mem_pointers_init();

    /* Init thread keys */
    mk_thread_keys_init();

    /* Change process owner */
    mk_user_set_uidgid();

    /* Configuration sanity check */
    mk_config_sanity_check();

    /* Print server details */
    mk_details();

    /* Invoke Plugin PRCTX hooks */
    mk_plugin_core_process();

    /* Launch monkey http workers */
    mk_server_launch_workers();

    /* Wait until all workers report as ready: poll the shared scheduler
     * list under mutex_worker_init, sleeping 10ms between rounds */
    while (1) {
        int i, ready = 0;

        pthread_mutex_lock(&mutex_worker_init);
        for (i = 0; i < config->workers; i++) {
            if (sched_list[i].initialized)
                ready++;
        }
        pthread_mutex_unlock(&mutex_worker_init);

        if (ready == config->workers)
            break;
        usleep(10000);
    }

    /* Server loop, let's listen for incoming clients */
    mk_server_loop(config->server_fd);

    mk_mem_free(config);
    return 0;
}
/* * Initialize the global Event structure used by threads to access the * global file descriptor table. */ int mk_event_initalize() { int i; int ret; mk_event_fdt_t *efdt; struct rlimit rlim; /* * Event File Descriptor Table (EFDT) * ---------------------------------- * The main requirement for this implementation is that we need to maintain * a state of each file descriptor registered events, such as READ, WRITE, * SLEEPING, etc. This is required not by Monkey core but is a fundamental * piece to let plugins perform safe operations over file descriptors and * their events. * * The EFDT is created in the main process context and aims to be used by * every Worker thread. Once a connection arrives and it's notified to the * Worker, this last one will register the file descriptor status on the * EFDT. * * The EFDT is a fixed size array that contains entries for each possible * file descriptor number assigned for a TCP connection. In order to make * sure the assigned number can be used as an index of the array, we have * verified that the Linux Kernel always assigns a number in a range as * defined in __alloc_fd() on file file.c: * * start: > 2 * * end : rlim.rlim.cur * * The maximum number assigned is always the process soft limit for * RLIMIT_NOFILE, so basically we are safe trusting on this model. * * Note: as we make sure each file descriptor number is only handled by one * thread, there is no race conditions. */ efdt = mk_mem_malloc_z(sizeof(mk_event_fdt_t)); if (!efdt) { mk_err("Event: could not allocate memory for event FD Table"); return -1; } /* * Despites what config->server_capacity says, we need to prepare to handle * a high number of file descriptors as process limit allows. 
*/ ret = getrlimit(RLIMIT_NOFILE, &rlim); if (ret == -1) { mk_libc_error("getrlimit"); return -1; } efdt->size = rlim.rlim_cur; efdt->states = mk_mem_malloc_z(sizeof(struct mk_event_fd_state) * efdt->size); if (!efdt->states) { mk_err("Event: could not allocate memory for events states on FD Table"); return -1; } /* mark all file descriptors as available */ for (i = 0; i < efdt->size; i++) { efdt->states[i].fd = -1; efdt->states[i].mask = MK_EVENT_EMPTY; } mk_events_fdt = efdt; return 0; }