/*
 * Allocate a new mk_iov context with room for 'n' iovec entries. The first
 * 'offset' entries are reserved and zeroed so the caller can backfill them
 * later (iov_idx starts at 'offset').
 *
 * Returns the new context, or NULL on allocation failure (nothing leaks).
 */
struct mk_iov *mk_iov_create(int n, int offset)
{
    int i;
    struct mk_iov *iov;

    iov = mk_mem_malloc_z(sizeof(struct mk_iov));
    if (!iov) {
        return NULL;
    }

    iov->io = mk_mem_malloc(n * sizeof(struct iovec));
    if (!iov->io) {
        mk_mem_free(iov);
        return NULL;
    }

    iov->buf_to_free = mk_mem_malloc(n * sizeof(void *));
    if (!iov->buf_to_free) {
        mk_mem_free(iov->io);
        mk_mem_free(iov);
        return NULL;
    }

    iov->iov_idx   = offset;
    iov->buf_idx   = 0;
    iov->total_len = 0;
    iov->size      = n;

    /*
     * Make sure to set to zero initial entries when an offset
     * is specified (loop is a no-op when offset <= 0)
     */
    for (i = 0; i < offset; i++) {
        iov->io[i].iov_base = NULL;
        iov->io[i].iov_len  = 0;
    }

    return iov;
}
/* This function is called when a thread is created */ void mk_cache_worker_init() { char *cache_error; mk_ptr_t *p_tmp; /* Cache header request -> last modified */ p_tmp = mk_mem_malloc_z(sizeof(mk_ptr_t)); p_tmp->data = mk_mem_malloc_z(32); p_tmp->len = -1; MK_TLS_SET(mk_tls_cache_header_lm, p_tmp); /* Cache header request -> content length */ p_tmp = mk_mem_malloc_z(sizeof(mk_ptr_t)); p_tmp->data = mk_mem_malloc_z(MK_UTILS_INT2MKP_BUFFER_LEN); p_tmp->len = -1; MK_TLS_SET(mk_tls_cache_header_cl, p_tmp); /* Cache gmtime buffer */ MK_TLS_SET(mk_tls_cache_gmtime, mk_mem_malloc(sizeof(struct tm))); /* Cache the most used text representations of utime2gmt */ MK_TLS_SET(mk_tls_cache_gmtext, mk_mem_malloc_z(sizeof(struct mk_gmt_cache) * MK_GMT_CACHES)); /* Cache buffer for strerror_r(2) */ cache_error = mk_mem_malloc(MK_UTILS_ERROR_SIZE); pthread_setspecific(mk_utils_error_key, (void *) cache_error); /* Virtual hosts: initialize per thread-vhost data */ mk_vhost_fdt_worker_init(); }
/*
 * Allocate and initialize the per-thread epoll state list, attaching it to
 * the mk_epoll_state_k thread-specific key.
 *
 * Returns the pthread_setspecific(3) status on success, or -1 if the list
 * could not be allocated.
 */
int mk_epoll_state_init()
{
    struct mk_list *state_list = mk_mem_malloc(sizeof(struct mk_list));

    if (!state_list) {
        return -1;
    }

    mk_list_init(state_list);
    return pthread_setspecific(mk_epoll_state_k, (void *) state_list);
}
int mk_user_init(struct client_session *cs, struct session_request *sr) { int limit; const int offset = 2; /* The user is defined after the '/~' string, so offset = 2 */ const int user_len = 255; char user[user_len], *user_uri; struct passwd *s_user; if (sr->uri_processed.len <= 2) { return -1; } limit = mk_string_char_search(sr->uri_processed.data + offset, '/', sr->uri_processed.len); if (limit == -1) { limit = (sr->uri_processed.len) - offset; } if (limit + offset >= (user_len)) { return -1; } memcpy(user, sr->uri_processed.data + offset, limit); user[limit] = '\0'; MK_TRACE("user: '******'", user); /* Check system user */ if ((s_user = getpwnam(user)) == NULL) { mk_request_error(MK_CLIENT_NOT_FOUND, cs, sr); return -1; } if (sr->uri_processed.len > (unsigned int) (offset+limit)) { user_uri = mk_mem_malloc(sr->uri_processed.len); if (!user_uri) { return -1; } memcpy(user_uri, sr->uri_processed.data + (offset + limit), sr->uri_processed.len - offset - limit); user_uri[sr->uri_processed.len - offset - limit] = '\0'; mk_string_build(&sr->real_path.data, &sr->real_path.len, "%s/%s%s", s_user->pw_dir, config->user_dir, user_uri); mk_mem_free(user_uri); } else { mk_string_build(&sr->real_path.data, &sr->real_path.len, "%s/%s", s_user->pw_dir, config->user_dir); } sr->user_home = MK_TRUE; return 0; }
/*
 * Allocate 'size' bytes and return them zero-filled; returns NULL when the
 * underlying allocation fails.
 */
void *mk_mem_malloc_z(const size_t size)
{
    void *p = mk_mem_malloc(size);

    if (p) {
        memset(p, '\0', size);
    }

    return p;
}
/*
 * Copy the region referenced by the mk_pointer into a freshly allocated,
 * NULL-terminated C string. The caller owns (and must free) the result.
 *
 * Returns the new buffer, or NULL on allocation failure.
 */
char *mk_pointer_to_buf(mk_pointer p)
{
    char *buf;

    buf = mk_mem_malloc(p.len + 1);
    if (!buf) {
        return NULL;
    }

    memcpy(buf, p.data, p.len);
    buf[p.len] = '\0';

    return buf;
}
/*
 * Wrap 'data' in a new channel element; returns NULL if the element cannot
 * be allocated.
 */
struct mk_thread_channel_elem *mk_thread_channel_elem_create(void *data)
{
    struct mk_thread_channel_elem *e = mk_mem_malloc(sizeof(*e));

    if (e) {
        e->data = data;
    }

    return e;
}
/* Create a new channel */ struct mk_channel *mk_channel_new(int type, int fd) { struct mk_channel *channel; channel = mk_mem_malloc(sizeof(struct mk_channel)); channel->type = type; channel->fd = fd; mk_list_init(&channel->streams); return channel; }
/* This function is called when a thread is created */ void mk_cache_thread_init() { mk_pointer *cache_header_lm; mk_pointer *cache_header_cl; mk_pointer *cache_header_ka; mk_pointer *cache_header_ka_max; struct tm *cache_utils_gmtime; struct mk_iov *cache_iov_header; struct mk_gmt_cache *cache_utils_gmt_text; /* Cache header request -> last modified */ cache_header_lm = mk_mem_malloc_z(sizeof(mk_pointer)); cache_header_lm->data = mk_mem_malloc_z(32); cache_header_lm->len = -1; pthread_setspecific(mk_cache_header_lm, (void *) cache_header_lm); /* Cache header request -> content length */ cache_header_cl = mk_mem_malloc_z(sizeof(mk_pointer)); cache_header_cl->data = mk_mem_malloc_z(MK_UTILS_INT2MKP_BUFFER_LEN); cache_header_cl->len = -1; pthread_setspecific(mk_cache_header_cl, (void *) cache_header_cl); /* Cache header response -> keep-alive */ cache_header_ka = mk_mem_malloc_z(sizeof(mk_pointer)); mk_string_build(&cache_header_ka->data, &cache_header_ka->len, "Keep-Alive: timeout=%i, max=", config->keep_alive_timeout); pthread_setspecific(mk_cache_header_ka, (void *) cache_header_ka); /* Cache header response -> max=%i */ cache_header_ka_max = mk_mem_malloc_z(sizeof(mk_pointer)); cache_header_ka_max->data = mk_mem_malloc_z(64); cache_header_ka_max->len = 0; pthread_setspecific(mk_cache_header_ka_max, (void *) cache_header_ka_max); /* Cache iov header struct */ cache_iov_header = mk_iov_create(32, 0); pthread_setspecific(mk_cache_iov_header, (void *) cache_iov_header); /* Cache gmtime buffer */ cache_utils_gmtime = mk_mem_malloc(sizeof(struct tm)); pthread_setspecific(mk_cache_utils_gmtime, (void *) cache_utils_gmtime); /* Cache the most used text representations of utime2gmt */ cache_utils_gmt_text = mk_mem_malloc_z(sizeof(struct mk_gmt_cache) * MK_GMT_CACHES); pthread_setspecific(mk_cache_utils_gmt_text, (void *) cache_utils_gmt_text); }
/*
 * Create a new stream instance attached to 'channel' and initialize it via
 * mk_stream_set() with the given buffer, opaque data and callbacks.
 *
 * Returns the stream, or NULL on allocation failure.
 */
struct mk_stream *mk_stream_new(int type, struct mk_channel *channel,
                                void *buffer, size_t size, void *data,
                                void (*cb_finished) (struct mk_stream *),
                                void (*cb_bytes_consumed) (struct mk_stream *, long),
                                void (*cb_exception) (struct mk_stream *, int))
{
    struct mk_stream *stream;

    stream = mk_mem_malloc(sizeof(struct mk_stream));
    if (!stream) {
        return NULL;
    }

    mk_stream_set(stream, type, channel,
                  buffer, size, data,
                  cb_finished, cb_bytes_consumed, cb_exception);

    return stream;
}
/*
 * @METHOD_NAME: chan_create
 * @METHOD_DESC: create a channel(pipe) for dthread communication.
 * @METHOD_PROTO: mk_thread_channel_t *chan_create(int size)
 * @METHOD_PARAM: size the buffered size of the channel.
 * @METHOD_RETURN: returns a new channel, or NULL on allocation failure.
 */
struct mk_thread_channel *mk_thread_channel_create(int size)
{
    struct mk_thread_channel *ch = mk_mem_malloc(sizeof(*ch));

    if (!ch) {
        return NULL;
    }

    /* one extra slot so a full buffer is distinguishable from an empty one */
    ch->size = size + 1;
    ch->used = 0;
    mk_list_init(&ch->bufs);

    /* no dthread attached yet on either end */
    ch->sender   = -1;
    ch->receiver = -1;

    ch->ended = 0;
    ch->done  = 0;

    return ch;
}
/*
 * Concatenate two byte buffers into a single NULL-terminated allocation and
 * store the result in 'p' (p->data owned by the caller, p->len set to the
 * combined length).
 *
 * Returns 0 on success, -1 on a negative length or allocation failure
 * (in which case 'p' is left untouched except for p->data on success paths).
 */
int mk_buffer_cat(mk_ptr_t *p, char *buf1, int len1, char *buf2, int len2)
{
    /* Validate lengths */
    if (mk_unlikely(len1 < 0 || len2 < 0)) {
        return -1;
    }

    /* alloc space: both chunks plus the NULL terminator */
    p->data = (char *) mk_mem_malloc(len1 + len2 + 1);
    if (!p->data) {
        return -1;
    }

    /* copy data */
    memcpy(p->data, buf1, len1);
    memcpy(p->data + len1, buf2, len2);
    p->data[len1 + len2] = '\0';

    /* assign len */
    p->len = len1 + len2;

    return 0;
}
/* * This routine creates a timer, since this select(2) backend aims to be used * in very old systems to be compatible, we cannot trust timerfd_create(2) * will be available (e.g: Cygwin), so our workaround is to create a pipe(2) * and a thread, this thread writes a byte upon the expiration time is reached. */ static inline int _mk_event_timeout_create(struct mk_event_ctx *ctx, int expire, void *data) { int ret; int fd[2]; pid_t tid; struct mk_event *event; struct fd_timer *timer; timer = mk_mem_malloc(sizeof(struct fd_timer)); if (!timer) { return -1; } ret = pipe(fd); if (ret < 0) { mk_mem_free(timer); mk_libc_error("pipe"); return ret; } event = (struct mk_event *) data; event->fd = fd[0]; event->type = MK_EVENT_NOTIFICATION; event->mask = MK_EVENT_EMPTY; _mk_event_add(ctx, fd[0], MK_EVENT_NOTIFICATION, MK_EVENT_READ, data); event->mask = MK_EVENT_READ; /* Compose the timer context, this is released inside the worker thread */ timer->fd = fd[1]; timer->expiration = expire; /* Now the dirty workaround, create a thread */ mk_utils_worker_spawn(_timeout_worker, timer); return fd[0]; }
/*
 * Check whether a directory request needs a trailing-slash redirect.
 * Returns 0 when the URI already ends in '/' (no redirect needed), or -1
 * after sending a 301 Moved redirect to the same URI with '/' appended.
 */
static int mk_http_directory_redirect_check(struct client_session *cs,
                                            struct session_request *sr)
{
    int port_redirect = 0;
    char *host;
    char *location = 0;
    char *real_location = 0;
    unsigned long len;

    /*
     * We have to check if there is a slash at the end of
     * this string. If it doesn't exist, we send a redirection header.
     */
    if (sr->uri_processed.data[sr->uri_processed.len - 1] == '/') {
        return 0;
    }

    /* NOTE(review): host is heap-allocated here and freed below;
     * presumably a NUL-terminated copy of sr->host — confirm in
     * mk_ptr_t_to_buf() */
    host = mk_ptr_t_to_buf(sr->host);

    /*
     * Add ending slash to the location string
     */
    location = mk_mem_malloc(sr->uri_processed.len + 2);
    memcpy(location, sr->uri_processed.data, sr->uri_processed.len);
    location[sr->uri_processed.len]     = '/';
    location[sr->uri_processed.len + 1] = '\0';

    /* FIXME: should we done something similar for SSL = 443 */
    if (sr->host.data && sr->port > 0) {
        if (sr->port != config->standard_port) {
            /* Non-standard port must be preserved in the Location URI */
            port_redirect = sr->port;
        }
    }

    if (port_redirect > 0) {
        mk_string_build(&real_location, &len, "%s://%s:%i%s",
                        config->transport, host, port_redirect, location);
    }
    else {
        mk_string_build(&real_location, &len, "%s://%s%s",
                        config->transport, host, location);
    }

    MK_TRACE("Redirecting to '%s'", real_location);
    mk_mem_free(host);

    /* Compose and send the 301 response headers with the new Location */
    mk_header_set_http_status(sr, MK_REDIR_MOVED);
    sr->headers.content_length = 0;

    mk_ptr_t_reset(&sr->headers.content_type);
    sr->headers.location = real_location;
    sr->headers.cgi = SH_NOCGI;
    sr->headers.pconnections_left =
        (config->max_keep_alive_request - cs->counter_connections);

    mk_header_send(cs->socket, cs, sr);
    mk_server_cork_flag(cs->socket, TCP_CORK_OFF);

    /*
     * we do not free() real_location
     * as it's freed by iov
     */
    mk_mem_free(location);
    /* location pointer now belongs to (and was released by) the header iov */
    sr->headers.location = NULL;

    return -1;
}
static int mk_request_parse(struct client_session *cs) { int i, end; int blocks = 0; struct session_request *sr_node; struct mk_list *sr_list, *sr_head; for (i = 0; i <= cs->body_pos_end; i++) { /* * Pipelining can just exists in a persistent connection or * well known as KeepAlive, so if we are in keepalive mode * we should check if we have multiple request in our body buffer */ if (cs->counter_connections > 0) { /* * Look for CRLFCRLF (\r\n\r\n), maybe some pipelining * request can be involved. */ end = mk_string_search(cs->body + i, mk_endblock.data, MK_STR_SENSITIVE) + i; } else { end = cs->body_pos_end; } if (end < 0) { return -1; } /* Allocating request block */ if (blocks == 0) { sr_node = &cs->sr_fixed; } else { sr_node = mk_mem_malloc(sizeof(struct session_request)); } mk_request_init(sr_node); /* We point the block with a mk_pointer */ sr_node->body.data = cs->body + i; sr_node->body.len = end - i; /* Method, previous catch in mk_http_pending_request */ if (i == 0) { sr_node->method = cs->first_method; } else { sr_node->method = mk_http_method_get(sr_node->body.data); } /* Looking for POST data */ if (sr_node->method == HTTP_METHOD_POST) { int offset; offset = end + mk_endblock.len; sr_node->data = mk_method_get_data(cs->body + offset, cs->body_length - offset); if (sr_node->data.len >= 0) { i += sr_node->data.len; } } /* Increase index to the end of the current block */ i = (end + mk_endblock.len) - 1; /* Link block */ mk_list_add(&sr_node->_head, &cs->request_list); /* Update counter */ blocks++; } /* DEBUG BLOCKS struct mk_list *head; struct session_request *entry; printf("\n*******************\n"); mk_list_foreach(head, &cs->request_list) { entry = mk_list_entry(head, struct session_request, _head); mk_pointer_print(entry->body); fflush(stdout); } */ /* Checking pipelining connection */ if (blocks > 1) { sr_list = &cs->request_list; mk_list_foreach(sr_head, sr_list) { sr_node = mk_list_entry(sr_head, struct session_request, _head); /* Pipelining 
request must use GET or HEAD methods */ if (sr_node->method != HTTP_METHOD_GET && sr_node->method != HTTP_METHOD_HEAD) { return -1; } }