/*
 * Resize an existing mk_iov to hold 'new_size' entries.
 *
 * We do not perform a memory realloc because struct mk_iov holds
 * self-references through its 'io' and 'buf_to_free' pointers, so we
 * create a new mk_iov and migrate the data instead.
 *
 * Returns the new mk_iov on success, or NULL on allocation failure or
 * when 'new_size' is too small to hold the entries already stored in
 * 'mk_io'. On success the caller owns the returned iov and remains
 * responsible for releasing the old 'mk_io' container.
 */
struct mk_iov *mk_iov_realloc(struct mk_iov *mk_io, int new_size)
{
    int i;
    struct mk_iov *iov;

    /*
     * Guard: copying iov_idx / buf_idx entries into arrays of only
     * 'new_size' slots would overflow the new buffers. Callers already
     * check for NULL, so reject the request instead.
     */
    if (new_size < mk_io->iov_idx || new_size < mk_io->buf_idx) {
        return NULL;
    }

    iov = mk_iov_create(new_size, 0);
    if (!iov) {
        return NULL;
    }

    /* Migrate data */
    iov->iov_idx   = mk_io->iov_idx;
    iov->buf_idx   = mk_io->buf_idx;
    iov->size      = new_size;
    iov->total_len = mk_io->total_len;

    for (i = 0; i < mk_io->iov_idx; i++) {
        iov->io[i].iov_base = mk_io->io[i].iov_base;
        iov->io[i].iov_len  = mk_io->io[i].iov_len;
    }

    for (i = 0; i < mk_io->buf_idx; i++) {
        iov->buf_to_free[i] = mk_io->buf_to_free[i];
    }

    return iov;
}
/* This function is called when a thread is created */ void mk_cache_worker_init() { char *cache_error; mk_ptr_t *p_tmp; /* Cache header request -> last modified */ p_tmp = mk_mem_malloc_z(sizeof(mk_ptr_t)); p_tmp->data = mk_mem_malloc_z(32); p_tmp->len = -1; MK_TLS_SET(mk_tls_cache_header_lm, p_tmp); /* Cache header request -> content length */ p_tmp = mk_mem_malloc_z(sizeof(mk_ptr_t)); p_tmp->data = mk_mem_malloc_z(MK_UTILS_INT2MKP_BUFFER_LEN); p_tmp->len = -1; MK_TLS_SET(mk_tls_cache_header_cl, p_tmp); /* Cache iov header struct */ MK_TLS_SET(mk_tls_cache_iov_header, mk_iov_create(32, 0)); /* Cache gmtime buffer */ MK_TLS_SET(mk_tls_cache_gmtime, mk_mem_malloc(sizeof(struct tm))); /* Cache the most used text representations of utime2gmt */ MK_TLS_SET(mk_tls_cache_gmtext, mk_mem_malloc_z(sizeof(struct mk_gmt_cache) * MK_GMT_CACHES)); /* Cache buffer for strerror_r(2) */ cache_error = mk_mem_malloc(MK_UTILS_ERROR_SIZE); pthread_setspecific(mk_utils_error_key, (void *) cache_error); /* Virtual hosts: initialize per thread-vhost data */ mk_vhost_fdt_worker_init(); }
/* This function is called when a thread is created */ void mk_cache_thread_init() { mk_pointer *cache_header_lm; mk_pointer *cache_header_cl; mk_pointer *cache_header_ka; mk_pointer *cache_header_ka_max; struct tm *cache_utils_gmtime; struct mk_iov *cache_iov_header; struct mk_gmt_cache *cache_utils_gmt_text; /* Cache header request -> last modified */ cache_header_lm = mk_mem_malloc_z(sizeof(mk_pointer)); cache_header_lm->data = mk_mem_malloc_z(32); cache_header_lm->len = -1; pthread_setspecific(mk_cache_header_lm, (void *) cache_header_lm); /* Cache header request -> content length */ cache_header_cl = mk_mem_malloc_z(sizeof(mk_pointer)); cache_header_cl->data = mk_mem_malloc_z(MK_UTILS_INT2MKP_BUFFER_LEN); cache_header_cl->len = -1; pthread_setspecific(mk_cache_header_cl, (void *) cache_header_cl); /* Cache header response -> keep-alive */ cache_header_ka = mk_mem_malloc_z(sizeof(mk_pointer)); mk_string_build(&cache_header_ka->data, &cache_header_ka->len, "Keep-Alive: timeout=%i, max=", config->keep_alive_timeout); pthread_setspecific(mk_cache_header_ka, (void *) cache_header_ka); /* Cache header response -> max=%i */ cache_header_ka_max = mk_mem_malloc_z(sizeof(mk_pointer)); cache_header_ka_max->data = mk_mem_malloc_z(64); cache_header_ka_max->len = 0; pthread_setspecific(mk_cache_header_ka_max, (void *) cache_header_ka_max); /* Cache iov header struct */ cache_iov_header = mk_iov_create(32, 0); pthread_setspecific(mk_cache_iov_header, (void *) cache_iov_header); /* Cache gmtime buffer */ cache_utils_gmtime = mk_mem_malloc(sizeof(struct tm)); pthread_setspecific(mk_cache_utils_gmtime, (void *) cache_utils_gmtime); /* Cache the most used text representations of utime2gmt */ cache_utils_gmt_text = mk_mem_malloc_z(sizeof(struct mk_gmt_cache) * MK_GMT_CACHES); pthread_setspecific(mk_cache_utils_gmt_text, (void *) cache_utils_gmt_text); }