/*
 * ldap_pool_create: return the connections pool matching server:port with
 * the given bind user/password, creating and registering a new pool when no
 * matching one exists.  The whole lookup+insert runs under
 * ldap_connections_pool_mtx, so concurrent callers cannot create duplicates.
 * Returns NULL only on allocation failure.
 */
struct ldap_connections_pool *ldap_pool_create(char *server, int port, char *user, char *password)
{
    struct ldap_connections_pool *pool;
    ci_thread_mutex_lock(&ldap_connections_pool_mtx);
    /* Reuse an existing pool with identical connection parameters, if any.
       NULL user/password are normalized to "" for the comparison. */
    pool = search_ldap_pools(server, port, (user != NULL? user : ""), (password != NULL? password : ""));
    if(pool) {
        ci_thread_mutex_unlock(&ldap_connections_pool_mtx);
        return pool;
    }
    pool = malloc(sizeof(struct ldap_connections_pool));
    if(!pool) {
        ci_thread_mutex_unlock(&ldap_connections_pool_mtx);
        return NULL;
    }
    /* assumes pool->server is CI_MAXHOSTNAMELEN+1 bytes — TODO confirm
       against the struct declaration */
    strncpy(pool->server, server, CI_MAXHOSTNAMELEN);
    pool->server[CI_MAXHOSTNAMELEN]='\0';
    pool->port = port;
    pool->ldapversion = LDAP_VERSION3;
    pool->next = NULL;
    /* assumes pool->user / pool->password are 256 bytes — TODO confirm */
    if(user) {
        strncpy(pool->user,user,256);
        pool->user[255] = '\0';
    }
    else
        pool->user[0] = '\0';
    if(password) {
        strncpy(pool->password,password,256);
        pool->password[255] = '\0';
    }
    else
        pool->password[0] = '\0';
    pool->connections = 0;
    pool->inactive = NULL;
    pool->used = NULL;
    /* Only the plain "ldap" scheme is built here (no ldaps). */
    snprintf(pool->ldap_uri,1024,"%s://%s:%d","ldap",pool->server,pool->port);
    pool->ldap_uri[1023] = '\0';
    ci_thread_mutex_init(&pool->mutex);
#ifdef LDAP_MAX_CONNECTIONS
    /* 0 here presumably means "unlimited"/"not yet configured" —
       TODO confirm against ldap_connection_open's limit check. */
    pool->max_connections = 0;
    ci_thread_cond_init(&pool->pool_cond);
#endif
    add_ldap_pool(pool);
    ci_thread_mutex_unlock(&ldap_connections_pool_mtx);
    return pool;
}
/*
 * wait_for_queue: block the calling thread on the queue's condition
 * variable until another thread signals that a connection was queued.
 * Returns 1 after a successful wait, -1 on any mutex/cond failure.
 */
int wait_for_queue(struct connections_queue *q)
{
    ci_debug_printf(7,"Waiting for a request....\n");
    if (ci_thread_mutex_lock(&(q->cond_mtx)) != 0)
        return -1;
    int wait_failed = (ci_thread_cond_wait(&(q->queue_cond), &(q->cond_mtx)) != 0);
    if (wait_failed) {
        ci_thread_mutex_unlock(&(q->cond_mtx));
        return -1;
    }
    /* Wait succeeded; the result now depends only on releasing the mutex. */
    return (ci_thread_mutex_unlock(&(q->cond_mtx)) == 0) ? 1 : -1;
}
/*
 * get_from_queue: pop one pending connection into *con.
 * Returns 1 when a connection was copied out, 0 when the queue is empty,
 * -1 when the queue mutex cannot be taken.
 */
int get_from_queue(struct connections_queue *q, ci_connection_t *con)
{
    int result = 0;
    if (ci_thread_mutex_lock(&(q->queue_mtx)) != 0)
        return -1;
    if (q->used > 0) {
        /* LIFO pop: take the most recently queued connection. */
        q->used--;
        memcpy(con, &(q->connections[q->used]), sizeof(ci_connection_t));
        result = 1;
    }
    ci_thread_mutex_unlock(&(q->queue_mtx));
    return result;
}
/*
 * put_to_queue: append *con to the connections queue and wake a waiting
 * worker thread.
 * Returns the new queue depth on success, 0 if the queue is full (callers
 * treat this as fatal), or -1 if the queue mutex cannot be taken.
 */
int put_to_queue(struct connections_queue *q,ci_connection_t *con){
    int ret;
    if(ci_thread_mutex_lock(&(q->queue_mtx))!=0)
        return -1;
    if(q->used==q->size){
        ci_thread_mutex_unlock(&(q->queue_mtx));
        ci_debug_printf(1,"put_to_queue_fatal error used=%d size=%d\n",q->used,q->size);
        return 0;
    }
    memcpy(&(q->connections[q->used]),con,sizeof(ci_connection_t));
    ret=++q->used;
    ci_thread_mutex_unlock(&(q->queue_mtx));
    /*
     * Fix: signal while holding cond_mtx.  wait_for_queue() blocks on
     * queue_cond under cond_mtx with no predicate re-check before the wait;
     * signaling without that mutex could race with a worker that is between
     * its empty get_from_queue() and its cond_wait(), losing the wakeup and
     * leaving the connection queued until the next signal arrives.
     */
    ci_thread_mutex_lock(&(q->cond_mtx));
    ci_thread_cond_signal(&(q->queue_cond));
    ci_thread_mutex_unlock(&(q->cond_mtx));
    return ret;
}
/*
 * ldap_connection_release: return an LDAP handle obtained from
 * ldap_connection_open() back to its pool.  If close_connection is non-zero
 * (or the handle is not found in the used list, which indicates a bug) the
 * connection is unbound and freed; otherwise it is moved to the inactive
 * (reusable) list.  Returns 1 on success, 0 if the pool mutex cannot be
 * taken.
 */
int ldap_connection_release(struct ldap_connections_pool *pool, LDAP *ldap, int close_connection)
{
    struct ldap_connection *cur, *prev;
    if (ci_thread_mutex_lock(&pool->mutex) != 0)
        return 0;
    /* Find the handle in the used list and unlink it. */
    for (prev = NULL, cur = pool->used; cur != NULL; prev = cur, cur = cur->next) {
        if (cur->ldap == ldap) {
            if (cur == pool->used)
                pool->used = pool->used->next;
            else
                prev->next = cur->next;
            break;
        }
    }
    if (!cur) {
        /* Fix: newline moved to the end of the message (was "...authors\n!"). */
        ci_debug_printf(0, "Not ldap connection in used list! THIS IS A BUG! please contact authors!\n");
        close_connection = 1;
    }
    if (close_connection) {
        pool->connections--;
        /* NOTE(review): unbind performs network I/O while pool->mutex is
           held; kept as-is to preserve the existing locking discipline. */
        ldap_unbind_ext_s(ldap, NULL, NULL);
        free(cur); /* free(NULL) is a no-op, no guard needed */
    } else {
        cur->next = pool->inactive;
        pool->inactive = cur;
    }
#ifdef LDAP_MAX_CONNECTIONS
    /*
     * Fix: wake one waiter.  ldap_connection_open() blocks on pool_cond when
     * pool->connections >= pool->max_connections; without this signal those
     * waiters were never woken when a connection was released.
     */
    ci_thread_cond_signal(&pool->pool_cond);
#endif
    ci_thread_mutex_unlock(&pool->mutex);
    return 1;
}
/*
 * ci_stat_area_uint64_inc: add `count` to 64-bit statistics counter `ID`
 * inside `area`.  Silently ignores out-of-range IDs and areas with no
 * attached statistics memory block; the update is serialized on area->mtx.
 * (Original remark: "not really needed".)
 */
void ci_stat_area_uint64_inc(struct stat_area *area, int ID, int count)
{
    if (area->mem_block && ID >= 0 && ID < area->mem_block->counters64_size) {
        ci_thread_mutex_lock(&area->mtx);
        area->mem_block->counters64[ID] += count;
        ci_thread_mutex_unlock(&area->mtx);
    }
}
void ci_stat_uint64_inc(int ID, int count) { if (!STATS || !STATS->mem_block) return; if (ID<0 || ID>=STATS->mem_block->counters64_size) return; ci_thread_mutex_lock(&STATS->mtx); STATS->mem_block->counters64[ID] += count; ci_thread_mutex_unlock(&STATS->mtx); }
/*
 * ci_stat_area_reset: zero every 64-bit counter and every kb/bytes counter
 * pair in the area's statistics memory block, under area->mtx.
 */
void ci_stat_area_reset(struct stat_area *area)
{
    int idx;
    ci_thread_mutex_lock(&(area->mtx));
    for (idx = area->mem_block->counters64_size - 1; idx >= 0; idx--)
        area->mem_block->counters64[idx] = 0;
    for (idx = area->mem_block->counterskbs_size - 1; idx >= 0; idx--) {
        area->mem_block->counterskbs[idx].kb = 0;
        area->mem_block->counterskbs[idx].bytes = 0;
    }
    ci_thread_mutex_unlock(&(area->mtx));
}
int wait_achild_to_die() { DWORD i, count, ret; HANDLE died_child, *child_handles = malloc(sizeof(HANDLE) * childs_queue.size); child_shared_data_t *ach; while (1) { ci_thread_mutex_lock(&control_process_mtx); for (i = 0, count = 0; i < (DWORD) childs_queue.size; i++) { if (childs_queue.childs[i].pid != 0) child_handles[count++] = childs_queue.childs[i].pid; } if (count == 0) { Sleep(100); continue; } ci_thread_mutex_unlock(&control_process_mtx); ret = WaitForMultipleObjects(count, child_handles, TRUE, INFINITE); if (ret == WAIT_TIMEOUT) { ci_debug_printf(1, "What !@#$%^&!!!!! No Timeout exists!!!!!!"); continue; } if (ret == WAIT_FAILED) { ci_debug_printf(1, "Wait failed. Try again!!!!!!"); continue; } ci_thread_mutex_lock(&control_process_mtx); died_child = child_handles[ret]; ci_debug_printf(1, "Child with handle %d died, lets clean-up the queue\n", died_child); ach = get_child_data(&childs_queue, died_child); CloseHandle(ach->pipe); remove_child(&childs_queue, died_child); CloseHandle(died_child); ci_thread_mutex_unlock(&control_process_mtx); } }
/*Does not realy needed*/ void ci_stat_area_kbs_inc(struct stat_area *area,int ID, int count) { if (!area->mem_block) return; if (ID<0 || ID>=area->mem_block->counterskbs_size) return; ci_thread_mutex_lock(&area->mtx); area->mem_block->counterskbs[ID].bytes += count; area->mem_block->counterskbs[ID].kb += (area->mem_block->counterskbs[ID].bytes >> 10); area->mem_block->counterskbs[ID].bytes &= 0x3FF; ci_thread_mutex_unlock(&area->mtx); }
/*
 * ci_stat_kbs_inc: add `count` bytes to kb-counter `ID` of the global STATS
 * area, carrying whole kilobytes out of the byte remainder (kb holds units
 * of 1024, bytes the 0..1023 remainder).  Serialized on STATS->mtx.
 */
void ci_stat_kbs_inc(int ID, int count)
{
    /* Fix: also guard STATS itself — the sibling ci_stat_uint64_inc()
       checks (!STATS || !STATS->mem_block); this function dereferenced
       STATS unconditionally. */
    if (!STATS || !STATS->mem_block)
        return;
    if (ID<0 || ID>=STATS->mem_block->counterskbs_size)
        return;
    ci_thread_mutex_lock(&STATS->mtx);
    STATS->mem_block->counterskbs[ID].bytes += count;
    STATS->mem_block->counterskbs[ID].kb += (STATS->mem_block->counterskbs[ID].bytes >> 10);
    STATS->mem_block->counterskbs[ID].bytes &= 0x3FF;
    ci_thread_mutex_unlock(&STATS->mtx);
}
int do_file(int fd, char *filename){ FILE *f; char lg[10],lbuf[512],tmpbuf[522]; int bytes,len,totalbytesout,totalbytesin; if((f=fopen(filename,"r"))==NULL) return 0; buildrespmodfile(f,lbuf); if(icap_write(fd,lbuf,strlen(lbuf))<0) return 0; // printf("Sending file:\n"); totalbytesout=strlen(lbuf); while((len=fread(lbuf,sizeof(char),512,f))>0){ totalbytesout+=len; bytes=sprintf(lg,"%X\r\n",len); if(icap_write(fd,lg,bytes)<0){ printf("Error writing to socket.....\n"); return 0; } if(icap_write(fd,lbuf,len)<0){ printf("Error writing to socket.....\n"); return 0; } icap_write(fd,"\r\n",2); // printf("Sending chunksize :%d\n",len); } icap_write(fd,"0\r\n\r\n",5); fclose(f); // printf("Done(%d bytes). Reading responce.....\n",totalbytesout); if((totalbytesin=readallresponce(fd))<0){ printf("Read all responce error;\n"); return -1; } // printf("Done(%d bytes).\n",totalbytes); ci_thread_mutex_lock(&statsmtx); in_bytes_stats+=totalbytesin; out_bytes_stats+=totalbytesout; ci_thread_mutex_unlock(&statsmtx); return 1; }
/*
 * do_file: run `input_file` through the ICAP filter attached to `req`,
 * discarding the filtered output (fd_out==0), and update the global byte
 * statistics.  *keepalive is set from the request on success and cleared
 * on failure.  Returns 1 on success, 0 on error.
 */
int do_file(ci_request_t *req, char *input_file, int *keepalive) {
    int fd_in,fd_out;
    int ret;
    ci_headers_list_t *headers;
    if ((fd_in = open(input_file, O_RDONLY)) < 0) {
        ci_debug_printf(1, "Error opening file %s\n", input_file);
        return 0;
    }
    fd_out = 0;
    headers = ci_headers_create();
    build_headers(fd_in, headers);
    ret = ci_client_icapfilter(req, CONN_TIMEOUT, headers, &fd_in,
                               (int (*)(void *, char *, int)) fileread,
                               &fd_out,
                               (int (*)(void *, char *, int)) filewrite);
    close(fd_in);
    /* Fix: destroy the headers unconditionally — they were leaked on both
       error returns below.  They are not used after ci_client_icapfilter. */
    ci_headers_destroy(headers);
    if (ret <= 0 && req->bytes_out == 0) {
        ci_debug_printf(2, "Is the ICAP connection closed?\n");
        *keepalive = 0;
        return 0;
    }
    if (ret <= 0) {
        ci_debug_printf(1, "Error sending requests \n");
        *keepalive = 0;
        return 0;
    }
    *keepalive = req->keepalive;
    ci_thread_mutex_lock(&statsmtx);
    in_bytes_stats += req->bytes_in;
    out_bytes_stats += req->bytes_out;
    ci_thread_mutex_unlock(&statsmtx);
    return 1;
}
/*
 * getFreeCascade: take one cascade off the category's free list, blocking
 * on category->cond until one becomes available.  Returns NULL when the
 * category is being torn down; the returned item must be handed back via
 * unBusyCascade().
 */
LinkedCascade *getFreeCascade(ImageCategory *category)
{
    LinkedCascade *item = NULL;
    /* NOTE(review): freeing_category is read before taking the mutex, so
       this check can race with category teardown — confirm the teardown
       path tolerates a late waiter. */
    if(category->freeing_category)
        return NULL;
    ci_thread_mutex_lock(&category->mutex);
    /* Predicate loop: re-check after every wakeup (handles spurious wakeups). */
    while(!category->free_cascade) {
        ci_thread_cond_wait(&category->cond, &category->mutex);
    }
    // Ok, we should have a free cascade:
    // Remove free cascade item from free list
    item = category->free_cascade;
    category->free_cascade = item->next;
    ci_thread_mutex_unlock(&category->mutex);
    return item; // Tell caller the cascade
}
/*
 * unBusyCascade: return `item` to the category's free list and wake one
 * waiter in getFreeCascade() if the list was previously empty.  A no-op
 * for NULL items or while the category is being torn down.
 */
void unBusyCascade(ImageCategory *category, LinkedCascade *item)
{
    int need_signal = 0;
    if(item == NULL || category->freeing_category)
        return;
    /* NOTE(review): on lock failure this only logs and then mutates the
       list unsynchronized.  Returning instead would leak `item`, so the
       behavior is kept — but this deserves a closer look. */
    if(ci_thread_mutex_lock(&category->mutex) != 0) {
        ci_debug_printf(1, "unBusyCascade: failed to lock\n");
    }
    /* Signal only on the empty->non-empty transition; otherwise no thread
       can be blocked waiting for a cascade. */
    if(!category->free_cascade)
        need_signal = 1;
    // Add item back to free list
    item->next = category->free_cascade;
    category->free_cascade = item;
    // Clean up and return
    if(need_signal)
        ci_thread_cond_signal(&category->cond);
    ci_thread_mutex_unlock(&category->mutex);
}
/*
 * ldap_connection_open: obtain a bound LDAP handle from the pool.  An
 * inactive (cached) connection is reused when available; otherwise a new
 * connection is initialized and simple-bound with the pool's credentials.
 * When built with LDAP_MAX_CONNECTIONS the caller blocks on pool->pool_cond
 * while the pool is at its connection limit, and — note — the pool mutex
 * stays HELD across the ldap_initialize/bind below; without it the mutex is
 * dropped before the (slow) network bind and re-taken afterwards.
 * Returns NULL on allocation, initialization, bind or locking failure.
 */
LDAP *ldap_connection_open(struct ldap_connections_pool *pool)
{
    struct ldap_connection *conn;
    struct berval ldap_passwd, *servercred;
    int ret;
    char *ldap_user;
    if (ci_thread_mutex_lock(&pool->mutex)!=0)
        return NULL;
#ifdef LDAP_MAX_CONNECTIONS
    do {
#endif
        /* Fast path: reuse a cached connection, moving it to the used list. */
        if (pool->inactive) {
            conn = pool->inactive;
            pool->inactive = pool->inactive->next;
            conn->next = pool->used;
            pool->used = conn;
            conn->hits++;
            ci_thread_mutex_unlock(&pool->mutex);
            return conn->ldap;
        }
#ifdef LDAP_MAX_CONNECTIONS
        if (pool->connections >= pool->max_connections) {
            /* Wait for an ldap connection to be released; cond_wait
               atomically unlocks pool->mutex while sleeping. */
            if (ci_thread_cond_wait(&(pool->pool_cond), &(pool->mutex)) != 0) {
                ci_thread_mutex_unlock(&(pool->mutex));
                return NULL;
            }
        }
    } while(pool->connections >= pool->max_connections);
#else
    ci_thread_mutex_unlock(&pool->mutex);
#endif
    conn=malloc(sizeof(struct ldap_connection));
    if (!conn) {
#ifdef LDAP_MAX_CONNECTIONS
        ci_thread_mutex_unlock(&pool->mutex);
#endif
        return NULL;
    }
    conn->hits = 1;
    ret = ldap_initialize(&conn->ldap, pool->ldap_uri);
    if (!conn->ldap) {
#ifdef LDAP_MAX_CONNECTIONS
        ci_thread_mutex_unlock(&pool->mutex);
#endif
        ci_debug_printf(1, "Error allocating memory for ldap connection: %s!\n",
                        ldap_err2string(ret));
        free(conn);
        return NULL;
    }
    ldap_set_option(conn->ldap, LDAP_OPT_PROTOCOL_VERSION, &(pool->ldapversion));
    /* Empty user/password mean an anonymous simple bind. */
    if (pool->user[0] != '\0')
        ldap_user = pool->user;
    else
        ldap_user = NULL;
    if (pool->password[0] != '\0') {
        ldap_passwd.bv_val = pool->password;
        ldap_passwd.bv_len = strlen(pool->password);
    } else {
        ldap_passwd.bv_val = NULL;
        ldap_passwd.bv_len = 0;
    }
    ret = ldap_sasl_bind_s( conn->ldap, ldap_user, LDAP_SASL_SIMPLE,
                            &ldap_passwd, NULL, NULL, &servercred );
    if (ret != LDAP_SUCCESS) {
        ci_debug_printf(1, "Error bind to ldap server: %s!\n",ldap_err2string(ret));
#ifdef LDAP_MAX_CONNECTIONS
        ci_thread_mutex_unlock(&pool->mutex);
#endif
        ldap_unbind_ext_s(conn->ldap, NULL, NULL);
        free(conn);
        return NULL;
    }
    if(servercred) {
        ber_bvfree(servercred);
    }
#ifdef LDAP_MAX_CONNECTIONS
    /* we are already locked (mutex held since the top of the function) */
#else
    if (ci_thread_mutex_lock(&pool->mutex)!= 0) {
        ci_debug_printf(1, "Error locking mutex while opening ldap connection!\n");
        ldap_unbind_ext_s(conn->ldap, NULL, NULL);
        free(conn);
        return NULL;
    }
#endif
    /* Register the new connection in the used list. */
    pool->connections++;
    conn->next = pool->used;
    pool->used = conn;
    ci_thread_mutex_unlock(&pool->mutex);
    return conn->ldap;
}
/*
 * common_mutex_unlock: unlock a common mutex.  Only the thread-mutex
 * flavor is handled here; process-shared mutexes (isproc set) are a no-op
 * returning 0, matching the original behavior.
 */
int common_mutex_unlock(common_mutex_t *mtx)
{
    return mtx->isproc ? 0 : ci_thread_mutex_unlock(&mtx->mtx.thread_mutex);
}
/*
 * child_main: main loop of a forked worker child.  Spawns the worker
 * threads and the listener thread, waits for the listener to come up, then
 * services commands arriving from the parent over `pipefd` until asked to
 * terminate, finally cancelling all threads and exiting.
 */
void child_main(int sockfd, int pipefd)
{
    ci_thread_t thread;
    int i, ret;
    signal(SIGTERM, SIG_IGN); /* Ignore parent kill requests until we are up and running */
    ci_thread_mutex_init(&threads_list_mtx);
    ci_thread_mutex_init(&counters_mtx);
    ci_thread_cond_init(&free_server_cond);
    ci_stat_attach_mem(child_data->stats, child_data->stats_size, NULL);
    /* One slot per worker thread plus a NULL terminator. */
    threads_list = (server_decl_t **) malloc((CONF.THREADS_PER_CHILD + 1) * sizeof(server_decl_t *));
    con_queue = init_queue(CONF.THREADS_PER_CHILD);
    for (i = 0; i < CONF.THREADS_PER_CHILD; i++) {
        if ((threads_list[i] = newthread(con_queue)) == NULL) {
            exit(-1);        // FATAL error.....
        }
        ret = ci_thread_create(&thread,
                               (void *(*)(void *)) thread_main,
                               (void *) threads_list[i]);
        threads_list[i]->srv_pthread = thread;
    }
    threads_list[CONF.THREADS_PER_CHILD] = NULL;
    /* Now start the listener thread.... */
    ret = ci_thread_create(&thread,
                           (void *(*)(void *)) listener_thread,
                           (void *) &sockfd);
    listener_thread_id = thread;
    /* Seed rand() per child so children do not share a random sequence. */
    srand(((unsigned int)time(NULL)) + (unsigned int)getpid());
    /* All threads are up now; it is safe to install our signal handlers. */
    child_signals();
    /* A signal from the parent may arrive while we are starting.  The
       listener checks for parent commands at the top of its accept loop and
       will not accept requests, so we can shut down immediately even if the
       parent asked for a graceful stop. */
    if (child_data->father_said)
        child_data->to_be_killed = IMMEDIATELY;
    /* Start-child commands may run non-thread-safe code, but the worker
       threads are not serving requests yet. */
    commands_execute_start_child();
    /* Busy-wait (with short sleeps) until the listener reports it is
       running, then signal it to start accepting requests. */
    int doStart = 0;
    do {
        ci_thread_mutex_lock(&counters_mtx);
        doStart = listener_running;
        ci_thread_mutex_unlock(&counters_mtx);
        if (!doStart)
            ci_usleep(5);
    } while(!doStart);
    ci_thread_cond_signal(&free_server_cond);
    /* Command loop: react to data from the parent pipe until terminated. */
    while (!child_data->to_be_killed) {
        char buf[512];
        int bytes;
        if ((ret = ci_wait_for_data(pipefd, 1, wait_for_read)) > 0) { /* data input */
            bytes = ci_read_nonblock(pipefd, buf, 511);
            if (bytes == 0) {
                ci_debug_printf(1, "Parent closed the pipe connection! Going to term immediately!\n");
                child_data->to_be_killed = IMMEDIATELY;
            } else {
                /* NOTE(review): a negative read result would index buf[-1]
                   here — confirm ci_read_nonblock cannot return < 0 after a
                   successful ci_wait_for_data. */
                buf[bytes] = '\0';
                handle_child_process_commands(buf);
            }
        } else if (ret < 0) {
            ci_debug_printf(1, "An error occured while waiting for commands from parent. Terminating!\n");
            child_data->to_be_killed = IMMEDIATELY;
        }
        /* A dead listener with no termination request means something went
           wrong — shut down gracefully. */
        if (!listener_running && !child_data->to_be_killed) {
            ci_debug_printf(1, "Ohh!! something happened to listener thread! Terminating\n");
            child_data->to_be_killed = GRACEFULLY;
        }
        commands_exec_scheduled();
    }
    ci_debug_printf(5, "Child :%d going down :%s\n", getpid(),
                    child_data->to_be_killed == IMMEDIATELY? "IMMEDIATELY" : "GRACEFULLY");
    cancel_all_threads();
    commands_execute_stop_child();
    exit_normaly();
}
/*
 * listener_thread: accepts client connections and feeds them to the worker
 * threads via con_queue.  Competes with other child processes for
 * accept_mutex, accepts while free worker threads remain, then releases the
 * mutex and sleeps on free_server_cond until a worker frees up.  Clears
 * listener_running on every exit path so child_main can detect death.
 */
void listener_thread(int *fd)
{
    ci_connection_t conn;
    socklen_t claddrlen = sizeof(struct sockaddr_in);
    int haschild = 1, jobs_in_queue = 0;
    int pid, sockfd;
    sockfd = *fd;
    thread_signals(1);
    /* Wait for the main child thread to signal us to start accepting. */
    ci_thread_mutex_lock(&counters_mtx);
    listener_running = 1;
    ci_thread_cond_wait(&free_server_cond, &counters_mtx);
    ci_thread_mutex_unlock(&counters_mtx);
    pid = getpid();
    for (;;) {                 //Global for
        if (child_data->to_be_killed) {
            ci_debug_printf(5, "Listener of pid:%d exiting!\n", pid);
            goto LISTENER_FAILS_UNLOCKED;
        }
        /* Inter-process serialization of accept(). */
        if (!ci_proc_mutex_lock(&accept_mutex)) {
            if (errno == EINTR) {
                ci_debug_printf(5, "proc_mutex_lock interrupted (EINTR received, pid=%d)!\n", pid);
                /* Try again to take the lock */
                continue;
            } else {
                ci_debug_printf(1, "Unknown errno %d in proc_mutex_lock of pid %d. Exiting!\n",
                                errno, pid);
                goto LISTENER_FAILS_UNLOCKED;
            }
        }
        child_data->idle = 0;
        ci_debug_printf(7, "Child %d getting requests now ...\n", pid);
        do {                   //Getting requests while we have free servers.....
#ifndef SINGLE_ACCEPT
            /* select() first so a signal can interrupt the wait without the
               cost of restarting a blocking accept(). */
            fd_set fds;
            int ret;
            do {
                FD_ZERO(&fds);
                FD_SET(sockfd, &fds);
                errno = 0;
                ret = select(sockfd + 1, &fds, NULL, NULL, NULL);
                if (ret < 0) {
                    if (errno != EINTR) {
                        ci_debug_printf(1, "Error in select %d! Exiting server!\n", errno);
                        goto LISTENER_FAILS;
                    }
                    if (child_data->to_be_killed) {
                        ci_debug_printf(5, "Listener server signalled to exit!\n");
                        goto LISTENER_FAILS;
                    }
                }
            } while (errno == EINTR);
#endif
            do {
                errno = 0;
                claddrlen = sizeof(conn.claddr.sockaddr);
                if (((conn.fd = accept(sockfd,
                                       (struct sockaddr *) &(conn.claddr.sockaddr),
                                       &claddrlen)) == -1)) {
                    if (errno != EINTR) {
                        ci_debug_printf(1, "Error accept %d!\nExiting server!\n", errno);
                        goto LISTENER_FAILS;
                    }
                    /* Here we are going to exit only if accept was
                       interrupted by a signal; if we accepted an fd we must
                       add it to the queue for processing. */
                    if (errno == EINTR && child_data->to_be_killed) {
                        ci_debug_printf(5, "Listener server signalled to exit!\n");
                        goto LISTENER_FAILS;
                    }
                }
            } while (errno == EINTR && !child_data->to_be_killed);
            claddrlen = sizeof(conn.srvaddr.sockaddr);
            getsockname(conn.fd, (struct sockaddr *) &(conn.srvaddr.sockaddr), &claddrlen);
            ci_fill_sockaddr(&conn.claddr);
            ci_fill_sockaddr(&conn.srvaddr);
            icap_socket_opts(sockfd, MAX_SECS_TO_LINGER);
            /* Hand the accepted connection to a worker; 0 means the queue
               was full, which should be impossible while haschild holds. */
            if ((jobs_in_queue = put_to_queue(con_queue, &conn)) == 0) {
                ci_debug_printf(1, "ERROR!!!!!! NO AVAILABLE SERVERS! THIS IS A BUG!!!!!!!!\n");
                ci_debug_printf(1, "Jobs in Queue: %d, Free servers: %d, Used Servers: %d, Requests: %d\n",
                                jobs_in_queue, child_data->freeservers,
                                child_data->usedservers, child_data->requests);
                goto LISTENER_FAILS;
            }
            (child_data->connections)++;     //NUM of Requests....
            if (child_data->to_be_killed) {
                ci_debug_printf(5, "Listener server must exit!\n");
                goto LISTENER_FAILS;
            }
            /* Keep accepting only while more workers than queued jobs. */
            ci_thread_mutex_lock(&counters_mtx);
            haschild = ((child_data->freeservers - jobs_in_queue) > 0 ? 1 : 0);
            ci_thread_mutex_unlock(&counters_mtx);
        } while (haschild);
        ci_debug_printf(7, "Child %d STOPS getting requests now ...\n", pid);
        child_data->idle = 1;
        /* Give other children a chance to accept. */
        while (!ci_proc_mutex_unlock(&accept_mutex)) {
            if (errno != EINTR) {
                ci_debug_printf(1, "Error:%d while trying to unlock proc_mutex, exiting listener of server:%d\n",
                                errno, pid);
                goto LISTENER_FAILS_UNLOCKED;
            }
            ci_debug_printf(5, "Mutex lock interrupted while trying to unlock proc_mutex, pid: %d\n", pid);
        }
        /* Sleep until a worker thread becomes free again. */
        ci_thread_mutex_lock(&counters_mtx);
        if ((child_data->freeservers - connections_pending(con_queue)) <= 0) {
            ci_debug_printf(7, "Child %d waiting for a thread to accept more connections ...\n", pid);
            ci_thread_cond_wait(&free_server_cond, &counters_mtx);
        }
        ci_thread_mutex_unlock(&counters_mtx);
    }
LISTENER_FAILS_UNLOCKED:
    listener_running = 0;
    return;
LISTENER_FAILS:
    /* Failure while holding accept_mutex: release it before returning. */
    listener_running = 0;
    errno = 0;
    while (!ci_proc_mutex_unlock(&accept_mutex)) {
        if (errno != EINTR) {
            ci_debug_printf(1, "Error:%d while trying to unlock proc_mutex of server:%d\n",
                            errno, pid);
            break;
        }
        ci_debug_printf(7, "Mutex lock interrupted while trying to unlock proc_mutex before terminating\n");
    }
    return;
}
/*
 * thread_main: worker thread loop.  Pulls accepted connections from
 * con_queue, serves (possibly keep-alive chained) ICAP requests on each,
 * maintains the shared free/used server counters, and wakes the listener
 * when this worker becomes free again.  Returns when the child is being
 * terminated or on a fatal queue error.
 */
int thread_main(server_decl_t * srv)
{
    ci_connection_t con;
    char clientname[CI_MAXHOSTNAMELEN + 1];
    int ret, request_status = CI_NO_STATUS;
    int keepalive_reqs;
    //***********************
    thread_signals(0);
    //*************************
    srv->srv_id = getpid();    //Setting my pid ...
    for (;;) {
        /* If we must shut down IMMEDIATELY it is time to leave the server;
           if shutting down GRACEFULLY we die only when there are no more
           accepted connections queued. */
        if (child_data->to_be_killed == IMMEDIATELY) {
            srv->running = 0;
            return 1;
        }
        if ((ret = get_from_queue(con_queue, &con)) == 0) {
            if (child_data->to_be_killed) {
                srv->running = 0;
                return 1;
            }
            /* Queue empty: block until the listener signals new work. */
            ret = wait_for_queue(con_queue);
            continue;
        }
        if (ret < 0) {         //An error has occured
            ci_debug_printf(1, "Fatal Error!!! Error getting a connection from connections queue!!!\n");
            break;
        }
        ci_thread_mutex_lock(&counters_mtx);   /* Update counters as soon as possible */
        (child_data->freeservers)--;
        (child_data->usedservers)++;
        ci_thread_mutex_unlock(&counters_mtx);
        ci_netio_init(con.fd);
        ret = 1;
        /* Reuse the request object across connections when possible. */
        if (srv->current_req == NULL)
            srv->current_req = newrequest(&con);
        else
            ret = recycle_request(srv->current_req, &con);
        if (srv->current_req == NULL || ret == 0) {
            ci_sockaddr_t_to_host(&(con.claddr), clientname, CI_MAXHOSTNAMELEN);
            ci_debug_printf(1, "Request from %s denied...\n", clientname);
            hard_close_connection((&con));
            goto end_of_main_loop_thread; /* The request was rejected:
                                             log an error and continue ... */
        }
        /* Keep-alive loop: serve successive requests on this connection. */
        keepalive_reqs = 0;
        do {
            if (MAX_KEEPALIVE_REQUESTS > 0 && keepalive_reqs >= MAX_KEEPALIVE_REQUESTS)
                srv->current_req->keepalive = 0;   /* do not keep alive the connection */
            if (child_data->to_be_killed)  /* We are going to die; do not keep-alive */
                srv->current_req->keepalive = 0;
            if ((request_status = process_request(srv->current_req)) == CI_NO_STATUS) {
                ci_debug_printf(5, "Process request timeout or interrupted....\n");
                ci_request_reset(srv->current_req);
                break;
            }
            srv->served_requests++;
            srv->served_requests_no_reallocation++;
            keepalive_reqs++;
            /* Increase served requests.  I dont like this; the delay is
               small but I don't like... */
            ci_thread_mutex_lock(&counters_mtx);
            (child_data->requests)++;
            ci_thread_mutex_unlock(&counters_mtx);
            log_access(srv->current_req, request_status);
            //  break; //No keep-alive ......
            if (child_data->to_be_killed == IMMEDIATELY)
                break;         //Just exiting the keep-alive loop
            /* If we are going to terminate gracefully we will try to keep
               our promise for keep-alived requests.... */
            if (child_data->to_be_killed == GRACEFULLY &&
                srv->current_req->keepalive == 0)
                break;
            ci_debug_printf(8, "Keep-alive:%d\n", srv->current_req->keepalive);
            if (srv->current_req->keepalive && keepalive_request(srv->current_req)) {
                ci_debug_printf(8,
                                "Server %d going to serve new request from client (keep-alive) \n",
                                srv->srv_id);
            } else
                break;
        } while (1);
        if (srv->current_req) {
            if (request_status != CI_OK || child_data->to_be_killed) {
                hard_close_connection(srv->current_req->connection);
            } else {
                close_connection(srv->current_req->connection);
            }
        }
        /* Periodically destroy the request to release accumulated buffers. */
        if (srv->served_requests_no_reallocation > MAX_REQUESTS_BEFORE_REALLOCATE_MEM) {
            ci_debug_printf(5, "Max requests reached, reallocate memory and buffers .....\n");
            ci_request_destroy(srv->current_req);
            srv->current_req = NULL;
            srv->served_requests_no_reallocation = 0;
        }
end_of_main_loop_thread:
        ci_thread_mutex_lock(&counters_mtx);
        (child_data->freeservers)++;
        (child_data->usedservers)--;
        ci_thread_mutex_unlock(&counters_mtx);
        /* Wake the listener: a worker is free again. */
        ci_thread_cond_signal(&free_server_cond);
    }
    srv->running = 0;
    return 0;
}
/*
 * child_main (legacy single-socket variant): spawn START_SERVERS worker
 * threads, then loop accepting connections under the inter-process
 * accept_mutex and queueing them to the workers until told to die.
 */
void child_main(int sockfd){
    ci_connection_t conn;
    int claddrlen=sizeof(struct sockaddr_in);
    ci_thread_t thread;
    char clientname[300];
    int i,retcode,haschild=1,jobs_in_queue=0;
    int pid=0;
    child_signals();
    pid=getpid();
    ci_thread_mutex_init(&threads_list_mtx);
    ci_thread_mutex_init(&counters_mtx);
    ci_thread_cond_init(&free_server_cond);
    /* One slot per worker plus a NULL terminator. */
    threads_list=(server_decl_t **)malloc((START_SERVERS+1)*sizeof(server_decl_t *));
    con_queue=init_queue(START_SERVERS);
    for(i=0;i<START_SERVERS;i++){
        if((threads_list[i]=newthread(con_queue))==NULL){
            exit(-1);// FATAL error.....
        }
        retcode=ci_thread_create(&thread,
                                 (void *(*)(void *))thread_main,
                                 (void *)threads_list[i]);
    }
    threads_list[START_SERVERS]=NULL;
    for(;;){ //Global for
        /* Serialize accept() across child processes. */
        if(!ci_proc_mutex_lock(&accept_mutex)){
            if(errno==EINTR){
                debug_printf(5,"EINTR received\n");
                if(child_data->to_be_killed)
                    goto end_child_main;
                continue;
            }
        }
        child_data->idle=0;
        debug_printf(7,"Child %d getting requests now ...\n",pid);
        do{ //Getting requests while we have free servers.....
            do{
                errno = 0;
                if(((conn.fd = accept(sockfd,
                                      (struct sockaddr *)&(conn.claddr),
                                      &claddrlen)) == -1) && errno != EINTR){
                    debug_printf(1,"error accept .... %d\nExiting server ....\n",errno);
                    exit(-1); //For the moment .......
                    goto end_child_main ;
                }
                if(errno==EINTR && child_data->to_be_killed)
                    goto end_child_main;
            }while(errno==EINTR);
            getsockname(conn.fd,(struct sockaddr *)&(conn.srvaddr),&claddrlen);
            icap_socket_opts(sockfd);
            /* Queue the accepted connection; 0 means "no free worker" which
               should be impossible while haschild holds — treat as fatal. */
            if((jobs_in_queue=put_to_queue(con_queue,&conn))==0){
                debug_printf(1,"ERROR!!!!!!NO AVAILABLE SERVERS!!!!!!!!!\n");
                child_data->to_be_killed=GRACEFULLY;
                debug_printf(1,"Jobs in Queue:%d,Free servers:%d, Used Servers :%d, Requests %d\n",
                             jobs_in_queue,
                             child_data->freeservers,child_data->usedservers,
                             child_data->requests);
                goto end_child_main;
            }
            ci_thread_mutex_lock(&counters_mtx);
            haschild=(child_data->freeservers?1:0);
            ci_thread_mutex_unlock(&counters_mtx);
            (child_data->connections)++; //NUM of Requests....
        }while(haschild);
        child_data->idle=1;
        ci_proc_mutex_unlock(&accept_mutex);
        /* Sleep until a worker thread frees up. */
        ci_thread_mutex_lock(&counters_mtx);
        if(child_data->freeservers==0){
            debug_printf(7,"Child %d waiting for a thread to accept more connections ...\n",pid);
            ci_thread_cond_wait(&free_server_cond,&counters_mtx);
        }
        ci_thread_mutex_unlock(&counters_mtx);
    }
end_child_main:
    cancel_all_threads();
    exit_normaly();
}
/*
 * thread_main (legacy variant): worker thread loop.  Pulls connections
 * from con_queue, serves (possibly keep-alive chained) requests, keeps the
 * shared free/used counters, and signals free_server_cond when free again.
 * Returns 0 on a fatal queue error, and when the child is terminating.
 */
int thread_main(server_decl_t *srv){
    ci_connection_t con;
    int ret,request_status=0;
    /* (removed unused locals: cont_mtx, clientname, max, tmp) */
    //***********************
    thread_signals();
    //*************************
    srv->srv_id=getpid(); //Setting my pid ...
    srv->srv_pthread=pthread_self();
    for(;;){
        if(child_data->to_be_killed)
            return 0; //Exiting thread..... (Fix: bare `return;` in an int function)
        if((ret=get_from_queue(con_queue,&con))==0){
            wait_for_queue(con_queue); //It is better that the wait_for_queue to be
                                       //moved into the get_from_queue
            continue;
        }
        ci_thread_mutex_lock(&counters_mtx);
        (child_data->freeservers)--;
        (child_data->usedservers)++;
        ci_thread_mutex_unlock(&counters_mtx);
        if(ret<0){ //An error has occured
            debug_printf(1,"Error getting from connections queue\n");
            break;
        }
        icap_netio_init(con.fd);
        /* Reuse the request object across connections when possible. */
        if(srv->current_req==NULL)
            srv->current_req=newrequest(&con);
        else
            recycle_request(srv->current_req,&con);
        /* Keep-alive loop: serve successive requests on this connection. */
        do{
            if((request_status=process_request(srv->current_req))<0){
                debug_printf(5,"Process request timeout or interupted....\n");
                reset_request(srv->current_req);
                break;
            }
            srv->served_requests++;
            srv->served_requests_no_reallocation++;
            /*Increase served requests. I dont like this. The delay is small but I don't like...*/
            ci_thread_mutex_lock(&counters_mtx);
            (child_data->requests)++;
            ci_thread_mutex_unlock(&counters_mtx);
            log_access(srv->current_req,request_status);
            //  break; //No keep-alive ......
            if(child_data->to_be_killed)
                return 0; //Exiting thread..... (Fix: bare `return;`)
            debug_printf(8,"Keep-alive:%d\n",srv->current_req->keepalive);
            if(srv->current_req->keepalive &&
               check_for_keepalive_data(srv->current_req->connection->fd)){
                reset_request(srv->current_req);
                debug_printf(8,"Server %d going to serve new request from client(keep-alive) \n",
                             srv->srv_id);
            }
            else
                break;
        }while(1);
        if(srv->current_req){
            if(request_status<0)
                hard_close_connection(srv->current_req->connection);
            else
                close_connection(srv->current_req->connection);
        }
        /* Periodically destroy the request to release accumulated buffers. */
        if(srv->served_requests_no_reallocation > MAX_REQUESTS_BEFORE_REALLOCATE_MEM){
            debug_printf(5,"Max requests reached, reallocate memory and buffers .....\n");
            destroy_request(srv->current_req);
            srv->current_req=NULL;
            srv->served_requests_no_reallocation=0;
        }
        ci_thread_mutex_lock(&counters_mtx);
        (child_data->freeservers)++;
        (child_data->usedservers)--;
        ci_thread_mutex_unlock(&counters_mtx);
        ci_thread_cond_signal(&free_server_cond);
    }
    return 0;
}
/*
 * threadjobsendfiles: stress-test worker.  Connects to `servername`:1344
 * and repeatedly sends files from the global FILES[] list (round-robin via
 * file_indx), updating request statistics.  Randomly drops the connection
 * (~30% per request) to exercise non-keep-alive paths.  Returns 0 when
 * _THE_END is set, -1 on socket errors; exits the process on connect
 * failure.
 */
int threadjobsendfiles(){
    struct sockaddr_in addr;
    struct hostent *hent;
    int port=1344;
    int fd,indx;
    int arand;
    /* NOTE(review): gethostbyname/inet_addr failure leaves addr
       uninitialized when both fail — no error check on inet_addr here. */
    hent = gethostbyname(servername);
    if(hent == NULL)
        addr.sin_addr.s_addr = inet_addr(servername);
    else
        memcpy(&addr.sin_addr, hent->h_addr, hent->h_length);
    addr.sin_family = AF_INET;
    addr.sin_port = htons(port);
    while(1){
        fd = socket(AF_INET, SOCK_STREAM, 0);
        if(fd == -1){
            printf("Error oppening socket ....\n");
            return -1;
        }
        if(connect(fd, (struct sockaddr *)&addr, sizeof(addr))){
            printf("Error connecting to socket .....\n");
            exit(-1);
            return -1; /* unreachable after exit() */
        }
        /* Keep-alive loop: send files on this connection until do_file
           fails or the random drop triggers. */
        for(;;){
            /* Round-robin over FILES[] under filemtx. */
            ci_thread_mutex_lock(&filemtx);
            indx=file_indx;
            if(file_indx==(FILES_NUMBER-1))
                file_indx=0;
            else
                file_indx++;
            ci_thread_mutex_unlock(&filemtx);
            if(do_file(fd,FILES[indx])<=0)
                break;
            sleep(1);
            //	  usleep(100000);
            ci_thread_mutex_lock(&statsmtx);
            requests_stats++;
            arand=rand(); /* rand() is not thread safe, so call it under the lock */
            ci_thread_mutex_unlock(&statsmtx);
            if(_THE_END){
                close(fd);
                return 0;
            }
            /* Scale arand to 0..9; values 3,5,7 => close the connection. */
            arand=(int)(( ((double)arand)/(double)RAND_MAX)*10.0);
            if(arand==5 || arand==7 || arand==3 ){ // 30% possibility ....
                //	       printf("OK, closing the connection......\n");
                break;
            }
            //	  printf("Keeping alive connection\n");
        }
        close(fd);
    }
}
/*
 * worker_main: (Windows) accept loop of a worker process.  Takes the
 * inter-process accept_mutex, accepts connections while free server
 * threads remain, queues them on con_queue, then releases the mutex and
 * sleeps on free_server_cond until a thread frees up.
 */
int worker_main(ci_socket sockfd)
{
    ci_connection_t conn;
    int claddrlen = sizeof(struct sockaddr_in);
//     char clientname[300];
    int haschild = 1, jobs_in_queue = 0;
    int pid = 0, error;
    for (;;) {                 //Global for
        if (!ci_proc_mutex_lock(&accept_mutex)) {
            if (child_data->to_be_killed)
                return 1;
            continue;
        }
        child_data->idle = 0;
        pid = (int) child_data->pid;
        ci_debug_printf(1, "Child %d getting requests now ...\n", pid);
        do {                   //Getting requests while we have free servers.....
            ci_debug_printf(1, "In accept loop..................\n");
            error = 0;
            if (((conn.fd = accept(sockfd,
                                   (struct sockaddr *) &(conn.claddr.sockaddr),
                                   &claddrlen)) == INVALID_SOCKET) &&
//     if(((conn.fd = WSAAccept(sockfd, (struct sockaddr *)&(conn.claddr), &claddrlen,NULL,NULL)) == INVALID_SOCKET ) &&
                (error = WSAGetLastError())) {
                ci_debug_printf(1, "error accept .... %d\nExiting server ....\n", error);
                exit(-1);      //For the moment .......
            }
            ci_debug_printf(1, "Accepting one connection...\n");
            claddrlen = sizeof(conn.srvaddr.sockaddr);
            getsockname(conn.fd,
                        (struct sockaddr *) &(conn.srvaddr.sockaddr), &claddrlen);
            ci_fill_sockaddr(&conn.claddr);
            ci_fill_sockaddr(&conn.srvaddr);
            icap_socket_opts(sockfd, MAX_SECS_TO_LINGER);
            /* Queue the accepted connection for a server thread.  Unlike
               the Unix listener, a full queue is logged but NOT fatal here. */
            if ((jobs_in_queue = put_to_queue(con_queue, &conn)) == 0) {
                ci_debug_printf(1, "ERROR!!!!!!NO AVAILABLE SERVERS!!!!!!!!!\n");
//                    child_data->to_be_killed=GRACEFULLY;
                ci_debug_printf(1, "Jobs in Queue: %d, Free servers: %d, Used Servers: %d, Requests: %d\n",
                                jobs_in_queue, child_data->freeservers,
                                child_data->usedservers, child_data->requests);
            }
            ci_thread_mutex_lock(&counters_mtx);
            haschild = (child_data->freeservers ? 1 : 0);
            ci_thread_mutex_unlock(&counters_mtx);
            (child_data->connections)++;     //NUM of Requests....
        } while (haschild);
        child_data->idle = 1;
        ci_proc_mutex_unlock(&accept_mutex);
        /* Sleep until a server thread frees up. */
        ci_thread_mutex_lock(&counters_mtx);
        if (child_data->freeservers == 0) {
            ci_debug_printf(1, "Child %d waiting for a thread to accept more connections ...\n", pid);
            ci_thread_cond_wait(&free_server_cond, &counters_mtx);
        }
        ci_thread_mutex_unlock(&counters_mtx);
    }
}
int do_file(int fd, char *filename, int *keepalive) { FILE *f; char lg[10], lbuf[512]; int bytes, len, totalbytesout, totalbytesin; unsigned int arand; if ((f = fopen(filename, "r")) == NULL) return 0; buildrespmodfile(f, lbuf); if (icap_write(fd, lbuf, strlen(lbuf)) < 0) return 0; // printf("Sending file:\n"); totalbytesout = strlen(lbuf); len = rand_r(&arand); len++; len = (int) ((((float) len) / RAND_MAX) * 510.0); len = (len < 512 ? len : 512); len = (len > 0 ? len : 1); while ((len = fread(lbuf, sizeof(char), len, f)) > 0) { totalbytesout += len; bytes = sprintf(lg, "%X\r\n", len); if (icap_write(fd, lg, bytes) < 0) { printf("Error writing to socket:%s (after %d bytes).....\n", lg, totalbytesout); req_errors_rw++; return 0; } if (icap_write(fd, lbuf, len) < 0) { printf("Error writing to socket.....\n"); req_errors_rw++; return 0; } icap_write(fd, "\r\n", 2); // printf("Sending chunksize :%d\n",len); len = rand_r(&arand); len++; len = (int) ((((float) len) / RAND_MAX) * 510.0); len = (len < 512 ? len : 512); len = (len > 0 ? len : 1); } icap_write(fd, "0\r\n\r\n", 5); fclose(f); // printf("Done(%d bytes). Reading responce.....\n",totalbytesout); if ((totalbytesin = readallresponce(fd, keepalive)) < 0) { printf("Read all responce error;\n"); return -1; } // printf("Done(%d bytes).\n",totalbytes); ci_thread_mutex_lock(&statsmtx); in_bytes_stats += totalbytesin; out_bytes_stats += totalbytesout; ci_thread_mutex_unlock(&statsmtx); return 1; }
int start_server() { #ifdef MULTICHILD int child_indx, i; HANDLE child_handle; ci_thread_t mon_thread; int childs, freeservers, used, maxrequests; ci_proc_mutex_init(&accept_mutex); ci_thread_mutex_init(&control_process_mtx); if (!create_childs_queue(&childs_queue, MAX_CHILDS)) { log_server(NULL, "Can't init shared memory.Fatal error, exiting!\n"); ci_debug_printf(1, "Can't init shared memory.Fatal error, exiting!\n"); exit(0); } for (i = 0; i < START_CHILDS + 2; i++) { child_handle = start_child(LISTEN_SOCKET); } /*Start died childs monitor thread*/ /* ci_thread_create(&mon_thread, (void *(*)(void *))wait_achild_to_die, (void *)NULL); */ while (1) { if (check_for_died_child(5000)) continue; // Sleep(5000); childs_queue_stats(&childs_queue, &childs, &freeservers, &used, &maxrequests); ci_debug_printf(1, "Server stats: \n\t Childs:%d\n\t Free servers:%d\n\tUsed servers:%d\n\tRequests served:%d\n", childs, freeservers, used, maxrequests); if ((freeservers <= MIN_FREE_SERVERS && childs < MAX_CHILDS) || childs < START_CHILDS) { ci_debug_printf(1, "Going to start a child .....\n"); child_handle = start_child(LISTEN_SOCKET); } else if (freeservers >= MAX_FREE_SERVERS && childs > START_CHILDS) { ci_thread_mutex_lock(&control_process_mtx); if ((child_indx = find_an_idle_child(&childs_queue)) < 0) continue; childs_queue.childs[child_indx].to_be_killed = GRACEFULLY; tell_child_to_die(childs_queue.childs[child_indx].pipe); ci_thread_mutex_unlock(&control_process_mtx); ci_debug_printf(1, "Going to stop child %d .....\n", childs_queue.childs[child_indx].pid); } } /* for(i=0;i<START_CHILDS;i++){ pid=wait(&status); ci_debug_printf(1,"The child %d died with status %d\n",pid,status); } */ #else child_data = (child_shared_data_t *) malloc(sizeof(child_shared_data_t)); child_data->pid = 0; child_data->freeservers = START_SERVERS; child_data->usedservers = 0; child_data->requests = 0; child_data->connections = 0; child_data->to_be_killed = 0; child_data->idle = 1; 
child_main(LISTEN_SOCKET); #endif return 1; }
int init_virusdb() { int ret; unsigned int no = 0; virusdb = malloc(sizeof(struct virus_db)); memset(virusdb, 0, sizeof(struct virus_db)); if (!virusdb) return 0; #ifdef HAVE_LIBCLAMAV_095 if((ret = cl_init(CL_INIT_DEFAULT))) { ci_debug_printf(1, "!Can't initialize libclamav: %s\n", cl_strerror(ret)); return 0; } if(!(virusdb->db = cl_engine_new())) { ci_debug_printf(1, "Clamav DB load: Cannot create new clamav engine\n"); return 0; } if ((ret = cl_load(cl_retdbdir(), virusdb->db, &no, CL_DB_STDOPT))) { ci_debug_printf(1, "Clamav DB load: cl_load failed: %s\n", cl_strerror(ret)); #elif defined(HAVE_LIBCLAMAV_09X) if ((ret = cl_load(cl_retdbdir(), &(virusdb->db), &no, CL_DB_STDOPT))) { ci_debug_printf(1, "Clamav DB load: cl_load failed: %s\n", cl_strerror(ret)); #else if ((ret = cl_loaddbdir(cl_retdbdir(), &(virusdb->db), &no))) { ci_debug_printf(1, "cl_loaddbdir: %s\n", cl_perror(ret)); #endif return 0; } #ifdef HAVE_LIBCLAMAV_095 if ((ret = cl_engine_compile(virusdb->db))) { #else if ((ret = cl_build(virusdb->db))) { #endif ci_debug_printf(1, "Database initialization error: %s\n", cl_strerror(ret)); #ifdef HAVE_LIBCLAMAV_095 cl_engine_free(virusdb->db); #else cl_free(virusdb->db); #endif free(virusdb); virusdb = NULL; return 0; } ci_thread_mutex_init(&db_mutex); virusdb->refcount = 1; old_virusdb = NULL; return 1; } /* Instead of using struct virus_db and refcount's someone can use the cl_dup function of clamav library, but it is undocumented so I did not use it. The following implementation we are starting to reload clamav db while threads are scanning for virus but we are not allow any child to start a new scan until we are loading DB. 
*/ /*#define DB_NO_FULL_LOCK 1*/ #undef DB_NO_FULL_LOCK int reload_virusdb() { struct virus_db *vdb = NULL; int ret; unsigned int no = 0; ci_thread_mutex_lock(&db_mutex); if (old_virusdb) { ci_debug_printf(1, "Clamav DB reload pending, cancelling.\n"); ci_thread_mutex_unlock(&db_mutex); return 0; } #ifdef DB_NO_FULL_LOCK ci_thread_mutex_unlock(&db_mutex); #endif vdb = malloc(sizeof(struct virus_db)); if (!vdb) return 0; memset(vdb, 0, sizeof(struct virus_db)); ci_debug_printf(9, "db_reload going to load db\n"); #ifdef HAVE_LIBCLAMAV_095 if(!(vdb->db = cl_engine_new())) { ci_debug_printf(1, "Clamav DB load: Cannot create new clamav engine\n"); return 0; } if ((ret = cl_load(cl_retdbdir(), vdb->db, &no, CL_DB_STDOPT))) { ci_debug_printf(1, "Clamav DB reload: cl_load failed: %s\n", cl_strerror(ret)); #elif defined(HAVE_LIBCLAMAV_09X) if ((ret = cl_load(cl_retdbdir(), &(vdb->db), &no, CL_DB_STDOPT))) { ci_debug_printf(1, "Clamav DB reload: cl_load failed: %s\n", cl_strerror(ret)); #else if ((ret = cl_loaddbdir(cl_retdbdir(), &(vdb->db), &no))) { ci_debug_printf(1, "Clamav DB reload: cl_loaddbdir failed: %s\n", cl_perror(ret)); #endif return 0; } ci_debug_printf(9, "loaded. 
Going to build\n"); #ifdef HAVE_LIBCLAMAV_095 if ((ret = cl_engine_compile(vdb->db))) { #else if ((ret = cl_build(vdb->db))) { #endif ci_debug_printf(1, "Clamav DB reload: Database initialization error: %s\n", cl_strerror(ret)); #ifdef HAVE_LIBCLAMAV_095 cl_engine_free(vdb->db); #else cl_free(vdb->db); #endif free(vdb); vdb = NULL; #ifdef DB_NO_FULL_LOCK /*no lock needed */ #else ci_thread_mutex_unlock(&db_mutex); #endif return 0; } ci_debug_printf(9, "Done releasing.....\n"); #ifdef DB_NO_FULL_LOCK ci_thread_mutex_lock(&db_mutex); #endif old_virusdb = virusdb; old_virusdb->refcount--; ci_debug_printf(9, "Old VirusDB refcount:%d\n", old_virusdb->refcount); if (old_virusdb->refcount <= 0) { #ifdef HAVE_LIBCLAMAV_095 cl_engine_free(old_virusdb->db); #else cl_free(old_virusdb->db); #endif free(old_virusdb); old_virusdb = NULL; } virusdb = vdb; virusdb->refcount = 1; ci_thread_mutex_unlock(&db_mutex); return 1; } CL_ENGINE *get_virusdb() { struct virus_db *vdb; ci_thread_mutex_lock(&db_mutex); vdb = virusdb; vdb->refcount++; ci_thread_mutex_unlock(&db_mutex); return vdb->db; } void release_virusdb(CL_ENGINE * db) { ci_thread_mutex_lock(&db_mutex); if (virusdb && db == virusdb->db) virusdb->refcount--; else if (old_virusdb && (db == old_virusdb->db)) { old_virusdb->refcount--; ci_debug_printf(9, "Old VirusDB refcount: %d\n", old_virusdb->refcount); if (old_virusdb->refcount <= 0) { #ifdef HAVE_LIBCLAMAV_095 cl_engine_free(old_virusdb->db); #else cl_free(old_virusdb->db); #endif free(old_virusdb); old_virusdb = NULL; } } else { ci_debug_printf(1, "BUG in srv_clamav service! 
please contact the author\n"); } ci_thread_mutex_unlock(&db_mutex); } void destroy_virusdb() { if (virusdb) { #ifdef HAVE_LIBCLAMAV_095 cl_engine_free(virusdb->db); #else cl_free(virusdb->db); #endif free(virusdb); virusdb = NULL; } if (old_virusdb) { #ifdef HAVE_LIBCLAMAV_095 cl_engine_free(old_virusdb->db); #else cl_free(old_virusdb->db); #endif free(old_virusdb); old_virusdb = NULL; } } void set_istag(ci_service_xdata_t * srv_xdata) { char istag[SERVICE_ISTAG_SIZE + 1]; char str_version[64]; char *daily_path; char *s1, *s2; struct cl_cvd *d1; int version = 0, cfg_version = 0; struct stat daily_stat; /*instead of 128 should be strlen("/daily.inc/daily.info")+1*/ daily_path = malloc(strlen(cl_retdbdir()) + 128); if (!daily_path) /*???????? */ return; sprintf(daily_path, "%s/daily.cvd", cl_retdbdir()); if(stat(daily_path,&daily_stat) != 0){ /* if the clamav_lib_path/daily.cvd does not exists */ sprintf(daily_path, "%s/daily.cld", cl_retdbdir()); if(stat(daily_path,&daily_stat) != 0){ /* else try to use the clamav_lib_path/daily.inc/daly.info file instead" */ sprintf(daily_path, "%s/daily.inc/daily.info", cl_retdbdir()); } } if ((d1 = cl_cvdhead(daily_path))) { version = d1->version; free(d1); } free(daily_path); s1 = (char *) cl_retver(); s2 = str_version; while (*s1 != '\0' && s2 - str_version < 64) { if (*s1 != '.') { *s2 = *s1; s2++; } s1++; } *s2 = '\0'; /*cfg_version maybe must set by user when he is changing the srv_clamav configuration.... */ snprintf(istag, SERVICE_ISTAG_SIZE, "-%.3d-%s-%d%d", cfg_version, str_version, cl_retflevel(), version); istag[SERVICE_ISTAG_SIZE] = '\0'; ci_service_set_istag(srv_xdata, istag); }
int threadjobsendfiles() { ci_request_t *req; ci_connection_t *conn; int port = 1344; int indx, keepalive, ret; int arand; while (1) { if (!(conn = ci_client_connect_to(servername, port, AF_INET))) { ci_debug_printf(1, "Failed to connect to icap server.....\n"); exit(-1); } req = ci_client_request(conn, servername, service); req->type = ICAP_RESPMOD; req->preview = 512; for (;;) { ci_thread_mutex_lock(&filemtx); indx = file_indx; if (file_indx == (FILES_NUMBER - 1)) file_indx = 0; else file_indx++; ci_thread_mutex_unlock(&filemtx); keepalive = 0; if ((ret = do_file(req, FILES[indx], &keepalive)) <= 0) { ci_thread_mutex_lock(&statsmtx); if (ret == 0) soft_failed_requests_stats++; else failed_requests_stats++; ci_thread_mutex_unlock(&statsmtx); printf("Request failed...\n"); break; } ci_thread_mutex_lock(&statsmtx); requests_stats++; arand = rand(); /*rand is not thread safe .... */ ci_thread_mutex_unlock(&statsmtx); if (_THE_END) { printf("The end: thread dying\n"); ci_request_destroy(req); return 0; } if (keepalive == 0) break; arand = (int) ((((double) arand) / (double) RAND_MAX) * 10.0); if (arand == 5 || arand == 7 || arand == 3) { // 30% possibility .... // printf("OK, closing the connection......\n"); break; } // sleep(1); usleep(500000); // printf("Keeping alive connection\n"); ci_client_request_reuse(req); } ci_hard_close(conn->fd); ci_request_destroy(req); usleep(1000000); } }