/* Release the block identified by p_mem back to the pool.
 * Prints an error and does nothing when the ID is invalid or the block
 * is still locked.  The entry-index table is kept sorted by position,
 * so the freed entry's slot is removed by shifting the tail of the
 * table down one position.
 * NOTE: the mutex is taken unconditionally at the top, so every exit
 * path must mt_unlock() first. */
void PoolAllocator::free(ID p_mem) {
	mt_lock();
	Entry *e = get_entry(p_mem);
	if (!e) {
		mt_unlock();
		ERR_PRINT("!e");
		return;
	}
	if (e->lock) {
		// refuse to free a block that is currently locked (in use)
		mt_unlock();
		ERR_PRINT("e->lock");
		return;
	}

	EntryIndicesPos entry_indices_pos;

	bool index_found = find_entry_index(&entry_indices_pos, e);
	if (!index_found) {
		mt_unlock();
		ERR_FAIL_COND(!index_found);
	}

	// close the gap left in the sorted index table
	for (int i = entry_indices_pos; i < (entry_count - 1); i++) {
		entry_indices[i] = entry_indices[i + 1];
	}

	entry_count--;
	// account with the aligned size, mirroring what alloc() subtracted
	free_mem += aligned(e->len);
	e->clear();

	mt_unlock();
}
/* Decrease the lock count of the block identified by p_mem.
 * No-op when the allocator was created without locking support. */
void PoolAllocator::unlock(ID p_mem) {
	if (!needs_locking)
		return;
	mt_lock();
	Entry *e = get_entry(p_mem);
	if (!e) {
		// BUGFIX: the entry pointer was dereferenced without validation,
		// crashing on an invalid ID; fail the same way lock() does.
		mt_unlock();
		ERR_PRINT("!e");
		return;
	}
	if (e->lock == 0) {
		// unbalanced unlock: never locked (or already fully unlocked)
		mt_unlock();
		ERR_PRINT("e->lock == 0");
		return;
	}
	e->lock--;
	mt_unlock();
}
Error PoolAllocator::lock(ID p_mem) { if (!needs_locking) return OK; mt_lock(); Entry *e = get_entry(p_mem); if (!e) { mt_unlock(); ERR_PRINT("!e"); return ERR_INVALID_PARAMETER; } e->lock++; mt_unlock(); return OK; }
/* Pike glue: detach this object's whole log queue and return it to the
 * caller as an array of log-entry values.
 *
 * The queue is unhooked under log_lock in one step, then walked and
 * pushed without the lock held, so new entries can be appended
 * concurrently while we format the old ones.
 *
 * FIX: the loop-local "next entry" pointer was named `l`, shadowing the
 * outer `struct log *l`; renamed to `next` for clarity (no behavior
 * change). */
void f_aap_log_as_array(INT32 args)
{
  struct log_entry *le;
  struct log *l = LTHIS->log;
  int n = 0;

  pop_n_elems(args);

  /* Take ownership of the whole list atomically. */
  mt_lock( &l->log_lock );
  le = l->log_head;
  l->log_head = l->log_tail = 0;
  mt_unlock( &l->log_lock );

  while(le)
  {
    struct log_entry *next;
    n++;
    push_log_entry(le);   /* pushes a Pike value for this entry */
    next = le->next;      /* save before the entry is freed */
    free_log_entry(le);
    le = next;
  }

  /* Collapse the n pushed values into a single array. */
  f_aggregate(n);
}
/* Append one entry describing a finished request to arg->log.
 * The entry and a copy of the raw request head are carved from a single
 * allocation: new_log_entry() reserves payload space directly after the
 * struct, and the string fields below are pointers into that payload. */
void aap_log_append(int sent, struct args *arg, int reply)
{
  struct log *l = arg->log;
  /* we do not include the body, only the headers et al.. */
  /* NOTE(review): the payload is sized body_start-3 but only
   * body_start-4 bytes are copied below -- presumably one spare byte
   * (terminator?); confirm against new_log_entry(). */
  struct log_entry *le=new_log_entry(arg->res.body_start-3);
  /* payload area starts immediately after the struct itself */
  char *data_to=((char *)le)+sizeof(struct log_entry);

  le->t = aap_get_time();
  le->sent_bytes = sent;
  le->reply = reply;
  le->received_bytes = arg->res.body_start + arg->res.content_len;

  /* copy the request head (everything before the body) */
  MEMCPY(data_to, arg->res.data, arg->res.body_start-4);
  le->raw.str = data_to;
  le->raw.len = arg->res.body_start-4;
  /* url points into the copied head at the same offset it had in the
   * original buffer */
  le->url.str = (data_to + (size_t)(arg->res.url-arg->res.data));
  le->url.len = arg->res.url_len;
  le->from = arg->from;
  /* the method is the first token of the request line */
  le->method.str = data_to;
  le->method.len = arg->res.method_len;
  le->protocol = arg->res.protocol;
  le->next = 0;

  /* link the entry onto the tail of the queue under the log lock */
  mt_lock( &l->log_lock );
  if(l->log_head)
  {
    l->log_tail->next = le;
    l->log_tail = le;
  }
  else
  {
    l->log_head = le;
    l->log_tail = le;
  }
  mt_unlock( &l->log_lock );
}
/* Return whether the block identified by p_mem currently holds at least
 * one lock.  Always false when locking support is disabled; also false
 * (with an error print) for an invalid ID. */
bool PoolAllocator::is_locked(ID p_mem) const {
	if (!needs_locking)
		return false;
	mt_lock();
	// get_entry() is non-const but the lookup does not mutate the
	// allocator; use const_cast instead of the old C-style cast so the
	// const-stripping is explicit and greppable.
	const Entry *e = const_cast<PoolAllocator *>(this)->get_entry(p_mem);
	if (!e) {
		mt_unlock();
		ERR_PRINT("!e");
		return false;
	}
	bool locked = e->lock;
	mt_unlock();
	return locked;
}
/* Return the requested (unaligned) size in bytes of the block
 * identified by p_mem, or 0 when the ID is invalid. */
int PoolAllocator::get_size(ID p_mem) const {
	mt_lock();

	const Entry *entry = get_entry(p_mem);
	if (entry == NULL) {
		mt_unlock();
		ERR_PRINT("!e");
		return 0;
	}

	int size = entry->len;
	mt_unlock();
	return size;
}
/* Return a raw pointer into the pool for the block identified by p_mem,
 * or NULL on failure.  With locking enabled the block must currently be
 * locked (pinned) and its position is validated against the pool size;
 * without locking the entry is merely resolved and dereferenced. */
void *PoolAllocator::get(ID p_mem) {
	if (!needs_locking) {
		Entry *e = get_entry(p_mem);
		if (!e) {
			ERR_FAIL_COND_V(!e, NULL);
		}
		return &pool[e->pos];
	}

	mt_lock();

	Entry *e = get_entry(p_mem);
	if (!e) {
		mt_unlock();
		ERR_FAIL_COND_V(!e, NULL);
	}
	if (e->lock == 0) {
		// an unlocked block may be moved by compaction; refuse access
		mt_unlock();
		ERR_PRINT("e->lock == 0");
		return NULL;
	}
	if (e->pos < 0 || (int)e->pos >= pool_size) {
		mt_unlock();
		ERR_PRINT("e->pos<0 || e->pos>=pool_size");
		return NULL;
	}

	void *result = &pool[e->pos];
	mt_unlock();
	return result;
}
/* Allocate an args structure for a new connection, preferring a
 * recycled one from the freelist.  The global counter and the freelist
 * are both protected by arg_lock. */
struct args *new_args(void)
{
  struct args *res;

  mt_lock( &arg_lock );
  num_args++;
  res = next_free_arg ? free_arg_list[ --next_free_arg ]
                      : aap_malloc( sizeof( struct args ) );
  mt_unlock( &arg_lock );

  return res;
}
/* Release an args structure: free its response buffer, close its fd,
 * and return it to the freelist (capped at 100 entries) or free it.
 *
 * BUGFIX: num_args-- was executed outside the arg_lock critical
 * section, racing with the locked num_args++ in new_args(); the
 * decrement now happens under the same lock as the freelist update. */
void free_args( struct args *arg )
{
  if( arg->res.data )
    aap_free( arg->res.data );
  if( arg->fd )
    fd_close( arg->fd );

  mt_lock( &arg_lock );
  num_args--;
  if( next_free_arg < 100 )
    free_arg_list[ next_free_arg++ ] = arg;
  else
    aap_free(arg);
  mt_unlock( &arg_lock );
}
/* Pike glue: push the number of entries currently queued in this
 * object's log (0 when no log is attached).
 *
 * BUGFIX: the old code started the counter at 1 and immediately
 * dereferenced l->log_head->next, crashing on an empty log (NULL
 * head); it now counts every node from zero and returns 0 safely. */
void f_aap_log_size(INT32 args)
{
  int n = 0;
  struct log *l = LTHIS->log;
  struct log_entry *le;

  if(!l)
  {
    push_int(0);
    return;
  }

  mt_lock( &l->log_lock );
  for( le = l->log_head; le; le = le->next )
    n++;
  mt_unlock( &l->log_lock );

  push_int(n);
}
/* Worker-thread body: repeatedly prints and increments the shared
 * counter under `lock`, pausing 100 ms between rounds, until the
 * shared `quit` flag is observed set.  The thread argument is expected
 * to be the tag (void*)1. */
void cos_func(void* arg)
{
  assert(arg == (void*)1);

  for (;;) {
    mt_lock(lock);
    printf("Cos %d\n", counter);
    ++counter;
    mt_unlock(lock);

    if (quit)
      break;

    mt_sleep(100);
  }
}
/* Backend callback: drain the global `request` queue, wrapping each
 * queued connection in a fresh Pike request object and invoking the
 * request's Pike callback with (object, args).
 * The callback-struct parameters are unused here. */
static void finished_p(struct callback *foo, void *b, void *c)
{
  extern void f_low_aap_reqo__init( struct c_request_object * );

  aap_clean_cache();

  /* NOTE(review): `request` is tested outside queue_mutex and popped
   * under it -- looks intentional (only this backend pops), but verify
   * against the producers in the accept path. */
  while(request)
  {
    struct args *arg;
    struct object *o;
    struct c_request_object *obj;

    /* pop one queued connection under the lock */
    mt_lock(&queue_mutex);
    arg = request;
    request = arg->next;
    mt_unlock(&queue_mutex);

    o = clone_object( request_program, 0 ); /* see requestobject.c */
    /* NOTE(review): get_storage() result is not NULL-checked before
     * MEMSET; presumably the clone always carries that storage. */
    obj = (struct c_request_object *)get_storage(o, c_request_program );
    MEMSET(obj, 0, sizeof(struct c_request_object));
    obj->request = arg;
    obj->done_headers = allocate_mapping( 20 );
    obj->misc_variables = allocate_mapping( 40 );

    f_low_aap_reqo__init( obj );

    /* arguments for the Pike callback: the request object + saved args */
    push_object( o );
    assign_svalue_no_free(sp++, &arg->args);

/*     { */
/*       JMP_BUF recovery; */

/*       free_svalue(& throw_value); */
/*       mark_free_svalue (&throw_value); */

/*       if(SETJMP(recovery)) */
/*       { */
/*       } */
/*       else */
/*       { */
    apply_svalue(&arg->cb, 2);
/*       } */
/*     } */
    pop_stack();
  }
}
/* Resize the block identified by p_mem to p_new_size bytes, in place
 * when possible, otherwise by compacting neighbouring blocks to make
 * room.  The block must not be locked.  Returns OK on success,
 * ERR_INVALID_PARAMETER / ERR_ALREADY_IN_USE / ERR_OUT_OF_MEMORY /
 * ERR_BUG on the respective failures.
 * NOTE: free-memory accounting is done in aligned units; e->len always
 * stores the unaligned requested size. */
Error PoolAllocator::resize(ID p_mem, int p_new_size) {
	mt_lock();
	Entry *e = get_entry(p_mem);

	if (!e) {
		mt_unlock();
		ERR_FAIL_COND_V(!e, ERR_INVALID_PARAMETER);
	}

	if (needs_locking && e->lock) {
		// a locked block is pinned; its data cannot be moved or resized
		mt_unlock();
		ERR_FAIL_COND_V(e->lock, ERR_ALREADY_IN_USE);
	}

	int alloc_size = aligned(p_new_size);

	if (aligned(e->len) == alloc_size) {
		// same aligned footprint: only the bookkept length changes
		e->len = p_new_size;
		mt_unlock();
		return OK;
	} else if (e->len > (uint32_t)p_new_size) {
		// shrinking: give the saved aligned space back to the pool
		free_mem += aligned(e->len);
		free_mem -= alloc_size;
		e->len = p_new_size;
		mt_unlock();
		return OK;
	}

	//p_new_size = align(p_new_size)
	int _free = free_mem; // - static_area_size;

	// not enough total free space even counting what this block returns
	if ((_free + aligned(e->len)) - alloc_size < 0) {
		mt_unlock();
		ERR_FAIL_V(ERR_OUT_OF_MEMORY);
	};

	EntryIndicesPos entry_indices_pos;

	bool index_found = find_entry_index(&entry_indices_pos, e);

	if (!index_found) {
		mt_unlock();
		ERR_FAIL_COND_V(!index_found, ERR_BUG);
	}

	//no need to move stuff around, it fits before the next block
	int next_pos;
	if (entry_indices_pos + 1 == entry_count) {
		// last block: limited only by the end of the pool
		next_pos = pool_size; // - static_area_size;
	} else {
		next_pos = entry_array[entry_indices[entry_indices_pos + 1]].pos;
	};

	if ((next_pos - e->pos) > alloc_size) {
		free_mem += aligned(e->len);
		e->len = p_new_size;
		free_mem -= alloc_size;
		mt_unlock();
		return OK;
	}

	//it doesn't fit, compact around BEFORE current index (make room behind)
	compact(entry_indices_pos + 1);

	if ((next_pos - e->pos) > alloc_size) {
		//now fits! hooray!
		free_mem += aligned(e->len);
		e->len = p_new_size;
		free_mem -= alloc_size;
		mt_unlock();
		if (free_mem < free_mem_peak)
			free_mem_peak = free_mem;
		return OK;
	}

	//STILL doesn't fit, compact around AFTER current index (make room after)
	compact_up(entry_indices_pos + 1);

	if ((entry_array[entry_indices[entry_indices_pos + 1]].pos - e->pos) > alloc_size) {
		//now fits! hooray!
		free_mem += aligned(e->len);
		e->len = p_new_size;
		free_mem -= alloc_size;
		mt_unlock();
		if (free_mem < free_mem_peak)
			free_mem_peak = free_mem;
		return OK;
	}

	mt_unlock();
	ERR_FAIL_V(ERR_OUT_OF_MEMORY);
}
void aap_handle_connection(struct args *arg) { char *buffer, *p, *tmp; ptrdiff_t pos, buffer_len; #ifdef HAVE_TIMEOUTS int *timeout = NULL; #endif start: pos=0; buffer_len=8192; if(arg->res.data && arg->res.data_len > 0) { p = buffer = arg->res.data; buffer_len = MAXIMUM(arg->res.data_len,8192); arg->res.data=0; } else p = buffer = aap_malloc(8192); if(arg->res.leftovers && arg->res.leftovers_len) { if(!buffer) { perror("AAP: Failed to allocate buffer (leftovers)"); failed(arg); return; } buffer_len = arg->res.leftovers_len; MEMCPY(buffer, arg->res.leftovers, arg->res.leftovers_len); pos = arg->res.leftovers_len; arg->res.leftovers=0; if((tmp = my_memmem("\r\n\r\n", 4, buffer, pos))) goto ok; p += arg->res.leftovers_len; } if(!buffer) { perror("AAP: Failed to allocate buffer"); failed(arg); return; } #ifdef HAVE_TIMEOUTS if( arg->timeout ) timeout = aap_add_timeout_thr(th_self(), arg->timeout); while( !timeout || !(*timeout) ) #else while(1) #endif /* HAVE_TIMEOUTS */ { ptrdiff_t data_read = fd_read(arg->fd, p, buffer_len-pos); if(data_read <= 0) { #ifdef AAP_DEBUG fprintf(stderr, "AAP: Read error/eof.\n"); #endif /* AAP_DEBUG */ arg->res.data = buffer; free_args( arg ); #ifdef HAVE_TIMEOUTS if( timeout ) { aap_remove_timeout_thr( timeout ); timeout=NULL; } #endif return; } pos += data_read; if((tmp = my_memmem("\r\n\r\n", 4, MAXIMUM(p-3, buffer), data_read+(p-3>buffer?3:0)))) goto ok; p += data_read; if(pos >= buffer_len) { buffer_len *= 2; if(buffer_len > MAXLEN) break; buffer = realloc(buffer, buffer_len); p = buffer+pos; if(!buffer) { perror("AAP: Failed to allocate memory (reading)"); break; } } } arg->res.data = buffer; failed( arg ); #ifdef HAVE_TIMEOUTS if( timeout ) { aap_remove_timeout_thr( timeout ); timeout=NULL; } #endif return; ok: #ifdef HAVE_TIMEOUTS if (timeout) { aap_remove_timeout_thr( timeout ); timeout=NULL; } #endif /* HAVE_TIMEOUTS */ arg->res.body_start = (tmp+4)-buffer; arg->res.data = buffer; arg->res.data_len = pos; switch(parse(arg)) { 
case 1: mt_lock(&queue_mutex); if(!request) { request = last = arg; arg->next = 0; } else { last->next = arg; last = arg; arg->next = 0; } mt_unlock(&queue_mutex); wake_up_backend(); return; case -1: goto start; case 0: ; } }
/* Allocate p_size bytes from the pool and return an ID encoding both
 * the entry-array slot and a rolling check value (so stale IDs are
 * detected), or POOL_ALLOCATOR_INVALID_ID on failure.  May compact the
 * pool to coalesce free space when no suitable hole exists. */
PoolAllocator::ID PoolAllocator::alloc(int p_size) {
	ERR_FAIL_COND_V(p_size < 1, POOL_ALLOCATOR_INVALID_ID);
#ifdef DEBUG_ENABLED
	// break into the debugger before the normal failure path below
	if (p_size > free_mem) OS::get_singleton()->debug_break();
#endif
	ERR_FAIL_COND_V(p_size > free_mem, POOL_ALLOCATOR_INVALID_ID);

	mt_lock();

	if (entry_count == entry_max) {
		mt_unlock();
		ERR_PRINT("entry_count==entry_max");
		return POOL_ALLOCATOR_INVALID_ID;
	}

	int size_to_alloc = aligned(p_size);

	EntryIndicesPos new_entry_indices_pos;

	if (!find_hole(&new_entry_indices_pos, size_to_alloc)) {
		/* No hole could be found, try compacting mem */
		compact();
		/* Then search again */

		if (!find_hole(&new_entry_indices_pos, size_to_alloc)) {
			mt_unlock();
			ERR_PRINT("memory can't be compacted further");
			return POOL_ALLOCATOR_INVALID_ID;
		}
	}

	EntryArrayPos new_entry_array_pos;

	bool found_free_entry = get_free_entry(&new_entry_array_pos);

	if (!found_free_entry) {
		mt_unlock();
		ERR_FAIL_COND_V(!found_free_entry, POOL_ALLOCATOR_INVALID_ID);
	}

	/* move all entry indices up, make room for this one */
	for (int i = entry_count; i > new_entry_indices_pos; i--) {
		entry_indices[i] = entry_indices[i - 1];
	}

	entry_indices[new_entry_indices_pos] = new_entry_array_pos;

	entry_count++;

	Entry &entry = entry_array[entry_indices[new_entry_indices_pos]];

	entry.len = p_size;
	entry.pos = (new_entry_indices_pos == 0) ? 0 : entry_end(entry_array[entry_indices[new_entry_indices_pos - 1]]); //alloc either at begining or end of previous
	entry.lock = 0;
	// rolling check value lets get_entry() reject IDs from freed slots
	entry.check = (check_count++) & CHECK_MASK;
	free_mem -= size_to_alloc;
	if (free_mem < free_mem_peak)
		free_mem_peak = free_mem;

	// ID = slot index in the high bits, check value in the low bits
	ID retval = (entry_indices[new_entry_indices_pos] << CHECK_BITS) | entry.check;

	mt_unlock();

	//ERR_FAIL_COND_V( (uintptr_t)get(retval)%align != 0, retval );

	return retval;
}
/* Pike glue: drain this object's log queue and write each entry in
 * Common Log Format to the file object passed on the stack; pushes the
 * number of entries written.
 *
 * BUGFIX: the month-name table listed "Oct" before "Sep", so every
 * September date was logged as "Oct" and vice versa; the table is now
 * in calendar order.
 * Also renamed the loop-local next-entry pointer (was `l`, shadowing
 * the `struct log *l` above). */
void f_aap_log_as_commonlog_to_file(INT32 args)
{
  struct log_entry *le;
  struct log *l = LTHIS->log;
  int n = 0;
  int mfd, ot=0;
  struct object *f;
  struct tm tm;
  FILE *foo;
  static const char *month[] = {
    "Jan", "Feb", "Mar", "Apr", "May", "Jun",
    "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
  };

  get_all_args("log_as_commonlog_to_file", args, "%o", &f);
  f->refs++;
  pop_n_elems(args);

  /* borrow the file object's descriptor and buffer it with stdio */
  apply(f, "query_fd", 0);
  mfd = fd_dup(sp[-1].u.integer);
  if(mfd < 1)
    Pike_error("Bad fileobject to ->log_as_commonlog_to_file\n");
  pop_stack();
  foo = fdopen( mfd, "w" );
  if(!foo)
    Pike_error("Bad fileobject to ->log_as_commonlog_to_file\n");

  THREADS_ALLOW();

  /* Detach the whole queue under the lock; format it unlocked. */
  mt_lock( &l->log_lock );
  le = l->log_head;
  l->log_head = l->log_tail = 0;
  mt_unlock( &l->log_lock );

  while(le)
  {
    int i;
    struct tm *tm_p;
    struct log_entry *next = le->next; /* saved before le is freed */

    /* remotehost rfc931 authuser [date] "request" status bytes */

    if(le->t != ot)
    {
      /* only re-convert the timestamp when it changed */
      time_t t = (time_t)le->t;
#ifdef HAVE_GMTIME_R
      gmtime_r( &t, &tm );
#else
#ifdef HAVE_GMTIME
      tm_p = gmtime( &t ); /* This will break if two threads run
                              gmtime() at once. */
#else
#ifdef HAVE_LOCALTIME
      tm_p = localtime( &t ); /* This will break if two threads run
                                 localtime() at once. */
#endif
#endif
      if (tm_p) tm = *tm_p;
#endif
      ot = le->t;
    }

    /* date format: [03/Feb/1998:23:08:20 +0000] */
    /* GET [URL] HTTP/1.0 */
    /* terminate the request line at the first CR (skip the method) */
    for(i=13; i<le->raw.len; i++)
      if(le->raw.str[i] == '\r')
      {
        le->raw.str[i] = 0;
        break;
      }

#ifdef HAVE_INET_NTOP
    if(SOCKADDR_FAMILY(le->from) != AF_INET)
    {
      char buffer[64];
      fprintf(foo,
              "%s - %s [%02d/%s/%d:%02d:%02d:%02d +0000] \"%s\" %d %ld\n",
              inet_ntop(SOCKADDR_FAMILY(le->from),
                        SOCKADDR_IN_ADDR(le->from),
                        buffer, sizeof(buffer)), /* hostname */
              "-", /* remote-user */
              tm.tm_mday, month[tm.tm_mon], tm.tm_year+1900,
              tm.tm_hour, tm.tm_min, tm.tm_sec, /* date */
              le->raw.str, /* request line */
              le->reply, /* reply code */
              DO_NOT_WARN((long)le->sent_bytes)); /* bytes transfered */
    }
    else
#endif /* HAVE_INET_NTOP */
    fprintf(foo,
            "%d.%d.%d.%d - %s [%02d/%s/%d:%02d:%02d:%02d +0000] \"%s\" %d %ld\n",
            ((unsigned char *)&le->from.ipv4.sin_addr)[ 0 ],
            ((unsigned char *)&le->from.ipv4.sin_addr)[ 1 ],
            ((unsigned char *)&le->from.ipv4.sin_addr)[ 2 ],
            ((unsigned char *)&le->from.ipv4.sin_addr)[ 3 ], /* hostname */
            "-", /* remote-user */
            tm.tm_mday, month[tm.tm_mon], tm.tm_year+1900,
            tm.tm_hour, tm.tm_min, tm.tm_sec, /* date */
            le->raw.str, /* request line */
            le->reply, /* reply code */
            DO_NOT_WARN((long)le->sent_bytes)); /* bytes transfered */

    free_log_entry( le );
    n++;
    le = next;
  }

  fclose(foo);
  /* NOTE(review): fclose() already closes the descriptor adopted by
   * fdopen(), so this fd_close(mfd) looks like a double close; kept
   * as-is in case fd_close() also does fd bookkeeping -- verify. */
  fd_close(mfd);
  THREADS_DISALLOW();
  push_int(n);
}