static int deadpool_init()
{
    if (!pool) {
        get_environment();
        get_config();
        if (config->tordns_enabled) {
            pool = init_pool(
                config->tordns_cache_size,
                config->tordns_deadpool_range->localip,
                config->tordns_deadpool_range->localnet,
                config->defaultserver.address,
                config->defaultserver.port
            );
            if (!pool) {
                show_msg(MSGERR, "failed to initialize deadpool: tordns disabled\n");
            }
        }
    }
    return 0;
}
static int l_repos_create (lua_State *L)
{
    const char *path = luaL_checkstring (L, 1);
    svn_repos_t *repos_p;
    apr_pool_t *pool;
    svn_error_t *err;

    if (init_pool (&pool) != 0) {
        return init_pool_error (L);
    }

    path = svn_path_canonicalize (path, pool);

    err = svn_repos_create (&repos_p, path, NULL, NULL, NULL, NULL, pool);
    IF_ERROR_RETURN (err, pool, L);

    return 0;
}
void SimQueueThreadStateInit(SimQueueStruct *queue, SimQueueThreadState *th_state, int pid)
{
    TVEC_SET_ZERO(&th_state->mask);
    TVEC_SET_ZERO(&th_state->my_enq_bit);
    TVEC_SET_ZERO(&th_state->enq_toggle);
    TVEC_REVERSE_BIT(&th_state->my_enq_bit, pid);
    TVEC_SET_BIT(&th_state->mask, pid);
    th_state->enq_toggle = TVEC_NEGATIVE(th_state->mask);

    init_pool(&th_state->pool_node, sizeof(Node));

    TVEC_SET_ZERO(&th_state->mask);
    TVEC_SET_ZERO(&th_state->my_deq_bit);
    TVEC_SET_ZERO(&th_state->deq_toggle);
    TVEC_REVERSE_BIT(&th_state->my_deq_bit, pid);
    TVEC_SET_BIT(&th_state->mask, pid);
    th_state->deq_toggle = TVEC_NEGATIVE(th_state->mask);

    th_state->deq_local_index = 0;
    th_state->enq_local_index = 0;
    th_state->backoff = 1;
    th_state->mybank = TVEC_GET_BANK_OF_BIT(pid);
}
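/*
 * Several snippets in this collection call init_pool(&pool, sizeof(Node)) to
 * set up a per-thread allocator of fixed-size nodes. The real implementations
 * live in the respective codebases; the following is only a minimal sketch,
 * assuming a bump-pointer pool that grows one malloc'd chunk at a time. The
 * names SketchPool, sketch_init_pool and sketch_alloc_obj are hypothetical.
 */
#include <stdlib.h>

#define SKETCH_CHUNK_OBJECTS 1024

typedef struct SketchPool {
    char  *chunk;     /* current chunk of raw memory */
    size_t obj_size;  /* size of one object */
    size_t next;      /* index of the next free object in the chunk */
} SketchPool;

static void sketch_init_pool(SketchPool *p, size_t obj_size)
{
    p->obj_size = obj_size;
    p->chunk = malloc(obj_size * SKETCH_CHUNK_OBJECTS);
    p->next = 0;
}

static void *sketch_alloc_obj(SketchPool *p)
{
    if (p->next == SKETCH_CHUNK_OBJECTS) {  /* chunk exhausted: grab a new one */
        p->chunk = malloc(p->obj_size * SKETCH_CHUNK_OBJECTS);
        p->next = 0;
    }
    return p->chunk + p->obj_size * p->next++;
}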
/* See _gcry_secmem_init.  This function is expected to be called with
   the secmem lock held. */
static void
secmem_init (size_t n)
{
  if (!n)
    {
#ifdef USE_CAPABILITIES
      /* drop all capabilities */
      {
        cap_t cap;

        cap = cap_from_text ("all-eip");
        cap_set_proc (cap);
        cap_free (cap);
      }
#elif !defined(HAVE_DOSISH_SYSTEM)
      uid_t uid;

      disable_secmem = 1;
      uid = getuid ();
      if (uid != geteuid ())
        {
          if (setuid (uid) || getuid () != geteuid () || !setuid (0))
            log_fatal ("failed to drop setuid\n");
        }
#endif
    }
  else
    {
      if (n < MINIMUM_POOL_SIZE)
        n = MINIMUM_POOL_SIZE;
      if (! pool_okay)
        {
          init_pool (n);
          lock_pool (pool, n);
        }
      else
        log_error ("Oops, secure memory pool already initialized\n");
    }
}
/*
 * test_open -- test case for opening a remote pool
 */
static int
test_open(const struct test_case *tc, int argc, char *argv[])
{
    if (argc < 5)
        UT_FATAL("usage: test_open <id> <pool set> "
                 "<target> <pool> <size>");

    const char *id_str = argv[0];
    const char *pool_set = argv[1];
    const char *target = argv[2];
    const char *pool_path = argv[3];
    const char *size_str = argv[4];

    int id = atoi(id_str);
    UT_ASSERT(id >= 0 && id < MAX_IDS);
    struct pool_entry *pool = &pools[id];
    UT_ASSERTeq(pool->rpp, NULL);

    unsigned nlanes = NLANES;

    init_pool(pool, pool_path, size_str);

    struct rpmem_pool_attr pool_attr;
    pool->rpp = rpmem_open(target, pool_set, pool->pool, pool->size,
            &nlanes, &pool_attr);

    if (pool->rpp) {
        check_pool_attr(&pool_attr);
        UT_ASSERTne(nlanes, 0);
        UT_OUT("%s: opened", pool_set);
    } else {
        UT_OUT("!%s", pool_set);
        free_pool(pool);
    }

    /* five arguments consumed */
    return 5;
}
/* Initialize the secure memory system.  If running with the necessary
   privileges, the secure memory pool will be locked into the core in
   order to prevent page-outs of the data.  Furthermore allocated secure
   memory will be wiped out when released.  */
void
_gcry_secmem_init (size_t n)
{
  SECMEM_LOCK;

  if (!n)
    {
#ifdef USE_CAPABILITIES
      /* drop all capabilities */
      cap_set_proc (cap_from_text ("all-eip"));
#elif !defined(HAVE_DOSISH_SYSTEM)
      uid_t uid;

      disable_secmem = 1;
      uid = getuid ();
      if (uid != geteuid ())
        {
          if (setuid (uid) || getuid () != geteuid () || !setuid (0))
            log_fatal ("failed to drop setuid\n");
        }
#endif
    }
  else
    {
      if (n < DEFAULT_POOL_SIZE)
        n = DEFAULT_POOL_SIZE;
      if (! pool_okay)
        {
          init_pool (n);
          lock_pool (pool, n);
        }
      else
        log_error ("Oops, secure memory pool already initialized\n");
    }

  SECMEM_UNLOCK;
}
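/*
 * In the two secmem variants above, lock_pool() is what pins the pool into
 * physical memory. The actual libgcrypt code handles capabilities, EPERM
 * fallbacks and platform quirks; this is only a minimal sketch of the core
 * idea, assuming POSIX mlock() is available. sketch_lock_pool is a
 * hypothetical name.
 */
#include <stdio.h>
#include <sys/mman.h>

static void
sketch_lock_pool (void *pool, size_t n)
{
  /* Prevent the secure memory pool from being paged out to swap,
     where wiped secrets could otherwise linger on disk.  */
  if (mlock (pool, n))
    perror ("locking secure memory pool failed");
}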
inline static void *Execute(void *Arg)
{
    long i;
    long rnum;
    long id = (long)Arg;
    volatile int j;

    fastRandomSetSeed(id + 1);
    init_pool(&pool_node, sizeof(ListNode));
    BarrierWait(&bar);
    if (id == 0)
        d1 = getTimeMillis();

    for (i = 0; i < RUNS; i++) {
        push((Object)1, id);
        rnum = fastRandomRange(1, MAX_WORK);
        for (j = 0; j < rnum; j++)
            ;                       /* simulate random local work */
        pop(id);
        rnum = fastRandomRange(1, MAX_WORK);
        for (j = 0; j < rnum; j++)
            ;
    }
    return NULL;
}
void *process_clients(void *arg)
{
    int ret;
    struct worker_data *mydata = (struct worker_data *)arg;
    struct context_pool *pool;
    struct epoll_event evt;
    struct epoll_event evts[EVENTS_PER_BATCH];
    int cpu_id;
    int ep_fd;
    int i;
    struct conn_context *ctx;

    if (enable_keepalive)
        http_response = http_200_keepalive;
    else
        http_response = http_200;

    http_response_len = strlen(http_response);

    ret = bind_process_cpu(mydata->cpu_id);
    if (ret < 0) {
        perror("Unable to bind worker on CPU");
        exit_cleanup();
    }

    /* Take the worker's CPU id up front; it is used for the per-CPU
       stats below even if no listen sockets get registered. */
    cpu_id = mydata->cpu_id;

    pool = init_pool(MAX_CONNS_PER_WORKER);

    if ((ep_fd = epoll_create(MAX_CONNS_PER_WORKER)) < 0) {
        perror("Unable to create epoll FD");
        exit_cleanup();
    }

    for (i = 0; i < la_num; i++) {
        ctx = alloc_context(pool);

        ctx->fd = la[i].listen_fd;
        ctx->handler = process_accept;
        ctx->cpu_id = cpu_id;
        ctx->ep_fd = ep_fd;

        evt.events = EPOLLIN | EPOLLHUP | EPOLLERR;
        evt.data.ptr = ctx;

        if (epoll_ctl(ctx->ep_fd, EPOLL_CTL_ADD, ctx->fd, &evt) < 0) {
            perror("Unable to add listen socket to epoll");
            exit_cleanup();
        }
    }

    wdata[cpu_id].polls_min = EVENTS_PER_BATCH;

    while (1) {
        int num_events;
        int i;
        int events;

        num_events = epoll_wait(ep_fd, evts, EVENTS_PER_BATCH, -1);
        if (num_events < 0) {
            if (errno == EINTR)
                continue;
            perror("epoll_wait() error");
        }

        /* track per-worker poll statistics */
        if (!num_events)
            wdata[cpu_id].polls_mpt++;
        else if (num_events < wdata[cpu_id].polls_min)
            wdata[cpu_id].polls_min = num_events;

        if (num_events > wdata[cpu_id].polls_max)
            wdata[cpu_id].polls_max = num_events;

        wdata[cpu_id].polls_sum += num_events;
        wdata[cpu_id].polls_cnt++;
        wdata[cpu_id].polls_avg = wdata[cpu_id].polls_sum / wdata[cpu_id].polls_cnt;
        wdata[cpu_id].polls_lst = num_events;

        for (i = 0; i < num_events; i++) {
            int active_fd;

            events = evts[i].events;
            ctx = evts[i].data.ptr;
            ctx->events = events;

            if (ctx->flags & PROXY_BACKEND_EVENT)
                active_fd = ctx->end_fd;
            else
                active_fd = ctx->fd;

            print_d("%dth event[0x%x] at fd %d\n", i, events, active_fd);
            ctx->handler(ctx);
        }
    }

    return NULL;
}
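/*
 * init_pool() and alloc_context() above come from the surrounding server
 * code and are not shown. A plausible shape, going only by the call sites
 * (one preallocated array of connection contexts per worker, handed out on
 * demand), is sketched below; the stand-in types and names prefixed with
 * sketch_ are hypothetical, and the real pool likely also recycles freed
 * contexts.
 */
#include <stdlib.h>

struct sketch_conn_context {    /* hypothetical stand-in for conn_context */
    int fd;
    int ep_fd;
    int cpu_id;
};

struct sketch_context_pool {
    struct sketch_conn_context *arr;  /* preallocated contexts */
    int total;                        /* capacity */
    int next;                         /* next never-used slot */
};

static struct sketch_context_pool *sketch_init_pool(int total)
{
    struct sketch_context_pool *p = malloc(sizeof(*p));

    p->arr = calloc(total, sizeof(struct sketch_conn_context));
    p->total = total;
    p->next = 0;
    return p;
}

static struct sketch_conn_context *sketch_alloc_context(struct sketch_context_pool *p)
{
    return p->next < p->total ? &p->arr[p->next++] : NULL;
}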
int main(int argc, char *argv[])
{
    int status;
    int opt;
    char *config_file = CONFIG;
    char *optstr = "vc:?";
    struct sockaddr_in cli_addr;
    socklen_t len_sock;
    struct epoll_event *ep_events;
    struct epoll_event event;

    /* need at least one command line argument */
    if (argc == 1) {
        print_usage();
        return 0;
    }

    /* parse the command line arguments */
    while ((opt = getopt(argc, argv, optstr)) != -1) {
        switch (opt) {
        case 'c':
            config_file = optarg;
            break;
        case 'v':
            printf("0.1VERSION\n");
            break;
        case '?':
            print_usage();
            return 0;
        }
    }

    sev_debug("config file: %s \n", config_file);

    /* read the config file and check for success */
    char buff[BUFLEN];
    as_conf *conf = (as_conf *)malloc(sizeof(as_conf));
    status = read_config(config_file, buff, conf, BUFLEN);
    if (status != OK) {
        log_err("read config file error !");
        exit(-1);
    }

    int listen_fd;
    listen_fd = creat_sockfd(9000);
    status = set_socket_nonblock(listen_fd);
    if (status != OK) {
        log_err("set sock fd nonblocking error!");
        exit(-1);
    }

    /* create the epoll fd and allocate the event array */
    int epfd = as_epoll_create(EPOLLSIZE);
    ep_events = (struct epoll_event *)malloc(EPOLLSIZE * sizeof(struct epoll_event));

    /* register the listen socket for edge-triggered input events */
    event.data.fd = listen_fd;
    event.events = EPOLLIN | EPOLLET;
    as_epoll_add(epfd, listen_fd, &event);

    /* initialize the thread pool;
       the pool uses the singleton pattern, pool is defined in the header */
    init_pool(10);

    /* main loop */
    while (1) {
        int n_eves;
        n_eves = as_epoll_wait(epfd, ep_events, MAXEVESZ, TIMEOUT);

        int i, cli_fd;
        for (i = 0; i < n_eves; i++) {
            cli_fd = ep_events[i].data.fd;
            if (listen_fd == cli_fd) {
                /* new client asking to be accepted:
                   accept, make non-blocking, add event */
                len_sock = sizeof(cli_addr);
                int fd = accept(listen_fd, (struct sockaddr *)&cli_addr, &len_sock);
                if (fd < 0) {
                    log_err("accept cli fd error ! \n");
                    break;
                }

                /* set fd non-blocking */
                status = set_socket_nonblock(fd);

                /* register the new client with epoll */
                event.data.fd = fd;
                event.events = EPOLLIN | EPOLLET;
                as_epoll_add(epfd, fd, &event);
            } else {
                /* an accepted client's data is ready for do_resquest,
                   or the connection needs to be closed */
                if ((ep_events[i].events & EPOLLERR) ||
                    (ep_events[i].events & EPOLLHUP) ||
                    !(ep_events[i].events & EPOLLIN)) {
                    close(cli_fd);
                    continue;
                }

                printf("new task client:%d \n", cli_fd);
                status = pool_addtask(do_resquest, &cli_fd);
                printf("task from client:%d status:%d \n", cli_fd, status);
            }
        }

        /* stop serving after a period without requests */
        if (!as_timer()) {
            log_info("long time no request, this server is about to go out of service!");
            break;
        }
    }

    destroy_pool(pool);
    return 0;
}
DirectResult
fusion_shm_pool_create( FusionWorld          *world,
                        const char           *name,
                        unsigned int          max_size,
                        bool                  debug,
                        FusionSHMPoolShared **ret_pool )
{
     int              i;
     DirectResult     ret;
     FusionSHM       *shm;
     FusionSHMShared *shared;

     D_MAGIC_ASSERT( world, FusionWorld );
     D_MAGIC_ASSERT( world->shared, FusionWorldShared );
     D_ASSERT( name != NULL );
     D_ASSERT( max_size > 0 );
     D_ASSERT( ret_pool != NULL );

     D_DEBUG_AT( Fusion_SHMPool, "%s( %p [%d], '%s', %d, %p, %sdebug )\n", __FUNCTION__, world,
                 world->shared->world_index, name, max_size, ret_pool, debug ? "" : "non-" );

#if !DIRECT_BUILD_DEBUGS
     debug = false;
#endif

     shm = &world->shm;
     D_MAGIC_ASSERT( shm, FusionSHM );

     shared = shm->shared;
     D_MAGIC_ASSERT( shared, FusionSHMShared );

     if (max_size < 8192) {
          D_ERROR( "Fusion/SHMPool: Maximum size (%d) should be 8192 at least!\n", max_size );
          return DR_INVARG;
     }

     ret = fusion_skirmish_prevail( &shared->lock );
     if (ret)
          return ret;     /* lock not acquired, don't dismiss it */

     if (shared->num_pools == FUSION_SHM_MAX_POOLS) {
          D_ERROR( "Fusion/SHMPool: Maximum number of pools (%d) already reached!\n",
                   FUSION_SHM_MAX_POOLS );
          ret = DR_LIMITEXCEEDED;
          goto error;
     }

     /* find the first inactive pool slot */
     for (i = 0; i < FUSION_SHM_MAX_POOLS; i++) {
          if (!shared->pools[i].active)
               break;

          D_MAGIC_ASSERT( &shared->pools[i], FusionSHMPoolShared );
          D_MAGIC_ASSUME( &shm->pools[i], FusionSHMPool );
     }

     D_ASSERT( i < FUSION_SHM_MAX_POOLS );

     D_DEBUG_AT( Fusion_SHMPool, " -> index %d\n", i );

     memset( &shm->pools[i], 0, sizeof(FusionSHMPool) );
     memset( &shared->pools[i], 0, sizeof(FusionSHMPoolShared) );

     shared->pools[i].index = i;

     ret = init_pool( shm, &shm->pools[i], &shared->pools[i], name, max_size, debug );
     if (ret)
          goto error;

     shared->num_pools++;

     fusion_skirmish_dismiss( &shared->lock );

     *ret_pool = &shared->pools[i];

     D_DEBUG_AT( Fusion_SHMPool, " -> %p\n", *ret_pool );

     return DR_OK;

error:
     fusion_skirmish_dismiss( &shared->lock );

     return ret;
}
/*
 * Do any per-module initialization that is separate to each
 * configured instance of the module. e.g. set up connections
 * to external databases, read configuration files, set up
 * dictionary entries, etc.
 *
 * If configuration information is given in the config section
 * that must be referenced in later calls, store a handle to it
 * in *instance otherwise put a null pointer there.
 *
 * Boyan:
 * Set up the hashes which we will use later,
 * parse the module and give it a chance to live
 *
 */
static int perl_instantiate(CONF_SECTION *conf, void **instance)
{
	PERL_INST *inst;
	HV *rad_reply_hv;
	HV *rad_check_hv;
	HV *rad_request_hv;
	HV *rad_request_proxy_hv;
	HV *rad_request_proxy_reply_hv;
	AV *end_AV;

	char *embed[4], *xlat_name;
	int exitstatus = 0, argc = 0;

	/*
	 *	Set up a storage area for instance data
	 */
	inst = rad_malloc(sizeof(PERL_INST));
	memset(inst, 0, sizeof(PERL_INST));

	/*
	 *	If the configuration parameters can't be parsed, then
	 *	fail.
	 */
	if (cf_section_parse(conf, inst, module_config) < 0) {
		free(inst);
		return -1;
	}

	embed[0] = NULL;
	if (inst->perl_flags) {
		embed[1] = inst->perl_flags;
		embed[2] = inst->module;
		embed[3] = "0";
		argc = 4;
	} else {
		embed[1] = inst->module;
		embed[2] = "0";
		argc = 3;
	}

#ifdef USE_ITHREADS
	if ((inst->perl = perl_alloc()) == NULL) {
		radlog(L_DBG, "rlm_perl: No memory for allocating new perl !");
		return (-1);
	}

	perl_construct(inst->perl);
	PL_perl_destruct_level = 2;

	{
		dTHXa(inst->perl);
	}
	PERL_SET_CONTEXT(inst->perl);
#else
	if ((inst->perl = perl_alloc()) == NULL) {
		radlog(L_ERR, "rlm_perl: No memory for allocating new perl !");
		return -1;
	}

	perl_construct(inst->perl);
#endif

#if PERL_REVISION >= 5 && PERL_VERSION >= 8
	PL_exit_flags |= PERL_EXIT_DESTRUCT_END;
#endif

	exitstatus = perl_parse(inst->perl, xs_init, argc, embed, NULL);

	end_AV = PL_endav;
	PL_endav = Nullav;

	if (!exitstatus) {
		exitstatus = perl_run(inst->perl);
	} else {
		radlog(L_ERR, "rlm_perl: perl_parse failed: %s not found or has syntax errors. \n",
		       inst->module);
		return (-1);
	}

	PL_endav = end_AV;

	newXS("radiusd::radlog", XS_radiusd_radlog, "rlm_perl.c");

	/* get_hv() with create == 1 fetches or creates the package hashes */
	rad_reply_hv = get_hv("RAD_REPLY", 1);
	rad_check_hv = get_hv("RAD_CHECK", 1);
	rad_request_hv = get_hv("RAD_REQUEST", 1);
	rad_request_proxy_hv = get_hv("RAD_REQUEST_PROXY", 1);
	rad_request_proxy_reply_hv = get_hv("RAD_REQUEST_PROXY_REPLY", 1);

	xlat_name = cf_section_name2(conf);
	if (xlat_name == NULL)
		xlat_name = cf_section_name1(conf);
	if (xlat_name) {
		inst->xlat_name = strdup(xlat_name);
		xlat_register(xlat_name, perl_xlat, inst);
	}

#ifdef USE_ITHREADS
	if ((init_pool(conf, inst)) == -1) {
		radlog(L_ERR, "Couldn't init a pool of perl clones. Exiting");
		return -1;
	}
#endif

	*instance = inst;

	return 0;
}
inline void LFStackThreadStateInit(LFStackThreadState *th_state, int min_back, int max_back)
{
    init_backoff(&th_state->backoff, min_back, max_back, 1);
    init_pool(&th_state->pool, sizeof(Node));
}
void CCQueueThreadStateInit(CCQueueStruct *object_struct, CCQueueThreadState *lobject_struct, int pid)
{
    CCSynchThreadStateInit(&lobject_struct->enqueue_thread_state, (int)pid);
    CCSynchThreadStateInit(&lobject_struct->dequeue_thread_state, (int)pid);
    init_pool(&pool_node, sizeof(Node));
}
int main(int argc, char *argv[])
{
    int sock, client_sock;
    socklen_t cli_size;
    struct sockaddr_in addr, cli_addr;
    static pool pool;

    if (argc != 4)
    {
        fprintf(stderr, "usage: %s <HTTP port> <log file> <www folder>\n", argv[0]);
        return EXIT_FAILURE;
    }

    http_port = atoi(argv[1]);
    log_file = argv[2];
    root = argv[3];

    if (write_log(DEBUG, "Server is starting...") == E_FILEOPEN)
    {
        fprintf(stderr, "Failed to start log engine... Closing the server\n");
        return EXIT_FAILURE;
    }

    fprintf(stdout, "----- Echo Server -----\n");

    /* all networked programs must create a socket */
    if ((sock = socket(PF_INET, SOCK_STREAM, 0)) == -1)
    {
        write_log(ERROR, "Failed creating socket... Closing the server...");
        return EXIT_FAILURE;
    }

    addr.sin_family = AF_INET;
    addr.sin_port = htons(http_port);
    addr.sin_addr.s_addr = INADDR_ANY;

    /* servers bind sockets to ports---notify the OS they accept connections */
    if (bind(sock, (struct sockaddr *) &addr, sizeof(addr)))
    {
        close_socket(sock);
        write_log(ERROR, "Failed binding socket... Closing the server...");
        return EXIT_FAILURE;
    }

    if (listen(sock, 5))
    {
        close_socket(sock);
        write_log(ERROR, "Failed listening on socket... Closing the server...");
        return EXIT_FAILURE;
    }

    init_pool(sock, &pool);

    /* finally, loop waiting for input and then write it back */
    while (1)
    {
        pool.ready_set = pool.read_set;
        if ((pool.nready = select(pool.maxfd + 1, &pool.ready_set,
                                  NULL, NULL, NULL)) == -1)
        {
            close_socket(sock);
            write_log(ERROR, "Failed selecting from pool... Closing the server...");
            return EXIT_FAILURE;
        }

        if (FD_ISSET(sock, &pool.ready_set))
        {
            cli_size = sizeof(cli_addr);
            if ((client_sock = accept(sock, (struct sockaddr *) &cli_addr,
                                      &cli_size)) == -1)
            {
                close_socket(sock);
                write_log(ERROR, "Failed accepting connection.. Closing the server...");
                return EXIT_FAILURE;
            }

            log_fp = fopen(log_file, "a");
            fprintf(log_fp, "%s\t%s\tAccepting a new client: %d\n",
                    current_time(), DEBUG, client_sock);
            fclose(log_fp);

            /* add the client to the pool */
            if (add_client(client_sock, &pool) != 0)
            {
                close_socket(sock);
                // TODO: send some error code to client??
                write_log(ERROR, "Failed adding client: Too many clients... Closing the server...");
                return EXIT_FAILURE;
            }
        }

        /* check each client and process the ready connected descriptors */
        if (check_clients(&pool) != 0)
        {
            close_socket(sock);
            return EXIT_FAILURE;
        }
    }

    close_socket(sock);
    return EXIT_SUCCESS;
}
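/*
 * The select()-based servers in this collection (this one, the Liso server
 * and the proxy further below) follow the classic "pool of connected
 * descriptors" pattern: init_pool() seeds the fd_set with the listen socket,
 * add_client() grows it, check_clients() walks the ready subset. The real
 * init_pool()/pool definitions live in each project; the following is only a
 * minimal sketch of that pattern, with hypothetical sketch_ names, assuming
 * the pool struct carries the fields the snippets touch.
 */
#include <sys/select.h>

#define SKETCH_MAX_CLIENTS FD_SETSIZE

typedef struct sketch_pool {
    int    maxfd;                          /* largest descriptor in read_set */
    fd_set read_set;                       /* all descriptors being watched */
    fd_set ready_set;                      /* subset ready after select() */
    int    nready;                         /* number of ready descriptors */
    int    clientfd[SKETCH_MAX_CLIENTS];   /* -1 marks a free slot */
} sketch_pool;

static void sketch_init_pool(int listenfd, sketch_pool *p)
{
    /* no connected clients yet; only the listen socket is watched */
    for (int i = 0; i < SKETCH_MAX_CLIENTS; i++)
        p->clientfd[i] = -1;

    p->maxfd = listenfd;
    FD_ZERO(&p->read_set);
    FD_SET(listenfd, &p->read_set);
}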
int main(int argc, char *argv[])
{
    int sock, s_sock, client_fd;
    socklen_t client_size;
    struct sockaddr_in addr, client_addr;
    struct timeval tv;
    static pool pool;
    sigset_t mask;

    if (argc != 9)
        usage_exit();

    // parse arguments
    STATE.port = (int)strtol(argv[1], (char **)NULL, 10);
    STATE.s_port = (int)strtol(argv[2], (char **)NULL, 10);
    strcpy(STATE.log_path, argv[3]);
    strcpy(STATE.lck_path, argv[4]);
    strcpy(STATE.www_path, argv[5]);
    strcpy(STATE.cgi_path, argv[6]);
    strcpy(STATE.key_path, argv[7]);
    strcpy(STATE.ctf_path, argv[8]);

    if (STATE.www_path[strlen(STATE.www_path)-1] == '/')
        STATE.www_path[strlen(STATE.www_path)-1] = '\0';

    daemonize();

    STATE.log = log_open(STATE.log_path);
    Log("Start Liso server. Server is running in background. \n");

    /* all networked programs must create a socket
     * PF_INET - IPv4 Internet protocols
     * SOCK_STREAM - sequenced, reliable, two-way, connection-based byte stream
     * 0 (protocol) - use default protocol */

    // create sock for HTTP connection
    if ((sock = socket(PF_INET, SOCK_STREAM, 0)) == -1)
    {
        Log("Error: failed creating socket for HTTP connection.\n");
        fclose(STATE.log);
        return EXIT_FAILURE;
    }
    STATE.sock = sock;
    Log("Create socket success: sock = %d \n", sock);

    addr.sin_family = AF_INET;
    addr.sin_port = htons(STATE.port);
    addr.sin_addr.s_addr = INADDR_ANY;

    /* servers bind sockets to ports---notify the OS they accept connections */
    if (bind(sock, (struct sockaddr *) &addr, sizeof(addr)))
    {
        Log("Error: failed binding socket.\n");
        clean();
        return EXIT_FAILURE;
    }
    Log("Bind success! \n");

    if (listen(sock, MAX_CONN))
    {
        Log("Error: listening on socket.\n");
        clean();
        return EXIT_FAILURE;
    }
    Log("Listen success! >>>>>>>>>>>>>>>>>>>> \n");

    // create sock for HTTPS connection
    Log("Create sock for HTTPS connection \n");
    if ((s_sock = socket(PF_INET, SOCK_STREAM, 0)) == -1)
    {
        Log("Error: failed creating socket for HTTPS connection.\n");
        close(sock);
        fclose(STATE.log);
        return EXIT_FAILURE;
    }
    STATE.s_sock = s_sock;
    Log("Create HTTPS socket success: sock = %d \n", s_sock);

    addr.sin_family = AF_INET;
    addr.sin_port = htons(STATE.s_port);
    addr.sin_addr.s_addr = INADDR_ANY;

    /* servers bind sockets to ports---notify the OS they accept connections */
    if (bind(s_sock, (struct sockaddr *) &addr, sizeof(addr)))
    {
        Log("Error: failed binding socket.\n");
        close(sock);
        close(s_sock);
        fclose(STATE.log);
        return EXIT_FAILURE;
    }
    Log("Bind success! \n");

    if (listen(s_sock, MAX_CONN))
    {
        Log("Error: listening on socket.\n");
        close(sock);
        close(s_sock);
        fclose(STATE.log);
        return EXIT_FAILURE;
    }
    Log("Listen success! >>>>>>>>>>>>>>>>>>>> \n");

    init_pool(&pool);

    // the main loop to wait for connections and serve requests
    while (KEEPON)
    {
        tv.tv_sec = 1;  // timeout = 1 sec
        tv.tv_usec = 0;

        pool.ready_set = pool.read_set;

        sigemptyset(&mask);
        sigaddset(&mask, SIGHUP);
        sigprocmask(SIG_BLOCK, &mask, NULL);
        pool.nready = select(pool.maxfd+1, &pool.ready_set, NULL, NULL, &tv);
        sigprocmask(SIG_UNBLOCK, &mask, NULL);

        if (pool.nready < 0)
        {
            if (errno == EINTR)
            {
                Log("Shut down Server >>>>>>>>>>>>>>>>>>>> \n");
                break;
            }
            Log("Error: select error \n");
            continue;
        }

        // if there is a new connection, accept and add the new client to the pool
        if (FD_ISSET(sock, &pool.ready_set))
        {
            client_size = sizeof(client_addr);
            client_fd = accept(sock, (struct sockaddr *) &client_addr, &client_size);

            if (client_fd < 0) ///TODO
            {
                Log("Error: accepting connection.\n");
                continue;
            }
            Log("accept client: client_fd=%d \n", client_fd);

            if (STATE.is_full)
            {
                pool.nready--;
                serve_error(client_fd, "503", "Service Unavailable",
                            "Server is too busy right now. Please try again later.", 1);
                close(client_fd);
            }
            else
                add_client(client_fd, &pool);
        }

        // process each ready connected descriptor
        check_clients(&pool);
    }

    lisod_shutdown();

    return EXIT_SUCCESS;  // to make the compiler happy
}
int main(int argc, char *argv[])
{
    /* check the argument count before touching argv[1]/argv[2] */
    if (argc < 3)
    {
        printf("usage: %s <ip> <port> \n", argv[0]);
        return -1;
    }

    char *ip = argv[1];     /* local ip addr */
    char *port = argv[2];   /* local port */
    int listenfd, accepfd, optval = 1;
    struct sockaddr_in laddr, raddr;
    socklen_t rlen;
    pthread_t tid;

    laddr.sin_family = AF_INET;
    laddr.sin_addr.s_addr = inet_addr(ip);
    laddr.sin_port = htons(atoi(port));

    /* /etc/protocols lists "protocol <-> number" */
    if (-1 == (listenfd = socket(AF_INET, SOCK_STREAM, 0)))
    {
        printf("socket function failed, errno is: %s", strerror(errno));
        return -1;
    }

#if 1
    /* eliminates "address already in use" error from bind */
    if (-1 == setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, (const void *)&optval, sizeof(int)))
    {
        printf("setsockopt function failed, errno is: %s \n", strerror(errno));
        return -1;
    }
#endif

    /* bind, syscall: bind */
    if (-1 == bind(listenfd, (struct sockaddr *)&laddr, sizeof(struct sockaddr)))
    {
        printf("bind function failed, errno is: %s \n", strerror(errno));
        return -1;
    }

    /* "man 2 listen" for detailed information about argument 2 (backlog).
       in linux kernel 3.x, it doesn't care about the backlog */
    if (-1 == listen(listenfd, 5))
    {
        printf("listen function failed, errno is: %d \n", errno);
        return -1;
    }

#if 1
    /* make the listen socket non-blocking */
    int val;
    if ((val = fcntl(listenfd, F_GETFL, 0)) == -1)
    {
        printf("fcntl(F_GETFL) failed, errno %d\n", errno);
        return -1;
    }
    if (fcntl(listenfd, F_SETFL, val | O_NONBLOCK) == -1)
    {
        printf("fcntl(F_SETFL) failed \n");
        return -1;
    }
#endif

    init_pool(&sp, Num_slots);

    for (int i = 0; i < Num_Thread; i++)
        pthread_create(&tid, NULL, my_thread, NULL);

    while (1)
    {
        rlen = sizeof(raddr);
        if (-1 == (accepfd = accept(listenfd, (struct sockaddr *)&raddr, &rlen)))
        {
            /* on a non-blocking listen socket, EWOULDBLOCK/EAGAIN only
               means no pending connection; anything else is fatal */
            if (errno != EWOULDBLOCK && errno != EAGAIN)
            {
                printf("accept function failed, errno is: %d \n", errno);
                return -1;
            }
        }
        else
        {
            printf("arrive \n");
            pool_insert(accepfd, &sp);
        }
    }

    return 0;
}
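/*
 * Here init_pool(&sp, Num_slots) / pool_insert() hand accepted descriptors
 * to the worker threads. The project's own definitions are not shown; the
 * sketch below is the standard bounded-buffer producer/consumer queue built
 * on POSIX semaphores, with hypothetical names sketch_pool_t,
 * sketch_init_pool, sketch_insert and sketch_remove. The main thread is the
 * producer (sketch_insert), each worker consumes with sketch_remove.
 */
#include <stdlib.h>
#include <semaphore.h>

typedef struct {
    int  *buf;     /* ring buffer of descriptors */
    int   n;       /* capacity */
    int   front;   /* buf[(front+1) % n] is the first item */
    int   rear;    /* buf[rear % n] is the last item */
    sem_t mutex;   /* protects front/rear */
    sem_t slots;   /* counts free slots */
    sem_t items;   /* counts queued items */
} sketch_pool_t;

static void sketch_init_pool(sketch_pool_t *sp, int n)
{
    sp->buf = calloc(n, sizeof(int));
    sp->n = n;
    sp->front = sp->rear = 0;
    sem_init(&sp->mutex, 0, 1);
    sem_init(&sp->slots, 0, n);
    sem_init(&sp->items, 0, 0);
}

static void sketch_insert(sketch_pool_t *sp, int fd)   /* producer */
{
    sem_wait(&sp->slots);                  /* block while the buffer is full */
    sem_wait(&sp->mutex);
    sp->buf[(++sp->rear) % sp->n] = fd;
    sem_post(&sp->mutex);
    sem_post(&sp->items);
}

static int sketch_remove(sketch_pool_t *sp)            /* consumer */
{
    sem_wait(&sp->items);                  /* block while the buffer is empty */
    sem_wait(&sp->mutex);
    int fd = sp->buf[(++sp->front) % sp->n];
    sem_post(&sp->mutex);
    sem_post(&sp->slots);
    return fd;
}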
static void *alloc(unsigned long size_pow, enum m_state state)
{
    /* Pool not initialized: try to initialize it */
    if (mp.base_addr == NULL) {
        if (init_pool() == -1) {
            errno = ENOMEM;
            return NULL;
        }
    }

    /* 'size_pow' is the requested size as a power of 2, i.e. 2^size_pow
       bytes; 'j' will hold the size (also a power of 2) of the first
       suitable available block. */
    unsigned long j;

    /* Find the value of 'j' in the available memory space, if possible */
    for (j = size_pow; j < mp.pool_size; j++) {
        if (!list_empty(&mp.avail[j]))
            break;
    }

    /* Do we need to enlarge the pool? - done by making a buddy
       for the existing largest block */
    while (list_empty(&mp.avail[j])) {
        /* Cannot address this amount of memory */
        if (get_pow((unsigned long)mp.base_addr) + size_pow >= mp.max_size) {
            /* TODO: which condition to use?
               if (mp.max_size >= mp.max_size) { */
            LOG("Maximum size reached!\n");
            errno = ENOMEM;
            return NULL;
        }

        void *new_addr;
        if ((new_addr = sbrk(pow2(mp.pool_size))) == (void *)-1) {
            LOG("sbrk pool-enlarge error!\n");
            errno = ENOMEM;
            return NULL;
        }
        /* TODO: without overwriting, this leaves a mess under valgrind
           memset(new_addr, 0, pow2(mp.pool_size)); */

        /* Pool was enlarged, we have twice as much space */
        mp.pool_size++;

        /* A new memory block, buddy of the previous block, lives in this space */
        struct block *nb = (struct block *)new_addr;
        nb->state = FREE;
        nb->k_size = mp.pool_size - 1;

        /* The avail array must be updated: we have a new free block
           of size 2^(nb->k_size) */
        struct block *p;
        p = mp.avail[nb->k_size].next;
        nb->next = p;
        p->prev = nb;
        nb->prev = (struct block *)&mp.avail[nb->k_size];
        mp.avail[nb->k_size].next = nb;
    }

    /* We now have the 'j' value set; remove this block from the avail array */
    struct block *l, *p;
    l = mp.avail[j].prev;
    p = l->prev;
    mp.avail[j].prev = p;
    p->next = (struct block *)&mp.avail[j];

    enum m_state block_state = l->state;
    l->state = USED;

    /* Now we need to split the block while it is too large for the request */
    while (j != size_pow) {
        j--;
        p = (struct block *)((unsigned long)l + pow2(j));
        p->state = FREE;
        p->k_size = j;
        p->prev = (struct block *)&mp.avail[j];
        p->next = (struct block *)&mp.avail[j];

        /* Add this block into the avail array */
        mp.avail[j].prev = p;
        mp.avail[j].next = p;
    }
    l->k_size = size_pow;

    /* Does the memory need to be cleared? */
    if (state == ZERO && block_state != ZERO)
        memset(B_DATA(l), 0, B_DATA_SIZE(l));

    return B_DATA(l);
}
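/*
 * The buddy allocator above relies on pow2() and get_pow() helpers defined
 * elsewhere in its project. Going only by the call sites (pow2 maps an
 * exponent to a byte count, get_pow maps a value to its power-of-2 order),
 * they plausibly look like the sketch below; the rounding behavior of
 * get_pow is an assumption.
 */
static inline unsigned long pow2(unsigned long k)
{
    return 1UL << k;                /* 2^k bytes */
}

static inline unsigned long get_pow(unsigned long v)
{
    unsigned long k = 0;

    while ((1UL << (k + 1)) <= v)   /* largest k with 2^k <= v */
        k++;
    return k;
}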
int main(int argc, char *argv[])
{
    fprintf(stderr, "Proxy Start yuruiz\n");
    int http_port;
    char *log_file;
    int http_listen_socket, http_client_sock;
    struct sockaddr_in http_addr, cli_addr;
    socklen_t conn_size;
    pool conn_pool;

    if (argc < 7 || argc > 8) {
        printf(USAGE, argv[0]);
        return EXIT_FAILURE;
    }

    log_file = argv[1];
    alpha = atof(argv[2]);
    http_port = atoi(argv[3]);
    fake_ip = argv[4];
    proxy.dns_ip = argv[5];
    proxy.dns_port = argv[6];

    if (argc == 8) {
        www_ip = argv[7];
    }

    loginit(log_file);

    fprintf(stderr, "-------------------Server Start------------------\n");

    if ((http_listen_socket = open_port(http_port, &http_addr)) == -1) {
        fprintf(stderr, "Open port failed\n");
        return EXIT_FAILURE;
    }

    // parse fake-ip
    bzero(&proxy.myaddr, sizeof(struct sockaddr_in));
    proxy.myaddr.sin_family = AF_INET;
    inet_aton(fake_ip, &proxy.myaddr.sin_addr);
    proxy.myaddr.sin_port = htons(0);

    init_pool(http_listen_socket, &conn_pool);

    do {
        conn_pool.ready_set = conn_pool.read_set;
        conn_pool.nconn = select(conn_pool.maxfd + 1, &conn_pool.ready_set,
                                 NULL, NULL, NULL);

        conn_size = sizeof(cli_addr);
        if (FD_ISSET(http_listen_socket, &conn_pool.ready_set)) {
            fprintf(stderr, "Adding new http connection\n");
            if ((http_client_sock = accept(http_listen_socket,
                                           (struct sockaddr *) &cli_addr,
                                           &conn_size)) == -1) {
                fprintf(stderr, "Error accepting http connection.\n");
                continue;
            }

            add_conn(http_client_sock, &conn_pool, &cli_addr);
        }

        conn_handle(&conn_pool);
    } while (1);

    close_socket(http_listen_socket);
    return EXIT_SUCCESS;
}