// Message handler for the "Appearance" preferences host dialog.
// WM_INITDIALOG populates the tab control and re-creates the previously
// selected child page; TCN_SELCHANGE persists the new tab index and swaps
// the child; WM_PARENTNOTIFY/WM_DESTROY clears the cached child handle.
// Always returns 0 so default dialog processing continues.
BOOL on_message(HWND wnd,UINT msg,WPARAM wp,LPARAM lp) {
    switch(msg) {
    case WM_INITDIALOG:
        {
            m_wnd = wnd;  // cache our own window handle for other members
            HWND wnd_tab = GetDlgItem(wnd, IDC_TAB1);
            unsigned n, count = tabsize(g_tabs_appearance);
            // One tab entry per registered appearance page.
            for (n=0; n<count; n++) {
                uTabCtrl_InsertItemText(wnd_tab, n, g_tabs_appearance[n]->get_name());
            }
            // Restore the last selected tab and build its child dialog.
            TabCtrl_SetCurSel(wnd_tab, cfg_child_appearance);
            make_child();
        }
        break;
    case WM_DESTROY:
        m_wnd = NULL;
        break;
    case WM_NOTIFY:
        switch (((LPNMHDR)lp)->idFrom) {
        case IDC_TAB1:
            switch (((LPNMHDR)lp)->code) {
            case TCN_SELCHANGE:
                {
                    // Persist the selection, then rebuild the embedded page.
                    cfg_child_appearance = TabCtrl_GetCurSel(GetDlgItem(wnd, IDC_TAB1));
                    make_child();
                }
                break;
            }
            break;
        }
        break;
    case WM_PARENTNOTIFY:
        switch(wp) {
        case WM_DESTROY:
            {
                // Embedded child page is being destroyed; drop its handle.
                if (m_wnd_child && (HWND)lp == m_wnd_child) m_wnd_child = NULL;
            }
            break;
        }
        break;
    }
    return 0;
}
// Dialog procedure for the tabbed preferences container.
// Mirrors on_message() above but for the global g_tabs list: fills the tab
// control on init, persists/switches the child page on tab change, and
// clears the cached child handle when the child is destroyed.
// Always returns 0 so default dialog processing continues.
BOOL CALLBACK DialogProc(HWND wnd,UINT msg,WPARAM wp,LPARAM lp) {
    switch(msg) {
    case WM_INITDIALOG:
        {
            HWND wnd_tab = GetDlgItem(wnd, IDC_TAB1);
            //uSendMessage(wnd_tab, TCM_SETMINTABWIDTH, 0, 35);
            unsigned n, count = tabsize(g_tabs);
            // One tab entry per registered page.
            for (n=0; n<count; n++) {
                uTabCtrl_InsertItemText(wnd_tab, n, g_tabs[n]->get_name());
            }
            // Restore the last selected tab and build its child dialog.
            TabCtrl_SetCurSel(wnd_tab, cfg_child);
            make_child(wnd);
        }
        break;
    case WM_DESTROY:
        break;
    case WM_NOTIFY:
        switch (((LPNMHDR)lp)->idFrom) {
        case IDC_TAB1:
            switch (((LPNMHDR)lp)->code) {
            case TCN_SELCHANGE:
                {
                    // Persist the selection, then rebuild the embedded page.
                    cfg_child = TabCtrl_GetCurSel(GetDlgItem(wnd, IDC_TAB1));
                    make_child(wnd);
                }
                break;
            }
            break;
        }
        break;
    case WM_PARENTNOTIFY:
        switch(wp) {
        case WM_DESTROY:
            {
                // Embedded child page is being destroyed; drop its handle.
                if (child && (HWND)lp == child) child = 0;
            }
            break;
        }
        break;
    }
    return 0;
}
int main(int argc, char* argv[])//ip port nchild { if(argc != 4) { printf("EXE IP PORT NCHILD ! \n"); exit(1); } int nchild = atoi(argv[3]); int index ; pchild_t parr = (pchild_t)calloc(nchild, sizeof(child_t)); make_child(parr, nchild);//创建子进程 //socket int fd_listen ; if((fd_listen = socket(AF_INET, SOCK_STREAM, 0)) == -1) { perror("socket !"); exit(1); } //bind struct sockaddr_in server_addr ; memset(&server_addr, 0, sizeof(server_addr)); server_addr.sin_family = AF_INET ; server_addr.sin_port = htons( atoi(argv[2]) ) ; server_addr.sin_addr.s_addr = inet_addr(argv[1]); if(-1 == bind(fd_listen,(const struct sockaddr*)&server_addr, sizeof(server_addr))) { perror("bind"); close(fd_listen); exit(1); } //listen }
/*
 * Minimal shell REPL: reads a tokenized line via getln(), lowercases the
 * command word, and dispatches to builtins (exit/quit, arg, add, mult),
 * pipelines ("|"), background jobs ("&"), or a foreground command.
 */
int main()
{
    int i;
    int argc = 0;
    int old_pipe[] = {0,1};
    char **args;

    while(1) {
        printf("$ ");
        args = getln(&argc);
        if (argc <= 0)
            continue;
        /* BUGFIX: check args[0] != NULL BEFORE calling strcmp on it; the
         * original evaluated strcmp(args[0],"") first, dereferencing a
         * possibly-NULL pointer. */
        if (args == NULL || args[0] == NULL || strcmp(args[0], "") == 0)
            continue;

        /* Lowercase the command word (strlen hoisted out of the loop;
         * cast to unsigned char before tolower to avoid UB on negative
         * char values). */
        size_t len = strlen(args[0]);
        for (i = 0; i < (int)len; i++)
            args[0][i] = tolower((unsigned char)args[0][i]);

        if (strcmp(args[0], "exit") == 0 || strcmp(args[0], "quit") == 0) {
            printf("exiting program\n");
            exit(0);
        } else if (strcmp(args[0], "arg") == 0) {
            arg(args, argc);
        } else if (strcmp(args[0], "add") == 0) {
            add(args);
        } else if (strcmp(args[0], "mult") == 0) {
            mult(args);
        } else {
            /* Scan for pipeline / background markers. */
            for (i = 0; args[i] != NULL; i++) {
                if (strcmp(args[i], "|") == 0) {
                    args[i][0] = ' ';
                    creat_pipe(arg_array(args, argc, i + 1),
                               arg_array(args, i + 1, 0),
                               argc - i, i + 1, old_pipe);
                    i = 0;   /* pipeline handled: skip make_child below */
                    break;
                } else if (strcmp(args[i], "&") == 0) {
                    args[i][0] = ' ';
                    i = -1;  /* background marker */
                    break;
                }
            }
            if (i == -1)
                make_child(args, 1, argc);  /* background job */
            else if (i != 0)
                make_child(args, 0, argc);  /* foreground job */
        }
    }
    return 0;
}
// SIGCHLD handler: reaps one dead worker, removes its pid->slot mapping,
// and respawns a replacement child bound to the same slot.
// NOTE(review): std::map mutation and make_child() (fork) inside a signal
// handler are not async-signal-safe — presumably acceptable here because
// SIGCHLD delivery is serialized; confirm against the installer's sa_mask.
void sigHandler(int)
{
    int status;
    pid_t id = wait(&status);
    // BUGFIX: wait() can fail (EINTR/ECHILD) and the pid may not be one of
    // ours; the original indexed the map unconditionally, which would
    // insert a bogus entry via operator[] and respawn slot 1.
    if (id <= 0)
        return;
    if (process_to_socket.count(id) == 0)
        return;
    int pos = process_to_socket[id];
    process_to_socket.erase(id);
    std::pair<pid_t, int> res = make_child(pos + 1);
    children[pos] = res.second;
    process_to_socket.insert(std::make_pair(res.first, pos));
}
/* Spawn up to number_to_start workers, reusing only scoreboard slots whose
 * status is WORKER_DEAD.  Stops early if a slot fails to spawn. */
static void startup_workers(int number_to_start)
{
    int slot;

    for (slot = 0; slot < ap_threads_limit; ++slot) {
        if (number_to_start == 0) {
            break;
        }
        if (ap_scoreboard_image->servers[0][slot].status == WORKER_DEAD) {
            if (make_child(ap_server_conf, slot) < 0) {
                break;
            }
            number_to_start--;
        }
    }
}
/* Exercise an exclusive process mutex: create it under the given name,
 * spawn CHILDREN worker processes, wait for them all, then assert that the
 * shared counter *x reached exactly MAX_COUNTER. */
static void test_exclusive(CuTest *tc, const char *lockname)
{
    apr_proc_t *child[CHILDREN];
    int i;
    apr_status_t rv = apr_proc_mutex_create(&proc_lock, lockname,
                                            APR_LOCK_DEFAULT, p);

    apr_assert_success(tc, "create the mutex", rv);

    for (i = 0; i < CHILDREN; i++) {
        make_child(tc, &child[i], p);
    }
    for (i = 0; i < CHILDREN; i++) {
        await_child(tc, child[i]);
    }

    CuAssert(tc, "Locks don't appear to work", *x == MAX_COUNTER);
}
/*
 * SIGALRM watchdog: polls every child's read pipe with a short select();
 * any live worker whose pipe shows no pending data is considered stuck,
 * its partial work is rolled back, the child is killed (SIGINT) and a
 * replacement is spawned.  Ends with siglongjmp back to the main loop.
 * NOTE(review): fprintf/perror/select in a signal handler are not
 * async-signal-safe — presumably tolerated in this program; confirm.
 */
static void sig_alrm(int signo)
{
    fprintf(stderr,"SIGALRM\n");
    fd_set rfd,wfd;
    struct timeval tv;
    tv.tv_sec=0;
    tv.tv_usec=100;   /* 100us poll: effectively a non-blocking readiness probe */
    int retval,maxfd;
    int i;
    FD_ZERO(&rfd);
    FD_ZERO(&wfd);
    maxfd=-1;
    for (i=0;i<nchild;i++)
    {
        /* Parent reads from pipe 1, writes to pipe 0*/
        // FD_SET(pipefd[i][0][1],&wfd);
        // if (pipefd[i][0][1]>maxfd)
        //     maxfd=pipefd[i][0][1];
        FD_SET(pipefd[i][1][0],&rfd);
        if (pipefd[i][1][0]>maxfd)
            maxfd=pipefd[i][1][0];
    };
    retval=select(maxfd+1,&rfd,&wfd,NULL,&tv);
    if (retval==-1)
        perror("select()");
    else
    {
        for (i=0;i<nchild;i++)
            /* Child is assigned work (childsub[i]!=-1) but its pipe is not
             * readable: treat it as hung and restart it. */
            if (!FD_ISSET(pipefd[i][1][0],&rfd) && childsub[i]!=-1)
            //if (!FD_ISSET(pipefd[i][0][1],&wfd) && !FD_ISSET(pipefd[i][1][0],&rfd))
            {
                // fprintf(stderr,"kill %d, data %d\n",i,sublen[childsub[i]]);
                /* Return the unfinished chunk to the unsorted pool. */
                unsortData+=sublen[childsub[i]];
                subread[childsub[i]]=0;
                childsub[i]=-1;
                kill(cpid[i],SIGINT);
                make_child(i);
            }
    }
    /* Non-local return to the sigsetjmp point in the main loop. */
    siglongjmp(jmpbuf,1);
}
int main(int argc, char ** argv) { struct sigaction act; int pid, i, ret; int fail_cnt = 0; test_init(argc, argv); if (prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0) == -1) { pr_perror("PR_SET_CHILD_SUBREAPER"); return -1; } ret = sigaction(SIGCHLD, NULL, &act); if (ret < 0) { pr_perror("sigaction() failed"); return -1; } act.sa_flags |= SA_NOCLDSTOP | SA_SIGINFO | SA_RESTART; act.sa_sigaction = sigchld_handler; sigemptyset(&act.sa_mask); sigaddset(&act.sa_mask, SIGCHLD); ret = sigaction(SIGCHLD, &act, NULL); if (ret < 0) { pr_perror("sigaction() failed"); return -1; } processes = mmap(NULL, MEM_SIZE, PROT_WRITE | PROT_READ, MAP_SHARED | MAP_ANONYMOUS, 0, 0); if (processes == NULL) { pr_perror("Unable to map share memory"); return 1; } for (i = 0; i < PR_MAX; i++) { if (socketpair(PF_UNIX, SOCK_STREAM, 0, processes[i].sks) == -1) { pr_perror("socketpair"); return 1; } } nr_processes++; pid = make_child(0, 0); if (pid < 0) return -1; while(nr_processes < PR_MAX) { int op, id; int flags = lrand48() % 2; op = get_rnd_op(); if (op == TEST_DIE || op == TEST_DIE_WAIT || op == TEST_SUBREAPER) { if (nr_processes == 1) continue; else id = lrand48() % (nr_processes - 1) + 1; } else if (op == TEST_FORK) { id = nr_processes * 9 / 10 + lrand48() % nr_processes / 10; while (processes[id].dead != 0) id--; } else id = lrand48() % nr_processes; if (processes[id].dead) continue; send_command(id, op, flags); } for (i = 0; i < nr_processes; i++) { if (processes[i].dead) continue; if (processes[i].pid == 0) continue; processes[i].sid = getsid(processes[i].pid); if (processes[i].sid == -1) { pr_perror("getsid(%d)", i); goto err; } } test_daemon(); test_waitsig(); for (i = 0; i < nr_processes; i++) { pid_t sid; if (processes[i].dead) continue; if (processes[i].pid == 0) continue; sid = getsid(processes[i].pid); if (sid == -1) { pr_perror("getsid(%d)", i); goto err; } if (sid != processes[i].sid) { fail("%d, %d: wrong sid %d (expected %d)", i, processes[i].pid, sid, processes[i].sid); 
fail_cnt++; } } if (fail_cnt) goto err; pass(); cleanup(); return 0; err: cleanup(); return 1; }
/*
 * Worker-side command loop body: reads one command from this process's
 * control socket and executes it (fork a new slot, toggle subreaper,
 * setsid, or die).  Replies with a status int; on any error it shuts the
 * socket down and exits the process.
 */
static void handle_command()
{
    int sk = processes[current].sks[0], ret, status = 0;
    struct command cmd;

    ret = read(sk, &cmd, sizeof(cmd));
    if (ret != sizeof(cmd)) {
        pr_perror("Unable to get command");
        goto err;
    }

    switch (cmd.cmd) {
    case TEST_FORK:
        {
            pid_t pid;

            /* arg2 selects CLONE_PARENT so the new child may be a sibling. */
            pid = make_child(cmd.arg1, cmd.arg2 ? CLONE_PARENT : 0);
            if (pid < 0) {
                status = -1;
                goto err;
            }
            test_msg("%3d: fork(%d, %x) = %d\n",
                     current, cmd.arg1, cmd.arg2, pid);
            processes[cmd.arg1].pid = pid;
        }
        break;
    case TEST_SUBREAPER:
        test_msg("%3d: subreaper(%d)\n", current, cmd.arg1);
        if (prctl(PR_SET_CHILD_SUBREAPER, cmd.arg1, 0, 0, 0) == -1) {
            pr_perror("PR_SET_CHILD_SUBREAPER");
            status = -1;
        }
        break;
    case TEST_SETSID:
        /* Already a session leader: nothing to do. */
        if (getsid(getpid()) == getpid())
            break;
        test_msg("%3d: setsid()\n", current);
        if(setsid() == -1) {
            pr_perror("setsid");
            status = -1;
        }
        break;
    case TEST_DIE_WAIT:
        test_msg("%3d: wait()\n", current);
        /* fallthrough: DIE_WAIT shares the death path below, exiting
         * with status 2 instead of 0 so the parent can tell them apart. */
    case TEST_DIE:
        test_msg("%3d: die()\n", current);
        processes[current].dead = 1;
        shutdown(sk, SHUT_RDWR);
        if (cmd.cmd == TEST_DIE_WAIT)
            exit(2);
        exit(0);
    default:
        pr_perror("Unknown operation %d", cmd.cmd);
        status = -1;
        break;
    }

    /* Acknowledge the command with the resulting status. */
    ret = write(sk, &status, sizeof(status));
    if (ret != sizeof(status)) {
        pr_perror("Unable to answer");
        goto err;
    }

    if (status < 0)
        goto err;

    return;
err:
    shutdown(sk, SHUT_RDWR);
    exit(1);
}
// Builds the expression tree for this Function from the character stream c
// using an explicit token stack (table-driven recursive descent).
// Grammar tokens: FUNCT (start), OPEN_P/CLOSE_P (parentheses), BI (binary
// operator + - * / ^), UN (unary function prefix), EXP (expression),
// VAR (variable A-Z, number, or %pi/%e constant), OPT_BI (optional
// trailing binary operator).  On a parse error (ok == false) the partial
// tree is destroyed with deleteNode.
// NOTE(review): the statement order here is load-bearing (current/open_p
// bookkeeping); the code is left byte-identical.
void Function::buildTree(char*& c)
{
    std::stack<TOKEN> t;
    t.push(FUNCT);
    std::string vars = "";              // variable letters seen so far; index = var slot
    node *current = tree;
    std::stack<node*> open_p;           // nodes opened by '(' awaiting their ')'
    bool ok = true, as_child = false;   // as_child: last reduce came from a ')'
    while (!t.empty() && ok)
        switch (t.top()) {
        case FUNCT:
            // Start symbol: end of input pops it; an operator means the
            // expression continues; anything else begins an expression.
            if (*c == 0) t.pop();
            else if (strchr("+-*/^",*c)) t.push(OPT_BI);
            else t.push(EXP);
            break;
        case OPEN_P:
            if (*c == '(') {
                t.pop();
                if (!tree) {
                    tree = new node();
                    current = tree;
                    open_p.push(tree);
                } else {
                    // Attach a placeholder node for the parenthesized group.
                    node *tmp = new node();
                    if (current->child[0]) current->child[1] = tmp;
                    else current->child[0] = tmp;
                    current = tmp;
                    open_p.push(tmp);
                }
                c++;
            } else ok = false;
            break;
        case CLOSE_P:
            if (*c == ')') {
                t.pop();
                // Collapse the group: hoist the single child into the
                // placeholder node, then free the child shell.
                node *x = open_p.top()->child[0];
                open_p.top()->value = x->value;
                open_p.top()->type = x->type;
                open_p.top()->child[0] = x->child[0];
                open_p.top()->child[1] = x->child[1];
                current = open_p.top();
                open_p.pop();
                delete x;
                as_child = true;
                c++;
            } else if (strchr("+-*/^",*c)) {
                // Another operator before the ')': keep parsing inside.
                t.push(EXP);
                t.push(BI);
            } else ok = false;
            break;
        case BI:
            if (strchr("+-*/^",*c)) {
                t.pop();
                node *tmp = new node();
                // Bind the operator's evaluation function.
                switch(*c) {
                case '+':tmp->f2 = add; break;
                case '-':tmp->f2 = sub; break;
                case '*':tmp->f2 = mul; break;
                case '/':tmp->f2 = Function::div; break;
                case '^':tmp->f2 = pw; break;
                }
                tmp->type = 2;
                // Precedence handling: +/- (or an operand/group on the
                // left) restructure via make_child; otherwise slot the
                // operator into the first free child position.
                if (*c == '+' || *c == '-' || current->type == 3 || as_child)
                    make_child(current, tmp);
                else {
                    if (current->child[0])
                        if (current->child[1]) {
                            make_child(current->child[1], tmp);
                            current = current->child[1];
                        } else current->child[1] = tmp;
                    else current->child[0] = tmp;
                }
                as_child = false;
                c++;
            } else ok = false;
            break;
        case UN:
            break;
        case VAR_UNUSED_SENTINEL_NOT_PRESENT: ; // (no such case — see below)
        case EXP:
            // a/s/c/l/t/- start a named unary function (sin, cos, log, ...)
            // which is parsed as UN '(' EXP ')'.
            if (*c == 'a' || *c == 's' || *c == '-' || *c == 'c' || *c == 'l' || *c == 't') {
                t.pop();
                t.push(CLOSE_P);
                t.push(EXP);
                t.push(OPEN_P);
                t.push(UN);
            } else if (*c == '(') {
                t.pop();
                t.push(CLOSE_P);
                t.push(EXP);
                t.push(OPEN_P);
            } else if (strchr("ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789%",*c)) {
                t.pop();
                t.push(VAR);
            } else ok = false;
            break;
        case VAR:
            {
                t.pop();
                node *tmp = new node();
                tmp->type = 3;   // leaf: literal number by default
                if (!tree)
                    current = tree = tmp;
                else {
                    if (current->child[0]) current->child[1] = tmp;
                    else current->child[0] = tmp;
                    if (!open_p.empty())
                        if (current == open_p.top())
                            current = tmp;
                }
                if (strchr("ABCDEFGHIJKLMNOPQRSTUVWXYZ",*c)) {
                    // Named variable: assign (or reuse) its slot index.
                    tmp->type = 4;
                    tmp->var = vars.find(*c);
                    if (tmp->var == -1) {
                        vars += *c;
                        tmp->var = numVar;
                        numVar++;
                    }
                    c++;
                    t.push(OPT_BI);
                } else if (strchr("0123456789",*c)) {
                    // Numeric literal: collect digits then convert.
                    std::string v;
                    for (; strchr("0123456789",*c); c++)
                        if (*c == 0) break;
                        else v += *c;
                    tmp->value = atof(v.c_str());
                } else if (*c == '%') {
                    // Constants: %pi and %e.
                    c++;
                    if (*c == 'p'){
                        c++;
                        if (*c == 'i') { current->value = M_PI; c++; }
                        else ok = false;
                    } else if (*c == 'e') { current->value = M_E; c++; }
                    else ok = false;
                } else ok = false;
            }
            break;
        case OPT_BI:
            // After a variable: either the input ends or a binary
            // operator continues the expression.
            if (*c == 0) t.pop();
            else if (strchr("+-*/^",*c)) {
                t.pop();
                t.push(EXP);
                t.push(BI);
            } else ok = false;
        }
    if (!ok)
        deleteNode(tree);
}
/*
 * Preforked epoll dispatcher.  Usage: EXE IP PORT NCHILD.
 * Forks NCHILD workers (each connected by a unix socket in cptr[i].fds),
 * then multiplexes: new connections are handed (via send_fd) to the first
 * idle worker; a readable worker pipe means that worker finished its
 * client and is idle again.
 */
int main(int argc,char **argv)
{
    if(argc != 4){
        printf("error args\n");
        return -1;
    }
    pchild cptr;
    int num = atoi(argv[3]);
    cptr = (pchild)calloc(num,sizeof(child));
    if(NULL == cptr){               /* calloc can fail */
        perror("calloc");
        return -1;
    }
    make_child(cptr,num);

    int sfd = socket(AF_INET,SOCK_STREAM,0);
    if(-1 == sfd){
        perror("socket");
        return -1;
    }
    struct sockaddr_in ser;
    int ret;
    bzero(&ser,sizeof(ser));
    ser.sin_family = AF_INET;
    ser.sin_port = htons(atoi(argv[2]));
    ser.sin_addr.s_addr = inet_addr(argv[1]);
    ret = bind(sfd,(struct sockaddr *)&ser,sizeof(struct sockaddr));
    if(-1 == ret){
        perror("bind");
        return -1;
    }
    listen(sfd,num);

    int epfd;
    epfd = epoll_create(1);
    if(-1 == epfd){
        perror("epoll_create");
        return -1;
    }
    struct epoll_event ev,*evs;
    /* num worker pipes + the listening socket. */
    evs = (struct epoll_event*)calloc(num + 1,sizeof(struct epoll_event));
    if(NULL == evs){
        perror("calloc");
        return -1;
    }
    ev.events = EPOLLIN;
    ev.data.fd = sfd;
    ret = epoll_ctl(epfd,EPOLL_CTL_ADD,sfd,&ev);
    if(-1 == ret){
        perror("epoll_ctl");
        return -1;
    }
    int i;
    for(i = 0;i < num;i++){
        evs[i].events = EPOLLIN;
        evs[i].data.fd = cptr[i].fds;
        ret = epoll_ctl(epfd,EPOLL_CTL_ADD,cptr[i].fds,&evs[i]);
        if(-1 == ret){
            perror("epoll_ctl1");
            return -1;
        }
    }
    int j;
    int new_fd;
    while(1){
        ret = epoll_wait(epfd,evs,num + 1,-1);
        if(ret > 0){
            for(i = 0;i < ret;i++){
                /* BUGFIX: events is a bitmask — test with &, not ==. */
                if(evs[i].data.fd == sfd && (evs[i].events & EPOLLIN)){
                    new_fd = accept(sfd,NULL,NULL);
                    if(-1 == new_fd){
                        perror("accept");
                        return -1;
                    }
                    printf("one client connect,new_fd is %d\n",new_fd);
                    for(j = 0;j < num;j++){
                        if(cptr[j].busy == 0){
                            break;
                        }
                    }
                    if(j != num){
                        /* Hand the client fd to an idle worker and mark it busy. */
                        send_fd(cptr[j].fds,new_fd);
                        cptr[j].busy = 1;
                    }else{
                        /* BUGFIX: all workers busy — close the fd instead of
                         * leaking it. */
                        close(new_fd);
                    }
                }else if(evs[i].events & EPOLLIN){
                    /* BUGFIX: the original compared against sfd (and ran this
                     * loop after the event loop with a stale index), so worker
                     * busy flags were never cleared correctly.  Match the
                     * ready fd to its worker pipe instead.
                     * NOTE(review): the readiness notification is presumably
                     * consumed inside send_fd()/worker protocol — confirm,
                     * otherwise a read() is needed here to avoid busy-looping. */
                    for(j = 0;j < num;j++){
                        if(evs[i].data.fd == cptr[j].fds){
                            cptr[j].busy = 0;
                        }
                    }
                }
            }
        }
    }
    return 0;
}
/*
 * Solves the puzzle and prints results.
 * Returns 1 on success and 0 on nonexistence of a solution.
 *
 * Best-first search over 3x3 grids: expands the minimum-weight node from
 * the priority queue, generating up to four children (hole moved
 * left/right/up/down), deduplicated by grid_code in the visited set.
 * NOTE(review): the mallocs here are unchecked, and root->hole is
 * hard-coded to 1 — presumably callers guarantee the hole starts at
 * cell index 1 in *start; confirm.
 */
static int solve(int *start, int *end)
{
    Grid *root, *goal, *cur, *child, *iter, **result;
    Pqueue *pq;
    Set *visited;
    int goal_grid_code, child_code;
    int i, ch;
    int path_length;

    /* Root node wraps the start position. */
    root = (Grid *) malloc(sizeof(Grid));
    memcpy(root->g, start, sizeof(int) * 9);
    root->parent = NULL;
    root->hole = 1;
    root->depth = 0;

    /* Goal grid is only used for its code and target cell positions. */
    goal = (Grid *) malloc(sizeof(Grid));
    memcpy(goal->g, end, sizeof(int) * 9);
    goal_grid_code = grid_code(goal);
    get_correct_positions(goal);

    path_length = 0;
    i = 0;
    pq = pqueue_new();
    visited = set_new(4);
    pqueue_insert(pq, root);
    set_insert(visited, grid_code(root));

    while (!empty(pq)) {
        cur = pqueue_extract_min(pq);
        if (verbose) {
            fprintf(output, "%d.\n", ++i);
            fprintf(output, "Depth: %d\n", cur->depth);
            fprintf(output, "Grid:\n");
            grid_print(output, cur);
            fprintf(output, "f: %2d\n", weight(cur));
            fprintf(output, "\n");
        }
        if (grid_code(cur) == goal_grid_code)
            break;
        ch = 0;
/* Insert `child` unless its position was already visited (then free it). */
#define ADD_CHILD() { \
    child_code = grid_code(child); \
    if (!set_contains(visited, child_code)) { \
        set_insert(visited, child_code); \
        pqueue_insert(pq, child); \
        cur->child[ch++] = child; \
    } else \
        free(child); \
}
        /* Hole not on the left wall. */
        if (cur->hole % 3 > 0) {
            child = make_child(cur);
            grid_move_hole(child, child->hole - 1);
            ADD_CHILD();
        }
        /* Hole not on the right wall. */
        if (cur->hole % 3 < 2) {
            child = make_child(cur);
            grid_move_hole(child, child->hole + 1);
            ADD_CHILD();
        }
        /* Hole not on the top wall. */
        if (cur->hole / 3 > 0) {
            child = make_child(cur);
            grid_move_hole(child, child->hole - 3);
            ADD_CHILD();
        }
        /* Hole not on the bottom wall. */
        if (cur->hole / 3 < 2) {
            child = make_child(cur);
            grid_move_hole(child, child->hole + 3);
            ADD_CHILD();
        }
#undef ADD_CHILD
        /* End of children character. */
        cur->child[ch] = NULL;
        if (verbose) {
            fprintf(output, "Children:\n");
            grid_children(output, cur);
            fprintf(output, "------------------------\n");
            fprintf(output, "\n");
            STEP();
        }
    }

    /* Queue exhausted without reaching the goal: no solution. */
    if (grid_code(cur) != goal_grid_code)
        return 0;

    /* Collect result path. */
    for (iter = cur; iter != NULL; iter = iter->parent)
        path_length ++;
    result = (Grid**) malloc(sizeof(Grid*) * path_length);
    i = path_length - 1;
    /* Walk parent links backwards so result[] is start -> goal. */
    for (iter = cur; iter != NULL; iter = iter->parent)
        result[i--] = iter;

    if (verbose)
        fprintf(output, "Solution sequence:\n");
    for (i = 0; i < path_length; i++) {
        grid_print(output, result[i]);
        STEP();
        fprintf(output, "\n");
    }

    /* Clean up. */
    grid_dispose(root);
    set_dispose(visited);
    pqueue_dispose(pq);
    free(result);
    free(goal);
    return 1;
}
/*
 * Periodic worker-pool maintenance (Apache MPM style): scans the
 * scoreboard counting idle and dead worker slots, then either marks one
 * idle worker for shutdown (too many idle) or spawns up to
 * idle_spawn_rate replacements into dead slots (too few idle), doubling
 * the spawn rate on consecutive busy cycles up to MAX_SPAWN_RATE.
 */
static void perform_idle_server_maintenance(apr_pool_t *p)
{
    int i;
    int idle_count;
    worker_score *ws;
    int free_length;
    int free_slots[MAX_SPAWN_RATE];
    int last_non_dead;
    int total_non_dead;

    /* initialize the free_list */
    free_length = 0;
    idle_count = 0;
    last_non_dead = -1;
    total_non_dead = 0;

    for (i = 0; i < ap_threads_limit; ++i) {
        int status;

        /* Past the high-water mark with a full free list: nothing more
         * to learn from the scan. */
        if (i >= ap_max_workers_limit && free_length == idle_spawn_rate)
            break;
        ws = &ap_scoreboard_image->servers[0][i];
        status = ws->status;
        if (status == WORKER_DEAD) {
            /* try to keep children numbers as low as possible */
            if (free_length < idle_spawn_rate) {
                free_slots[free_length] = i;
                ++free_length;
            }
        }
        else if (status == WORKER_IDLE_KILL) {
            /* If it is already marked to die, skip it */
            continue;
        }
        else {
            /* We consider a starting server as idle because we started it
             * at least a cycle ago, and if it still hasn't finished starting
             * then we're just going to swamp things worse by forking more.
             * So we hopefully won't need to fork more if we count it.
             * This depends on the ordering of SERVER_READY and SERVER_STARTING.
             */
            if (status <= WORKER_READY) {
                ++ idle_count;
            }
            ++total_non_dead;
            last_non_dead = i;
        }
    }
    DBPRINT2("Total: %d Idle Count: %d  \r", total_non_dead, idle_count);
    ap_max_workers_limit = last_non_dead + 1;

    if (idle_count > ap_threads_max_free) {
        /* kill off one child... we use the pod because that'll cause it to
         * shut down gracefully, in case it happened to pick up a request
         * while we were counting
         */
        idle_spawn_rate = 1;
        ap_update_child_status_from_indexes(0, last_non_dead, WORKER_IDLE_KILL,
                                            (request_rec *) NULL);
        DBPRINT1("\nKilling idle thread: %d\n", last_non_dead);
    }
    else if (idle_count < ap_threads_min_free) {
        /* terminate the free list */
        if (free_length == 0) {
            /* only report this condition once */
            static int reported = 0;

            if (!reported) {
                ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00220)
                             "server reached MaxRequestWorkers setting, consider"
                             " raising the MaxRequestWorkers setting");
                reported = 1;
            }
            idle_spawn_rate = 1;
        }
        else {
            if (idle_spawn_rate >= 8) {
                ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf, APLOGNO(00221)
                             "server seems busy, (you may need "
                             "to increase StartServers, or Min/MaxSpareServers), "
                             "spawning %d children, there are %d idle, and "
                             "%d total children", idle_spawn_rate,
                             idle_count, total_non_dead);
            }
            DBPRINT0("\n");
            for (i = 0; i < free_length; ++i) {
                DBPRINT1("Spawning additional thread slot: %d\n", free_slots[i]);
                make_child(ap_server_conf, free_slots[i]);
            }
            /* the next time around we want to spawn twice as many if this
             * wasn't good enough, but not if we've just done a graceful
             */
            if (hold_off_on_exponential_spawning) {
                --hold_off_on_exponential_spawning;
            }
            else if (idle_spawn_rate < MAX_SPAWN_RATE) {
                idle_spawn_rate *= 2;
            }
        }
    }
    else {
        idle_spawn_rate = 1;
    }
}
// Registers entity p as the sub-entity known by name n: parents p under
// this entity via make_child, then records it in the by-name lookup table
// (overwriting any previous entity mapped to that name).
void relations_mixin<false, D>::map_sub_entity(const sub_entity_name n, const entity_id p) const {
    make_child(p, n);
    sub_entities_component().sub_entities_by_name[n] = p;
}
int main(int argc, char *argv[]) { // parse parameters std::string host; std::string port; int rez = 0; while ( (rez = getopt(argc, argv, "h:p:d:")) != -1) { switch (rez) { case 'h': host = optarg; break; case 'p': port = optarg; break; case 'd': setDirectory(optarg); break; default: break; } } #ifdef ENABLE_SIGNALS // Set action on SIGCHILD { __sigset_t set; sigfillset(&set); struct sigaction action; action.sa_handler = &sigHandler; action.sa_mask = set; action.sa_flags = SA_RESTART; sigaction(SIGCHLD, &action, NULL); } #endif #ifdef ENABLE_DEAMON pid_t pid = fork(); if (pid == 0) { #endif // Create Master Socket int MasterSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); int optval = 1; setsockopt(MasterSocket, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval)); struct sockaddr_in SockAddr; SockAddr.sin_family = AF_INET; SockAddr.sin_port = htons(atoi(port.c_str())); inet_pton(AF_INET, host.c_str(), &(SockAddr.sin_addr)); bind(MasterSocket, (struct sockaddr *)(&SockAddr), sizeof(SockAddr)); listen(MasterSocket, SOMAXCONN); // Create child process for (int i = 0; i < max_child; ++i) { std::pair<pid_t, int> res = make_child(i + 1); children[i] = res.second; process_to_socket.insert(std::make_pair(res.first, i)); } int counter = 0; // Getting incoming connections char buff[] = ""; while (true) { int incoming_socket = accept(MasterSocket, 0, 0); int size = -1; while (size < 0) { size = socket_fd_write(children[counter], buff, sizeof(buff), incoming_socket); counter = (counter + 1) % max_child; } } #ifdef ENABLE_DEAMON } #endif return 0; }
// Parents entity p under this entity (optionally tagging it with a name)
// and appends it to the unnamed sub-entity list.
// NOTE(review): a default argument on an out-of-line member definition is
// only well-formed if the in-class declaration does not also provide one —
// confirm against the class declaration.
void relations_mixin<false, D>::add_sub_entity(const entity_id p, const sub_entity_name optional_name = sub_entity_name::INVALID) const {
    make_child(p, optional_name);
    sub_entities_component().other_sub_entities.push_back(p);
}
int main(int argc, char ** argv) { int pid, i; int fail_cnt = 0; test_init(argc, argv); processes = mmap(NULL, PAGE_SIZE, PROT_WRITE | PROT_READ, MAP_SHARED | MAP_ANONYMOUS, 0, 0); if (processes == NULL) { pr_perror("Unable to map share memory"); return 1; } for (i = 0; i < nr_processes; i++) { if (socketpair(PF_UNIX, SOCK_STREAM, 0, processes[i].sks) == -1) { pr_perror("socketpair"); return 1; } } pid = make_child(0, 0); if (pid < 0) return -1; /* * 5 5 \_ session02 ( 0) * 6 6 \_ session02 ( 1) * 8 7 | \_ session02 ( 3) * 15 12 | \_ session02 (10) * 10 10 \_ session02 ( 5) * 11 7 \_ session02 ( 6) * 13 12 \_ session02 ( 8) */ send_command(0, TEST_SUBREAPER, 1, 0); send_command(0, TEST_SETSID, 0, 0); send_command(0, TEST_FORK, 1, 0); send_command(1, TEST_FORK, 2, 0); send_command(2, TEST_SETSID, 0, 0); send_command(2, TEST_FORK, 3, CLONE_PARENT); send_command(2, TEST_DIE, 0, 0); send_command(1, TEST_WAIT, 2, 0); send_command(3, TEST_FORK, 4, 0); send_command(4, TEST_FORK, 5, 0); send_command(5, TEST_FORK, 6, 0); send_command(5, TEST_FORK, 7, 0); send_command(7, TEST_SETSID, 0, 0); send_command(7, TEST_FORK, 8, CLONE_PARENT); send_command(7, TEST_FORK, 9, CLONE_PARENT); send_command(7, TEST_DIE, 0, 0); send_command(5, TEST_WAIT, 7, 0); send_command(9, TEST_FORK, 10, 0); send_command(1, TEST_SUBREAPER, 1, 0); send_command(9, TEST_DIE, 0, 0); send_command(5, TEST_WAIT, 9, 0); send_command(1, TEST_SUBREAPER, 0, 0); send_command(4, TEST_DIE, 0, 0); send_command(3, TEST_WAIT, 4, 0); send_command(1, TEST_SETSID, 0, 0); send_command(5, TEST_SETSID, 0, 0); for (i = 0; i < nr_processes; i++) { if (processes[i].dead) continue; if (processes[i].pid == 0) continue; processes[i].sid = getsid(processes[i].pid); if (processes[i].sid == -1) { pr_perror("getsid(%d)", i); goto err; } } test_daemon(); test_waitsig(); for (i = 0; i < nr_processes; i++) { pid_t sid; if (processes[i].dead) continue; if (processes[i].pid == 0) continue; sid = getsid(processes[i].pid); if (sid == -1) { 
pr_perror("getsid(%d)", i); goto err; } if (sid != processes[i].sid) { fail("%d, %d: wrong sid %d (expected %d)", i, processes[i].pid, sid, processes[i].sid); fail_cnt++; } } if (fail_cnt) goto err; pass(); return 0; err: cleanup(); return 1; }
/*
 * Prefork MPM master loop.  Sets up the cross-process accept mutex and
 * scoreboard, pre-spawns the configured children, then loops: reaping
 * dead children (replacing them 1-for-1 while starting up), performing
 * idle-server maintenance on each 1-second timeout, and finally handling
 * shutdown (SIGTERM everyone) or restart (graceful via pod, or SIGHUP).
 * Returns 1 to stop the server, 0 to restart it.
 */
int ap_mpm_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s)
{
    int index;
    int remaining_children_to_start;
    apr_status_t rv;

    ap_log_pid(pconf, ap_pid_fname);

    first_server_limit = server_limit;
    if (changed_limit_at_restart) {
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                     "WARNING: Attempt to change ServerLimit "
                     "ignored during restart");
        changed_limit_at_restart = 0;
    }

    /* Initialize cross-process accept lock */
    ap_lock_fname = apr_psprintf(_pconf, "%s.%" APR_PID_T_FMT,
                                 ap_server_root_relative(_pconf, ap_lock_fname),
                                 ap_my_pid);

    rv = apr_proc_mutex_create(&accept_mutex, ap_lock_fname,
                               ap_accept_lock_mech, _pconf);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s,
                     "Couldn't create accept lock");
        mpm_state = AP_MPMQ_STOPPING;
        return 1;
    }

#if APR_USE_SYSVSEM_SERIALIZE
    if (ap_accept_lock_mech == APR_LOCK_DEFAULT ||
        ap_accept_lock_mech == APR_LOCK_SYSVSEM) {
#else
    if (ap_accept_lock_mech == APR_LOCK_SYSVSEM) {
#endif
        /* SysV semaphores are owned by root; re-own for the child user. */
        rv = unixd_set_proc_mutex_perms(accept_mutex);
        if (rv != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s,
                         "Couldn't set permissions on cross-process lock; "
                         "check User and Group directives");
            mpm_state = AP_MPMQ_STOPPING;
            return 1;
        }
    }

    if (!is_graceful) {
        if (ap_run_pre_mpm(s->process->pool, SB_SHARED) != OK) {
            mpm_state = AP_MPMQ_STOPPING;
            return 1;
        }
        /* fix the generation number in the global score; we just got a new,
         * cleared scoreboard
         */
        ap_scoreboard_image->global->running_generation = ap_my_generation;
    }

    set_signals();

    if (one_process) {
        AP_MONCONTROL(1);
    }

    if (ap_daemons_max_free < ap_daemons_min_free + 1) /* Don't thrash... */
        ap_daemons_max_free = ap_daemons_min_free + 1;

    /* If we're doing a graceful_restart then we're going to see a lot
     * of children exiting immediately when we get into the main loop
     * below (because we just sent them AP_SIG_GRACEFUL). This happens pretty
     * rapidly... and for each one that exits we'll start a new one until
     * we reach at least daemons_min_free. But we may be permitted to
     * start more than that, so we'll just keep track of how many we're
     * supposed to start up without the 1 second penalty between each fork.
     */
    remaining_children_to_start = ap_daemons_to_start;
    if (remaining_children_to_start > ap_daemons_limit) {
        remaining_children_to_start = ap_daemons_limit;
    }
    if (!is_graceful) {
        startup_children(remaining_children_to_start);
        remaining_children_to_start = 0;
    }
    else {
        /* give the system some time to recover before kicking into
         * exponential mode */
        hold_off_on_exponential_spawning = 10;
    }

    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                 "%s configured -- resuming normal operations",
                 ap_get_server_version());
    ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf,
                 "Server built: %s", ap_get_server_built());
#ifdef AP_MPM_WANT_SET_ACCEPT_LOCK_MECH
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
                 "AcceptMutex: %s (default: %s)",
                 apr_proc_mutex_name(accept_mutex),
                 apr_proc_mutex_defname());
#endif
    restart_pending = shutdown_pending = 0;

    mpm_state = AP_MPMQ_RUNNING;

    /* Main reap/replace loop: one iteration per dead child or 1s timeout. */
    while (!restart_pending && !shutdown_pending) {
        int child_slot;
        apr_exit_why_e exitwhy;
        int status, processed_status;
        /* this is a memory leak, but I'll fix it later. */
        apr_proc_t pid;

        ap_wait_or_timeout(&exitwhy, &status, &pid, pconf);

        /* XXX: if it takes longer than 1 second for all our children
         * to start up and get into IDLE state then we may spawn an
         * extra child
         */
        if (pid.pid != -1) {
            processed_status = ap_process_child_status(&pid, exitwhy, status);
            if (processed_status == APEXIT_CHILDFATAL) {
                mpm_state = AP_MPMQ_STOPPING;
                return 1;
            }

            /* non-fatal death... note that it's gone in the scoreboard. */
            child_slot = find_child_by_pid(&pid);
            if (child_slot >= 0) {
                (void) ap_update_child_status_from_indexes(child_slot, 0,
                                                           SERVER_DEAD,
                                                           (request_rec *) NULL);
                if (processed_status == APEXIT_CHILDSICK) {
                    /* child detected a resource shortage (E[NM]FILE, ENOBUFS, etc)
                     * cut the fork rate to the minimum
                     */
                    idle_spawn_rate = 1;
                }
                else if (remaining_children_to_start
                         && child_slot < ap_daemons_limit) {
                    /* we're still doing a 1-for-1 replacement of dead
                     * children with new children
                     */
                    make_child(ap_server_conf, child_slot);
                    --remaining_children_to_start;
                }
#if APR_HAS_OTHER_CHILD
            }
            else if (apr_proc_other_child_read(&pid, status) == 0) {
                /* handled */
#endif
            }
            else if (is_graceful) {
                /* Great, we've probably just lost a slot in the
                 * scoreboard.  Somehow we don't know about this
                 * child.
                 */
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf,
                             "long lost child came home! (pid %ld)",
                             (long)pid.pid);
            }
            /* Don't perform idle maintenance when a child dies,
             * only do it when there's a timeout.  Remember only a
             * finite number of children can die, and it's pretty
             * pathological for a lot to die suddenly.
             */
            continue;
        }
        else if (remaining_children_to_start) {
            /* we hit a 1 second timeout in which none of the previous
             * generation of children needed to be reaped... so assume
             * they're all done, and pick up the slack if any is left.
             */
            startup_children(remaining_children_to_start);
            remaining_children_to_start = 0;
            /* In any event we really shouldn't do the code below because
             * few of the servers we just started are in the IDLE state
             * yet, so we'd mistakenly create an extra server.
             */
            continue;
        }

        perform_idle_server_maintenance(pconf);
#ifdef TPF
        shutdown_pending = os_check_server(tpf_server_name);
        ap_check_signals();
        sleep(1);
#endif /*TPF */
    }

    mpm_state = AP_MPMQ_STOPPING;

    if (shutdown_pending) {
        /* Time to gracefully shut down:
         * Kill child processes, tell them to call child_exit, etc...
         */
        if (unixd_killpg(getpgrp(), SIGTERM) < 0) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
                         "killpg SIGTERM");
        }
        ap_reclaim_child_processes(1); /* Start with SIGTERM */

        /* cleanup pid file on normal shutdown */
        {
            const char *pidfile = NULL;
            pidfile = ap_server_root_relative (pconf, ap_pid_fname);
            if ( pidfile != NULL && unlink(pidfile) == 0)
                ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf,
                             "removed PID file %s (pid=%ld)",
                             pidfile, (long)getpid());
        }

        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                     "caught SIGTERM, shutting down");
        return 1;
    }

    /* we've been told to restart */
    apr_signal(SIGHUP, SIG_IGN);
    if (one_process) {
        /* not worth thinking about */
        return 1;
    }

    /* advance to the next generation */
    /* XXX: we really need to make sure this new generation number isn't in
     * use by any of the children.
     */
    ++ap_my_generation;
    ap_scoreboard_image->global->running_generation = ap_my_generation;

    if (is_graceful) {
        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                     "Graceful restart requested, doing restart");

        /* kill off the idle ones */
        ap_mpm_pod_killpg(pod, ap_max_daemons_limit);

        /* This is mostly for debugging... so that we know what is still
         * gracefully dealing with existing request.  This will break
         * in a very nasty way if we ever have the scoreboard totally
         * file-based (no shared memory)
         */
        for (index = 0; index < ap_daemons_limit; ++index) {
            if (ap_scoreboard_image->servers[index][0].status != SERVER_DEAD) {
                ap_scoreboard_image->servers[index][0].status = SERVER_GRACEFUL;
            }
        }
    }
    else {
        /* Kill 'em off */
        if (unixd_killpg(getpgrp(), SIGHUP) < 0) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
                         "killpg SIGHUP");
        }
        ap_reclaim_child_processes(0); /* Not when just starting up */
        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                     "SIGHUP received.  Attempting to restart");
    }

    return 0;
}

/* This really should be a post_config hook, but the error log is already
 * redirected by that point, so we need to do this in the open_logs phase.
 */
static int prefork_open_logs(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s)
{
    apr_status_t rv;

    pconf = p;
    ap_server_conf = s;

    /* With no listeners there is nothing to serve: refuse to start. */
    if ((num_listensocks = ap_setup_listeners(ap_server_conf)) < 1) {
        ap_log_error(APLOG_MARK, APLOG_ALERT|APLOG_STARTUP, 0,
                     NULL, "no listening sockets available, shutting down");
        return DONE;
    }

    /* The pipe-of-death is how the parent asks idle children to exit. */
    if ((rv = ap_mpm_pod_open(pconf, &pod))) {
        ap_log_error(APLOG_MARK, APLOG_CRIT|APLOG_STARTUP, rv, NULL,
                     "Could not open pipe-of-death.");
        return DONE;
    }
    return OK;
}
/* Fork one server child into scoreboard slot 'slot'.
 * Returns 0 on success, -1 if fork failed (after a 10s back-off).
 * In one_process (debug) mode the "child" runs in this process via
 * child_main() and does not return through the normal path. */
static int make_child(server_rec *s, int slot)
{
    int pid;

    /* track the highest slot ever used so scoreboard scans stay bounded */
    if (slot + 1 > ap_max_daemons_limit) {
        ap_max_daemons_limit = slot + 1;
    }

    if (one_process) {
        apr_signal(SIGHUP, just_die);
        /* Don't catch AP_SIG_GRACEFUL in ONE_PROCESS mode :) */
        apr_signal(SIGINT, just_die);
#ifdef SIGQUIT
        apr_signal(SIGQUIT, SIG_DFL);
#endif
        apr_signal(SIGTERM, just_die);
        child_main(slot);
    }

    /* mark the slot before forking so it is never left unaccounted for */
    (void) ap_update_child_status_from_indexes(slot, 0, SERVER_STARTING,
                                               (request_rec *) NULL);

#ifdef _OSD_POSIX
    /* BS2000 requires a "special" version of fork() before a setuid() call */
    if ((pid = os_fork(unixd_config.user_name)) == -1) {
#elif defined(TPF)
    if ((pid = os_fork(s, slot)) == -1) {
#else
    if ((pid = fork()) == -1) {
#endif
        ap_log_error(APLOG_MARK, APLOG_ERR, errno, s,
                     "fork: Unable to fork new process");

        /* fork didn't succeed. Fix the scoreboard or else
         * it will say SERVER_STARTING forever and ever
         */
        (void) ap_update_child_status_from_indexes(slot, 0, SERVER_DEAD,
                                                   (request_rec *) NULL);

        /* In case system resources are maxxed out, we don't want
           Apache running away with the CPU trying to fork over and
           over and over again. */
        sleep(10);

        return -1;
    }

    if (!pid) {
        /* --- child process from here on --- */
#ifdef HAVE_BINDPROCESSOR
        /* by default AIX binds to a single processor
         * this bit unbinds children which will then bind to another cpu
         */
        int status = bindprocessor(BINDPROCESS, (int)getpid(),
                                   PROCESSOR_CLASS_ANY);
        if (status != OK) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
                         "processor unbind failed %d", status);
        }
#endif
        RAISE_SIGSTOP(MAKE_CHILD);
        AP_MONCONTROL(1);
        /* Disable the parent's signal handlers and set up proper handling in
         * the child.
         */
        apr_signal(SIGHUP, just_die);
        apr_signal(SIGTERM, just_die);
        /* The child process doesn't do anything for AP_SIG_GRACEFUL.
         * Instead, the pod is used for signalling graceful restart.
         */
        apr_signal(AP_SIG_GRACEFUL, SIG_IGN);
        child_main(slot);
    }

    /* parent: remember the child's pid in the scoreboard */
    ap_scoreboard_image->parent[slot].pid = pid;

    return 0;
}

/* start up a bunch of children */
static void startup_children(int number_to_start)
{
    int i;

    /* fill SERVER_DEAD slots until the requested count has been started,
     * stopping early if a fork fails */
    for (i = 0; number_to_start && i < ap_daemons_limit; ++i) {
        if (ap_scoreboard_image->servers[i][0].status != SERVER_DEAD) {
            continue;
        }
        if (make_child(ap_server_conf, i) < 0) {
            break;
        }
        --number_to_start;
    }
}

/*
 * idle_spawn_rate is the number of children that will be spawned on the
 * next maintenance cycle if there aren't enough idle servers.  It is
 * doubled up to MAX_SPAWN_RATE, and reset only when a cycle goes by
 * without the need to spawn.
 */
static int idle_spawn_rate = 1;
#ifndef MAX_SPAWN_RATE
#define MAX_SPAWN_RATE (32)
#endif
static int hold_off_on_exponential_spawning;

/* One pass of the parent's periodic scoreboard scan: count idle children,
 * signal one to exit (via the pod) if there are too many idle, or spawn up
 * to idle_spawn_rate new ones into free slots if there are too few. */
static void perform_idle_server_maintenance(apr_pool_t *p)
{
    int i;
    int to_kill;
    int idle_count;
    worker_score *ws;
    int free_length;
    int free_slots[MAX_SPAWN_RATE];
    int last_non_dead;
    int total_non_dead;

    /* initialize the free_list */
    free_length = 0;

    to_kill = -1;
    idle_count = 0;
    last_non_dead = -1;
    total_non_dead = 0;

    for (i = 0; i < ap_daemons_limit; ++i) {
        int status;

        /* past the high-water mark with a full free list: nothing left to learn */
        if (i >= ap_max_daemons_limit && free_length == idle_spawn_rate)
            break;
        ws = &ap_scoreboard_image->servers[i][0];
        status = ws->status;
        if (status == SERVER_DEAD) {
            /* try to keep children numbers as low as possible */
            if (free_length < idle_spawn_rate) {
                free_slots[free_length] = i;
                ++free_length;
            }
        }
        else {
            /* We consider a starting server as idle because we started it
             * at least a cycle ago, and if it still hasn't finished starting
             * then we're just going to swamp things worse by forking more.
             * So we hopefully won't need to fork more if we count it.
             * This depends on the ordering of SERVER_READY and SERVER_STARTING.
             */
            if (status <= SERVER_READY) {
                ++ idle_count;
                /* always kill the highest numbered child if we have to...
                 * no really well thought out reason ... other than observing
                 * the server behaviour under linux where lower numbered children
                 * tend to service more hits (and hence are more likely to have
                 * their data in cpu caches).
                 */
                /* NOTE(review): to_kill is computed but never used in the
                 * visible code — the pod signal below picks the victim. */
                to_kill = i;
            }

            ++total_non_dead;
            last_non_dead = i;
        }
    }
    ap_max_daemons_limit = last_non_dead + 1;
    if (idle_count > ap_daemons_max_free) {
        /* kill off one child... we use the pod because that'll cause it to
         * shut down gracefully, in case it happened to pick up a request
         * while we were counting
         */
        ap_mpm_pod_signal(pod);
        idle_spawn_rate = 1;
    }
    else if (idle_count < ap_daemons_min_free) {
        /* terminate the free list */
        if (free_length == 0) {
            /* only report this condition once */
            static int reported = 0;

            if (!reported) {
                ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf,
                             "server reached MaxClients setting, consider"
                             " raising the MaxClients setting");
                reported = 1;
            }
            idle_spawn_rate = 1;
        }
        else {
            if (idle_spawn_rate >= 8) {
                ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf,
                             "server seems busy, (you may need "
                             "to increase StartServers, or Min/MaxSpareServers), "
                             "spawning %d children, there are %d idle, and "
                             "%d total children", idle_spawn_rate,
                             idle_count, total_non_dead);
            }
            for (i = 0; i < free_length; ++i) {
#ifdef TPF
                if (make_child(ap_server_conf, free_slots[i]) == -1) {
                    if(free_length == 1) {
                        shutdown_pending = 1;
                        ap_log_error(APLOG_MARK, APLOG_EMERG, 0, ap_server_conf,
                                     "No active child processes: shutting down");
                    }
                }
#else
                make_child(ap_server_conf, free_slots[i]);
#endif /* TPF */
            }
            /* the next time around we want to spawn twice as many if this
             * wasn't good enough, but not if we've just done a graceful
             */
            if (hold_off_on_exponential_spawning) {
                --hold_off_on_exponential_spawning;
            }
            else if (idle_spawn_rate < MAX_SPAWN_RATE) {
                idle_spawn_rate *= 2;
            }
        }
    }
    else {
        idle_spawn_rate = 1;
    }
}
/* Parent process of a multi-process external sort.
 * Flags: -n <nsublist> sublist count, -p <nchild> worker count,
 *        -e <path> sorter path (copied into sortpath), -k <k> answer rank.
 * Reads a 4-byte integer count then the data from stdin, splits the data
 * across temp files, farms sublists to worker children over pipes with
 * select(), collects the sorted results back, prints findAns(ansk), and
 * kills the children.
 * NOTE(review): sig_pipe/sig_alrm/make_child/findAns and the globals
 * (nsublist, nchild, ansk, sortpath, ndata, pipefd, cpid, tmpfp, tmpfp2,
 * subread, sublen, childsub, jmpbuf, trans_buf_size) are defined elsewhere
 * in this file — their exact semantics are assumed from usage here. */
int main(int argc, char ** argv)
{
    /* SIGPIPE: a worker died mid-write; SIGALRM: watchdog armed via alarm(15).
     * NOTE(review): sig_alrm presumably siglongjmps to jmpbuf — confirm. */
    if (signal(SIGPIPE,sig_pipe)==SIG_ERR) perror("signal_SIGPIPE");
    if (signal(SIGALRM,sig_alrm)==SIG_ERR) perror("signal_SIGALRM");
    int i;
    /* parse "-x value" flag pairs; assumes at least 9 argv entries — TODO confirm */
    for (i=1;i<8;i++)
    {
        if (!strncmp(argv[i],"-n",2)) nsublist=atoi(argv[i+1]);
        else if (!strncmp(argv[i],"-p",2)) nchild=atoi(argv[i+1]);
        else if (!strncmp(argv[i],"-e",2)) strcpy(sortpath,argv[i+1]);
        else if (!strncmp(argv[i],"-k",2)) ansk=atoi(argv[i+1]);
    }
    /* first 4 bytes of stdin = number of 4-byte integers that follow */
    read(0,&ndata,4);
    int unsortData=ndata;
    int sortData=0;
//  printf("n:%d,p:%d,k:%d,path:%s;data %d\n",nsublist,nchild,ansk,sortpath,ndata);
    fflush(stdout);
    int buf[trans_buf_size];
    int nByte,remainByte;
    char bufname[100];   /* NOTE(review): unused in the visible code */
    int unreadData=ndata;
    /* split stdin into sublists (one extra when ndata doesn't divide evenly):
     * tmpfp2[i] holds the unsorted sublist, tmpfp[i] will hold it sorted */
    for (i=0;i<((ndata%nsublist)?nsublist+1:nsublist);i++)
    {
        tmpfp[i]=tmpfile();
        tmpfp2[i]=tmpfile();
        subread[i]=0;               /* 0 = not yet dispatched to a child */
        sublen[i]=ndata/nsublist;
        if (unreadData<sublen[i]) sublen[i]=unreadData;  /* last (short) sublist */
        remainByte=sublen[i]*4;
        /* copy sublen[i] ints from stdin into tmpfp2[i], one buffer at a time */
        while (remainByte>=4*trans_buf_size)
        {
            memset(buf,0,trans_buf_size*4);
            nByte=read(0,buf,4*trans_buf_size);
            fwrite(buf,4,nByte/4,tmpfp2[i]);
            remainByte-=nByte;
        };
        while (remainByte!=0)
        {
            memset(buf,0,trans_buf_size*4);
            nByte=read(0,buf,remainByte);
            fwrite(buf,4,nByte/4,tmpfp2[i]);
            remainByte-=nByte;
        };
        unreadData-=sublen[i];
    };
//  printf("read data finish!\n");
    fflush(stdout);
    /* fork the worker pool */
    for (i=0;i<nchild;i++) make_child(i);
//  printf("make child finish!\n");
    fflush(stdout);
    fd_set rfd,wfd;
    struct timeval tv;
    tv.tv_sec=100;
    tv.tv_usec=0;
    int retval,maxfd;
    for (i=0;i<nchild;i++) childsub[i]=-1;   /* -1 = child owns no sublist */
    /* main dispatch/collect loop: runs until every integer is sorted */
    while (!(sortData==ndata))
    {
        /* re-entry point: the alarm watchdog longjmps here on a stalled child */
        if (sigsetjmp(jmpbuf,1)==1)
        {
//          fprintf(stderr,"jumped\n");
            unsortData=ndata-sortData;   /* recount work still outstanding */
        }
        /* Parent reads from pipe 1, writes to pipe 0*/
        FD_ZERO(&rfd);
        FD_ZERO(&wfd);
        maxfd=-1;
//      fprintf(stderr,"data unsorted: %d\n",unsortData);
        if (unsortData>0)
            for (i=0;i<nchild;i++)
            {
                FD_SET(pipefd[i][0][1],&wfd);
                if (pipefd[i][0][1]>maxfd) maxfd=pipefd[i][0][1];
            };
        for (i=0;i<nchild;i++)
        {
            FD_SET(pipefd[i][1][0],&rfd);
            if (pipefd[i][1][0]>maxfd) maxfd=pipefd[i][1][0];
        };
        alarm(15);
        retval=select(maxfd+1,&rfd,&wfd,NULL,&tv);
        if (retval==-1)
        {
            if (errno==EINTR) continue;
        }
        else if (retval==0) continue;   /* timeout: just re-arm and retry */
        else
        {
            if (unsortData>0)
            {
                /* dispatch: hand an unread sublist to each idle, writable child */
                for (i=0;i<nchild;i++)
                    if (childsub[i]==-1 && FD_ISSET(pipefd[i][0][1],&wfd))
                    {
//                      if (sigsetjmp(jmpbuf,1)==1)
//                      {
//                          fprintf(stderr,"jumped\n");
//                          continue;
//                      }
                        alarm(15);
                        int k;
                        /* pick the first sublist not yet dispatched */
                        for (k=0;k<((ndata%nsublist)?nsublist+1:nsublist);k++)
                            if (!subread[k]) break;
//                      printf("send sublist %d to %d, pid %d \n",k,i,cpid[i]);
                        fflush(stdout);
                        fseek(tmpfp2[k],0,SEEK_SET);
                        int tp=sublen[k];
                        /* header: number of ints in this sublist.  EPIPE means
                         * the child died — restart it and skip this slot */
                        if (write(pipefd[i][0][1],&tp,4)!=4)
                            if (errno==EPIPE)
                            {
                                //perror("sigpipe");
                                kill(cpid[i],SIGINT);
                                make_child(i);
                                continue;
                            }
                        remainByte=sublen[k]*4;
                        /* stream the sublist body to the child, buffer by buffer */
                        while (remainByte>=4*trans_buf_size)
                        {
                            memset(buf,0,trans_buf_size*4);
                            nByte=fread(buf,4,trans_buf_size,tmpfp2[k])*4;
                            if (write(pipefd[i][0][1],buf,nByte)!=nByte)
                                if (errno==EPIPE)
                                {
                                    kill(cpid[i],SIGINT);
                                    make_child(i);
                                    continue;
                                }
                            remainByte-=nByte;
                        };
                        while (remainByte!=0)
                        {
                            memset(buf,0,trans_buf_size*4);
                            nByte=fread(buf,4,remainByte/4,tmpfp2[k])*4;
                            if (write(pipefd[i][0][1],buf,nByte)!=nByte)
                                if (errno==EPIPE)
                                {
                                    kill(cpid[i],SIGINT);
                                    make_child(i);
                                    continue;
                                }
                            remainByte-=nByte;
                        };
//                      printf("send sublist %d to %d finish, send %d, remain %d\n",k,i,sublen[k],unsortData);
                        fflush(stdout);
                        unsortData-=sublen[k];
                        subread[k]=1;
                        childsub[i]=k;   /* remember which sublist child i owns */
                        if (unsortData==0) break;
                    };
            };
            /* collect: read sorted data back from each readable child */
            for (i=0;i<nchild;i++)
                if (FD_ISSET(pipefd[i][1][0],&rfd))
                {
                    alarm(15);
                    fseek(tmpfp[childsub[i]],0,SEEK_SET);
//                  if (sigsetjmp(jmpbuf,1)==1)
//                  {
//                      fprintf(stderr,"jumped\n");
//                      continue;
//                  }
//                  printf("receive from %d begin\n",i);
                    fflush(stdout);
                    int remainByte=sublen[childsub[i]]*4;
                    while (remainByte>=4*trans_buf_size)
                    {
                        memset(buf,0,trans_buf_size*4);
                        nByte=read(pipefd[i][1][0],buf,4*trans_buf_size);
                        fwrite(buf,4,nByte/4,tmpfp[childsub[i]]);
                        remainByte-=nByte;
                    };
                    while (remainByte!=0)
                    {
                        memset(buf,0,trans_buf_size*4);
                        nByte=read(pipefd[i][1][0],buf,remainByte);
                        fwrite(buf,4,nByte/4,tmpfp[childsub[i]]);
                        remainByte-=nByte;
                    }
//                  printf("receive sublist %d from %d end, data soted %d, total sorted %d\n",childsub[i],i,sublen[childsub[i]],sortData);
                    fflush(stdout);
                    sortData+=sublen[childsub[i]];
                    childsub[i]=-1;      /* child is idle again */
                }
        }
    };
//  printf("sort finish,begin to merge\n");
    fflush(stdout);
    printf("%d",findAns(ansk));
    /* shut the worker pool down */
    for (i=0;i<nchild;i++) kill(cpid[i],SIGINT);
    return 1;   /* NOTE(review): returns 1 even on success — TODO confirm intended */
}
int main(int argc,char *argv[]) {//exe ip port cnt if(argc != 4) { perror("usage:exe cnt!\n"); exit(-1); } int chld_cnt = atoi(argv[3]); SA server_addr ; int fd_server, max_fd, index; fd_set read_set, ready_set ; pCHLD chlds = (pCHLD)calloc(chld_cnt, sizeof(pCHLD)); max_fd = make_child(chlds, chld_cnt) ; bzero(&server_addr, sizeof(SA)); fd_server = socket(AF_INET, SOCK_DGRAM, 0); if(fd_server == -1) { perror("sock"); exit(-1); } server_addr.sin_family = AF_INET ; server_addr.sin_port = htons(atoi(argv[2])); server_addr.sin_addr.s_addr = inet_addr(argv[1]); if(-1 == bind(fd_server, (struct sockaddr*)&server_addr, sizeof(SA))) { perror("bind"); close(fd_server); exit(-1); } max_fd = max_fd > fd_server ? max_fd : fd_server ; FD_ZERO(&read_set); FD_SET(fd_server, &read_set) ; for(index = 0; index < chld_cnt ; index ++) { FD_SET(chlds[index].s_read, &read_set); } struct timeval tm; while(1) { tm.tv_usec = 1000 ; tm.tv_sec = 0; ready_set = read_set ; select(max_fd + 1, &read_set, NULL,NULL, &tm); if(FD_ISSET(fd_server, &read_set)) { char buf[1024] = ""; SA from_addr ; int addrlen = sizeof(SA); bzero(&from_addr, sizeof(SA)); printf("begin recv....\n"); recvfrom(fd_server, buf,1024, 0, (struct sockaddr*)&from_addr, &addrlen) ; puts(buf); for(index = 0; index < chld_cnt; index ++ ) { if(chlds[index].s_flag == S_IDLE) { break; } } if(index == chld_cnt) { } else { chlds[index].s_flag = S_BUSY; chlds[index].s_cnt ++; FILE* fp = fdopen(chlds[index].s_write, "w"); fprintf(fp, "%d %d %s", from_addr.sin_port, from_addr.sin_addr.s_addr, buf); //port ip request fflush(fp); } } for(index = 0; index < chld_cnt; index ++) { char ch ; if(FD_ISSET(chlds[index].s_read, &ready_set)) { read((chlds[index].s_read), &ch, 1); chlds[index].s_flag = S_IDLE; } } } return 0 ; }