void check_sync(void){ int i; //check semaphore sem_init(&mutex, 1); for(i=0;i<N;i++){ sem_init(&s[i], 0); int pid = kernel_thread(philosopher_using_semaphore, (void *)i, 0); if (pid <= 0) { panic("create No.%d philosopher_using_semaphore failed.\n"); } philosopher_proc_sema[i] = find_proc(pid); set_proc_name(philosopher_proc_sema[i], "philosopher_sema_proc"); } //check condition variable monitor_init(&mt, N); for(i=0;i<N;i++){ state_condvar[i]=THINKING; int pid = kernel_thread(philosopher_using_condvar, (void *)i, 0); if (pid <= 0) { panic("create No.%d philosopher_using_condvar failed.\n"); } philosopher_proc_condvar[i] = find_proc(pid); set_proc_name(philosopher_proc_condvar[i], "philosopher_condvar_proc"); } }
// proc_init - set up the first kernel thread idleproc "idle" by itself and // - create the second kernel thread init_main void proc_init(void) { int i; list_init(&proc_list); for (i = 0; i < HASH_LIST_SIZE; i ++) { list_init(hash_list + i); } if ((idleproc = alloc_proc()) == NULL) { panic("cannot alloc idleproc.\n"); } idleproc->pid = 0; idleproc->state = PROC_RUNNABLE; idleproc->kstack = (uintptr_t)bootstack; idleproc->need_resched = 1; set_proc_name(idleproc, "idle"); nr_process ++; current = idleproc; int pid = kernel_thread(init_main, "Hello world!!", 0); if (pid <= 0) { panic("create init_main failed.\n"); } initproc = find_proc(pid); set_proc_name(initproc, "init"); assert(idleproc != NULL && idleproc->pid == 0); assert(initproc != NULL && initproc->pid == 1); }
/* ipc_event_send - deliver an event to process pid, blocking (up to
 * timeout) until it is consumed.
 * Returns 0 on success, -E_INVAL for an invalid/forbidden target, or a
 * timeout error from ipc_check_timeout. */
int
ipc_event_send(int pid, int event, unsigned int timeout) {
    struct proc_struct *proc;
    // Target must exist and not be a zombie.
    if ((proc = find_proc(pid)) == NULL || proc->state == PROC_ZOMBIE) {
        return -E_INVAL;
    }
    // Refuse to signal ourselves or the special kernel processes.
    if (proc == current || proc == idleproc || proc == initproc) {
        return -E_INVAL;
    }
#ifdef UCONFIG_SWAP
    if(proc == kswapd)
        return -E_INVAL;
#endif
    // If the receiver is already blocked waiting for an event, wake it so
    // it can pick this one up.
    if (proc->wait_state == WT_EVENT_RECV) {
        wakeup_proc(proc);
    }
    // Stash the event in the sender's event box; send_event hands it over.
    current->event_box.event = event;
    unsigned long saved_ticks;
    // Arm an optional timeout timer (NULL timer when timeout == 0,
    // presumably -- behavior defined by ipc_timer_init).
    timer_t __timer, *timer = ipc_timer_init(timeout, &saved_ticks, &__timer);
    uint32_t flags;
    if ((flags = send_event(proc, timer)) == 0) {
        return 0;
    }
    // Only an interrupted wait is expected here; map it to a timeout check.
    assert(flags == WT_INTERRUPTED);
    return ipc_check_timeout(timeout, saved_ticks);
}
/* increment_ancestors - bump the descendant counter (desc) on p and on every
 * ancestor found by walking the ppid chain.  Stops at the root (ppid == 0)
 * or at a self-parented process.
 *
 * Fixes over the original:
 *  - find_proc(p->ppid) was dereferenced without a NULL check, crashing if
 *    the parent had already been removed from the process table;
 *  - the tail recursion is now a loop, so a deep ancestry chain cannot
 *    overflow the kernel stack. */
void increment_ancestors(proc *p) {
    while (p != NULL) {
        p->desc++;
        if (p->ppid == 0 || p->pid == p->ppid)
            break;
        p = find_proc(p->ppid);   /* NULL if the parent is gone -> stop */
    }
}
// Return the F16 blit proc for this xfermode.  Only the low two flag bits
// are meaningful; a coefficient-expressible mode gets a specialized proc,
// anything else falls back to the general table.
SkXfermode::F16Proc SkXfermode::onGetF16Proc(uint32_t flags) const {
    SkASSERT(0 == (flags & ~3));
    flags &= 3;

    Mode mode;
    if (this->asMode(&mode)) {
        return find_proc(mode, flags);
    }
    return gProcs_General[flags];
}
/* sys_waitpid - wait for process pid to exit and report its exit code
 * through *status.  Only options == 0 is supported.  Returns pid on
 * success, -1 with errno set on failure. */
pid_t
sys_waitpid(pid_t pid, int *status, int options)
{
    if(options != 0) {
        // only support option 0.
        errno = EINVAL;
        return -1;
    }
    // Waiting on yourself would deadlock; return immediately.
    if(pid == curproc->p_pid) {
        return pid;
    }
    struct exitc *c;
    // status pointer validation: reject a userspace pointer outside the
    // caller's address space before we ever write through it.
    // NOTE(review): when as == NULL the check is skipped and *status is
    // still written below -- confirm a kernel-only caller always passes a
    // valid pointer.
    vaddr_t sp = (vaddr_t)status;
    struct addrspace *as = curproc->p_addrspace;
    if (as != NULL && !valid_address_check(as, sp)) {
        // out of vaddr boundary for this proc
        errno = EFAULT;
        return -1;
    }
    struct proc* p = find_proc(pid); // search for process
    if(p != NULL) {
        // Process still alive: sleep on its CV until it disappears from
        // the process table.
        // NOTE(review): once find_proc(pid) returns NULL the process has
        // been torn down, yet p->p_lk is still released afterwards --
        // confirm the lock's lifetime outlasts the process, otherwise
        // this is a use-after-free.
        lock_acquire(p->p_lk);
        while(find_proc(pid) != NULL) {
            cv_wait(p->p_cv, p->p_lk);
        }
        lock_release(p->p_lk);
    }
    // Fetch the saved exit code; ESRCH if nothing was ever recorded.
    c = find_exitc(pid);
    if(c) {
        *status = c->exitcode;
    } else {
        errno = ESRCH;
        return -1;
    }
    /*unsigned num = exitcarray_num(codes);
    for (unsigned i = 0 ; i < num ; i++) {
        if (exitcarray_get(codes, i) == c) { exitcarray_remove(codes,i); }
    }*/
    return pid;
}
// do_kill - kill process with pid int do_kill(int pid, int error_code) { struct proc_struct *proc; if ((proc = find_proc(pid)) != NULL) { return __do_kill(proc, error_code); } return -E_INVAL; }
// sys_thread_new - lwIP porting hook: spawn a kernel thread running
// `thread(arg)` and give it `name`.  stacksize/prio are accepted for API
// compatibility but not used by this port.  Returns the new thread's pid.
sys_thread_t
sys_thread_new(char *name, void (* thread)(void *arg), void *arg,
               int stacksize, int prio) {
    int pid = kernel_thread((kthrfunc)thread, arg, 0);
    if (pid > 0) {
        set_proc_name(find_proc(pid), name);
        return pid;
    }
    panic("create tcp_ip thread failed");
    return pid;  /* not reached: panic does not return */
}
/* ker_send - copy message *sms from the current process into a kernel mail
 * slot and deliver it to process pid: directly into the receiver's buffer
 * if it is already blocked in receive, otherwise queued on its MesQ.
 *
 * Fix: when every mailbuf slot was occupied the search loop fell through
 * with i == MS_CAP and the code wrote mailbuf[MS_CAP] -- out of bounds.
 * The send is now dropped instead.  NOTE(review): silently dropping on a
 * full buffer mirrors the old (undefined) behavior as closely as possible;
 * confirm whether the caller should block or be told instead. */
void ker_send(uint_8 pid, struct message* sms){
    struct pcb* s;
    uint_16 s1, s2;
    int i, sp;

    s = find_proc(pid);
    /* NOTE(review): s is not checked for NULL -- confirm find_proc cannot
     * fail for pids reaching this path. */

    /* Find a free slot in the kernel mail buffer. */
    for (i = 0; i < MS_CAP; i++)
        if (mailbuf[i].occupy == FALSE)  /* make sure it will not be filled */
            break;
    if (i == MS_CAP)
        return;                          /* buffer full: drop the message */
    sp = i;
    mailbuf[sp].occupy = TRUE;

    /* Copy the message from the sender's segment into the kernel slot. */
    s1 = (uint_16)sms;
    s2 = (uint_16)&mailbuf[sp].msg;
    for (i = 0; i < MESSAGE_SIZE; i++)
        physics_copy(current_pcb->cs, s1+i, kernel_segment, s2+i);
    mailbuf[sp].msg.source = current_pcb->pid;

    if (s->mail.value < 0){
        /* Receiver already blocked in receive: copy straight into its
         * message buffer and free the kernel slot. */
        for (i = 0; i < MESSAGE_SIZE; i++)
            physics_copy(kernel_segment, s2+i, s->cs, s->mes_off+i);
        mailbuf[sp].occupy = FALSE;
    } else {
        /* Otherwise push the slot onto the receiver's message queue. */
        mailbuf[sp].next = s->MesQ;
        s->MesQ = &mailbuf[sp];
    }
    V(&s->mail);
}
// proc_init - set up the first kernel thread idleproc "idle" by itself and // - create the second kernel thread init_main void proc_init(void) { int i; list_init(&proc_list); if ((idleproc = alloc_proc()) == NULL) { panic("cannot alloc idleproc.\n"); } idleproc->pid = 0; idleproc->state = PROC_RUNNABLE; idleproc->kstack = (uintptr_t)bootstack; idleproc->need_resched = 1; set_proc_name(idleproc, "idle"); nr_process ++; current = idleproc; int pid1 = kernel_thread(init_main, "init main1: Hello world!!", 0); int pid2 = kernel_thread(init_main, "init main2: Hello world!!", 0); int pid3 = kernel_thread(init_main, "init main3: Lab4 spoc discussion!!", 0); if (pid1 <= 0 || pid2 <= 0 || pid3 <= 0) { panic("create kernel thread init_main1 or 2 or 3 failed.\n"); } initproc1 = find_proc(pid1); initproc2 = find_proc(pid2); initproc3 = find_proc(pid3); set_proc_name(initproc1, "init1"); set_proc_name(initproc2, "init2"); set_proc_name(initproc3, "init3"); cprintf("proc_init:: Created kernel thread init_main--> pid: %d, name: %s\n", initproc1->pid, initproc1->name); cprintf("proc_init:: Created kernel thread init_main--> pid: %d, name: %s\n", initproc2->pid, initproc2->name); cprintf("proc_init:: Created kernel thread init_main--> pid: %d, name: %s\n", initproc3->pid, initproc3->name); assert(idleproc != NULL && idleproc->pid == 0); }
// proc_init - set up the first kernel thread idleproc "idle" by itself and // - create the second kernel thread init_main void proc_init(void) { int i; int cpuid = myid(); struct proc_struct *idle; spinlock_init(&proc_lock); list_init(&proc_list); list_init(&proc_mm_list); for (i = 0; i < HASH_LIST_SIZE; i++) { list_init(hash_list + i); } idle = alloc_proc(); if (idle == NULL) { panic("cannot alloc idleproc.\n"); } idle->pid = cpuid; idle->state = PROC_RUNNABLE; // No need to be set for kthread (no privilege switch) // idleproc->kstack = (uintptr_t)bootstack; idle->need_resched = 1; idle->tf = NULL; if ((idle->fs_struct = fs_create()) == NULL) { panic("create fs_struct (idleproc) failed.\n"); } fs_count_inc(idle->fs_struct); char namebuf[32]; snprintf(namebuf, 32, "idle/%d", cpuid); set_proc_name(idle, namebuf); nr_process++; idleproc = idle; current = idle; int pid = ucore_kernel_thread(init_main, NULL, 0); if (pid <= 0) { panic("create init_main failed.\n"); } initproc = find_proc(pid); set_proc_name(initproc, "kinit"); char *proc_init="Proc init OK"; assert(idleproc != NULL && idleproc->pid == cpuid); assert(initproc != NULL && initproc->pid == sysconf.lcpu_count); }
// proc_init - set up the first kernel thread idleproc "idle" by itself and // - create the second kernel thread init_main void proc_init(void) { int i; int lcpu_idx = pls_read(lcpu_idx); int lapic_id = pls_read(lapic_id); int lcpu_count = pls_read(lcpu_count); list_init(&proc_list); list_init(&proc_mm_list); for (i = 0; i < HASH_LIST_SIZE; i ++) { list_init(hash_list + i); } pls_write(idleproc, alloc_proc()); if (idleproc == NULL) { panic("cannot alloc idleproc.\n"); } idleproc->pid = lcpu_idx; idleproc->state = PROC_RUNNABLE; // XXX // idleproc->kstack = (uintptr_t)bootstack; idleproc->need_resched = 1; idleproc->tf = NULL; if ((idleproc->fs_struct = fs_create()) == NULL) { panic("create fs_struct (idleproc) failed.\n"); } fs_count_inc(idleproc->fs_struct); char namebuf[32]; snprintf(namebuf, 32, "idle/%d", lapic_id); set_proc_name(idleproc, namebuf); nr_process ++; pls_write(current, idleproc); int pid = kernel_thread(init_main, NULL, 0); if (pid <= 0) { panic("create init_main failed.\n"); } initproc = find_proc(pid); set_proc_name(initproc, "init"); assert(idleproc != NULL && idleproc->pid == lcpu_idx); assert(initproc != NULL && initproc->pid == lcpu_count); }
int do_getsetpriority() { int r, arg_which, arg_who, arg_pri; struct mproc *rmp; arg_which = m_in.m_lc_pm_priority.which; arg_who = m_in.m_lc_pm_priority.who; arg_pri = 1 /* for SETPRIORITY we set highest priority */ /* Code common to GETPRIORITY and SETPRIORITY. */ /* Only support PRIO_PROCESS for now. */ if (arg_which != PRIO_PROCESS) return(EINVAL); if (arg_who == 0) rmp = mp; else if ((rmp = find_proc(arg_who)) == NULL) return(ESRCH); if (mp->mp_effuid != SUPER_USER && mp->mp_effuid != rmp->mp_effuid && mp->mp_effuid != rmp->mp_realuid) return EPERM; /* If GET, that's it. */ if (call_nr == PM_GETPRIORITY) { return(rmp->mp_nice - PRIO_MIN); } /* Only root is allowed to reduce the nice level. */ if (rmp->mp_nice > arg_pri && mp->mp_effuid != SUPER_USER) return(EACCES); /* We're SET, and it's allowed. * * The value passed in is currently between PRIO_MIN and PRIO_MAX. * We have to scale this between MIN_USER_Q and MAX_USER_Q to match * the kernel's scheduling queues. */ if ((r = sched_nice(rmp, arg_pri)) != OK) { return r; } rmp->mp_nice = arg_pri; return(OK); }
/* rbtree_find_node - search the red-black tree with the caller-supplied
 * comparator `find_proc` (returns 0 on match, <0 to descend right, >0 to
 * descend left, mirroring the original's direction choice).  Returns the
 * matching node, or NULL when no node matches.
 *
 * Fix: this is a sentinel-based tree (leaf children point at
 * tree->sentinel, as the initial root check shows), but the descent loop
 * tested `node != NULL`.  That could invoke the comparator on the sentinel
 * and even return the sentinel as a bogus match.  The walk now stops at
 * the sentinel. */
ezRBTreeNode *rbtree_find_node(ezRBTree * tree, findCompareKey find_proc, void * find_args) {
    ezRBTreeNode *node = tree->root;
    ezRBTreeNode *sentinel = tree->sentinel;

    while (node != sentinel) {
        int r = find_proc(node, find_args);
        if (r == 0) {
            return node;
        }
        node = (r < 0) ? node->right : node->left;
    }
    return NULL;
}
/*===========================================================================* * do_getprocnr * *===========================================================================*/ int do_getprocnr(void) { register struct mproc *rmp; /* This check should be replaced by per-call ACL checks. */ if (who_e != RS_PROC_NR) { printf("PM: unauthorized call of do_getprocnr by %d\n", who_e); return EPERM; } if ((rmp = find_proc(m_in.m_lsys_pm_getprocnr.pid)) == NULL) return(ESRCH); mp->mp_reply.m_pm_lsys_getprocnr.endpt = rmp->mp_endpoint; return(OK); }
/* __ucore_wakeup_by_pid - wake the process with the given pid.
 * Returns 0 on success, -E_INVAL if the pid is unknown or the process is
 * already a zombie.  Interrupts are disabled around the state check so the
 * state cannot change under us. */
int __ucore_wakeup_by_pid(int pid)
{
	//kprintf("ucore_wakeup_by_pid %d\n", pid);
	struct proc_struct *proc = find_proc(pid);
	if (!proc)
		return -E_INVAL;
	bool flag;
	local_intr_save(flag);
	if (proc->state == PROC_ZOMBIE) {
		local_intr_restore(flag);
		return -E_INVAL;
	}
	/* NOTE(review): wakeup_proc is only called when the target is already
	 * PROC_RUNNABLE, and sleeping states fall through silently with a
	 * success return -- confirm this is intentional (it looks like the
	 * condition may have been meant for a sleeping state). */
	if (proc->state == PROC_RUNNABLE)
		wakeup_proc(proc);
	local_intr_restore(flag);
	return 0;
}
/* Look up the node rank recorded for `proc`; ORTE_NODE_RANK_INVALID when
 * the process is unknown. */
static orte_node_rank_t proc_get_node_rank(orte_process_name_t *proc)
{
    orte_proc_t *pdata = find_proc(proc);

    if (NULL == pdata) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_NODE_RANK_INVALID;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:orcm: proc %s has node rank %d",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc), (int)pdata->node_rank));

    return pdata->node_rank;
}
/* Return the hostname of the node hosting `proc`; NULL when the process is
 * unknown.  The returned string is owned by the node object, not the
 * caller. */
static char* proc_get_hostname(orte_process_name_t *proc)
{
    orte_proc_t *pdata = find_proc(proc);

    if (NULL == pdata) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return NULL;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:orcm: proc %s is on host %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         pdata->node->name));

    return pdata->node->name;
}
/* Return the architecture word recorded for the node hosting `proc`;
 * 0 when the process is unknown. */
static uint32_t proc_get_arch(orte_process_name_t *proc)
{
    orte_proc_t *pdata = find_proc(proc);

    if (NULL == pdata) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return 0;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:hnp: proc %s has arch %0x",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         pdata->node->arch));

    return pdata->node->arch;
}
/* check_color - verify that every pixel of bm equals the expected color for
 * its config.  find_proc picks the per-config comparison proc and reports
 * (through expect) the value each pixel should hold; the proc returns the
 * x of the first mismatching pixel in a row (and the bad value), or a
 * negative value when the whole row matches.  Reports a failure and
 * returns false on the first bad pixel. */
static bool check_color(const SkBitmap& bm, SkPMColor expect32,
                        uint16_t expect16, uint8_t expect8,
                        skiatest::Reporter* reporter) {
    uint32_t expect;
    Proc proc = find_proc(bm, expect32, expect16, expect8, &expect);
    for (int y = 0; y < bm.height(); y++) {
        uint32_t bad;
        // Scan one row starting at its left edge.
        int x = proc(bm.getAddr(0, y), bm.width(), expect, &bad);
        if (x >= 0) {
            SkString str;
            str.printf("BlitRow config=%s [%d %d] expected %x got %x",
                       gConfigName[bm.config()], x, y, expect, bad);
            reporter->reportFailed(str);
            return false;
        }
    }
    return true;
}
/* proc_init_ap - bring up process management on an application processor:
 * hand-build this CPU's idle thread (pid == cpu id) and spawn the per-CPU
 * kref-cache cleaner thread pinned to this CPU. */
void proc_init_ap(void)
{
	int cpuid = myid();
	struct proc_struct *idle;

	idle = alloc_proc();
	if (idle == NULL) {
		panic("cannot alloc idleproc.\n");
	}

	idle->pid = cpuid;
	idle->state = PROC_RUNNABLE;
	// No need to be set for kthread (no privilege switch)
	// idle->kstack = (uintptr_t)bootstack;
	idle->need_resched = 1;
	idle->tf = NULL;
	if ((idle->fs_struct = fs_create()) == NULL) {
		panic("create fs_struct (idleproc) failed.\n");
	}
	fs_count_inc(idle->fs_struct);

	// Name the idle thread after this CPU, e.g. "idle/2".
	char namebuf[32];
	snprintf(namebuf, 32, "idle/%d", cpuid);
	set_proc_name(idle, namebuf);
	nr_process++;

	idleproc = idle;
	current = idle;

#if 1
	// Per-CPU kref-cache cleaner, pinned to this CPU by affinity.
	int pid;
	char proc_name[32];
	if ((pid = ucore_kernel_thread(krefcache_cleaner, NULL, 0)) <= 0) {
		panic("krefcache_cleaner init failed.\n");
	}
	struct proc_struct *cleaner = find_proc(pid);
	snprintf(proc_name, 32, "krefcache/%d", myid());
	set_proc_name(cleaner, proc_name);
	set_proc_cpu_affinity(cleaner, myid());
	nr_process++;
#endif

	assert(idleproc != NULL && idleproc->pid == cpuid);
}
/* Record a new architecture word for the node hosting `proc`.
 * Returns ORTE_SUCCESS, or ORTE_ERR_NOT_FOUND if the process is unknown. */
static int update_arch(orte_process_name_t *proc, uint32_t arch)
{
    orte_proc_t *pdata = find_proc(proc);

    if (NULL == pdata) {
        ORTE_ERROR_LOG(ORTE_ERR_NOT_FOUND);
        return ORTE_ERR_NOT_FOUND;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:hnp: updating proc %s to arch %0x",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc), arch));

    pdata->node->arch = arch;
    return ORTE_SUCCESS;
}
/* Return the vpid of the daemon hosting `proc`.  A daemon hosts itself;
 * otherwise look the process up and follow its node's daemon link.
 * ORTE_VPID_INVALID when the process is unknown. */
static orte_vpid_t proc_get_daemon(orte_process_name_t *proc)
{
    orte_proc_t *pdata;

    if (ORTE_JOBID_IS_DAEMON(proc->jobid)) {
        return proc->vpid;
    }

    /* get the job data */
    pdata = find_proc(proc);
    if (NULL == pdata) {
        return ORTE_VPID_INVALID;
    }

    OPAL_OUTPUT_VERBOSE((2, orte_ess_base_output,
                         "%s ess:orcm: proc %s is hosted by daemon %s",
                         ORTE_NAME_PRINT(ORTE_PROC_MY_NAME),
                         ORTE_NAME_PRINT(proc),
                         ORTE_VPID_PRINT(pdata->node->daemon->name.vpid)));

    return pdata->node->daemon->name.vpid;
}
/*===========================================================================*
 *				do_trace				     *
 *===========================================================================*/
/* Old-style PM ptrace handler: T_OK enables tracing of the caller;
 * everything else is issued by the tracing parent against a STOPPED child
 * and mostly forwarded to the kernel via sys_trace. */
PUBLIC int do_trace()
{
  register struct mproc *child;

  /* the T_OK call is made by the child fork of the debugger before it execs
   * the process to be traced
   */
  if (m_in.request == T_OK) {	/* enable tracing by parent for this proc */
	mp->mp_flags |= TRACED;
	mp->mp_reply.reply_trace = 0;
	return(OK);
  }
  /* Target must exist and already be stopped. */
  if ((child=find_proc(m_in.pid))==NIL_MPROC || !(child->mp_flags & STOPPED)) {
	return(ESRCH);
  }
  /* all the other calls are made by the parent fork of the debugger to
   * control execution of the child
   */
  switch (m_in.request) {
  case T_EXIT:		/* exit */
	pm_exit(child, (int) m_in.data);
	mp->mp_reply.reply_trace = 0;
	return(OK);
  case T_RESUME:
  case T_STEP:		/* resume execution */
	/* NOTE(review): the bound is `> _NSIG` here while the newer PM uses
	 * `>= _NSIG` -- confirm whether signal number _NSIG itself is valid
	 * in this codebase. */
	if (m_in.data < 0 || m_in.data > _NSIG) return(EIO);
	if (m_in.data > 0) {		/* issue signal */
		/* Temporarily clear TRACED so the signal is delivered to the
		 * child instead of being diverted back to the tracer. */
		child->mp_flags &= ~TRACED;  /* so signal is not diverted */
		sig_proc(child, (int) m_in.data);
		child->mp_flags |= TRACED;
	}
	child->mp_flags &= ~STOPPED;
	break;
  }
  /* Forward the remaining requests (register/memory access etc.) to the
   * kernel; reply carries back the data word. */
  if (sys_trace(m_in.request,(int)(child-mproc),m_in.taddr,&m_in.data) != OK)
	return(-errno);
  mp->mp_reply.reply_trace = m_in.data;
  return(OK);
}
/* ker_int_send - kernel-internal variant of ker_send: the source message
 * already lives in the kernel segment, so both copies are kernel-to-kernel
 * on the enqueue side.  Delivery logic mirrors ker_send.
 *
 * Fix: when every mailbuf slot was occupied the search loop fell through
 * with i == MS_CAP and the code wrote mailbuf[MS_CAP] -- out of bounds.
 * The send is now dropped instead (same policy as ker_send). */
void ker_int_send(uint_8 pid, struct message* sms){
    struct pcb* s;
    uint_16 s1, s2;
    int i, sp;

    s = find_proc(pid);
    /* NOTE(review): s is not checked for NULL -- confirm find_proc cannot
     * fail for pids reaching this path. */

    /* Find a free slot in the kernel mail buffer. */
    for (i = 0; i < MS_CAP; i++)
        if (mailbuf[i].occupy == FALSE)  /* make sure it will not be filled */
            break;
    if (i == MS_CAP)
        return;                          /* buffer full: drop the message */
    sp = i;
    mailbuf[sp].occupy = TRUE;

    /* Both source and slot are in the kernel segment here. */
    s1 = (uint_16)sms;
    s2 = (uint_16)&mailbuf[sp].msg;
    for (i = 0; i < MESSAGE_SIZE; i++)
        physics_copy(kernel_segment, s1+i, kernel_segment, s2+i);
    mailbuf[sp].msg.source = current_pcb->pid;

    if (s->mail.value < 0){
        /* Receiver already blocked in receive: deliver directly. */
        for (i = 0; i < MESSAGE_SIZE; i++)
            physics_copy(kernel_segment, s2+i, s->cs, s->mes_off+i);
        mailbuf[sp].occupy = FALSE;
    } else {
        /* Otherwise queue on the receiver's message list. */
        mailbuf[sp].next = s->MesQ;
        s->MesQ = &mailbuf[sp];
    }
    V(&s->mail);
}
/* parse_monitor_options - parse dtnperf monitor command-line options into
 * perf_g_opt.  Exits the process directly for --help, --stop, and on
 * syntax errors.
 *
 * Fixes over the original:
 *  - getopt_long() returns int; storing it in a char made the end-of-
 *    options test `c == (char)(-1)` depend on char signedness.  Use int
 *    and compare against -1.
 *  - the --stop branch built cmd with an unbounded sprintf of argv[0];
 *    use snprintf so an oversized argv[0] cannot overflow cmd[256]. */
void parse_monitor_options(int argc, char ** argv, dtnperf_global_options_t * perf_g_opt)
{
	int c;
	int done = 0;
	boolean_t output_set = FALSE;
	dtnperf_options_t * perf_opt = perf_g_opt->perf_opt;
	// kill daemon variables
	int pid;
	char cmd[256];

	while (!done)
	{
		static struct option long_options[] =
		{
			{"help", no_argument, 0, 'h'},
			{"verbose", no_argument, 0, 'v'},
			{"debug", optional_argument, 0, 33},
			{"ldir", required_argument, 0, 40},
			{"ip-addr", required_argument, 0, 37},
			{"ip-port", required_argument, 0, 38},
			{"daemon", no_argument, 0, 'a'},
			{"output", required_argument, 0, 'o'},
			{"stop", no_argument, 0, 's'},
			{0,0,0,0}	// The last element of the array has to be filled with zeros.
		};
		int option_index = 0;
		c = getopt_long(argc, argv, "hvao:s", long_options, &option_index);

		switch (c)
		{
		case 'h':
			print_monitor_usage(argv[0]);
			exit(0);
			return ;

		case 'v':
			perf_opt->verbose = TRUE;
			break;

		case 33: // debug
			perf_opt->debug = TRUE;
			if (optarg != NULL){
				int debug_level = atoi(optarg);
				if (debug_level >= 1 && debug_level <= 2)
					perf_opt->debug_level = atoi(optarg) -1;
				else {
					fprintf(stderr, "wrong --debug argument\n");
					exit(1);
					return;
				}
			}
			else
				perf_opt->debug_level = 2;
			break;

		case 37:
			perf_opt->ip_addr = strdup(optarg);
			perf_opt->use_ip = TRUE;
			break;

		case 38:
			perf_opt->ip_port = atoi(optarg);
			perf_opt->use_ip = TRUE;
			break;

		case 40:
			perf_opt->logs_dir = strdup(optarg);
			break;

		case 'a':
			perf_opt->daemon = TRUE;
			break;

		case 'o':
			perf_opt->monitor_output_file = strdup(optarg);
			output_set = TRUE;
			break;

		case 's':
			// find the running monitor instance and ask it to shut down
			memset(cmd, 0, sizeof(cmd));
			snprintf(cmd, sizeof(cmd), "%s %s", argv[0], MONITOR_STRING);
			pid = find_proc(cmd);
			if (pid)
			{
				printf("Closing dtnperf monitor pid: %d\n", pid);
				kill(pid, SIGINT);
			}
			else
			{
				fprintf(stderr, "ERROR: cannot find a running instance of dtnperf monitor\n");
			}
			exit(0);
			break;

		case '?':
			break;

		case -1:	// end of options
			done = 1;
			break;

		default:
			// getopt already prints an error message for unknown option characters
			print_monitor_usage(argv[0]);
			exit(1);
		}
	}

	// -o makes sense only when running as a daemon
	if (output_set && !perf_opt->daemon)
	{
		fprintf(stderr, "\nSYNTAX ERROR: -o option can be used only with -a option\n");
		print_monitor_usage(argv[0]);
		exit(1);
	}
}
/*===========================================================================*
 *				do_get					     *
 *===========================================================================*/
PUBLIC int do_get()
{
/* Handle GETUID, GETGID, GETGROUPS, GETGROUPS_O, GETPID, GETPGRP, GETSID.
 * The primary result goes in r; some calls also fill reply_res2. */

  register struct mproc *rmp = mp;
  int r, i;
  int ngroups;
  char sgroups[NGROUPS_MAX];	/* XXX: Temp storage for GETGROUPS_O */

  switch(call_nr) {
	case GETGROUPS_O:
		/* Legacy variant: groups are copied out as chars. */
		ngroups = m_in.grp_no;
		if (ngroups > NGROUPS_MAX || ngroups < 0)
			return(EINVAL);

		/* ngroups == 0 is a size query: just report the count. */
		if (ngroups == 0) {
			r = rmp->mp_ngroups;
			break;
		}

		if (ngroups < rmp->mp_ngroups)
			/* Asking for less groups than available */
			return(EINVAL);

		/* Narrow each gid to a char before the copy-out. */
		for (i = 0; i < ngroups; i++)
			sgroups[i] = (char) rmp->mp_sgroups[i];

		r = sys_datacopy(SELF, (vir_bytes) &sgroups, who_e,
			(vir_bytes) m_in.groupsp, ngroups * sizeof(char));
		if (r != OK)
			return(r);

		r = rmp->mp_ngroups;
		break;
	case GETGROUPS:
		/* Modern variant: groups copied out as gid_t. */
		ngroups = m_in.grp_no;
		if (ngroups > NGROUPS_MAX || ngroups < 0)
			return(EINVAL);

		if (ngroups == 0) {
			r = rmp->mp_ngroups;
			break;
		}

		if (ngroups < rmp->mp_ngroups)
			/* Asking for less groups than available */
			return(EINVAL);

		r = sys_datacopy(SELF, (vir_bytes) rmp->mp_sgroups, who_e,
			(vir_bytes) m_in.groupsp, ngroups * sizeof(gid_t));
		if (r != OK)
			return(r);

		r = rmp->mp_ngroups;
		break;
	case GETUID:
		/* Real uid in r, effective uid in res2. */
		r = rmp->mp_realuid;
		rmp->mp_reply.reply_res2 = rmp->mp_effuid;
		break;

	case GETGID:
		/* Real gid in r, effective gid in res2. */
		r = rmp->mp_realgid;
		rmp->mp_reply.reply_res2 = rmp->mp_effgid;
		break;

	case MINIX_GETPID:
		/* Own pid in r, parent's pid in res2. */
		r = mproc[who_p].mp_pid;
		rmp->mp_reply.reply_res2 = mproc[rmp->mp_parent].mp_pid;
		break;

	case GETPGRP:
		r = rmp->mp_procgrp;
		break;

	case PM_GETSID:
	{
		/* pid == 0 means the caller itself. */
		struct mproc *target;
		pid_t p = m_in.PM_GETSID_PID;
		target = p ? find_proc(p) : &mproc[who_p];
		r = ESRCH;
		if(target)
			r = target->mp_procgrp;
		break;
	}

	default:
		r = EINVAL;
		break;
  }
  return(r);
}
/* parse_server_options - parse dtnperf server command-line options into
 * perf_g_opt.  Exits the process directly for --help, --stop, and on
 * syntax errors.
 *
 * Fixes over the original:
 *  - getopt_long() returns int; storing it in a char made the end-of-
 *    options test `c == (char)(-1)` depend on char signedness.  Use int
 *    and compare against -1.
 *  - the --stop branch built cmd with an unbounded sprintf of argv[0];
 *    use snprintf so an oversized argv[0] cannot overflow cmd[256]. */
void parse_server_options(int argc, char ** argv, dtnperf_global_options_t * perf_g_opt)
{
	int c;
	int done = 0;
	boolean_t output_set = FALSE;
	dtnperf_options_t * perf_opt = perf_g_opt->perf_opt;
	dtnperf_connection_options_t * conn_opt = perf_g_opt->conn_opt;
	// kill daemon variables
	int pid;
	char cmd[256];

	while (!done)
	{
		static struct option long_options[] =
		{
			{"help", no_argument, 0, 'h'},
			{"verbose", no_argument, 0, 'v'},
			{"memory", no_argument, 0, 'M'},
			{"lifetime", required_argument, 0, 'l'},
			{"debug", optional_argument, 0, 33},	// 33 because D is for data mode
			{"priority", required_argument, 0, 'p'},
			{"ddir", required_argument, 0, 34},
			{"fdir", required_argument, 0, 39},
			{"acks-to-mon", no_argument, 0, 35},	// server only option
			{"ip-addr", required_argument, 0, 37},
			{"ip-port", required_argument, 0, 38},
			{"daemon", no_argument, 0, 'a'},
			{"output", required_argument, 0, 'o'},
			{"stop", no_argument, 0, 's'},
			{0,0,0,0}	// The last element of the array has to be filled with zeros.
		};
		int option_index = 0;
		c = getopt_long(argc, argv, "hvMl:p:ao:s", long_options, &option_index);

		switch (c)
		{
		case 'h':
			print_server_usage(argv[0]);
			exit(0);
			return ;

		case 'v':
			perf_opt->verbose = TRUE;
			break;

		case 'M':
			perf_opt->use_file = 0;
			perf_opt->payload_type = BP_PAYLOAD_MEM;
			break;

		case 'l':
			conn_opt->expiration = atoi(optarg);
			break;

		case 'p':
			if (!strcasecmp(optarg, "bulk"))
			{
				conn_opt->priority.priority = BP_PRIORITY_BULK;
			}
			else if (!strcasecmp(optarg, "normal"))
			{
				conn_opt->priority.priority = BP_PRIORITY_NORMAL;
			}
			else if (!strcasecmp(optarg, "expedited"))
			{
				conn_opt->priority.priority = BP_PRIORITY_EXPEDITED;
			}
			else if (!strcasecmp(optarg, "reserved"))
			{
				conn_opt->priority.priority = BP_PRIORITY_RESERVED;
			}
			else
			{
				fprintf(stderr, "Invalid priority value %s\n", optarg);
				exit(1);
			}
			break;

		case 33: // debug
			perf_opt->debug = TRUE;
			if (optarg != NULL){
				int debug_level = atoi(optarg);
				if (debug_level >= 1 && debug_level <= 2)
					perf_opt->debug_level = atoi(optarg) - 1;
				else {
					fprintf(stderr, "wrong --debug argument\n");
					exit(1);
					return;
				}
			}
			else
				perf_opt->debug_level = 2;
			break;

		case 34: //incoming bundles destination directory
			perf_opt->dest_dir = strdup(optarg);
			break;

		case 35: //server send acks to monitor
			perf_opt->acks_to_mon = TRUE;
			break;

		case 36: //server do not send acks
			/* NOTE(review): no entry in long_options[] maps to 36, so
			 * this case is currently unreachable -- confirm whether a
			 * {"no-acks", no_argument, 0, 36} entry went missing. */
			perf_opt->no_acks = TRUE;
			break;

		case 37:
			perf_opt->ip_addr = strdup(optarg);
			perf_opt->use_ip = TRUE;
			break;

		case 38:
			perf_opt->ip_port = atoi(optarg);
			perf_opt->use_ip = TRUE;
			break;

		case 39:
			perf_opt->file_dir = strdup(optarg);
			break;

		case 'a':
			perf_opt->daemon = TRUE;
			break;

		case 'o':
			perf_opt->server_output_file = strdup(optarg);
			output_set = TRUE;
			break;

		case 's':
			// find the running server instance and ask it to shut down
			memset(cmd, 0, sizeof(cmd));
			snprintf(cmd, sizeof(cmd), "%s %s", argv[0], SERVER_STRING);
			pid = find_proc(cmd);
			if (pid)
			{
				printf("Closing dtnperf server pid: %d\n", pid);
				kill(pid, SIGINT);
			}
			else
			{
				fprintf(stderr, "ERROR: cannot find a running instance of dtnperf server\n");
			}
			exit(0);
			break;

		case '?':
			fprintf(stderr, "Unknown option: %c\n", optopt);
			exit(1);
			break;

		case -1:	// end of options
			done = 1;
			break;

		default:
			// getopt already prints an error message for unknown option characters
			print_server_usage(argv[0]);
			exit(1);
		}
	}

	// -o makes sense only when running as a daemon
	if (output_set && !perf_opt->daemon)
	{
		fprintf(stderr, "\nSYNTAX ERROR: -o option can be used only with -a option\n");
		print_server_usage(argv[0]);
		exit(1);
	}
}
// init_main - the second kernel thread used to create kswapd_main & user_main kernel threads static int init_main(void *arg) { int pid; #ifndef CONFIG_NO_SWAP if ((pid = kernel_thread(kswapd_main, NULL, 0)) <= 0) { panic("kswapd init failed.\n"); } kswapd = find_proc(pid); set_proc_name(kswapd, "kswapd"); #else #warning swapping disabled #endif int ret; char root[] = "disk0:"; if ((ret = vfs_set_bootfs(root)) != 0) { panic("set boot fs failed: %e.\n", ret); } size_t nr_used_pages_store = nr_used_pages(); size_t slab_allocated_store = slab_allocated(); unsigned int nr_process_store = nr_process; pid = kernel_thread(user_main, NULL, 0); if (pid <= 0) { panic("create user_main failed.\n"); } while (do_wait(0, NULL) == 0) { if (nr_process_store == nr_process) { break; } schedule(); } #ifndef CONFIG_NO_SWAP assert(kswapd != NULL); int i; for (i = 0; i < 10; i ++) { if (kswapd->wait_state == WT_TIMER) { wakeup_proc(kswapd); } schedule(); } #endif mbox_cleanup(); fs_cleanup(); kprintf("all user-mode processes have quit, no /bin/sh?.\n"); #ifndef CONFIG_NO_SWAP assert(initproc->cptr == kswapd && initproc->yptr == NULL && initproc->optr == NULL); assert(kswapd->cptr == NULL && kswapd->yptr == NULL && kswapd->optr == NULL); assert(nr_process == 2 + pls_read(lcpu_count)); #else assert(nr_process == 1 + pls_read(lcpu_count)); #endif assert(nr_used_pages_store == nr_used_pages()); assert(slab_allocated_store == slab_allocated()); kprintf("init check memory pass.\n"); return 0; }
/*===========================================================================*
 *				do_trace				     *
 *===========================================================================*/
/* PM ptrace handler.  T_OK/T_ATTACH establish a tracer-tracee relation;
 * T_READB_INS/T_WRITEB_INS are root-only text-segment hacks; every other
 * request is issued by the tracer against its STOPPED tracee, with most
 * register/memory work forwarded to the kernel via sys_trace. */
PUBLIC int do_trace()
{
  register struct mproc *child;
  struct ptrace_range pr;
  int i, r, seg, req;

  req = m_in.request;

  /* The T_OK call is made by the child fork of the debugger before it execs
   * the process to be traced.  The T_ATTACH call is made by the debugger itself
   * to attach to an existing process.
   */
  switch (req) {
  case T_OK:		/* enable tracing by parent for this proc */
	if (mp->mp_tracer != NO_TRACER) return(EBUSY);

	mp->mp_tracer = mp->mp_parent;
	mp->mp_reply.reply_trace = 0;
	return(OK);

  case T_ATTACH:	/* attach to an existing process */
	if ((child = find_proc(m_in.pid)) == NULL) return(ESRCH);
	if (child->mp_flags & EXITING) return(ESRCH);

	/* For non-root processes, user and group ID must match.  The
	 * tracee's effective ids must also equal its real ids, so a
	 * set-id process cannot be attached to by its invoker. */
	if (mp->mp_effuid != SUPER_USER &&
		(mp->mp_effuid != child->mp_effuid ||
		 mp->mp_effgid != child->mp_effgid ||
		 child->mp_effuid != child->mp_realuid ||
		 child->mp_effgid != child->mp_realgid)) return(EPERM);

	/* Only root may trace system servers. */
	if (mp->mp_effuid != SUPER_USER && (child->mp_flags & PRIV_PROC))
		return(EPERM);

	/* System servers may not trace anyone. They can use sys_trace(). */
	if (mp->mp_flags & PRIV_PROC) return(EPERM);

	/* Can't trace self, PM or VM. */
	if (child == mp || child->mp_endpoint == PM_PROC_NR ||
		child->mp_endpoint == VM_PROC_NR) return(EPERM);

	/* Can't trace a process that is already being traced. */
	if (child->mp_tracer != NO_TRACER) return(EBUSY);

	child->mp_tracer = who_p;
	child->mp_trace_flags = TO_NOEXEC;

	/* Stop the new tracee immediately, as ptrace semantics require. */
	sig_proc(child, SIGSTOP, TRUE /*trace*/, FALSE /* ksig */);

	mp->mp_reply.reply_trace = 0;
	return(OK);

  case T_STOP:		/* stop the process */
	/* This call is not exposed to user programs, because its effect can be
	 * achieved better by sending the traced process a signal with kill(2).
	 */
	return(EINVAL);

  case T_READB_INS:	/* special hack for reading text segments */
	if (mp->mp_effuid != SUPER_USER) return(EPERM);
	if ((child = find_proc(m_in.pid)) == NULL) return(ESRCH);
	if (child->mp_flags & EXITING) return(ESRCH);

	r = sys_trace(req, child->mp_endpoint, m_in.PMTRACE_ADDR, &m_in.data);
	if (r != OK) return(r);

	mp->mp_reply.reply_trace = m_in.data;
	return(OK);

  case T_WRITEB_INS:	/* special hack for patching text segments */
	if (mp->mp_effuid != SUPER_USER) return(EPERM);
	if ((child = find_proc(m_in.pid)) == NULL) return(ESRCH);
	if (child->mp_flags & EXITING) return(ESRCH);

#if 0
	/* Should check for shared text */

	/* Make sure the text segment is not used as a source for shared
	 * text.
	 */
	child->mp_ino = 0;
	child->mp_dev = 0;
	child->mp_ctime = 0;
#endif

	r = sys_trace(req, child->mp_endpoint, m_in.PMTRACE_ADDR, &m_in.data);
	if (r != OK) return(r);

	mp->mp_reply.reply_trace = m_in.data;
	return(OK);
  }

  /* All the other calls are made by the tracing process to control execution
   * of the child. For all these calls, the child must be stopped.
   */
  if ((child = find_proc(m_in.pid)) == NULL) return(ESRCH);
  if (child->mp_flags & EXITING) return(ESRCH);
  if (child->mp_tracer != who_p) return(ESRCH);

  if (!(child->mp_flags & STOPPED)) return(EBUSY);

  switch (req) {
  case T_EXIT:		/* exit */
	child->mp_flags |= TRACE_EXIT;

	/* Defer the exit if the traced process has an VFS call pending. */
	if (child->mp_flags & VFS_CALL)
		child->mp_exitstatus = (int) m_in.data;	/* save for later */
	else
		exit_proc(child, (int) m_in.data, FALSE /*dump_core*/);

	/* Do not reply to the caller until VFS has processed the exit
	 * request.
	 */
	return(SUSPEND);

  case T_SETOPT:	/* set trace options */
	child->mp_trace_flags = m_in.data;

	mp->mp_reply.reply_trace = 0;
	return(OK);

  case T_GETRANGE:
  case T_SETRANGE:	/* get/set range of values */
	/* The range descriptor itself is copied in from the tracer. */
	r = sys_datacopy(who_e, (vir_bytes) m_in.PMTRACE_ADDR,
			SELF, (vir_bytes) &pr, (phys_bytes) sizeof(pr));
	if (r != OK) return(r);

	if (pr.pr_space != TS_INS && pr.pr_space != TS_DATA) return(EINVAL);
	if (pr.pr_size == 0 || pr.pr_size > LONG_MAX) return(EINVAL);

	seg = (pr.pr_space == TS_INS) ? T : D;
	if (req == T_GETRANGE)
		r = sys_vircopy(child->mp_endpoint, seg, (vir_bytes) pr.pr_addr,
			who_e, D, (vir_bytes) pr.pr_ptr,
			(phys_bytes) pr.pr_size);
	else
		r = sys_vircopy(who_e, D, (vir_bytes) pr.pr_ptr,
			child->mp_endpoint, seg, (vir_bytes) pr.pr_addr,
			(phys_bytes) pr.pr_size);
	if (r != OK) return(r);

	mp->mp_reply.reply_trace = 0;
	return(OK);

  case T_DETACH:	/* detach from traced process */
	if (m_in.data < 0 || m_in.data >= _NSIG) return(EINVAL);

	child->mp_tracer = NO_TRACER;

	/* Let all tracer-pending signals through the filter. */
	for (i = 1; i < _NSIG; i++) {
		if (sigismember(&child->mp_sigtrace, i)) {
			(void) sigdelset(&child->mp_sigtrace, i);
			check_sig(child->mp_pid, i, FALSE /* ksig */);
		}
	}

	if (m_in.data > 0) {		/* issue signal */
		sig_proc(child, (int) m_in.data, TRUE /*trace*/,
			FALSE /* ksig */);
	}

	/* Resume the child as if nothing ever happened. */
	child->mp_flags &= ~STOPPED;
	child->mp_trace_flags = 0;

	check_pending(child);

	break;

  case T_RESUME:
  case T_STEP:
  case T_SYSCALL:	/* resume execution */
	if (m_in.data < 0 || m_in.data >= _NSIG) return(EINVAL);

	if (m_in.data > 0) {		/* issue signal */
		sig_proc(child, (int) m_in.data, FALSE /*trace*/,
			FALSE /* ksig */);
	}

	/* If there are any other signals waiting to be delivered,
	 * feign a successful resumption.
	 */
	for (i = 1; i < _NSIG; i++) {
		if (sigismember(&child->mp_sigtrace, i)) {
			mp->mp_reply.reply_trace = 0;
			return(OK);
		}
	}

	child->mp_flags &= ~STOPPED;

	check_pending(child);

	break;
  }
  /* Forward whatever remains of the request to the kernel. */
  r = sys_trace(req, child->mp_endpoint, m_in.PMTRACE_ADDR, &m_in.data);
  if (r != OK) return(r);

  mp->mp_reply.reply_trace = m_in.data;
  return(OK);
}