/*
 * Root /proc lookup: the static (non-PID) entries take precedence; a NULL
 * return from proc_lookup() means the name was resolved there.  Otherwise
 * fall through to the per-PID directory lookup.
 */
static struct dentry *proc_root_lookup(struct inode *dir, struct dentry *dentry,
				       unsigned int flags)
{
	if (proc_lookup(dir, dentry, flags) == NULL)
		return NULL;

	return proc_pid_lookup(dir, dentry, flags);
}
/*
 * BIF run_slice/2: run process Pid for at most Reductions reductions.
 * Delivers either the bare yield marker or a {Status, Result} tuple to
 * the caller via result(); always returns AI_OK to the dispatcher.
 */
term_t bif_run_slice2(term_t Pid, term_t Reductions, process_t *ctx)
{
	process_t *proc;
	term_t retval;
	term_t res;

	if (!is_pid(Pid) || !is_int(Reductions))
		return A_BADARG;

	proc = proc_lookup(pid_serial(Pid));
	res = proc_main(proc, int_value2(Reductions), &retval);

	if (res == AI_YIELD) {
		result(res);
	} else {
		/* Covers both AI_DONE and any other completion status.
		 * NB: on AI_DONE the process is deliberately NOT destroyed
		 * here -- links may still need to be notified first. */
		term_t marshalled = marshal_term(retval, proc_gc_pool(ctx));
		result(make_tuple2(res, marshalled, proc_gc_pool(ctx)));
	}

	return AI_OK;
}
/*
 * Close a socket port.  If the input owner still exists, flush any
 * buffered input to it as a final {tcp, Port, Bin} message, then send
 * {tcp_closed, Port}.  Finally close the underlying APR socket.
 */
apr_status_t port_socket_close0(port_t *self)
{
	port_socket_data_t *data = self->data;
	process_t *owner = proc_lookup(pid_serial(self->owner_in));

	if (owner != 0) {
		xpool_t *scratch = xpool_make(self->pool);
		int pending = buffer_len(data->in_buf);
		term_t msg;

		if (pending > 0) {
			term_t bin = make_binary(intnum(pending),
						 buffer_ptr(data->in_buf), scratch);
			msg = make_tuple3(A_TCP, port_id(self, scratch), bin, scratch);
			proc_new_mail(owner, msg);
			buffer_clear(data->in_buf);
		}

		msg = make_tuple2(A_TCP_CLOSED, port_id(self, scratch), scratch);
		proc_new_mail(owner, msg);
		xpool_destroy(scratch);
	}

	return apr_socket_close(data->sock);
}
/* * Should be called from the init proc */ static void test_proc_create(){ dbg(DBG_TEST, "testing proc_create\n"); proc_t *myproc = proc_create("myproc"); KASSERT(list_empty(&myproc->p_threads)); KASSERT(list_empty(&myproc->p_children)); KASSERT(sched_queue_empty(&myproc->p_wait)); KASSERT(myproc->p_pproc->p_pid == 1 && "created proc's parent isn't the init proc\n"); KASSERT(myproc->p_state == PROC_RUNNING); /* make sure it's in the proc list */ KASSERT(proc_lookup(myproc->p_pid) == myproc && "created proc not in proc list\n"); /* make sure it's in it's parent's child list */ KASSERT(in_child_list(myproc)); /* clean everything up */ kthread_t *mythread = kthread_create(myproc, simple_function, NULL, NULL); sched_make_runnable(mythread); int status; do_waitpid(myproc->p_pid, 0, &status); dbg(DBG_TESTPASS, "all proc_create tests passed!\n"); }
/*
 * Root /proc lookup (nameidata variant): non-PID entries are tried first;
 * NULL from proc_lookup() means the lookup was satisfied there, otherwise
 * delegate to the per-PID directory lookup.
 */
static struct dentry *proc_root_lookup(struct inode *dir, struct dentry *dentry,
				       struct nameidata *nd)
{
	if (proc_lookup(dir, dentry, nd) == NULL)
		return NULL;

	return proc_pid_lookup(dir, dentry, nd);
}
/*
 * Called when poll reports the socket writable: complete a pending
 * connect (notifying the input owner), flush buffered output, and when
 * previously-requested output space becomes available notify the output
 * owner with a {tcp_space, Port, Avail} message.
 *
 * Returns APR_SUCCESS, a send error, or APR_EOF when a deferred close
 * can now proceed.
 */
apr_status_t port_socket_do_writable(port_t *self)
{
	port_socket_data_t *data = self->data;

	if (data->is_connecting) {
		process_t *proc = proc_lookup(pid_serial(self->owner_in)); // owner_in
		if (proc) {
			xpool_t *tmp = xpool_make(self->pool);
			term_t msg = make_tuple2(A_TCP_CONNECTED, port_id(self, tmp), tmp);
			proc_new_mail(proc, msg);
			xpool_destroy(tmp);
		}
		data->is_connecting = 0;
	}

	if (buffer_len(data->out_buf) > 0) {
		apr_status_t rs = buffer_socket_send(data->out_buf, data->sock);
		if (rs != 0)
			return rs;
	}

	if (data->is_closing && buffer_len(data->out_buf) == 0)
		return APR_EOF; //make poll close0 the socket

	if (data->space_required &&
	    buffer_available(data->out_buf) >= data->required_size) {
		//TODO: insure that only owner can send to socket
		process_t *proc = proc_lookup(pid_serial(self->owner_out));

		/* BUG FIX: the owner process may have exited, making
		 * proc_lookup() return NULL; the original passed it straight
		 * to proc_new_mail().  Look the proc up before allocating the
		 * scratch pool so nothing leaks when the owner is gone. */
		if (proc) {
			xpool_t *tmp = xpool_make(self->pool);
			int avail = buffer_available(data->out_buf);
			term_t msg = make_tuple3(A_TCP_SPACE, port_id(self, tmp),
						 intnum(avail), tmp);
			proc_new_mail(proc, msg);
			xpool_destroy(tmp);
		}
		data->space_required = 0;
	}

	return APR_SUCCESS;
}
/*
 * BIF destroy_process/1: destroy the process identified by Pid if it
 * still exists.  Always reports true to the caller; returns A_BADARG
 * only when Pid is not a pid term.
 */
term_t bif_destroy_process1(term_t Pid, process_t *ctx)
{
	if (!is_pid(Pid))
		return A_BADARG;

	process_t *victim = proc_lookup(pid_serial(Pid));
	if (victim != 0)
		proc_destroy(victim);

	result(A_TRUE);
	return AI_OK;
}
/* Child body used by init-proc tests; behavior is selected by the global
 * `curtest`.  NOTE(review): this definition appears truncated in this view --
 * the function's closing brace and return statement are not visible here. */
void *init_child8(int arg1,void *arg2)
{
	if(curtest == 5)
		kthread_exit(0);
	if(curtest == 4)
	{
		kthread_t *cur_proc_thd;
		proc_t *proc23 = proc_lookup(3);
		/* Empty iteration: relies on the list_iterate macros leaving
		 * cur_proc_thd pointing at a thread of PID 3 after the loop.
		 * NOTE(review): if proc_lookup(3) returns NULL, or the thread
		 * list is empty, or the macro resets its cursor on exit,
		 * cur_proc_thd is invalid at the kthread_cancel call below --
		 * verify against the list_iterate_* macro definitions. */
		list_iterate_begin(&(proc23->p_threads), cur_proc_thd, kthread_t, kt_plink)
		{
		}list_iterate_end();
		kthread_cancel(cur_proc_thd,0);
	}
/*
 * Regression test: do_waitpid() on a PID that is not one of curproc's
 * children must fail with -ECHILD.
 */
static void test_do_waitpid_no_child(){
	pid_t pid;

	/* find a PID that definitely isn't a child of curproc */
	for (pid = 0; proc_lookup(pid) != NULL; pid++){}

	int status;
	pid_t returned_pid = do_waitpid(pid, 0, &status);

	/* BUG FIX: the original wrote `returned_pid = -ECHILD` (assignment),
	 * which made the assertion vacuously true; the test could never fail. */
	KASSERT(returned_pid == -ECHILD);
}
/*
 * Exercise do_waitpid() over NUM_PROCS children, reaping either by
 * explicit pid or via -1 ("any child") depending on `type`, then verify
 * every reaped proc has been fully torn down.
 */
static void test_do_waitpid(waitpid_type_t type){
	proc_t *procs[NUM_PROCS];
	kthread_t *threads[NUM_PROCS];
	int idx;

	/* spawn the children */
	for (idx = 0; idx < NUM_PROCS; idx++){
		procs[idx] = proc_create("test proc");
		threads[idx] = kthread_create(procs[idx], simple_function, idx, NULL);
		sched_make_runnable(threads[idx]);
	}

	/* reap them all, one way or the other */
	for (idx = 0; idx < NUM_PROCS; idx++){
		int status;
		if (type == ANY){
			do_waitpid(-1, 0, &status);
		} else {
			pid_t target = procs[idx]->p_pid;
			pid_t reaped = do_waitpid(target, 0, &status);
			KASSERT(reaped == target);
		}
	}

	/* verify post-reap invariants for every child */
	for (idx = 0; idx < NUM_PROCS; idx++){
		proc_t *p = procs[idx];

		KASSERT(proc_lookup(p->p_pid) == NULL);
		/* make sure all children have been reparented */
		KASSERT(list_empty(&p->p_children));
		/* make sure that it is no longer in it's parent's
		 * child list */
		KASSERT(!in_child_list(p));
		/* make sure it exited with the correct status */
		KASSERT(p->p_status == 0);
		KASSERT(p->p_state == PROC_DEAD);
		KASSERT(sched_queue_empty(&p->p_wait));
	}
}
static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
{
	/*
	 * nr_threads is actually protected by the tasklist_lock;
	 * however, it's conventional to do reads, especially for
	 * reporting, without any locking whatsoever.
	 */
	if (dir->i_ino == PROC_ROOT_INO)	/* check for safety... */
		dir->i_nlink = proc_root.nlink + nr_threads;

	/* static /proc entries win; NULL means the lookup was handled there */
	if (proc_lookup(dir, dentry, nd) == NULL)
		return NULL;

	return proc_pid_lookup(dir, dentry, nd);
}
/*
 * Called when poll reports the socket readable: pull bytes into the input
 * buffer and, if the owner has posted an expect request, deliver a
 * {tcp, Port, Bin} message once enough data is available.  An expected
 * size of 0 means "deliver whatever has arrived".
 */
apr_status_t port_socket_do_readable(port_t *self)
{
	port_socket_data_t *data = self->data;
	apr_status_t rs = buffer_socket_recv(data->in_buf, data->sock);

	if (rs != 0)
		return rs;
	if (!data->packet_expected)
		return rs;

	int len = buffer_len(data->in_buf);
	int want = data->expected_size;

	if (want == 0 || (want > 0 && len >= want)) {
		process_t *proc;

		if (want > 0)
			len = want;	/* deliver exactly the requested amount */

		proc = proc_lookup(pid_serial(self->owner_in));
		if (proc) {
			xpool_t *tmp = xpool_make(self->pool);
			term_t bin = make_binary(intnum(len),
						 buffer_ptr(data->in_buf), tmp);
			term_t msg = make_tuple3(A_TCP, port_id(self, tmp), bin, tmp);
			proc_new_mail(proc, msg);
			buffer_consume(data->in_buf, len);
			xpool_destroy(tmp);
		}

		/* the expect request is satisfied either way */
		data->packet_expected = 0;
		data->expected_size = 0;
	}

	return rs;
}
/*
 * Set a socket port option.
 *
 * A_EXPECT:  arm delivery of inbound data to owner_in; value is the byte
 *            count to wait for (0 = deliver whatever arrives).  If enough
 *            data is already buffered, deliver immediately.
 * A_REQUIRE: arm a {tcp_space, Port, Avail} notification to owner_out once
 *            `value` bytes of output buffer space are available; notify
 *            immediately if the space is already there.
 *
 * Returns APR_SUCCESS, APR_BADARG on bad option/value, or APR_ENOPROC when
 * the relevant owner process does not exist.
 */
apr_status_t port_socket_set_option(port_t *self, term_t opt, term_t value)
{
	port_socket_data_t *data = self->data;
	if (opt == A_EXPECT)
	{
		if (!is_int(value))
			return APR_BADARG;
		data->expected_size = int_value2(value);
		if (data->expected_size < 0)
			return APR_BADARG;
		if (!is_pid(self->owner_in))
			return APR_ENOPROC;

		//enough data may already be there
		if ((data->expected_size == 0 && buffer_len(data->in_buf) > 0) ||
		    (data->expected_size > 0 &&
		     buffer_len(data->in_buf) >= data->expected_size))
		{
			process_t *proc = proc_lookup(pid_serial(self->owner_in));

			/* BUG FIX: owner_in may name a process that has since
			 * exited; the original passed the NULL lookup result
			 * straight to proc_new_mail(). */
			if (proc == 0)
				return APR_ENOPROC;

			int len = (data->expected_size == 0)
				?buffer_len(data->in_buf)
				:data->expected_size;
			xpool_t *tmp = xpool_make(self->pool);
			term_t bin = make_binary(intnum(len), buffer_ptr(data->in_buf), tmp);
			term_t msg = make_tuple3(A_TCP, port_id(self, tmp), bin, tmp);
			proc_new_mail(proc, msg);
			buffer_consume(data->in_buf, len);
			xpool_destroy(tmp);
		}
		else
			data->packet_expected = 1;
	}
	else if (opt == A_REQUIRE)
	{
		int size;
		if (!is_int(value))
			return APR_BADARG;
		size = int_value2(value);
		if (size < 0 || size > SOCK_OUTBUF_LEN)
			return APR_BADARG;
		data->required_size = size;

		if (buffer_available(data->out_buf) >= size)
		{
			//TODO: insure that only owner can send to socket
			process_t *proc = proc_lookup(pid_serial(self->owner_out));

			/* BUG FIX: guard against a vanished owner_out process
			 * (NULL deref in the original). */
			if (proc)
			{
				xpool_t *tmp = xpool_make(self->pool);
				int avail = buffer_available(data->out_buf);
				term_t msg = make_tuple3(A_TCP_SPACE,
							 port_id(self, tmp),
							 intnum(avail), tmp);
				proc_new_mail(proc, msg);
				xpool_destroy(tmp);
			}
			data->space_required = 0;
		}
		else
			data->space_required = 1;
	}
	else
		return APR_BADARG;

	return APR_SUCCESS;
}
/**
 * Once we're inside of idleproc_run(), we are executing in the context of the
 * first process-- a real context, so we can finally begin running
 * meaningful code.
 *
 * This is the body of process 0. It should initialize all that we didn't
 * already initialize in kmain(), launch the init process (initproc_run),
 * wait for the init process to exit, then halt the machine.
 *
 * @param arg1 the first argument (unused)
 * @param arg2 the second argument (unused)
 */
static void *idleproc_run(int arg1, void *arg2)
{
        int status;
        pid_t child;

        /* create init proc */
        kthread_t *initthr = initproc_create();
        init_call_all();
        GDB_CALL_HOOK(initialized);

        /* Create other kernel threads (in order) */
/* PROCS BLANK {{{ */
#ifdef __SHADOWD__
        /* TODO port this - alvin */
#endif
/* PROCS BLANK }}} */

#ifdef __VFS__
        /* Once you have VFS remember to set the current working directory
         * of the idle and init processes */
/* PROCS BLANK {{{ */
        proc_t *idle = proc_lookup(PID_IDLE);
        proc_t *init = proc_lookup(PID_INIT);
        KASSERT(NULL != idle);
        KASSERT(NULL != init);
        idle->p_cwd = vfs_root_vn;
        init->p_cwd = vfs_root_vn;
        /* one reference per process now holding vfs_root_vn as its cwd */
        vref(vfs_root_vn);
        vref(vfs_root_vn);
/* PROCS BLANK }}} */

        /* Here you need to make the null, zero, and tty devices using mknod */
        /* You can't do this until you have VFS, check the include/drivers/dev.h
         * file for macros with the device ID's you will need to pass to mknod */
/* PROCS BLANK {{{ */
        int fd, ii;
        char path[32];
        struct stat statbuf;

        /* create /dev if a stat of it fails (assumed to mean "missing") */
        if (do_stat("/dev", &statbuf) < 0)
        {
                KASSERT(!(status = do_mkdir("/dev")));
        }
        /* for each device node: if it can't be opened, create it; if it
         * can, just close the probe fd -- it already exists */
        if ((fd = do_open("/dev/null", O_RDONLY)) < 0)
        {
                KASSERT(!(status = do_mknod("/dev/null", S_IFCHR, MEM_NULL_DEVID)));
        }
        else
        {
                do_close(fd);
        }
        if ((fd = do_open("/dev/zero", O_RDONLY)) < 0)
        {
                KASSERT(!(status = do_mknod("/dev/zero", S_IFCHR, MEM_ZERO_DEVID)));
        }
        else
        {
                do_close(fd);
        }
        memset(path, '\0', 32);
        /* one char-device node per terminal: /dev/tty0 .. /dev/tty(N-1) */
        for (ii = 0; ii < __NTERMS__; ii++)
        {
                sprintf(path, "/dev/tty%d", ii);
                dbg(DBG_INIT, "Creating tty mknod with path %s\n", path);
                if ((fd = do_open(path, O_RDONLY)) < 0)
                {
                        KASSERT(!do_mknod(path, S_IFCHR, MKDEVID(2, ii)));
                }
                else
                {
                        do_close(fd);
                }
        }
        /* one block-device node per disk: /dev/hda0 .. /dev/hda(N-1) */
        for (ii = 0; ii < __NDISKS__; ii++)
        {
                sprintf(path, "/dev/hda%d", ii);
                dbg(DBG_INIT, "Creating disk mknod with path %s\n", path);
                if ((fd = do_open(path, O_RDONLY)) < 0)
                {
                        KASSERT(!do_mknod(path, S_IFBLK, MKDEVID(1, ii)));
                }
                else
                {
                        do_close(fd);
                }
        }
/* PROCS BLANK }}} */
#endif

        /* Finally, enable interrupts (we want to make sure interrupts
         * are enabled AFTER all drivers are initialized) */
        intr_enable();

        /* Run initproc */
        sched_make_runnable(initthr);
        /* Now wait for it */
        child = do_waitpid(-1, 0, &status);
        KASSERT(PID_INIT == child);

#ifdef __MTP__
        kthread_reapd_shutdown();
#endif

#ifdef __SHADOWD__
        /* wait for shadowd to shutdown */
        shadowd_shutdown();
#endif

#ifdef __VFS__
        /* Shutdown the vfs: */
        dbg_print("weenix: vfs shutdown...\n");
        vput(curproc->p_cwd);
        if (vfs_shutdown())
                panic("vfs shutdown FAILED!!\n");

#endif

        /* Shutdown the pframe system */
#ifdef __S5FS__
        pframe_shutdown();
#endif

        dbg_print("\nweenix: halted cleanly!\n");
        GDB_CALL_HOOK(shutdown);
        hard_shutdown();
        return NULL;
}