/*===========================================================================* * dsp_ioctl *===========================================================================*/ PRIVATE int dsp_ioctl(const message *m_ptr) { int status; unsigned int val; dprint("sb16_dsp.c: dsp_ioctl()\n"); /* Cannot change parameters during play or recording */ if(DmaBusy >= 0) return EBUSY; /* Get user data */ if(m_ptr->REQUEST != DSPIORESET) { sys_vircopy(m_ptr->IO_ENDPT, D, (vir_bytes)m_ptr->ADDRESS, SELF, D, (vir_bytes)&val, sizeof(val)); } dprint("dsp_ioctl: got ioctl %d, argument: %d\n", m_ptr->REQUEST, val); switch(m_ptr->REQUEST) { case DSPIORATE: status = dsp_set_speed(val); break; case DSPIOSTEREO: status = dsp_set_stereo(val); break; case DSPIOBITS: status = dsp_set_bits(val); break; case DSPIOSIZE: status = dsp_set_size(val); break; case DSPIOSIGN: status = dsp_set_sign(val); break; case DSPIOMAX: val = DSP_MAX_FRAGMENT_SIZE; sys_vircopy(SELF, D, (vir_bytes)&val, m_ptr->IO_ENDPT, D, (vir_bytes)m_ptr->ADDRESS, sizeof(val)); status = OK; break; case DSPIORESET: status = dsp_reset(); break; default: status = ENOTTY; break; } return status; }
/*===========================================================================*
 *				do_rdwt					     *
 *===========================================================================*/
PUBLIC int do_rdwt(message *mp)		/* mp - pointer to read or write message */
{
/* Carry out a single read or write request: forward it to the underlying
 * driver through a private bounce buffer, decrypting on read and encrypting
 * on write.  Returns OK, EINVAL or EFAULT.
 */
  int r, opcode;
  int caller;			/* endpoint of the original requester */
  vir_bytes user_vir;		/* caller's buffer address */
  phys_bytes phys_addr;

  /* Disk address? Address and length of the user buffer? */
  if (mp->COUNT < 0) return(EINVAL);

  /* Check the user buffer. */
  sys_umap(mp->m_source, D, (vir_bytes) mp->ADDRESS, mp->COUNT, &phys_addr);
  if (phys_addr == 0) return(EFAULT);

  /* The request must fit in our private buffer. */
  if(mp->COUNT>BUF_LEN)
	panic("CryptDrive","buffer is too small",s);

  printf("Request size is , %u bytes",mp->COUNT);	/*debug*/

  /* BUG FIX: 'opcode' was read but never assigned; take it from the
   * message type as DEV_READ/DEV_WRITE arrive there. */
  opcode = mp->m_type;
  /* BUG FIX: remember the caller before mp->m_source is overwritten. */
  caller = mp->m_source;
  user_vir = (vir_bytes) mp->ADDRESS;

  if(opcode == DEV_READ){	/* from driver to caller */
	mp->ADDRESS = (vir_bytes) buf;	/* use my buffer */
	mp->m_source = thispid;
	if(OK != sendrec(DRVR_PROC_NR, mp))
		panic("CryptDrive","do_rd messaging failed",s);

	/* decrypt here - this line here */

	/* BUG FIX: copy from the one bounce buffer 'buf' (the original mixed
	 * 'buf'/'buffer') to the saved caller endpoint and address. */
	sys_vircopy(SELF, D, (vir_bytes) buf, caller, D, user_vir, mp->COUNT);
	mp->m_source = thispid;
	if(OK != send(caller, mp))
		panic("CryptDrive","do_rd messaging failed",s);
  }

  if(opcode == DEV_WRITE){	/* from caller to driver */
	sys_vircopy(caller, D, user_vir, SELF, D, (vir_bytes) buf, mp->COUNT);

	/* encrypt here */

	mp->ADDRESS = (vir_bytes) buf;	/* use my buffer */
	mp->m_source = thispid;
	if(OK != sendrec(DRVR_PROC_NR, mp))
		panic("CryptDrive","do_wt messaging failed",s);
	mp->m_source = thispid;
	if(OK != send(caller, mp))
		panic("CryptDrive","do_wt messaging failed",s);
  }
  return(OK);
}
/*===========================================================================* * copy_fdsets * *===========================================================================*/ static int copy_fdsets(struct selectentry *se, int nfds, int direction) { int r; size_t fd_setsize; endpoint_t src_e, dst_e; fd_set *src_fds, *dst_fds; if (nfds < 0 || nfds > OPEN_MAX) panic("select copy_fdsets: nfds wrong: %d", nfds); /* Only copy back as many bits as the user expects. */ #ifdef __NBSD_LIBC fd_setsize = (size_t) (howmany(nfds, __NFDBITS) * sizeof(__fd_mask)); #else fd_setsize = (size_t) (_FDSETWORDS(nfds) * _FDSETBITSPERWORD/8); #endif /* Set source and destination endpoints */ src_e = (direction == FROM_PROC) ? se->req_endpt : SELF; dst_e = (direction == FROM_PROC) ? SELF : se->req_endpt; /* read set */ src_fds = (direction == FROM_PROC) ? se->vir_readfds : &se->ready_readfds; dst_fds = (direction == FROM_PROC) ? &se->readfds : se->vir_readfds; if (se->vir_readfds) { r = sys_vircopy(src_e, (vir_bytes) src_fds, dst_e, (vir_bytes) dst_fds, fd_setsize); if (r != OK) return(r); } /* write set */ src_fds = (direction == FROM_PROC) ? se->vir_writefds : &se->ready_writefds; dst_fds = (direction == FROM_PROC) ? &se->writefds : se->vir_writefds; if (se->vir_writefds) { r = sys_vircopy(src_e, (vir_bytes) src_fds, dst_e, (vir_bytes) dst_fds, fd_setsize); if (r != OK) return(r); } /* error set */ src_fds = (direction == FROM_PROC) ? se->vir_errorfds : &se->ready_errorfds; dst_fds = (direction == FROM_PROC) ? &se->errorfds : se->vir_errorfds; if (se->vir_errorfds) { r = sys_vircopy(src_e, (vir_bytes) src_fds, dst_e, (vir_bytes) dst_fds, fd_setsize); if (r != OK) return(r); } return(OK); }
PRIVATE int msg_ioctl(message *m_ptr)
{
/* Handle an ioctl message for an audio device: fetch the argument from the
 * caller when present, hand the request to the device-specific driver code,
 * and copy any result back.  Returns OK, EIO, or a copy/driver error.
 */
	int status, len, chan;
	sub_dev_t *sub_dev_ptr;
	special_file_t* special_file_ptr;

	dprint("%s: msg_ioctl() device %d\n", drv.DriverName, m_ptr->DEVICE);

	special_file_ptr = get_special_file(m_ptr->DEVICE);
	if(special_file_ptr == NULL) {
		return EIO;
	}

	chan = special_file_ptr->io_ctl;

	if (chan == NO_CHANNEL) {
		error("%s: No io control channel specified!\n", drv.DriverName);
		return EIO;
	}

	/* get pointer to sub device data */
	sub_dev_ptr = &sub_dev[chan];

	if(!sub_dev_ptr->Opened) {
		error("%s: io control impossible - not opened!\n",
			drv.DriverName);
		return EIO;
	}

	/* this is a hack...todo: may we intercept reset calls? */
	if(m_ptr->REQUEST == DSPIORESET) {
		device_available = FALSE;
	}

	len = 0;	/* BUG FIX: was uninitialized when the request carries
			 * no incoming data, yet passed to drv_io_ctl below */

	/* this is confusing, _IOC_OUT bit means that there is incoming data */
	if (m_ptr->REQUEST & _IOC_OUT) { /* if there is data for us, copy it */
		len = io_ctl_length(m_ptr->REQUEST);
		status = sys_vircopy(m_ptr->PROC_NR, D,
			(vir_bytes)m_ptr->ADDRESS,
			SELF, D, (vir_bytes)io_ctl_buf, len);
		if (status != OK) return status;  /* BUG FIX: was ignored */
	}

	/* all ioctl's are passed to the device specific part of the driver */
	status = drv_io_ctl(m_ptr->REQUEST, (void *)io_ctl_buf, &len, chan);

	/* _IOC_IN bit -> user expects data */
	if (status == OK && m_ptr->REQUEST & _IOC_IN) {
		/* copy result back to user; BUG FIX: propagate copy errors */
		status = sys_vircopy(SELF, D, (vir_bytes)io_ctl_buf,
			m_ptr->PROC_NR, D, (vir_bytes)m_ptr->ADDRESS, len);
	}

	return status;
}
/* * TODO: IMPLEMENT do_getfslog * * Description: * Get the log meta-information (as provided by do_getfsloginf) and the * in-memory log - the circular buffer of filesystem operation entries. * The information obtained is a fsloginf struct (as in do_getfsloginf) and * an array of fslogrec structs. Both struct types are defined in unistd.h. * The information is returned by copying to addresses provided as message * fields. * * Expected incoming message fields: * m_in.m1_p1 - a pointer field that is the address to copy to of a fsloginf * struct in the address space of the calling process. * m_in.m1_p2 - a pointer field that is the address to copy to of an array of * fslogrec structs in the address space of the calling process. * * Return: * OK if the call succeeds * In the case of failure: * INVALID_ARG if either m_in.m1_p1 or m_in.m1_p2 is NULL: * An error status related to copying between process address spaces * using sysvircopy (one of EDOM, EFAULT, EPERM, EINVAL) * see: * https://wiki.minix3.org/doku.php?id=developersguide:kernelapi#sys_vircopy */ int do_getfslog() { if(m_in.m1_p1 == NULL || m_in.m1_p2 == NULL){ return INVALID_ARG; } int store1 = sys_vircopy(SELF, &fsloginf, who_e, m_in.m1_p1, sizeof(fsloginf)); if(store1 != OK){ return store1; } int store2 = sys_vircopy(SELF, fslog, who_e, m_in.m1_p2, sizeof(fslogrec)); if(store1 == OK && store2 == OK){ return OK; } else { return store2; } }
/*===========================================================================* * do_mapdriver * *===========================================================================*/ int do_mapdriver(void) { /* Create a device->driver mapping. RS will tell us which major is driven by * this driver, what type of device it is (regular, TTY, asynchronous, clone, * etc), and its label. This label is registered with DS, and allows us to * retrieve the driver's endpoint. */ int r, slot; devmajor_t major; endpoint_t endpoint; vir_bytes label_vir; size_t label_len; char label[LABEL_MAX]; struct fproc *rfp; /* Only RS can map drivers. */ if (who_e != RS_PROC_NR) return(EPERM); label_vir = job_m_in.m_lsys_vfs_mapdriver.label; label_len = job_m_in.m_lsys_vfs_mapdriver.labellen; major = job_m_in.m_lsys_vfs_mapdriver.major; /* Get the label */ if (label_len > sizeof(label)) { /* Can we store this label? */ printf("VFS: do_mapdriver: label too long\n"); return(EINVAL); } r = sys_vircopy(who_e, label_vir, SELF, (vir_bytes) label, label_len, CP_FLAG_TRY); if (r != OK) { printf("VFS: do_mapdriver: sys_vircopy failed: %d\n", r); return(EINVAL); } if (label[label_len-1] != '\0') { printf("VFS: do_mapdriver: label not null-terminated\n"); return(EINVAL); } /* Now we know how the driver is called, fetch its endpoint */ r = ds_retrieve_label_endpt(label, &endpoint); if (r != OK) { printf("VFS: do_mapdriver: label '%s' unknown\n", label); return(EINVAL); } /* Process is a service */ if (isokendpt(endpoint, &slot) != OK) { printf("VFS: can't map driver to unknown endpoint %d\n", endpoint); return(EINVAL); } rfp = &fproc[slot]; rfp->fp_flags |= FP_SRV_PROC; /* Try to update device mapping. */ return map_driver(label, major, endpoint); }
/* * TODO: IMPLEMENT do_getfsloginf * * Description: * Get the log meta-information - a fsloginf struct with fields for the * start position of the log, the length of the log (i.e. the number of valid * entries in the log array) and the value of the ops2log selector. The * fsloginf struct is defined in unistd.h. The struct is returned by copying to * an address provided as a message field. * * Expected incoming message fields: * m_in.m1_p1 - a pointer field that is the address to copy to of a fsloginf * struct in the address space of the calling process. * * Return: * OK if the call succeeds * In the case of failure: * INVALID_ARG if m_in.m1_p1 is NULL * An error status related to copying between process address spaces * using sysvircopy (one of EDOM, EFAULT, EPERM, EINVAL) * see: * https://wiki.minix3.org/doku.php?id=developersguide:kernelapi#sys_vircopy */ int do_getfsloginf() { if(m_in.m1_p1 == NULL){ return INVALID_ARG; } int store = sys_vircopy(SELF, &fsloginf, who_e, m_in.m1_p1, sizeof(fsloginf)); if(store == OK){ return OK; } else { return store; } }
/*===========================================================================* * do_mapdriver * *===========================================================================*/ PUBLIC int do_mapdriver() { int r, flags, major; endpoint_t endpoint; vir_bytes label_vir; size_t label_len; char label[LABEL_MAX]; /* Only RS can map drivers. */ if (who_e != RS_PROC_NR) { printf("vfs: unauthorized call of do_mapdriver by proc %d\n", who_e); return(EPERM); } /* Get the label */ label_vir= (vir_bytes)m_in.md_label; label_len= m_in.md_label_len; if (label_len+1 > sizeof(label)) { printf("vfs:do_mapdriver: label too long\n"); return EINVAL; } r= sys_vircopy(who_e, D, label_vir, SELF, D, (vir_bytes)label, label_len); if (r != OK) { printf("vfs:do_mapdriver: sys_vircopy failed: %d\n", r); return EINVAL; } label[label_len]= '\0'; r= ds_retrieve_label_endpt(label, &endpoint); if (r != OK) { printf("vfs:do_mapdriver: ds doesn't know '%s'\n", label); return EINVAL; } /* Try to update device mapping. */ major= m_in.md_major; flags= m_in.md_flags; r= map_driver(label, major, endpoint, m_in.md_style, flags); return(r); }
/*===========================================================================* * do_mapdriver * *===========================================================================*/ int do_mapdriver() { /* Create a device->driver mapping. RS will tell us which major is driven by * this driver, what type of device it is (regular, TTY, asynchronous, clone, * etc), and its label. This label is registered with DS, and allows us to * retrieve the driver's endpoint. */ int r, flags, major, style; endpoint_t endpoint; vir_bytes label_vir; size_t label_len; char label[LABEL_MAX]; /* Only RS can map drivers. */ if (who_e != RS_PROC_NR) return(EPERM); label_vir = (vir_bytes) job_m_in.md_label; label_len = (size_t) job_m_in.md_label_len; major = job_m_in.md_major; flags = job_m_in.md_flags; style = job_m_in.md_style; /* Get the label */ if (label_len+1 > sizeof(label)) { /* Can we store this label? */ printf("VFS: do_mapdriver: label too long\n"); return(EINVAL); } r = sys_vircopy(who_e, label_vir, SELF, (vir_bytes) label, label_len); if (r != OK) { printf("VFS: do_mapdriver: sys_vircopy failed: %d\n", r); return(EINVAL); } label[label_len] = '\0'; /* Terminate label */ /* Now we know how the driver is called, fetch its endpoint */ r = ds_retrieve_label_endpt(label, &endpoint); if (r != OK) { printf("VFS: do_mapdriver: label '%s' unknown\n", label); return(EINVAL); } /* Try to update device mapping. */ return map_driver(label, major, endpoint, style, flags); }
/*===========================================================================*
 *				dump_segments				     *
 *===========================================================================*/
PRIVATE void dump_segments(Elf_Phdr phdrs[], int phnum)
{
/* Append the memory contents of the program segments to the core file,
 * copying one CLICK_SIZE chunk at a time from the faulting process and
 * trimming the final chunk to the segment's remaining length.
 */
  int i;
  vir_bytes len;
  off_t off, seg_off;
  int r;
  static u8_t buf[CLICK_SIZE];	/* persistent per-chunk scratch buffer */

  /* NOTE(review): the loop starts at 1, so phdrs[0] is never dumped -
   * presumably that header is handled elsewhere; confirm against callers.
   */
  for (i = 1; i < phnum; i++) {
	len = phdrs[i].p_memsz;
	seg_off = phdrs[i].p_vaddr;

	for (off = 0; off < len; off += CLICK_SIZE) {
		/* NOTE(review): r is assigned but never checked; if the copy
		 * fails, stale buffer contents are written to the core file.
		 */
		r = sys_vircopy(fp->fp_endpoint, D,
			(vir_bytes) (seg_off + off),
			SELF, D, (vir_bytes) buf, (phys_bytes) CLICK_SIZE);
		/* The last chunk may be shorter than a full click. */
		write_buf((char *)buf, (off + CLICK_SIZE <= len) ?
			CLICK_SIZE : (len - off));
	}
  }
}
/*===========================================================================*
 *				read_seg				     *
 *===========================================================================*/
static int read_seg(
  struct exec_info *execi,	/* various data needed for exec */
  off_t off,			/* offset in file */
  int proc_e,			/* process number (endpoint) */
  int seg,			/* T, D, or S */
  vir_bytes seg_addr,		/* address to load segment */
  phys_bytes seg_bytes		/* how much is to be transferred? */
)
{
/* Load one segment of an executable image into the target process by
 * copying seg_bytes from offset 'off' of the in-memory image.  The byte
 * count on read is usually smaller than the segment count, because a
 * segment is padded out to a click multiple, and the data segment is only
 * partially initialized.  Returns OK, ENOEXEC, or a sys_vircopy error.
 */
	assert(seg == T || seg == D);

	/* Refuse to read past the end of the in-core image. */
	if (off + seg_bytes > execi->image_len)
		return ENOEXEC;

	return sys_vircopy(SELF, D, ((vir_bytes)execi->image) + off,
		proc_e, seg, seg_addr, seg_bytes);
}
/*===========================================================================*
 *				read_seg				     *
 *===========================================================================*/
static int read_seg(
  struct exec_info *execi,	/* various data needed for exec */
  off_t off,			/* offset in file */
  off_t seg_addr,		/* address to load segment */
  size_t seg_bytes		/* how much is to be transferred? */
)
{
/* Load one segment into the target process from the in-memory header image.
 * The byte count on read is usually smaller than the segment count, because
 * a segment is padded out to a click multiple, and the data segment is only
 * partially initialized.  Returns OK, ENOEXEC, or a sys_vircopy error.
 */
	int r;

	if (off+seg_bytes > execi->hdr_len) return ENOEXEC;

	if((r= sys_vircopy(SELF, D, ((vir_bytes)execi->hdr)+off,
		execi->proc_e, D, seg_addr, seg_bytes)) != OK) {
		/* BUG FIX: format specifiers did not match argument types
		 * (%x for size_t, %lx for off_t is undefined behavior);
		 * use %zu and cast the address to unsigned long. */
		printf("RS: exec read_seg: copy %zu bytes into %d at 0x%lx failed: %d\n",
			seg_bytes, execi->proc_e, (unsigned long) seg_addr, r);
	}
	return r;
}
void OSSendPtab(){
	/* While scheduling recording is enabled and the history buffer is not
	 * full, snapshot the kernel process table, condense each entry into a
	 * 'struct pi' record, and copy the array into the recorder's address
	 * space at the next history slot.
	 */
	// printf("Scheduler (sched/ospex.c): %d \n", our_message.m_source);
	// printf("Scheduler int: %d \n", our_message.m1_i1);
	if (recordSched == 1) {
		struct pi pi_struct[NR_PROCS+NR_TASKS];
		struct proc process_table[NR_PROCS+NR_TASKS];
		/* Fetch a copy of the kernel process table. */
		sys_getproctab((struct proc *) &process_table);
		if (process_count < HISTORY) {
			int i;
			/* Condense each kernel entry into the exported format. */
			for (i = 0; i < NR_PROCS + NR_TASKS; i++) {
				strcpy(pi_struct[i].p_name, process_table[i].p_name);
				pi_struct[i].p_endpoint = process_table[i].p_endpoint;
				pi_struct[i].p_priority = process_table[i].p_priority;
				pi_struct[i].p_cpu_time_left = process_table[i].p_cpu_time_left;
				pi_struct[i].p_rts_flags = process_table[i].p_rts_flags;
				pi_struct[i].p_user_time = process_table[i].p_user_time;
				pi_struct[i].p_sys_time = process_table[i].p_sys_time;
				pi_struct[i].p_cycles = process_table[i].p_cycles;
				pi_struct[i].p_times.enter_queue = process_table[i].p_accounting.enter_queue;
				pi_struct[i].p_times.time_in_queue = process_table[i].p_accounting.time_in_queue;
				pi_struct[i].p_times.dequeues = process_table[i].p_accounting.dequeues;
				pi_struct[i].p_times.ipc_sync = process_table[i].p_accounting.ipc_sync;
				pi_struct[i].p_times.ipc_async = process_table[i].p_accounting.ipc_async;
				pi_struct[i].p_times.preempted = process_table[i].p_accounting.preempted;
			}
			/* NOTE(review): 'srcAddr' is used as the DESTINATION
			 * endpoint here and pInfoPtrs[] as the destination
			 * addresses - assumes they identify the recording
			 * process; confirm the naming against the caller.
			 * The return value of sys_vircopy is not checked.
			 */
			sys_vircopy(SELF, (vir_bytes) &pi_struct, srcAddr, (vir_bytes) pInfoPtrs[process_count], sizeof(pi_struct));
			process_count++;
		}
		sys_getrunqhead(1, SELF);
		// u64_t cpuFreq = cpu_get_freq(0);
	}
}
/* Dispatch a graphics ioctl: for most requests the argument struct is copied
 * in from the caller and the matching driver hook is invoked; DUMP_REGISTERS
 * copies data back out, and RESET re-enters text mode via a BIOS int 0x10.
 * Returns 0, a driver result, or an EGFX_* error code.
 * NOTE(review): unknown requests fall through to 'return 0' (success);
 * 'break' statements after 'return' are unreachable.
 */
PUBLIC int gfx_ioctl(message *mess)
{
  int r;

  switch (mess->REQUEST) {
	case GFX_REQUEST_SET_MODE:
		/* Fetch the requested mode and check the driver supports it. */
		r = sys_vircopy(mess->IO_ENDPT, D, (vir_bytes) mess->ADDRESS,
			SELF, D, (vir_bytes) &mode, sizeof(mode));
		if (r != OK) return EGFX_ERROR;
		if ((driver->modes & mode) == 0)
			return EGFX_UNSUPPORTED_MODE;
		return driver->set_mode(mode);
		break;
	case GFX_REQUEST_PUT_PIXEL:
		r = sys_vircopy(mess->IO_ENDPT, D, (vir_bytes) mess->ADDRESS,
			SELF, D, (vir_bytes) &pixel, sizeof(pixel));
		if (r != OK) return EGFX_ERROR;
		return driver->put_pixel(pixel.x, pixel.y, pixel.c);
		break;
	case GFX_REQUEST_DRAW_LINE:
		r = sys_vircopy(mess->IO_ENDPT, D, (vir_bytes) mess->ADDRESS,
			SELF, D, (vir_bytes) &line, sizeof(line));
		if (r != OK) return EGFX_ERROR;
		return driver->draw_line(line.x1, line.y1, line.x2, line.y2,
			line.c);
		break;
	case GFX_REQUEST_DRAW_LINE_HORI:
		r = sys_vircopy(mess->IO_ENDPT, D, (vir_bytes) mess->ADDRESS,
			SELF, D, (vir_bytes) &line, sizeof(line));
		if (r != OK) return EGFX_ERROR;
		return driver->draw_line_hori(line.x1, line.y1, line.x2,
			line.c);
		break;
	case GFX_REQUEST_DRAW_LINE_VERT:
		r = sys_vircopy(mess->IO_ENDPT, D, (vir_bytes) mess->ADDRESS,
			SELF, D, (vir_bytes) &line, sizeof(line));
		if (r != OK) return EGFX_ERROR;
		return driver->draw_line_vert(line.x1, line.y1, line.y2,
			line.c);
		break;
	case GFX_REQUEST_CLEAR_SCREEN:
		/* No argument struct to copy for a clear. */
		return driver->clear_screen();
	case GFX_REQUEST_DUMP_REGISTERS:
	{
		/* Copy the VGA register dump back to the caller. */
		vga_registers_t regs;
		dump_registers(&regs);
		r = sys_vircopy(SELF, D, (vir_bytes) &regs,
			mess->IO_ENDPT, D, (vir_bytes) mess->ADDRESS,
			sizeof(regs));
		if (r != OK) return EGFX_ERROR;
		return 0;
		break;
	}
	case GFX_REQUEST_RESET:
	{
		/* BIOS video interrupt 0x10, function AX=0x0003:
		 * switch back to 80x25 text mode. */
		struct reg86u reg86;
		reg86.u.b.intno = 0x10;
		reg86.u.w.ax = 0x0003;
		r = sys_int86(&reg86);
		if (r != OK) return EGFX_ERROR;
		return 0;
		break;
	}
	case GFX_REQUEST_DRAW_RECT:
		r = sys_vircopy(mess->IO_ENDPT, D, (vir_bytes) mess->ADDRESS,
			SELF, D, (vir_bytes) &rect, sizeof(rect));
		if (r != OK) return EGFX_ERROR;
		return driver->draw_rect(rect.x1, rect.y1, rect.x2, rect.y2,
			rect.c);
		break;
	case GFX_REQUEST_PUT_CHAR:
		r = sys_vircopy(mess->IO_ENDPT, D, (vir_bytes) mess->ADDRESS,
			SELF, D, (vir_bytes) &chr, sizeof(chr));
		if (r != OK) return EGFX_ERROR;
		return driver->put_char(chr.x, chr.y, chr.c, chr.chr, chr.f);
		break;
	case GFX_REQUEST_PUT_STRING:
	{
		/* Two-step copy: first the descriptor struct, then the
		 * variable-length character data it points at. */
		unsigned char *s;
		r = sys_vircopy(mess->IO_ENDPT, D, (vir_bytes) mess->ADDRESS,
			SELF, D, (vir_bytes) &string, sizeof(string));
		if (r != OK) return EGFX_ERROR;
		s = malloc(string.len);
		if (!s) return EGFX_OUT_OF_MEMORY;
		r = sys_vircopy(mess->IO_ENDPT, D, (vir_bytes) string.s,
			SELF, D, (vir_bytes) s, string.len);
		if (r != OK) {
			free(s);	/* don't leak on copy failure */
			return EGFX_ERROR;
		}
		r = driver->put_string(string.x, string.y, string.c, s,
			string.len, string.f);
		free(s);
		return r;
		break;
	}
	default:
		break;
  }

  return 0;
}
/*===========================================================================*
 *				do_trace  				     *
 *===========================================================================*/
PUBLIC int do_trace()
{
/* Handle the ptrace-style trace requests.  The first switch handles the
 * calls that do not require a stopped, traced child (T_OK, T_ATTACH,
 * T_STOP, T_READB_INS, T_WRITEB_INS); the remaining calls are made by the
 * tracer to control a child that must be stopped and traced by the caller.
 * Most requests end by forwarding to the kernel via sys_trace().
 */
  register struct mproc *child;
  struct ptrace_range pr;
  int i, r, seg, req;

  req = m_in.request;

  /* The T_OK call is made by the child fork of the debugger before it execs
   * the process to be traced. The T_ATTACH call is made by the debugger itself
   * to attach to an existing process.
   */
  switch (req) {
  case T_OK:		/* enable tracing by parent for this proc */
	if (mp->mp_tracer != NO_TRACER) return(EBUSY);

	mp->mp_tracer = mp->mp_parent;
	mp->mp_reply.reply_trace = 0;
	return(OK);

  case T_ATTACH:	/* attach to an existing process */
	if ((child = find_proc(m_in.pid)) == NULL) return(ESRCH);
	if (child->mp_flags & EXITING) return(ESRCH);

	/* For non-root processes, user and group ID must match. */
	if (mp->mp_effuid != SUPER_USER &&
		(mp->mp_effuid != child->mp_effuid ||
		 mp->mp_effgid != child->mp_effgid ||
		 child->mp_effuid != child->mp_realuid ||
		 child->mp_effgid != child->mp_realgid)) return(EPERM);

	/* Only root may trace system servers. */
	if (mp->mp_effuid != SUPER_USER && (child->mp_flags & PRIV_PROC))
		return(EPERM);

	/* System servers may not trace anyone. They can use sys_trace(). */
	if (mp->mp_flags & PRIV_PROC) return(EPERM);

	/* Can't trace self, PM or VM. */
	if (child == mp || child->mp_endpoint == PM_PROC_NR ||
		child->mp_endpoint == VM_PROC_NR) return(EPERM);

	/* Can't trace a process that is already being traced. */
	if (child->mp_tracer != NO_TRACER) return(EBUSY);

	child->mp_tracer = who_p;
	child->mp_trace_flags = TO_NOEXEC;

	/* Stop the child so the tracer gets control of it. */
	sig_proc(child, SIGSTOP, TRUE /*trace*/, FALSE /* ksig */);

	mp->mp_reply.reply_trace = 0;
	return(OK);

  case T_STOP:		/* stop the process */
	/* This call is not exposed to user programs, because its effect can be
	 * achieved better by sending the traced process a signal with kill(2).
	 */
	return(EINVAL);

  case T_READB_INS:	/* special hack for reading text segments */
	if (mp->mp_effuid != SUPER_USER) return(EPERM);
	if ((child = find_proc(m_in.pid)) == NULL) return(ESRCH);
	if (child->mp_flags & EXITING) return(ESRCH);

	r = sys_trace(req, child->mp_endpoint, m_in.PMTRACE_ADDR, &m_in.data);
	if (r != OK) return(r);

	mp->mp_reply.reply_trace = m_in.data;
	return(OK);

  case T_WRITEB_INS:	/* special hack for patching text segments */
	if (mp->mp_effuid != SUPER_USER) return(EPERM);
	if ((child = find_proc(m_in.pid)) == NULL) return(ESRCH);
	if (child->mp_flags & EXITING) return(ESRCH);

#if 0
	/* Should check for shared text */

	/* Make sure the text segment is not used as a source for shared
	 * text.
	 */
	child->mp_ino = 0;
	child->mp_dev = 0;
	child->mp_ctime = 0;
#endif

	r = sys_trace(req, child->mp_endpoint, m_in.PMTRACE_ADDR, &m_in.data);
	if (r != OK) return(r);

	mp->mp_reply.reply_trace = m_in.data;
	return(OK);
  }

  /* All the other calls are made by the tracing process to control execution
   * of the child. For all these calls, the child must be stopped.
   */
  if ((child = find_proc(m_in.pid)) == NULL) return(ESRCH);
  if (child->mp_flags & EXITING) return(ESRCH);
  if (child->mp_tracer != who_p) return(ESRCH);

  if (!(child->mp_flags & STOPPED)) return(EBUSY);

  switch (req) {
  case T_EXIT:		/* exit */
	child->mp_flags |= TRACE_EXIT;

	/* Defer the exit if the traced process has an VFS call pending. */
	if (child->mp_flags & VFS_CALL)
		child->mp_exitstatus = (int) m_in.data;	/* save for later */
	else
		exit_proc(child, (int) m_in.data, FALSE /*dump_core*/);

	/* Do not reply to the caller until VFS has processed the exit
	 * request.
	 */
	return(SUSPEND);

  case T_SETOPT:	/* set trace options */
	child->mp_trace_flags = m_in.data;

	mp->mp_reply.reply_trace = 0;
	return(OK);

  case T_GETRANGE:
  case T_SETRANGE:	/* get/set range of values */
	/* Fetch the ptrace_range descriptor from the tracer. */
	r = sys_datacopy(who_e, (vir_bytes) m_in.PMTRACE_ADDR,
		SELF, (vir_bytes) &pr, (phys_bytes) sizeof(pr));
	if (r != OK) return(r);

	if (pr.pr_space != TS_INS && pr.pr_space != TS_DATA) return(EINVAL);
	if (pr.pr_size == 0 || pr.pr_size > LONG_MAX) return(EINVAL);

	/* Map the requested space onto a segment selector. */
	seg = (pr.pr_space == TS_INS) ? T : D;
	if (req == T_GETRANGE)
		r = sys_vircopy(child->mp_endpoint, seg,
			(vir_bytes) pr.pr_addr, who_e, D,
			(vir_bytes) pr.pr_ptr, (phys_bytes) pr.pr_size);
	else
		r = sys_vircopy(who_e, D, (vir_bytes) pr.pr_ptr,
			child->mp_endpoint, seg, (vir_bytes) pr.pr_addr,
			(phys_bytes) pr.pr_size);
	if (r != OK) return(r);

	mp->mp_reply.reply_trace = 0;
	return(OK);

  case T_DETACH:	/* detach from traced process */
	if (m_in.data < 0 || m_in.data >= _NSIG) return(EINVAL);

	child->mp_tracer = NO_TRACER;

	/* Let all tracer-pending signals through the filter. */
	for (i = 1; i < _NSIG; i++) {
		if (sigismember(&child->mp_sigtrace, i)) {
			(void) sigdelset(&child->mp_sigtrace, i);
			check_sig(child->mp_pid, i, FALSE /* ksig */);
		}
	}

	if (m_in.data > 0) {		/* issue signal */
		sig_proc(child, (int) m_in.data, TRUE /*trace*/,
			FALSE /* ksig */);
	}

	/* Resume the child as if nothing ever happened. */
	child->mp_flags &= ~STOPPED;
	child->mp_trace_flags = 0;

	check_pending(child);

	break;

  case T_RESUME:
  case T_STEP:
  case T_SYSCALL:	/* resume execution */
	if (m_in.data < 0 || m_in.data >= _NSIG) return(EINVAL);

	if (m_in.data > 0) {		/* issue signal */
		sig_proc(child, (int) m_in.data, FALSE /*trace*/,
			FALSE /* ksig */);
	}

	/* If there are any other signals waiting to be delivered,
	 * feign a successful resumption.
	 */
	for (i = 1; i < _NSIG; i++) {
		if (sigismember(&child->mp_sigtrace, i)) {
			mp->mp_reply.reply_trace = 0;
			return(OK);
		}
	}

	child->mp_flags &= ~STOPPED;

	check_pending(child);

	break;
  }

  /* Forward the request to the kernel for the actual trace operation. */
  r = sys_trace(req, child->mp_endpoint, m_in.PMTRACE_ADDR, &m_in.data);
  if (r != OK) return(r);

  mp->mp_reply.reply_trace = m_in.data;
  return(OK);
}
/*===========================================================================*
 *				read_seg				     *
 *===========================================================================*/
static int read_seg(
  struct vnode *vp, 		/* inode descriptor to read from */
  off_t off,			/* offset in file */
  int proc_e,			/* process number (endpoint) */
  int seg,			/* T, D, or S */
  vir_bytes seg_addr,		/* address to load segment */
  phys_bytes seg_bytes		/* how much is to be transferred? */
)
{
/* Load one segment of an executable into the target process.  Text is read
 * into a local buffer and then copied per-segment into the process (safe
 * copies do not support segments); data is read straight into the process.
 * The byte count on read is usually smaller than the segment count, because
 * a segment is padded out to a click multiple, and the data segment is only
 * partially initialized.  Returns OK, EIO, or a req_readwrite/copy error.
 */
  int r;
  unsigned n, o;
  u64_t new_pos;
  unsigned int cum_io;
  static char buf[128 * 1024];	/* bounce buffer for the text copy loop */

  assert((seg == T)||(seg == D));

  /* Make sure that the file is big enough */
  if (vp->v_size < off+seg_bytes) return(EIO);

  if (seg == T) {
	/* We have to use a copy loop until safecopies support segments */
	o = 0;
	while (o < seg_bytes) {
		/* Clamp each chunk to the bounce buffer size. */
		n = seg_bytes - o;
		if (n > sizeof(buf))
			n = sizeof(buf);

		if ((r = req_readwrite(vp->v_fs_e,vp->v_inode_nr,cvul64(off+o),
			READING, VFS_PROC_NR, buf,
			n, &new_pos, &cum_io)) != OK) {
			printf("VFS: read_seg: req_readwrite failed (text)\n");
			return(r);
		}

		if (cum_io != n) {
			printf(
		"VFSread_seg segment has not been read properly by exec() \n");
			return(EIO);
		}

		if ((r = sys_vircopy(VFS_PROC_NR, D, (vir_bytes)buf, proc_e,
			seg, seg_addr + o, n)) != OK) {
			printf("VFS: read_seg: copy failed (text)\n");
			return(r);
		}

		o += n;
	}
	return(OK);
  } else if (seg == D) {
	/* Data can be read directly into the target process. */
	if ((r = req_readwrite(vp->v_fs_e, vp->v_inode_nr, cvul64(off),
		READING, proc_e, (char*)seg_addr,
		seg_bytes, &new_pos, &cum_io)) != OK) {
		printf("VFS: read_seg: req_readwrite failed (data)\n");
		return(r);
	}

	/* A short read is reported but not treated as a hard error. */
	if (r == OK && cum_io != seg_bytes)
		printf("VFS: read_seg segment has not been read properly by exec()\n");

	return(r);
  }

  return(OK);
}
/*===========================================================================*
 *				do_slink				     *
 *===========================================================================*/
PUBLIC int do_slink()
{
/* Perform the symlink(name1, name2) system call: create a symlink inode for
 * name2, allocate its first disk block and copy the target path (name1)
 * from the caller into it.  On any failure after inode creation the new
 * link is removed again.
 */
  register int r;		/* error code */
  char string[NAME_MAX];	/* last component of the new dir's path name */
  struct inode *sip;		/* inode containing symbolic link */
  struct buf *bp;		/* disk buffer for link */
  struct inode *ldirp;		/* directory containing link */

  if (fetch_name(m_in.name2, m_in.name2_length, M1) != OK)
	return(err_code);

  /* The target must fit (with terminator) in one block. */
  if (m_in.name1_length <= 1 || m_in.name1_length >= _MIN_BLOCK_SIZE)
	return(ENAMETOOLONG);

  /* Create the inode for the symlink. */
  sip = new_node(&ldirp, user_path, (mode_t) (I_SYMBOLIC_LINK | RWX_MODES),
  	(zone_t) 0, TRUE, string);

  /* Allocate a disk block for the contents of the symlink.
   * Copy contents of symlink (the name pointed to) into first disk block.
   */
  if ((r = err_code) == OK) {
	r = (bp = new_block(sip, (off_t) 0)) == NIL_BUF ? err_code :
		sys_vircopy(who_e, D, (vir_bytes) m_in.name1,
		SELF, D, (vir_bytes) bp->b_data,
		(vir_bytes) m_in.name1_length-1);

	if(r == OK) {
		/* Force-terminate and derive the link size from the
		 * actual string length. */
		bp->b_data[_MIN_BLOCK_SIZE-1] = '\0';
		sip->i_size = strlen(bp->b_data);
		if(sip->i_size != m_in.name1_length-1) {
			/* This can happen if the user provides a buffer
			 * with a \0 in it. This can cause a lot of trouble
			 * when the symlink is used later. We could just use
			 * the strlen() value, but we want to let the user
			 * know he did something wrong. ENAMETOOLONG doesn't
			 * exactly describe the error, but there is no
			 * ENAMETOOWRONG.
			 */
			r = ENAMETOOLONG;
		}
	}

	put_block(bp, DIRECTORY_BLOCK);	/* put_block() accepts NIL_BUF. */

	if (r != OK) {
		/* Undo: drop the link and remove the directory entry. */
		sip->i_nlinks = 0;
		if (search_dir(ldirp, string, (ino_t *) 0, DELETE) != OK)
			panic(__FILE__, "Symbolic link vanished", NO_NUM);
	}
  }

  /* put_inode() accepts NIL_INODE as a noop, so the below are safe. */
  put_inode(sip);
  put_inode(ldirp);
  return(r);
}
/*===========================================================================* * do_mapdriver * *===========================================================================*/ PUBLIC int do_mapdriver() { int r, force, major, proc_nr_n; unsigned long tasknr; vir_bytes label_vir; size_t label_len; char label[LABEL_MAX]; if (!super_user) { printf("FS: unauthorized call of do_mapdriver by proc %d\n", who_e); return(EPERM); /* only su (should be only RS or some drivers) * may call do_mapdriver. */ } /* Get the label */ label_vir= (vir_bytes)m_in.md_label; label_len= m_in.md_label_len; if (label_len+1 > sizeof(label)) { printf("vfs:do_mapdriver: label too long\n"); return EINVAL; } r= sys_vircopy(who_e, D, label_vir, SELF, D, (vir_bytes)label, label_len); if (r != OK) { printf("vfs:do_mapdriver: sys_vircopy failed: %d\n", r); return EINVAL; } label[label_len]= '\0'; r= ds_retrieve_label_num(label, &tasknr); if (r != OK) { printf("vfs:do_mapdriver: ds doesn't know '%s'\n", label); return EINVAL; } if (isokendpt(tasknr, &proc_nr_n) != OK) { printf("vfs:do_mapdriver: bad endpoint %d\n", tasknr); return(EINVAL); } /* Try to update device mapping. */ major= m_in.md_major; force= m_in.md_force; r= map_driver(label, major, tasknr, m_in.md_style, force); if (r == OK) { /* If a driver has completed its exec(), it can be announced * to be up. */ if(force || fproc[proc_nr_n].fp_execced) { dev_up(major); } else { dmap[major].dmap_flags |= DMAP_BABY; } } return(r); }
/*===========================================================================*
 *				do_select				     *
 *===========================================================================*/
int do_select(void)
{
/* Implement the select(nfds, readfds, writefds, errorfds, timeout) system
 * call. First we copy the arguments and verify their sanity. Then we check
 * whether there are file descriptors that satisfy the select call right of the
 * bat. If so, or if there are no ready file descriptors but the process
 * requested to return immediately, we return the result. Otherwise we set a
 * timeout and wait for either the file descriptors to become ready or the
 * timer to go off. If no timeout value was provided, we wait indefinitely.
 */
  int r, nfds, do_timeout = 0, fd, s;
  struct timeval timeout;
  struct selectentry *se;
  vir_bytes vtimeout;

  nfds = job_m_in.SEL_NFDS;
  vtimeout = (vir_bytes) job_m_in.SEL_TIMEOUT;

  /* Sane amount of file descriptors? */
  if (nfds < 0 || nfds > OPEN_MAX) return(EINVAL);

  /* Find a slot to store this select request */
  for (s = 0; s < MAXSELECTS; s++)
	if (selecttab[s].requestor == NULL) /* Unused slot */
		break;
  if (s >= MAXSELECTS) return(ENOSPC);

  se = &selecttab[s];
  wipe_select(se);	/* Clear results of previous usage */
  se->requestor = fp;
  se->req_endpt = who_e;
  /* Remember the caller's fd_set addresses; results are copied back there. */
  se->vir_readfds = (fd_set *) job_m_in.SEL_READFDS;
  se->vir_writefds = (fd_set *) job_m_in.SEL_WRITEFDS;
  se->vir_errorfds = (fd_set *) job_m_in.SEL_ERRORFDS;

  /* Copy fdsets from the process.  On any failure below the slot must be
   * released by clearing se->requestor before returning. */
  if ((r = copy_fdsets(se, nfds, FROM_PROC)) != OK) {
	se->requestor = NULL;
	return(r);
  }

  /* Did the process set a timeout value? If so, retrieve it. */
  if (vtimeout != 0) {
	do_timeout = 1;
	/* NOTE(review): this sys_vircopy call passes four address arguments
	 * and no segment selectors, unlike other call sites in these sources
	 * which pass D segments — confirm it matches the kernel API version
	 * this file is built against. */
	r = sys_vircopy(who_e, (vir_bytes) vtimeout, SELF,
		(vir_bytes) &timeout, sizeof(timeout));
	if (r != OK) {
		se->requestor = NULL;
		return(r);
	}
  }

  /* No nonsense in the timeval */
  if (do_timeout && (timeout.tv_sec < 0 || timeout.tv_usec < 0)) {
	se->requestor = NULL;
	return(EINVAL);
  }

  /* If there is no timeout, we block forever. Otherwise, we block up to the
   * specified time interval.
   */
  if (!do_timeout)	/* No timeout value set */
	se->block = 1;
  else if (do_timeout && (timeout.tv_sec > 0 || timeout.tv_usec > 0))
	se->block = 1;
  else			/* timeout set as (0,0) - this effects a poll */
	se->block = 0;
  se->expiry = 0;	/* no timer set (yet) */

  /* Verify that file descriptors are okay to select on */
  for (fd = 0; fd < nfds; fd++) {
	struct filp *f;
	unsigned int type, ops;

	/* Because the select() interface implicitly includes file descriptors
	 * you might not want to select on, we have to figure out whether we're
	 * interested in them. Typically, these file descriptors include fd's
	 * inherited from the parent proc and file descriptors that have been
	 * close()d, but had a lower fd than one in the current set.
	 */
	if (!(ops = tab2ops(fd, se)))
		continue; /* No operations set; nothing to do for this fd */

	/* Get filp belonging to this fd */
	f = se->filps[fd] = get_filp(fd, VNODE_READ);
	if (f == NULL) {
		if (err_code == EBADF)
			r = err_code;
		else /* File descriptor is 'ready' to return EIO */
			r = EINTR;

		se->requestor = NULL;
		return(r);
	}

	/* Check file types. According to POSIX 2008:
	 * "The pselect() and select() functions shall support regular files,
	 * terminal and pseudo-terminal devices, FIFOs, pipes, and sockets. The
	 * behavior of pselect() and select() on file descriptors that refer to
	 * other types of file is unspecified."
	 *
	 * In our case, terminal and pseudo-terminal devices are handled by the
	 * TTY major and sockets by either INET major (socket type AF_INET) or
	 * PFS major (socket type AF_UNIX). PFS acts as an FS when it handles
	 * pipes and as a driver when it handles sockets. Additionally, we
	 * support select on the LOG major to handle kernel logging, which is
	 * beyond the POSIX spec.
	 */
	se->type[fd] = -1;
	for (type = 0; type < SEL_FDS; type++) {
		if (fdtypes[type].type_match(f)) {
			se->type[fd] = type;
			se->nfds = fd+1;
			/* Bump the selector count so the filp is not reused
			 * out from under this pending select. */
			se->filps[fd]->filp_selectors++;
			break;
		}
	}
	unlock_filp(f);
	if (se->type[fd] == -1) { /* Type not found */
		se->requestor = NULL;
		return(EBADF);
	}
  }

  /* Check all file descriptors in the set whether one is 'ready' now */
  for (fd = 0; fd < nfds; fd++) {
	int ops, r;
	struct filp *f;

	/* Again, check for involuntarily selected fd's */
	if (!(ops = tab2ops(fd, se)))
		continue; /* No operations set; nothing to do for this fd */

	/* Test filp for select operations if not already done so. e.g.,
	 * processes sharing a filp and both doing a select on that filp. */
	f = se->filps[fd];
	if ((f->filp_select_ops & ops) != ops) {
		int wantops;

		wantops = (f->filp_select_ops |= ops);
		r = do_select_request(se, fd, &wantops);
		if (r != OK && r != SUSPEND)
			break; /* Error or bogus return code; abort */

		/* The select request above might have turned on/off some
		 * operations because they were 'ready' or not meaningful.
		 * Either way, we might have a result and we need to store them
		 * in the select table entry. */
		if (wantops & ops) ops2tab(wantops, fd, se);
	}
  }

  if ((se->nreadyfds > 0 || !se->block) && !is_deferred(se)) {
	/* fd's were found that were ready to go right away, and/or
	 * we were instructed not to block at all. Must return
	 * immediately. */
	r = copy_fdsets(se, se->nfds, TO_PROC);
	select_cancel_all(se);
	se->requestor = NULL;

	if (r != OK)
		return(r);
	else if (se->error != OK)
		return(se->error);

	return(se->nreadyfds);
  }

  /* Convert timeval to ticks and set the timer. If it fails, undo
   * all, return error. */
  if (do_timeout) {
	int ticks;
	/* Open Group:
	 * "If the requested timeout interval requires a finer
	 * granularity than the implementation supports, the
	 * actual timeout interval shall be rounded up to the next
	 * supported value."
	 */
#define USECPERSEC 1000000
	while(timeout.tv_usec >= USECPERSEC) {
		/* this is to avoid overflow with *system_hz below */
		timeout.tv_usec -= USECPERSEC;
		timeout.tv_sec++;
	}
	/* Round the microsecond part up to the next whole tick. */
	ticks = timeout.tv_sec * system_hz +
		(timeout.tv_usec * system_hz + USECPERSEC-1) / USECPERSEC;
	se->expiry = ticks;
	set_timer(&se->timer, ticks, select_timeout_check, s);
  }

  /* process now blocked */
  suspend(FP_BLOCKED_ON_SELECT);
  return(SUSPEND);
}
/* OSSendPtab: record one snapshot of the kernel process table and the
 * scheduling-queue heads, and push it to a monitoring process.
 *
 * Only active while recordSched == 1 and fewer than HISTORY snapshots have
 * been taken (pos_count).  Also refreshes the 'sjf' table of tracked
 * processes (matched by name against proc_name[]) with their CPU cycle
 * counts and blocked state.
 */
void OSSendPtab(void){
	int i;
	if(recordSched == 1){
		/* Snapshot of per-process info to send to the monitor. */
		struct pi sendPi[NR_PROCS + NR_TASKS];
		/*Use the following array to recover the next ready processes before we lose the addresses*/
		struct proc nextReady[NR_PROCS + NR_TASKS];
		/* Local copy of the kernel process table. */
		struct proc tmpPtab[NR_PROCS +NR_TASKS];
		/* Heads of the scheduling queues, fetched from the kernel. */
		struct proc queuehds[NR_SCHED_QUEUES];
		if(pos_count < HISTORY ){
			/*Get the current process table */
			sys_getproctab((struct proc *) &tmpPtab);
			sys_cpuvar((char *) &queuehds,SELF);
			/* Handle the heads of each queue */
			struct qh qh_send[NR_SCHED_QUEUES];
			for(i=0;i<NR_SCHED_QUEUES;i++){
				/* p_priority == -1 marks an empty queue slot. */
				if(queuehds[i].p_priority!=-1){
					strcpy(qh_send[i].p_name,queuehds[i].p_name);
					qh_send[i].p_endpoint = queuehds[i].p_endpoint;
				}
				else{
					/* NOTE(review): qh_send[i].p_name is left
					 * uninitialized on this branch; receivers
					 * should key off p_endpoint == -1 only —
					 * confirm, or zero the name here. */
					qh_send[i].p_endpoint = -1;
				}
			}
			for(i=0;i<(NR_PROCS+NR_TASKS);i++){
				/* Copy identity into the outgoing record. */
				strcpy(sendPi[i].p_name,tmpPtab[i].p_name);
				sendPi[i].p_endpoint = tmpPtab[i].p_endpoint;
				/* Update the SJF tracking table for processes
				 * whose name appears in proc_name[]. */
				for (int l=0; l<PROCNUM; l++) {
					if (0 == strcmp(tmpPtab[i].p_name, proc_name[l])) {
						strcpy(sjf[l].p_name,tmpPtab[i].p_name);
						sjf[l].p_endpoint = tmpPtab[i].p_endpoint;
						sjf[l].ticks = tmpPtab[i].p_cycles;
						if(!proc_is_runnable(&tmpPtab[i])) {
							sjf[l].is_blocked = 1;
							// sjf[l].predBurst = INT_MAX;
							// sjf[l].ticks = INT_MAX;
						}
						else {
							sjf[l].is_blocked = 0;
						}
					}
				}
				/* Scheduling state of this slot. */
				sendPi[i].p_priority = tmpPtab[i].p_priority;
				sendPi[i].p_cpu_time_left = tmpPtab[i].p_cpu_time_left;
				sendPi[i].p_rts_flags = tmpPtab[i].p_rts_flags;
				if(tmpPtab[i].p_nextready){
					/* p_nextready points into the kernel's own
					 * table, so fetch that proc via SYSTEM. */
					sys_vircopy(SYSTEM,(vir_bytes) tmpPtab[i].p_nextready,
						SELF,(vir_bytes) &(nextReady[i]),sizeof(struct proc));
					strcpy(sendPi[i].p_nextready,nextReady[i].p_name);
					sendPi[i].p_nextready_endpoint = nextReady[i].p_endpoint;
				}
				else{
					/* No successor: sentinel name and endpoint. */
					strcpy(sendPi[i].p_nextready, NOPROC);
					sendPi[i].p_nextready_endpoint = -1;
				}
				/*Copy the accounting structure. Using CPU cycles
				 * instead of times, because CPU speeds will vary*/
				sendPi[i].p_times.enter_queue = tmpPtab[i].p_accounting.enter_queue;
				sendPi[i].p_times.time_in_queue = tmpPtab[i].p_accounting.time_in_queue;
				sendPi[i].p_times.dequeues = tmpPtab[i].p_accounting.dequeues;
				sendPi[i].p_times.ipc_sync = tmpPtab[i].p_accounting.ipc_sync;
				sendPi[i].p_times.ipc_async = tmpPtab[i].p_accounting.ipc_async;
				sendPi[i].p_times.preempted = tmpPtab[i].p_accounting.preempted;
			}
			/* Ship the snapshot and queue heads to the monitor's
			 * buffers, then publish the new snapshot index. */
			sys_vircopy(SELF,(vir_bytes) &sendPi,
				srcAddr,(vir_bytes) pInfoPtrs[pos_count],sizeof(sendPi));
			sys_vircopy(SELF,(vir_bytes) &qh_send,
				srcAddr,(vir_bytes) pQhPtrs[pos_count],sizeof(qh_send));
			int piReady = pos_count;
			sys_vircopy(SELF,(vir_bytes) &piReady,
				srcAddr, (vir_bytes) srcPtr2, sizeof(piReady));
			pos_count++;
			/* Ensure the proc history buffer does not overflow*/
		}
	}
}
/*===========================================================================*
 *				pt_init					     *
 *===========================================================================*/
void pt_init(void)
{
/* Bootstrap VM's own page table: reserve spare pages/pagedirs for early
 * allocation, map in the kernel's own mappings, reserve PDEs for page
 * directory bookkeeping, build a fresh page table for the VM process, copy
 * over the kernel-initialized page table contents, and bind the result.
 */
	pt_t *newpt;
	int s, r, p;
	vir_bytes sparepages_mem;
#if defined(__arm__)
	vir_bytes sparepagedirs_mem;
#endif
	/* static: too large for the (still tiny) stack at this point. */
	static u32_t currentpagedir[ARCH_VM_DIR_ENTRIES];
	int m = kernel_boot_info.kern_mod;
#if defined(__i386__)
	int global_bit_ok = 0;
	u32_t mypdbr; /* Page Directory Base Register (cr3) value */
#elif defined(__arm__)
	u32_t myttbr;
#endif

	/* Find what the physical location of the kernel is. */
	assert(m >= 0);
	assert(m < kernel_boot_info.mods_with_kernel);
	assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS);
	kern_mb_mod = &kernel_boot_info.module_list[m];
	kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start;
	assert(!(kern_mb_mod->mod_start % ARCH_BIG_PAGE_SIZE));
	assert(!(kernel_boot_info.vir_kern_start % ARCH_BIG_PAGE_SIZE));
	kern_start_pde = kernel_boot_info.vir_kern_start / ARCH_BIG_PAGE_SIZE;

	/* Get ourselves spare pages. */
	sparepages_mem = (vir_bytes) static_sparepages;
	assert(!(sparepages_mem % VM_PAGE_SIZE));

#if defined(__arm__)
	/* Get ourselves spare pagedirs. */
	sparepagedirs_mem = (vir_bytes) static_sparepagedirs;
	assert(!(sparepagedirs_mem % ARCH_PAGEDIR_SIZE));
#endif

	/* Spare pages are used to allocate memory before VM has its own page
	 * table that things (i.e. arbitrary physical memory) can be mapped into.
	 * We get it by pre-allocating it in our bss (allocated and mapped in by
	 * the kernel) in static_sparepages. We also need the physical addresses
	 * though; we look them up now so they are ready for use.
	 */
#if defined(__arm__)
	missing_sparedirs = 0;
	assert(STATIC_SPAREPAGEDIRS <= SPAREPAGEDIRS);
	for(s = 0; s < SPAREPAGEDIRS; s++) {
		/* NOTE(review): stray double semicolon below — harmless. */
		vir_bytes v = (sparepagedirs_mem + s*ARCH_PAGEDIR_SIZE);;
		phys_bytes ph;
		if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
			ARCH_PAGEDIR_SIZE, &ph)) != OK)
			panic("pt_init: sys_umap failed: %d", r);
		if(s >= STATIC_SPAREPAGEDIRS) {
			/* Not statically allocated; filled in later. */
			sparepagedirs[s].pagedir = NULL;
			missing_sparedirs++;
			continue;
		}
		sparepagedirs[s].pagedir = (void *) v;
		sparepagedirs[s].phys = ph;
	}
#endif

	if(!(spare_pagequeue = reservedqueue_new(SPAREPAGES, 1, 1, 0)))
		panic("reservedqueue_new for single pages failed");

	assert(STATIC_SPAREPAGES < SPAREPAGES);
	for(s = 0; s < STATIC_SPAREPAGES; s++) {
		void *v = (void *) (sparepages_mem + s*VM_PAGE_SIZE);
		phys_bytes ph;
		if((r=sys_umap(SELF, VM_D, (vir_bytes) v, VM_PAGE_SIZE*SPAREPAGES,
			&ph)) != OK)
			panic("pt_init: sys_umap failed: %d", r);
		reservedqueue_add(spare_pagequeue, v, ph);
	}

#if defined(__i386__)
	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set bit for PTE's and PDE's if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;
#endif

	/* Now reserve another pde for kernel's own mappings. */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, index = 0;
		u32_t offset = 0;

		kernmap_pde = freepde();
		offset = kernmap_pde * ARCH_BIG_PAGE_SIZE;

		while(sys_vmctl_get_mapping(index, &addr, &len,
			&flags) == OK)  {
			int usedpde;
			vir_bytes vir;
			if(index >= MAX_KERNMAPPINGS)
				panic("VM: too many kernel mappings: %d", index);
			kern_mappings[index].phys_addr = addr;
			kern_mappings[index].len = len;
			/* NOTE(review): this assignment is immediately
			 * overwritten below; the raw 'flags' value is still
			 * consulted via the local variable, so this store is
			 * dead — confirm against upstream before removing. */
			kern_mappings[index].flags = flags;
			kern_mappings[index].vir_addr = offset;
			/* Rebuild arch PTE flags from the VMMF_* bits. */
			kern_mappings[index].flags =
				ARCH_VM_PTE_PRESENT;
			if(flags & VMMF_UNCACHED)
#if defined(__i386__)
				kern_mappings[index].flags |= PTF_NOCACHE;
#elif defined(__arm__)
				kern_mappings[index].flags |= ARM_VM_PTE_DEVICE;
#endif
			if(flags & VMMF_USER)
				kern_mappings[index].flags |= ARCH_VM_PTE_USER;
#if defined(__arm__)
			else
				kern_mappings[index].flags |= ARM_VM_PTE_SUPER;
#endif
			if(flags & VMMF_WRITE)
				kern_mappings[index].flags |= ARCH_VM_PTE_RW;
#if defined(__i386__)
			if(flags & VMMF_GLO)
				kern_mappings[index].flags |= I386_VM_GLOBAL;
#elif defined(__arm__)
			/* On ARM this 'else' pairs with the VMMF_WRITE test
			 * above: non-writable mappings become read-only. */
			else
				kern_mappings[index].flags |= ARCH_VM_PTE_RO;
#endif
			if(addr % VM_PAGE_SIZE)
				panic("VM: addr unaligned: %d", addr);
			if(len % VM_PAGE_SIZE)
				panic("VM: len unaligned: %d", len);
			/* Tell the kernel where this mapping landed. */
			vir = offset;
			if(sys_vmctl_reply_mapping(index, vir) != OK)
				panic("VM: reply failed");
			offset += len;
			index++;
			kernmappings++;

			/* Claim additional PDEs if the mappings spilled past
			 * the one reserved above. */
			usedpde = ARCH_VM_PDE(offset);
			while(usedpde > kernmap_pde) {
				int newpde = freepde();
				assert(newpde == kernmap_pde+1);
				kernmap_pde = newpde;
			}
		}
	}

	/* Reserve PDEs available for mapping in the page directories. */
	{
		int pd;
		for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
			struct pdm *pdm = &pagedir_mappings[pd];
			pdm->pdeno = freepde();
			phys_bytes ph;

			/* Allocate us a page table in which to
			 * remember page directory pointers.
			 */
			if(!(pdm->page_directories =
				vm_allocpage(&ph, VMP_PAGETABLE))) {
				panic("no virt addr for vm mappings");
			}
			memset(pdm->page_directories, 0, VM_PAGE_SIZE);
			pdm->phys = ph;

#if defined(__i386__)
			pdm->val = (ph & ARCH_VM_ADDR_MASK) |
				ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
#elif defined(__arm__)
			pdm->val = (ph & ARCH_VM_PDE_MASK) |
				ARCH_VM_PDE_PRESENT |
				ARM_VM_PDE_DOMAIN; //LSC FIXME
#endif
		}
	}

	/* Allright. Now. We have to make our own page directory and page tables,
	 * that the kernel has already set up, accessible to us. It's easier to
	 * understand if we just copy all the required pages (i.e. page directory
	 * and page tables), and set up the pointers as if VM had done it itself.
	 *
	 * This allocation will happen without using any page table, and just
	 * uses spare pages.
	 */
	newpt = &vmprocess->vm_pt;
	if(pt_new(newpt) != OK)
		panic("vm pt_new failed");

	/* Get our current pagedir so we can see it. */
#if defined(__i386__)
	if(sys_vmctl_get_pdbr(SELF, &mypdbr) != OK)
#elif defined(__arm__)
	if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
#endif
		panic("VM: sys_vmctl_get_pdbr failed");
#if defined(__i386__)
	if(sys_vircopy(NONE, mypdbr, SELF,
		(vir_bytes) currentpagedir, VM_PAGE_SIZE) != OK)
#elif defined(__arm__)
	if(sys_vircopy(NONE, myttbr, SELF,
		(vir_bytes) currentpagedir, ARCH_PAGEDIR_SIZE) != OK)
#endif
		panic("VM: sys_vircopy failed");

	/* We have mapped in kernel ourselves; now copy mappings for VM
	 * that kernel made, including allocations for BSS. Skip identity
	 * mapping bits; just map in VM.
	 */
	for(p = 0; p < ARCH_VM_DIR_ENTRIES; p++) {
		u32_t entry = currentpagedir[p];
		phys_bytes ptaddr_kern, ptaddr_us;

		/* BIGPAGEs are kernel mapping (do ourselves) or boot
		 * identity mapping (don't want).
		 */
		if(!(entry & ARCH_VM_PDE_PRESENT)) continue;
		if((entry & ARCH_VM_BIGPAGE)) continue;

		if(pt_ptalloc(newpt, p, 0) != OK)
			panic("pt_ptalloc failed");
		assert(newpt->pt_dir[p] & ARCH_VM_PDE_PRESENT);

#if defined(__i386__)
		ptaddr_kern = entry & ARCH_VM_ADDR_MASK;
		ptaddr_us = newpt->pt_dir[p] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
		ptaddr_kern = entry & ARCH_VM_PDE_MASK;
		ptaddr_us = newpt->pt_dir[p] & ARCH_VM_PDE_MASK;
#endif

		/* Copy kernel-initialized pagetable contents into our
		 * normally accessible pagetable.
		 */
		if(sys_abscopy(ptaddr_kern, ptaddr_us, VM_PAGE_SIZE) != OK)
			panic("pt_init: abscopy failed");
	}

	/* Inform kernel vm has a newly built page table. */
	assert(vmproc[VM_PROC_NR].vm_endpoint == VM_PROC_NR);
	pt_bind(newpt, &vmproc[VM_PROC_NR]);

	pt_init_done = 1;

	/* All OK. */
	return;
}
void OSSendPtab(void) { if(snapNum == HISTORY || !firstCall) return; int i; struct qh qHistCur[NR_SCHED_QUEUES]; //Copy the queue head sys_getqhead(&qHistCur, &cpuFreq); for(i = 0; i < (NR_SCHED_QUEUES); i++) { strcpy(qHist[snapNum][i].p_name, qHistCur[i].p_name); qHist[snapNum][i].p_endpoint = qHistCur[i].p_endpoint; } //Get a copy of the current scheduling process table sys_getproctab((struct proc *) &tmpPtab); //Check all processes for(i=0;i<(NR_PROCS+NR_TASKS);i++) { //Copy the process name strcpy(snapShotHist[snapNum][i].p_name,tmpPtab[i].p_name); //Copy endpoint snapShotHist[snapNum][i].p_endpoint = tmpPtab[i].p_endpoint; //Copy priority snapShotHist[snapNum][i].p_priority = tmpPtab[i].p_priority; //Copy remaining CPU time snapShotHist[snapNum][i].p_cpu_time_left = tmpPtab[i].p_cpu_time_left; // Copy flags? (What are flags? For blocked processes?) snapShotHist[snapNum][i].p_rts_flags = tmpPtab[i].p_rts_flags; //PI Has queue head but proc does not. //Queue head copy goes here if needed //Copy NextReady if(tmpPtab[i].p_nextready) { //Copy the process sys_vircopy(SYSTEM,(vir_bytes) tmpPtab[i].p_nextready, SELF,(vir_bytes) &nextProc,sizeof(struct proc)); //Copy name strcpy(snapShotHist[snapNum][i].p_nextready,nextProc.p_name); //Copy next processes endpoint snapShotHist[snapNum][i].p_nextready_endpoint = nextProc.p_endpoint; } else { //Copy no Name and -1 for endpoint strcpy(snapShotHist[snapNum][i].p_nextready, NOPROC); snapShotHist[snapNum][i].p_nextready_endpoint = -1; } //Copy the p_accounting sys_vircopy(SYSTEM,(vir_bytes) &tmpPtab[i].p_accounting, SELF,(vir_bytes) &(snapShotHist[snapNum][i].p_times) ,sizeof(struct p_accounting)); //Copy user time snapShotHist[snapNum][i].p_user_time = tmpPtab[i].p_user_time; //Copy sys time snapShotHist[snapNum][i].p_sys_time = tmpPtab[i].p_sys_time; //Copy cycles snapShotHist[snapNum][i].p_cycles = tmpPtab[i].p_cycles; } //Incr number of snapshots taken snapNum++; }
/**
 * @brief Read segment
 * @param vp inode descriptor to read from
 * @param off offset in file
 * @param proc_e process number (endpoint)
 * @param seg T, D, or S
 * @param seg_bytes how much is to be transferred?
 * @return 0 on success, negative errno on failure
 */
static int aout_read_seg(struct vnode *vp, off_t off, int proc_e, int seg,
	phys_bytes seg_bytes)
{
/* The byte count on read is usually smaller than the segment count, because
 * a segment is padded out to a click multiple, and the data segment is only
 * partially initialized.
 */
  int err = 0;
  unsigned n, k;
  u64_t new_pos;
  unsigned int cum_io;
  /* Size of the bounce buffer for the copy loop below.  BUG FIX: the old
   * code clamped each chunk with 'sizeof(buf)', but buf is a pointer, so
   * only sizeof(char *) (4 or 8) bytes of the 1024-byte allocation were
   * ever used per iteration.  Compare against the real buffer size.
   */
  enum { AOUT_SEG_BUF_SIZE = 1024 };

  /* Make sure that the file is big enough */
  if (vp->v_size < off+seg_bytes) return -EIO;

  if (seg != D) {
	char *buf = 0;

	/* We have to use a copy loop until safecopies support segments */
	k = 0;
	buf = malloc(AOUT_SEG_BUF_SIZE);
	if (!buf) {
		printk("Not enough memory!\n");
		return -ENOMEM;
	}

	while (k < seg_bytes) {
		/* Copy at most one bufferful per round trip. */
		n = seg_bytes - k;
		if (n > AOUT_SEG_BUF_SIZE)
			n = AOUT_SEG_BUF_SIZE;

#if CONFIG_DEBUG_VFS_AOUT
		printk("read_seg for user %d, seg %d: buf 0x%x, size %d, pos %d\n",
			proc_e, seg, buf, n, off+k);
#endif

		/* Issue request */
		err = req_readwrite(vp->v_fs_e, vp->v_inode_nr,
			cvul64(off+k), READING, VFS_PROC_NR, buf, n,
			&new_pos, &cum_io);
		if (err) {
			printk("VFS: read_seg: req_readwrite failed (text)\n");
			goto aout_free_buf;
		}

		if (cum_io != n) {
			printk("read_seg segment has not been read properly by exec()\n");
			err = -EIO;
			goto aout_free_buf;
		}

		/* Push this chunk into the target process' segment. */
		err = sys_vircopy(VFS_PROC_NR, D, (vir_bytes)buf, proc_e, seg, k, n);
		if (err) {
			printk("VFS: read_seg: copy failed (text)\n");
			goto aout_free_buf;
		}

		k += n;
	}

aout_free_buf:
	free(buf);
	return err;
  }

  /* Data segment: the FS can transfer straight into the target process. */
  /* Issue request */
  err = req_readwrite(vp->v_fs_e, vp->v_inode_nr, cvul64(off), READING,
	proc_e, 0, seg_bytes, &new_pos, &cum_io);
  if (err) {
	printk("VFS: read_seg: req_readwrite failed (data)\n");
	return err;
  }

  if (!err && cum_io != seg_bytes)
	printk("VFSread_seg segment has not been read properly by exec() \n");

  return err;
}
/*==========================================================================*
 *				do_vrdwt				    *
 *==========================================================================*/
PRIVATE int do_vrdwt(message* mp)	/* mp - pointer to read or write message */
{
/* Carry out an device read or write to/from a vector of user addresses.
 * The "user addresses" are assumed to be safe, i.e. FS transferring to/from
 * its own buffers, so they are not checked.
 *
 * DEV_GATHER requests are forwarded to the real driver as DEV_READ into the
 * local bounce buffer, decrypted, and copied to the caller; DEV_SCATTER
 * requests are copied from the caller, encrypted, and forwarded as
 * DEV_WRITE.  A TASK_REPLY is sent to device_caller when done.
 */
  static iovec_t iovec[NR_IOREQS];
  iovec_t *iov;
  phys_bytes iovec_size;
  unsigned nr_req, position;
  int r;
  message m_dd;	/*message for disk driver*/

  nr_req = mp->COUNT;	/* Length of I/O vector */
  position = mp->POSITION;

  /* Copy the vector from the caller to kernel space.  Requests beyond
   * NR_IOREQS are silently truncated. */
  if (nr_req > NR_IOREQS) nr_req = NR_IOREQS;
  iovec_size = (phys_bytes) (nr_req * sizeof(iovec[0]));
  /* NOTE(review): 's' in the panic calls is not declared in this function;
   * presumably a file-scope variable — confirm it exists. */
  if (OK != sys_datacopy(mp->m_source, (vir_bytes) mp->ADDRESS,
		SELF, (vir_bytes) iovec, iovec_size))
	panic("Crypt Drive","bad I/O vector by", s);

  iov = iovec;
  while(nr_req>0){
	vir_bytes user_vir = iov->iov_addr;	/*User program mem addresss*/
	unsigned count = iov->iov_size;	/* number of byted to copy */
	printf("CryptDrive: Size:%d , DST:%d , POS:%d, REQ: %d/%d \n",
		count, user_vir, position, nr_req, mp->COUNT);
	if(mp->m_type == DEV_GATHER){
		/* Forward as a plain read into the local bounce buffer. */
		m_dd.m_type=DEV_READ;
		m_dd.DEVICE=mp->DEVICE;
		m_dd.m_source=thispid;
		m_dd.COUNT=count;
		m_dd.POSITION=position;
		m_dd.ADDRESS=(vir_bytes) buffer;
		if(OK != sendrec(DRVR_PROC_NR, &m_dd))
			panic("CryptDrive","do_rdv messaging failed",s);
		/* decrypt here - this line here */
		/*from here to caller*/
		/* NOTE(review): unlike the DEV_SCATTER branch below, 'count'
		 * is not refreshed from m_dd.REP_STATUS here, so a short read
		 * still advances 'position' by the full requested size —
		 * confirm whether that is intended. */
		sys_vircopy(SELF, D, buffer, proc_nr, D, user_vir, m_dd.COUNT);
	}
	if(mp->m_type == DEV_SCATTER){
		/*from caller to here*/
		sys_vircopy(proc_nr, D, user_vir, SELF, D, buffer, count);
		/*ENCYPT Here - this line no more no less*/
		/* Forward as a plain write from the local bounce buffer. */
		m_dd.m_type=DEV_WRITE;
		m_dd.DEVICE=mp->DEVICE;
		m_dd.m_source=thispid;
		m_dd.COUNT=count;
		m_dd.POSITION=position;
		m_dd.ADDRESS=(vir_bytes) buffer;
		if(OK != sendrec(DRVR_PROC_NR, &m_dd))
			panic("CryptDrive","do_wtv messaging failed",s);
		/* The driver reports the number of bytes actually written. */
		count=m_dd.REP_STATUS;
	}

	/* Book the number of bytes transferred. */
	position += count;
	iov->iov_addr += count;
	if ((iov->iov_size -= count) == 0) {
		iov++;
		nr_req--;
	}	/* vector done; next request */
  }

  /* Copy the I/O vector back to the caller. */
  if (mp->m_source >= 0) {
	sys_datacopy(SELF, (vir_bytes) iovec,
		mp->m_source, (vir_bytes) mp->ADDRESS, iovec_size);
  }
  mp->m_type = TASK_REPLY;
  mp->REP_PROC_NR = proc_nr;
  mp->m_source=thispid;
  /* Status is ok */
  mp->REP_STATUS = OK;
  send(device_caller, mp);
  return(OK);
}