/*
 * pgfault_handler - ARM page-fault entry: select the faulting mm, translate
 * the ARM fault-status bits in tf_err into the kernel's generic error code,
 * and hand off to do_pgfault().
 *
 * Returns do_pgfault()'s result (0 on success); panics when there is no
 * process context to handle the fault.
 */
static int pgfault_handler(struct trapframe *tf) {
    extern struct mm_struct *check_mm_struct;
    struct mm_struct *mm;
    if (check_mm_struct != NULL) {
        /* early self-test phase: only the idle thread may fault here */
        //assert(current == idleproc);
        assert(pls_read(current) == pls_read(idleproc));
        mm = check_mm_struct;
    }
    else {
        if (pls_read(current) == NULL) {
            /* fault with no process context: nothing to page in, die loudly */
            print_trapframe(tf);
            print_pgfault(tf);
            panic("unhandled page fault.\n");
        }
        mm = pls_read(current)->mm;
    }
    //print_pgfault(tf);
    /* convert ARM error code to kernel error code */
    machine_word_t error_code = 0;
    /* bit 11 of the fault status indicates a write access */
    if (tf->tf_err & (1 << 11))
        error_code |= 0x02;     //write
    /* fault-source field != 0x4 (translation fault) => treat as protection fault */
    if ((tf->tf_err & 0xC) != 0x04)
        error_code |= 0x01;
    uint32_t badaddr = 0;
    if (tf->tf_trapno == T_PABT) {
        /* prefetch abort: the faulting address is the instruction address */
        badaddr = tf->tf_epc;
    } else {
        /* data abort: read the Fault Address Register */
        badaddr = far();
    }
    //kprintf("rrr %08x %08x\n", error_code, *(volatile uint32_t*)(VPT_BASE+4*0xe00));
    return do_pgfault(mm, error_code, badaddr);
}
/*
 * print_pgfault - log a one-line summary of an ARM page fault: fault kind
 * (instruction/data), active translation-table base, faulting address, the
 * raw fault-status bits, a decoded fault reason, access direction and pid.
 */
static inline void print_pgfault(struct trapframe *tf) {
    //print_trapframe(tf);
    uint32_t ttb = 0;
    /* read TTBR (CP15 c2) so the log shows which page table was active */
    asm volatile ("MRC p15, 0, %0, c2, c0, 0":"=r" (ttb));
    kprintf("%s page fault at (0x%08x) 0x%08x 0x%03x: %s-%s %s PID=%d\n",
            tf->tf_trapno == T_PABT ? "instruction" :
            tf->tf_trapno == T_DABT ? "data" : "unknown",
            ttb, far(), tf->tf_err & 0xFFF,
            /* bit 1 distinguishes page-level from section-level faults */
            tf->tf_err & 0x2 ? "Page" : "Section",
            /* bits [3:2] encode the fault source */
            (tf->tf_err & 0xC) == 0xC ? "Permission" :
            (tf->tf_err & 0xC) == 0x8 ? "Domain" :
            (tf->tf_err & 0xC) == 0x4 ? "Translation" : "Alignment",
            //((fsr_v & 0xC) == 0) || ((fsr_v & 0xE) == 0x4) ? "Domain invalid" : "Domain valid",
            /* bit 11 set => write access */
            (tf->tf_err & 1 << 11) ? "W" : "R",
            pls_read(current) ? pls_read(current)->pid : -1);
    /* error_code:
     * bit 0 == 0 means no page found, 1 means protection fault // translation or domain/permission
     * bit 1 == 0 means read, 1 means write // permission
     * bit 2 == 0 means kernel, 1 means user // can't know
     */
    //~ kprintf("page fault at 0x%08x: %c/%c [%s].\n", rcr2(),
    //~         (tf->tf_err & 4) ? 'U' : 'K',
    //~         (tf->tf_err & 2) ? 'W' : 'R',
    //~         (tf->tf_err & 1) ? "protection fault" : "no page found");
}
void proc_init_ap(void) { int lcpu_idx = pls_read(lcpu_idx); int lapic_id = pls_read(lapic_id); pls_write(idleproc, alloc_proc()); if (idleproc == NULL) { panic("cannot alloc idleproc.\n"); } idleproc->pid = lcpu_idx; idleproc->state = PROC_RUNNABLE; // XXX // idleproc->kstack = (uintptr_t)bootstack; idleproc->need_resched = 1; idleproc->tf = NULL; if ((idleproc->fs_struct = fs_create()) == NULL) { panic("create fs_struct (idleproc) failed.\n"); } fs_count_inc(idleproc->fs_struct); char namebuf[32]; snprintf(namebuf, 32, "idle/%d", lapic_id); set_proc_name(idleproc, namebuf); nr_process ++; pls_write(current, idleproc); assert(idleproc != NULL && idleproc->pid == lcpu_idx); }
void syscall() { uint32_t arg[5]; struct trapframe* tf = pls_read(current)->tf; int num = tf->tf_err; // SYS_xxx if (num == 0){ if( __sys_linux_entry(tf) ) goto bad_call; return; } if (num >= 0 && num < NUM_SYSCALLS) { if (syscalls[num] != NULL) { arg[0] = tf->tf_regs.reg_r[0]; // arg0 arg[1] = tf->tf_regs.reg_r[1]; // arg1 arg[2] = tf->tf_regs.reg_r[2]; // arg2 arg[3] = tf->tf_regs.reg_r[3]; // arg3 tf->tf_regs.reg_r[0] = syscalls[num](arg); // calling the system call, return value in r0 return ; } } bad_call: print_trapframe(tf); kprintf("undefined syscall %d, pid = %d, name = %s.\n", num, pls_read(current)->pid, pls_read(current)->name); do_exit(-E_KILLED); }
/*
 * trap_dispatch - route a trap to its handler based on tf->tf_trapno.
 * Page-fault failures kill user processes but panic in kernel mode;
 * unknown traps kill the current process or panic if there is none.
 */
static void trap_dispatch(struct trapframe *tf) {
    char c;
    int ret;
    switch (tf->tf_trapno) {
    case T_DEBUG:
    case T_BRKPT:
        /* fallthrough: both debug exceptions enter the monitor */
        debug_monitor(tf);
        break;
    case T_PGFLT:
        if ((ret = pgfault_handler(tf)) != 0) {
            print_trapframe(tf);
            if (pls_read(current) == NULL) {
                /* fault with no process context: unrecoverable */
                panic("handle pgfault failed. %e\n", ret);
            }
            else {
                if (trap_in_kernel(tf)) {
                    /* kernel-mode page faults must never fail */
                    panic ("handle pgfault failed in kernel mode. %e\n", ret);
                }
                kprintf("killed by kernel.\n");
                do_exit(-E_KILLED);
            }
        }
        break;
    case T_SYSCALL:
        syscall();
        break;
    case IRQ_OFFSET + IRQ_TIMER:
        ticks++;
        assert(pls_read(current) != NULL);
        run_timer_list();
        break;
    case IRQ_OFFSET + IRQ_COM1:
    case IRQ_OFFSET + IRQ_KBD:
        /* fallthrough: serial and keyboard feed the same stdin device;
         * 13 == '\r' drops into the kernel debug monitor instead */
        if ((c = cons_getc()) == 13) {
            debug_monitor(tf);
        }
        else {
            extern void dev_stdin_write(char c);
            dev_stdin_write(c);
        }
        break;
    case IRQ_OFFSET + IRQ_IDE1:
    case IRQ_OFFSET + IRQ_IDE2:
        /* do nothing */
        break;
    default:
        print_trapframe(tf);
        if (pls_read(current) != NULL) {
            kprintf("unhandled trap.\n");
            do_exit(-E_KILLED);
        }
        panic("unexpected trap in kernel.\n");
    }
}
// proc_init - set up the first kernel thread idleproc "idle" by itself and // - create the second kernel thread init_main void proc_init(void) { int i; int lcpu_idx = pls_read(lcpu_idx); int lapic_id = pls_read(lapic_id); int lcpu_count = pls_read(lcpu_count); list_init(&proc_list); list_init(&proc_mm_list); for (i = 0; i < HASH_LIST_SIZE; i ++) { list_init(hash_list + i); } pls_write(idleproc, alloc_proc()); if (idleproc == NULL) { panic("cannot alloc idleproc.\n"); } idleproc->pid = lcpu_idx; idleproc->state = PROC_RUNNABLE; // XXX // idleproc->kstack = (uintptr_t)bootstack; idleproc->need_resched = 1; idleproc->tf = NULL; if ((idleproc->fs_struct = fs_create()) == NULL) { panic("create fs_struct (idleproc) failed.\n"); } fs_count_inc(idleproc->fs_struct); char namebuf[32]; snprintf(namebuf, 32, "idle/%d", lapic_id); set_proc_name(idleproc, namebuf); nr_process ++; pls_write(current, idleproc); int pid = kernel_thread(init_main, NULL, 0); if (pid <= 0) { panic("create init_main failed.\n"); } initproc = find_proc(pid); set_proc_name(initproc, "init"); assert(idleproc != NULL && idleproc->pid == lcpu_idx); assert(initproc != NULL && initproc->pid == lcpu_count); }
/*
 * sysfile_writev - scatter/gather write: write each user iovec to fd in turn.
 * Returns the total number of bytes written; if nothing was written, returns
 * the last error code (0 for an empty iovec list).
 *
 * Fixes: the results of the two copy_from_user() calls were previously
 * ignored, so a bad user iov pointer fed uninitialized pbase/plen into
 * sysfile_write; also removed leftover "ZHKTODO" debug kprintfs that
 * dereferenced raw user-space pointers from kernel mode.
 */
int sysfile_writev(int fd, struct iovec __user * iov, int iovcnt) {
    struct mm_struct *mm = pls_read(current)->mm;
    int rcode = 0, count = 0, i;
    for (i = 0; i < iovcnt; ++i) {
        char *pbase;
        size_t plen;
        /* fetch iov_base / iov_len from user space; bail out on a bad pointer */
        if (!copy_from_user(mm, &pbase, &(iov[i].iov_base), sizeof(char *), 0) ||
            !copy_from_user(mm, &plen, &(iov[i].iov_len), sizeof(size_t), 0)) {
            rcode = -E_INVAL;
            break;
        }
        /* NOTE(review): other call sites wrap copy_from_user in
         * lock_mm/unlock_mm — confirm whether locking is needed here too. */
        rcode = sysfile_write(fd, pbase, plen);
        if (rcode < 0) {
            break;
        }
        count += rcode;
    }
    /* partial success wins over a trailing error, matching writev semantics */
    if (count == 0)
        return (rcode);
    else
        return (count);
}
/*
 * sysfile_linux_fstat64 - fstat for the Linux ABI: fetch ucore's stat for
 * fd and marshal it into a struct linux_stat64 in user space.
 * Returns 0 on success, -1 on any failure.
 */
int sysfile_linux_fstat64(int fd, struct linux_stat64 __user * buf) {
    struct mm_struct *mm = pls_read(current)->mm;
    struct stat kstat;

    if (file_fstat(fd, &kstat) != 0) {
        return -1;
    }

    struct linux_stat64 *kls = kmalloc(sizeof(struct linux_stat64));
    if (kls == NULL) {
        return -1;
    }
    memset(kls, 0, sizeof(struct linux_stat64));

    kls->st_ino = 1;
    /* ucore never check access permision */
    kls->st_mode = kstat.st_mode | 0777;
    kls->st_nlink = kstat.st_nlinks;
    kls->st_blksize = 512;
    kls->st_blocks = kstat.st_blocks;
    kls->st_size = kstat.st_size;

    int ret = 0;
    lock_mm(mm);
    if (!copy_to_user(mm, buf, kls, sizeof(struct linux_stat64))) {
        ret = -1;
    }
    unlock_mm(mm);

    kfree(kls);
    return ret;
}
// try_free_pages - calculate pressure to estimate the number(pressure<<5) of needed page frames in ucore currently, // - then call kswapd kernel thread. bool try_free_pages(size_t n) { struct proc_struct *current = pls_read(current); if (!swap_init_ok || kswapd == NULL) { return 0; } if (current == kswapd) { panic("kswapd call try_free_pages!!.\n"); } if (n >= (1 << 7)) { return 0; } pressure += n; wait_t __wait, *wait = &__wait; bool intr_flag; local_intr_save(intr_flag); { wait_init(wait, current); current->state = PROC_SLEEPING; current->wait_state = WT_KSWAPD; wait_queue_add(&kswapd_done, wait); if (kswapd->wait_state == WT_TIMER) { wakeup_proc(kswapd); } } local_intr_restore(intr_flag); schedule(); assert(!wait_in_queue(wait) && wait->wakeup_flags == WT_KSWAPD); return 1; }
// get_pid - alloc a unique pid for process static int get_pid(void) { static_assert(MAX_PID > MAX_PROCESS); struct proc_struct *proc; list_entry_t *list = &proc_list, *le; static int next_safe = MAX_PID, last_pid = MAX_PID; if (++ last_pid >= MAX_PID) { last_pid = pls_read(lcpu_count); goto inside; } if (last_pid >= next_safe) { inside: next_safe = MAX_PID; repeat: le = list; while ((le = list_next(le)) != list) { proc = le2proc(le, list_link); if (proc->pid == last_pid) { if (++ last_pid >= next_safe) { if (last_pid >= MAX_PID) { last_pid = 1; } next_safe = MAX_PID; goto repeat; } } else if (proc->pid > last_pid && next_safe > proc->pid) { next_safe = proc->pid; } } } return last_pid; }
/*
 * __sys_linux_getppid - Linux-ABI getppid: return the parent's pid,
 * or 0 when the current process has no parent.
 */
static uint32_t __sys_linux_getppid(uint32_t arg[]) {
    struct proc_struct *parent = pls_read(current)->parent;
    return (parent != NULL) ? parent->pid : 0;
}
static int pgfault_handler(struct trapframe *tf) { extern struct mm_struct *check_mm_struct; struct mm_struct *mm; if (check_mm_struct != NULL) { assert(pls_read(current) == pls_read(idleproc)); mm = check_mm_struct; } else { if (pls_read(current) == NULL) { print_trapframe(tf); print_pgfault(tf); panic("unhandled page fault.\n"); } mm = pls_read(current)->mm; } return do_pgfault(mm, tf->tf_err, rcr2()); }
/*
 * sysfile_write - write up to len bytes from the user buffer base to fd.
 * Returns the number of bytes written, or a negative error code when
 * nothing was written at all.
 */
int sysfile_write(int fd, void *base, size_t len) {
    int ret = 0;
    struct mm_struct *mm = pls_read(current)->mm;
    if (len == 0) {
        return 0;
    }
    if (!file_testfd(fd, 0, 1)) {   /* fd must be open for writing */
        return -E_INVAL;
    }
    /* for linux inode */
    if (__is_linux_devfile(fd)) {
        /* use 8byte int, in case of 64bit off_t
         * config in linux kernel */
        size_t alen = 0;
        ret = linux_devfile_write(fd, base, len, &alen);
        if (ret)
            return ret;
        return alen;
    }
    /* bounce user data through a kernel buffer, IOBUF_SIZE bytes at a time */
    void *buffer;
    if ((buffer = kmalloc(IOBUF_SIZE)) == NULL) {
        return -E_NO_MEM;
    }

    size_t copied = 0, alen;
    while (len != 0) {
        if ((alen = IOBUF_SIZE) > len) {
            alen = len;     /* final partial chunk */
        }
        lock_mm(mm);
        {
            if (!copy_from_user(mm, buffer, base, alen, 0)) {
                ret = -E_INVAL;
            }
        }
        unlock_mm(mm);
        if (ret == 0) {
            ret = file_write(fd, buffer, alen, &alen);
            if (alen != 0) {
                /* partial writes advance the cursor; retry the remainder */
                assert(len >= alen);
                base += alen, len -= alen, copied += alen;
            }
        }
        if (ret != 0 || alen == 0) {
            goto out;
        }
    }

out:
    kfree(buffer);
    if (copied != 0) {
        /* report progress even if the last chunk failed */
        return copied;
    }
    return ret;
}
/**
 * free_pages - return a contiguous run of n*PAGESIZE memory to the allocator
 * @param base the first page to be freed
 * @param n number of pages to be freed
 *
 * The manager call runs with interrupts disabled; afterwards the per-cpu
 * used_pages counter is decremented by n.
 */
void free_pages(struct Page *base, size_t n) {
    bool saved_flag;
    local_intr_save(saved_flag);
    pmm_manager->free_pages(base, n);
    local_intr_restore(saved_flag);
    pls_write(used_pages, pls_read(used_pages) - n);
}
/*
 * sys_clone - create a child sharing resources per clone_flags.
 * arg[0] = clone flags, arg[1] = child stack pointer (0 means "reuse the
 * caller's current stack pointer from the trapframe").
 */
static uint32_t sys_clone(uint32_t arg[]) {
    struct trapframe *tf = pls_read(current)->tf;
    uint32_t flags = (uint32_t) arg[0];
    uintptr_t child_stack = (uintptr_t) arg[1];

    if (child_stack == 0) {
        child_stack = tf->sp;
    }
    return do_fork(flags, child_stack, tf);
}
void syscall(void) { struct trapframe *tf = pls_read(current)->tf; uint32_t arg[5]; int num = tf->regs.gprs[1]; if (num >= 0 && num < NUM_SYSCALLS) { if (syscalls[num] != NULL) { arg[0] = tf->regs.gprs[2]; arg[1] = tf->regs.gprs[3]; arg[2] = tf->regs.gprs[4]; arg[3] = tf->regs.gprs[5]; arg[4] = tf->regs.gprs[6]; tf->regs.gprs[12] = syscalls[num] (arg); return; } } print_trapframe(tf); panic("undefined syscall %d, pid = %d, name = %s.\n", num, pls_read(current)->pid, pls_read(current)->name); }
/*
 * sysfile_read - read up to len bytes from fd into the user buffer base.
 * Returns the number of bytes read, or a negative error code when nothing
 * was read at all.
 */
int sysfile_read(int fd, void *base, size_t len) {
    int ret = 0;
    struct mm_struct *mm = pls_read(current)->mm;
    if (len == 0) {
        return 0;
    }
    if (!file_testfd(fd, 1, 0)) {   /* fd must be open for reading */
        return -E_INVAL;
    }
    /* for linux inode */
    if (__is_linux_devfile(fd)) {
        size_t alen = 0;
        ret = linux_devfile_read(fd, base, len, &alen);
        if (ret)
            return ret;
        return alen;
    }
    /* bounce data through a kernel buffer, IOBUF_SIZE bytes at a time */
    void *buffer;
    if ((buffer = kmalloc(IOBUF_SIZE)) == NULL) {
        return -E_NO_MEM;
    }

    size_t copied = 0, alen;
    while (len != 0) {
        if ((alen = IOBUF_SIZE) > len) {
            alen = len;     /* final partial chunk */
        }
        ret = file_read(fd, buffer, alen, &alen);
        if (alen != 0) {
            lock_mm(mm);
            {
                if (copy_to_user(mm, base, buffer, alen)) {
                    assert(len >= alen);
                    base += alen, len -= alen, copied += alen;
                }
                else if (ret == 0) {
                    /* data was read but could not be delivered to user space */
                    ret = -E_INVAL;
                }
            }
            unlock_mm(mm);
        }
        if (ret != 0 || alen == 0) {
            goto out;
        }
    }

out:
    kfree(buffer);
    if (copied != 0) {
        /* report progress even if the last chunk failed */
        return copied;
    }
    return ret;
}
/*
 * __ucore_copy_from_user - Linux-compatibility shim around ucore's
 * copy_from_user. Follows the Linux convention: returns 0 when the copy
 * succeeded, and the number of bytes NOT copied (here: all n) on failure.
 * (Assumes ucore's copy_from_user returns nonzero on success, as at its
 * other call sites in this file.)
 */
unsigned long __ucore_copy_from_user(void *to, const void *from, unsigned long n) {
    struct mm_struct *mm = pls_read(current)->mm;

    lock_mm(mm);
    int ok = copy_from_user(mm, to, from, n, 0);
    unlock_mm(mm);

    return ok ? 0 : n;
}
/*
 * ipc_sem_post_max - post (up) the semaphore identified by sem_id at most
 * max times, stopping early once its wait queue is empty.
 * Returns the number of posts performed, or -E_INVAL if sem_id is not in
 * the current process's undo list.
 */
int ipc_sem_post_max(sem_t sem_id, int max) {
    sem_queue_t *sq = pls_read(current)->sem_queue;
    assert(sq != NULL);

    /* look up the semaphore under the queue lock */
    down(&(sq->sem));
    sem_undo_t *semu = semu_list_search(&(sq->semu_list), sem_id);
    up(&(sq->sem));

    if (semu == NULL) {
        return -E_INVAL;
    }

    int posted = 0;
    while (posted < max && !wait_queue_empty(&(semu->sem->wait_queue))) {
        usem_up(semu->sem);
        ++posted;
    }
    return posted;
}
void syscall(void) { struct trapframe *tf = pls_read(current)->tf; uint32_t arg[5]; int num = tf->tf_regs.r4; // kprintf(" [syscall: num=%d]\n", num); if (num >= 0 && num < NUM_SYSCALLS) { if (syscalls[num] != NULL) { arg[0] = tf->tf_regs.r5; arg[1] = tf->tf_regs.r6; arg[2] = tf->tf_regs.r7; arg[3] = tf->tf_regs.r8; arg[4] = tf->tf_regs.r9; tf->tf_regs.r2 = syscalls[num](arg); return ; } } print_trapframe(tf); panic("undefined syscall %d, pid = %d, name = %s.\n", num, pls_read(current)->pid, pls_read(current)->name); }
/*
 * ipc_sem_find_or_init_with_address - find the semaphore bound to a user
 * address in the current process's sem_queue; when absent and create is
 * nonzero, build a new one and link it in.
 * Returns a semaphore id on success, -E_NO_MEM on allocation failure or
 * when not found and create == 0.
 */
static int ipc_sem_find_or_init_with_address(uintptr_t addr, int value, int create) {
    assert(pls_read(current)->sem_queue != NULL);
    sem_queue_t *sem_queue = pls_read(current)->sem_queue;

    /* search under the queue lock */
    down(&(sem_queue->sem));
    sem_t sem_id = semu_search_with_addr(&(sem_queue->semu_list), addr);
    up(&(sem_queue->sem));
    if(sem_id != -1) return sem_id;
    if(!create) return -E_NO_MEM;

    /* NOTE(review): the lock is dropped between the search above and the
     * insert below, so two racing callers could each create a semaphore for
     * the same address — confirm callers serialize this path. */
    sem_undo_t *semu;
    if((semu = semu_create_with_address(NULL, addr, value)) == NULL) {
        return -E_NO_MEM;
    }
    down(&(sem_queue->sem));
    list_add_after(&(sem_queue->semu_list), &(semu->semu_link));
    up(&(sem_queue->sem));
    return sem2semid(semu->sem);
}
/*
 * trap - common trap entry: dispatch the trapframe, maintaining a chain of
 * nested trapframes through current->tf, and on return to user mode handle
 * pending kills and rescheduling.
 */
void trap(struct trapframe *tf) {
    // used for previous projects
    if (pls_read(current) == NULL) {
        /* no process context yet (early boot): dispatch directly */
        trap_dispatch(tf);
    }
    else {
        // keep a trapframe chain in stack
        struct trapframe *otf = pls_read(current)->tf;
        pls_read(current)->tf = tf;

        /* record whether we came from kernel mode before dispatch may change state */
        bool in_kernel = trap_in_kernel(tf);

        trap_dispatch(tf);

        /* pop this frame off the chain for the enclosing trap, if any */
        pls_read(current)->tf = otf;
        if (!in_kernel) {
            /* returning to user mode: honor a pending kill, then reschedule */
            may_killed();
            if (pls_read(current)->need_resched) {
                schedule();
            }
        }
    }
}
/**
 * alloc_pages - call pmm->alloc_pages to allocate a continuing n*PAGESIZE memory
 * @param n pages to be allocated
 * @return the first page of the run, or NULL when out of memory
 *
 * On failure, try_free_pages() is asked to reclaim memory and the
 * allocation is retried until reclaim itself gives up.
 *
 * Fix: the per-cpu used_pages counter was previously incremented by n even
 * when the allocation ultimately failed (page == NULL), permanently
 * corrupting the usage accounting; the counter is now only updated on
 * success.
 */
struct Page *alloc_pages(size_t n) {
    struct Page *page;
    bool intr_flag;

try_again:
    /* the underlying manager is not interrupt-safe */
    local_intr_save(intr_flag);
    {
        page = pmm_manager->alloc_pages(n);
    }
    local_intr_restore(intr_flag);

    if (page == NULL) {
        if (try_free_pages(n)) {
            goto try_again;
        }
        /* reclaim failed: nothing was allocated, leave the counter alone */
        return NULL;
    }

    pls_write(used_pages, pls_read(used_pages) + n);
    return page;
}
int sysfile_fstat(int fd, struct stat *__stat) { struct mm_struct *mm = pls_read(current)->mm; int ret; struct stat __local_stat, *stat = &__local_stat; if ((ret = file_fstat(fd, stat)) != 0) { return ret; } lock_mm(mm); { if (!copy_to_user(mm, __stat, stat, sizeof(struct stat))) { ret = -E_INVAL; } } unlock_mm(mm); return ret; }
static int copy_path(char **to, const char *from) { struct mm_struct *mm = pls_read(current)->mm; char *buffer; if ((buffer = kmalloc(FS_MAX_FPATH_LEN + 1)) == NULL) { return -E_NO_MEM; } lock_mm(mm); if (!copy_string(mm, buffer, from, FS_MAX_FPATH_LEN + 1)) { unlock_mm(mm); goto failed_cleanup; } unlock_mm(mm); *to = buffer; return 0; failed_cleanup: kfree(buffer); return -E_INVAL; }
/*
 * wakeup_proc - make proc runnable and, unless it is the caller itself,
 * hand it to the scheduler. Waking an already-runnable process only warns.
 */
void wakeup_proc(struct proc_struct *proc) {
    assert(proc->state != PROC_ZOMBIE);     /* never wake the dead */
    bool intr_flag;
    local_intr_save(intr_flag);
    {
        if (proc->state != PROC_RUNNABLE) {
            proc->state = PROC_RUNNABLE;
            proc->wait_state = 0;
            /* NOTE(review): `current` is used bare here while the rest of
             * the file reads it via pls_read(current) — confirm that bare
             * `current` resolves to the per-cpu value in this unit. */
            if (proc != current) {
                /* pids below lcpu_count are per-cpu idle threads and are
                 * never placed on a run queue */
                assert(proc->pid >= pls_read(lcpu_count));
                sched_class_enqueue(proc);
            }
        }
        else {
            warn("wakeup runnable process.\n");
        }
    }
    local_intr_restore(intr_flag);
}
/*
 * db_time - debug dump: for every cpu, print the summed sched_info_times of
 * history entries whose pid falls in [left, right], followed (in parens) by
 * the summed sched_slices for those pids.
 */
void db_time (uint16_t left, uint16_t right) {
    kprintf("\n");
    int ncpu = pls_read(lcpu_count);
    int per_cpu_slots = PGSIZE / sizeof(uint16_t) / ncpu;
    int cpu, idx;

    for (cpu = 0; cpu < ncpu; cpu++) {
        kprintf("On CPU%d: ", cpu);

        /* accumulate ticks from this cpu's interleaved history slots */
        int tick_sum = 0;
        for (idx = 0; idx < per_cpu_slots; idx++) {
            uint16_t pid = sched_info_pid[idx*ncpu + cpu];
            if (pid >= left && pid <= right) {
                tick_sum += sched_info_times[idx*ncpu + cpu];
            }
        }
        kprintf("%4d", tick_sum);

        /* accumulate the slice pool entries for the pid range */
        int slice_sum = 0;
        for (idx = left; idx <= right; idx++) {
            slice_sum += sched_slices[cpu][idx % SLICEPOOL_SIZE];
        }
        kprintf("(%4d)\n", slice_sum);
    }
}
void db_sched (int lines) { kprintf("\n"); int lcpu_count = pls_read(lcpu_count); int i, j, k; /* Print a header */ kprintf(" "); for (i = 0; i < lcpu_count; i++) kprintf("| CPU%d ", i); kprintf("\n"); /* Print the table */ for (i = 0; i < lines; i++) { kprintf(" %4d ", i); for (k = 0; k < lcpu_count; k++) { j = sched_info_head[k] - i; if (j < 0) j += PGSIZE / sizeof(uint16_t) / lcpu_count; kprintf(" %4d(%4d) ", sched_info_pid[j*lcpu_count + k], sched_info_times[j*lcpu_count + k]); } kprintf("\n"); } }
/*
 * sys_getpid - return the pid of the calling process; takes no arguments.
 */
static uint32_t sys_getpid(uint32_t arg[]) {
    struct proc_struct *cur = pls_read(current);
    return cur->pid;
}
/*
 * sys_fork - classic fork: duplicate the caller with no shared resources,
 * using the caller's trapframe stack pointer for the child.
 */
static uint32_t sys_fork(uint32_t arg[]) {
    struct trapframe *tf = pls_read(current)->tf;
    return do_fork(0, tf->sp, tf);
}