/*
 * open() syscall: open `path` with `flags` and return a new file descriptor,
 * or a negative error code.
 */
SYSCALL_DEFINE2(open, const char*, path, int, flags) {
    // FIXME: max length
    path = (char*)user_to_kernel_check((uint32_t)path, -1, 0);

    int fd;
    int r;
    file *f;

    pushcli();

    r = file_open(path, flags, &f);
    if (r < 0) {
        popcli();
        return SYSCALL_RETURN(r);
    }

    // FIXME: 0, 1 and 2 are statically reserved for stdin/stdout/stderr.
    // It should not be done this way.
    for (fd = 3 ; fd < TASK_MAX_FILE_NR ; fd++) {
        if (task_curr->fs.files[fd] == NULL) {
            print_info(">> opened fd:%d\n", fd);
            task_curr->fs.files[fd] = f;
            // FIXME: --
            f->inode->ref_count++;
            break;
        }
    }

    if (fd == TASK_MAX_FILE_NR) {
        /*
         * Bug fix: no free slot in the descriptor table, so the file we just
         * opened must be closed here — otherwise it is leaked on every failed
         * open() once the table is full.
         */
        file_close(f);
        print_info(">> task maximum file count exceeded (32)");
        fd = -1;
    }

    popcli();
    return SYSCALL_RETURN(fd);
}
// Set up CPU's segment descriptors and task state for a given process. // If p==0, set up for "idle" state for when scheduler() is running. void setupsegs(struct proc *p) { struct cpu *c; pushcli(); c = &cpus[cpu()]; c->ts.ss0 = SEG_KDATA << 3; if(p) c->ts.esp0 = (uint)(p->kstack + KSTACKSIZE); else c->ts.esp0 = 0xffffffff; c->gdt[0] = SEG_NULL; c->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0x100000 + 64*1024-1, 0); c->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0); c->gdt[SEG_TSS] = SEG16(STS_T32A, (uint)&c->ts, sizeof(c->ts)-1, 0); c->gdt[SEG_TSS].s = 0; if(p){ c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, (uint)p->mem, p->sz-1, DPL_USER); c->gdt[SEG_UDATA] = SEG(STA_W, (uint)p->mem, p->sz-1, DPL_USER); } else { c->gdt[SEG_UCODE] = SEG_NULL; c->gdt[SEG_UDATA] = SEG_NULL; } lgdt(c->gdt, sizeof(c->gdt)); ltr(SEG_TSS << 3); popcli(); }
/*
 * Wait for a child process to exit and return the exited child's id.
 * Returns -1 if the caller has no children. The child's exit code is
 * written to the user pointer *state.
 */
SYSCALL_DEFINE1(wait, int*, state) {
    // FIXME: there is already a function that does this ...
    /* Validate the user pointer before touching it; kill the task on failure. */
    if ( task_curr->pgdir.verify_user_addr(state, 4, PTE_U) < 0 ) {
        print_warning(">> wait not verified: 0x%08x - 0x%08x\n", state, state+1);
        do_exit(111);
    }
    state = (int*)uaddr2kaddr((uint32_t)state);

    pushcli();

    if (task_curr->childs.size() == 0) {
        popif();
        return set_return(tf, -1);
    }

    /* remove the calling task from the runnable list */
    remove_from_runnable_list(task_curr);
    task_curr->state = Task::State_interruptible;
    task_curr->waiting_child = 1;
    popif();
    schedule();

    /* We resume here after an exiting child has notified us. */
    task_curr->waiting_child = 0;
    /* Pop the next exited child from the wait-notify list.
     * NOTE(review): assumes wait_notify_next was set by the child's exit
     * path before we were rescheduled — verify against notify_parent(). */
    Task *t = task_curr->wait_notify_next;
    ASSERT(t);
    task_curr->wait_notify_next = t->wait_notify_next;

    /* set the return value and the state out-parameter */
    *state = t->exit_code;
    return set_return(task_curr->registers(), t->id);
}
/*
 * Terminate the current task with exit code `code`, move it to the zombie
 * list, notify the parent, and reschedule. Never returns.
 */
asmlink void do_exit(int code) {
    pushcli();

    if (task_curr->id == 1)
        PANIC(">> exit task 1");  /* the init task must never exit */

    remove_from_runnable_list(task_curr);
    task_curr->state = Task::State_zombie;
    task_zombie_list.push_back(&task_curr->list_node);
    task_free(task_curr);
    /* NOTE(review): task_curr fields are still written below after
     * task_free(); this is only safe if task_free releases the task's
     * resources but not the Task struct itself — verify. */
    task_curr->time_end = jiffies;
    task_curr->exit_code = code;

    // FIXME: ???
    if (task_curr->exit_signal == 0)
        task_curr->exit_signal = SIGCHLD;
    notify_parent(task_curr);

    print_info(">> [%d] exit OK\n", task_curr->id);

    popif();
    schedule();
    /* schedule() must not return to a zombie task */
    PANIC("do_exit return");
}
// Return currently running process. struct proc* curproc(void) { struct proc *p; pushcli(); p = cpus[cpu()].curproc; popcli(); return p; }
// Disable interrupts so that we are not rescheduled // while reading proc from the cpu structure struct proc* myproc(void) { struct cpu *c; struct proc *p; pushcli(); c = mycpu(); p = c->proc; popcli(); return p; }
// Switch TSS and h/w page table to correspond to process p. void switchuvm(struct proc *p) { pushcli(); cpu->gdt[SEG_TSS] = SEG16(STS_T32A, &cpu->ts, sizeof(cpu->ts)-1, 0); cpu->gdt[SEG_TSS].s = 0; cpu->ts.ss0 = SEG_KDATA << 3; cpu->ts.esp0 = (uint) proc->kstack + KSTACKSIZE; ltr(SEG_TSS << 3); if (p->pgdir == 0) panic("switchuvm: no pgdir"); lcr3(v2p(p->pgdir)); // switch to new address space popcli(); }
// Switch TSS and h/w page table to correspond to process p. void switchuvm(struct proc *p) { pushcli(); //cpu->ts.esp0 = (uint)proc->kstack + KSTACKSIZE; if(p->pgdir == 0) panic("switchuvm: no pgdir"); //cprintf("before copying uvm to kvm kpgdir=%x the first entry: %x\n", kpgdir, kpgdir[0]); memmove((void *)kpgdir, (void *)p->pgdir, PGSIZE); // switch to new user address space flush_idcache(); flush_tlb(); popcli(); }
// Set up CPU's segment descriptors and current process task state. void usegment(void) { pushcli(); cpu->gdt[SEG_UCODE] = SEG(STA_X|STA_R, proc->mem, proc->sz-1, DPL_USER); cpu->gdt[SEG_UDATA] = SEG(STA_W, proc->mem, proc->sz-1, DPL_USER); cpu->gdt[SEG_TSS] = SEG16(STS_T32A, &cpu->ts, sizeof(cpu->ts)-1, 0); cpu->gdt[SEG_TSS].s = 0; cpu->ts.ss0 = SEG_KDATA << 3; cpu->ts.esp0 = (uint)proc->kstack + KSTACKSIZE; ltr(SEG_TSS << 3); popcli(); }
/*
 * kill() syscall: deliver signal `sig` to the task with id `pid`.
 * Returns 0 on success, -1 if no such task exists.
 */
SYSCALL_DEFINE2(kill, int, pid, int, sig) {
    Task *target;

    pushcli();
    target = task_id_ht.get(pid);
    popif();

    if (target == NULL)
        return SYSCALL_RETURN(-1);

    /* NOTE(review): target is dereferenced by send_signal after interrupts
     * are re-enabled; if the task can be freed concurrently this is a
     * potential use-after-free — verify task lifetime rules. */
    send_signal(sig, target);

    return SYSCALL_RETURN(0);
}
/*
 * close() syscall: close file descriptor `fd`.
 * Returns 0 on success, -1 if fd is out of range or not open.
 */
SYSCALL_DEFINE1(close, unsigned int, fd) {
    /*
     * Bug fix: reject out-of-range descriptors before indexing the file
     * table. Without this check a user-supplied fd >= TASK_MAX_FILE_NR
     * reads (and writes NULL) past the end of fs.files[].
     */
    if (fd >= TASK_MAX_FILE_NR)
        return SYSCALL_RETURN(-1);

    pushcli();

    if (task_curr->fs.files[fd] == NULL) {
        popif();
        return SYSCALL_RETURN(-1);
    }

    file_close(task_curr->fs.files[fd]);
    task_curr->fs.files[fd] = NULL;

    popif();
    return SYSCALL_RETURN(0);
}
// Switch to the user page table (TTBR0) void switchuvm (struct proc *p) { uint64 val64; pushcli(); if (p->pgdir == 0) { panic("switchuvm: no pgdir"); } val64 = (uint64) V2P(p->pgdir) | 0x00; asm("MSR TTBR0_EL1, %[v]": :[v]"r" (val64):); flush_tlb(); popcli(); }
// Acquire the lock. // Loops (spins) until the lock is acquired. // Holding a lock for a long time may cause // other CPUs to waste time spinning to acquire it. void acquire(struct spinlock *lk) { pushcli(); // disable interrupts to avoid deadlock. if(holding(lk)) panic("acquire"); // The xchg is atomic. // It also serializes, so that reads after acquire are not // reordered before it. while(xchg(&lk->locked, 1) != 0) ; // Record info about lock acquisition for debugging. lk->cpu = cpu; getcallerpcs(&lk, lk->pcs); }
// Sets up virtual memory for process p void switchuvm(Proc *p) { TaskStateDescriptor *d; pushcli(); d = (TaskStateDescriptor *)&cpu->gdt[SEG_TSS]; tsdesc(d, &cpu->ts, sizeof(cpu->ts)); cpu->ts.rsp0 = (ulong)p->kstack + KSTACKSIZE; cpu->ts.iomapbase = 0xFFFF; // disable in/out in user space ltr(SEG_TSS << 3); if (p->pgmap == nil) panic("switchuvm - no pgmap"); lcr3(v2p(p->pgmap)); popcli(); }
// Acquire the lock. // Loops (spins) until the lock is acquired. // Holding a lock for a long time may cause // other CPUs to waste time spinning to acquire it. void acquire(struct spinlock *lock) { pushcli(); if(holding(lock)) panic("acquire"); // The xchg is atomic. // It also serializes, so that reads after acquire are not // reordered before it. while(xchg(&lock->locked, 1) == 1) ; // Record info about lock acquisition for debugging. // The +10 is only so that we can tell the difference // between forgetting to initialize lock->cpu // and holding a lock on cpu 0. lock->cpu = cpu() + 10; getcallerpcs(&lock, lock->pcs); }
// TODO: fork rules need to be worked out (what is copied, what is not?)
/*
 * Fork the current task. Returns the child's id in the parent, 0 in the
 * child, and -1 on failure (with all partially-acquired resources released
 * via the goto-unwind chain below).
 */
int do_fork() {
    CLOBBERED_REGISTERS_ALL();

    /* debug only: free-memory snapshots used to assert that each unwind
     * step releases exactly what its forward step allocated */
    uint32_t mem_before_setup_vm = 0;
    uint32_t mem_before_copy_pages = 0;
    uint32_t mem_before_kernel_stack = 0;
    /* */

    int r;
    Task *t;
    uint32_t eip;
    int e = 0; // error (for the bad_fork_* labels)

    pushcli();
    // ASSERT_int_disable();

    t = (Task*)kmalloc(sizeof(Task));
    if (!t)
        goto bad_fork_task_alloc;
    /* Start the child as a byte copy of the parent, then re-init. */
    memcpy(t, task_curr, sizeof(Task));
    t->init();
    t->parent = task_curr;
    t->state = Task::State_not_runnable;

    /* -- */

    mem_before_setup_vm = mem_free();
    r = task_setup_vm(t, &task_curr->pgdir);
    if (r < 0)
        goto bad_fork_setup_vm;

    /* copy the user address space */
    mem_before_copy_pages = mem_free();
    r = t->pgdir.copy_pages(&task_curr->pgdir, MMAP_USER_BASE,
                            MMAP_USER_SHARED_MEM_BASE);
    if (r < 0)
        goto bad_fork_copy_vm_user;
    /* do not copy the shared memory region; shm_fork will handle it */
    r = t->pgdir.copy_pages(&task_curr->pgdir, MMAP_USER_SHARED_MEM_TOP,
                            MMAP_USER_TOP);
    if (r < 0)
        goto bad_fork_copy_vm_user;
    t->pgdir.count_program = task_curr->pgdir.count_program;
    t->pgdir.count_stack = task_curr->pgdir.count_stack;
    t->pgdir.start_brk = task_curr->pgdir.start_brk;
    t->pgdir.end_brk = task_curr->pgdir.end_brk;
    /* */

    /* for the IPC data structures: ipc_fork */
    r = ipc_fork(t);
    if (r < 0)
        goto bad_fork_ipc;

    r = fs_fork(t);
    if (r < 0)
        goto bad_fork_fs;

    /* copy the kernel stack */
    mem_before_kernel_stack = mem_free();
    r = t->pgdir.copy_pages(&task_curr->pgdir, MMAP_KERNEL_STACK_BASE,
                            MMAP_KERNEL_STACK_TOP);
    if (r < 0)
        goto bad_fork_copy_kernel_stack;

    /* this point executes twice: once in the parent (real eip) and once
     * when the child is first scheduled (read_eip returns 1) */
    eip = read_eip();
    if (eip == 1)
        /* in the child process popif must not be done */
        return 0;

    /* the child process's register state */
    t->k_eip = eip;
    read_reg(%esp, t->k_esp);
    read_reg(%ebp, t->k_ebp);

    /* the child process's start time */
    t->time_start = jiffies;

    /* add to the parent's child list */
    ASSERT( task_curr->childs.push_back(&t->childlist_node) );

    /* assign a process id and add to the runnable list */
    set_task_id(t);
    add_to_runnable_list(t);

    popif();
    return t->id;

/* unwind chain: each label releases what its forward step allocated and
 * falls through to release everything acquired before it */
bad_fork_copy_kernel_stack:
    if (e++ == 0)
        print_warning("!! bad_fork_copy_kernel_stack\n");
    task_free_kernel_stack(t);
    ASSERT(mem_free() == mem_before_kernel_stack);
bad_fork_fs:
    // TODO: --
bad_fork_ipc:
    // TODO: --
bad_fork_copy_vm_user:
    if (e++ == 0)
        print_warning("!! bad_fork_copy_vm_user\n");
    task_free_vm_user(t);
    ASSERT(mem_free() == mem_before_copy_pages);
bad_fork_setup_vm:
    if (e++ == 0)
        print_warning("!! bad_fork_setup_vm\n");
    task_delete_vm(t);
    ASSERT(mem_free() == mem_before_setup_vm);
    kfree(t);
    t = NULL;
bad_fork_task_alloc:
    if (e++ == 0)
        print_warning("!! bad_fork_task_alloc\n");
    ASSERT(t == NULL);
    popif();
    return -1;
}
int display_vga() { // set processor to vga mode struct regs16 regs = { .ax = 0x13}; pushcli(); pte_t original = biosmap(); int32(0x10, ®s); #if MODE_UNCHAINED display_unchained(); #endif biosunmap(original); popcli(); return 0; } int display_draw() { static int page = 0; char* page_addr = (char *)P2V(0xA0000 + page*FRAME_PIX); // move current buffer to non-visible page #if MODE_UNCHAINED int i; outb(SC_INDEX, MAP_MASK); for (i = 0; i < 4; i++) { outb(SC_DATA, 1 << i); memmove(page_addr, frame_buffer[i], FRAME_PIX); } short flip_addr = page*FRAME_PIX; // flip pages short high_addr = HIGH_ADDRESS | (flip_addr & 0xff00); short low_addr = LOW_ADDRESS | (flip_addr << 8); #ifdef VERTICAL_RETRACE while ((inb(INPUT_STATUS_1) & DISPLAY_ENABLE)); #endif outw(CRTC_INDEX, high_addr); outw(CRTC_INDEX, low_addr); #ifdef VERTICAL_RETRACE while (!(inb(INPUT_STATUS_1) & VRETRACE)); #endif // use the other page next time page = (page+1)%2; #else memmove(page_addr, frame_buffer, SCREEN_PIX); #endif return 0; } int display_text() { // set processor to text mode struct regs16 regs = { .ax = 0x03}; pushcli(); pte_t original = biosmap(); int32(0x10, ®s); biosunmap(original); popcli(); return 0; }