SYSCALL_DEFINE2(open, const char*, path, int, flags)
{
    // FIXME: check the maximum path length
    path = (char*)user_to_kernel_check((uint32_t)path, -1, 0);
    int fd;
    int r;
    file *f;

    pushcli();

    r = file_open(path, flags, &f);
    if (r < 0) {
        popcli();
        return SYSCALL_RETURN(r);
    }

    // FIXME: fds 0, 1 and 2 are hard-coded for stdin, stdout and
    // stderr. It should not be done this way.
    for (fd = 3; fd < TASK_MAX_FILE_NR; fd++) {
        if (task_curr->fs.files[fd] == NULL) {
            print_info(">> opened fd:%d\n", fd);
            task_curr->fs.files[fd] = f;
            // FIXME: -- f->inode->ref_count++;
            break;
        }
    }
    if (fd == TASK_MAX_FILE_NR) {
        // FIXME: the file returned by file_open is leaked here
        print_info(">> task maximum file count exceeded (32)\n");
        fd = -1;
    }

    popcli();
    return SYSCALL_RETURN(fd);
}
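/*
 * Every snippet in this collection brackets its critical section
 * with pushcli()/popcli(). For reference, a minimal sketch of the
 * xv6-style implementation (an assumption: the kernels above and
 * below may differ in detail; readeflags/cli/sti and the per-CPU
 * ncli/intena fields are taken from stock xv6):
 */
// pushcli/popcli are like cli/sti except that they are matched:
// it takes two popcli to undo two pushcli. Also, if interrupts
// are off, then pushcli, popcli leaves them off.
void
pushcli(void)
{
  int eflags;

  eflags = readeflags();
  cli();
  if(cpus[cpu()].ncli++ == 0)
    cpus[cpu()].intena = eflags & FL_IF;  // remember interrupt state at outermost level
}

void
popcli(void)
{
  if(readeflags()&FL_IF)
    panic("popcli - interruptible");
  if(--cpus[cpu()].ncli < 0)
    panic("popcli");
  if(cpus[cpu()].ncli == 0 && cpus[cpu()].intena)
    sti();  // re-enable only when the outermost pushcli is undone
}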
// Set up CPU's segment descriptors and task state for a given process.
// If p==0, set up for "idle" state for when scheduler() is running.
void
setupsegs(struct proc *p)
{
  struct cpu *c;

  pushcli();
  c = &cpus[cpu()];
  c->ts.ss0 = SEG_KDATA << 3;
  if(p)
    c->ts.esp0 = (uint)(p->kstack + KSTACKSIZE);
  else
    c->ts.esp0 = 0xffffffff;

  c->gdt[0] = SEG_NULL;
  c->gdt[SEG_KCODE] = SEG(STA_X|STA_R, 0, 0x100000 + 64*1024-1, 0);
  c->gdt[SEG_KDATA] = SEG(STA_W, 0, 0xffffffff, 0);
  c->gdt[SEG_TSS] = SEG16(STS_T32A, (uint)&c->ts, sizeof(c->ts)-1, 0);
  c->gdt[SEG_TSS].s = 0;
  if(p){
    c->gdt[SEG_UCODE] = SEG(STA_X|STA_R, (uint)p->mem, p->sz-1, DPL_USER);
    c->gdt[SEG_UDATA] = SEG(STA_W, (uint)p->mem, p->sz-1, DPL_USER);
  } else {
    c->gdt[SEG_UCODE] = SEG_NULL;
    c->gdt[SEG_UDATA] = SEG_NULL;
  }

  lgdt(c->gdt, sizeof(c->gdt));
  ltr(SEG_TSS << 3);
  popcli();
}
// Return currently running process.
struct proc*
curproc(void)
{
  struct proc *p;

  pushcli();
  p = cpus[cpu()].curproc;
  popcli();
  return p;
}
// Disable interrupts so that we are not rescheduled
// while reading proc from the cpu structure.
struct proc*
myproc(void)
{
  struct cpu *c;
  struct proc *p;

  pushcli();
  c = mycpu();
  p = c->proc;
  popcli();
  return p;
}
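/*
 * myproc() leans on mycpu(). For context, the stock xv6 version,
 * which resolves the running CPU through the local APIC id (copied
 * from xv6; ncpu, cpus and lapicid() are the usual xv6
 * globals/helpers):
 */
// Must be called with interrupts disabled so the caller is not
// migrated between reading the APIC id and looking it up.
struct cpu*
mycpu(void)
{
  int apicid, i;

  if(readeflags()&FL_IF)
    panic("mycpu called with interrupts enabled\n");

  apicid = lapicid();
  // APIC IDs are not guaranteed to be contiguous.
  for(i = 0; i < ncpu; ++i){
    if(cpus[i].apicid == apicid)
      return &cpus[i];
  }
  panic("unknown apicid\n");
}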
// Switch TSS and h/w page table to correspond to process p.
void
switchuvm(struct proc *p)
{
  pushcli();
  cpu->gdt[SEG_TSS] = SEG16(STS_T32A, &cpu->ts, sizeof(cpu->ts)-1, 0);
  cpu->gdt[SEG_TSS].s = 0;
  cpu->ts.ss0 = SEG_KDATA << 3;
  cpu->ts.esp0 = (uint)proc->kstack + KSTACKSIZE;
  ltr(SEG_TSS << 3);
  if(p->pgdir == 0)
    panic("switchuvm: no pgdir");
  lcr3(v2p(p->pgdir));  // switch to new address space
  popcli();
}
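/*
 * For context, the kernel-side counterpart in xv6, switchkvm(),
 * simply reloads the kernel page table (shown as in stock xv6;
 * v2p and kpgdir are the usual xv6 names):
 */
// Switch h/w page table register to the kernel-only page table,
// for when no process is running.
void
switchkvm(void)
{
  lcr3(v2p(kpgdir));  // switch to the kernel page table
}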
// Set up CPU's segment descriptors and current process task state.
void
usegment(void)
{
  pushcli();
  cpu->gdt[SEG_UCODE] = SEG(STA_X|STA_R, proc->mem, proc->sz-1, DPL_USER);
  cpu->gdt[SEG_UDATA] = SEG(STA_W, proc->mem, proc->sz-1, DPL_USER);
  cpu->gdt[SEG_TSS] = SEG16(STS_T32A, &cpu->ts, sizeof(cpu->ts)-1, 0);
  cpu->gdt[SEG_TSS].s = 0;
  cpu->ts.ss0 = SEG_KDATA << 3;
  cpu->ts.esp0 = (uint)proc->kstack + KSTACKSIZE;
  ltr(SEG_TSS << 3);
  popcli();
}
// Switch TSS and h/w page table to correspond to process p.
void
switchuvm(struct proc *p)
{
  pushcli();
  //cpu->ts.esp0 = (uint)proc->kstack + KSTACKSIZE;
  if(p->pgdir == 0)
    panic("switchuvm: no pgdir");
  //cprintf("before copying uvm to kvm kpgdir=%x the first entry: %x\n", kpgdir, kpgdir[0]);
  memmove((void *)kpgdir, (void *)p->pgdir, PGSIZE);  // switch to new user address space
  flush_idcache();
  flush_tlb();
  popcli();
}
// Switch to the user page table (TTBR0).
void
switchuvm(struct proc *p)
{
  uint64 val64;

  pushcli();
  if(p->pgdir == 0){
    panic("switchuvm: no pgdir");
  }
  val64 = (uint64)V2P(p->pgdir) | 0x00;
  asm("MSR TTBR0_EL1, %[v]" : : [v]"r" (val64) :);
  flush_tlb();
  popcli();
}
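/*
 * A sketch of what flush_tlb() might do on AArch64 (an assumption:
 * the original's implementation is not shown, and it could well be
 * narrower, e.g. per-ASID). This invalidates all stage-1 EL1 TLB
 * entries for the inner shareable domain:
 */
static inline void
flush_tlb(void)
{
  asm volatile(
    "dsb ishst\n\t"       // ensure prior page-table writes are visible
    "tlbi vmalle1is\n\t"  // invalidate all EL1 TLB entries (inner shareable)
    "dsb ish\n\t"         // wait for the invalidation to complete
    "isb"                 // resynchronize the instruction stream
    ::: "memory");
}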
// Release the lock.
void
release(struct spinlock *lock)
{
  if(!holding(lock))
    panic("release");

  lock->pcs[0] = 0;
  lock->cpu = 0xffffffff;

  // The xchg serializes, so that reads before release are
  // not reordered after it. (This reordering would be allowed
  // by the Intel manuals, but does not happen on current
  // Intel processors. The xchg being asm volatile also keeps
  // gcc from delaying the above assignments.)
  xchg(&lock->locked, 0);

  popcli();
}
// Sets up virtual memory for process p.
void
switchuvm(Proc *p)
{
  TaskStateDescriptor *d;

  pushcli();
  d = (TaskStateDescriptor *)&cpu->gdt[SEG_TSS];
  tsdesc(d, &cpu->ts, sizeof(cpu->ts));
  cpu->ts.rsp0 = (ulong)p->kstack + KSTACKSIZE;
  cpu->ts.iomapbase = 0xFFFF;  // disable in/out in user space
  ltr(SEG_TSS << 3);
  if(p->pgmap == nil)
    panic("switchuvm - no pgmap");
  lcr3(v2p(p->pgmap));
  popcli();
}
// Release the lock.
void
release(struct spinlock *lk)
{
  if(!holding(lk))
    panic("release");

  lk->pcs[0] = 0;
  lk->cpu = 0;

  // The xchg serializes, so that reads before release are
  // not reordered after it. The 1996 PentiumPro manual (Volume 3,
  // 7.2) says reads can be carried out speculatively and in
  // any order, which implies we need to serialize here.
  // But the 2007 Intel 64 Architecture Memory Ordering White
  // Paper says that Intel 64 and IA-32 will not move a load
  // after a store. So lock->locked = 0 would work here.
  // The xchg being asm volatile ensures gcc emits it after
  // the above assignments (and after the critical section).
  xchg(&lk->locked, 0);

  popcli();
}
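/*
 * For context, the matching acquire() from xv6, which performs the
 * pushcli() that release()'s popcli() undoes (a sketch taken from
 * xv6; the two release() variants above come from different
 * revisions and may pair with slightly different versions):
 */
// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
void
acquire(struct spinlock *lk)
{
  pushcli();  // disable interrupts to avoid deadlock
  if(holding(lk))
    panic("acquire");

  // The xchg is atomic; it also serializes, so that
  // reads after acquire are not reordered before it.
  while(xchg(&lk->locked, 1) != 0)
    ;

  // Record info about lock acquisition for debugging.
  lk->cpu = mycpu();
  getcallerpcs(&lk, lk->pcs);
}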
int
display_vga()
{
  // set processor to vga mode
  struct regs16 regs = { .ax = 0x13 };

  pushcli();
  pte_t original = biosmap();
  int32(0x10, &regs);
#if MODE_UNCHAINED
  display_unchained();
#endif
  biosunmap(original);
  popcli();
  return 0;
}

int
display_draw()
{
  static int page = 0;
  char* page_addr = (char *)P2V(0xA0000 + page*FRAME_PIX);

  // move current buffer to non-visible page
#if MODE_UNCHAINED
  int i;
  outb(SC_INDEX, MAP_MASK);
  for (i = 0; i < 4; i++) {
    outb(SC_DATA, 1 << i);
    memmove(page_addr, frame_buffer[i], FRAME_PIX);
  }

  // flip pages
  short flip_addr = page*FRAME_PIX;
  short high_addr = HIGH_ADDRESS | (flip_addr & 0xff00);
  short low_addr = LOW_ADDRESS | (flip_addr << 8);

#ifdef VERTICAL_RETRACE
  while ((inb(INPUT_STATUS_1) & DISPLAY_ENABLE));
#endif
  outw(CRTC_INDEX, high_addr);
  outw(CRTC_INDEX, low_addr);
#ifdef VERTICAL_RETRACE
  while (!(inb(INPUT_STATUS_1) & VRETRACE));
#endif

  // use the other page next time
  page = (page+1)%2;
#else
  memmove(page_addr, frame_buffer, SCREEN_PIX);
#endif
  return 0;
}

int
display_text()
{
  // set processor to text mode
  struct regs16 regs = { .ax = 0x03 };

  pushcli();
  pte_t original = biosmap();
  int32(0x10, &regs);
  biosunmap(original);
  popcli();
  return 0;
}
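/*
 * For reference, in chained mode 0x13 (the #else path above) the
 * frame buffer is linear: 320x200, one byte per pixel, so plotting
 * into the back buffer is just an index computation. A sketch:
 * display_putpixel is a hypothetical name, and only frame_buffer
 * comes from the code above.
 */
#define SCREEN_W 320
#define SCREEN_H 200

static void
display_putpixel(int x, int y, char color)
{
  if (x < 0 || x >= SCREEN_W || y < 0 || y >= SCREEN_H)
    return;                                // clip out-of-range writes
  frame_buffer[y * SCREEN_W + x] = color;  // copied to VGA memory by display_draw()
}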
net_interface_t*
ne2k_init(u8int bus, u8int slot)
{
  ne2k = kmalloc(sizeof(net_interface_t));
  ne2k->spin = kmalloc(sizeof(spinlock_t));
  initlock(ne2k->spin, "ne2k");
  acquire(ne2k->spin);

  pci_info_t card = pci_getinfo(bus, slot);
  if(card.device != 0x8029) {
    cprintf("\nThis is not an ne2k device!\n\n");
    // FIXME: ne2k->spin is leaked here, and the lock is never released
    kfree((char*)ne2k);
    ne2k = 0;
    return ne2k;
  }
  //u8int func = 0;
  ne2k->ioaddr = card.bar0 & ~0x3;

  // RESET
  outb(ne2k->ioaddr + 0x1F, inb(ne2k->ioaddr + 0x1F));
  // wait for reset to finish
  while((inb(ne2k->ioaddr + ISR) & 0x80) == 0);

  // set interrupt status to 0xFF
  // (note: this writes IMR; ISR looks like the intended register)
  ne2k_select_page(ne2k, 0);
  outb(ne2k->ioaddr + IMR, ISR_ALL);

  //u8int prom[32];

  // page0 CR, no rDMA, STOP
  outb(ne2k->ioaddr + CR, 0x01);
  // set to word-wide mode
  outb(ne2k->ioaddr + DCR, (1 << DCR_LS) | (1 << DCR_WTS));
  // clear DMA byte counts
  outb(ne2k->ioaddr + RBCR0, 0);
  outb(ne2k->ioaddr + RBCR1, 0);
  // clear interrupt status (note: also writes IMR rather than ISR)
  outb(ne2k->ioaddr + IMR, 0xFF);
  outb(ne2k->ioaddr + IMR, 0);
  // accept all but runt packets
  outb(ne2k->ioaddr + RCR, (1 << RCR_MON) | (1 << RCR_AM) | (1 << RCR_AB) | (1 << RCR_PRO));
  // loopback mode conf (check CRC and internal loopback)
  outb(ne2k->ioaddr + TCR, (1 << TCR_CRC) | (1 << TCR_LB0));
  // set DMA byte count
  outb(ne2k->ioaddr + RBCR0, 32);
  outb(ne2k->ioaddr + RBCR1, 0);
  // RX buffer DMA
  outb(ne2k->ioaddr + RSAR0, 0);
  outb(ne2k->ioaddr + RSAR1, 0);

  ne2k->rx_ringbuf = kmalloc(MAX_RX_BUF_SIZE * 16);
  ne2k->rx_ringbuf_phys = V2P(ne2k->rx_ringbuf);
  // TX buffer
  ne2k->tx_ringbuf = kmalloc((eth_max_frame_length + 4) * 4);
  ne2k->tx_ringbuf_phys = V2P(ne2k->tx_ringbuf);

  outb(ne2k->ioaddr + PSTART, ne2k->rx_ringbuf_phys);
  outb(ne2k->ioaddr + PSTOP, ne2k->rx_ringbuf_phys + (MAX_RX_BUF_SIZE * 16));

  // START and DMA READ
  outb(ne2k->ioaddr + CR, (1 << CR_STA) | (1 << CR_RD0));
  u16int i;
  // read MAC
  for(i = 0; i < 6; i++) {
    ne2k->mac_addr[i] = inb(ne2k->ioaddr + 0x10);
    dbg_cprintf(1, "%x:", ne2k->mac_addr[i]);
  }
  // START and DMA WRITE
  outb(ne2k->ioaddr + CR, (1 << CR_STA) | (1 << CR_RD2));
  outb(ne2k->ioaddr + CR, (1 << CR_STA) | (1 << CR_RD1));
  for(i = 0; i < 6; i++) {
    ne2k_select_page(ne2k, 1);
    outb(ne2k->ioaddr + 0x01 + i, ne2k->mac_addr[i]);
  }

  ne2k->nic_rx_config = RCR;  // also be sure that it is page0 for W, page2 for R
  ne2k->conf_accept_error = (1 << RCR_SEP) | 0xC0;  // bits 6 and 7 always 1
  ne2k->conf_accept_runt = (1 << RCR_AR) | 0xC0;
  ne2k->conf_accept_broadcast = (1 << RCR_AB) | 0xC0;
  ne2k->conf_accept_multicast = (1 << RCR_AM) | 0xC0;
  ne2k->conf_accept_my_macaddr = 0;  // always accepts
  ne2k->conf_accept_all_macaddr = (1 << RCR_PRO) | 0xC0;

  pci_reg_interrupt_handler(bus, slot, ne2k_interrupt_handler);
  // TX OK, RX OK
  outb(ne2k->ioaddr + IMR, (1 << ISR_PRX) | (1 << ISR_PTX));

  ne2k->init_success = true;
  cprintf_color(COLOR_BLACK, COLOR_LIGHT_GREEN, true, " done\n");

  //! \bug need to clear interrupts for some reason?
  popcli();
  // clear interrupt status
  outb(ne2k->ioaddr + IMR, 0);
  // accept all but runt packets
  outb(ne2k->ioaddr + RCR, (1 << RCR_MON) | (1 << RCR_AM) | (1 << RCR_AB) | (1 << RCR_PRO));
  // TX OK, RX OK
  outb(ne2k->ioaddr + IMR, (1 << ISR_PRX) | (1 << ISR_PTX));

  return ne2k;
}
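/*
 * The init code above relies on ne2k_select_page(), which is not
 * shown. A sketch of what it plausibly does, assuming the standard
 * NE2000 command-register layout (PS1:PS0 page-select bits in bits
 * 7:6 of CR); the driver's real version may differ:
 */
static void
ne2k_select_page(net_interface_t *nic, u8int page)
{
  u8int cr = inb(nic->ioaddr + CR);
  cr &= ~0xC0;               // clear PS1:PS0
  cr |= (page & 0x03) << 6;  // select register page 0-3
  outb(nic->ioaddr + CR, cr);
}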