void e1000_rxinit(struct e1000 *e)
{
	uintptr_t ptr;
	struct e1000_rx_desc *descs;

	ptr = (uintptr_t)kmalloc(sizeof(struct e1000_rx_desc)*NUM_RX_DESC + 16);
	e->rx_free = (uint8_t *)ptr;
	// Align the descriptor ring to a 16-byte boundary, as the hardware requires.
	if(ptr % 16 != 0)
		ptr = (ptr + 16) - (ptr % 16);
	descs = (struct e1000_rx_desc *)ptr;

	for(int i = 0; i < NUM_RX_DESC; i++) {
		e->rx_descs[i] = (struct e1000_rx_desc *)((uintptr_t)descs + i*16);
		e->rx_descs[i]->addr = (uint64_t)(uintptr_t)V2P(kmalloc(8192 + 16));
		e->rx_descs[i]->status = 0;
	}

	// Give the card the physical address of the descriptor ring.
	e1000_outl(e, REG_RXDESCLO, V2P(ptr));
	e1000_outl(e, REG_RXDESCHI, 0);

	// Total length of the descriptor ring in bytes.
	e1000_outl(e, REG_RXDESCLEN, NUM_RX_DESC * 16);

	// Head and tail indices. The tail must stay one behind the head, so the
	// last valid index is NUM_RX_DESC - 1, not NUM_RX_DESC.
	e1000_outl(e, REG_RXDESCHEAD, 0);
	e1000_outl(e, REG_RXDESCTAIL, NUM_RX_DESC - 1);
	e->rx_cur = 0;

	// Enable receiving. RCTL bits: EN (1), SBP (2, store bad packets),
	// LPE (5, long packets), BAM (15, accept broadcast),
	// BSEX+BSIZE=2 (25, 16-17: 8192-byte buffers), SECRC (26, strip CRC).
	uint32_t flags = (1 << 1) | (1 << 2) | (1 << 5) | (1 << 15)
	               | (2 << 16) | (1 << 25) | (1 << 26);
	e1000_outl(e, REG_RCTRL, flags);
}
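/*
 * Hedged sketch (not part of the original driver): a matching poll loop that
 * consumes filled descriptors. It assumes the legacy 16-byte descriptor
 * layout, that status bit 0 (DD) is set by the card when a packet lands, and
 * a hypothetical handle_packet() consumer.
 */
void e1000_rxpoll(struct e1000 *e)
{
	while(e->rx_descs[e->rx_cur]->status & 0x1) {        /* DD: descriptor done */
		uint8_t *buf = (uint8_t *)P2V(e->rx_descs[e->rx_cur]->addr);
		uint16_t len = e->rx_descs[e->rx_cur]->length;

		handle_packet(buf, len);                     /* hypothetical consumer */

		e->rx_descs[e->rx_cur]->status = 0;          /* hand it back to the card */
		uint16_t old_cur = e->rx_cur;
		e->rx_cur = (e->rx_cur + 1) % NUM_RX_DESC;
		e1000_outl(e, REG_RXDESCTAIL, old_cur);      /* advance tail behind head */
	}
}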
// Start the non-boot (AP) processors.
static void
startothers(void)
{
  extern uchar _binary_entryother_start[], _binary_entryother_size[];
  uchar *code;
  struct cpu *c;
  char *stack;

  // Write entry code to unused memory at 0x7000.
  // The linker has placed the image of entryother.S in
  // _binary_entryother_start.
  code = P2V(0x7000);
  memmove(code, _binary_entryother_start, (uint)_binary_entryother_size);

  for(c = cpus; c < cpus+ncpu; c++){
    if(c == cpus+cpunum())  // We've started already.
      continue;

    // Tell entryother.S what stack to use, where to enter, and what
    // pgdir to use. We cannot use kpgdir yet, because the AP processor
    // is running in low memory, so we use entrypgdir for the APs too.
    stack = kalloc();
    *(void**)(code-4) = stack + KSTACKSIZE;
    *(void**)(code-8) = mpenter;
    *(int**)(code-12) = (void *) V2P(entrypgdir);

    lapicstartap(c->apicid, V2P(code));

    // wait for cpu to finish mpmain()
    while(c->started == 0)
      ;
  }
}
/* proc_create allocates a new process and returns it. */
struct Process *proc_create(void)
{
	struct Process *proc = NULL;
	int i;
	int pid = -1;
	struct SectionTableEntry *vm = NULL;
	char *kernel_stack = NULL;
	char *user_stack = NULL;

	for (i = 0; i < PROCESS_COUNT_MAX; i++)
		if (process_table[i].state == UNUSED) {
			pid = i + 1;
			break;
		}

	if (pid == -1)
		return NULL;

	kernel_stack = kalloc();
	user_stack = kalloc();

	vm = process_vm[pid - 1];
	memset(vm, 0, SECTION_TABLE_SIZE);
	setup_kernel_vm(vm);

	/* Map the kernel stack into the new address space. */
	map_pages(vm, (struct MemoryMapping){
		KERNEL_STACK_BOTTOM,
		V2P(kernel_stack),
		V2P(kernel_stack) + PAGE_SIZE,
		AP_RW_R
	});
void frame_init()
{
	/* All frames are free to begin with */
	for(uint32_t i = 0; i < 4096; ++i) {
		frames_bitmap[i] = 0x00000000;
	}

	/* Now let's mark the kernel's frames as taken */
	uintptr_t kstart = V2P((uintptr_t)&_start);
	uintptr_t kend = V2P((uintptr_t)&_end);
	for(uintptr_t i = kstart; i < kend; i += 0x1000) {
		frame_set(i, 1);
	}

	/* We also need to mark the GPU's frames as taken.
	 * For now we're assuming we have 512MB of memory
	 * and 64MB of it is allocated to the GPU. */
	uintptr_t gpu_size = 0x04000000;   /* 64 MB */
	uintptr_t gpu_end = 0x20000000;    /* 512 MB */
	uintptr_t gpu_start = gpu_end - gpu_size;
	for(uintptr_t i = gpu_start; i < gpu_end; i += 0x1000) {
		frame_set(i, 1);
	}

	/* Mark frame 0 as taken, so null pointers *NEVER* point to anything */
	frame_set(0, 1);
}
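/*
 * Hedged sketch (the original frame_set is not shown): the helper this code
 * assumes, one bit per 4KB frame. 4096 words x 32 bits covers exactly the
 * 512MB (0x20000000 bytes) assumed above.
 */
static void frame_set(uintptr_t paddr, int taken)
{
	uint32_t frame = paddr >> 12;          /* physical frame number */
	uint32_t word = frame / 32;
	uint32_t bit  = frame % 32;

	if (taken)
		frames_bitmap[word] |= (1u << bit);
	else
		frames_bitmap[word] &= ~(1u << bit);
}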
// Allocate page tables and physical memory to grow process from oldsz to
// newsz, which need not be page aligned. Returns new size or 0 on error.
int
allocuvm(pde_t *pgdir, uint oldsz, uint newsz)
{
  char *mem;
  uint a;

  if(newsz >= KERNBASE)
    return 0;
  if(newsz < oldsz)
    return oldsz;

  a = PGROUNDUP(oldsz);
  for(; a < newsz; a += PGSIZE){
    mem = kalloc();
    if(mem == 0){
      cprintf("allocuvm out of memory\n");
      deallocuvm(pgdir, newsz, oldsz);
      return 0;
    }
    memset(mem, 0, PGSIZE);
    if(mappages(pgdir, (char*)a, PGSIZE, V2P(mem), PTE_W|PTE_U) < 0)
      panic("allocuvm: cannot create pagetable");
  }
  return newsz;
}
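/*
 * For context: in stock xv6 the sbrk() path drives allocuvm/deallocuvm
 * through growproc(), roughly like this (sketch after the upstream sources).
 */
int
growproc(int n)
{
  uint sz;

  sz = proc->sz;
  if(n > 0){
    if((sz = allocuvm(proc->pgdir, sz, sz + n)) == 0)
      return -1;
  } else if(n < 0){
    if((sz = deallocuvm(proc->pgdir, sz, sz + n)) == 0)
      return -1;
  }
  proc->sz = sz;
  switchuvm(proc);  // reload cr3 so the new mappings take effect
  return 0;
}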
// Given a parent process's page table, create a copy
// of it for a child.
pde_t*
copyuvm(pde_t *pgdir, uint sz)
{
  pde_t *d;
  pte_t *pte;
  uint pa, i, flags;
  char *mem;

  if((d = setupkvm()) == 0)
    return 0;
  for(i = 0; i < sz; i += PGSIZE){
    if((pte = walkpgdir(pgdir, (void *) i, 0)) == 0)
      panic("copyuvm: pte should exist");
    if(!(*pte & PTE_P))
      panic("copyuvm: page not present");
    pa = PTE_ADDR(*pte);
    flags = PTE_FLAGS(*pte);
    if((mem = kalloc()) == 0)
      goto bad;
    memmove(mem, (char*)P2V(pa), PGSIZE);
    if(mappages(d, (void*)i, PGSIZE, V2P(mem), flags) < 0)
      goto bad;
  }
  return d;

bad:
  freevm(d);
  return 0;
}
bool BackedMapping::Map(RefPtr<TranslationTable> aPageTable)
{
    assert(!mMapped);

    VmAddr_t virt = mBaseAddress;
    size_t numPagesLeft = mRegion->GetPageCount();

    List<Page, &Page::list_link>::Iterator pageIter = mRegion->GetPages();

    while (numPagesLeft > 0) {
        Page * p = *pageIter;

        bool mapped = aPageTable->MapPage(virt, V2P(p->base_address), mProtection);

        if (!mapped) {
            assert(false);
            // Roll back whatever portion was already mapped.
            Mapping::Unmap(aPageTable, mRegion->GetPageCount() - numPagesLeft);
            return false;
        }

        virt += PAGE_SIZE;
        numPagesLeft--;
        ++pageIter;
    }

    mMapped = true;
    return true;
}
int syscall_exit(int arg1)
{
	int pid;
	(void) arg1;

	if (current_process == NULL)
		return -1;

	/* Switch back to the kernel page table and stack before freeing
	 * the exiting process's resources. */
	set_translation_table_base((uint32_t) V2P(kernel_vm));
	__asm__ volatile("ldr sp, =kernel_stack_start");

	/* Wake up any process sleeping in wait() on this pid. */
	for (pid = 0; pid < PROCESS_COUNT_MAX; pid++) {
		struct Process *proc = &process_table[pid];
		if (proc->state == SLEEPING &&
		    proc->wait_pid == current_process->pid) {
			proc->wait_pid = -1;
			proc->state = READY;
		}
	}

	proc_free(current_process);
	current_process = NULL;
	schedule();

	return 0;
}
void pcnet_receive(struct pcnet *l)
{
	size_t len;
	uint8_t *buf;
	struct sockbuf *sb;

	/* RMD status bits: 0x8000 = OWN (card still owns the descriptor),
	 * 0x4000 = ERR, 0x0200 = STP, 0x0100 = ENP. */
	while((l->rx_descs[l->cur_rx].status & 0x8000) == 0) {
		if(!(l->rx_descs[l->cur_rx].status & 0x4000) &&
		   (l->rx_descs[l->cur_rx].status & 0x0300) == 0x0300) {
			/* A complete, error-free frame in a single buffer. */
			len = l->rx_descs[l->cur_rx].flags2 & 0xFFFF;
			buf = l->rx_buffers[l->cur_rx];

			sb = sockbuf_alloc(l->dev, len);
			kmemcpy(sb->data, buf, len);
			network_received(sb);
		}

		/* Reset the descriptor and hand it back to the card. */
		l->rx_descs[l->cur_rx].addr = V2P(l->rx_buffers[l->cur_rx]);
		l->rx_descs[l->cur_rx].status = 0x8000;
		l->rx_descs[l->cur_rx].len = -2048;   /* buffer size, two's complement */
		l->rx_descs[l->cur_rx].flags2 = 0;

		l->cur_rx++;
		if(l->cur_rx == 8)
			l->cur_rx = 0;
	}
}
void e1000_txinit(struct e1000 *e)
{
	uintptr_t ptr;
	struct e1000_tx_desc *descs;

	ptr = (uintptr_t)kmalloc(sizeof(struct e1000_tx_desc)*NUM_TX_DESC + 16);
	e->tx_free = (uint8_t *)ptr;
	// Align the descriptor ring to a 16-byte boundary.
	if(ptr % 16 != 0)
		ptr = (ptr + 16) - (ptr % 16);
	descs = (struct e1000_tx_desc *)ptr;

	for(int i = 0; i < NUM_TX_DESC; i++) {
		e->tx_descs[i] = (struct e1000_tx_desc *)((uintptr_t)descs + i*16);
		e->tx_descs[i]->addr = 0;
		e->tx_descs[i]->cmd = 0;
	}

	// Give the card the physical address of the descriptor ring.
	e1000_outl(e, REG_TXDESCLO, V2P(ptr));
	e1000_outl(e, REG_TXDESCHI, 0);

	// Total length of the descriptor ring in bytes.
	e1000_outl(e, REG_TXDESCLEN, NUM_TX_DESC * 16);

	// Head == tail means the ring is empty; start both at 0.
	e1000_outl(e, REG_TXDESCHEAD, 0);
	e1000_outl(e, REG_TXDESCTAIL, 0);
	e->tx_cur = 0;

	// TCTL: EN (bit 1) | PSP (bit 3, pad short packets).
	e1000_outl(e, REG_TCTRL, (1 << 1) | (1 << 3));
}
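/*
 * For reference, the legacy 16-byte descriptor layouts these rings assume
 * (per the Intel 8254x manuals; the field names here are our own).
 */
struct e1000_rx_desc {
	uint64_t addr;       /* physical address of the receive buffer */
	uint16_t length;
	uint16_t checksum;
	uint8_t  status;     /* bit 0 = DD (descriptor done) */
	uint8_t  errors;
	uint16_t special;
} __attribute__((packed));

struct e1000_tx_desc {
	uint64_t addr;       /* physical address of the packet data */
	uint16_t length;
	uint8_t  cso;
	uint8_t  cmd;        /* bit 0 = EOP, bit 1 = IFCS, bit 3 = RS */
	uint8_t  status;     /* bit 0 = DD once the packet is sent */
	uint8_t  css;
	uint16_t special;
} __attribute__((packed));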
void JniWebView::SetRect(int id, const Rect& controlRect)
{
    // Convert the rect from virtual to physical screen coordinates.
    Rect rect = V2P(controlRect);

    jmethodID mid = GetMethodID("SetRect", "(IFFFF)V");
    if (mid)
    {
        GetEnvironment()->CallStaticVoidMethod(GetJavaClass(), mid, id,
                                               rect.x, rect.y, rect.dx, rect.dy);
    }
}
void JniWebView::Initialize(WebViewControl* control, int id, const Rect& controlRect)
{
    controls[id] = control;
    Rect rect = V2P(controlRect);

    jmethodID mid = GetMethodID("Initialize", "(IFFFF)V");
    if (mid)
        GetEnvironment()->CallStaticVoidMethod(GetJavaClass(), mid, id,
                                               rect.x, rect.y, rect.dx, rect.dy);
}
// check if the given address has a valid RSDP header
unsigned int *acpiCheckRSDPtr(unsigned int *ptr)
{
	char sig[8] = {'R', 'S', 'D', ' ', 'P', 'T', 'R', ' '};
	struct RSDPtr *rsdp = (struct RSDPtr *) ptr;
	u8int *bptr;
	u8int check = 0;
	int i;

	cprintf("\nrsdp addr 0x%x...", rsdp);

	// Compare the signature in place. Both sig and rsdp are virtual
	// addresses here, so no V2P conversion belongs in this comparison.
	if (memcmp(sig, rsdp, 8) == 0) {
		// The bytes of a valid RSDP sum to zero (mod 256).
		bptr = (u8int *) ptr;
		for (i = 0; i < sizeof(struct RSDPtr); i++) {
			check += *bptr;
			bptr++;
		}

		// found a valid rsdp
		if (check == 0) {
			cprintf("found valid ");
			if (rsdp->Revision == 0)
				cprintf("acpi 1");
			else
				cprintf("acpi 2");
			return (unsigned int *) rsdp->RsdtAddress;
		}
	}

	return 0;
}
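/*
 * For reference, the ACPI 1.0 RSDP layout this checker relies on (the struct
 * itself is not shown in the snippet; this sketch follows the ACPI spec).
 */
struct RSDPtr {
	char Signature[8];        /* "RSD PTR " */
	u8int CheckSum;
	char OemID[6];
	u8int Revision;           /* 0 = ACPI 1.0, 2 = ACPI 2.0+ */
	unsigned int RsdtAddress; /* physical address of the RSDT */
} __attribute__((packed));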
size_t e1000_send(struct network_dev *dev, uint8_t *_buf, size_t length)
{
	struct e1000 *e = dev->device;

	e->tx_descs[e->tx_cur]->addr = (uint64_t)(uintptr_t)V2P(_buf);
	e->tx_descs[e->tx_cur]->length = length;
	// cmd: RS (bit 3) | IFCS (bit 1) | EOP (bit 0)
	e->tx_descs[e->tx_cur]->cmd = ((1 << 3) | 3);

	uint8_t old_cur = e->tx_cur;
	e->tx_cur = (e->tx_cur + 1) % NUM_TX_DESC;
	e1000_outl(e, REG_TXDESCTAIL, e->tx_cur);

	// Spin until the card reports the descriptor done (DD).
	while(!(e->tx_descs[old_cur]->status & 0xff))
		;

	return 0;
}
void rtl8139_start(struct rtl8139 *rtl)
{
	rtl->tx_cur = 0;

	// Software reset (CR = 0x37), then wait for the RST bit to clear.
	rtl_outb(rtl, 0x37, 0x10);
	while((rtl_inb(rtl, 0x37) & 0x10) != 0)
		;

	// Point the card at the receive buffer (RBSTART = 0x30).
	kmemset(rtl->rx_buffer, 0, (8192*8)+16+1500);
	rtl_outl(rtl, 0x30, (uintptr_t)V2P(rtl->rx_buffer));

	// Enable the receiver and transmitter (CR: RE | TE).
	rtl_outb(rtl, 0x37, 0xc);

	// Program the four transmit start-address registers (TSAD0-3 at 0x20).
	for(int i = 0; i < 4; i++) {
		rtl_outl(rtl, 0x20 + i*4,
		         (uintptr_t)V2P(rtl->tx_buffers) + i*(8192+16+1500));
	}

	//TODO: need to register pci IRQs instead of doing it directly
	//interrupt_register(32 + rtl->pci_hdr->int_line, &rtl_handler);
	pci_register_irq(rtl->pci, &rtl_handler, rtl);

	// RCR (0x44): WRAP | accept broadcast (AB) | accept physical match (APM).
	rtl_outl(rtl, 0x44, (1 << 7) | 8 | (1 << 1));

	// IMR (0x3c): ROK | TOK interrupts.
	rtl_outw(rtl, 0x3c, 0x5);

	// Read the MAC address out of IDR0-5.
	for(int i = 0; i < 6; i++)
		rtl->mac[i] = rtl_inb(rtl, i);
}
// Load the initcode into address 0 of pgdir.
// sz must be less than a page.
void
inituvm(pde_t *pgdir, char *init, uint sz)
{
  char *mem;

  if(sz >= PGSIZE)
    panic("inituvm: more than a page");
  if((mem = kalloc()) == 0)
    panic("inituvm: cannot allocate memory");
  memset(mem, 0, PGSIZE);
  if(mappages(pgdir, 0, PGSIZE, V2P(mem), PTE_W|PTE_U) < 0)
    panic("inituvm: cannot create pagetable");
  memmove(mem, init, sz);
}
// Switch TSS and h/w page table to correspond to process p.
void
switchuvm(struct proc *p)
{
  pushcli();
  cpu->gdt[SEG_TSS] = SEG16(STS_T32A, &cpu->ts, sizeof(cpu->ts)-1, 0);
  cpu->gdt[SEG_TSS].s = 0;
  cpu->ts.ss0 = SEG_KDATA << 3;
  cpu->ts.esp0 = (uint)proc->kstack + KSTACKSIZE;
  ltr(SEG_TSS << 3);
  if(p->pgdir == 0)
    panic("switchuvm: no pgdir");
  lcr3(V2P(p->pgdir));  // switch to new address space
  popcli();
}
// Switch to the user page table (TTBR0)
void switchuvm(struct proc *p)
{
	uint64 val64;

	pushcli();
	if (p->pgdir == 0) {
		panic("switchuvm: no pgdir");
	}

	// Load the physical address of the process page table into TTBR0_EL1
	// (ASID 0), then invalidate stale translations.
	val64 = (uint64) V2P(p->pgdir) | 0x00;
	asm("MSR TTBR0_EL1, %[v]" : : [v]"r" (val64) :);
	flush_tlb();
	popcli();
}
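/*
 * Hedged sketch (the original flush_tlb is not shown): a typical AArch64
 * EL1 full TLB invalidate with the required barriers.
 */
static inline void flush_tlb(void)
{
	asm volatile(
		"dsb ishst\n\t"      /* complete outstanding page-table writes */
		"tlbi vmalle1\n\t"   /* invalidate all stage-1 EL1 TLB entries */
		"dsb ish\n\t"        /* wait for the invalidate to finish */
		"isb"                /* resynchronize the instruction stream */
		::: "memory");
}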
size_t rtl8139_send(struct network_dev *dev, uint8_t *_buf, size_t length)
{
	struct rtl8139 *rtl = dev->device;
	void *tx_buffer = (void *)(rtl->tx_buffers + 8192*rtl->tx_cur);

	// Frames shorter than 60 bytes must be padded to the Ethernet minimum.
	kmemset(tx_buffer, 0, (length < 60) ? 60 : length);
	kmemcpy(tx_buffer, _buf, length);
	if(length < 60)
		length = 60;

	// TSAD (0x20 + n*4): physical address of the frame.
	rtl_outl(rtl, 0x20 + rtl->tx_cur*4, V2P(tx_buffer));
	// TSD (0x10 + n*4): size plus an early-TX threshold of 48*32 bytes;
	// writing this clears OWN and starts the transmit.
	rtl_outl(rtl, 0x10 + rtl->tx_cur*4, length | (48 << 16));

	rtl->tx_cur++;
	rtl->tx_cur %= 4;

	return length;
}
size_t pcnet_send(struct network_dev *dev, uint8_t *_buf, size_t length)
{
	struct pcnet *l = dev->device;

	kmemcpy(l->tx_buffers[l->cur_tx], _buf, length);

	l->tx_descs[l->cur_tx].addr = (uintptr_t)V2P(l->tx_buffers[l->cur_tx]);
	l->tx_descs[l->cur_tx].flags2 = 0;
	// 0x8300 = OWN | STP | ENP: hand the card a whole frame in one buffer.
	l->tx_descs[l->cur_tx].status |= 0x8300;
	// Buffer length is stored in two's complement.
	l->tx_descs[l->cur_tx].len = -length;

	// Set TDMD (bit 3) in CSR0 so the card polls the transmit ring now.
	uint16_t csr = pcnet_csr_inl(l, 0);
	pcnet_csr_outl(l, 0, csr | 8);

	l->cur_tx++;
	if(l->cur_tx == 8)
		l->cur_tx = 0;

	return length;
}
//PAGEBREAK: 21
// Free the page of physical memory pointed at by v,
// which normally should have been returned by a
// call to kalloc().  (The exception is when
// initializing the allocator; see kinit above.)
void
kfree(char *v)
{
  struct run *r;

  if((uint)v % PGSIZE || v < end || V2P(v) >= PHYSTOP)
    panic("kfree");

  // Fill with junk to catch dangling refs.
  memset(v, 1, PGSIZE);

  if(kmem.use_lock)
    acquire(&kmem.lock);
  // This allocator keeps its run structures in a side array indexed
  // by physical page number, rather than inside the free pages.
  r = &kmem.runs[V2P(v) / PGSIZE];
  r->next = kmem.freelist;
  kmem.freelist = r;
  if(kmem.use_lock)
    release(&kmem.lock);
}
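/*
 * Hedged sketch (not part of the original): the matching kalloc() for this
 * side-array design. The page address is recovered from the run's index in
 * kmem.runs, the inverse of the V2P(v) / PGSIZE lookup in kfree().
 */
char*
kalloc(void)
{
  struct run *r;

  if(kmem.use_lock)
    acquire(&kmem.lock);
  r = kmem.freelist;
  if(r)
    kmem.freelist = r->next;
  if(kmem.use_lock)
    release(&kmem.lock);
  return r ? (char*)P2V((r - kmem.runs) * PGSIZE) : 0;
}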
// Return the address of the PTE in page table pgdir
// that corresponds to virtual address va. If alloc!=0,
// create any required page table pages.
static pte_t *
walkpgdir(pde_t *pgdir, const void *va, int alloc)
{
  pde_t *pde;
  pte_t *pgtab;

  pde = &pgdir[PDX(va)];
  if(*pde & PTE_P){
    pgtab = (pte_t*)P2V(PTE_ADDR(*pde));
  } else {
    if(!alloc || (pgtab = (pte_t*)kalloc()) == 0)
      return 0;
    // Make sure all those PTE_P bits are zero.
    memset(pgtab, 0, PGSIZE);
    // The permissions here are overly generous, but they can
    // be further restricted by the permissions in the page table
    // entries, if necessary.
    *pde = V2P(pgtab) | PTE_P | PTE_W | PTE_U;
  }
  return &pgtab[PTX(va)];
}
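/*
 * For context: walkpgdir's main caller in xv6, mappages(), which installs
 * PTEs for a run of pages (sketch after the upstream sources).
 */
static int
mappages(pde_t *pgdir, void *va, uint size, uint pa, int perm)
{
  char *a, *last;
  pte_t *pte;

  a = (char*)PGROUNDDOWN((uint)va);
  last = (char*)PGROUNDDOWN(((uint)va) + size - 1);
  for(;;){
    if((pte = walkpgdir(pgdir, a, 1)) == 0)
      return -1;
    if(*pte & PTE_P)
      panic("remap");
    *pte = pa | perm | PTE_P;
    if(a == last)
      break;
    a += PGSIZE;
    pa += PGSIZE;
  }
  return 0;
}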
void userinit(void)
{
	struct proc *p;
	extern char _binary_kernel_initcode_start[], _binary_kernel_initcode_size[];
	char *mem;

	p = allocproc();
	assert(p);
	initproc = p;

	if((p->pgdir = setupkvm(kalloc)) == NULL)
		panic("userinit: out of memory?");
	if((int)_binary_kernel_initcode_size > PGSIZE)
		panic("inituvm: initcode more than a page");

	// Map one zeroed page at virtual address 0 and copy initcode into it.
	mem = kalloc();
	memset(mem, 0, PGSIZE);
	mappages(p->pgdir, 0, PGSIZE, V2P(mem), PTE_W|PTE_U, kalloc);
	memcpy(mem, (char *)_binary_kernel_initcode_start,
	       (int)_binary_kernel_initcode_size);

	safestrcpy(p->name, "initcode", sizeof(p->name));
	p->brk = PGSIZE;

	// Set up the trap frame so the process starts in user mode at eip 0.
	memset(p->tf, 1, sizeof(*p->tf));
	p->tf->cs = (SEG_UCODE << 3) | DPL_USER;
	p->tf->ss = (SEG_UDATA << 3) | DPL_USER;
	p->tf->ds = p->tf->es = p->tf->fs = p->tf->gs = p->tf->ss;
	p->tf->eflags = FL_IF;
	p->tf->esp = PGSIZE;
	p->tf->eip = 0;  // beginning of initcode

	p->counter = p->priority = 10;
	p->state = RUNNABLE;
}
void schedule()
{
	struct proc *p;
	int c, idle;

	while (1) {
		c = 0;
		idle = 1;

		while (1) {
			// Pick the RUNNABLE process with the largest remaining counter.
			for (p = ptable; p < ptable + NPROC; p++)
				if (p->state == RUNNABLE) {
					if (p != ptable + TASK_IDLE)
						idle = 0;
					if (p->counter > c) {
						c = p->counter;
						current = p;
					}
				}
			if ((current == ptable + TASK_IDLE && idle) ||
			    current != ptable + TASK_IDLE)
				break;
			// Only the idle task was chosen while others are runnable:
			// recharge every process's time slice and try again.
			for (p = ptable; p < ptable + NPROC; p++)
				p->counter = p->priority;
		}

		// current has been chosen; switch kernel stack and page table.
		tss.esp0 = (u32)current->kstack + KSTACKSZ;
		lcr3(V2P(current->pgdir));
		swtch(&scheduler, current->context);
	}
}
void frame_free(uintptr_t frame)
{
	/* The caller passes a kernel virtual address; convert it to the
	 * physical frame before clearing its bit in the bitmap. */
	frame_set(V2P(frame), 0);
}
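/*
 * Hedged sketch (not in the original): a matching frame_alloc() that scans
 * the bitmap for a clear bit and returns the frame's kernel virtual address,
 * mirroring the V2P conversion in frame_free(). Assumes P2V is available.
 */
uintptr_t frame_alloc(void)
{
	for (uint32_t word = 0; word < 4096; ++word) {
		if (frames_bitmap[word] == 0xFFFFFFFF)
			continue;  /* all 32 frames in this word are taken */
		for (uint32_t bit = 0; bit < 32; ++bit) {
			if (!(frames_bitmap[word] & (1u << bit))) {
				uintptr_t paddr = ((uintptr_t)word * 32 + bit) << 12;
				frame_set(paddr, 1);
				return P2V(paddr);
			}
		}
	}
	return 0;  /* out of physical memory */
}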
// 0xfe000000..0: mapped direct (devices such as ioapic)
//
// The kernel allocates physical memory for its heap and for user memory
// between V2P(end) and the end of physical memory (PHYSTOP)
// (directly addressable from end..P2V(PHYSTOP)).

// This table defines the kernel's mappings, which are present in
// every process's page table.
static struct kmap {
  void *virt;
  uint phys_start;
  uint phys_end;
  int perm;
} kmap[] = {
 { (void*)KERNBASE, 0,             EXTMEM,    PTE_W}, // I/O space
 { (void*)KERNLINK, V2P(KERNLINK), V2P(data), 0},     // kern text+rodata
 { (void*)data,     V2P(data),     PHYSTOP,   PTE_W}, // kern data+memory
 { (void*)DEVSPACE, DEVSPACE,      0,         PTE_W}, // more devices
};

// Set up kernel part of a page table.
pde_t*
setupkvm(void)
{
  pde_t *pgdir;
  struct kmap *k;

  if((pgdir = (pde_t*)kalloc()) == 0)
    return 0;
  memset(pgdir, 0, PGSIZE);
  if (p2v(PHYSTOP) > (void*)DEVSPACE)
    panic("PHYSTOP too high");
  for(k = kmap; k < kmap + NELEM(kmap); k++)
    if(mappages(pgdir, k->virt, k->phys_end - k->phys_start,
                (uint)k->phys_start, k->perm) < 0)
      return 0;
  return pgdir;
}
// 0..USERTOP: user memory (canonical lower half; not included in kmap below)
//
// KERNBASE..KERNBASE+EXTMEM -> 0..EXTMEM: I/O space (e.g. VGA), bootloader stack, etc.
// KERNBASE+EXTMEM..data -> EXTMEM..v2p(data): kernel text and rodata. Read-only.
// data..KERNBASE+PHYSTOP -> v2p(data)..PHYSTOP: kernel data and 1-1 mapping with free physical memory
// KERNBASE+DEVSPACE..KERNBASE+DEVTOP -> DEVSPACE..DEVTOP: memory mapped devices
typedef struct Kmap Kmap;
static struct Kmap {
  void *addr;
  uintptr phys_start;
  uintptr phys_end;
  int perm;
} kmap[] = {
  {(void *)KERNBASE,            0,         EXTMEM,    PTE_W}, // memory mapped devices
  {(void *)KERNBASE+EXTMEM,     EXTMEM,    V2P(data), 0},     // Kernel read only data
  {(void *)data,                V2P(data), PHYSTOP,   PTE_W}, // kernel data + physical pages
  {(void *)(KERNBASE+DEVSPACE), DEVSPACE,  DEVTOP,    PTE_W}, // more devices
};

Pml4e *
setupkvm(void)
{
  Pml4e *pgmap = kalloc();
  Kmap *k;

  if (pgmap == nil)
    return nil;
  memzero(pgmap, PGSIZE);
int init(void)
{
	// find_pci is a GCC nested function used as a pci_scan callback.
	void find_pci(uint32_t device, uint16_t venid, uint16_t devid, void* data) {
		if((venid == 0x1022) && (devid == 0x2000))
			*((uint32_t*) data) = device;
	}

	int pci = 0;
	pci_scan(&find_pci, -1, &pci);

	if(!pci) {
		kprintf(ERROR "pcnet: pci device not found!\n");
		return -1;
	}

	struct pcnet* dev = (struct pcnet*) kmalloc(sizeof(struct pcnet), GFP_KERNEL);
	struct ethif* eth = (struct ethif*) kmalloc(sizeof(struct ethif), GFP_KERNEL);
	memset(dev, 0, sizeof(struct pcnet));
	memset(eth, 0, sizeof(struct ethif));

	eth->internals = (void*) dev;
	dev->pci = pci;
	spinlock_init(&dev->lock);

	dev->buf = (uintptr_t) kvalloc(0x10000, GFP_KERNEL);
	dev->bufp = (uintptr_t) V2P((void*) dev->buf);

	// Enable bus mastering in the PCI command register if it is not already set.
	uint16_t cmd = pci_read_field(dev->pci, PCI_COMMAND, 4);
	if(!(cmd & (1 << 2)))
		pci_write_field(dev->pci, PCI_COMMAND, 4, cmd | (1 << 2));

	dev->irq = pci_read_field(dev->pci, PCI_INTERRUPT_LINE, 1);
	dev->io  = pci_read_field(dev->pci, PCI_BAR0, 4) & 0xFFFFFFF0;
	dev->mem = pci_read_field(dev->pci, PCI_BAR1, 4) & 0xFFFFFFF0;

	kprintf(LOG "pcnet: irq: %d, io: %p, mem: %p\n", dev->irq, dev->io, dev->mem);

	// The MAC address is readable from the first six I/O ports.
	int i;
	for(i = 0; i < 6; i++)
		eth->address[i] = inb(dev->io + i);

	pcnet_irqno = dev->irq; /* FIXME: fix current_irq */
	irq_enable(dev->irq, pcnet_irq);
	irq_set_data(dev->irq, dev);

	eth->low_level_init        = pcnet_init;
	eth->low_level_startoutput = pcnet_startoutput;
	eth->low_level_output      = pcnet_output;
	eth->low_level_endoutput   = pcnet_endoutput;
	eth->low_level_startinput  = pcnet_startinput;
	eth->low_level_input       = pcnet_input;
	eth->low_level_endinput    = pcnet_endinput;
	eth->low_level_input_nomem = pcnet_input_nomem;

	IP4_ADDR(&eth->ip, 10, 0, 2, 15);
	IP4_ADDR(&eth->nm, 255, 255, 255, 0);
	IP4_ADDR(&eth->gw, 10, 0, 2, 2);

	struct netif* netif = (struct netif*) kmalloc(sizeof(struct netif), GFP_KERNEL);
	dev->netif = netif;

	if(!netif_add(netif, &eth->ip, &eth->nm, &eth->gw, eth, ethif_init, ethernet_input)) {
		kprintf(ERROR "pcnet: netif_add() failed\n");
		kfree(dev);
		kfree(eth);
		kfree(netif);
		return -1;
	}

	netif_set_default(netif);
	netif_set_up(netif);

	return 0;
}
void gmm_fisher_save_soft_assgn(int n, const float *v, const gmm_t * g,
                                int flags, float *dp_dlambda,
                                float *word_total_soft_assignment)
{
  long d = g->d, k = g->k;
  float *p = fvec_new(n * k);
  long i, j, l;
  long ii = 0;
  float *vp = NULL;     /* v*p */
  float *sum_pj = NULL; /* sum of p's for a given j */

  gmm_compute_p(n, v, g, p, flags | GMM_FLAGS_W);

#define P(j,i) p[(i)*k+(j)]
#define V(l,i) v[(i)*d+(l)]
#define MU(l,j) g->mu[(j)*d+(l)]
#define SIGMA(l,j) g->sigma[(j)*d+(l)]
#define VP(l,j) vp[(j)*d+(l)]

  /* Save total soft assignment per centroid */
  if (word_total_soft_assignment != NULL) {
    for (j = 0; j < k; j++) {
      double sum = 0;
      for (i = 0; i < n; i++) {
        sum += P(j,i);
      }
      if (n != 0) {
        word_total_soft_assignment[j] = (float)(sum/n);
      } else {
        word_total_soft_assignment[j] = 0.0;
      }
    }
  }

  if (flags & GMM_FLAGS_W) {
    for (j = 1; j < k; j++) {
      double accu = 0;
      for (i = 0; i < n; i++)
        accu += P(j,i)/g->w[j] - P(0,i)/g->w[0];
      /* normalization */
      double f = n*(1/g->w[j] + 1/g->w[0]);
      dp_dlambda[ii++] = accu/sqrt(f);
    }
  }

  if (flags & GMM_FLAGS_MU) {
    float *dp_dmu = dp_dlambda + ii;
#define DP_DMU(l,j) dp_dmu[(j)*d+(l)]
    if (0) { /* simple and slow */
      for (j = 0; j < k; j++) {
        for (l = 0; l < d; l++) {
          double accu = 0;
          for (i = 0; i < n; i++)
            accu += P(j,i) * (V(l,i)-MU(l,j)) / SIGMA(l,j);
          DP_DMU(l,j) = accu;
        }
      }
    } else { /* complicated and fast */
      /* precompute tables that may be useful for sigma too */
      vp = fvec_new(k * d);
      fmat_mul_tr(v, p, d, k, n, vp);

      sum_pj = fvec_new(k);
      for (j = 0; j < k; j++) {
        double sum = 0;
        for (i = 0; i < n; i++)
          sum += P(j,i);
        sum_pj[j] = sum;
      }
      for (j = 0; j < k; j++) {
        for (l = 0; l < d; l++)
          DP_DMU(l,j) = (VP(l,j) - MU(l,j) * sum_pj[j]) / SIGMA(l,j);
      }
    }
    /* normalization */
    if (!(flags & GMM_FLAGS_NO_NORM)) {
      for (j = 0; j < k; j++)
        for (l = 0; l < d; l++) {
          float nf = sqrt(n*g->w[j]/SIGMA(l,j));
          if (nf > 0)
            DP_DMU(l,j) /= nf;
        }
    }
#undef DP_DMU
    ii += d*k;
  }

  if (flags & (GMM_FLAGS_SIGMA | GMM_FLAGS_1SIGMA)) {
    if (flags & GMM_FLAGS_1SIGMA) { /* fast not implemented for 1 sigma */
      for (j = 0; j < k; j++) {
        double accu2 = 0;
        for (l = 0; l < d; l++) {
          double accu = 0;
          for (i = 0; i < n; i++)
            accu += P(j,i) * (sqr(V(l,i)-MU(l,j)) / SIGMA(l,j) - 1) / sqrt(SIGMA(l,j));
          if (flags & GMM_FLAGS_SIGMA) {
            double f = flags & GMM_FLAGS_NO_NORM ? 1.0 : 2*n*g->w[j]/SIGMA(l,j);
            dp_dlambda[ii++] = accu/sqrt(f);
          }
          accu2 += accu;
        }
        if (flags & GMM_FLAGS_1SIGMA) {
          double f = flags & GMM_FLAGS_NO_NORM ? 1.0 : 2*d*n*g->w[j]/SIGMA(0,j);
          dp_dlambda[ii++] = accu2/sqrt(f);
        }
      }
    } else { /* fast and complicated */
      assert(flags & GMM_FLAGS_SIGMA);
      float *dp_dsigma = dp_dlambda + ii;

      if (!vp) {
        vp = fvec_new(k * d);
        fmat_mul_tr(v, p, d, k, n, vp);
      }

      if (!sum_pj) {
        sum_pj = fvec_new(k);
        for (j = 0; j < k; j++) {
          double sum = 0;
          for (i = 0; i < n; i++)
            sum += P(j,i);
          sum_pj[j] = sum;
        }
      }

      float *v2 = fvec_new(n * d);
      for (i = n*d-1; i >= 0; i--)
        v2[i] = v[i] * v[i];
      float *v2p = fvec_new(k * d);
      fmat_mul_tr(v2, p, d, k, n, v2p);
      free(v2);

#define V2P(l,j) v2p[(j)*d+(l)]
#define DP_DSIGMA(i,j) dp_dsigma[(i)+(j)*d]

      for (j = 0; j < k; j++) {
        for (l = 0; l < d; l++) {
          double accu;
          accu  = V2P(l,j);
          accu += VP(l,j) * (-2 * MU(l,j));
          accu += sum_pj[j] * (sqr(MU(l,j)) - SIGMA(l,j));

          /* normalization */
          double f;
          if (flags & GMM_FLAGS_NO_NORM) {
            f = pow(SIGMA(l,j), -1.5);
          } else {
            f = 1 / (SIGMA(l,j) * sqrt(2*n*g->w[j]));
          }
          DP_DSIGMA(l,j) = accu * f;
        }
      }

      free(v2p);
#undef DP_DSIGMA
#undef V2P
      ii += d * k;
    }
  }

  assert(ii == gmm_fisher_sizeof(g, flags));
#undef P
#undef V
#undef MU
#undef SIGMA
  free(p);
  free(sum_pj);
  free(vp);
}
// Switch h/w page table register to the kernel-only page table,
// for when no process is running.
void
switchkvm(void)
{
  lcr3(V2P(kpgdir));  // switch to the kernel page table
}