// console_init - map the board's UART registers into kernel virtual
// memory and remember the mapping in the global uart0 pointer.
//
// The physical base address of the UART depends on the target board:
// the VersatilePB places it at 0x101F1000, while the default target
// (Raspberry Pi-style peripheral layout) uses 0x20201000.  In either
// case one 4 KB MMIO page is mapped.
void console_init()
{
#ifdef VERSATILE_PB
	uart0 = (struct uart *)mmio_map_region(0x101F1000, 4096);
#else
	uart0 = (struct uart *)mmio_map_region(0x20201000, 4096);
#endif
}
// e1000_attach - PCI attach hook for the E1000 network card.
//
// Enables the device on the PCI bus, keeps a private copy of its PCI
// function descriptor, maps BAR 0 (the register file) into virtual
// memory, and sets up the transmit and receive rings.
//
// Always returns 0 (attach succeeded).
int e1000_attach(struct pci_func *f)
{
	pci_func_enable(f);

	// Keep our own copy of the PCI function info for later use.
	e1000_pci_func = *f;

	// BAR 0 holds the memory-mapped device registers.
	e1000_mmio_beg = mmio_map_region(f->reg_base[0], f->reg_size[0]);

	init_tx();
	init_rx();
	return 0;
}
// LAB 6: Your driver code here int e1000_attach(struct pci_func *pcif) { int i; pci_func_enable(pcif); e1000 = mmio_map_region(pcif->reg_base[0], pcif->reg_size[0]); // VM mapping for BAR 0 cprintf("Status is: 0x%x. Desired: 0x80080783\n", e1000[E1000_STATUS]); assert(sizeof(tx_descs) % 128 == 0); // should be 128-byte aligned assert(sizeof(rx_descs) % 128 == 0); // should be 128-byte aligned // perform transmit initialization e1000[E1000_TDBAL] = PADDR(tx_descs); e1000[E1000_TDLEN] = sizeof(tx_descs); e1000[E1000_TDH] = 0; e1000[E1000_TDT] = 0; e1000[E1000_TCTL] |= E1000_TCTL_EN; e1000[E1000_TCTL] |= E1000_TCTL_PSP; e1000[E1000_TCTL] |= E1000_TCTL_CT_INIT; e1000[E1000_TCTL] |= E1000_TCTL_COLD_INIT; e1000[E1000_TIPG] |= E1000_TIPG_INIT; // perform receive initialization e1000[E1000_RAL] = 0x12005452; // hardcoded 52:54:00:12:34:56 e1000[E1000_RAH] = 0x00005634 | E1000_RAH_AV; // hardcoded 52:54:00:12:34:56 e1000[E1000_RDBAL] = PADDR(rx_descs); e1000[E1000_RDLEN] = sizeof(rx_descs); e1000[E1000_RDH] = 0; e1000[E1000_RDT] = NRDESC - 1; e1000[E1000_RCTL] |= E1000_RCTL_EN; e1000[E1000_RCTL] &= (~E1000_RCTL_LPE); // turn off long packat for now e1000[E1000_RCTL] |= E1000_RCTL_LBM_NO; e1000[E1000_RCTL] |= E1000_RCTL_RDMTS_HALF; e1000[E1000_RCTL] |= E1000_RCTL_MO_0; e1000[E1000_RCTL] |= E1000_RCTL_BAM; e1000[E1000_RCTL] |= E1000_RCTL_SZ_2048; e1000[E1000_RCTL] |= E1000_RCTL_SECRC; // init transmit descriptors for (i = 0; i < NTDESC; i++) { tx_descs[i].addr = PADDR(&tx_packets[i * MAXPKTLEN]); tx_descs[i].cmd |= E1000_TXD_CMD_RS; tx_descs[i].status |= E1000_TXD_STA_DD; } // init receive descriptors for (i = 0; i < NRDESC; i++){ rx_descs[i].addr = PADDR(&rx_packets[i * MAXPKTLEN]); } return 0; }
// check page_insert, page_remove, &c
//
// Self-test for the page-table management code: exercises page_alloc /
// page_free, page_insert / page_remove / page_lookup, pgdir_walk, and
// mmio_map_region against kern_pgdir.  The free list is temporarily
// emptied so that every allocation is fully deterministic; it is
// restored before returning.  Panics (via assert) on the first failure.
static void
check_page(void)
{
	struct Page *pp, *pp0, *pp1, *pp2;
	struct Page *fl;
	pte_t *ptep, *ptep1;
	void *va;
	uintptr_t mm1, mm2;
	int i;
	extern pde_t entry_pgdir[];

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = 0;

	// should be no free memory
	assert(!page_alloc(0));

	// there is no page allocated at address 0
	assert(page_lookup(kern_pgdir, (void *) 0x0, &ptep) == NULL);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) < 0);

	// free pp0 and try again: pp0 should be used for page table
	page_free(pp0);
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) == 0);
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	assert(check_va2pa(kern_pgdir, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp0->pp_ref == 1);

	// should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// should be no free memory
	assert(!page_alloc(0));

	// should be able to map pp2 at PGSIZE because it's already there
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// pp2 should NOT be on the free list
	// (could happen if ref counts are handled sloppily in page_insert)
	assert(!page_alloc(0));

	// check that pgdir_walk returns a pointer to the pte
	ptep = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(PGSIZE)]));
	assert(pgdir_walk(kern_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));

	// should be able to change permissions too.
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W|PTE_U) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);
	assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U);
	assert(kern_pgdir[0] & PTE_U);

	// should be able to remap with fewer permissions
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_W);
	assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

	// should not be able to map at PTSIZE because need free page for page table
	assert(page_insert(kern_pgdir, pp0, (void*) PTSIZE, PTE_W) < 0);

	// insert pp1 at PGSIZE (replacing pp2)
	assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W) == 0);
	assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

	// should have pp1 at both 0 and PGSIZE, pp2 nowhere, ...
	assert(check_va2pa(kern_pgdir, 0) == page2pa(pp1));
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp2->pp_ref == 0);

	// pp2 should be returned by page_alloc
	assert((pp = page_alloc(0)) && pp == pp2);

	// unmapping pp1 at 0 should keep pp1 at PGSIZE
	page_remove(kern_pgdir, 0x0);
	assert(check_va2pa(kern_pgdir, 0x0) == ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp2->pp_ref == 0);

	// unmapping pp1 at PGSIZE should free it
	page_remove(kern_pgdir, (void*) PGSIZE);
	assert(check_va2pa(kern_pgdir, 0x0) == ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == ~0);
	assert(pp1->pp_ref == 0);
	assert(pp2->pp_ref == 0);

	// so it should be returned by page_alloc
	assert((pp = page_alloc(0)) && pp == pp1);

	// should be no free memory
	assert(!page_alloc(0));

	// forcibly take pp0 back (it was consumed as a page table above)
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	kern_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// check pointer arithmetic in pgdir_walk
	page_free(pp0);
	va = (void*)(PGSIZE * NPDENTRIES + PGSIZE);
	ptep = pgdir_walk(kern_pgdir, va, 1);
	ptep1 = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(va)]));
	assert(ptep == ptep1 + PTX(va));
	kern_pgdir[PDX(va)] = 0;
	pp0->pp_ref = 0;

	// check that new page tables get cleared
	memset(page2kva(pp0), 0xFF, PGSIZE);
	page_free(pp0);
	pgdir_walk(kern_pgdir, 0x0, 1);
	ptep = (pte_t *) page2kva(pp0);
	for(i=0; i<NPTENTRIES; i++)
		assert((ptep[i] & PTE_P) == 0);
	kern_pgdir[0] = 0;
	pp0->pp_ref = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	// test mmio_map_region
	mm1 = (uintptr_t) mmio_map_region(0, 4097);
	mm2 = (uintptr_t) mmio_map_region(0, 4096);
	// check that they're in the right region
	// NOTE(review): "8096" below looks like a typo for 2*PGSIZE (8192);
	// the constant is inherited from upstream JOS — confirm before changing.
	assert(mm1 >= MMIOBASE && mm1 + 8096 < MMIOLIM);
	assert(mm2 >= MMIOBASE && mm2 + 8096 < MMIOLIM);
	// check that they're page-aligned
	assert(mm1 % PGSIZE == 0 && mm2 % PGSIZE == 0);
	// check that they don't overlap
	assert(mm1 + 8096 <= mm2);
	// check page mappings
	assert(check_va2pa(kern_pgdir, mm1) == 0);
	assert(check_va2pa(kern_pgdir, mm1+PGSIZE) == PGSIZE);
	assert(check_va2pa(kern_pgdir, mm2) == 0);
	assert(check_va2pa(kern_pgdir, mm2+PGSIZE) == ~0);
	// check permissions
	assert(*pgdir_walk(kern_pgdir, (void*) mm1, 0) & (PTE_W|PTE_PWT|PTE_PCD));
	assert(!(*pgdir_walk(kern_pgdir, (void*) mm1, 0) & PTE_U));
	// clear the mappings
	*pgdir_walk(kern_pgdir, (void*) mm1, 0) = 0;
	*pgdir_walk(kern_pgdir, (void*) mm1 + PGSIZE, 0) = 0;
	*pgdir_walk(kern_pgdir, (void*) mm2, 0) = 0;

	cprintf("check_page() succeeded!\n");
}
int E1000_attach_function(struct pci_func *pcif) { pci_func_enable(pcif); e1000_bar_base = mmio_map_region(pcif->reg_base[0], pcif->reg_size[0] ); /* Debugging*/ // cprintf("value of device status register%x\n", *(e1000_bar_address+ 0x00008/4)); int i; for (i = 0; i < E1000_TX_DESCS; ++i) { tx_descriptors[i].status = E1000_TXD_STAT_DD; } /* Transmitter registers*/ e1000_bar_base[E1000_TDH / 4] = 0; e1000_bar_base[E1000_TDT / 4] = 0; e1000_bar_base[E1000_TDBAL / 4] = PADDR(tx_descriptors); e1000_bar_base[E1000_TDBAH / 4] = 0; e1000_bar_base[E1000_TDLEN / 4] = sizeof(struct tx_desc) * E1000_TX_DESCS; e1000_bar_base[E1000_TCTL / 4] |= E1000_TCTL_EN; e1000_bar_base[E1000_TCTL / 4] |= E1000_TCTL_PSP; e1000_bar_base[E1000_TCTL / 4] &= ~E1000_TCTL_CT; e1000_bar_base[E1000_TCTL / 4] |= (E1000_TCTL_CT_VAL << 4); e1000_bar_base[E1000_TCTL / 4] &= ~E1000_TCTL_COLD; e1000_bar_base[E1000_TCTL / 4] |= (E1000_TCTL_COLD_VAL << 12); e1000_bar_base[E1000_TIPG / 4] = (E1000_TIPG_IPGT_VAL ) | (E1000_TIPG_IPGR1_VAL << 10) | (E1000_TIPG_IPGR2_VAL << 20) | (E1000_TIPG_RESERVED_VAL << 30); /* Receiver registers */ e1000_bar_base[E1000_RDBAL / 4] = PADDR(rx_descriptors); e1000_bar_base[E1000_RDBAH / 4] = 0; e1000_bar_base[E1000_RDLEN / 4] = sizeof(struct rx_desc) * E1000_RX_DESCS; for (i = 0; i < E1000_TX_DESCS; ++i) { rx_descriptors[i].addr = PADDR(rx_bufs[i]); } e1000_bar_base[E1000_IMS / 4] = 0; for (i = 0; i < E1000_N_MTA_ELEMS; ++i) { e1000_bar_base[E1000_MTA / 4] = 0; } e1000_bar_base[E1000_RDH / 4] = 0; e1000_bar_base[E1000_RDT / 4] = E1000_RX_DESCS - 1; e1000_bar_base[E1000_RCTL / 4] |= E1000_RCTL_EN; e1000_bar_base[E1000_RCTL / 4] &= ~E1000_RCTL_LPE; e1000_bar_base[E1000_RCTL / 4] |= E1000_RCTL_BAM; e1000_bar_base[E1000_RCTL / 4] |= E1000_RCTL_SECRC; e1000_bar_base[E1000_RAL(0) / 4] = 0x12005452; e1000_bar_base[E1000_RAH(0) / 4] = 0x5634; e1000_bar_base[E1000_RAH(0) / 4] |= E1000_RAH_AV; /* Test tx_packet transmit*/ /* Debugging char d='a'; E1000_tx_packet(&d, 1); */ return 0; }
int attachE1000(struct pci_func *pcif) { int i; pci_func_enable(pcif); loc_mmio=(uint32_t *)mmio_map_region((physaddr_t)pcif->reg_base[0] ,(size_t)pcif->reg_size[0]); memset(tx_que, 0x0, sizeof(struct trans_desc) * MAXTX_DESC); memset(pkt_que, 0x0, sizeof(struct trans_pkt) * MAXTX_DESC); for( i=0; i<MAXTX_DESC; i++) { tx_que[i].addr = PADDR(pkt_que[i].arr); tx_que[i].status |= E1000_TXD_STAT_DD; } memset(rx_que, 0x0, sizeof(struct rcv_desc) * MAXTX_DESC); memset(rpkt_que, 0x0, sizeof(struct rcv_pkt) * MAXTX_DESC); for( i=0; i<MAXTX_DESC; i++) { rx_que[i].addr = PADDR(rpkt_que[i].arr); //tx_que[i].status |= E1000_TXD_STAT_DD; } loc_mmio[E1000_RAL] = 0x52; loc_mmio[E1000_RAL] |= (0x54) << 8; loc_mmio[E1000_RAL] |= (0x00) << 16; loc_mmio[E1000_RAL] |= (0x12) << 24; loc_mmio[E1000_RAH] |= (0x34); loc_mmio[E1000_RAH] |= (0x56) << 8; loc_mmio[E1000_RAH] |= 0x80000000; //initialization of various registers loc_mmio[E1000_TDBAH] = 0x0; loc_mmio[E1000_TDBAL] = PADDR(tx_que); loc_mmio[E1000_TDLEN] = sizeof(struct trans_desc) * MAXTX_DESC; loc_mmio[E1000_TDH] = 0x0; loc_mmio[E1000_TDT] = 0x0; loc_mmio[E1000_RDBAH] = 0x0; loc_mmio[E1000_RDBAL] = PADDR(rx_que); loc_mmio[E1000_RDLEN] = sizeof(struct rcv_desc) * MAXTX_DESC; loc_mmio[E1000_RDH] = 0x0; loc_mmio[E1000_RDT] = 0x0; loc_mmio[E1000_TCTL] |= E1000_TCTL_EN|E1000_TCTL_PSP|(E1000_TCTL_CT & (0x10 << 4))|(E1000_TCTL_COLD & (0x40 << 12)); loc_mmio[E1000_RCTL] |= E1000_RCTL_EN; loc_mmio[E1000_RCTL] &= ~E1000_RCTL_LPE; loc_mmio[E1000_RCTL] &= ~(E1000_RCTL_LBM_MAC | E1000_RCTL_LBM_SLP |E1000_RCTL_LBM_TCVR); loc_mmio[E1000_RCTL] &= ~(E1000_RCTL_RDMTS_QUAT | E1000_RCTL_RDMTS_EIGTH); loc_mmio[E1000_RCTL] &= ~(E1000_RCTL_MO_3); loc_mmio[E1000_RCTL] &= ~E1000_RCTL_BAM; loc_mmio[E1000_RCTL] &= ~(E1000_RCTL_BSEX); loc_mmio[E1000_RCTL] &= ~(E1000_RCTL_SZ_256); loc_mmio[E1000_RCTL] |= E1000_RCTL_SECRC; // loc_mmio[E1000_TIPG] = 0x0; // loc_mmio[E1000_TIPG] |= 0xA; // loc_mmio[E1000_TIPG] |= (0x6) << 20; // loc_mmio[E1000_TIPG] |= (0x4) << 
10; return 0; }
// check page_insert, page_remove, &c
//
// Self-test for the x86-64 four-level page-table code: exercises
// page_alloc / page_free / page_decref, page_insert / page_remove /
// page_lookup, pml4e_walk, and mmio_map_region against boot_pml4e.
// The free list is temporarily emptied so allocation order is fully
// deterministic; it is restored before returning.  Panics (via assert)
// on the first failed check.  Note: mapping a page at a new virtual
// address may consume up to three pages for the intermediate PDP/PD/PT
// levels, which is why six pages are allocated up front.
static void
page_check(void)
{
	struct Page *pp0, *pp1, *pp2,*pp3,*pp4,*pp5;
	struct Page * fl;
	pte_t *ptep, *ptep1;
	pdpe_t *pdpe;
	pde_t *pde;
	void *va;
	int i;
	uintptr_t mm1, mm2;

	// should be able to allocate six pages
	pp0 = pp1 = pp2 = pp3 = pp4 = pp5 =0;
	assert(pp0 = page_alloc(0));
	assert(pp1 = page_alloc(0));
	assert(pp2 = page_alloc(0));
	assert(pp3 = page_alloc(0));
	assert(pp4 = page_alloc(0));
	assert(pp5 = page_alloc(0));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(pp3 && pp3 != pp2 && pp3 != pp1 && pp3 != pp0);
	assert(pp4 && pp4 != pp3 && pp4 != pp2 && pp4 != pp1 && pp4 != pp0);
	assert(pp5 && pp5 != pp4 && pp5 != pp3 && pp5 != pp2 && pp5 != pp1 && pp5 != pp0);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = NULL;

	// should be no free memory
	assert(!page_alloc(0));

	// there is no page allocated at address 0
	assert(page_lookup(boot_pml4e, (void *) 0x0, &ptep) == NULL);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(boot_pml4e, pp1, 0x0, 0) < 0);

	// free pp0 and try again: still fails, because mapping va 0 needs
	// THREE intermediate tables (PDP, PD, PT), not one
	page_free(pp0);
	assert(page_insert(boot_pml4e, pp1, 0x0, 0) < 0);
	page_free(pp2);
	page_free(pp3);

	//cprintf("pp1 ref count = %d\n",pp1->pp_ref);
	//cprintf("pp0 ref count = %d\n",pp0->pp_ref);
	//cprintf("pp2 ref count = %d\n",pp2->pp_ref);

	// with three free pages the insert succeeds; pp0/pp2/pp3 are
	// consumed as the intermediate tables (in some order)
	assert(page_insert(boot_pml4e, pp1, 0x0, 0) == 0);
	assert((PTE_ADDR(boot_pml4e[0]) == page2pa(pp0) || PTE_ADDR(boot_pml4e[0]) == page2pa(pp2) || PTE_ADDR(boot_pml4e[0]) == page2pa(pp3) ));
	assert(check_va2pa(boot_pml4e, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp0->pp_ref == 1);
	assert(pp2->pp_ref == 1);

	//should be able to map pp3 at PGSIZE because pp0 is already allocated for page table
	assert(page_insert(boot_pml4e, pp3, (void*) PGSIZE, 0) == 0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp3));
	assert(pp3->pp_ref == 2);

	// should be no free memory
	assert(!page_alloc(0));

	// should be able to map pp3 at PGSIZE because it's already there
	assert(page_insert(boot_pml4e, pp3, (void*) PGSIZE, 0) == 0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp3));
	assert(pp3->pp_ref == 2);

	// pp3 should NOT be on the free list
	// (could happen if ref counts are handled sloppily in page_insert)
	assert(!page_alloc(0));

	// check that pml4e_walk returns a pointer to the pte
	pdpe = KADDR(PTE_ADDR(boot_pml4e[PML4(PGSIZE)]));
	pde = KADDR(PTE_ADDR(pdpe[PDPE(PGSIZE)]));
	ptep = KADDR(PTE_ADDR(pde[PDX(PGSIZE)]));
	assert(pml4e_walk(boot_pml4e, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));

	// should be able to change permissions too.
	assert(page_insert(boot_pml4e, pp3, (void*) PGSIZE, PTE_U) == 0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp3));
	assert(pp3->pp_ref == 2);
	assert(*pml4e_walk(boot_pml4e, (void*) PGSIZE, 0) & PTE_U);
	assert(boot_pml4e[0] & PTE_U);

	// should not be able to map at PTSIZE because need free page for page table
	assert(page_insert(boot_pml4e, pp0, (void*) PTSIZE, 0) < 0);

	// insert pp1 at PGSIZE (replacing pp3)
	assert(page_insert(boot_pml4e, pp1, (void*) PGSIZE, 0) == 0);
	assert(!(*pml4e_walk(boot_pml4e, (void*) PGSIZE, 0) & PTE_U));

	// should have pp1 at both 0 and PGSIZE
	assert(check_va2pa(boot_pml4e, 0) == page2pa(pp1));
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp3->pp_ref == 1);

	// unmapping pp1 at 0 should keep pp1 at PGSIZE
	page_remove(boot_pml4e, 0x0);
	assert(check_va2pa(boot_pml4e, 0x0) == ~0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp3->pp_ref == 1);

	// Test re-inserting pp1 at PGSIZE.
	// Thanks to Varun Agrawal for suggesting this test case.
	assert(page_insert(boot_pml4e, pp1, (void*) PGSIZE, 0) == 0);
	assert(pp1->pp_ref);
	assert(pp1->pp_link == NULL);

	// unmapping pp1 at PGSIZE should free it
	page_remove(boot_pml4e, (void*) PGSIZE);
	assert(check_va2pa(boot_pml4e, 0x0) == ~0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == ~0);
	assert(pp1->pp_ref == 0);
	assert(pp3->pp_ref == 1);

#if 0
	// should be able to page_insert to change a page
	// and see the new data immediately.
	memset(page2kva(pp1), 1, PGSIZE);
	memset(page2kva(pp2), 2, PGSIZE);
	page_insert(boot_pgdir, pp1, 0x0, 0);
	assert(pp1->pp_ref == 1);
	assert(*(int*)0 == 0x01010101);
	page_insert(boot_pgdir, pp2, 0x0, 0);
	assert(*(int*)0 == 0x02020202);
	assert(pp2->pp_ref == 1);
	assert(pp1->pp_ref == 0);
	page_remove(boot_pgdir, 0x0);
	assert(pp2->pp_ref == 0);
#endif

	// forcibly take pp3 back (it was consumed as a page table)
	assert(PTE_ADDR(boot_pml4e[0]) == page2pa(pp3));
	boot_pml4e[0] = 0;
	assert(pp3->pp_ref == 1);
	page_decref(pp3);

	// check pointer arithmetic in pml4e_walk
	page_decref(pp0);
	page_decref(pp2);
	va = (void*)(PGSIZE * 100);
	ptep = pml4e_walk(boot_pml4e, va, 1);
	pdpe = KADDR(PTE_ADDR(boot_pml4e[PML4(va)]));
	pde = KADDR(PTE_ADDR(pdpe[PDPE(va)]));
	ptep1 = KADDR(PTE_ADDR(pde[PDX(va)]));
	assert(ptep == ptep1 + PTX(va));

	// check that new page tables get cleared
	page_decref(pp4);
	memset(page2kva(pp4), 0xFF, PGSIZE);
	pml4e_walk(boot_pml4e, 0x0, 1);
	pdpe = KADDR(PTE_ADDR(boot_pml4e[0]));
	pde = KADDR(PTE_ADDR(pdpe[0]));
	ptep = KADDR(PTE_ADDR(pde[0]));
	for(i=0; i<NPTENTRIES; i++)
		assert((ptep[i] & PTE_P) == 0);
	boot_pml4e[0] = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_decref(pp0);
	page_decref(pp1);
	page_decref(pp2);

	// test mmio_map_region
	mm1 = (uintptr_t) mmio_map_region(0, 4097);
	mm2 = (uintptr_t) mmio_map_region(0, 4096);
	// check that they're in the right region
	// NOTE(review): "8096" below looks like a typo for 2*PGSIZE (8192);
	// the constant is inherited from upstream JOS — confirm before changing.
	assert(mm1 >= MMIOBASE && mm1 + 8096 < MMIOLIM);
	assert(mm2 >= MMIOBASE && mm2 + 8096 < MMIOLIM);
	// check that they're page-aligned
	assert(mm1 % PGSIZE == 0 && mm2 % PGSIZE == 0);
	// check that they don't overlap
	assert(mm1 + 8096 <= mm2);
	// check page mappings
	assert(check_va2pa(boot_pml4e, mm1) == 0);
	assert(check_va2pa(boot_pml4e, mm1+PGSIZE) == PGSIZE);
	assert(check_va2pa(boot_pml4e, mm2) == 0);
	assert(check_va2pa(boot_pml4e, mm2+PGSIZE) == ~0);
	// check permissions
	assert(*pml4e_walk(boot_pml4e, (void*) mm1, 0) & (PTE_W|PTE_PWT|PTE_PCD));
	assert(!(*pml4e_walk(boot_pml4e, (void*) mm1, 0) & PTE_U));
	// clear the mappings
	*pml4e_walk(boot_pml4e, (void*) mm1, 0) = 0;
	*pml4e_walk(boot_pml4e, (void*) mm1 + PGSIZE, 0) = 0;
	*pml4e_walk(boot_pml4e, (void*) mm2, 0) = 0;

	cprintf("check_page() succeeded!\n");
}
int net_pci_attach(struct pci_func *pcif){ int i = 0; // Register the PCI device and enable pci_func_enable(pcif); // Provide the memory for the PCI device net_pci_addr = mmio_map_region(pcif->reg_base[0], pcif->reg_size[0]); // Check to see if the correct value gets printed cprintf("NET PCI status: %x\n", net_pci_addr[E1000_STATUS]); // Initialize transmit descriptor array and packet buffer (not necessarily needed) memset(tx_desc_arr, 0, sizeof(struct tx_desc) * PCI_TXDESC); memset(tx_pkt_buf, 0, sizeof(struct tx_pkt) * PCI_TXDESC); /* Transmit initialization */ // Transmit descriptor base address registers init net_pci_addr[E1000_TDBAL] = PADDR(tx_desc_arr); net_pci_addr[E1000_TDBAH] = 0x0; // Transmit descriptor length register init net_pci_addr[E1000_TDLEN] = sizeof(struct tx_desc) * PCI_TXDESC; // Transmit descriptor head and tail registers init net_pci_addr[E1000_TDH] = 0x0; net_pci_addr[E1000_TDT] = 0x0; // Transmit control register init // 1st bit net_pci_addr[E1000_TCTL] = E1000_TCTL_EN; // 2nd bit net_pci_addr[E1000_TCTL] |= E1000_TCTL_PSP; // TCTL-CT starts from 4th bit and extends to 11th bit // clear all those bits and set it to 10h (11:4) (as per manual) net_pci_addr[E1000_TCTL] &= ~E1000_TCTL_CT; net_pci_addr[E1000_TCTL] |= (0x10) << 4; // TCTL-COLD starts from 12the bit and extends to 21st bit // clear all those bits and set i to 40h (21:12) (as per manual) net_pci_addr[E1000_TCTL] &= ~E1000_TCTL_COLD; net_pci_addr[E1000_TCTL] |= (0x40) << 12; /* Transmit IPG register init */ // Set to zero first net_pci_addr[E1000_TIPG] = 0x0; // IPGT value 10 for IEEE 802.3 standard (as per maunal) net_pci_addr[E1000_TIPG] |= 0xA; // IPGR1 2/3 the value of IPGR2 as per IEEE 802.3 standard (as per manual) // Starts from the 10th bit net_pci_addr[E1000_TIPG] |= (0x4) << 10; // IPGR2 starts from the 20th bit, value = 6(as per manual) net_pci_addr[E1000_TIPG] |= (0x6) << 20; /* Receive Initialization */ // Program the Receive Address Registers net_pci_addr[E1000_RAL] 
= 0x12005452; net_pci_addr[E1000_RAH] = 0x5634 | E1000_RAH_AV; // HArd coded mac address. (needed to specify end of RAH) net_pci_addr[E1000_MTA] = 0x0; // Program the Receive Descriptor Base Address Registers net_pci_addr[E1000_RDBAL] = PADDR(rx_desc_arr); net_pci_addr[E1000_RDBAH] = 0x0; // Set the Receive Descriptor Length Register net_pci_addr[E1000_RDLEN] = sizeof(struct rx_desc) * PCI_RXDESC; // Set the Receive Descriptor Head and Tail Registers net_pci_addr[E1000_RDH] = 0x0; net_pci_addr[E1000_RDT] = 0x0; // Initialize the Receive Control Register net_pci_addr[E1000_RCTL] |= E1000_RCTL_EN; // Bradcast set 1b net_pci_addr[E1000_RCTL] |= E1000_RCTL_BAM; // CRC strip net_pci_addr[E1000_RCTL] |= E1000_RCTL_SECRC; // Associate the descriptors with the packets. (one to one mapping) for (i = 0; i < PCI_TXDESC; i++) { tx_desc_arr[i].addr = PADDR(tx_pkt_buf[i].buf); tx_desc_arr[i].status |= E1000_TXD_STAT_DD; rx_desc_arr[i].addr = PADDR(rx_pkt_buf[i].buf); } return 0; }