Example #1
// Modify mappings in kern_pgdir to support SMP
//   - Remap [IOMEMBASE, 2^32) to physical address [IOMEM_PADDR, 2^32)
//   - Map the per-CPU stacks in the region [KSTACKTOP-PTSIZE, KSTACKTOP)
// See the revised inc/memlayout.h
//
static void
mem_init_mp(void)
{
	// Create a direct mapping at the top of virtual address space starting
	// at IOMEMBASE for accessing the LAPIC unit using memory-mapped I/O.
	boot_map_region(kern_pgdir, IOMEMBASE, -IOMEMBASE, IOMEM_PADDR, PTE_W);	// size -IOMEMBASE wraps to 2^32 - IOMEMBASE

	// Map per-CPU stacks starting at KSTACKTOP, for up to 'NCPU' CPUs.
	//
	// For CPU i, use the physical memory that 'percpu_kstacks[i]' refers
	// to as its kernel stack. CPU i's kernel stack grows down from virtual
	// address kstacktop_i = KSTACKTOP - i * (KSTKSIZE + KSTKGAP), and is
	// divided into two pieces, just like the single stack you set up in
	// mem_init:
	//     * [kstacktop_i - KSTKSIZE, kstacktop_i)
	//          -- backed by physical memory
	//     * [kstacktop_i - (KSTKSIZE + KSTKGAP), kstacktop_i - KSTKSIZE)
	//          -- not backed; so if the kernel overflows its stack,
	//             it will fault rather than overwrite another CPU's stack.
	//             Known as a "guard page".
	//     Permissions: kernel RW, user NONE
	//
	// LAB 4: Your code here:
	uint32_t i;
	uint32_t per_stack_top = KSTACKTOP - KSTKSIZE;
	for (i = 0; i < NCPU; i++) {
		boot_map_region(kern_pgdir, per_stack_top, KSTKSIZE, PADDR(percpu_kstacks[i]), PTE_P | PTE_W);
		per_stack_top -= (KSTKSIZE + KSTKGAP);
	}
}
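
All of these snippets lean on boot_map_region() from kern/pmap.c. For orientation, here is a minimal sketch of it, assuming JOS's pgdir_walk() helper; this is the shape the examples rely on, not the staff implementation.

static void
boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm)
{
	size_t off;
	for (off = 0; off < size; off += PGSIZE) {
		// pgdir_walk(..., 1) allocates the page-table page on demand
		pte_t *pte = pgdir_walk(pgdir, (void *)(va + off), 1);
		if (!pte)
			panic("boot_map_region: out of memory");
		*pte = (pa + off) | perm | PTE_P;
	}
}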
Example #2
// Modify mappings in kern_pgdir to support SMP
//   - Map the per-CPU stacks in the region [KSTACKTOP-PTSIZE, KSTACKTOP)
//
static void
mem_init_mp(void)
{
	// Map per-CPU stacks starting at KSTACKTOP, for up to 'NCPU' CPUs.
	//
	// For CPU i, use the physical memory that 'percpu_kstacks[i]' refers
	// to as its kernel stack. CPU i's kernel stack grows down from virtual
	// address kstacktop_i = KSTACKTOP - i * (KSTKSIZE + KSTKGAP), and is
	// divided into two pieces, just like the single stack you set up in
	// mem_init:
	//     * [kstacktop_i - KSTKSIZE, kstacktop_i)
	//          -- backed by physical memory
	//     * [kstacktop_i - (KSTKSIZE + KSTKGAP), kstacktop_i - KSTKSIZE)
	//          -- not backed; so if the kernel overflows its stack,
	//             it will fault rather than overwrite another CPU's stack.
	//             Known as a "guard page".
	//     Permissions: kernel RW, user NONE
	//
	// LAB 4: Your code here:
	int i;
	for (i = 0; i < NCPU; i++) {
		boot_map_region(kern_pgdir,
				KSTACKTOP - i * (KSTKSIZE + KSTKGAP) - KSTKSIZE,
				KSTKSIZE, PADDR(percpu_kstacks[i]), PTE_W | PTE_P);
	}
}
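
As a worked example of the address arithmetic, assuming the stock JOS values KSTACKTOP = 0xf0000000 and KSTKSIZE = KSTKGAP = 8 * PGSIZE = 0x8000 (check inc/memlayout.h for your tree):

// CPU 0: backed [0xefff8000, 0xf0000000), guard [0xefff0000, 0xefff8000)
// CPU 1: backed [0xeffe8000, 0xefff0000), guard [0xeffe0000, 0xeffe8000)
// ... each CPU's slot sits KSTKSIZE + KSTKGAP = 0x10000 bytes below the previous one.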
Example #3
// Modify mappings in kern_pgdir to support SMP
//   - Map the per-CPU stacks in the region [KSTACKTOP-PTSIZE, KSTACKTOP)
//
static void
mem_init_mp(void)
{
	// Map per-CPU stacks starting at KSTACKTOP, for up to 'NCPU' CPUs.
	//
	// For CPU i, use the physical memory that 'percpu_kstacks[i]' refers
	// to as its kernel stack. CPU i's kernel stack grows down from virtual
	// address kstacktop_i = KSTACKTOP - i * (KSTKSIZE + KSTKGAP), and is
	// divided into two pieces, just like the single stack you set up in
	// mem_init:
	//     * [kstacktop_i - KSTKSIZE, kstacktop_i)
	//          -- backed by physical memory
	//     * [kstacktop_i - (KSTKSIZE + KSTKGAP), kstacktop_i - KSTKSIZE)
	//          -- not backed; so if the kernel overflows its stack,
	//             it will fault rather than overwrite another CPU's stack.
	//             Known as a "guard page".
	//     Permissions: kernel RW, user NONE
	//
	// LAB 4: Your code here:
	// Note: although mem_init() already mapped a kernel stack in lab 3, do
	// not assume CPU 0 needs no mapping here.  First, there is no guarantee
	// that the BSP is CPU 0; second, every CPU i now uniformly uses
	// percpu_kstacks[i] as its stack (the asserts on the CPU kernel stacks
	// in check_kern_pgdir() show this too -- see the sketch after this
	// function).
	uint32_t i = 0, stki;
	for ( ; i < NCPU; i++){
		stki = KSTACKTOP - i*(KSTKSIZE + KSTKGAP) - KSTKSIZE;
		boot_map_region(kern_pgdir, stki, KSTKSIZE, 
				PADDR(percpu_kstacks[i]), PTE_W|PTE_P);
	}
}
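
For reference, the per-CPU stack asserts that the comment above points to look roughly like this in the lab 4 check_kern_pgdir() (paraphrased from the skeleton; treat the details as approximate):

	// check the per-CPU kernel stacks
	for (n = 0; n < NCPU; n++) {
		uint32_t base = KSTACKTOP - (KSTKSIZE + KSTKGAP) * (n + 1);
		for (i = 0; i < KSTKSIZE; i += PGSIZE)
			assert(check_va2pa(pgdir, base + KSTKGAP + i)
			       == PADDR(percpu_kstacks[n]) + i);
		for (i = 0; i < KSTKGAP; i += PGSIZE)
			assert(check_va2pa(pgdir, base + i) == ~0);
	}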
Example #4
int
e1000_attach(struct pci_func *pcif)
{
	pci_func_enable(pcif);
	e1000_mem_init();

	// Sanity check
	static_assert(sizeof(struct tx_desc) == 16 && sizeof(struct rcv_desc) == 16);

	boot_map_region(kern_pgdir, E1000_ADDR, pcif->reg_size[0], pcif->reg_base[0], PTE_PCD | PTE_PWT | PTE_W);
	e1000 = (uint32_t*)E1000_ADDR;

	e1000[E1000_TDBAL] = PADDR(tx_queue);
	e1000[E1000_TDBAH] = 0;
	e1000[E1000_TDLEN] = sizeof(struct tx_desc) * E1000_NTXDESC;
	e1000[E1000_TDH]   = 0;
	e1000[E1000_TDT]   = 0;

	// Ensure proper alignment of values
	assert(e1000[E1000_TDBAL] % 0x10 == 0 && e1000[E1000_TDLEN] % 0x80 == 0);

	// Setup TCTL register
	e1000[E1000_TCTL] |= E1000_TCTL_EN;
	e1000[E1000_TCTL] |= E1000_TCTL_PSP;
	e1000[E1000_TCTL] |= E1000_TCTL_CT;
	e1000[E1000_TCTL] |= E1000_TCTL_COLD;

	// Setup TIPG register
	e1000[E1000_TIPG]  = 0;
	e1000[E1000_TIPG] |= E1000_TIPG_IPGT;
	e1000[E1000_TIPG] |= E1000_TIPG_IPGR1;
	e1000[E1000_TIPG] |= E1000_TIPG_IPGR2;

	e1000[E1000_FILTER_RAL] = 0x12005452;
	e1000[E1000_FILTER_RAH] = 0x00005634;
	e1000[E1000_FILTER_RAH] |= E1000_FILTER_RAH_VALID;

	//cprintf("Ethernet Address: 0x%08x%08x\n", e1000[E1000_FILTER_RAH], e1000[E1000_FILTER_RAL]);

	// Setup RCV Registers
	e1000[E1000_RDBAL] = PADDR(rcv_queue);
	e1000[E1000_RDBAH] = 0;
	e1000[E1000_RDLEN] = sizeof(struct rcv_desc) * E1000_NRCVDESC;
	e1000[E1000_RDH]   = 1;
	e1000[E1000_RDT]   = 0; // Gets reset later

	e1000[E1000_RCTL] = E1000_RCTL_EN;
	e1000[E1000_RCTL] &= ~E1000_RCTL_LPE;
	e1000[E1000_RCTL] &= ~E1000_RCTL_LBM;
	e1000[E1000_RCTL] &= ~E1000_RCTL_RDMTS;
	e1000[E1000_RCTL] &= ~E1000_RCTL_MO;
	e1000[E1000_RCTL] |= E1000_RCTL_BAM;
	e1000[E1000_RCTL] &= ~E1000_RCTL_BSIZE;
	e1000[E1000_RCTL] |= E1000_RCTL_SECRC;

	return 1;
}
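
The static_assert above relies on the 16-byte legacy transmit/receive descriptor layout from the Intel 8254x manual. A plausible definition follows; the real ones live in this project's e1000.h, so the field names here are illustrative, not authoritative.

struct tx_desc
{
	uint64_t addr;		// physical address of the packet buffer
	uint16_t length;	// bytes to transmit
	uint8_t cso;		// checksum offset
	uint8_t cmd;		// command bits (RS, EOP, ...)
	uint8_t status;		// the DD (descriptor done) bit lands here
	uint8_t css;		// checksum start
	uint16_t special;
};				// 16 bytes total, matching the assertion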
Example #5
void *
nw_mmio_map_region(physaddr_t pa, size_t size)
{
	static uintptr_t base = NW_MMIOBASE;
	uintptr_t ret_base = base;

	boot_map_region(kern_pgdir, base, ROUNDUP(size, PGSIZE), pa, PTE_P | PTE_W | PTE_PCD | PTE_PWT);
	base += ROUNDUP(size, PGSIZE);
	// For now, there is only one address space, so always invalidate.
	tlb_invalidate(kern_pgdir, (void *)ret_base);
	return (void *)ret_base;
}
Example #6
//
// Reserve size bytes in the MMIO region and map [pa,pa+size) at this
// location.  Return the base of the reserved region.  size does *not*
// have to be multiple of PGSIZE.
//
void *
mmio_map_region(physaddr_t pa, size_t size)
{
	// Where to start the next region.  Initially, this is the
	// beginning of the MMIO region.  Because this is static, its
	// value will be preserved between calls to mmio_map_region
	// (just like nextfree in boot_alloc).
	static uintptr_t base = MMIOBASE;

	// Reserve size bytes of virtual memory starting at base and
	// map physical pages [pa,pa+size) to virtual addresses
	// [base,base+size).  Since this is device memory and not
	// regular DRAM, you'll have to tell the CPU that it isn't
	// safe to cache access to this memory.  Luckily, the page
	// tables provide bits for this purpose; simply create the
	// mapping with PTE_PCD|PTE_PWT (cache-disable and
	// write-through) in addition to PTE_W.  (If you're interested
	// in more details on this, see section 10.5 of IA32 volume
	// 3A.)
	//
	// Be sure to round size up to a multiple of PGSIZE and to
	// handle if this reservation would overflow MMIOLIM (it's
	// okay to simply panic if this happens).
	//
	// Hint: The staff solution uses boot_map_region.
	//
	// Your code here:
	size = ROUNDUP(size, PGSIZE);
	if (base + size > MMIOLIM)
		panic("mmio_map_region: reservation would overflow MMIOLIM");
	/* boot_map_region(pde_t *pgdir, uintptr_t va, size_t size, physaddr_t pa, int perm) */
	boot_map_region(kern_pgdir, base, size, pa, PTE_PCD | PTE_PWT | PTE_W);
	void *r = (void *) base;
	base += size;
	return r;
}
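
Typical usage: in JOS lab 4, kern/lapic.c maps the LAPIC's 4 KB register page through this function. A sketch of that call, assuming the lapicaddr variable from lapic.c:

	// lapicaddr holds the LAPIC's physical base address (from the MP table)
	lapic = mmio_map_region(lapicaddr, 4096);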
Example #7
//
// Reserve size bytes in the MMIO region and map [pa,pa+size) at this
// location.  Return the base of the reserved region.  size does *not*
// have to be multiple of PGSIZE.
//
void *
mmio_map_region(physaddr_t pa, size_t size)
{
	// Where to start the next region.  Initially, this is the
	// beginning of the MMIO region.  Because this is static, its
	// value will be preserved between calls to mmio_map_region
	// (just like nextfree in boot_alloc).
	static uintptr_t base = MMIOBASE;
	uintptr_t ret_base = base;

	// Reserve size bytes of virtual memory starting at base and
	// map physical pages [pa,pa+size) to virtual addresses
	// [base,base+size).  Since this is device memory and not
	// regular DRAM, you'll have to tell the CPU that it isn't
	// safe to cache access to this memory.  Luckily, the page
	// tables provide bits for this purpose; simply create the
	// mapping with PTE_PCD|PTE_PWT (cache-disable and
	// write-through) in addition to PTE_W.  (If you're interested
	// in more details on this, see section 10.5 of IA32 volume
	// 3A.)
	//
	// Be sure to round size up to a multiple of PGSIZE and to
	// handle if this reservation would overflow MMIOLIM (it's
	// okay to simply panic if this happens).
	//
	// Hint: The staff solution uses boot_map_region.
	//
	// Your code here:

	size = ROUNDUP(size, PGSIZE);
	if (base + size > MMIOLIM)
		panic("MMIOLIM reached, cannot map memory for the new device");
	boot_map_region(kern_pgdir, base, size, pa, PTE_P | PTE_W | PTE_PCD | PTE_PWT);
	base += size;
	// For now, there is only one address space, so always invalidate.
	tlb_invalidate(kern_pgdir, (void *)ret_base);
	return (void *)ret_base;
}
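
Contrast with Examples #4 and #9, which map the NIC at a fixed virtual address via boot_map_region(): the lab 6 hint is to reuse mmio_map_region for the E1000's BAR 0, roughly as below (a sketch, assuming a volatile uint32_t *e1000 global):

	e1000 = mmio_map_region(pcif->reg_base[0], pcif->reg_size[0]);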
Example #8
// Set up a two-level page table:
//    kern_pgdir is its linear (virtual) address of the root
//
// This function only sets up the kernel part of the address space
// (ie. addresses >= UTOP).  The user part of the address space
// will be setup later.
//
// From UTOP to ULIM, the user is allowed to read but not write.
// Above ULIM the user cannot read or write.
void
mem_init(void)
{
	uint32_t cr0;
	size_t n;

	// Find out how much memory the machine has (npages & npages_basemem).
	i386_detect_memory();

	//////////////////////////////////////////////////////////////////////
	// create initial page directory.
	kern_pgdir = (pde_t *) boot_alloc(PGSIZE);
	memset(kern_pgdir, 0, PGSIZE);

	//////////////////////////////////////////////////////////////////////
	// Recursively insert PD in itself as a page table, to form
	// a virtual page table at virtual address UVPT.
	// (For now, you don't have to understand the greater purpose of the
	// following line.)

	// Permissions: kernel R, user R
	kern_pgdir[PDX(UVPT)] = PADDR(kern_pgdir) | PTE_U | PTE_P;

	//////////////////////////////////////////////////////////////////////
	// Allocate an array of npages 'struct Page's and store it in 'pages'.
	// The kernel uses this array to keep track of physical pages: for
	// each physical page, there is a corresponding struct Page in this
	// array.  'npages' is the number of physical pages in memory.
	// Your code goes here:
    pages = boot_alloc(npages * sizeof(*pages));
    memset(pages, 0, npages * sizeof(*pages));	// boot_alloc does not zero memory

	//////////////////////////////////////////////////////////////////////
	// Make 'envs' point to an array of size 'NENV' of 'struct Env'.
	// LAB 3: Your code here.

    envs = boot_alloc(NENV * sizeof(*envs));
    memset(envs, 0, NENV * sizeof(*envs));	// boot_alloc does not zero memory

	//////////////////////////////////////////////////////////////////////
	// Now that we've allocated the initial kernel data structures, we set
	// up the list of free physical pages. Once we've done so, all further
	// memory management will go through the page_* functions. In
	// particular, we can now map memory using boot_map_region
	// or page_insert
	page_init();

	check_page_free_list(1);
	check_page_alloc();
	check_page();

	//////////////////////////////////////////////////////////////////////
	// Now we set up virtual memory

	//////////////////////////////////////////////////////////////////////
	// Map 'pages' read-only by the user at linear address UPAGES
	// Permissions:
	//    - the new image at UPAGES -- kernel R, user R
	//      (ie. perm = PTE_U | PTE_P)
	//    - pages itself -- kernel RW, user NONE
	// Your code goes here:
    boot_map_region(kern_pgdir, (uintptr_t) UPAGES, npages * sizeof(*pages),
                    PADDR(pages), PTE_U | PTE_P);

    boot_map_region(kern_pgdir, (uintptr_t) pages, npages * sizeof(*pages),
                    PADDR(pages), PTE_W | PTE_P);

	//////////////////////////////////////////////////////////////////////
	// Map the 'envs' array read-only by the user at linear address UENVS
	// (ie. perm = PTE_U | PTE_P).
	// Permissions:
	//    - the new image at UENVS  -- kernel R, user R
	//    - envs itself -- kernel RW, user NONE
	// LAB 3: Your code here.

    boot_map_region(kern_pgdir, (uintptr_t) UENVS, NENV * sizeof(*envs),
                    PADDR(envs), PTE_U | PTE_P);

    boot_map_region(kern_pgdir, (uintptr_t) envs, NENV * sizeof(*envs),
                    PADDR(envs), PTE_W | PTE_P);

	//////////////////////////////////////////////////////////////////////
	// Use the physical memory that 'bootstack' refers to as the kernel
	// stack.  The kernel stack grows down from virtual address KSTACKTOP.
	// We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP)
	// to be the kernel stack, but break this into two pieces:
	//     * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
	//     * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if
	//       the kernel overflows its stack, it will fault rather than
	//       overwrite memory.  Known as a "guard page".
	//     Permissions: kernel RW, user NONE
	// Your code goes here:

    boot_map_region( kern_pgdir, KSTACKTOP-KSTKSIZE, KSTKSIZE,
                     PADDR(bootstack), PTE_W | PTE_P );

	//////////////////////////////////////////////////////////////////////
	// Map all of physical memory at KERNBASE.
	// Ie.  the VA range [KERNBASE, 2^32) should map to
	//      the PA range [0, 2^32 - KERNBASE)
	// We might not have 2^32 - KERNBASE bytes of physical memory, but
	// we just set up the mapping anyway.
	// Permissions: kernel RW, user NONE
	// Your code goes here:
    
    boot_map_region(kern_pgdir, (uintptr_t) KERNBASE, -KERNBASE, 0,
                    PTE_W | PTE_P);	// size -KERNBASE wraps to 2^32 - KERNBASE

	// Initialize the SMP-related parts of the memory map
	mem_init_mp();

	// Check that the initial page directory has been set up correctly.
	check_kern_pgdir();

	// Switch from the minimal entry page directory to the full kern_pgdir
	// page table we just created.	Our instruction pointer should be
	// somewhere between KERNBASE and KERNBASE+4MB right now, which is
	// mapped the same way by both page tables.
	//
	// If the machine reboots at this point, you've probably set up your
	// kern_pgdir wrong.
	lcr3(PADDR(kern_pgdir));

	check_page_free_list(0);

	// entry.S set the really important flags in cr0 (including enabling
	// paging).  Here we configure the rest of the flags that we care about.
	cr0 = rcr0();
	cr0 |= CR0_PE|CR0_PG|CR0_AM|CR0_WP|CR0_NE|CR0_MP;
	cr0 &= ~(CR0_TS|CR0_EM);
	lcr0(cr0);

	// Some more checks, only possible after kern_pgdir is installed.
	check_page_installed_pgdir();
}
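
The constants mem_init() maps against come from inc/memlayout.h. For orientation, the stock JOS lab 4 values are sketched below; trust your tree's header over this excerpt.

#define KERNBASE	0xF0000000
#define KSTACKTOP	KERNBASE
#define MMIOLIM		(KSTACKTOP - PTSIZE)
#define MMIOBASE	(MMIOLIM - PTSIZE)
#define ULIM		(MMIOBASE)
#define UVPT		(ULIM - PTSIZE)
#define UPAGES		(UVPT - PTSIZE)
#define UENVS		(UPAGES - PTSIZE)
#define UTOP		UENVS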
Example #9
File: e1000.c  Project: bosswissam/djos
// LAB 6: Your driver code here
int
e1000_attach(struct pci_func *pcif)
{
	uint32_t i;

	// Enable PCI device
	pci_func_enable(pcif);

	// Memory map I/O for PCI device
	boot_map_region(kern_pgdir, E1000_MMIOADDR,
			pcif->reg_size[0], pcif->reg_base[0], 
			PTE_PCD | PTE_PWT | PTE_W);
	e1000 = (uint32_t *) E1000_MMIOADDR;

	assert(e1000[E1000_STATUS] == 0x80080783);

	// Initialize tx buffer array
	memset(tx_desc_array, 0x0, sizeof(struct tx_desc) * E1000_TXDESC);
	memset(tx_pkt_bufs, 0x0, sizeof(struct tx_pkt) * E1000_TXDESC);
	for (i = 0; i < E1000_TXDESC; i++) {
		tx_desc_array[i].addr = PADDR(tx_pkt_bufs[i].buf);
		tx_desc_array[i].status |= E1000_TXD_STAT_DD;
	}

	// Initialize rcv desc buffer array
	memset(rcv_desc_array, 0x0, sizeof(struct rcv_desc) * E1000_RCVDESC);
	memset(rcv_pkt_bufs, 0x0, sizeof(struct rcv_pkt) * E1000_RCVDESC);
	for (i = 0; i < E1000_RCVDESC; i++) {
		rcv_desc_array[i].addr = PADDR(rcv_pkt_bufs[i].buf);
	}

	/* Transmit initialization */
	// Program the Transmit Descriptor Base Address Registers
	e1000[E1000_TDBAL] = PADDR(tx_desc_array);
	e1000[E1000_TDBAH] = 0x0;

	// Set the Transmit Descriptor Length Register
	e1000[E1000_TDLEN] = sizeof(struct tx_desc) * E1000_TXDESC;

	// Set the Transmit Descriptor Head and Tail Registers
	e1000[E1000_TDH] = 0x0;
	e1000[E1000_TDT] = 0x0;

	// Initialize the Transmit Control Register 
	e1000[E1000_TCTL] |= E1000_TCTL_EN;
	e1000[E1000_TCTL] |= E1000_TCTL_PSP;
	e1000[E1000_TCTL] &= ~E1000_TCTL_CT;
	e1000[E1000_TCTL] |= (0x10) << 4;	// collision threshold = 0x10
	e1000[E1000_TCTL] &= ~E1000_TCTL_COLD;
	e1000[E1000_TCTL] |= (0x40) << 12;	// collision distance = 0x40 (full duplex)

	// Program the Transmit IPG Register
	e1000[E1000_TIPG] = 0x0;
	e1000[E1000_TIPG] |= (0x6) << 20; // IPGR2 
	e1000[E1000_TIPG] |= (0x4) << 10; // IPGR1
	e1000[E1000_TIPG] |= 0xA; // IPGT

	/* Receive Initialization */
	// Read the MAC address from the EEPROM and program the Receive Address Registers
	e1000[E1000_EERD] = 0x0;
	e1000[E1000_EERD] |= E1000_EERD_START;
	while (!(e1000[E1000_EERD] & E1000_EERD_DONE));
	e1000[E1000_RAL] = e1000[E1000_EERD] >> 16;

	e1000[E1000_EERD] = 0x1 << 8;
	e1000[E1000_EERD] |= E1000_EERD_START;
	while (!(e1000[E1000_EERD] & E1000_EERD_DONE));
	e1000[E1000_RAL] |= e1000[E1000_EERD] & 0xffff0000;

	e1000[E1000_EERD] = 0x2 << 8;
	e1000[E1000_EERD] |= E1000_EERD_START;
	while (!(e1000[E1000_EERD] & E1000_EERD_DONE));
	e1000[E1000_RAH] = e1000[E1000_EERD] >> 16;

	e1000[E1000_RAH] |= 0x1 << 31;

	// Program the Receive Descriptor Base Address Registers
	e1000[E1000_RDBAL] = PADDR(rcv_desc_array);
	e1000[E1000_RDBAH] = 0x0;

	// Set the Receive Descriptor Length Register
	e1000[E1000_RDLEN] = sizeof(struct rcv_desc) * E1000_RCVDESC;

	// Set the Receive Descriptor Head and Tail Registers
	e1000[E1000_RDH] = 0x0;
	e1000[E1000_RDT] = 0x0;

	// Initialize the Receive Control Register
	e1000[E1000_RCTL] |= E1000_RCTL_EN;
	e1000[E1000_RCTL] &= ~E1000_RCTL_LPE;
	e1000[E1000_RCTL] &= ~E1000_RCTL_LBM;
	e1000[E1000_RCTL] &= ~E1000_RCTL_RDMTS;
	e1000[E1000_RCTL] &= ~E1000_RCTL_MO;
	e1000[E1000_RCTL] |= E1000_RCTL_BAM;
	e1000[E1000_RCTL] &= ~E1000_RCTL_SZ; // 2048 byte size
	e1000[E1000_RCTL] |= E1000_RCTL_SECRC;

	return 0;
}
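
The three inline EEPROM reads above repeat one pattern; factored out, it might look like the helper below. eeprom_read is a name introduced here for illustration, not part of the project.

// Hypothetical helper: read one 16-bit word from the EEPROM via EERD
// (word address goes in bits 15:8, data comes back in bits 31:16 once
// the DONE bit is set), using only constants the driver above defines.
static uint16_t
eeprom_read(uint8_t word_addr)
{
	e1000[E1000_EERD] = (word_addr << 8) | E1000_EERD_START;
	while (!(e1000[E1000_EERD] & E1000_EERD_DONE))
		;	// spin until the hardware latches the data
	return e1000[E1000_EERD] >> 16;
}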