Example 1
// TODO: Should a lock be used to access kernel_pmap?
// Set up the kernel's pmap
void _pmap_kernel_init() {
	// The kernel's pgd has already been set up and we know where it is via the linker script symbols
	kernel_pmap.pgd = (pgd_t*)(PGDPHYSICALBASEADDR-MEMBASEADDR+KVIRTUALBASEADDR);
	kernel_pmap.pgd_pa = (paddr_t)(PGDPHYSICALBASEADDR);

	// Need to allocate memory for the pgt_entry structs
	// We are too early in the bootstrap process to be able to use the heap so we need to use _pmap_bootstrap_memory
	pgt_entry_t *pentries = (pgt_entry_t*)_pmap_bootstrap_memory(sizeof(pgt_entry_t) * (uint32_t)(NUMPAGETABLES));

	kernel_pmap.pgt_entry_head = pentries;

	for(uint32_t i = 0, n_pgt = (uint32_t)(NUMPAGETABLES); i < n_pgt; i++) {
		// Get the location of the page table
		pentries[i].pgt = &KERNEL_PGTS_BASE[i];

		// Assign the next pgt_entry
		pentries[i].next = ((i+1) < n_pgt) ? &pentries[i+1] : NULL;

	// The kernel virtual address space is always the last n MB, thus the page tables will always be
	// mapped to the last n entries in the page directory, where n == n_pgt
		pentries[i].offset = KERNEL_PGD_PGT_INDEX_BASE + i;
	}

	// Remove the identity mapped section
	kernel_pmap.pgd->pde[PGD_GET_INDEX(MEMBASEADDR)] = 0x0;

	// Finally increment the reference count on the pmap. The refcount for kernel_pmap should never be 0.
	pmap_reference(pmap_kernel());
}
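
To make the `offset` arithmetic in the loop above concrete, here is a small worked example. It assumes an ARMv7 short-descriptor layout (a 16 KiB page directory with 4096 one-MiB entries) and a made-up kernel virtual base of 0xF0000000; neither constant is taken from the original source.

/*
 * Hypothetical illustration of KERNEL_PGD_PGT_INDEX_BASE, assuming 4096
 * page-directory entries of 1 MiB each and an example kernel base of 0xF0000000.
 */
#define EXAMPLE_KVIRTUALBASE    0xF0000000u
#define EXAMPLE_PDE_COVERAGE    0x00100000u                                           /* 1 MiB per page-directory entry */
#define EXAMPLE_N_PGT           ((0u - EXAMPLE_KVIRTUALBASE) / EXAMPLE_PDE_COVERAGE)  /* 256 page tables for the last 256 MiB */
#define EXAMPLE_PGT_INDEX_BASE  (4096u - EXAMPLE_N_PGT)                               /* 3840, i.e. 0xF0000000 >> 20 */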
Example 2
// TODO: Refactor this disgusting code
void _vmap_kernel_init() {
  // Get a reference to the kernel's pmap
  kernel_vmap.pmap = pmap_kernel();
  pmap_reference(kernel_vmap.pmap);

  // We need to create three regions (text/data, heap, and stack) along with their vm objects.
  kernel_vmap.regions = (vregion_t*)vmm_km_zalloc(sizeof(vregion_t) * 3);

  // We need to get the start and end addresses of the virtual memory regions of the kernel
  vaddr_t vas[6] = {(uintptr_t)(&__kernel_virtual_start), (uintptr_t)(&__kernel_virtual_end), 0, 0, (uintptr_t)(&__svc_stack_limit), 0};
  
  // vas[4] holds __svc_stack_limit, the lowest address of the SVC stack's virtual memory region.
  // Since the stack grows downward and the kernel stacks are 4096 bytes in size, the end of the
  // region is vas[4]+PAGESIZE.
  // TODO: The stack size shouldn't be assumed to equal PAGESIZE. What if we change the size of the kernel stacks? Maybe have a STACKSIZE?
  vas[5] = vas[4]+PAGESIZE;

  kernel_vmap.text_start = vas[0];
  kernel_vmap.text_end = vas[1];
  kernel_vmap.data_start = vas[0];
  kernel_vmap.data_end = vas[1];
  kernel_vmap.stack_start = vas[5];
  
  // The kernel heap doesn't exist yet
  // These values need to be updated after the final vmm_km_zalloc call
  kernel_vmap.heap_start = vas[2];
  kernel_vmap.heap_end = vas[3];

  // Now lets populate each of the vregion structs
  // Note: the indexing operator only works here because the vregions were allocated contiguously; normally they are traversed as a linked list (see the sketch after this function).
  vm_prot_t prots[3] = {VM_PROT_ALL, VM_PROT_DEFAULT, VM_PROT_DEFAULT};
  for(uint32_t i = 0, num_regions = 3; i < num_regions; i++) {
    kernel_vmap.regions[i].vstart = vas[(i*2)];
    kernel_vmap.regions[i].vend = ROUND_PAGE(vas[(i*2)+1]);
    kernel_vmap.regions[i].vm_prot = prots[i];
    kernel_vmap.regions[i].needs_copy = kernel_vmap.regions[i].copy_on_write = 0;
    
    // Populate the amaps
    uint32_t num_pages = (uint32_t)((kernel_vmap.regions[i].vend - kernel_vmap.regions[i].vstart) / PAGESIZE);
    if(num_pages > 0) {
      kernel_vmap.regions[i].aref.amap = (vm_amap_t*)vmm_km_zalloc(sizeof(vm_amap_t));
      kernel_vmap.regions[i].aref.slotoff = 0;
      kernel_vmap.regions[i].aref.amap->maxslots = kernel_vmap.regions[i].aref.amap->nslots = num_pages;
      kernel_vmap.regions[i].aref.amap->refcount = 1;
      kernel_vmap.regions[i].aref.amap->aslots = (vm_anon_t**)vmm_km_zalloc(sizeof(vm_anon_t*) * num_pages);

      // Populate the anon structs and put them in amap.aslots
      for(uint32_t j = 0; j < num_pages; j++) {
        vm_anon_t *anon = (vm_anon_t*)vmm_km_zalloc(sizeof(vm_anon_t));
        anon->page = (vpage_t*)vmm_km_zalloc(sizeof(vpage_t));
        anon->page->vaddr = kernel_vmap.regions[i].vstart + (j * PAGESIZE);
        kernel_vmap.regions[i].aref.amap->aslots[j] = anon;
        anon->refcount = 1;
      }
    }
    
    if((i+1) != num_regions) kernel_vmap.regions[i].next = &kernel_vmap.regions[i+1];
    else kernel_vmap.regions[i].next = NULL;
  }
}
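
Since _vmap_kernel_init links the regions through next, other code would normally walk them as a list rather than by index. A minimal traversal sketch follows; the vregion_count helper is hypothetical and not part of the original source.

// Hypothetical helper, not in the original source: counts the regions of a vmap
// by following the next pointers that _vmap_kernel_init chains together above.
static uint32_t vregion_count(vregion_t *region) {
  uint32_t count = 0;
  for (; region != NULL; region = region->next) {
    count++;
  }
  return count;
}

// e.g. vregion_count(kernel_vmap.regions) would return 3 after _vmap_kernel_init runs.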
Example 3
pmap_t* pmap_create() {
  pmap_t *pmap = (pmap_t*)kheap_alloc(sizeof(pmap_t));
  memset(pmap, 0, sizeof(pmap_t));

  // Create pgd
  // TODO: This will not work! We need to allocate 16 KiB of contiguous memory aligned to a 16 KiB address boundary
  pmap->pgd = (pgd_t*)kheap_alloc(sizeof(pgd_t));
  memset(pmap->pgd, 0, sizeof(pgd_t));

  // Get the physical address of the pgd
  pmap->pgd_pa = TRUNC_PAGE(KERNEL_PGTS_BASE[PGD_GET_INDEX((vaddr_t)pmap->pgd)-KERNEL_PGD_PGT_INDEX_BASE].pte[PGT_GET_INDEX((vaddr_t)pmap->pgd)]);

  pmap_reference(pmap);
  return pmap;
}
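
The TODO above reflects the ARM requirement that the level-1 page directory be 16 KiB of physically contiguous memory on a 16 KiB boundary, which a plain kheap_alloc does not guarantee. A common workaround for the alignment half of the problem is to over-allocate and round up, sketched below; the helper is hypothetical, it does not make the backing memory physically contiguous, and the raw pointer would have to be kept around to free the block later.

#define PGD_SIZE  (16 * 1024)
#define PGD_ALIGN (16 * 1024)

// Hypothetical helper, not in the original source: over-allocate by the alignment
// and round the returned address up to the next 16 KiB boundary.
static pgd_t* pgd_alloc_aligned(void) {
  uintptr_t raw = (uintptr_t)kheap_alloc(PGD_SIZE + PGD_ALIGN - 1);
  uintptr_t aligned = (raw + PGD_ALIGN - 1) & ~((uintptr_t)PGD_ALIGN - 1);
  return (pgd_t*)aligned;
}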
Example 4
/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *      by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */
struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *min, vaddr_t *max, vsize_t size,
    int flags, boolean_t fixed, struct vm_map *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
	       panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, flags);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug in it...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}
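
For context, a typical caller carves a submap out of kernel_map roughly as follows. The subsystem name, size, and flags below are illustrative and not taken from any particular BSD source file.

/*
 * Illustrative use only: carve a small submap out of kernel_map so a subsystem
 * can manage (and lock) its own chunk of kernel virtual address space.
 * Passing preallocated storage (my_submap_store) keeps uvm_km_suballoc from
 * having to allocate the map structure itself.
 */
static struct vm_map my_submap_store;
static struct vm_map *my_submap;

void
my_subsystem_map_init(void)
{
	vaddr_t minaddr, maxaddr;

	my_submap = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * PAGE_SIZE, 0, FALSE, &my_submap_store);
}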
Example 5
void
kmem_submap(
	vm_map_t 	map, 
	vm_map_t 	parent,
	vm_offset_t 	*min, 
	vm_offset_t 	*max,
	vm_size_t 	size,
	boolean_t 	pageable)
{
	vm_offset_t addr;
	kern_return_t kr;

	size = round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	addr = vm_map_min(parent);
	kr = vm_map_enter(parent, &addr, size,
			  (vm_offset_t) 0, TRUE,
			  vm_submap_object, (vm_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		panic("kmem_submap");

	pmap_reference(vm_map_pmap(parent));
	vm_map_setup(map, vm_map_pmap(parent), addr, addr + size, pageable);
	kr = vm_map_submap(parent, addr, addr + size, map);
	if (kr != KERN_SUCCESS)
		panic("kmem_submap");

	*min = addr;
	*max = addr + size;
}
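
The Mach variant follows the same pattern, except that it initializes an existing map with vm_map_setup rather than creating one, so the caller must supply the vm_map structure. The sketch below is illustrative only; the map name, size, and the use of file-scope storage for the map are assumptions, not taken from a specific Mach source file.

/*
 * Illustrative use only: set up a pageable submap of kernel_map and record its
 * bounds.  Storage for the map structure is provided by the caller here, since
 * kmem_submap() initializes an existing map via vm_map_setup().
 */
static struct vm_map my_submap_store;
vm_map_t	my_submap = &my_submap_store;
vm_offset_t	my_submap_min, my_submap_max;

void
my_submap_init(void)
{
	kmem_submap(my_submap, kernel_map, &my_submap_min, &my_submap_max,
		    64 * 1024, TRUE);
}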