Example #1
void omFreeBinPages(omBinPage bin_page, int how_many)
{
  omBinPageRegion region = bin_page->region;

  region->used_pages -= how_many;
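  /* If the region no longer backs any live pages, take it out of the
     region list and release it, moving om_CurrentBinPageRegion off it
     first if necessary. */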
  if (region->used_pages == 0)
  {
    if (region == om_CurrentBinPageRegion)
    {
      if (region->next != NULL)
        om_CurrentBinPageRegion = region->next;
      else
        om_CurrentBinPageRegion = region->prev;
    }
    omTakeOutRegion(region);
    omFreeBinPagesRegion(region);
  }
  else
  {
    if (region != om_CurrentBinPageRegion && OM_IS_EMPTY_REGION(region))
    {
      omTakeOutRegion(region);
      omInsertRegionAfter(region, om_CurrentBinPageRegion);
    }
    if (how_many > 1)
    {
      int i = how_many;
      char* page = (char *)bin_page;

      while (i > 1)
      {
        NEXT_PAGE(page) = page + SIZEOF_SYSTEM_PAGE;
        page = NEXT_PAGE(page);
        i--;
      }
      NEXT_PAGE(page) = region->current;
    }
    else
    {
      NEXT_PAGE(bin_page) = region->current;
    }
    region->current = (void*) bin_page;
  }
  om_Info.AvailPages += how_many;
  om_Info.UsedPages -= how_many;
  OM_FREE_BINPAGE_HOOK;
}
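In omalloc, NEXT_PAGE reinterprets the first machine word of a free system page as a pointer to the next free page, so the free list lives inside the pages themselves and needs no side structures; the while loop above threads a freed multi-page run together in address order before pushing it onto region->current. A minimal standalone sketch of that intrusive-list idiom (the NEXT_PAGE and SIZEOF_SYSTEM_PAGE definitions below are assumptions for illustration, not omalloc's actual headers):

#include <stdio.h>
#include <stdlib.h>

#define SIZEOF_SYSTEM_PAGE 4096
/* Treat the first word of a free page as the link to the next free page. */
#define NEXT_PAGE(p) (*((void **)(p)))

int main(void)
{
  /* Carve three fake "pages" out of one suitably aligned block. */
  char *block = aligned_alloc(SIZEOF_SYSTEM_PAGE, 3 * SIZEOF_SYSTEM_PAGE);
  if (block == NULL) return 1;

  /* Thread the pages together, as the while loop in omFreeBinPages does. */
  char *page = block;
  for (int i = 0; i < 2; i++)
  {
    NEXT_PAGE(page) = page + SIZEOF_SYSTEM_PAGE;
    page = NEXT_PAGE(page);
  }
  NEXT_PAGE(page) = NULL;       /* terminate the list */

  /* Pop pages off the head, as omAllocBinPage does. */
  for (void *p = block; p != NULL; p = NEXT_PAGE(p))
    printf("free page at %p\n", p);

  free(block);
  return 0;
}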
Example #2
omBinPage omAllocBinPage()
{
  omBinPage bin_page;

  if (om_CurrentBinPageRegion == NULL)
    om_CurrentBinPageRegion = omAllocNewBinPagesRegion(1);

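  /* Try, in order: the current region's free-page list, its never-yet-used
     initial pages, the next region in the list, and finally a brand-new
     region. */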
  while (1)
  {
    if (om_CurrentBinPageRegion->current != NULL)
    {
      bin_page = om_CurrentBinPageRegion->current;
      om_CurrentBinPageRegion->current = NEXT_PAGE(bin_page);
      goto Found;
    }
    if (om_CurrentBinPageRegion->init_pages > 0)
    {
      bin_page = (omBinPage)om_CurrentBinPageRegion->init_addr;
      om_CurrentBinPageRegion->init_pages--;
      if (om_CurrentBinPageRegion->init_pages > 0)
        om_CurrentBinPageRegion->init_addr += SIZEOF_SYSTEM_PAGE;
      else
        om_CurrentBinPageRegion->init_addr = NULL;
      goto Found;
    }
    if (om_CurrentBinPageRegion->next != NULL)
    {
      om_CurrentBinPageRegion = om_CurrentBinPageRegion->next;
    }
    else
    {
      omBinPageRegion new_region = omAllocNewBinPagesRegion(1);
      new_region->prev = om_CurrentBinPageRegion;
      om_CurrentBinPageRegion->next = new_region;
      om_CurrentBinPageRegion = new_region;
    }
  }

  Found:
  bin_page->region = om_CurrentBinPageRegion;
  om_CurrentBinPageRegion->used_pages++;

  om_Info.UsedPages++;
  om_Info.AvailPages--;
  if (om_Info.UsedPages > om_Info.MaxPages)
    om_Info.MaxPages = om_Info.UsedPages;

  OM_ALLOC_BINPAGE_HOOK;
  return bin_page;
}
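Example #2 is the allocation side of the same scheme. Whichever of the three sources supplies the page, the Found epilogue stamps the page with its owning region and updates the usage statistics, so omFreeBinPages in Example #1 can later find its way back through bin_page->region.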
Example #3
static page_frame_t kernel_page_table_install(struct multiboot_info *mb) {
    // Several critical structures (the kernel image, the heap, video memory,
    // the page directory) already occupy physical memory. They must be marked
    // as used so that the frame allocator doesn't hand them out by accident.
    // Once marked, they can safely be mapped with map_page.

    // Iterate over the multiboot memory map table and mark certain addresses
    // as unusable.
    klogf("Marking unusable!\n");
    multiboot_memory_map_t *mm_last = (multiboot_memory_map_t*)(mb->mmap_addr +
        mb->mmap_length);
    for (multiboot_memory_map_t *mm = (multiboot_memory_map_t*)mb->mmap_addr;
         mm < mm_last;
         mm = (multiboot_memory_map_t*)((uintptr_t)mm +
                                        mm->size + sizeof(mm->size))) {
        // If the memory is not available
        if (mm->type != MULTIBOOT_MEMORY_AVAILABLE) {
            klogf("Unusable physical address %p of type %p and length %p\n",
                    mm->addr, mm->type, mm->len);
            for (uint64_t page = PAGE_ALIGN(mm->addr);
                 page < NEXT_PAGE(mm->addr+mm->len); page += PAGE_SIZE) {
                use_frame(page);
            }
        }
    }
    klogf("mm_addr table %p\n", mb->mmap_addr);
    klogf("apm table %p\n", mb->apm_table);
    klogf("fb table %p\n", mb->framebuffer_addr);
    klogf("vbe int off %p\n", mb->vbe_interface_off);

    // Catch NULL pointer dereferences
    use_frame(0);

    // Mark all the pages the kernel sits on as used
    use_range(KERNEL_START, KERNEL_END);

    // Mark the kernel heap as in use
    use_frame(KHEAP_PHYS_ROOT);
    // Mark video memory as in use
    use_range(VGA_BEGIN, VGA_END);

    // Mark the paging directory as in use
    use_frame(PAGE_DIRECTORY);
    page_frame_t page_dir = bootstrap_kernel_page_table();

    enable_paging(page_dir);
    return page_dir;
}
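Unlike the omalloc examples, NEXT_PAGE here is an address-rounding macro rather than a free-list link: the inner loop marks every frame touched by the half-open range [mm->addr, mm->addr + mm->len). A plausible pair of definitions consistent with that usage (assumed for illustration; the project's actual macros may differ):

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE  ((uint64_t)4096)
/* Round down to the start of the containing page. */
#define PAGE_ALIGN(a)  ((uint64_t)(a) & ~(PAGE_SIZE - 1))
/* Round up to the nearest page boundary at or above a. */
#define NEXT_PAGE(a)   PAGE_ALIGN((uint64_t)(a) + PAGE_SIZE - 1)

int main(void)
{
    assert(PAGE_ALIGN(0x1234) == 0x1000);
    assert(NEXT_PAGE(0x1234)  == 0x2000);
    assert(NEXT_PAGE(0x2000)  == 0x2000);  /* already aligned */
    /* The frames covering [addr, addr + len) then run from
       PAGE_ALIGN(addr) up to, but not including, NEXT_PAGE(addr + len). */
    return 0;
}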
Example #4
static void* omTakeOutConsecutivePages(omBinPageRegion region, int pages)
{
  void* current;
  char* iter;
  void* prev = NULL;
  void* bin_page;
  int found;
  current = region->current;
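  /* Scan the free list for a run of `pages' physically consecutive
     free pages. */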
  while (current != NULL)
  {
    found = 1;
    iter = current;
    while (NEXT_PAGE(iter) == (iter + SIZEOF_SYSTEM_PAGE))
    {
      iter = NEXT_PAGE(iter);
      /* handle pathological case that iter + SIZEOF_SYSTEM_PAGE == 0 */
      if (iter == NULL) return NULL;
      found++;
      if (found == pages)
      {
        bin_page = current;
        if (current == region->current)
        {
          region->current = NEXT_PAGE(iter);
        }
        else
        {
          omAssume(prev != NULL);
          NEXT_PAGE(prev) = NEXT_PAGE(iter);
        }
        return bin_page;
      }
    }
    prev = iter;
    current = NEXT_PAGE(iter);
  }
  return NULL;
}
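The scan exploits the address-ordered threading from Example #1: adjacency is visible directly in the links, since a run is detected whenever NEXT_PAGE(iter) equals iter + SIZEOF_SYSTEM_PAGE. A sufficiently long run is then unlinked in constant time, either by advancing region->current or by splicing prev past it; the NULL check covers the corner case where a run abuts the very top of the address space and the link arithmetic wraps to zero.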
Example #5
kern_return_t
kmem_alloc_contig(
	vm_map_t		map,
	vm_offset_t		*addrp,
	vm_size_t		size,
	vm_offset_t 		mask,
	ppnum_t			max_pnum,
	ppnum_t			pnum_mask,
	int 			flags)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_map_offset_t		map_addr; 
	vm_map_offset_t		map_mask;
	vm_map_size_t		map_size, i;
	vm_map_entry_t		entry;
	vm_page_t		m, pages;
	kern_return_t		kr;

	if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT))) 
		return KERN_INVALID_ARGUMENT;
	
	if (size == 0) {
		*addrp = 0;
		return KERN_INVALID_ARGUMENT;
	}

	map_size = vm_map_round_page(size);
	map_mask = (vm_map_offset_t)mask;

	/*
	 *	Allocate a new object (if necessary) and the reference we
	 *	will be donating to the map entry.  We must do this before
	 *	locking the map, or risk deadlock with the default pager.
	 */
	if ((flags & KMA_KOBJECT) != 0) {
		object = kernel_object;
		vm_object_reference(object);
	} else {
		object = vm_object_allocate(map_size);
	}

	kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, &entry);
	if (KERN_SUCCESS != kr) {
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = offset = (object == kernel_object) ? 
		        map_addr : 0;

	/* Take an extra object ref in case the map entry gets deleted */
	vm_object_reference(object);
	vm_map_unlock(map);

	kr = cpm_allocate(CAST_DOWN(vm_size_t, map_size), &pages, max_pnum, pnum_mask, FALSE, flags);

	if (kr != KERN_SUCCESS) {
		vm_map_remove(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		*addrp = 0;
		return kr;
	}

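	/* Detach each page from the contiguous chain returned by
	 * cpm_allocate, clear its busy bit, and insert it into the
	 * object at the matching offset. */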
	vm_object_lock(object);
	for (i = 0; i < map_size; i += PAGE_SIZE) {
		m = pages;
		pages = NEXT_PAGE(m);
		*(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL;
		m->busy = FALSE;
		vm_page_insert(m, object, offset + i);
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, vm_map_trunc_page(map_addr),
			      vm_map_round_page(map_addr + map_size), VM_PROT_DEFAULT, FALSE)) 
		!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + map_size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, vm_map_trunc_page(map_addr), 
			      vm_map_round_page(map_addr + map_size), 0);
		vm_object_deallocate(object);
		return kr;
	}
	vm_object_deallocate(object);

	if (object == kernel_object)
		vm_map_simplify(map, map_addr);

	*addrp = (vm_offset_t) map_addr;
	assert((vm_map_offset_t) *addrp == map_addr);
	return KERN_SUCCESS;
}
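In this last example, from the XNU kernel, NEXT_PAGE links vm_page_t structures rather than raw pages: cpm_allocate returns a chain of physically contiguous pages, which the loop dismantles one page at a time before the mapping is wired. Note the unwinding on each failure path: the map entry, the extra object reference, and any already-inserted pages are all released before the error is returned.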