Example #1
File: heap.c Project: zrho/Oxygen
uint64_t heap_sbrk(intptr_t increase)
{
    // Acquire lock
    spinlock_acquire(&heap_lock);
    
    // Increase?
    if (increase > 0) {
        // Align
        increase = mem_align((uintptr_t) increase, 0x1000);
        
        // Determine amount of pages
        size_t pages = increase / 0x1000;
        size_t i;
        
        for (i = 0; i < pages; ++i) {
            // Allocate frame
            uintptr_t phys = frame_alloc();
            
            // Map frame
            page_map(
                heap_begin + heap_length,
                phys,
                PG_PRESENT | PG_GLOBAL | PG_WRITABLE);
                
            // Increase length
            heap_length += 0x1000;
        }
        
    // Decrease
    } else if (increase < 0) {
        // Align decrease
        uintptr_t decrease = mem_align((uintptr_t) (-increase), 0x1000);
        
        // Determine amount of pages
        size_t pages = decrease / 0x1000;
        size_t i;
        
        for (i = 0; i < pages; ++i) {
            // Get (virtual) begin address of the last mapped page
            uintptr_t virt = heap_begin + heap_length - 0x1000;
            
            // Get physical address of the backing frame
            uintptr_t phys = page_get_physical(virt);
            
            // Unmap page
            page_unmap(virt);
            
            // Decrease length
            heap_length -= 0x1000;
        }
    }
    
    // Release lock
    spinlock_release(&heap_lock);
    
    // Beginning of the heap
    return heap_begin;
}
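A minimal usage sketch, assuming only the behaviour visible above: a positive argument maps that many bytes (rounded up to whole 4 KiB pages) at the end of the kernel heap, a negative argument unmaps pages from the end, and the return value is always heap_begin. The helper name is hypothetical.

// Hypothetical caller: map one scratch page at the end of the kernel heap,
// use it, then unmap it again. Relies only on heap_sbrk() as shown above.
static void heap_scratch_page_demo(void)
{
    heap_sbrk(0x1000);      // grow the heap by one page

    // ... the page just below the new heap end is now mapped and writable ...

    heap_sbrk(-0x1000);     // shrink the heap again, unmapping that page
}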
Example #2
int
ipc_port_send(const struct ipc_header *ipch, void *vpage)
{
	struct vm_page *page;
	struct task *task;
	struct vm *vm;
	int error;

	task = current_task();

	ASSERT(task != NULL, "Must have a running task.");
	ASSERT(ipch != NULL, "Must have a header.");

	/*
	 * Extract the vm_page for this page.
	 */
	if (vpage == NULL) {
		page = NULL;
	} else {
		if ((task->t_flags & TASK_KERNEL) == 0)
			vm = task->t_vm;
		else
			vm = &kernel_vm;
		error = page_extract(vm, (vaddr_t)vpage, &page);
		if (error != 0)
			return (error);
		if (vm == &kernel_vm) {
			error = page_unmap_direct(vm, page, (vaddr_t)vpage);
			if (error != 0)
				panic("%s: could not unmap direct page: %m", __func__, error);
		} else {
			error = page_unmap(vm, (vaddr_t)vpage, page);
			if (error != 0)
				panic("%s: could not unmap source page: %m", __func__, error);
			error = vm_free_address(vm, (vaddr_t)vpage);
			if (error != 0)
				panic("%s: could not free source page address: %m", __func__, error);
		}
	}

	error = ipc_port_send_page(ipch, page);
	if (error != 0) {
		if (page != NULL)
			page_release(page);
		return (error);
	}

	return (0);
}
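A minimal caller sketch, assuming only the contract visible above: when vpage is non-NULL the page is unmapped from the sender and handed to the port, and on failure ipc_port_send() has already released the extracted page, so a caller only propagates the error. The wrapper name is hypothetical.

/*
 * Hypothetical wrapper: send a header-only message when vpage is NULL,
 * otherwise hand the page at vpage over to the port along with the header.
 */
static int
send_with_optional_page(const struct ipc_header *ipch, void *vpage)
{
	int error;

	error = ipc_port_send(ipch, vpage);
	if (error != 0)
		return (error);

	/* On success the page (if any) now belongs to the receiver. */
	return (0);
}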
void *
Vmem_alloc::page_unmap (void *page)
{
  Address phys = Kmem::virt_to_phys(page);

  if (phys == (Address) -1)
    return 0;
  
  Address va = reinterpret_cast<Address>(page);
  void *res = (void*)Mem_layout::phys_to_pmem(phys);

  if (va < Mem_layout::Vmem_end)
    {
      // clean out page-table entry
      *(Kmem::kdir->walk(Virt_addr(va)).e) = 0;
      page_unmap (page, 0);
      Mem_unit::tlb_flush(va);
    }

  return res;
}
void
Vmem_alloc::page_free (void *page)
{
  Address phys = Kmem::virt_to_phys(page);

  if (phys == (Address) -1)
    return;

  // convert back to virt (do not use "page") to get canonic mapping
  Mapped_allocator::allocator()->free(Config::PAGE_SHIFT, // one page (2^PAGE_SHIFT bytes)
      Kmem::phys_to_virt(phys));

  Address va = reinterpret_cast<Address>(page);

  if (va < Mem_layout::Vmem_end)
    {
      // clean out page-table entry
      Kmem::kdir->walk(Virt_addr(va)).e->clear();
      page_unmap (page, 0);
      Mem_unit::tlb_flush(va);
    }
}
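A short sketch of how the two teardown paths above differ, assuming the Fiasco-style API shown: page_unmap() only removes the kernel mapping and returns the frame's address in the physical-memory window, while page_free() also returns the frame to the mapped allocator. The helper itself is hypothetical.

// Hypothetical helper built only on the two functions above.
static void *
release_kernel_page(void *vpage, bool keep_frame)
{
  if (keep_frame)
    return Vmem_alloc::page_unmap(vpage); // unmap only; frame stays allocated

  Vmem_alloc::page_free(vpage);           // unmap and free the frame
  return 0;
}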