Ejemplo n.º 1
0
/*
 * Enable the EMU power domain clocks on the OMAP4430.
 *
 * Maps the CM_EMU and CM_L3INSTR_L3 register pages, writes the clock
 * enable bits, then polls (up to GLOBAL_TIMEOUT iterations) until the
 * domain reports the expected clock-active state.
 *
 * Returns 0 on success, -1 on a mapping failure or on timeout.
 */
int omap4430_enable_emu()
{
	void *cm_emu;
	void *l3instr;
	int status = -1;
	int tries = GLOBAL_TIMEOUT;

	cm_emu = map_page(OMAP4430_CM_EMU);
	if (cm_emu == NULL)
		return status;

	l3instr = map_page(OMAP4430_CM_L3INSTR_L3);
	if (l3instr == NULL) {
		unmap_page(cm_emu);
		return status;
	}

	/* Enable clocks. */
	__writel(0x2, cm_emu + 0xA00);
	__writel(0x1, l3instr + 0xE20);
	__writel(0x1, l3instr + 0xE28);

	/* Poll until the module reports the clocks as active. */
	while (--tries) {
		if (((__readl(cm_emu + 0xA00) & 0xf00) == 0x300)
			&& (__readl(cm_emu + 0xA20) & 0x40000)) {
			status = 0;
			break;
		}
	}

	unmap_page(l3instr);
	unmap_page(cm_emu);
	return status;
}
/*
 * Remove a specific physical page from the singly-linked free-page stack.
 *
 * The stack is walked through the fixed Physical_Page_Stack virtual
 * window, remapping it to each node in turn.  When a node whose `next`
 * field equals `address` is found, the victim is mapped once to read its
 * successor, then the predecessor is remapped and its link spliced
 * around the victim.
 *
 * Returns Ok when the page was found and unlinked, Error_Absent when
 * `address` is not on the stack.
 *
 * Fixes relative to the previous version:
 *  - `current` was advanced to the victim before the predecessor check,
 *    so the splice wrote the victim's own `next` field (a no-op) instead
 *    of unlinking it from the predecessor.
 *  - `status` was reused for map_page()'s result, so a miss returned Ok
 *    instead of Error_Absent.
 *
 * NOTE(review): as before, the case where `address` is the top of the
 * stack (Physical_Page itself) is not handled here — confirm callers
 * deal with that separately.
 */
enum status physical_page_remove(phys_addr address) {
    enum status status = Error_Absent;
    assert(is_aligned(address, Page_Small));
    phys_addr original = Physical_Page;
    phys_addr current = original;
    lock_acquire_writer(&physical_allocator_lock);
    while (current != invalid_phys_addr) {
        enum status map_status;
        unmap_page(Physical_Page_Stack, false);
        map_status = map_page(Physical_Page_Stack, current, Memory_Writable);
        assert_ok(map_status);

        if (Physical_Page_Stack->next == address) {
            /* Map the victim node to fetch its successor. */
            unmap_page(Physical_Page_Stack, false);
            assert_ok(map_page(Physical_Page_Stack, address, Memory_Writable));
            phys_addr next_next = Physical_Page_Stack->next;
            /* Remap the predecessor and splice around the victim. */
            unmap_page(Physical_Page_Stack, false);
            assert_ok(map_page(Physical_Page_Stack, current, Memory_Writable));
            Physical_Page_Stack->next = next_next;
            status = Ok;
            break;
        }
        current = Physical_Page_Stack->next;
    }
    /* Restore the window to the stack top before releasing the lock. */
    unmap_page(Physical_Page_Stack, false);
    assert_ok(map_page(Physical_Page_Stack, original, Memory_Writable));
    lock_release_writer(&physical_allocator_lock);
    return status;
}
Ejemplo n.º 3
0
/*
 * Decommit and/or release a range of virtual memory.
 *
 * addr/size describe the range (addr is rounded down to a page
 * boundary); type is a mask of MEM_DECOMMIT and/or MEM_RELEASE.
 * Returns 0 on success or a negative error code.
 */
int vmfree(void *addr, unsigned long size, int type) {
    struct filemap *fm = NULL;   /* filemap currently being accounted */
    int pages = PAGES(size);
    int i, rc;
    char *vaddr;

    if (size == 0) return 0;
    /* Operate on whole pages starting at addr's page. */
    addr = (void *) PAGEADDR(addr);
    if (!valid_range(addr, size)) return -EINVAL;

    if (type & (MEM_DECOMMIT | MEM_RELEASE)) {
        vaddr = (char *) addr;
        for (i = 0; i < pages; i++) {
            if (page_directory_mapped(vaddr)) {
                pte_t flags = get_page_flags(vaddr);
                unsigned long pfn = BTOP(virt2phys(vaddr));

                if (flags & PT_FILE) {
                    /* File-backed page: when present, the frame's owner
                     * is the filemap handle; when not present, the PTE's
                     * frame field itself holds the handle. */
                    handle_t h = (flags & PT_PRESENT) ? pfdb[pfn].owner : pfn;
                    struct filemap *newfm = (struct filemap *) hlookup(h);
                    if (newfm != fm) {
                        /* Crossed into a different filemap: settle the
                         * previous one (free it if it has no pages left,
                         * otherwise just unlock) before locking the new. */
                        if (fm) {
                            if (fm->pages == 0) {
                                rc = free_filemap(fm);
                            } else {
                                rc = unlock_filemap(fm);
                            }
                            if (rc < 0) return rc;
                        }
                        fm = newfm;
                        rc = wait_for_object(fm, INFINITE);
                        if (rc < 0) return rc;
                    }
                    fm->pages--;
                    unmap_page(vaddr);
                    if (flags & PT_PRESENT) free_pageframe(pfn);
                } else  if (flags & PT_PRESENT) {
                    /* Anonymous present page: unmap and free its frame. */
                    unmap_page(vaddr);
                    free_pageframe(pfn);
                }
            }

            vaddr += PAGESIZE;
        }
    }

    /* Settle the last filemap touched, if any. */
    if (fm) {
        if (fm->pages == 0) {
            rc = free_filemap(fm);
        } else {
            rc = unlock_filemap(fm);
        }
        if (rc < 0) return rc;
    } else if (type & MEM_RELEASE) {
        /* NOTE(review): the vmap range is returned to the allocator only
         * when no filemap was encountered — confirm this asymmetry is
         * intentional for file-backed MEM_RELEASE. */
        rmap_free(vmap, BTOP(addr), pages);
    }

    return 0;
}
Ejemplo n.º 4
0
/*
** Decide whether page `mem` can be released after freeing `ptr`.
** If `ptr`'s allocation spans the whole page, or the page holds no
** live data once `ptr` is deleted, the page is unmapped and the
** result of unmap_page() is returned; otherwise 0.
*/
static char	evaluate_freeable(char *ptr, char *previous, char *mem, char *next)
{
	if (ROUND_PAGE(*DATA_SIZE(ptr, 0) +
		PAGE_META + DATA_META) == *PAGE_SIZE(mem))
		return (unmap_page(previous, mem, next));
	delete_memory(ptr, mem);
	if (memory_is_set(mem + PAGE_META, *PAGE_SIZE(mem) - PAGE_META))
		return (0);
	return (unmap_page(previous, mem, next));
}
Ejemplo n.º 5
0
/*
 * MPT kernel test 1: exercises map_page/unmap_page for one virtual
 * address in container 1, checking both page-table levels before the
 * map, after the map, and after the unmap.
 * Returns 0 when every check passes, 1 on the first failure.
 */
int MPTKern_test1()
{
  unsigned int va = 4096 * 1024 * 300;

  container_split(0, 100);

  /* Nothing should be mapped at va yet, at either level. */
  if (get_ptbl_entry_by_va(1, va) != 0) {
    dprintf("test 1.1 failed.\n");
    return 1;
  }
  if (get_pdir_entry_by_va(1, va) != 0) {
    dprintf("test 1.2 failed.\n");
    return 1;
  }

  /* After mapping, both levels must hold an entry. */
  map_page(1, va, 100, 7);
  if (get_ptbl_entry_by_va(1, va) == 0) {
    dprintf("test 1.3 failed.\n");
    return 1;
  }
  if (get_pdir_entry_by_va(1, va) == 0) {
    dprintf("test 1.4 failed.\n");
    return 1;
  }

  /* Unmapping must clear the page-table entry again. */
  unmap_page(1, va);
  if (get_ptbl_entry_by_va(1, va) != 0) {
    dprintf("test 1.5 failed.\n");
    return 1;
  }

  dprintf("test 1 passed.\n");
  return 0;
}
Ejemplo n.º 6
0
/*
 * Fault one page of a file mapping into memory.
 *
 * Allocates a page frame, maps it writable at `addr`, fills it from the
 * backing file, then remaps it with the mapping's real protection and
 * records the filemap as the frame's owner.
 *
 * Returns 0 on success, -EBADF if the file handle cannot be locked,
 * -ENOMEM if no frame is available, or a negative read error.
 */
static int fetch_file_page(struct filemap *fm, void *addr) {
    struct file *file;
    unsigned long frame;
    unsigned long page_off;
    int rc;

    file = (struct file *) olock(fm->file, OBJECT_FILE);
    if (!file) return -EBADF;

    frame = alloc_pageframe('FMAP');
    if (frame == 0xFFFFFFFF) {
        orel(file);
        return -ENOMEM;
    }

    /* Map writable so the read below can fill the page. */
    map_page(addr, frame, PT_WRITABLE | PT_PRESENT);

    page_off = (char *) addr - fm->addr;
    rc = pread(file, addr, PAGESIZE, fm->offset + page_off);
    if (rc < 0) {
        orel(file);
        unmap_page(addr);
        free_pageframe(frame);
        return rc;
    }

    pfdb[frame].owner = fm->self;
    /* Remap with the mapping's configured protection. */
    map_page(addr, frame, fm->protect | PT_PRESENT);

    orel(file);
    return 0;
}
Ejemplo n.º 7
0
/*
 * Unmap a memory-mapped I/O range of `size` bytes starting at `addr`
 * and return its virtual address range to the vmap allocator.
 */
void miounmap(void *addr, int size) {
    int pages = PAGES(size);
    int i;

    for (i = 0; i < pages; i++) {
        unmap_page((char *) addr + PTOB(i));
    }
    rmap_free(vmap, BTOP(addr), pages);
}
/* Dump every node of the physical free-page stack to the debug log,
 * then restore the stack window to the original top node. */
void physical_stack_debug(void) {
    phys_addr top = Physical_Page;
    phys_addr cursor;

    logf(Log_Debug, "physical_stack_debug\n");
    lock_acquire_writer(&physical_allocator_lock);
    for (cursor = top; cursor != invalid_phys_addr;
         cursor = Physical_Page_Stack->next) {
        /* Point the fixed window at this node and log it. */
        unmap_page(Physical_Page_Stack, false);
        assert_ok(map_page(Physical_Page_Stack, cursor, Memory_Writable));
        logf(Log_Debug, "%lx %zu\n", cursor, Physical_Page_Stack->length);
    }
    logf(Log_Debug, "%lx\n", cursor);
    unmap_page(Physical_Page_Stack, false);
    assert_ok(map_page(Physical_Page_Stack, top, Memory_Writable));
    lock_release_writer(&physical_allocator_lock);
}
/* Pop one Page_Small frame off the physical free-page stack.
 * Frames are carved from the tail of the top node; when the node's
 * length reaches zero, the node's own frame is handed out and the
 * stack advances to its successor.
 * Returns the physical address of the frame, or invalid_phys_addr if
 * the successor node could not be mapped. */
phys_addr physical_alloc(void) {
    phys_addr phys;
    phys_addr next;
    lock_acquire_writer(&physical_allocator_lock);
    /* NOTE(review): assumes the stack is non-empty and the top node's
     * length is nonzero; a zero length would underflow below — confirm
     * callers guarantee this. */
    assert(is_aligned(Physical_Page_Stack->length, Page_Small));
    Physical_Page_Stack->length -= Page_Small;
    if (Physical_Page_Stack->length == 0) {
        /* Node exhausted: allocate the node's own frame and pop it. */
        phys = Physical_Page;
        next = Physical_Page_Stack->next;
        unmap_page(Physical_Page_Stack, false);
        if (next != invalid_phys_addr) {
            if (map_page(Physical_Page_Stack, next, Memory_Writable) != Ok) {
                /* NOTE(review): on this path the alignment asserts after
                 * `out:` run against invalid_phys_addr — verify they
                 * cannot fire. */
                phys = invalid_phys_addr;
                goto out;
            }
        }
        Physical_Page = next;
        assert(phys != next);
    } else {
        /* Carve the frame at the node's (shortened) tail. */
        phys = Physical_Page + Physical_Page_Stack->length;
    }
out:
    lock_release_writer(&physical_allocator_lock);
    assert((phys & 0xfff) == 0);
    assert((phys & 0xffff000000000000) == 0);
    assert(phys != Physical_Page);
    return phys;
}
Ejemplo n.º 10
0
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
/*
 * Tear down a streaming DMA mapping created for a page: log the request
 * and delegate to unmap_page(), which copies bounce-buffer contents back
 * and frees the safe buffer when one was used.
 */
void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	/* %zu: `size` is a size_t — the old "%d" specifier was a
	 * format/argument type mismatch (undefined behavior where
	 * size_t is not int). */
	dev_dbg(dev, "%s(ptr=%p,size=%zu,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_page(dev, dma_addr, size, dir);
}
Ejemplo n.º 11
0
/* Unmap the virtual memory */
/* Unmap virtual memory previously mapped for ACPI.
 * `Where` may be unaligned: the offset within its first page is added
 * to `Length` so every page the range touches gets unmapped. */
void AcpiOsUnmapMemory(void *Where, ACPI_SIZE Length)
{
	u32 span = Length + ((ACPI_PHYSICAL_ADDRESS)(Where) & 0xFFF);
	u32 page_count;
	u32 page;

	/* Round the byte span up to a whole number of 4 KiB pages. */
	if ((span & 0xFFF) != 0)
		span = (span & 0xFFFFF000) + 0x1000;
	page_count = span / 0x1000;

	for (page = 0; page < page_count; page++)
	{
		unmap_page(current->t_dir, (void*)((u32)Where + page * 0x1000));
	}
}
Ejemplo n.º 12
0
Archivo: vm.c Proyecto: semahawk/kernel
/*
 * Unmap <sz> memory of continuous pages
 */
/*
 * Unmap <sz> bytes of contiguous pages starting at <vaddr>.
 * The address is aligned down to a page boundary first; the trailing
 * partial page (if any) is counted so it gets unmapped too.
 *
 * NOTE(review): the page count is derived from sz alone, so an
 * unaligned vaddr may leave the range's last page mapped — this matches
 * the prior behavior; confirm callers always pass aligned addresses.
 */
void unmap_pages(void *vaddr, unsigned sz)
{
  if (sz == 0)
    return;

  void *page = PALIGNDOWN(vaddr);
  unsigned npages = sz / PAGE_SIZE + (sz % PAGE_SIZE > 0);

  for (unsigned n = 0; n < npages; n++) {
    unmap_page(page);
    page += PAGE_SIZE;   /* GCC void* arithmetic, as in the original */
  }
}
/* Push the physical range [address, address + length) onto the
 * free-page stack as a single node, making it the new top. */
static void physical_free_range(phys_addr address, size_t length) {
    phys_addr old_top;
    enum status status;

    assert(is_aligned(address, Page_Small));
    assert(is_aligned(length, Page_Small));
    assert((address & 0xfff) == 0);
    assert((length & 0xfff) == 0);
    assert(length > 0);
    assert((address & 0xffff000000000000) == 0);
    assert(((address + length) & 0xffff000000000000) == 0);

    lock_acquire_writer(&physical_allocator_lock);

    old_top = Physical_Page;
    /* Point the stack window at the new node and link it in. */
    unmap_page(Physical_Page_Stack, false);
    status = map_page(Physical_Page_Stack, address, Memory_Writable);
    assert_ok(status);
    Physical_Page_Stack->next = old_top;
    Physical_Page_Stack->length = length;
    Physical_Page = address;

    lock_release_writer(&physical_allocator_lock);
}
Ejemplo n.º 14
0
/*
 * Release the page-table mappings backing a region.
 *
 * Maps the address space's page directory into the kernel, walks the
 * PDEs/PTEs covering the region, deletes every present page entry,
 * then flushes the TLB and unmaps the directory window.
 *
 * Fix: REGION_ENTER(region) was invoked twice (once before and once
 * after the declarations); a single invocation is kept.  Indentation
 * of the inner delete loop and final unmap was also normalized.
 */
t_error			ia32_region_release(i_as			asid,
					    i_region			regid)
{
  t_ia32_pde			pde_start;
  t_ia32_pde			pde_end;
  t_ia32_pte			pte_start;
  t_ia32_pte			pte_end;
  t_ia32_table			table;
  t_ia32_directory		pd;
  o_as*				oas;
  t_vsize			size;
  t_paddr			base;
  t_vaddr			pd_addr;
  t_paddr			pt_addr;
  o_region*			oreg;
  int				i = 0;
  int				j = 0;
  t_ia32_pte*			t;

  REGION_ENTER(region);

  if (as_get(asid, &oas) != ERROR_NONE)
    REGION_LEAVE(region, ERROR_UNKNOWN);

  if (region_get(asid, regid, &oreg) != ERROR_NONE)
    REGION_LEAVE(region, ERROR_UNKNOWN);

  size = oreg->size;
  pd = oas->machdep.pd;
  base = MK_BASE(pd);

  /* Map the page directory into kernel space. */
  map_page(base, &pd_addr);

  pde_start = PDE_ENTRY(oreg->address);
  pde_end = PDE_ENTRY(oreg->address + size);
  pte_start = PTE_ENTRY(oreg->address);
  pte_end = PTE_ENTRY(oreg->address + size);

  for (i = pde_start; i <= pde_end; i++)
    {
      if (pd_get_table((t_ia32_directory *) &pd_addr, i, &table) != ERROR_UNKNOWN)
	{
	  /* Map this page table and work on the kernel-visible copy. */
	  map_page(table.entries, &pt_addr);
	  table.entries = pt_addr;
	}

      /* Delete every present page entry covered by the region. */
      for (j = (i == pde_start ? pte_start : 0); j <= (i == pde_end ? pte_end : 1023); j++)
	{
	  t = (t_ia32_pte*)table.entries;
	  if (t[j] != 0)
	    if (pt_delete_page(&table, j) != ERROR_NONE)
	      REGION_LEAVE(region, ERROR_UNKNOWN);
	}
      unmap_page(&pt_addr);
    }

  tlb_flush();
  unmap_page(&pd_addr);
  REGION_LEAVE(region, ERROR_NONE);
}
Ejemplo n.º 15
0
/*
 * Reserve a region by wiring page-table entries to a segment.
 *
 * Maps the address space's page directory, then for every PDE covered
 * by the region: creates (and zeroes) a page table if none exists,
 * maps it into the kernel, and fills the covered PTEs with successive
 * physical pages of the segment starting at `offset`.  Finishes with a
 * TLB flush and unmapping of the directory window.
 *
 * Fix: `page.present = 1;` was assigned twice per PTE; the duplicate
 * is removed.  Stray indentation of the final unmap was normalized.
 *
 * NOTE(review): segment_reserve() below overwrites the `segid`
 * parameter when allocating a fresh page table — confirm the caller
 * does not rely on segid afterwards.
 */
t_error			ia32_region_reserve(i_as			asid,
					i_segment		segid,
				     	t_paddr			offset,
				     	t_opts			opts,
				     	t_vaddr			address,
				     	t_vsize			size,
				     	i_region*		regid)
{
  t_ia32_pde			pde_start;
  t_ia32_pde			pde_end;
  t_ia32_pte			pte_start;
  t_ia32_pte			pte_end;
  t_ia32_table			table;
  t_ia32_directory		pd;
  o_as*				oas;
  t_ia32_page			page;
  t_paddr			base;
  t_paddr			ram_paddr;
  t_vaddr			pd_addr;
  t_paddr			pt_addr;
  o_segment*			oseg;
  o_region*			oreg;
  int				i = 0;
  int				j = 0;
  int				x = 0;
  int				clear = 0;

  REGION_ENTER(region);
  if (as_get(asid, &oas) != ERROR_NONE)
    REGION_LEAVE(region, ERROR_UNKNOWN);

  if (region_get(asid, *regid, &oreg) != ERROR_NONE)
    REGION_LEAVE(region, ERROR_UNKNOWN);

  if (segment_get(segid, &oseg) != ERROR_NONE)
    REGION_LEAVE(region, ERROR_UNKNOWN);

  ram_paddr = oseg->address;
  pd = oas->machdep.pd;
  base = MK_BASE(pd);

  /* Map the page directory into kernel space. */
  map_page(base, &pd_addr);

  pde_start = PDE_ENTRY(oreg->address);
  pde_end = PDE_ENTRY(oreg->address + size);
  pte_start = PTE_ENTRY(oreg->address);
  pte_end = PTE_ENTRY(oreg->address + size);

  for (i = pde_start; i <= pde_end; i++)
    {
      if (pd_get_table((t_ia32_directory *) &pd_addr, i, &table) == ERROR_UNKNOWN)
	{
	  /* No page table yet for this PDE: allocate and install one. */
	  segment_reserve(asid, PAGESZ, PERM_READ | PERM_WRITE, &segid);
	  if (segment_get(segid, &oseg) != ERROR_NONE)
	    REGION_LEAVE(region, ERROR_UNKNOWN);
	  table.rw = PDE_FLAG_RW;
	  table.present = 1;
	  table.user = (opts & REGION_OPT_USER) ? PT_USER : PT_PRIVILEGED;
	  table.writeback = PT_CACHED;
	  table.cached = 1;
	  pt_build(oseg->address, &table, 0);
	  pd_add_table((t_ia32_directory *) &pd_addr, i, table);
	  clear = 1;
	}
      else
	clear = 0;

      map_page(table.entries, &pt_addr);
      table.entries = pt_addr;

      /* A freshly allocated table starts out zeroed. */
      if (clear)
	memset((void*)pt_addr, '\0', PAGESZ);

      for (j = (i == pde_start ? pte_start : 0); j <= (i == pde_end ? pte_end : 1023); j++)
	{
	  page.addr = x + (offset + ram_paddr);
	  page.present = 1;
	  page.rw = (oseg->perms & PERM_WRITE) ? PG_WRITABLE : PG_READONLY;
	  page.user = (opts & REGION_OPT_USER) ? PG_USER : PG_PRIVILEGED;
	  page.cached = PG_CACHED;
	  pt_add_page(&table, j, page);
	  x += PAGESZ;
	}
      unmap_page(&pt_addr);
    }

  tlb_flush();
  unmap_page(&pd_addr);
  REGION_LEAVE(region, ERROR_NONE);
}
Ejemplo n.º 16
0
/* This function is the SIGSEGV handler for the virtual memory system.  If the
 * faulting address is within the user-space virtual memory pool then this
 * function responds appropriately to allow the faulting operation to be
 * retried.  If the faulting address falls outside of the virtual memory pool
 * then the segmentation fault is reported as an error.
 *
 * While a SIGSEGV is being processed, SIGALRM signals are blocked.  Therefore
 * a timer interrupt will never interrupt the segmentation-fault handler.
 */
/*
 * SIGSEGV handler for the simulated virtual memory system.
 *
 * For faulting addresses inside [vmem_start, vmem_end): SEGV_MAPERR
 * maps the page in (evicting a victim if resident pages are at the
 * maximum), and SEGV_ACCERR escalates the page's permission one step
 * (NONE -> READ -> RDWR), marking accessed/dirty bits along the way.
 * Addresses outside the pool abort the process.
 *
 * Fix: the PAGEPERM_RDWR error message used a backslash-newline string
 * continuation, which embedded the next source line's leading
 * indentation (a run of ~20 spaces) in the emitted text; it now uses
 * adjacent string literals with a single space.
 */
static void sigsegv_handler(int signum, siginfo_t *infop, void *data) {
    void *addr;
    page_t page;

    /* Only handle SIGSEGVs addresses in range */
    addr = infop->si_addr;
    if (addr < vmem_start || addr >= vmem_end) {
        fprintf(stderr, "segmentation fault at address %p\n", addr);
        abort();
    }

    num_faults++;

    /* Figure out what page generated the fault. */
    page = addr_to_page(addr);
    assert(page < NUM_PAGES);

#if VERBOSE
    fprintf(stderr,
        "================================================================\n");
    fprintf(stderr, "SIGSEGV:  Address %p, Page %u, Code %s (%d)\n",
           addr, page, signal_code(infop->si_code), infop->si_code);
#endif

    /* We really can't handle any other type of code.  On Linux this should be
     * fine though.
     */
    assert(infop->si_code == SEGV_MAPERR || infop->si_code == SEGV_ACCERR);

    /* Map the page into memory so that the fault can be resolved.  Of course,
     * this may result in some other page being unmapped.
     */

    /* Handle unmapped address (SEGV_MAPERR). */
    if (infop->si_code == SEGV_MAPERR) {
        /* Evict a page if we are at the residency limit. */
        assert(num_resident <= max_resident);
        if (num_resident == max_resident) {
            page_t victim = choose_victim_page();
            assert(is_page_resident(victim));
            unmap_page(victim);
            assert(!is_page_resident(victim));
        }

        /*
         * There should now be space, so load the new page. No permissions
         * initially.
         */
        assert(num_resident < max_resident);
        map_page(page, PAGEPERM_NONE);
    }

    /* Handle unpermitted access (SEGV_ACCERR). */
    else {
        /* Regardless of attempted read or write, it is now accessed. */
        set_page_accessed(page);
        assert(is_page_accessed(page));

        switch(get_page_permission(page)) {
            case PAGEPERM_NONE:
                /*
                 * Tried to read or write. Give read access, if it was a write
                 * then it will segfault again and read-write access will be
                 * given then.
                 */
                set_page_permission(page, PAGEPERM_READ);
                break;
            case PAGEPERM_READ:
                /* Tried to write, so make it read-write access. */
                set_page_permission(page, PAGEPERM_RDWR);
                /* Since it is a write it is also dirty. */
                set_page_dirty(page);
                assert(is_page_dirty(page));
                break;
            case PAGEPERM_RDWR:
                fprintf(stderr, "sigsegv_handler: got unpermitted access "
                        "error on page that already has read-write "
                        "permission.\n");
                abort();
                break;
        }
    }
}