Example no. 1
vaddr_t
VmMap::getFreeVAddr(paddr_t paddr, size_t siz)
{
    MemArea *_area;
    size_t len, off = paddr % PAGE_SIZE;
    vaddr_t vaddr;
    OUTDEBUG("[ VmMap ]");
    len = PAGE_ALIGN(off + siz);   /* whole pages needed, including the in-page offset */

    _area = &this->freeareas;
    do
    {
        if (len < _area->length)
        {
            /* first fit: carve len bytes off the front of this free area */
            vaddr = (vaddr_t) PAGE_TRUNC(_area->v_addr);
            _area->v_addr += len;
            _area->length -= len;
            _area->length = PAGE_TRUNC(_area->length);
            return vaddr;
        }
        _area = _area->next;
    }
    while (_area != &this->freeareas);

    ::cout << "getFree " << (int) ((len)) << "," << (int) (vaddr) << "\n";
    return NULL;
}
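
Throughout these examples, PAGE_TRUNC and PAGE_ALIGN do the page-rounding arithmetic. Below is a minimal standalone sketch using the conventional power-of-two definitions (assumed here, not taken from either kernel) to show how Example no. 1 derives off and len:

#include <stdio.h>

#define PAGE_SIZE 4096ul
#define PAGE_TRUNC(a) ((unsigned long)(a) & ~(PAGE_SIZE - 1))                   /* round down */
#define PAGE_ALIGN(a) (((unsigned long)(a) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) /* round up */

int main(void)
{
    unsigned long paddr = 0x20012345ul;        /* arbitrary physical address */
    unsigned long siz = 0x2100;                /* requested length */
    unsigned long off = paddr % PAGE_SIZE;     /* 0x345: offset inside its page */
    unsigned long len = PAGE_ALIGN(off + siz); /* 0x3000: three pages cover off + siz */

    printf("off=0x%lx len=0x%lx base=0x%lx\n", off, len, PAGE_TRUNC(paddr));
    return 0;
}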
Example no. 2
File: vm.c Project: AndrewD/prex
static int
do_free(vm_map_t map, void *addr)
{
	struct region *reg;

	addr = (void *)PAGE_TRUNC(addr);

	/*
	 * Find the target region.
	 */
	reg = region_find(&map->head, addr, 1);
	if (reg == NULL || reg->addr != addr || (reg->flags & REG_FREE))
		return EINVAL;

	/*
	 * Unmap pages of the region.
	 */
	mmu_map(map->pgd, reg->phys, reg->addr, reg->size, PG_UNMAP);

	/*
	 * Free the physical pages unless the region is shared or mapped.
	 */
	if (!(reg->flags & REG_SHARED) && !(reg->flags & REG_MAPPED))
		page_free(reg->phys, reg->size);

	region_free(&map->head, reg);
	return 0;
}
Example no. 3
VmMap &
VmMap::operator +=(const MemArea & blk)
{
    MemArea *area;
    vaddr_t addr_start, addr_end;
    ENTER;
    if (0 == blk.v_addr || 0 == blk.length)
    {
        ::cout << "[vm] ERROR!! Invalid parameters\n";
        EXIT;
        goto plusout;
    }
    addr_start = PAGE_TRUNC(blk.v_addr);
    addr_end = PAGE_ALIGN(blk.v_addr + blk.length);
    if (addr_end >= (vaddr_t) &KERNEL_BASE)
    {
        ::cout << "[vm] ERROR!! Invalid mem area\n";
        EXIT;
        goto plusout;
    }
    for (area = this->usedareas.next; area && (area != &this->usedareas);
         area = area->next)
    {
        // OUTDEBUG("[ VmMap ]");
        /* find overlapped area */
        if (addr_start >= area->v_addr
            && addr_start <= (area->v_addr + area->length))
        {
            // OUTDEBUG("[ VmMap ]");
            /* create union  */
            addr_end = (addr_end > (area->v_addr + area->length))
                       ? addr_end : (area->v_addr + area->length);
            area->length = addr_end - area->v_addr;
            ::cout << kernel::Console::HEX << "[vm] New area: "
                      "area->v_addr=0x" << (int) area->v_addr
                   << ", area->length=0x" << (int) area->length << "\n"
                   << kernel::Console::DEC;
            EXIT;
            goto plusout;
        }
    }
    area = new MemArea();
    if (!area)
    {
        ::cout << "[vm] ERROR! No mem space\n";
        EXIT;
        goto plusout;
    }
    area->v_addr = addr_start;
    area->length = addr_end - addr_start;
    area->p_addr = blk.p_addr;
    area->alloc = blk.alloc;
    /* append at the tail of the used-areas ring, just before the sentinel */
    area->prev = this->usedareas.prev;
    area->next = &this->usedareas;
    area->prev->next = area;
    this->usedareas.prev = area;
    EXIT;
plusout:
    return *this;
}
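
The tail insertion at the end of operator+= is the standard sentinel-ring idiom, which the Prex region lists below use as well. A self-contained sketch with a hypothetical node type:

#include <stdio.h>

struct node {
    struct node *prev, *next;
    int value;
};

/* Insert n at the tail of the ring, i.e. just before the sentinel head. */
static void ring_insert_tail(struct node *head, struct node *n)
{
    n->prev = head->prev;
    n->next = head;
    n->prev->next = n;
    head->prev = n;
}

int main(void)
{
    struct node head = { &head, &head, 0 };   /* empty ring: sentinel links to itself */
    struct node a = { 0, 0, 1 }, b = { 0, 0, 2 };
    struct node *it;

    ring_insert_tail(&head, &a);
    ring_insert_tail(&head, &b);
    for (it = head.next; it != &head; it = it->next)
        printf("%d\n", it->value);            /* prints 1 then 2 */
    return 0;
}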
Example no. 4
vaddr_t
VmMap::remap(paddr_t paddr, vaddr_t _vaddr, size_t siz)
{
    size_t off = paddr % PAGE_SIZE;
    int num = PAGE_ALIGN(off + siz) / PAGE_SIZE;   /* pages needed to cover off + siz */

    // OUTDEBUG("[ VmMap ]");
    cout << kernel::Console::HEX << "[ VmMap ] remapping " << num
         << " pages from paddr(0x" << (int) PAGE_TRUNC(paddr)
         << ") to vaddr(0x" << (int) PAGE_TRUNC(_vaddr) << ") [len="
         << kernel::Console::DEC << (int) PAGE_ALIGN(off + siz) << "]\n";

    kernel::mem::MemArea area(PAGE_TRUNC(paddr), PAGE_TRUNC(_vaddr),
                              PAGE_ALIGN(off + siz), false);
    ArchVmMap::archMapArea(area);   /* install the mapping in the page tables */

    return _vaddr + off;
}
Example no. 5
File: page.c Project: AndrewD/prex
/*
 * Reserve pages at a specific address.
 * Returns 0 on success; panics if the range is not inside a free block.
 */
int
page_reserve(void *addr, size_t size)
{
	struct page_block *blk, *tmp;
	char *end;

	if (size == 0)
		return 0;

	addr = phys_to_virt(addr);
	end = (char *)PAGE_ALIGN((char *)addr + size);
	addr = (void *)PAGE_TRUNC(addr);
	size = (size_t)(end - (char *)addr);

	/*
	 * Find the free block that contains the specified range.
	 */
	blk = page_head.next;
	for (;;) {
		if (blk == &page_head)
			panic("page_reserve");
		if ((char *)blk <= (char *)addr
		    && end <= (char *)blk + blk->size)
			break;
		blk = blk->next;
	}
	if ((char *)blk == (char *)addr && blk->size == size) {
		/*
		 * Unlink the block from free list.
		 */
		blk->prev->next = blk->next;
		blk->next->prev = blk->prev;
	} else {
		/*
		 * Split this block.
		 */
		if ((char *)blk + blk->size != end) {
			tmp = (struct page_block *)end;
			tmp->size = (size_t)((char *)blk + blk->size - end);
			tmp->next = blk->next;
			tmp->prev = blk;

			blk->size -= tmp->size;
			blk->next->prev = tmp;
			blk->next = tmp;
		}
		if ((char *)blk == (char *)addr) {
			blk->prev->next = blk->next;
			blk->next->prev = blk->prev;
		} else
			blk->size = (size_t)((char *)addr - (char *)blk);
	}
	return 0;
}
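
Once the containing free block is found, page_reserve handles three layouts: an exact match is unlinked, a tail remainder is split off into a new block, and a head remainder shrinks the block in place. A toy rerun of that arithmetic with concrete (hypothetical) numbers, mirroring the branches above:

#include <stdio.h>

int main(void)
{
    unsigned long blk = 0x8000, blk_size = 0x6000; /* free block [0x8000,0xE000) */
    unsigned long addr = 0x9000, end = 0xB000;     /* range to reserve */

    if (blk + blk_size != end) {
        /* tail remainder becomes a new free block [0xB000,0xE000) */
        unsigned long tmp = end;
        unsigned long tmp_size = blk + blk_size - end;
        printf("new block [0x%lx,0x%lx)\n", tmp, tmp + tmp_size);
        blk_size -= tmp_size;
    }
    if (blk == addr) {
        printf("unlink whole block\n");            /* reserved range starts the block */
    } else {
        blk_size = addr - blk;                     /* head remainder shrinks in place */
        printf("shrunk to [0x%lx,0x%lx)\n", blk, blk + blk_size);
    }
    return 0;
}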
Example no. 6
File: vm.c Project: AndrewD/prex
static int
do_allocate(vm_map_t map, void **addr, size_t size, int anywhere)
{
	struct region *reg;
	char *start, *end, *phys;

	if (size == 0)
		return EINVAL;

	/*
	 * Allocate region
	 */
	if (anywhere) {
		size = (size_t)PAGE_ALIGN(size);
		if ((reg = region_alloc(&map->head, size)) == NULL)
			return ENOMEM;
	} else {
		start = (char *)PAGE_TRUNC(*addr);
		end = (char *)PAGE_ALIGN(start + size);
		size = (size_t)(end - start);

		reg = region_find(&map->head, start, size);
		if (reg == NULL || !(reg->flags & REG_FREE))
			return EINVAL;

		reg = region_split(&map->head, reg, start, size);
		if (reg == NULL)
			return ENOMEM;
	}
	reg->flags = REG_READ | REG_WRITE;

	/*
	 * Allocate physical pages and map them into the virtual address space.
	 */
	if ((phys = page_alloc(size)) == 0)
		goto err1;

	if (mmu_map(map->pgd, phys, reg->addr, size, PG_WRITE))
		goto err2;

	reg->phys = phys;

	/* Zero fill */
	memset(phys_to_virt(phys), 0, reg->size);
	*addr = reg->addr;
	return 0;

 err2:
	page_free(phys, size);
 err1:
	region_free(&map->head, reg);
	return ENOMEM;
}
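
The err1/err2 tail of do_allocate is the classic goto-unwind idiom: each failure label releases only the resources acquired before the failing step. A minimal sketch of the same shape with hypothetical resources:

#include <stdlib.h>

static int acquire_two(void)
{
    char *a, *b;

    if ((a = malloc(32)) == NULL)
        goto err1;                 /* nothing acquired yet */
    if ((b = malloc(64)) == NULL)
        goto err2;                 /* a must be released */

    /* ... use a and b ... */
    free(b);
    free(a);
    return 0;

 err2:
    free(a);
 err1:
    return -1;
}

int main(void)
{
    return acquire_two() ? 1 : 0;
}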
Example no. 7
static int
do_map(vm_map_t map, void *addr, size_t size, void **alloc)
{
	vm_map_t curmap;
	task_t self;
	char *start, *end;
	struct region *reg, *tgt;
	void *tmp;

	if (size == 0)
		return EINVAL;

	/* check fault */
	tmp = NULL;
	if (umem_copyout(&tmp, alloc, sizeof(tmp)))
		return EFAULT;

	start = (char *)PAGE_TRUNC(addr);
	end = (char *)PAGE_ALIGN((char *)addr + size);
	size = (size_t)(end - start);

	/*
	 * Find the region that contains the target address
	 */
	reg = region_find(&map->head, start, size);
	if (reg == NULL || (reg->flags & REG_FREE))
		return EINVAL;	/* not allocated */
	tgt = reg;

	/*
	 * Create new region to map
	 */
	self = cur_task();
	curmap = self->map;
	reg = region_create(&curmap->head, start, size);
	if (reg == NULL)
		return ENOMEM;
	reg->flags = tgt->flags | REG_MAPPED;

	umem_copyout(&addr, alloc, sizeof(addr));
	return 0;
}
Example no. 8
/*
 * Reserve a specific area for boot tasks.
 */
static int
do_reserve(vm_map_t map, void **addr, size_t size)
{
	struct region *reg;
	char *start, *end;

	if (size == 0)
		return EINVAL;

	start = (char *)PAGE_TRUNC(*addr);
	end = (char *)PAGE_ALIGN(start + size);
	size = (size_t)(end - start);

	reg = region_create(&map->head, start, size);
	if (reg == NULL)
		return ENOMEM;
	reg->flags = REG_READ | REG_WRITE;
	*addr = reg->addr;
	return 0;
}
Example no. 9
static int
do_free(vm_map_t map, void *addr)
{
	struct region *reg;

	addr = (void *)PAGE_TRUNC(addr);

	/*
	 * Find the target region.
	 */
	reg = region_find(&map->head, addr, 1);
	if (reg == NULL || reg->addr != addr || (reg->flags & REG_FREE))
		return EINVAL;	/* not allocated */

	/*
	 * Free the pages unless the region is shared or mapped.
	 */
	if (!(reg->flags & REG_SHARED) && !(reg->flags & REG_MAPPED))
		page_free(reg->addr, reg->size);

	region_free(&map->head, reg);
	return 0;
}
Example no. 10
static int
do_attribute(vm_map_t map, void *addr, int attr)
{
	struct region *reg;
	int new_flags = 0;

	addr = (void *)PAGE_TRUNC(addr);

	/*
	 * Find the target region.
	 */
	reg = region_find(&map->head, addr, 1);
	if (reg == NULL || reg->addr != addr || (reg->flags & REG_FREE)) {
		return EINVAL;	/* not allocated */
	}
	/*
	 * The attribute of a mapped or shared region cannot be changed.
	 */
	if ((reg->flags & REG_MAPPED) || (reg->flags & REG_SHARED))
		return EINVAL;

	/*
	 * Check new and old flag.
	 */
	if (reg->flags & REG_WRITE) {
		if (!(attr & VMA_WRITE))
			new_flags = REG_READ;
	} else {
		if (attr & VMA_WRITE)
			new_flags = REG_READ | REG_WRITE;
	}
	if (new_flags == 0)
		return 0;	/* same attribute */
	reg->flags = new_flags;
	return 0;
}
Example no. 11
static int
do_allocate(vm_map_t map, void **addr, size_t size, int anywhere)
{
	struct region *reg;
	char *start, *end;

	if (size == 0)
		return EINVAL;

	/*
	 * Allocate region, and reserve pages for it.
	 */
	if (anywhere) {
		size = (size_t)PAGE_ALIGN(size);
		if ((start = page_alloc(size)) == 0)
			return ENOMEM;
	} else {
		start = (char *)PAGE_TRUNC(*addr);
		end = (char *)PAGE_ALIGN(start + size);
		size = (size_t)(end - start);

		if (page_reserve(start, size))
			return EINVAL;
	}
	reg = region_create(&map->head, start, size);
	if (reg == NULL) {
		page_free(start, size);
		return ENOMEM;
	}
	reg->flags = REG_READ | REG_WRITE;

	/* Zero fill */
	memset(start, 0, size);
	*addr = reg->addr;
	return 0;
}
Example no. 12
/**
 * TODO This (mapping) should be done somewhere in a page or
 * platform context.
 */
vaddr_t
ArchVmMap::archMapArea(kernel::mem::MemArea &area)
{
    vaddr_t vaddr, vaddr_end, paddr;
    pte *l2_page;
    pte entry;
    pgd_t pgd;   /* FIXME: never initialized; must refer to the target page directory */
    int l1_idx, l2_idx, i;

    if (!area.length || !pNode)
    {
        return NULL;
    }

    /* allocate backing physical memory when the area requests it */
    if (area.alloc)
    {
        area.p_addr = (paddr_t) pNode->alloc(area.length, 2);
    }

    paddr = area.p_addr;

    if (NULL == area.p_addr)
    {
        ::cout << "[vm] ERROR!! Out of memory\n";
        return NULL;
    }

    for (vaddr = PAGE_TRUNC(area.v_addr),
         vaddr_end = PAGE_ALIGN(area.v_addr + area.length);
         vaddr < vaddr_end;
         vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
    {
        l1_idx = PTE_L1_IDX(vaddr);
        entry = (pte) pgd[l1_idx];   /* L1 descriptor covering this virtual address */

        switch (PTE_L1_TYPE_MASK & entry)
        {
        case PTE_L1_COARSE:

            l2_page = (pte *)phys_to_virt((pgd[l1_idx] & (PTE_L1_MASK | PTE_L2_COARSE_MASK)));
            l2_idx = PTE_L2_COARSE_IDX(vaddr);
            l2_page[l2_idx] = (paddr & ~0x00000FFF)
                    | (PTE_AP_RW_RW << 4)
                    | (PTE_AP_RW_RW << 6)
                    | (PTE_AP_RW_RW << 8)
                    | (PTE_AP_RW_RW << 10)
                    | 0xE; /* cacheable, bufferable, small page */
            break;

        case PTE_L1_SECTION:
            /* already covered by a 1 MB section mapping; nothing to do */
            break;

        case PTE_L1_FINE:
            /* TODO Implement Fine page table mapping */
            ::cout << (char *)"[vm] BUG!!!!!!!!!!!!! Fine Page Tables not supported!\n";
            ::hwplatform.idle();
            break;

        case PTE_L1_FAULT:
        default:
            l2_page = (pte *) pNode->alloc(PAGE_SIZE, 0);
            /*
             * One PAGE_SIZE allocation holds 4 coarse page tables;
             * create an L1 entry for each of them.
             *
             * TODO Mapping one contiguous 4 KB page to 4 coarse page
             * tables may cause an overlap of page tables!
             *
             * l2_page is incremented by 0x100 because it is a (pte *)
             * and each coarse table holds 256 (0x100) pte entries, so
             * each table is 0x100 * sizeof(pte) bytes in size.
             */
            for (i = 0; i < 4; i++, l2_page += 0x100)
            {
                pgd[l1_idx + i] = ((unsigned int)l2_page & 0xFFFFFC00)
                        | (1 << 4)
                        | PTE_L1_COARSE;
            }

            l2_page = (pte *)phys_to_virt((pgd[l1_idx] & 0xFFFFFC00));
            l2_idx = PTE_L2_COARSE_IDX(vaddr);

            l2_page[l2_idx] = (paddr & ~0x00000FFF)
                    | (PTE_AP_RW_RW << 4)
                    | (PTE_AP_RW_RW << 6)
                    | (PTE_AP_RW_RW << 8)
                    | (PTE_AP_RW_RW << 10)
                    | 0xE; /* cacheable, bufferable, small page */
            break;
        };
    }

    /* FIXME TODO sys/alphabet/vm.c: free already-allocated physical pages on error paths. */

    EXIT;
    return area.v_addr;
}
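
The coarse-table branch above packs four L2 tables into one 4 KB page: an ARMv4/v5 coarse page table has 256 word-sized entries, i.e. 1 KB, so l2_page += 0x100 advances exactly one table. A standalone sketch of the index arithmetic (the bit splits are the conventional ARM ones, assumed rather than taken from this kernel):

#include <stdio.h>

/* Conventional ARMv4/v5 address split (assumed): */
#define PTE_L1_IDX(va)        (((unsigned long)(va)) >> 20)          /* 1 MB per L1 entry */
#define PTE_L2_COARSE_IDX(va) ((((unsigned long)(va)) >> 12) & 0xFF) /* 4 KB small pages */

int main(void)
{
    unsigned long va = 0xC0123456ul;

    /* 256 entries * 4 bytes = 1 KB per coarse table; 4 tables per 4 KB page */
    printf("coarse table size: %u bytes\n", 256 * 4);
    printf("l1 index: %lu, l2 index: %lu\n", PTE_L1_IDX(va), PTE_L2_COARSE_IDX(va));
    return 0;
}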
Example no. 13
File: vm.c Project: AndrewD/prex
static int
do_map(vm_map_t map, void *addr, size_t size, void **alloc)
{
	vm_map_t curmap;
	char *start, *end, *phys;
	size_t offset;
	struct region *reg, *cur, *tgt;
	task_t self;
	int map_type;
	void *tmp;

	if (size == 0)
		return EINVAL;

	/* check fault */
	tmp = NULL;
	if (umem_copyout(&tmp, alloc, sizeof(tmp)))
		return EFAULT;

	start = (char *)PAGE_TRUNC(addr);
	end = (char *)PAGE_ALIGN((char *)addr + size);
	size = (size_t)(end - start);
	offset = (size_t)((char *)addr - start);

	/*
	 * Find the region that contains the target address
	 */
	reg = region_find(&map->head, start, size);
	if (reg == NULL || (reg->flags & REG_FREE))
		return EINVAL;	/* not allocated */
	tgt = reg;

	/*
	 * Find the free region in current task
	 */
	self = cur_task();
	curmap = self->map;
	if ((reg = region_alloc(&curmap->head, size)) == NULL)
		return ENOMEM;
	cur = reg;

	/*
	 * Try to map into current memory
	 */
	if (tgt->flags & REG_WRITE)
		map_type = PG_WRITE;
	else
		map_type = PG_READ;

	phys = (char *)tgt->phys + (start - (char *)tgt->addr);
	if (mmu_map(curmap->pgd, phys, cur->addr, size, map_type)) {
		region_free(&curmap->head, reg);
		return ENOMEM;
	}

	cur->flags = tgt->flags | REG_MAPPED;
	cur->phys = phys;

	tmp = (char *)cur->addr + offset;
	umem_copyout(&tmp, alloc, sizeof(tmp));
	return 0;
}
Example no. 14
File: vm.c Project: AndrewD/prex
static int
do_attribute(vm_map_t map, void *addr, int attr)
{
	struct region *reg;
	int new_flags = 0;
	void *old_addr, *new_addr = NULL;
	int map_type;

	addr = (void *)PAGE_TRUNC(addr);

	/*
	 * Find the target region.
	 */
	reg = region_find(&map->head, addr, 1);
	if (reg == NULL || reg->addr != addr || (reg->flags & REG_FREE)) {
		return EINVAL;	/* not allocated */
	}
	/*
	 * The attribute of a mapped region cannot be changed.
	 */
	if (reg->flags & REG_MAPPED)
		return EINVAL;

	/*
	 * Check new and old flag.
	 */
	if (reg->flags & REG_WRITE) {
		if (!(attr & VMA_WRITE))
			new_flags = REG_READ;
	} else {
		if (attr & VMA_WRITE)
			new_flags = REG_READ | REG_WRITE;
	}
	if (new_flags == 0)
		return 0;	/* same attribute */

	map_type = (new_flags & REG_WRITE) ? PG_WRITE : PG_READ;

	/*
	 * If it is shared region, duplicate it.
	 */
	if (reg->flags & REG_SHARED) {

		old_addr = reg->phys;

		/* Allocate new physical page. */
		if ((new_addr = page_alloc(reg->size)) == 0)
			return ENOMEM;

		/* Copy source page */
		memcpy(phys_to_virt(new_addr), phys_to_virt(old_addr),
		       reg->size);

		/* Map new region */
		if (mmu_map(map->pgd, new_addr, reg->addr, reg->size,
			    map_type)) {
			page_free(new_addr, reg->size);
			return ENOMEM;
		}
		reg->phys = new_addr;

		/* Unlink from shared list */
		reg->sh_prev->sh_next = reg->sh_next;
		reg->sh_next->sh_prev = reg->sh_prev;
		if (reg->sh_prev == reg->sh_next)
			reg->sh_prev->flags &= ~REG_SHARED;
		reg->sh_next = reg->sh_prev = reg;
	} else {
		if (mmu_map(map->pgd, reg->phys, reg->addr, reg->size,
			    map_type))
			return ENOMEM;
	}
	reg->flags = new_flags;
	return 0;
}
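
The unlink at the end of the shared case relies on sh_prev/sh_next forming a ring, where a self-linked region means "not shared". A minimal sketch of the same invariant with a hypothetical struct:

#include <stdio.h>

#define SHARED 0x1

struct reg {
    struct reg *sh_prev, *sh_next;
    int flags;
};

/* Remove r from its sharing ring and leave it self-linked; if only
 * one region remains in the ring, it is no longer shared. */
static void share_unlink(struct reg *r)
{
    r->sh_prev->sh_next = r->sh_next;
    r->sh_next->sh_prev = r->sh_prev;
    if (r->sh_prev == r->sh_next)     /* ring collapsed to a single node */
        r->sh_prev->flags &= ~SHARED;
    r->sh_next = r->sh_prev = r;
}

int main(void)
{
    struct reg a = { 0, 0, SHARED }, b = { 0, 0, SHARED };
    a.sh_prev = a.sh_next = &b;
    b.sh_prev = b.sh_next = &a;

    share_unlink(&a);
    printf("b shared? %s\n", (b.flags & SHARED) ? "yes" : "no"); /* prints: no */
    return 0;
}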