Example #1: vmm_virt2phys()
/* Translate a virtual address in the given thread's task to its physical
 * address, or return NULL if the page is not mapped. */
void* vmm_virt2phys(thread_t thread, void* vaddr, size_t size){

    struct page_table_entry_t* entry = find_page_entry(thread->task->page_table, PAGE_BASE(vaddr));

    if(entry==NULL){
        return NULL;
    }

    return (entry->paddr_base + (vaddr - PAGE_BASE(vaddr)));
    // TODO: check that [vaddr, vaddr+size) stays within the page, i.e. paddr+size does not cross the page boundary
}
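All of these examples rely on a small set of page-rounding helpers (PG_SIZE, PG_MASK, PAGE_BASE, PAGE_NEXT, PAGE_OFFSET, TO_PAGE_SIZE; Haiku's B_PAGE_SIZE plays the PG_SIZE role in Example #6). Each project defines its own versions in its headers; the block below is only a minimal sketch of the semantics they are used with here, assuming a 4 KiB page size and integer-valued arguments, not any project's actual definitions.

#include <stdint.h>

/* Hypothetical page helpers -- a sketch only; the real projects define their
 * own, often with pointer casts matching their own types. */
#define PG_SIZE         4096UL
#define PG_MASK         (PG_SIZE - 1)                            /* in-page offset bits */

#define PAGE_BASE(a)    ((uintptr_t)(a) & ~PG_MASK)              /* round down to page start */
#define PAGE_NEXT(a)    (((uintptr_t)(a) + PG_MASK) & ~PG_MASK)  /* round up to a page boundary */
#define PAGE_OFFSET(a)  ((uintptr_t)(a) & PG_MASK)               /* offset within the page */
#define TO_PAGE_SIZE(s) (((size_t)(s) + PG_MASK) & ~PG_MASK)     /* round a size up to whole pages */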
Example #2: get_mmap_base()
/* Reserve an address range big enough for all PT_LOAD segments and return
 * the load bias that must be added to each segment's p_vaddr. */
static unsigned long get_mmap_base(elf_bin_t *elf)
{
	unsigned long addr, pg_start, pg_end, mmap_min = ULONG_MAX, mmap_max = 0;
	int i;

	for (i = 0; i < elf->hdr.e_phnum; i++)
		if (elf->phdr[i].p_type == PT_LOAD)
		{
			pg_start = PAGE_BASE(elf->phdr[i].p_vaddr);
			pg_end   = PAGE_NEXT(elf->phdr[i].p_vaddr + elf->phdr[i].p_memsz);

			if (pg_start < mmap_min)
				mmap_min = pg_start;

			if (pg_end > mmap_max)
				mmap_max = pg_end;
		}

	if (mmap_min > mmap_max)
		mmap_min = mmap_max;

	addr = do_mmap2(mmap_min, mmap_max-mmap_min, PROT_NONE,
	                MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

	if (addr & PG_MASK) /* not on page boundary -> error code */
		return addr;

	sys_munmap(addr, mmap_max-mmap_min);

	return addr-mmap_min;
}
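get_mmap_base() and mmap_prog_section() (Example #4) follow the convention that do_mmap2() returns a page-aligned address on success and a small, non-page-aligned error code on failure, which is exactly what the addr & PG_MASK checks test. A hypothetical driver tying the two together might look like the following; the name load_segments and its error handling are illustrative, not taken from the project:

/* Hypothetical caller (sketch): pick a load bias, then map every PT_LOAD
 * segment relative to it using mmap_prog_section() from Example #4. */
static long load_segments(elf_bin_t *elf)
{
	unsigned long base = get_mmap_base(elf);
	int i;

	if (base & PG_MASK)                 /* do_mmap2() error code */
		return base;

	elf->base = base;                   /* load bias added to every p_vaddr */

	for (i = 0; i < elf->hdr.e_phnum; i++)
		if (elf->phdr[i].p_type == PT_LOAD)
			if (mmap_prog_section(elf, &elf->phdr[i]) < 0)
				return -1;

	return 0;
}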
Example #3: find_page_entry()
/* Find the page table entry for the given vaddr */
page_table_entry_t* find_page_entry(struct list_t* page_table, void* vaddr)
{
    struct list_entry_t* entry = page_table->head;

    while(entry != NULL) {

        if(((struct page_table_entry_t*)entry->data)->vaddr_base == PAGE_BASE(vaddr)) {
            return (struct page_table_entry_t*)entry->data;
        }

        entry = entry->next;
    }

    return NULL; /* Address not in page table */
}
Example #4: mmap_prog_section()
/* Map one PT_LOAD program header at its (relocated) address and zero-fill the
 * bss tail; returns 0 on success, -1 for a malformed header, or a do_mmap2()
 * error code. */
static long mmap_prog_section(elf_bin_t *elf, Elf32_Phdr *p)
{
	unsigned long base = elf->base, brk_, bss, addr, pg_off, size;
	int prot = 0;

	if (p->p_flags & PF_R)
		prot |= PROT_READ;		
	if (p->p_flags & PF_W)
		prot |= PROT_WRITE;		
	if (p->p_flags & PF_X)
		prot |= PROT_EXEC;

	addr = PAGE_BASE(base+p->p_vaddr);
	size = PAGE_NEXT(base+p->p_vaddr + p->p_filesz) - addr;
	pg_off = p->p_offset/PG_SIZE;

	bss = base + p->p_vaddr + p->p_filesz;
	brk_ = base + p->p_vaddr + p->p_memsz;

	if ( ((p->p_vaddr-p->p_offset) & PG_MASK) || (bss > brk_) )
		return -1;

	addr = do_mmap2(addr, size, prot, MAP_PRIVATE|MAP_FIXED, elf->fd, pg_off);

	if (addr & PG_MASK) /* not on page boundary -> error code */
		return addr;

	if (elf->brk < brk_)
		elf->brk = brk_;

	if (elf->bss < bss)
		elf->bss = bss;

	/* linux does not fill in bss sections
	 * between load segments for interpreters;
	 * makes no difference to the standard ld.so
	 */
	addr = zerofill(bss, brk_, prot);

	if (addr & PG_MASK) /* not on page boundary -> error code */
		return addr;

	return 0;
}
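zerofill() is not shown in these excerpts. Judging by how it is called, it clears the partial page after the file-backed bytes and backs the remaining [bss, brk_) range with anonymous pages; a rough sketch under those assumptions (the project's real helper may well differ):

/* Hypothetical zerofill(): clear the bss tail of the last file-backed page and
 * map anonymous zero pages up to brk_. Returns a page-aligned value on success
 * so the caller's (addr & PG_MASK) error check passes. */
static unsigned long zerofill(unsigned long bss, unsigned long brk_, int prot)
{
	unsigned long pg_brk = PAGE_NEXT(bss);

	if ((prot & PROT_WRITE) && pg_brk > bss)
		memset((void *)bss, 0, pg_brk - bss);     /* zero the partial page */

	if (brk_ > pg_brk)                                /* fully anonymous pages */
		return do_mmap2(pg_brk, brk_ - pg_brk, prot,
		                MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);

	return pg_brk;
}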
Example #5: dtlb_miss()
/* Data TLB miss handler: look up the faulting address in the task's page
 * table, allocating and mapping a fresh page on first touch. */
void dtlb_miss() {
    struct optimsoc_scheduler_core *core_ctx;
    core_ctx = &optimsoc_scheduler_core[optimsoc_get_domain_coreid()];

    void *vaddr = (void*) or1k_mfspr(SPR_EEAR_BASE);

    runtime_trace_dtlb_miss(vaddr);

    /* Look up virtual address in the page table */
    struct page_table_entry_t* entry;
    entry = find_page_entry(core_ctx->active_thread->task->page_table, vaddr);

    if(entry != NULL) {

        /* Write address back to DTLB */
        arch_set_dtlb(entry->vaddr_base, entry->paddr_base);

    } else {

        void *page = vmm_alloc_page();
        assert(page != NULL);

        entry = malloc(sizeof(struct page_table_entry_t));
        assert(entry != NULL);

        entry->vaddr_base = PAGE_BASE(vaddr);
        entry->paddr_base = page;

        printf("Allocated new data page %p and mapped to %p\n", entry->paddr_base, entry->vaddr_base);

        list_add_tail(core_ctx->active_thread->task->page_table, (void*)entry);
        runtime_trace_dtlb_allocate_page(page);

        arch_set_dtlb(entry->vaddr_base, entry->paddr_base);

    }

}
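Examples #1, #3, and #5 together form the lookup path of a software-managed TLB: the miss handler fills the per-task page table, find_page_entry() walks it, and vmm_virt2phys() resolves addresses for code that needs physical memory. A hypothetical use, with illustrative names (dma_start() is made up for the example):

#include <errno.h>

/* Hypothetical helper: translate a user buffer before handing it to a device
 * that works on physical addresses. */
static int start_dma_from(thread_t thread, void *user_buf, size_t len)
{
    void *phys = vmm_virt2phys(thread, user_buf, len);

    if (phys == NULL)
        return -EFAULT;       /* page never touched, so no DTLB miss mapped it */

    dma_start(phys, len);     /* dma_start() is illustrative, not a project API */
    return 0;
}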
Example #6: map_image()
status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0, fixed,
			loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + 8 * 1024)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%lu%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the start
		// of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. When we're done with those, we change the
			// protection of read-only segments back to read-only. We map
			// those segments over-committing, since quite likely only a
			// relatively small number of pages needs to be touched and we
			// want to avoid a lot of memory being committed for them
			// temporarily, just because we have to map them writable.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP, false,
				fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void *)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void *)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}
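To make the trailer clearing above concrete, take a hypothetical writable region with start = 0x1234, size = 0xd00 (bytes that came from the file) and vmsize = 0x2000 after page rounding, mapped at loadAddress = 0x20000000. Then PAGE_OFFSET(start) = 0x234, so startClearing = 0x20000000 + 0x234 + 0xd00 = 0x20000f34 and toClear = 0x2000 - 0x234 - 0xd00 = 0x10cc: everything after the file-backed bytes, up to the end of the mapped region, is zeroed so that the .bss portion starts out clean.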
Example #7: parse_program_headers()
static status_t
parse_program_headers(image_t* image, char* buff, int phnum, int phentsize)
{
	elf_phdr* pheader;
	int regcount;
	int i;

	image->dso_tls_id = unsigned(-1);

	regcount = 0;
	for (i = 0; i < phnum; i++) {
		pheader = (elf_phdr*)(buff + i * phentsize);

		switch (pheader->p_type) {
			case PT_NULL:
				/* NOP header */
				break;
			case PT_LOAD:
				if (pheader->p_memsz == pheader->p_filesz) {
					/*
					 * everything in one area
					 */
					image->regions[regcount].start = pheader->p_vaddr;
					image->regions[regcount].size = pheader->p_memsz;
					image->regions[regcount].vmstart
						= PAGE_BASE(pheader->p_vaddr);
					image->regions[regcount].vmsize
						= TO_PAGE_SIZE(pheader->p_memsz
							+ PAGE_OFFSET(pheader->p_vaddr));
					image->regions[regcount].fdstart = pheader->p_offset;
					image->regions[regcount].fdsize = pheader->p_filesz;
					image->regions[regcount].delta = 0;
					image->regions[regcount].flags = 0;
					if (pheader->p_flags & PF_WRITE) {
						// this is a writable segment
						image->regions[regcount].flags |= RFLAG_RW;
					}
				} else {
					/*
					 * may require splitting
					 */
					addr_t A = TO_PAGE_SIZE(pheader->p_vaddr
						+ pheader->p_memsz);
					addr_t B = TO_PAGE_SIZE(pheader->p_vaddr
						+ pheader->p_filesz);

					image->regions[regcount].start = pheader->p_vaddr;
					image->regions[regcount].size = pheader->p_filesz;
					image->regions[regcount].vmstart
						= PAGE_BASE(pheader->p_vaddr);
					image->regions[regcount].vmsize
						= TO_PAGE_SIZE(pheader->p_filesz
							+ PAGE_OFFSET(pheader->p_vaddr));
					image->regions[regcount].fdstart = pheader->p_offset;
					image->regions[regcount].fdsize = pheader->p_filesz;
					image->regions[regcount].delta = 0;
					image->regions[regcount].flags = 0;
					if (pheader->p_flags & PF_WRITE) {
						// this is a writable segment
						image->regions[regcount].flags |= RFLAG_RW;
					}

					if (A != B) {
						/*
						 * yeah, it requires splitting
						 */
						regcount += 1;
						image->regions[regcount].start = pheader->p_vaddr;
						image->regions[regcount].size
							= pheader->p_memsz - pheader->p_filesz;
						image->regions[regcount].vmstart
							= image->regions[regcount-1].vmstart
								+ image->regions[regcount-1].vmsize;
						image->regions[regcount].vmsize
							= TO_PAGE_SIZE(pheader->p_memsz
									+ PAGE_OFFSET(pheader->p_vaddr))
								- image->regions[regcount-1].vmsize;
						image->regions[regcount].fdstart = 0;
						image->regions[regcount].fdsize = 0;
						image->regions[regcount].delta = 0;
						image->regions[regcount].flags = RFLAG_ANON;
						if (pheader->p_flags & PF_WRITE) {
							// this is a writable segment
							image->regions[regcount].flags |= RFLAG_RW;
						}
					}
				}
				regcount += 1;
				break;
			case PT_DYNAMIC:
				image->dynamic_ptr = pheader->p_vaddr;
				break;
			case PT_INTERP:
				// should check here for an appropriate interpreter
				break;
			case PT_NOTE:
				// unsupported
				break;
			case PT_SHLIB:
				// undefined semantics
				break;
			case PT_PHDR:
				// we don't use it
				break;
			case PT_RELRO:
				// not implemented yet, but can be ignored
				break;
			case PT_STACK:
				// we don't use it
				break;
			case PT_TLS:
				image->dso_tls_id
					= TLSBlockTemplates::Get().Register(
						TLSBlockTemplate((void*)pheader->p_vaddr,
							pheader->p_filesz, pheader->p_memsz));
				break;
			default:
				FATAL("%s: Unhandled pheader type in parse 0x%" B_PRIx32 "\n",
					image->path, pheader->p_type);
				return B_BAD_DATA;
		}
	}

	return B_OK;
}
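As a concrete (hypothetical) illustration of the splitting case with 4 KiB pages: a PT_LOAD header with p_vaddr = 0x1000, p_filesz = 0x1800 and p_memsz = 0x5800 gives B = TO_PAGE_SIZE(0x2800) = 0x3000 and A = TO_PAGE_SIZE(0x6800) = 0x7000. Since A != B, the loader emits two regions: a file-backed one covering [0x1000, 0x3000) with size 0x1800, and an RFLAG_ANON one covering [0x3000, 0x7000) for the zero-initialized remainder of the segment. When both values round to the same page, a single file-backed region suffices, and the bss tail inside the last page is cleared later by map_image() (Example #6).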