Example 1
// TODO: This function shouldn't need to exist. Find another way
vaddr_t vmm_km_heap_extend(size_t size) {
  vregion_t *region = &vmap_kernel()->regions[2];
  // Make sure extending the region by ROUND_PAGE(size) won't run past the 32-bit address space
  kassert((UINT32_MAX - region->vend) > ROUND_PAGE(size));


  vaddr_t prev_vend = region->vend;
  region->vend += ROUND_PAGE(size);

  for(vaddr_t va = prev_vend; va < region->vend; va += PAGESIZE) {
    // Allocate a free page if one should be available else panic
    paddr_t pa = pmm_alloc();
    kassert(pa != UINTPTR_MAX);

    // TODO: Use pmap_enter here instead
    pmap_kenter_pa(va, pa, region->vm_prot, PMAP_WIRED | PMAP_WRITE_COMBINE); 

    // Enter the information into the amap
    region->aref.amap->aslots[(va - region->vstart) / PAGESIZE]->page->vaddr = va;
  }

  // Zero only the first newly mapped page
  memset((void*)prev_vend, 0, PAGESIZE);
  vmap_kernel()->heap_end = region->vend;
  
  uint32_t new_size = region->vend - region->vstart;
  region->aref.amap->maxslots = region->aref.amap->nslots = new_size / PAGESIZE;

  return prev_vend;
}
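
A note on the page macros: the kernel snippets in this collection use ROUND_PAGE, TRUNC_PAGE and PAGESIZE without showing their definitions. Below is a minimal sketch of what they are assumed to look like, together with the amap-slot arithmetic used above; the 4 KiB page size and the exact macro forms are assumptions, not taken from the project's headers.

#include <stdint.h>

/* Assumed definitions: 4 KiB pages, ROUND_PAGE rounds up, TRUNC_PAGE rounds down. */
#define PAGESIZE      0x1000u
#define TRUNC_PAGE(a) ((uintptr_t)(a) & ~(uintptr_t)(PAGESIZE - 1))
#define ROUND_PAGE(a) (((uintptr_t)(a) + PAGESIZE - 1) & ~(uintptr_t)(PAGESIZE - 1))

/* The amap slot of a virtual address inside a region is then plain integer math. */
static inline uint32_t amap_slot(uintptr_t va, uintptr_t vstart) {
  return (uint32_t)((va - vstart) / PAGESIZE);
}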
Example 2
void Archive::write()
{
	std::ofstream out(archPath, std::ofstream::binary);
	if (!out.is_open())
		BOOST_THROW_EXCEPTION(excp::FileNotWritable()  << excp::InfoFileName(archPath));

	// magic number
	out.write(MAGIC, sizeof(MAGIC));

	// number of files
	uint32_t num = paths.size();
	out.write((char*) &num, sizeof(num));

	uint64_t header_size = (uint64_t) out.tellp();
	// length of offsets
	header_size += paths.size() * sizeof(entry_t);

	// write file offsets
	std::vector<entry_t> entries(paths.size());
	if (!entries.empty())
		entries[0].offset = ROUND_PAGE(header_size);
	for (std::size_t i = 0; i < paths.size(); i++)
	{
		std::ifstream input(paths[i], std::ifstream::binary);
		if (!input.is_open())
			BOOST_THROW_EXCEPTION(excp::FileNotFoundException()  << excp::InfoFileName(paths[i]));
		input.seekg(0, input.end);
		uint64_t length = input.tellg();
		entries[i].length = length;

		if (i + 1 < paths.size())
			entries[i+1].offset = ROUND_PAGE(entries[i].offset + length);
	}
	if (!entries.empty())
		out.write((char*) entries.data(), entries.size() * sizeof(entry_t));

	// padding
	while (out.tellp() % PAGE_SIZE != 0)
		out.write("\0", 1);

	// write files
	for (std::size_t i = 0; i < paths.size(); i++)
	{
		std::ifstream input(paths[i], std::ifstream::binary);
		if (!input.is_open())
			BOOST_THROW_EXCEPTION(excp::FileNotFoundException()  << excp::InfoFileName(paths[i]));
		out << input.rdbuf();

		// padding
		while (out.tellp() % PAGE_SIZE != 0)
			out.write("\0", 1);
	}
}
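
For reference, a sketch of how a reader would walk the archive that write() produces: the magic bytes, a uint32_t file count, the raw entry table, padding to a page boundary, then the page-aligned file blobs. The entry_t layout (a uint64_t offset followed by a uint64_t length), the 4-byte MAGIC, and matching endianness are assumptions here, since none of them appear above.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical on-disk entry; the real entry_t layout is not shown above. */
typedef struct {
  uint64_t offset;  /* page-aligned start of the file's data */
  uint64_t length;  /* exact byte length of the file */
} entry_t;

/* Position `in` at the start of archived file i and fill in its entry; returns 0 on success. */
static int archive_locate(FILE *in, uint32_t i, entry_t *entry) {
  char magic[4];  /* assumed size of MAGIC */
  uint32_t num;

  if (fread(magic, 1, sizeof magic, in) != sizeof magic) return -1;
  if (fread(&num, sizeof num, 1, in) != 1 || i >= num) return -1;
  if (fseek(in, (long)(i * sizeof(entry_t)), SEEK_CUR) != 0) return -1;
  if (fread(entry, sizeof *entry, 1, in) != 1) return -1;
  return fseek(in, (long)entry->offset, SEEK_SET);
}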
Example 3
// TODO: Refactor this disgusting code
void _vmap_kernel_init() {
  // Get a reference to the kernel's pmap
  kernel_vmap.pmap = pmap_kernel();
  pmap_reference(kernel_vmap.pmap);

  // We need to create the regions: text/data, stack, and heap. And vm objects.
  kernel_vmap.regions = (vregion_t*)vmm_km_zalloc(sizeof(vregion_t) * 3);

  // We need to get the start and end addresses of the virtual memory regions of the kernel
  vaddr_t vas[6] = {(uintptr_t)(&__kernel_virtual_start), (uintptr_t)(&__kernel_virtual_end), 0, 0, (uintptr_t)(&__svc_stack_limit), 0};
  
  // vas[4] holds the stack limit (the lowest address of the SVC stack), i.e. the start of that
  // virtual memory region. Since the stack grows downward and is one page (4096 bytes) in size,
  // the end of the region is one PAGESIZE above the limit.
  // TODO: Don't hardcode PAGESIZE here. If the kernel stack size ever changes, introduce a STACKSIZE constant.
  vas[5] = vas[4]+PAGESIZE;

  kernel_vmap.text_start = vas[0];
  kernel_vmap.text_end = vas[1];
  kernel_vmap.data_start = vas[0];
  kernel_vmap.data_end = vas[1];
  kernel_vmap.stack_start = vas[5];
  
  // The kernel heap doesn't exist yet
  // These values need to be updated after the final vmm_km_zalloc call
  kernel_vmap.heap_start = vas[2];
  kernel_vmap.heap_end = vas[3];

  // Now let's populate each of the vregion structs.
  // Note: the indexing operator only works here because the vregions were allocated contiguously; normally they form a linked list.
  vm_prot_t prots[3] = {VM_PROT_ALL, VM_PROT_DEFAULT, VM_PROT_DEFAULT};
  for(uint32_t i = 0, num_regions = 3; i < num_regions; i++) {
    kernel_vmap.regions[i].vstart = vas[(i*2)];
    kernel_vmap.regions[i].vend = ROUND_PAGE(vas[(i*2)+1]);
    kernel_vmap.regions[i].vm_prot = prots[i];
    kernel_vmap.regions[i].needs_copy = kernel_vmap.regions[i].copy_on_write = 0;
    
    // Populate the amaps
    uint32_t num_pages = (kernel_vmap.regions[i].vend - kernel_vmap.regions[i].vstart) / PAGESIZE;
    if(num_pages > 0) {
      kernel_vmap.regions[i].aref.amap = (vm_amap_t*)vmm_km_zalloc(sizeof(vm_amap_t));
      kernel_vmap.regions[i].aref.slotoff = 0;
      kernel_vmap.regions[i].aref.amap->maxslots = kernel_vmap.regions[i].aref.amap->nslots = num_pages;
      kernel_vmap.regions[i].aref.amap->refcount = 1;
      kernel_vmap.regions[i].aref.amap->aslots = (vm_anon_t**)vmm_km_zalloc(sizeof(vm_anon_t*) * num_pages);

      // Populate the anon structs and put them in amap.aslots
      for(uint32_t j = 0; j < num_pages; j++) {
        vm_anon_t *anon = (vm_anon_t*)vmm_km_zalloc(sizeof(vm_anon_t));
        anon->page = (vpage_t*)vmm_km_zalloc(sizeof(vpage_t));
        anon->page->vaddr = kernel_vmap.regions[i].vstart + (j * PAGESIZE);
        kernel_vmap.regions[i].aref.amap->aslots[j] = anon;
        anon->refcount = 1;
      }
    }
    
    if((i+1) != num_regions) kernel_vmap.regions[i].next = &kernel_vmap.regions[i+1];
    else kernel_vmap.regions[i].next = NULL;
  }
}
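
The loop above only makes sense against the anon-map hierarchy it fills in. A rough reconstruction of the shapes involved, inferred purely from the field accesses in this file; the real definitions (and any fields not touched here) live in the project's headers.

#include <stdint.h>

typedef uintptr_t vaddr_t;  /* assumed */

typedef struct vpage   { vaddr_t vaddr; } vpage_t;
typedef struct vm_anon { vpage_t *page; uint32_t refcount; } vm_anon_t;
typedef struct vm_amap {
  vm_anon_t **aslots;          /* one anon (and thus one page) per slot */
  uint32_t  maxslots, nslots;  /* capacity and current number of slots  */
  uint32_t  refcount;
} vm_amap_t;
typedef struct vm_aref { vm_amap_t *amap; uint32_t slotoff; } vm_aref_t;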
Example 4
int
au_himem_map(void *cookie, bus_addr_t addr, bus_size_t size,
    int flags, bus_space_handle_t *bshp, int acct)
{
	au_himem_cookie_t	*c = (au_himem_cookie_t *)cookie;
	int			err;
	paddr_t			pa;
	vaddr_t			va;
	vsize_t			realsz;
	int			s;

	/* make sure we can map this bus address */
	if (addr < c->c_start || (addr + size) > c->c_end) {
		return EINVAL;
	}

	/* physical address, page aligned */
	pa = TRUNC_PAGE(c->c_physoff + addr);

	/*
	 * we are only going to work with whole pages.  the
	 * calculation is the offset into the first page, plus the
	 * intended size, rounded up to a whole number of pages.
	 */
	realsz = ROUND_PAGE((addr % PAGE_SIZE) + size);

	va = uvm_km_alloc(kernel_map,
	    realsz, PAGE_SIZE, UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0) {
		return ENOMEM;
	}

	/* virtual address in handle (offset appropriately) */
	*bshp = va + (addr % PAGE_SIZE);

	/* map the pages in the kernel pmap */
	s = splhigh();
	while (realsz) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
		realsz -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	splx(s);

	/* record our allocated range of bus addresses */
	if (acct && c->c_extent != NULL) {
		err = extent_alloc_region(c->c_extent, addr, size, EX_NOWAIT);
		if (err) {
			au_himem_unmap(cookie, *bshp, size, 0);
			return err;
		}
	}

	return 0;
}
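
As a concrete check of the realsz arithmetic (assuming a 4 KiB PAGE_SIZE): mapping size 0x2000 at a bus address whose page offset is 0x234 gives realsz = ROUND_PAGE(0x234 + 0x2000) = ROUND_PAGE(0x2234) = 0x3000, i.e. three pages, and the handle returned in *bshp is va + 0x234 so that byte 0 of the handle lines up with the requested bus address.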
Example 5
static char	evaluate_freeable(char *ptr, char *previous, char *mem, char *next)
{
	if (*PAGE_SIZE(mem) == ROUND_PAGE(*DATA_SIZE(ptr, 0) +
		PAGE_META + DATA_META))
		return (unmap_page(previous, mem, next));
	delete_memory(ptr, mem);
	if (!memory_is_set(mem + PAGE_META, *PAGE_SIZE(mem) - PAGE_META))
		return (unmap_page(previous, mem, next));
	return (0);
}
Example 6
// TODO: This function shouldn't need to exist. Find another way
void vmm_km_heap_init() {
  // Now let's set up the (empty) heap region.
  // What is happening here? Initially the kernel heap is empty: there are no pages in the heap region.
  // When kheap_init is called, it wants to extend the heap region to a certain size, and vmm_km_heap_extend
  // is the function that should do that. The problem: every virtual page the heap grows by needs a vm_anon_t
  // and a vpage_t associated with it, and the heap region as a whole needs a vm_amap_t. Where do we put these
  // structures? Not on the heap, because the heap hasn't been fully initialized yet.
  // Now assume the kheap has somehow been initialized. When kheap runs out of memory it asks vmm_km_heap_extend
  // to map more memory for the heap region. Again vmm needs to allocate vm_anon_t and vpage_t structures and
  // grow the vm_amap_t, and again it can't use the kheap, because the kheap is the one asking vmm for memory.
  // This is a cyclic dependency: kheap needs vmm_km_heap_extend to grow the heap region, but vmm_km_heap_extend
  // needs kheap to allocate the data structures that manage the new mappings.
  // The idea, then, is to compute the maximum potential size of the kheap and preallocate all of the needed
  // data structures up front. That way vmm_km_heap_extend never has to allocate; it just uses what has already
  // been set aside. Of course this means the kernel heap region structure can't be used with any other vmm
  // function, since it needs this 'special' treatment.
  kernel_vmap.heap_start = kernel_vend;
  kernel_vmap.heap_start = ROUND_PAGE(kernel_vmap.heap_start);
  
  // Theoretical maximum size of the heap; it should never actually grow this large. In fact this is an
  // overestimate, since it doesn't account for the space the vmm structures themselves will eat out of the heap.
  // TODO: 0xFFFF0000? What value should this be? UINT32_MAX?
  size_t max_heap_size = ROUND_PAGE(0xFFFF0000 - kernel_vmap.heap_start);
  uint32_t num_pages_needed = (uint32_t)(max_heap_size / PAGESIZE);

  // Set up the vmm data structures needed to store the all the information about the heap
  kernel_vmap.regions[2].aref.amap = (vm_amap_t*)vmm_km_zalloc(sizeof(vm_amap_t));
  kernel_vmap.regions[2].aref.slotoff = 0;
  kernel_vmap.regions[2].aref.amap->maxslots = kernel_vmap.regions[2].aref.amap->nslots = 0;
  kernel_vmap.regions[2].aref.amap->refcount = 1;
  kernel_vmap.regions[2].aref.amap->aslots = (vm_anon_t**)vmm_km_zalloc(sizeof(vm_anon_t*) * num_pages_needed);

  for(uint32_t i = 0; i < num_pages_needed; i++) {
    vm_anon_t *anon = (vm_anon_t*)vmm_km_zalloc(sizeof(vm_anon_t));
    anon->page = (vpage_t*)vmm_km_zalloc(sizeof(vpage_t));
    anon->page->vaddr = UINT32_MAX;
    kernel_vmap.regions[2].aref.amap->aslots[i] = anon;
    anon->refcount = 1;
  }

  // Update the start of the heap: the vmm_km_zalloc calls above advanced kernel_vend, so recompute the
  // (page-aligned) heap start and collapse the region to empty until vmm_km_heap_extend grows it.
  kernel_vmap.heap_start = kernel_vend;
  kernel_vmap.regions[2].vstart = kernel_vmap.regions[2].vend = kernel_vmap.heap_end = kernel_vmap.heap_start = ROUND_PAGE(kernel_vmap.heap_start);
} 
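
To get a feel for what this preallocation costs, assume a 4 KiB PAGESIZE and a heap_start somewhere around 0xC0400000 (both numbers are assumptions): num_pages_needed comes out to (0xFFFF0000 - 0xC0400000) / 0x1000 ≈ 261,000 slots, and each slot costs one aslots pointer plus one vm_anon_t and one vpage_t, so even with structures in the 16-byte range the preallocated tables consume a few megabytes of the space handed out by vmm_km_zalloc.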
Example 7
thread_safe_page *original_io_request::complete_req(thread_safe_page *p,
		bool lock)
{
	if (get_req_type() == io_request::BASIC_REQ) {
		int page_off;
		thread_safe_page *ret = NULL;
		char *req_buf;
		int req_size;

		if (within_1page()) {
			page_off = get_offset() - ROUND_PAGE(get_offset());
			req_buf = get_buf();
			req_size = get_size();
		}
		else {
			io_request extracted;
			extract(p->get_offset(), PAGE_SIZE, extracted);
			page_off = extracted.get_offset() - ROUND_PAGE(extracted.get_offset());
			req_buf = extracted.get_buf();
			req_size = extracted.get_size();
		}

		if (lock)
			p->lock();
		if (get_access_method() == WRITE) {
			memcpy((char *) p->get_data() + page_off, req_buf, req_size);
			if (!p->set_dirty(true))
				ret = p;
		}
		else 
			/* I assume the data I read never crosses the page boundary */
			memcpy(req_buf, (char *) p->get_data() + page_off, req_size);
		if (lock)
			p->unlock();
		return ret;
	}
	else {
		p->inc_ref();
		get_page_status(p).pg = p;
		return NULL;
	}
}
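
Note that page_off = get_offset() - ROUND_PAGE(get_offset()) only yields a non-negative in-page offset if, in this codebase, ROUND_PAGE aligns downward rather than upward (the opposite convention from the kernel examples earlier in this list). A minimal sketch of the assumed definition and the resulting offset, with PAGE_SIZE taken to be a power of two:

#define PAGE_SIZE 4096  /* assumed */
/* Assumed: ROUND_PAGE here masks off the low bits, i.e. rounds DOWN to a page boundary. */
#define ROUND_PAGE(off) (((long)(off)) & ~((long)PAGE_SIZE - 1))

/* With these values, an offset of 8300 gives ROUND_PAGE(8300) == 8192,          */
/* so page_off == 8300 - 8192 == 108: the request's position inside its page.    */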
Example 8
int global_cached_io::preload(off_t start, long size) {
	if (size > cache_size) {
		fprintf(stderr, "we can't preload data larger than the cache size\n");
		exit(1);
	}

	assert(ROUND_PAGE(start) == start);
	for (long offset = start; offset < start + size; offset += PAGE_SIZE) {
		page_id_t pg_id(get_file_id(), ROUND_PAGE(offset));
		page_id_t old_id;
		thread_safe_page *p = (thread_safe_page *) (get_global_cache()->search(
					pg_id, old_id));
		// This is mainly for testing. I don't need to really read data from disks.
		if (!p->data_ready()) {
			p->set_io_pending(false);
			p->set_data_ready(true);
		}
		p->dec_ref();
	}
	return 0;
}
Example 9
void pmap_init() {
  // Set the end of the kernel's virtual and physical address space
  kernel_vend = ROUND_PAGE((vaddr_t)(PGTPHYSICALSTARTADDR-MEMBASEADDR+KVIRTUALBASEADDR) + sizeof(pgt_t) * (vaddr_t)(NUMPAGETABLES));
  kernel_pend = ROUND_PAGE((paddr_t)(PGTPHYSICALSTARTADDR) + sizeof(pgt_t) * (paddr_t)(NUMPAGETABLES));

  // Initialize the kernel pmap
  _pmap_kernel_init();

  // Initialize pmm
  pmm_init();

  // Reserve the pages used by the kernel
  for(uint32_t i = 0, n_tot_entries = (uint32_t)(NUMPAGETABLES) * PGTNENTRIES, *pte = (uint32_t*)KERNEL_PGTS_BASE; i < n_tot_entries; i++) {
    if(pte[i] & PTE_PAGE_BIT) {
      // Count the resident and wired pages for the kernel (will be the same)
      kernel_pmap.pmap_stats.wired_count++;
      kernel_pmap.pmap_stats.resident_count++;

      pmm_reserve(TRUNC_PAGE(pte[i]));
    }
  }
}
Example 10
thread_safe_page *complete_cached_req(io_request *req, thread_safe_page *p,
		io_interface *io, join_compute_allocator *compute_alloc,
		std::vector<io_request> &requests)
{
	if (req->get_req_type() == io_request::BASIC_REQ) {
		int page_off;
		thread_safe_page *ret = NULL;
		char *req_buf;
		int req_size;

		page_off = req->get_offset() - ROUND_PAGE(req->get_offset());
		req_buf = req->get_buf();
		req_size = req->get_size();

		p->lock();
		if (req->get_access_method() == WRITE) {
			memcpy((char *) p->get_data() + page_off, req_buf, req_size);
			if (!p->set_dirty(true))
				ret = p;
		}
		else 
			/* I assume the data I read never crosses the page boundary */
			memcpy(req_buf, (char *) p->get_data() + page_off, req_size);
		p->unlock();
		return ret;
	}
	else {
		simple_page_byte_array arr(req, p);
		user_compute *compute = req->get_compute();
		compute->run(arr);
		compute_alloc->set_compute(compute);
		compute->fetch_requests(io, compute_alloc, requests);
		compute_alloc->set_compute(NULL);
		// If no one else is referencing the user compute, it means the computation
		// is complete now.
		if (compute->get_ref() == 0) {
			compute_allocator *alloc = compute->get_allocator();
			alloc->free(compute);
		}
		return NULL;
	}
}
Example 11
void
au_himem_unmap(void *cookie, bus_space_handle_t bsh, bus_size_t size, int acct)
{
	au_himem_cookie_t	*c = (au_himem_cookie_t *)cookie;
	vaddr_t			va;
	vsize_t			realsz;
	paddr_t			pa;
	int			s;

	va = (vaddr_t)TRUNC_PAGE(bsh);
	realsz = (vsize_t)ROUND_PAGE((bsh % PAGE_SIZE) + size);

	s = splhigh();

	/* make sure that any pending writes are flushed */
	wbflush();

	/*
	 * we have to get the bus address, so that we can free it in the
	 * extent manager.  this is the unfortunate thing about using
	 * virtual memory instead of just a 1:1 mapping scheme.
	 */
	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		panic("au_himem_unmap: virtual address invalid!");

	/* now remove it from the pmap */
	pmap_kremove(va, realsz);
	pmap_update(pmap_kernel());
	splx(s);

	/* finally we can release both virtual and bus address ranges */
	uvm_km_free(kernel_map, va, realsz, UVM_KMF_VAONLY);

	if (acct) {
		bus_addr_t		addr;
		addr = ((pa - c->c_physoff) + (bsh % PAGE_SIZE));
		extent_free(c->c_extent, addr, size, EX_NOWAIT);
	}
}
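
Working through the bookkeeping with concrete (assumed) numbers: with PAGE_SIZE 0x1000 and c_physoff 0x10000000, a handle bsh = 0xC0012234 that pmap_extract resolves to pa = 0x10012000 frees the bus range starting at (0x10012000 - 0x10000000) + 0x234 = 0x12234, which is exactly the addr that the matching au_himem_map call accounted for with extent_alloc_region.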
Example 12
void map_segments (long fd, Elf32_Phdr *segs[2], Elf32_Half type, dso *so) {

  /* Adjust text segment addresses to page size */
  Elf32_Off text_offset = TRUNC_PAGE(segs[0]->p_offset);
  Elf32_Addr text_vaddr = TRUNC_PAGE(segs[0]->p_vaddr);  
  Elf32_Addr text_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_memsz);
  unsigned long mapsize = text_vlimit - text_vaddr;

  /* Executable has to be loaded at constant address */
  void *base_addr = 0;
  if (type == ET_EXEC) {
    base_addr = (void *)text_vaddr;
  }

  /* TODO: what if base address lies in already mapped area? E.g. where the loader resides? */

  /* Map text segment into memory */
  char *mapbase = sl_mmap(base_addr, mapsize, convert_prot(segs[0]->p_flags),
                          MAP_PRIVATE, fd, text_offset);
  if ((long)mapbase == -1) {
    sl_close(fd);
    sl_printf("Error map_segments: mapping of text segment failed.\n");
    sl_exit(1);
  }

  /* Adjust data segment addresses to page size */
  Elf32_Off data_offset = TRUNC_PAGE(segs[1]->p_offset);
  Elf32_Addr data_vaddr = TRUNC_PAGE(segs[1]->p_vaddr);
  Elf32_Addr data_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_filesz);
  void *data_addr = mapbase + (data_vaddr - text_vaddr);
  long data_prot = convert_prot(segs[1]->p_flags);

  /* Map data segment into memory */
  if ((long)sl_mmap(data_addr, data_vlimit - data_vaddr, data_prot,
                    MAP_PRIVATE | MAP_FIXED, fd, data_offset) == -1) {
    sl_close(fd);
    sl_printf("Error map_segments: mapping of data segment failed.\n");
    sl_exit(1);
  }
   
  /* Clear BSS part */
  Elf32_Addr clear_vaddr = segs[1]->p_vaddr + segs[1]->p_filesz;
  void *clear_addr = mapbase + (clear_vaddr - text_vaddr);
  void *clear_page = mapbase + (TRUNC_PAGE(clear_vaddr) - text_vaddr);
  unsigned long nclear = data_vlimit - clear_vaddr;

  if (nclear > 0) {
    /* Make sure the end of the segment is writable */
    if ((data_prot & PROT_WRITE) == 0 &&
        sl_mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE) == -1) {
      sl_printf("Error map_segments: mprotect on data segment failed.\n");
      sl_exit(1);
    }
    
    sl_memset(clear_addr, 0, nclear);

    /* Reset the data protection */
    if ((data_prot & PROT_WRITE) == 0) {
      sl_mprotect(clear_page, PAGE_SIZE, data_prot);
    }
  }
  
  /* Allocate remaining part of bss section */
  Elf32_Addr bss_vaddr = data_vlimit;
  Elf32_Addr bss_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_memsz);
  void *bss_addr = mapbase + (bss_vaddr - text_vaddr);
  if (bss_vlimit > bss_vaddr) {
    if ((long)sl_mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
                      MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) == -1) {
      sl_printf("Error map_segments: mmap of bss segment failed.\n");
      sl_exit(1);
    }
    
  }
  
  /* Save important information */
  so->base_addr = (type == ET_EXEC) ? 0 : mapbase;
  so->text_addr = mapbase;
  so->text_size = mapsize;  
  so->data_addr = data_addr;
  so->data_size = data_vlimit - data_vaddr;
  so->bss_addr = bss_addr;
  so->bss_size = bss_vlimit - bss_vaddr;
  so->end_addr = bss_addr + so->bss_size;
  so->text_prot = convert_prot(segs[0]->p_flags);
  so->data_prot = data_prot;
  so->bss_prot = data_prot;
}
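
To make the address arithmetic concrete, take a hypothetical object with a 4 KiB page size, a text segment at p_vaddr 0 and a data segment with p_vaddr 0x5a10, p_filesz 0x220 and p_memsz 0x660: text_vaddr is 0 and text_vlimit = ROUND_PAGE(0x5a10 + 0x660) = 0x7000, so mapsize is 0x7000; data_vaddr is 0x5000 and data_vlimit = ROUND_PAGE(0x5a10 + 0x220) = 0x6000, so the file-backed data mapping covers the single page at mapbase + 0x5000; the BSS bytes from clear_vaddr 0x5c30 up to 0x6000 are zeroed inside that page, and the rest of the BSS, from 0x6000 up to bss_vlimit 0x7000, comes from the anonymous MAP_FIXED mapping.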
Example 13
/* Verifies the kernel provided program header PT_LOAD entries and does the
 * segment mappings only if required. As the kernel already mapped the PT_LOAD
 * segments our RTLD should not map them again.
 */
void map_segments_RTLD (long fd, Elf32_Phdr *segs[2], Elf32_Half type, dso *so, Elf32_Phdr *segs_auxv[2]) {

  /* TODO: improve error handling ;) */
  for (int i = 0; i < 2; i++) {
    if (segs[i]->p_offset != segs_auxv[i]->p_offset
        || segs[i]->p_vaddr != segs_auxv[i]->p_vaddr
        || segs[i]->p_memsz != segs_auxv[i]->p_memsz) {
      sl_printf("map_segments_RTLD: difference in program headers found!\n");
      sl_exit(1);
    }
  }
  
  /* Adjust text segment addresses to page size */
  //Elf32_Off text_offset = TRUNC_PAGE(segs[0]->p_offset);
  Elf32_Addr text_vaddr = TRUNC_PAGE(segs[0]->p_vaddr);  
  Elf32_Addr text_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_memsz);
  unsigned long mapsize = text_vlimit - text_vaddr;
  
  /* Executable has to be loaded at constant address */
  void *base_addr = 0;
  if (type == ET_EXEC) {
    base_addr = (void *)text_vaddr;
  } else {
    sl_printf("map_segments_RTLD: object type is not ET_EXEC!\n");
    sl_exit(1);
  }

  /* TODO: what if base address lies in already mapped area? E.g. where the loader resides? */

  /* Text segment already mapped */
  char *mapbase = base_addr;

  /* Adjust data segment addresses to page size */
  //Elf32_Off data_offset = TRUNC_PAGE(segs[1]->p_offset);
  Elf32_Addr data_vaddr = TRUNC_PAGE(segs[1]->p_vaddr);
  Elf32_Addr data_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_filesz);
  void *data_addr = mapbase + (data_vaddr - text_vaddr);
  //long data_prot = convert_prot(segs[1]->p_flags);
  
  /* Clear BSS part */
  //Elf32_Addr clear_vaddr = segs[1]->p_vaddr + segs[1]->p_filesz;
  //void *clear_addr = mapbase + (clear_vaddr - text_vaddr);
  //void *clear_page = mapbase + (TRUNC_PAGE(clear_vaddr) - text_vaddr);
  //unsigned long nclear = data_vlimit - clear_vaddr;
 
  /* Allocate remaining part of bss section */
  Elf32_Addr bss_vaddr = data_vlimit;
  Elf32_Addr bss_vlimit = ROUND_PAGE(segs[1]->p_vaddr + segs[1]->p_memsz);
  void *bss_addr = mapbase + (bss_vaddr - text_vaddr);
  
  /* Save important information */
  so->base_addr = (type == ET_EXEC) ? 0 : mapbase;
  so->text_addr = mapbase;
  so->text_size = mapsize;  
  so->data_addr = data_addr;
  so->data_size = data_vlimit - data_vaddr;
  so->bss_addr = bss_addr;
  so->bss_size = bss_vlimit - bss_vaddr;
  so->end_addr = bss_addr + so->bss_size;
  so->text_prot = convert_prot(segs[0]->p_flags);
  so->data_prot = convert_prot(segs[1]->p_flags);
  so->bss_prot = convert_prot(segs[1]->p_flags);
}
Example 14
/**
 * Write the data in the request to the page.
 * @orig: the very original request issued by the user. It may span
 * multiple pages.
 */
ssize_t global_cached_io::__write(original_io_request *orig, thread_safe_page *p,
		std::vector<thread_safe_page *> &dirty_pages)
{
	ssize_t ret = 0;
	p->lock();
	assert(!p->is_old_dirty());
	if (!p->data_ready()) {
		if(!p->is_io_pending()) {
			assert(!p->is_dirty());
			assert(orig->has_overlap(p->get_offset(), PAGE_SIZE));

			// We are going to write to part of a page, therefore,
			// we need to first read the page.
			if (orig->get_offset() > p->get_offset()
					|| orig->get_offset() + orig->get_size()
					< p->get_offset() + PAGE_SIZE) {
				off_t off = orig->get_offset();
				data_loc_t pg_loc(orig->get_file_id(), ROUND_PAGE(off));
				io_req_extension *ext = ext_allocator->alloc_obj();

				io_request read_req(ext, pg_loc, READ, this, p->get_node_id());
				read_req.add_page(p);
				read_req.set_priv(p);
				assert(p->get_io_req() == NULL);
				p->set_io_pending(true);
				p->add_req(orig);
				p->unlock();
				send2underlying(read_req);
			}
			else {
				// This is an optimization. If we can overwrite the entire page,
				// we don't need to read the page first. However, we have to
				// make sure data is written to a page without anyone else
				// having IO operations on it.
				p->set_data_ready(true);
				thread_safe_page *dirty = __complete_req_unlocked(orig, p);
				if (dirty)
					dirty_pages.push_back(dirty);
				p->unlock();
				ret = PAGE_SIZE;
				finalize_partial_request(p, orig);
				// TODO I may need to move page dereference further down.
				// dirty page now doesn't have a reference.
				p->dec_ref();
			}
		}
		else {
			// If there is an IO pending, it means a read request
			// has been issued. It can't be a write request; otherwise,
			// the data in the page would be ready.
			assert(orig->get_access_method() == WRITE);
			p->add_req(orig);
			p->unlock();
		}
	}
	else {
		// The data in the page is ready. We can write data to the page directly.
		//
		// If data is ready, there shouldn't be an IO pending.
		// In other words, if the thread for writing dirty pages is writing
		// a page, the page will be referenced and therefore, can't be returned
		// from the cache.
		// TODO we should delay the write if the page is being written back.
//		assert(!p->is_io_pending());
		p->unlock();

		thread_safe_page *dirty = __complete_req(orig, p);
		if (dirty)
			dirty_pages.push_back(dirty);
		ret = orig->get_size();
		finalize_partial_request(p, orig);
		// TODO I may need to move page dereference further down.
		// dirty page now doesn't have a reference.
		p->dec_ref();
	}
	return ret;
}
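
Two concrete cases of the read-before-write test above (page size 4096 assumed): a request with offset 8192 and size 4096 targeting the page at offset 8192 fails both halves of the condition, so the whole-page branch marks the data ready and merges the buffer without reading; a request with offset 8192 and size 100 satisfies the second half (it ends before the page does), so the page is first read from the underlying IO and the 100 bytes are merged in once the read completes.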
Example 15

/*
 * Create a new 1-bit per handle BMP with the handles starting
 * at address `rep_addr' and the handles area being `rep_size'
 * bytes long.
 */
void H1BIT_Init(H1BIT_BMP* bmp, unsigned* rep_addr, unsigned rep_size )
{
  /* Each bit in the bitmap represents a handle, which
   * takes 2^H_GRAIN_BITS bytes.  So a byte in the
   * bitmap represents 2^(H_GRAIN_BITS+3) bytes in the
   * handle space.
   */
  bmp->bmp_size = rep_size >> (H_GRAIN_BITS+3);
  bmp->bmp_size = ROUND_PAGE( bmp->bmp_size );
  bmp->bmp = (byte*)mokMemReserve( NULL, bmp->bmp_size );
  mokMemCommit( bmp->bmp, bmp->bmp_size, true );
  bmp->rep_addr = (byte*)rep_addr;
  bmp->entry = bmp->bmp - (((unsigned)rep_addr)>>H1B_NON_BS_BITS);
}
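
As a sanity check on the sizing (H_GRAIN_BITS is not shown here, so take H_GRAIN_BITS == 5 purely as an assumption): a handle then occupies 32 bytes, one bitmap byte covers 8 handles, i.e. 2^(5+3) = 256 bytes of handle space, so a 16 MiB handle area needs a 64 KiB bitmap before ROUND_PAGE; bmp->entry is pre-biased by rep_addr >> H1B_NON_BS_BITS so that a lookup can index it directly with a handle's shifted address.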


/******************************************************************** 

Implementation of a 2 bit per handle BMP.

Layout of a handle:

| 31 --------- 5 | 4 -- 3 | 2 - 0 |
|      BS        |   FS   |  Z    |