/*
 * preallocate a memory area of the given size. abort on failure
 */
void PreallocateUserMemory(struct NaClApp *nap)
{
  struct SetupList *policy = nap->manifest->user_setup;
  uintptr_t i = nap->data_end;
  /* size of the user heap: everything between the data end and the stack */
  uint32_t stump = nap->manifest->user_setup->max_mem - nap->stack_size - nap->data_end;
  uint32_t dead_space;
  struct NaClVmmapEntry *user_space;

  /* proceed only if max_mem is specified in the manifest */
  if(!policy->max_mem) return;

  /* user memory chunk must be allocated next to the data end */
  i = (i + NACL_MAP_PAGESIZE - 1) & ~(NACL_MAP_PAGESIZE - 1);
  /* 3 == PROT_READ | PROT_WRITE, 0x22 == MAP_PRIVATE | MAP_ANONYMOUS */
  policy->heap_ptr = NaClCommonSysMmapIntern(nap, (void*)i, stump, 3, 0x22, -1, 0);
  assert(policy->heap_ptr == i);

  /*
   * free the "whole chunk" block without real memory deallocation;
   * the map entry we need is the last one in the list
   */
  user_space = nap->mem_map.vmentry[nap->mem_map.nvalid - 1];
  assert(policy->heap_ptr / NACL_PAGESIZE == user_space->page_num);
  assert(nap->mem_map.is_sorted != 1);

  /* protect dead space */
  dead_space = NaClVmmapFindMaxFreeSpace(&nap->mem_map, 1) * NACL_PAGESIZE;
  i = (user_space->page_num + user_space->npages) * NACL_PAGESIZE;
  /* prot == 0 (PROT_NONE) makes the gap inaccessible */
  dead_space = NaClCommonSysMmapIntern(nap, (void*)i, dead_space, 0, 0x22, -1, 0);
  assert(dead_space == i);

  /* sort and remove deleted blocks */
  user_space->removed = 1;
  nap->mem_map.is_sorted = 0;
  /* force the sort because we need to get rid of removed blocks */
  NaClVmmapMakeSorted(&nap->mem_map);

  /*
   * why 0xfffff000?
   * 1. 0x1000 is reserved for error codes
   * 2. it is still larger than 4gb - stack
   */
  COND_ABORT(policy->heap_ptr > 0xfffff000, "cannot preallocate memory for user\n");
}
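/*
 * A minimal sketch (not part of the original source) of the page-rounding
 * idiom used above: rounding an address up to the next NACL_MAP_PAGESIZE
 * boundary with a mask.  EXAMPLE_MAP_PAGESIZE and RoundUpToMapPage() are
 * hypothetical names introduced only for illustration; 0x10000 (64K) is
 * assumed to be the NaCl allocation granularity.
 */
#include <assert.h>
#include <stdint.h>

#define EXAMPLE_MAP_PAGESIZE 0x10000 /* assumed value of NACL_MAP_PAGESIZE */

static uintptr_t RoundUpToMapPage(uintptr_t addr)
{
  /* works because EXAMPLE_MAP_PAGESIZE is a power of two */
  return (addr + EXAMPLE_MAP_PAGESIZE - 1) & ~(uintptr_t)(EXAMPLE_MAP_PAGESIZE - 1);
}

static void RoundUpToMapPageExample(void)
{
  assert(RoundUpToMapPage(0x10000) == 0x10000); /* already aligned */
  assert(RoundUpToMapPage(0x10001) == 0x20000); /* rounds up to the next 64K page */
}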
/*
 * premap the given file (channel). return 0 on success, otherwise a negative errcode
 * note: uses malloc()
 */
int PremapChannel(struct NaClApp *nap, struct PreOpenedFileDesc* channel)
{
  int desc;
  struct NaClHostDesc *hd = malloc(sizeof(*hd));

  /* debug checks */
  COND_ABORT(!hd, "cannot allocate memory to hold channel descriptor\n");
  COND_ABORT(!channel, "channel is not constructed\n");
  COND_ABORT(channel->mounted != MAPPED, "channel is not supposed to be mapped\n");
  COND_ABORT(!channel->name, "cannot resolve channel name\n");

  /* open the file */
  channel->handle = open((char*)channel->name, GetChannelOpenFlags(channel), S_IRWXU);
  COND_ABORT(channel->handle < 0, "channel open error\n");

  /* check whether the given file is within the manifest limits */
  channel->fsize = GetFileSize((char*)channel->name);
  PreallocateChannel(channel);
  COND_ABORT(channel->max_size < channel->fsize, "channel length exceeded policy limit\n");

  /* construct the nacl descriptor */
  hd->d = channel->handle;
  desc = NaClSetAvail(nap, ((struct NaClDesc *) NaClDescIoDescMake(hd)));

  /* map the whole file into memory. the address cannot be higher than the stack */
  channel->buffer = NaClCommonSysMmapIntern(nap, NULL, channel->fsize,
      GetChannelMapProt(channel), GetChannelMapFlags(channel), desc, 0);
  COND_ABORT((uint32_t)channel->buffer > 0xFF000000, "channel map error\n");

  /* mounting finalization */
  close(channel->handle);
  channel->bsize = channel->fsize; /* a mapped file is always put into memory whole */
  channel->handle = -1; /* there is no opened file for a mapped channel */

  return 0;
}
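/*
 * A hedged usage sketch (not from the original source) showing how
 * PremapChannel() might be driven for every channel that the manifest marks
 * as MAPPED.  The channels array, channels_count and PremapAllChannels() are
 * assumptions introduced only to illustrate the calling convention:
 * PremapChannel() returns 0 on success and aborts via COND_ABORT otherwise.
 */
static void PremapAllChannels(struct NaClApp *nap,
                              struct PreOpenedFileDesc *channels,
                              int channels_count)
{
  int i;

  for(i = 0; i < channels_count; ++i)
  {
    /* skip channels that are not mapped into user memory */
    if(channels[i].mounted != MAPPED) continue;
    COND_ABORT(PremapChannel(nap, &channels[i]) != 0, "cannot premap channel\n");
  }
}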
struct NaClSignalContext *StartGuestWithSharedMemory(
    struct NaClApp *nap) {
  char arg_string[32];
  char *args[] = {"prog_name", arg_string};
  uint32_t mmap_addr;
  struct NaClSignalContext *expected_regs;

  /*
   * Allocate some space in untrusted address space.  We pass the
   * address to the guest program so that we can share data with it.
   */
  mmap_addr = NaClCommonSysMmapIntern(
      nap, NULL, sizeof(*expected_regs),
      NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
      NACL_ABI_MAP_PRIVATE | NACL_ABI_MAP_ANONYMOUS,
      -1, 0);
  SNPRINTF(arg_string, sizeof(arg_string), "0x%x", (unsigned int) mmap_addr);
  expected_regs = (struct NaClSignalContext *) NaClUserToSys(nap, mmap_addr);

  WaitForThreadToExitFully(nap);

  CHECK(NaClCreateMainThread(nap, NACL_ARRAY_SIZE(args), args, NULL));
  return expected_regs;
}
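/*
 * Guest-side counterpart, a hedged sketch (not the original test code): the
 * untrusted program receives the address written by SNPRINTF() above as a
 * hex string in argv[1] and converts it back to a pointer, so both sides
 * share the same NaClSignalContext buffer.  The struct definition is assumed
 * to come from the NaCl signal headers.
 */
#include <stdlib.h>

int main(int argc, char **argv) {
  struct NaClSignalContext *shared;

  if (argc < 2)
    return 1;
  /* strtoul() with base 0 accepts the "0x..." form produced by the host */
  shared = (struct NaClSignalContext *) strtoul(argv[1], NULL, 0);
  /* ... the guest can now fill in *shared for the trusted side to inspect ... */
  (void) shared;
  return 0;
}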
NaClErrorCode NaClElfImageLoadDynamically(struct NaClElfImage *image,
                                          struct NaClApp *nap,
                                          struct Gio *gfile) {
  int segnum;
  for (segnum = 0; segnum < image->ehdr.e_phnum; ++segnum) {
    const Elf_Phdr *php = &image->phdrs[segnum];
    int32_t result;

    /*
     * We check for PT_LOAD directly rather than using the "loadable"
     * array because we are not using NaClElfImageValidateProgramHeaders()
     * to fill out the "loadable" array for this ELF object.  This ELF
     * object does not have to fit such strict constraints (such as
     * having code at 0x20000), and safety checks are applied by
     * NaClTextDyncodeCreate() and NaClCommonSysMmapIntern().
     */
    if (PT_LOAD != php->p_type) {
      continue;
    }

    /*
     * Ideally, Gio would have a Pread() method which we would use
     * instead of Seek().  In practice, though, there is no
     * Seek()/Read() race condition here because both
     * GioMemoryFileSnapshot and NaClGioShm use a seek position that
     * is local and not shared between processes.
     */
    if ((*gfile->vtbl->Seek)(gfile, (off_t) php->p_offset,
                             SEEK_SET) == (off_t) -1) {
      NaClLog(1, "NaClElfImageLoadDynamically: seek failed\n");
      return LOAD_READ_ERROR;
    }

    if (0 != (php->p_flags & PF_X)) {
      /* Load code segment. */
      /*
       * We make a copy of the code.  This is not ideal given that
       * GioMemoryFileSnapshot and NaClGioShm already have a copy of
       * the file in memory or mmapped.
       * TODO(mseaborn): Reduce the amount of copying here.
       */
      char *code_copy = malloc(php->p_filesz);
      if (NULL == code_copy) {
        NaClLog(1, "NaClElfImageLoadDynamically: malloc failed\n");
        return LOAD_NO_MEMORY;
      }
      if ((Elf_Word) (*gfile->vtbl->Read)(gfile, code_copy, php->p_filesz)
          != php->p_filesz) {
        free(code_copy);
        NaClLog(1, "NaClElfImageLoadDynamically: "
                "failed to read code segment\n");
        return LOAD_READ_ERROR;
      }
      result = NaClTextDyncodeCreate(nap, (uint32_t) php->p_vaddr,
                                     code_copy, (uint32_t) php->p_filesz);
      free(code_copy);
      if (0 != result) {
        NaClLog(1, "NaClElfImageLoadDynamically: "
                "failed to load code segment\n");
        return LOAD_UNLOADABLE;
      }
    } else {
      /* Load data segment. */
      void *paddr = (void *) NaClUserToSys(nap, php->p_vaddr);
      size_t mapping_size = NaClRoundAllocPage(php->p_memsz);
      /*
       * Note that we do not use NACL_ABI_MAP_FIXED because we do not
       * want to silently overwrite any existing mappings, such as the
       * user app's data segment or the stack.  We detect overmapping
       * when mmap chooses not to use the preferred address we supply.
       * (Ideally mmap would provide a MAP_EXCL option for this
       * instead.)
       */
      result = NaClCommonSysMmapIntern(
          nap, (void *) php->p_vaddr, mapping_size,
          NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
          NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE,
          -1, 0);
      if ((int32_t) php->p_vaddr != result) {
        NaClLog(1, "NaClElfImageLoadDynamically: failed to map data segment\n");
        return LOAD_UNLOADABLE;
      }
      if ((Elf_Word) (*gfile->vtbl->Read)(gfile, paddr, php->p_filesz)
          != php->p_filesz) {
        NaClLog(1, "NaClElfImageLoadDynamically: "
                "failed to read data segment\n");
        return LOAD_READ_ERROR;
      }
      /*
       * Note that we do not need to zero the BSS (the region from
       * p_filesz to p_memsz) because it should already be zero
       * filled.  This would not be the case if we were mapping the
       * data segment from the file.
       */

      if (0 == (php->p_flags & PF_W)) {
        /* Handle read-only data segment. */
        int rc = NaCl_mprotect(paddr, mapping_size, NACL_ABI_PROT_READ);
        if (0 != rc) {
          NaClLog(1, "NaClElfImageLoadDynamically: "
                  "failed to mprotect read-only data segment\n");
          return LOAD_MPROTECT_FAIL;
        }

        NaClVmmapUpdate(&nap->mem_map,
                        php->p_vaddr >> NACL_PAGESHIFT,
                        mapping_size >> NACL_PAGESHIFT,
                        PROT_READ,
                        NULL,
                        0 /* remove: false */);
      }
    }
  }
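/*
 * A minimal sketch (an assumption, not part of the original loader) of the
 * "no MAP_FIXED" overmap check used above: ask NaClCommonSysMmapIntern() for
 * a preferred user address without NACL_ABI_MAP_FIXED and treat any other
 * result as "the range is already occupied".  MapAtExactUserAddress() is a
 * hypothetical helper; cleaning up a stray mapping is left out for brevity.
 */
static int MapAtExactUserAddress(struct NaClApp *nap,
                                 uint32_t user_addr,
                                 size_t size) {
  int32_t result = NaClCommonSysMmapIntern(
      nap, (void *) (uintptr_t) user_addr, size,
      NACL_ABI_PROT_READ | NACL_ABI_PROT_WRITE,
      NACL_ABI_MAP_ANONYMOUS | NACL_ABI_MAP_PRIVATE,
      -1, 0);
  /*
   * A negative result is an errno-style failure; a different positive
   * address means the requested range was already mapped.
   */
  return (int32_t) user_addr == result ? 0 : -1;
}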