/* ### * helper function. extract file name from given * NaClAppThread and position in desc_tbl * return file name if successfully extracted, otherwise - NULL */ const char * GetFileName(struct NaClAppThread *natp, int d) { struct NaClDesc *ndp; uintptr_t sysaddr; /* get channel name */ ndp = NaClGetDesc(natp->nap, d); /* check if NaClDesc address is valid */ sysaddr = NaClUserToSysAddr(natp->nap, (uintptr_t) ndp); if (kNaClBadAddress == sysaddr || NULL != ndp) { return NULL; } // return ((struct NaClDescIoDesc *) ndp)->hd->channel; return NULL; // ### }
/*
 * Copy a NUL-terminated string out of untrusted user address space.
 *
 * Copies at most |dst_buffer_bytes| bytes starting at user address
 * |src_usr_addr| into |dst_buffer|, holding the app lock so the source
 * mapping cannot change mid-copy.
 *
 * Returns 1 if a fully NUL-terminated string was copied; returns 0 if
 * the user address was invalid (dst_buffer becomes the empty string)
 * or if the source string was truncated (dst_buffer is forcibly
 * NUL-terminated at its last byte).
 */
int NaClCopyInFromUserZStr(struct NaClApp *nap,
                           char *dst_buffer,
                           size_t dst_buffer_bytes,
                           uintptr_t src_usr_addr) {
  uintptr_t sysaddr;

  CHECK(dst_buffer_bytes > 0);

  sysaddr = NaClUserToSysAddr(nap, src_usr_addr);
  if (sysaddr == kNaClBadAddress) {
    /* invalid user address: yield an empty string and report failure */
    dst_buffer[0] = '\0';
    return 0;
  }

  NaClXMutexLock(&nap->mu);
  strncpy(dst_buffer, (char *) sysaddr, dst_buffer_bytes);
  NaClXMutexUnlock(&nap->mu);

  /*
   * POSIX strncpy NUL-pads the destination when the source fits, so a
   * non-NUL final byte can only mean the source string was too long.
   */
  if ('\0' != dst_buffer[dst_buffer_bytes - 1]) {
    dst_buffer[dst_buffer_bytes - 1] = '\0';
    return 0;
  }

  return 1;
}
/*
 * Load the loadable ELF program segments described by |image| from
 * descriptor |ndp| into the untrusted address space of |nap|.
 *
 * Each segment is mapped via NaClElfFileMapSegment when the descriptor
 * is judged safe for mmap (or the safety check is bypassed by fault
 * injection), and otherwise read in with the descriptor's PRead vtable
 * method.  Returns LOAD_OK on success or a NaClErrorCode on failure.
 */
NaClErrorCode NaClElfImageLoad(struct NaClElfImage *image,
                               struct NaClDesc *ndp,
                               struct NaClApp *nap) {
  int segnum;
  uintptr_t vaddr;       /* segment start, truncated to an alloc page */
  uintptr_t paddr;       /* trusted (system) address for vaddr */
  uintptr_t end_vaddr;   /* one past the last file-backed byte */
  ssize_t read_ret;
  int safe_for_mmap;

  for (segnum = 0; segnum < image->ehdr.e_phnum; ++segnum) {
    const Elf_Phdr *php = &image->phdrs[segnum];
    /*
     * Round the file offset down to an allocation-page boundary and
     * widen filesz by the same amount, so that offset/filesz describe
     * a page-aligned window covering the segment's file contents.
     */
    Elf_Off offset = (Elf_Off) NaClTruncAllocPage(php->p_offset);
    Elf_Off filesz = php->p_offset + php->p_filesz - offset;

    /* did we decide that we will load this segment earlier? */
    if (!image->loadable[segnum]) {
      continue;
    }

    NaClLog(2, "loading segment %d\n", segnum);

    if (0 == php->p_filesz) {
      NaClLog(4, "zero-sized segment. ignoring...\n");
      continue;
    }

    end_vaddr = php->p_vaddr + php->p_filesz;

    /*
     * Integer overflow?  Validation is expected to have happened
     * earlier, so overflow here is a fatal internal error.
     */
    if (end_vaddr < php->p_vaddr) {
      NaClLog(LOG_FATAL,
              "parameter error should have been detected already\n");
    }

    /*
     * Is the end virtual address within the NaCl application's address
     * space?  If it is, it implies that the start virtual address is
     * also.
     */
    if (end_vaddr >= ((uintptr_t) 1U << nap->addr_bits)) {
      NaClLog(LOG_FATAL,
              "parameter error should have been detected already\n");
    }

    vaddr = NaClTruncAllocPage(php->p_vaddr);
    paddr = NaClUserToSysAddr(nap, vaddr);
    CHECK(kNaClBadAddress != paddr);

    /*
     * Check NaClDescIsSafeForMmap(ndp) to see if it might be okay to
     * mmap the segment instead of copying it in.
     */
    NaClLog(4, "NaClElfImageLoad: checking descriptor mmap safety\n");
    safe_for_mmap = NaClDescIsSafeForMmap(ndp);
    if (safe_for_mmap) {
      NaClLog(4, "NaClElfImageLoad: safe-for-mmap\n");
    }

    /*
     * Fault-injection hook: allow tests to force the mmap path even
     * for descriptors that failed the safety check.
     */
    if (!safe_for_mmap &&
        NACL_FI("ELF_LOAD_BYPASS_DESCRIPTOR_SAFETY_CHECK", 0, 1)) {
      NaClLog(LOG_WARNING, "WARNING: BYPASSING DESCRIPTOR SAFETY CHECK\n");
      safe_for_mmap = 1;
    }

    if (safe_for_mmap) {
      NaClErrorCode map_status;

      NaClLog(4, "NaClElfImageLoad: safe-for-mmap\n");
      map_status = NaClElfFileMapSegment(nap, ndp, php->p_flags,
                                         offset, filesz, vaddr, paddr);
      /*
       * NB: -Werror=switch-enum forces us to not use a switch.
       */
      if (LOAD_OK == map_status) {
        /* Segment has been handled -- proceed to next segment */
        continue;
      } else if (LOAD_STATUS_UNKNOWN != map_status) {
        /*
         * A real error!  Return it so that this can be reported to
         * the embedding code (via start_module status).
         */
        return map_status;
      }
      /* Fall through: pread-based fallback requested */
    }

    NaClLog(4,
            "PReading %"NACL_PRIdElf_Xword" (0x%"NACL_PRIxElf_Xword
            ") bytes to"
            " address 0x%"NACL_PRIxPTR", position %"
            NACL_PRIdElf_Off" (0x%"NACL_PRIxElf_Off")\n",
            filesz, filesz, paddr, offset, offset);

    /*
     * Tell valgrind that this memory is accessible and undefined.  For
     * more details see
     * http://code.google.com/p/nativeclient/wiki/ValgrindMemcheck#Implementation_details
     */
    NACL_MAKE_MEM_UNDEFINED((void *) paddr, filesz);

    read_ret = (*NACL_VTBL(NaClDesc, ndp)->
                PRead)(ndp, (void *) paddr, filesz, (nacl_off64_t) offset);
    /* a short read is treated the same as an outright error */
    if (NaClSSizeIsNegErrno(&read_ret) || (size_t) read_ret != filesz) {
      NaClLog(LOG_ERROR, "load failure segment %d", segnum);
      return LOAD_SEGMENT_BAD_PARAM;
    }

    /* region from p_filesz to p_memsz should already be zero filled */

    /* Tell Valgrind that we've mapped a segment of nacl_file. */
    NaClFileMappingForValgrind(paddr, filesz, offset);
  }

  return LOAD_OK;
}
/*
 * brk() syscall implementation: set the untrusted break to |new_break|.
 *
 * Returns the resulting break address on success.  On any failure the
 * *previous* break address (or, before the first check succeeds, the
 * initial -NACL_ABI_EINVAL value cast path is never reached because
 * break_addr is assigned first) is returned; under PNaCl the syscall is
 * disabled entirely and returns -NACL_ABI_ENOSYS.
 *
 * Growing the break either bumps the break within the existing VM-map
 * segment, or extends that segment up to (but not into) the next
 * mapped region, mprotect()ing the newly exposed pages RW and zeroing
 * the range between old and new break.
 */
int32_t NaClSysBrk(struct NaClAppThread *natp, uintptr_t new_break) {
  struct NaClApp *nap = natp->nap;
  uintptr_t break_addr;            /* value to return (usr address) */
  int32_t rv = -NACL_ABI_EINVAL;
  struct NaClVmmapIter iter;
  struct NaClVmmapEntry *ent;      /* segment containing current break */
  struct NaClVmmapEntry *next_ent; /* segment after |ent|, if any */
  uintptr_t sys_break;             /* trusted addr of current break */
  uintptr_t sys_new_break;         /* trusted addr of requested break */
  uintptr_t usr_last_data_page;
  uintptr_t usr_new_last_data_page;
  uintptr_t last_internal_data_addr;
  uintptr_t last_internal_page;
  uintptr_t start_new_region;
  uintptr_t region_size;

  /*
   * The sysbrk() IRT interface is deprecated and is not enabled for
   * ABI-stable PNaCl pexes, so for security hardening, disable the
   * syscall under PNaCl too.
   */
  if (nap->pnacl_mode)
    return -NACL_ABI_ENOSYS;

  break_addr = nap->break_addr;

  NaClLog(3, "Entered NaClSysBrk(new_break 0x%08"NACL_PRIxPTR")\n",
          new_break);

  sys_new_break = NaClUserToSysAddr(nap, new_break);
  NaClLog(3, "sys_new_break 0x%08"NACL_PRIxPTR"\n", sys_new_break);

  /* reject addresses outside the untrusted address space */
  if (kNaClBadAddress == sys_new_break) {
    goto cleanup_no_lock;
  }
  if (NACL_SYNC_OK != NaClMutexLock(&nap->mu)) {
    NaClLog(LOG_ERROR, "Could not get app lock for 0x%08"NACL_PRIxPTR"\n",
            (uintptr_t) nap);
    goto cleanup_no_lock;
  }
  /* the break may never move below the end of the data segment */
  if (new_break < nap->data_end) {
    NaClLog(4, "new_break before data_end (0x%"NACL_PRIxPTR")\n",
            nap->data_end);
    goto cleanup;
  }
  if (new_break <= nap->break_addr) {
    /* freeing memory */
    NaClLog(4, "new_break before break (0x%"NACL_PRIxPTR"); freeing\n",
            nap->break_addr);
    nap->break_addr = new_break;
    break_addr = new_break;
  } else {
    /*
     * See if page containing new_break is in mem_map; if so, we are
     * essentially done -- just update break_addr.  Otherwise, we
     * extend the VM map entry from the page containing the current
     * break to the page containing new_break.
     */
    sys_break = NaClUserToSys(nap, nap->break_addr);

    usr_last_data_page = (nap->break_addr - 1) >> NACL_PAGESHIFT;

    usr_new_last_data_page = (new_break - 1) >> NACL_PAGESHIFT;

    /* last byte of the allocation page that must back new_break */
    last_internal_data_addr = NaClRoundAllocPage(new_break) - 1;
    last_internal_page = last_internal_data_addr >> NACL_PAGESHIFT;

    NaClLog(4, ("current break sys addr 0x%08"NACL_PRIxPTR", "
                "usr last data page 0x%"NACL_PRIxPTR"\n"),
            sys_break, usr_last_data_page);
    NaClLog(4, "new break usr last data page 0x%"NACL_PRIxPTR"\n",
            usr_new_last_data_page);
    NaClLog(4, "last internal data addr 0x%08"NACL_PRIxPTR"\n",
            last_internal_data_addr);

    /* the current break must always live inside some mapped segment */
    if (NULL == NaClVmmapFindPageIter(&nap->mem_map,
                                      usr_last_data_page,
                                      &iter)
        || NaClVmmapIterAtEnd(&iter)) {
      NaClLog(LOG_FATAL, ("current break (0x%08"NACL_PRIxPTR", "
                          "sys 0x%08"NACL_PRIxPTR") "
                          "not in address map\n"),
              nap->break_addr, sys_break);
    }
    ent = NaClVmmapIterStar(&iter);
    NaClLog(4, ("segment containing current break"
                ": page_num 0x%08"NACL_PRIxPTR", npages 0x%"NACL_PRIxS"\n"),
            ent->page_num, ent->npages);
    if (usr_new_last_data_page < ent->page_num + ent->npages) {
      /* new break still fits in the current segment: no map changes */
      NaClLog(4, "new break within break segment, just bumping addr\n");
      nap->break_addr = new_break;
      break_addr = new_break;
    } else {
      /* would extending the segment collide with the next mapping? */
      NaClVmmapIterIncr(&iter);
      if (!NaClVmmapIterAtEnd(&iter)
          && ((next_ent = NaClVmmapIterStar(&iter))->page_num
              <= last_internal_page)) {
        /* ran into next segment! */
        NaClLog(4,
                ("new break request of usr address "
                 "0x%08"NACL_PRIxPTR" / usr page 0x%"NACL_PRIxPTR
                 " runs into next region, page_num 0x%"NACL_PRIxPTR", "
                 "npages 0x%"NACL_PRIxS"\n"),
                new_break, usr_new_last_data_page,
                next_ent->page_num, next_ent->npages);
        goto cleanup;
      }
      NaClLog(4, "extending segment: page_num 0x%08"NACL_PRIxPTR", "
              "npages 0x%"NACL_PRIxS"\n",
              ent->page_num, ent->npages);
      /* go ahead and extend ent to cover, and make pages accessible */
      start_new_region = (ent->page_num + ent->npages) << NACL_PAGESHIFT;
      ent->npages = (last_internal_page - ent->page_num + 1);
      region_size = (((last_internal_page + 1) << NACL_PAGESHIFT)
                     - start_new_region);
      if (0 != NaClMprotect((void *) NaClUserToSys(nap, start_new_region),
                            region_size,
                            PROT_READ | PROT_WRITE)) {
        NaClLog(LOG_FATAL,
                ("Could not mprotect(0x%08"NACL_PRIxPTR", "
                 "0x%08"NACL_PRIxPTR", "
                 "PROT_READ|PROT_WRITE)\n"),
                start_new_region,
                region_size);
      }
      NaClLog(4, "segment now: page_num 0x%08"NACL_PRIxPTR", "
              "npages 0x%"NACL_PRIxS"\n",
              ent->page_num, ent->npages);
      nap->break_addr = new_break;
      break_addr = new_break;
    }
    /*
     * Zero out memory between old break and new break.
     */
    ASSERT(sys_new_break > sys_break);
    memset((void *) sys_break, 0, sys_new_break - sys_break);
  }

cleanup:
  NaClXMutexUnlock(&nap->mu);
cleanup_no_lock:

  /*
   * This cast is safe because the incoming value (new_break) cannot
   * exceed the user address space--even though its type (uintptr_t)
   * theoretically allows larger values.
   */
  rv = (int32_t) break_addr;

  NaClLog(3, "NaClSysBrk: returning 0x%08"NACL_PRIx32"\n", rv);
  return rv;
}