/* Post-handler for the read() syscall: if the fd is tracked as tainted,
   mark every byte the kernel wrote into the user buffer as a tainted
   input byte, labelled with its absolute offset in the input stream.
     tid   - Valgrind thread id of the caller
     args  - raw syscall args: args[0] = fd, args[1] = buffer address
     nArgs - number of syscall args (only used for logging)
     res   - syscall result; sr_Res(res) = number of bytes actually read */
void FZ_(syscall_read)(ThreadId tid, UWord *args, UInt nArgs, SysRes res) {
   if(fengSysFlag){VG_(printf)("feng:entered syscall_read\n");}
   printArgs(args,nArgs,"read");
   UInt i, j, k;
   Int fd = -1;
   Char *data = NULL;
   //populate_guest_args(tid);
   fd = ((Int)args[0])/*guest_args[tid].args[3]*/;
   data = (Char *)(args[1]/*guest_args[tid].args[1]*/);
   //VG_(printf)("[?] tid %d read(%d) tainted %d\n", tid, fd, tainted_fds[tid][fd]);
   /* Nothing to do unless the read succeeded on a valid, tainted fd. */
   if (fd < 0 || sr_isError(res) || !tainted_fds[tid][fd]) {
      return;
   }
   /* k = cumulative byte offset of this read within the fd's input stream. */
   k = position_fds[tid][fd];
   for (i = 0; i < sr_Res(res); i++) {
#ifdef FENG_AMD64
      if(fengSysFlag){VG_(printf)("feng:addr:%llu\n",((ULong)data + i));}
      /* Register a byte-granularity dependency for buffer byte i and label
         it with its absolute input offset (k + i).
         NOTE(review): 'pj' is not declared in this function - presumably a
         file-scope dependency pointer; confirm. */
      j = add_dependency_addr((Addr)((ULong)data + i), 8);
      //VG_(printf)("[+] tid %d read(%d) tainting byte %d (0x%08x)\n",
      //            tid, fd, k + i, (ULong)(data + i));
      pj=getDep(&depaddr8,j);
      VG_(snprintf)(pj->cons, XXX_MAX_BUF, "input(%d)", k + i);
#else
      if(fengSysFlag){VG_(printf)("feng:addr:%llu\n",((UInt)data + i));}
      j = add_dependency_addr((Addr)(UInt)(data + i), 8);
      //VG_(printf)("[+] tid %d read(%d) tainting byte %d (0x%08x)\n",
      //            tid, fd, k + i, (UInt)(data + i));
      pj=getDep(&depaddr8,j);
      VG_(snprintf)(pj->cons, XXX_MAX_BUF, "input(%d)", k + i);
#endif // FENG_AMD64
   }
   /* Advance the stream position past the bytes consumed by this read. */
   position_fds[tid][fd] += sr_Res(res);
}
/* Try to load $dir/.valgrindrc and return its contents as a freshly
   allocated, NUL-terminated string (ownership passes to the caller),
   or NULL if the file is absent, empty, or fails the security checks.
   Security: the file is ignored unless it is owned by the effective
   user AND not world-writeable (CVE-2008-4865). */
static HChar* read_dot_valgrindrc ( const HChar* dir )
{
   Int    n;
   SysRes fd;
   struct vg_stat stat_buf;
   HChar* f_clo = NULL;
   const  HChar dot_valgrindrc[] = ".valgrindrc";

   vg_assert(dir != NULL);

   /* VLA sized for "dir" + "/" + ".valgrindrc" + NUL. */
   HChar filename[VG_(strlen)(dir) + 1 + VG_(strlen)(dot_valgrindrc) + 1];
   VG_(sprintf)(filename, "%s/%s", dir, dot_valgrindrc);

   fd = VG_(open)(filename, 0, VKI_S_IRUSR);
   if ( !sr_isError(fd) ) {
      Int res = VG_(fstat)( sr_Res(fd), &stat_buf );
      // Ignore if not owned by current user or world writeable (CVE-2008-4865)
      if (!res && stat_buf.uid == VG_(geteuid)()
          && (!(stat_buf.mode & VKI_S_IWOTH))) {
         if ( stat_buf.size > 0 ) {
            f_clo = VG_(malloc)("commandline.rdv.1", stat_buf.size+1);
            n = VG_(read)(sr_Res(fd), f_clo, stat_buf.size);
            /* A failed read yields an empty (but still allocated) string. */
            if (n == -1) n = 0;
            vg_assert(n >= 0 && n <= stat_buf.size+1);
            f_clo[n] = '\0';
         }
      }
      else
         /* Also reached when fstat itself failed (res != 0). */
         VG_(message)(Vg_UserMsg,
            "%s was not read as it is either world writeable or not "
            "owned by the current user\n", filename);
      VG_(close)(sr_Res(fd));
   }
   return f_clo;
}
Int ML_(am_readlink)(const HChar* path, HChar* buf, UInt bufsiz) { SysRes res; # if defined(VGP_arm64_linux) res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD, (UWord)path, (UWord)buf, bufsiz); # elif defined(VGP_tilegx_linux) res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD, (UWord)path, (UWord)buf, bufsiz); # elif defined(VGO_linux) || defined(VGO_darwin) res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz); # elif defined(VGO_solaris) res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD, (UWord)path, (UWord)buf, bufsiz); # else # error Unknown OS # endif return sr_isError(res) ? -1 : sr_Res(res); }
/* Open and map a dylinker file.
   Returns 0 on success, -1 on any failure.
   filename must be an absolute path.
   The dylinker's entry point is returned in *out_linker_entry.
*/
static int open_dylinker(const char *filename, vki_uint8_t **out_linker_entry)
{
   struct vg_stat sb;
   vki_size_t filesize;
   SysRes res;
   int fd;
   int err;

   if (filename[0] != '/') {
      print("bad executable (dylinker name is not an absolute path)\n");
      return -1;
   }

   res = VG_(open)(filename, VKI_O_RDONLY, 0);
   if (sr_isError(res)) {
      print("couldn't open dylinker: ");
      print(filename);
      print("\n");
      return -1;
   }
   /* Fix: only extract the fd once we know the open succeeded;
      previously sr_Res() was read before the sr_isError() check. */
   fd = sr_Res(res);

   err = VG_(fstat)(fd, &sb);
   if (err) {
      print("couldn't stat dylinker: ");
      print(filename);
      print("\n");
      VG_(close)(fd);
      return -1;
   }
   filesize = sb.size;

   /* Map the dylinker; its entry point comes back via out_linker_entry.
      The other out-params are not needed for MH_DYLINKER. */
   err = load_mach_file(fd, 0, filesize, MH_DYLINKER, filename,
                        NULL, NULL, NULL, out_linker_entry, NULL);
   if (err) {
      print("...while loading dylinker: ");
      print(filename);
      print("\n");
   }
   VG_(close)(fd);
   return err;
}
static void lk_write_global(const char* path) { int fd; SysRes res; res = VG_(open) (path, VKI_O_CREAT|VKI_O_WRONLY|VKI_O_TRUNC, 0); if (sr_isError(res)) { VG_(printf)("Error opening file!\n"); return; } fd = (int) sr_Res(res); char buffer [1000]; int cx = VG_(snprintf)(buffer, 1000, "%lu\n",global_counter); VG_(write) (fd, buffer,cx); //Close file VG_(close)(fd); }
// Look at the first 80 chars, and if any are greater than 127, it's binary. // This is crude, but should be good enough. Note that it fails on a // zero-length file, as we want. static Bool is_binary_file(Char* f) { SysRes res = VG_(open)(f, VKI_O_RDONLY, 0); if (!sr_isError(res)) { UChar buf[80]; Int fd = sr_Res(res); Int n = VG_(read)(fd, buf, 80); Int i; for (i = 0; i < n; i++) { if (buf[i] > 127) return True; // binary char found } return False; } else { // Something went wrong. This will only happen if we earlier // succeeded in opening the file but fail here (eg. the file was // deleted between then and now). VG_(printf)("valgrind: %s: unknown error\n", f); VG_(exit)(126); // 126 == NOEXEC } }
/* Serialise the per-input-file used-offset sets into 'fileName'.
   For each input file: its name is written, then a byte map in which
   '\1' marks a used offset and '\0' fills the gaps, then a newline.
   Consumes (destroys/frees) the global inputFiles / usedOffsets
   containers and their elements.  Returns False only when the output
   file cannot be opened. */
Bool storeUsedOffsets(Char* fileName)
{
   SysRes openRes = VG_(open)(fileName,
                              VKI_O_WRONLY | VKI_O_TRUNC | VKI_O_CREAT,
                              VKI_S_IRUSR | VKI_S_IROTH | VKI_S_IRGRP |
                              VKI_S_IWUSR | VKI_S_IWOTH | VKI_S_IWGRP);
   if (sr_isError(openRes)) {
      return False;
   }
   Int fd = sr_Res(openRes);

   /* NB: deliberately NOT reset between input files - offsets are
      treated as one continuous stream, matching the original logic. */
   Int previousOffset = -1;
   Int gap, fileIndex;
   Word value;

   for (fileIndex = 0; fileIndex < VG_(sizeXA) (usedOffsets); fileIndex ++) {
      OSet *offsetSet = *((OSet **) VG_(indexXA) (usedOffsets, fileIndex));
      /* renamed from 'fileName', which shadowed the output-file parameter */
      Char *inputName = * ((Char **) VG_(indexXA) (inputFiles, fileIndex));

      VG_(OSetWord_ResetIter) (offsetSet);
      VG_(write) (fd, inputName, VG_(strlen) (inputName));

      while (VG_(OSetWord_Next) (offsetSet, &value)) {
         /* pad with '\0' up to the byte just before this offset */
         for (gap = previousOffset; gap < value - 1; gap ++) {
            VG_(write) (fd, "\0", 1);
         }
         VG_(write) (fd, "\1", 1);
         previousOffset = value;
      }

      VG_(OSetWord_Destroy) (offsetSet);
      VG_(free) (inputName);
      VG_(write) (fd, "\n", 1);
   }

   VG_(close) (fd);
   VG_(deleteXA) (inputFiles);
   VG_(deleteXA) (usedOffsets);
   return True;
}
/* Handle a "#!" script: parse the interpreter path and optional single
   argument out of the first line, install them in *info, then re-enter
   the exec machinery on the interpreter itself.
   returns: 0 = success, non-0 is failure */
Int VG_(load_script)(Int fd, const HChar* name, ExeInfo* info)
{
   HChar  hdr[4096];
   Int    len = sizeof hdr;
   Int    eol;
   HChar* interp;
   HChar* end;
   HChar* cp;
   HChar* arg = NULL;
   SysRes res;

   // Read the first part of the file.
   res = VG_(pread)(fd, hdr, len, 0);
   if (sr_isError(res)) {
      VG_(close)(fd);
      return VKI_EACCES;
   } else {
      len = sr_Res(res);
   }

   /* Caller guarantees the shebang; parsing works in-place on hdr. */
   vg_assert('#' == hdr[0] && '!' == hdr[1]);
   end    = hdr + len;
   interp = hdr + 2;
   /* skip leading blanks/tabs before the interpreter path */
   while (interp < end && (*interp == ' ' || *interp == '\t'))
      interp++;

   /* skip over interpreter name */
   for (cp = interp; cp < end && !VG_(isspace)(*cp); cp++)
      ;
   eol = (*cp == '\n');
   /* NUL-terminate the interpreter name in place */
   *cp++ = '\0';

   if (!eol && cp < end) {
      /* skip space before arg */
      while (cp < end && VG_(isspace)(*cp) && *cp != '\n')
         cp++;
      /* arg is from here to eol */
      arg = cp;
      while (cp < end && *cp != '\n')
         cp++;
      *cp = '\0';
   }

   /* Replace any previous interpreter name with a heap copy of ours. */
   VG_(free)(info->interp_name);
   info->interp_name = VG_(strdup)("ume.ls.1", interp);
   vg_assert(NULL != info->interp_name);
   if (arg != NULL && *arg != '\0') {
      info->interp_args = VG_(strdup)("ume.ls.2", arg);
      vg_assert(NULL != info->interp_args);
   }

   /* argv[0] becomes the script name, as execve does for scripts. */
   if (info->argv && info->argv[0] != NULL)
      info->argv[0] = name;

   VG_(args_the_exename) = name;

   if (0)
      VG_(printf)("#! script: interp_name=\"%s\" interp_args=\"%s\"\n",
                  info->interp_name, info->interp_args);

   return VG_(do_exec_inner)(interp, info);
}
// Check the file looks executable. SysRes VG_(pre_exec_check)(const HChar* exe_name, Int* out_fd, Bool allow_setuid) { Int fd, ret, i; SysRes res; Char buf[4096]; SizeT bufsz = 4096, fsz; Bool is_setuid = False; // Check it's readable res = VG_(open)(exe_name, VKI_O_RDONLY, 0); if (sr_isError(res)) { return res; } fd = sr_Res(res); // Check we have execute permissions ret = VG_(check_executable)(&is_setuid, (HChar*)exe_name, allow_setuid); if (0 != ret) { VG_(close)(fd); if (is_setuid && !VG_(clo_xml)) { VG_(message)(Vg_UserMsg, ""); VG_(message)(Vg_UserMsg, "Warning: Can't execute setuid/setgid executable: %s", exe_name); VG_(message)(Vg_UserMsg, "Possible workaround: remove " "--trace-children=yes, if in effect"); VG_(message)(Vg_UserMsg, ""); } return VG_(mk_SysRes_Error)(ret); } fsz = (SizeT)VG_(fsize)(fd); if (fsz < bufsz) bufsz = fsz; res = VG_(pread)(fd, buf, bufsz, 0); if (sr_isError(res) || sr_Res(res) != bufsz) { VG_(close)(fd); return VG_(mk_SysRes_Error)(VKI_EACCES); } bufsz = sr_Res(res); // Look for a matching executable format for (i = 0; i < EXE_HANDLER_COUNT; i++) { if ((*exe_handlers[i].match_fn)(buf, bufsz)) { res = VG_(mk_SysRes_Success)(i); break; } } if (i == EXE_HANDLER_COUNT) { // Rejected by all executable format handlers. res = VG_(mk_SysRes_Error)(VKI_ENOEXEC); } // Write the 'out_fd' param if necessary, or close the file. if (!sr_isError(res) && out_fd) { *out_fd = fd; } else { VG_(close)(fd); } return res; }
/* Freya post-command-line initialisation: set up the mmap bookkeeping
   tables (if --mmap tracking is enabled), then load the config file
   (clo_config) - or fall back to the built-in default_rule string -
   and parse it in place, building the Trace_Block tree (trace_head)
   and rule list.  Config syntax, per line:
     '#' comment, '{...}' extra rule, '[name]' group, '(name)' group
     with attached rule, trailing '+' marks the default node; nesting
     is expressed by leading-space indentation. */
static void fr_post_clo_init(void)
{
    Rule_List* last_rule_ptr = NULL;
    Char* read_ptr;
    Trace_Block* block = NULL;
    Trace_Block* parent = NULL;
    /* dir_buffer is reused as a scratch indentation stack (one Int per
       tree depth level). */
    Int* indents = (int*)dir_buffer;
    Int indent;
    Int depth = -1;
    Bool is_group;
    SysRes sres;
    Int fd;
    OffT file_size;

    if (clo_mmap) {
#if VG_WORDSIZE == 4
        /* 32 bit: a single static section covers the address space. */
        mmap_section.next = NULL;
        mmap_section.page_addr = 0;
        mmap_section.trace_blocks = VG_(calloc)("freya.fr_post_clo_init.2", PAGE_NUMBER, sizeof(Trace_Block*));
        mmap_section.used_blocks = VG_(calloc)("freya.fr_post_clo_init.3", PAGE_NUMBER, sizeof(Char));
#else
        /* 64 bit: sections form a linked list, seeded with one entry. */
        mmap_sections = VG_(calloc)("freya.fr_post_clo_init.1", 1, sizeof(Mmap_Section));
        mmap_sections->next = NULL;
        mmap_sections->page_addr = 0;
        mmap_sections->trace_blocks = VG_(calloc)("freya.fr_post_clo_init.2", PAGE_NUMBER, sizeof(Trace_Block*));
        mmap_sections->used_blocks = VG_(calloc)("freya.fr_post_clo_init.3", PAGE_NUMBER, sizeof(Char));
        mmap_section_cache = mmap_sections;
#endif
    }

    /* Slurp the whole config file into a NUL-terminated heap buffer. */
    read_ptr = NULL;
    if (clo_config) {
        sres = VG_(open)(clo_config, VKI_O_RDONLY, 0);
        if (!sr_isError(sres)) {
            fd = (Int) sr_Res(sres);
            file_size = VG_(lseek)(fd, 0, VKI_SEEK_END);
            VG_(lseek)(fd, 0, VKI_SEEK_SET);
            if (clo_fr_verb)
                VG_(printf)("File '%s' (size: %ld bytes) is successfully opened.\n", clo_config, file_size);
            read_ptr = VG_(malloc)("freya.fr_post_clo_init.3", (file_size + 1) * sizeof(Char));
            VG_(read)(fd, read_ptr, file_size);
            read_ptr[file_size] = '\0';
            VG_(close) (fd);
        }
        else if (clo_fr_verb)
            VG_(printf)("Cannot open '%s'. (Fallback to default config)\n", clo_config);
    }
    else if (clo_fr_verb)
        VG_(printf)("No config file provided. (Fallback to default config)\n");

    if (!read_ptr) {
        // Duplicate the built-in default rule string so parsing can
        // modify it in place, just like a real config file.
        read_ptr = VG_(malloc)("freya.fr_post_clo_init.4", (VG_(strlen)(default_rule) + 1) * sizeof(Char));
        VG_(strcpy)(read_ptr, default_rule);
    }

    while (*read_ptr) {
        // Parsing the next line, first skip spaces
        indent = 0;
        while (*read_ptr == ' ') {
            indent++;
            read_ptr++;
        }
        // Skip comments and empty lines
        if (*read_ptr == '#' || *read_ptr == '\r' || *read_ptr == '\n') {
            while (*read_ptr != '\0' && *read_ptr != '\r' && *read_ptr != '\n')
                read_ptr++;
            if (*read_ptr) {
                read_ptr++;
                continue;
            }
        }
        if (*read_ptr == '{') {
            /* '{' attaches an extra rule to the most recent block. */
            read_ptr = parse_extra_rule(read_ptr, block);
            continue;
        }
        else if (*read_ptr != '[' && *read_ptr != '(') {
            /* Anything else is a plain rule line. */
            read_ptr = parse_rule(read_ptr, &last_rule_ptr);
            continue;
        }
        /* '[name]' is a pure group; '(name)' also gets a rule lookup. */
        is_group = *read_ptr == '[';
        block = VG_(malloc)("freya.fr_post_clo_init.4", sizeof(Trace_Block));
        read_ptr++;
        /* The name points into the config buffer and is NUL-terminated
           in place below. */
        block->name = read_ptr;
        while (!(!is_group && *read_ptr == ')') && !(is_group && *read_ptr == ']')) {
            tl_assert2(*read_ptr && *read_ptr != '\n' && *read_ptr != '\r', "unterminated ( or [");
            read_ptr++;
        }
        tl_assert2(block->name != read_ptr, "node has no name");
        *read_ptr = '\0';
        if (!is_group)
            search_rule(block, block->name, read_ptr - block->name);
        read_ptr++;
        if (*read_ptr == '+') {
            /* Trailing '+' marks this node as the default parent. */
            tl_assert2(default_parent == NULL, "Only one default node is allowed");
            default_parent = block;
            read_ptr++;
        }
        while (*read_ptr == ' ')
            read_ptr++;
        tl_assert2(*read_ptr == '\n' || *read_ptr == '\r' || !*read_ptr, "Garbage at the end of the line");
        if (clo_fr_verb)
            VG_(printf)("%s '%s' %s\n", is_group ? "Group:" : "Group & Attach:",
                        block->name, default_parent == block ? "(Default)" : "");

        /* Track indentation to find this node's parent in the tree. */
        if (depth >= 0) {
            if (indents[depth] != indent) {
                if (indent > indents[depth]) {
                    /* Deeper: the previous node becomes the parent. */
                    tl_assert2(depth < 63, "Maximum allowed depth is 63 for the tree");
                    depth++;
                    indents[depth] = indent;
                    if (parent)
                        parent = parent->first;
                    else
                        parent = trace_head;
                } else {
                    /* Shallower: pop until the indentation matches. */
                    do {
                        tl_assert2(depth != 0, "Wrong tree indentation");
                        depth--;
                        tl_assert(parent);
                        parent = parent->parent;
                    } while (indent != indents[depth]);
                    tl_assert((depth == 0 && !parent) || (depth > 0 && parent));
                }
            }
        } else {
            // The indentation of the top element
            tl_assert(!parent);
            indents[0] = indent;
            depth = 0;
        }

        /* Prepend the new block to its parent's (or the root) list. */
        block->parent = parent;
        if (parent) {
            block->next = parent->first;
            parent->first = block;
        } else {
            block->next = trace_head;
            trace_head = block;
        }
        block->first = NULL;
        block->hash_next = NULL;
        block->allocs = 0;
        block->total = 0;
        block->current = 0;
        block->peak = 0;
        block->ips = 0;
    }
    remove_unused_rules();
}
/* Thin wrapper over the read() syscall: returns the number of bytes
   read into buf, or -1 on any error. */
Int ML_(am_read) ( Int fd, void* buf, Int count)
{
   SysRes res = VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count);
   if (sr_isError(res))
      return -1;
   return sr_Res(res);
}
/* Allocate a stack for a Valgrind thread: guard area + active area +
   guard area.  The guard areas are mprotect()ed to PROT_NONE so stray
   accesses fault, and the active area is filled with 0xDEADBEEF so
   actual stack usage can be measured later.  Writes the initial
   (32-byte aligned) stack pointer to *initial_sp and returns the
   stack, or NULL on failure. */
VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp )
{
   Int      szB;
   SysRes   sres;
   VgStack* stack;
   UInt*    p;
   Int      i;

   /* Allocate the stack. */
   szB = VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB + VG_STACK_GUARD_SZB;

#if !defined(VGPV_ppc64_linux_bgq)
   sres = VG_(am_mmap_anon_float_valgrind)( szB );
   if (sr_isError(sres))
      return NULL;
   stack = (VgStack*)(AddrH)sr_Res(sres);

   aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
   aspacem_assert(VG_IS_PAGE_ALIGNED(stack));

   /* Protect the guard areas. */
   sres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stack[0],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)(
      (Addr) &stack->bytes[0],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE
   );

   sres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)(
      (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE
   );
#else
   /* NOTE(review): on this platform the guard areas are left
      unprotected - presumably mprotect is unavailable or undesired on
      BlueGene/Q; confirm. */
   {
     sres = VG_(am_mmap_anon_float_valgrind)( szB );
     if (sr_isError(sres))
        return NULL;
     stack = (VgStack*)sr_Res(sres);
   }
#endif

   /* Looks good.  Fill the active area with junk so we can later
      tell how much got used. */
   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++)
      p[i] = 0xDEADBEEF;

   /* Initial SP: just below the top guard, minus 8, 32-byte aligned. */
   *initial_sp = (Addr)&stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB];
   *initial_sp -= 8;
   *initial_sp &= ~((Addr)0x1F); /* 32-align it */

   VG_(debugLog)( 1,"aspacem",
                  "allocated thread stack at 0x%llx size %d\n",
                  (ULong)(Addr)stack, szB);
   ML_(am_do_sanity_check)();
   return stack;

  protect_failed:
   /* The stack was allocated, but we can't protect it.  Unmap it and
      return NULL (failure). */
   (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stack, szB );
   ML_(am_do_sanity_check)();
   return NULL;
}
/* Based on ML_(generic_PRE_sys_mmap) from syswrap-generic.c.
   If we are trying to do mmap with VKI_MAP_SHARED flag we need to align the
   start address on VKI_SHMLBA like we did in
   VG_(am_mmap_file_float_valgrind_flags).
   Args mirror mmap(2): arg1=addr, arg2=length, arg3=prot, arg4=flags,
   arg5=fd, arg6=offset. */
static SysRes mips_PRE_sys_mmap(ThreadId tid,
                                UWord arg1, UWord arg2, UWord arg3,
                                UWord arg4, UWord arg5, Off64T arg6)
{
   Addr       advised;
   SysRes     sres;
   MapRequest mreq;
   Bool       mreq_ok;

   if (arg2 == 0) {
      /* SuSV3 says: If len is zero, mmap() shall fail and no mapping
         shall be established. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   if (!VG_IS_PAGE_ALIGNED(arg1)) {
      /* zap any misaligned addresses. */
      /* SuSV3 says misaligned addresses only cause the MAP_FIXED case
         to fail.   Here, we catch them all. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   if (!VG_IS_PAGE_ALIGNED(arg6)) {
      /* zap any misaligned offsets. */
      /* SuSV3 says: The off argument is constrained to be aligned and
         sized according to the value returned by sysconf() when
         passed _SC_PAGESIZE or _SC_PAGE_SIZE. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   /* Figure out what kind of allocation constraints there are
      (fixed/hint/any), and ask aspacem what we should do. */
   mreq.start = arg1;
   mreq.len   = arg2;
   if (arg4 & VKI_MAP_FIXED) {
      mreq.rkind = MFixed;
   } else if (arg1 != 0) {
      mreq.rkind = MHint;
   } else {
      mreq.rkind = MAny;
   }

   /* For non-fixed shared mappings, over-ask so the result can be
      rounded up to an SHMLBA boundary without losing coverage. */
   if ((VKI_SHMLBA > VKI_PAGE_SIZE) && (VKI_MAP_SHARED & arg4)
       && !(VKI_MAP_FIXED & arg4))
      mreq.len = arg2 + VKI_SHMLBA - VKI_PAGE_SIZE;

   /* Enquire ... */
   advised = VG_(am_get_advisory)( &mreq, True/*client*/, &mreq_ok );

   if ((VKI_SHMLBA > VKI_PAGE_SIZE) && (VKI_MAP_SHARED & arg4)
       && !(VKI_MAP_FIXED & arg4))
      advised = VG_ROUNDUP(advised, VKI_SHMLBA);

   if (!mreq_ok) {
      /* Our request was bounced, so we'd better fail. */
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   /* Otherwise we're OK (so far).  Install aspacem's choice of
      address, and let the mmap go through.  */
   sres = VG_(am_do_mmap_NO_NOTIFY)(advised, arg2, arg3,
                                    arg4 | VKI_MAP_FIXED,
                                    arg5, arg6);

   /* A refinement: it may be that the kernel refused aspacem's choice
      of address.  If we were originally asked for a hinted mapping,
      there is still a last chance: try again at any address.  Hence: */
   if (mreq.rkind == MHint && sr_isError(sres)) {
      mreq.start = 0;
      mreq.len   = arg2;
      mreq.rkind = MAny;
      advised = VG_(am_get_advisory)( &mreq, True/*client*/, &mreq_ok );
      if (!mreq_ok) {
         /* Our request was bounced, so we'd better fail. */
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
      }
      /* and try again with the kernel */
      sres = VG_(am_do_mmap_NO_NOTIFY)(advised, arg2, arg3,
                                       arg4 | VKI_MAP_FIXED,
                                       arg5, arg6);
   }

   if (!sr_isError(sres)) {
      ULong di_handle;
      /* Notify aspacem. */
      notify_core_of_mmap(
         (Addr)sr_Res(sres), /* addr kernel actually assigned */
         arg2,               /* length */
         arg3,               /* prot */
         arg4,               /* the original flags value */
         arg5,               /* fd */
         arg6                /* offset */
      );
      /* Load symbols? */
      di_handle = VG_(di_notify_mmap)( (Addr)sr_Res(sres),
                                       False/*allow_SkFileV*/, (Int)arg5 );
      /* Notify the tool. */
      notify_tool_of_mmap(
         (Addr)sr_Res(sres), /* addr kernel actually assigned */
         arg2,               /* length */
         arg3,               /* prot */
         di_handle           /* so the tool can refer to the read debuginfo
                                later, if it wants. */
      );
   }

   /* Stay sane */
   if (!sr_isError(sres) && (arg4 & VKI_MAP_FIXED))
      vg_assert(sr_Res(sres) == arg1);

   return sres;
}
/* Load a fat Mach-O executable: scan the (big-endian) fat arch table
   for the entry matching this build's CPU type (and, on ARM, an
   acceptable subtype), then hand the embedded thin image to
   load_mach_file().  Returns 0 on success, -1 on any failure. */
static int load_fat_file(int fd, vki_off_t offset, vki_off_t size,
                         unsigned long filetype, const HChar *filename,
                         vki_uint8_t **out_stack_start,
                         vki_uint8_t **out_stack_end,
                         vki_uint8_t **out_text,
                         vki_uint8_t **out_entry,
                         vki_uint8_t **out_linker_entry)
{
   struct fat_header fh;
   vki_off_t arch_offset;
   int i;
   cpu_type_t good_arch;
#if defined(VGA_arm)
   cpu_subtype_t highest_subtype;
#endif
   SysRes res;

   /* Pick the CPU type this build of Valgrind can run. */
#if defined(VGA_ppc32)
   good_arch = CPU_TYPE_POWERPC;
#elif defined(VGA_ppc64be)
   good_arch = CPU_TYPE_POWERPC64BE;
#elif defined(VGA_ppc64le)
   good_arch = CPU_TYPE_POWERPC64LE;
#elif defined(VGA_x86)
   good_arch = CPU_TYPE_I386;
#elif defined(VGA_amd64)
   good_arch = CPU_TYPE_X86_64;
#elif defined(VGA_arm)
   good_arch = CPU_TYPE_ARM;
   highest_subtype = CPU_SUBTYPE_ARM_V7;
#else
# error unknown architecture
#endif

   // Read fat header
   // All fat contents are BIG-ENDIAN
   if (size < sizeof(fh)) {
      print("bad executable (bad fat header)\n");
      return -1;
   }
   res = VG_(pread)(fd, &fh, sizeof(fh), offset);
   if (sr_isError(res) || sr_Res(res) != sizeof(fh)) {
      print("bad executable (bad fat header)\n");
      return -1;
   }

   // Scan arch headers looking for a good one
   arch_offset = offset + sizeof(fh);
   fh.nfat_arch = VG_(ntohl)(fh.nfat_arch);
   for (i = 0; i < fh.nfat_arch; i++) {
      struct fat_arch arch;
      if (arch_offset + sizeof(arch) > size) {
         print("bad executable (corrupt fat archs)\n");
         return -1;
      }
      res = VG_(pread)(fd, &arch, sizeof(arch), arch_offset);
      arch_offset += sizeof(arch);
      if (sr_isError(res) || sr_Res(res) != sizeof(arch)) {
         VG_(printf)("bad executable (corrupt fat arch) %x %llu\n",
                     arch.cputype, (ULong)arch_offset);
         return -1;
      }

      /* Convert all fields from big-endian to host order. */
      arch.cputype = VG_(ntohl)(arch.cputype);
      arch.cpusubtype = VG_(ntohl)(arch.cpusubtype);
      arch.offset = VG_(ntohl)(arch.offset);
      arch.size = VG_(ntohl)(arch.size);
      arch.align = VG_(ntohl)(arch.align);

#if defined(VGA_arm)
      if ((arch.cputype == good_arch)
          && (arch.cpusubtype <= highest_subtype)) {
#else
      if (arch.cputype == good_arch) {
#endif
         // use this arch
         /* Bounds-check the embedded image before loading it. */
         if (arch.offset > size || arch.offset + arch.size > size) {
            print("bad executable (corrupt fat arch 2)\n");
            return -1;
         }
         return load_mach_file(fd, offset+arch.offset, arch.size, filetype,
                               filename, out_stack_start, out_stack_end,
                               out_text, out_entry, out_linker_entry);
      }
   }

   print("bad executable (can't run on this machine)\n");
   return -1;
}

/* Load a Mach-O executable or dylinker.
   The file may be fat or thin.
   Dispatches on the magic number: host-order MAGIC means a thin image,
   byte-swapped FAT_MAGIC means a fat wrapper; anything else is rejected.
   Returns 0 on success, -1 on any failure. */
static int load_mach_file(int fd, vki_off_t offset, vki_off_t size,
                          unsigned long filetype, const HChar *filename,
                          vki_uint8_t **out_stack_start,
                          vki_uint8_t **out_stack_end,
                          vki_uint8_t **out_text,
                          vki_uint8_t **out_entry,
                          vki_uint8_t **out_linker_entry)
{
   vki_uint32_t magic;
   SysRes res;

   if (size < sizeof(magic)) {
      print("bad executable (no Mach-O magic)\n");
      return -1;
   }
   res = VG_(pread)(fd, &magic, sizeof(magic), offset);
   if (sr_isError(res) || sr_Res(res) != sizeof(magic)) {
      print("bad executable (no Mach-O magic)\n");
      return -1;
   }

   if (magic == MAGIC) {
      // thin
      return load_thin_file(fd, offset, size, filetype, filename,
                            out_stack_start, out_stack_end, out_text,
                            out_entry, out_linker_entry);
   } else if (magic == VG_(htonl)(FAT_MAGIC)) {
      // fat
      return load_fat_file(fd, offset, size, filetype, filename,
                           out_stack_start, out_stack_end, out_text,
                           out_entry, out_linker_entry);
   } else {
      // huh?
      print("bad executable (bad Mach-O magic)\n");
      return -1;
   }
}
/* aprof post-syscall handler: after a successful syscall that moves
   data through memory (read/write families, readv/writev, msgrcv/
   msgsnd, mmap), replay the touched buffer as a sequence of simulated
   accesses at memory_resolution granularity - STOREs for data the
   kernel wrote into the process, LOADs for data it read out.
   NOTE(review): the error check below pokes SysRes internals
   (res._isError / res._mode) directly instead of sr_isError(res) -
   presumably because this predates or bypasses the accessor; confirm. */
void APROF_(post_syscall)(ThreadId tid, UInt syscallno,
                          UWord * args, UInt nArgs, SysRes res) {

    APROF_(debug_assert)(tid == VG_(get_running_tid)(), "TID mismatch");

    /* Failed syscalls moved no data: nothing to trace. */
#if defined(VGO_linux)
    if(res._isError) {
#elif defined(VGO_darwin)
    if(res._mode == SysRes_UNIX_ERR) {
#endif
        return;
    }

    /*
     * This is an undocumented behavior of Valgrind
     */
    Bool forced_switch = False;
    if (tid != APROF_(runtime).current_TID) {
        APROF_(thread_switch)(tid, 0);
        forced_switch = True; // a thread switch increased the global counter
    }

    /* --- read-like syscalls: kernel wrote into args[1] --- */
    if( syscallno == __NR_read
#if defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
        || syscallno == __NR_recv
#endif
#if !defined(VGP_x86_linux) && !defined(VGP_s390x_linux)
        || syscallno == __NR_recvfrom
#endif
#if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
        || syscallno == __NR_pread
#else
        || syscallno == __NR_pread64
#endif
        ){

        Addr addr = args[1];
        Int size = (Int) sr_Res(res);   /* bytes actually transferred */

        APROF_(fix_access_size)(addr, size);

        if (!forced_switch)
            APROF_(increase_global_counter)();

        /* Walk the buffer in memory_resolution-sized steps. */
        while(size > 0) {
            APROF_(trace_access_drms)( STORE, addr,
                                       APROF_(runtime).memory_resolution, True);
            size -= APROF_(runtime).memory_resolution;
            addr += APROF_(runtime).memory_resolution;
        }

    /* --- readv/preadv: kernel wrote into an iovec array --- */
    } else if ( syscallno == __NR_readv
#if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
                || syscallno == __NR_preadv
#endif
                ){

        struct vki_iovec * base = (struct vki_iovec *) args[1];
        UWord iovcnt = args[2];
        Int size = (Int) sr_Res(res);
        UWord i;
        Int iov_len;

        if (!forced_switch)
            APROF_(increase_global_counter)();

        for(i = 0; i < iovcnt; i++){
            if(size == 0) break;
            Addr addr = (Addr) base[i].iov_base;
            /* The last iovec may be only partially filled. */
            if(base[i].iov_len <= size)
                iov_len = base[i].iov_len;
            else
                iov_len = size;
            size -= iov_len;
            APROF_(fix_access_size)(addr, iov_len);
            while(iov_len > 0) {
                APROF_(trace_access_drms)( STORE, addr,
                                           APROF_(runtime).memory_resolution,
                                           True);
                iov_len -= APROF_(runtime).memory_resolution;
                addr += APROF_(runtime).memory_resolution;
            }
        }

    /* --- write-like syscalls: kernel read from args[1] --- */
    } else if ( syscallno == __NR_write
#if defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
                || syscallno== __NR_send
#endif
#if !defined(VGP_x86_linux) && !defined(VGP_s390x_linux)
                || syscallno== __NR_sendto
#endif
#if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
                || syscallno== __NR_pwrite
#else
                || syscallno== __NR_pwrite64
#endif
                ){

        Addr addr = args[1];
        Int size = (Int) sr_Res(res);
        APROF_(fix_access_size)(addr, size);

        while (size > 0) {
            APROF_(trace_access_drms)( LOAD, addr,
                                       APROF_(runtime).memory_resolution, True);
            size -= APROF_(runtime).memory_resolution;
            addr += APROF_(runtime).memory_resolution;
        }

    /* --- writev/pwritev: kernel read from an iovec array --- */
    } else if ( syscallno == __NR_writev
#if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
                || syscallno == __NR_pwritev
#endif
                ){

        Int size = (Int) sr_Res(res);
        struct vki_iovec * base = (struct vki_iovec *) args[1];
        UWord iovcnt = args[2];
        UWord i;
        Int iov_len;

        for(i = 0; i < iovcnt; i++){
            if(size == 0) break;
            Addr addr = (Addr) base[i].iov_base;
            if(base[i].iov_len <= size)
                iov_len = base[i].iov_len;
            else
                iov_len = size;
            size -= iov_len;
            APROF_(fix_access_size)(addr, iov_len);
            while (iov_len > 0) {
                APROF_(trace_access_drms)( LOAD, addr,
                                           APROF_(runtime).memory_resolution,
                                           True);
                iov_len -= APROF_(runtime).memory_resolution;
                addr += APROF_(runtime).memory_resolution;
            }
        }

    /* --- msgrcv: message (payload + mtype header) written to args[1] --- */
    } else if (
#if !defined(VGP_x86_linux)
            syscallno == __NR_msgrcv
#else
            False
#endif
            ){

        Int size = (Int) sr_Res(res);
        Addr addr = args[1];
        /* account for the leading 'long mtype' field of struct msgbuf */
        size = size + sizeof(long int);
        APROF_(fix_access_size)(addr, size);

        if (!forced_switch)
            APROF_(increase_global_counter)();

        while(size > 0) {
            APROF_(trace_access_drms)( STORE, addr,
                                       APROF_(runtime).memory_resolution, True);
            size -= APROF_(runtime).memory_resolution;
            addr += APROF_(runtime).memory_resolution;
        }

    /* --- msgsnd: message (payload + mtype header) read from args[1] --- */
    } else if (
#if !defined(VGP_x86_linux)
            syscallno == __NR_msgsnd
#else
            False
#endif
            ){

        Addr addr = args[1];
        SizeT s = args[2];
        Int size = s + sizeof(long int);
        APROF_(fix_access_size)(addr, size);

        while(size > 0) {
            APROF_(trace_access_drms)( LOAD, addr,
                                       APROF_(runtime).memory_resolution, True);
            size -= APROF_(runtime).memory_resolution;
            addr += APROF_(runtime).memory_resolution;
        }

    /* --- mmap: treat the new mapping as freshly stored memory --- */
    } else if ( syscallno == __NR_mmap
#if defined(VGP_x86_linux)
                || syscallno == __NR_mmap2
#endif
                ) {

        Addr addr = (Addr) sr_Res(res);   /* address the kernel assigned */
        Int size = args[1];
        APROF_(fix_access_size)(addr, size);

        if (!forced_switch)
            APROF_(increase_global_counter)();

        while(size > 0) {
            APROF_(trace_access_drms)( STORE, addr,
                                       APROF_(runtime).memory_resolution, True);
            size -= APROF_(runtime).memory_resolution;
            addr += APROF_(runtime).memory_resolution;
        }
    }
}
/* Allocate and prepare a Valgrind thread stack: guard area + active
   area + guard area.  Both guards are made inaccessible, the active
   area is poisoned with 0xDEADBEEF (so usage can be measured later),
   and the initial 32-byte-aligned stack pointer is written to
   *initial_sp.  Returns NULL if mapping or protection fails. */
VgStack* VG_(am_alloc_VgStack)( Addr* initial_sp )
{
   Int totalSzB = VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB
                  + VG_STACK_GUARD_SZB;

   SysRes mres = VG_(am_mmap_anon_float_valgrind)( totalSzB );
   if (sr_isError(mres))
      return NULL;

   VgStack* stk = (VgStack*)(AddrH)sr_Res(mres);
   aspacem_assert(VG_IS_PAGE_ALIGNED(totalSzB));
   aspacem_assert(VG_IS_PAGE_ALIGNED(stk));

   /* Make the lower guard area inaccessible. */
   mres = local_do_mprotect_NO_NOTIFY( (Addr) &stk[0],
                                       VG_STACK_GUARD_SZB, VKI_PROT_NONE );
   if (sr_isError(mres))
      goto unmap_and_fail;
   VG_(am_notify_mprotect)( (Addr) &stk->bytes[0],
                            VG_STACK_GUARD_SZB, VKI_PROT_NONE );

   /* Make the upper guard area inaccessible. */
   mres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stk->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE );
   if (sr_isError(mres))
      goto unmap_and_fail;
   VG_(am_notify_mprotect)(
      (Addr) &stk->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE );

   /* Poison the active area so later we can tell how much was used. */
   UInt* fill = (UInt*)&stk->bytes[VG_STACK_GUARD_SZB];
   Int   idx;
   for (idx = 0; idx < VG_STACK_ACTIVE_SZB/sizeof(UInt); idx++)
      fill[idx] = 0xDEADBEEF;

   /* Initial SP: top of the active area, minus 8, 32-byte aligned. */
   *initial_sp  = (Addr)&stk->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB];
   *initial_sp -= 8;
   *initial_sp &= ~((Addr)0x1F);

   VG_(debugLog)( 1,"aspacem","allocated thread stack at 0x%llx size %d\n",
                  (ULong)(Addr)stk, totalSzB);
   ML_(am_do_sanity_check)();
   return stk;

  unmap_and_fail:
   /* Mapped but could not protect: release the mapping and fail. */
   (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stk, totalSzB );
   ML_(am_do_sanity_check)();
   return NULL;
}
/* Parse the mask file 'filename' into the global inputFilter: an
   XArray of per-line XArrays of offsetPair ranges.  Each line contains
   whitespace-separated hex offsets or ranges of the form "a-b".
   Returns True on success, False on a malformed file or if the file
   cannot be opened. */
Bool parseMask(Char* filename)
{
    inputFilter = VG_(newXA)(VG_(malloc), "inputFilter", VG_(free), sizeof(XArray*));
    /* Fix: check the open result - previously sr_Res() was taken from a
       possibly-error SysRes with no error check at all. */
    SysRes openRes = VG_(open)(filename, VKI_O_RDWR | VKI_O_CREAT,
                               VKI_S_IRWXU | VKI_S_IRWXG | VKI_S_IRWXO);
    if (sr_isError(openRes)) {
        return False;
    }
    Int fd = sr_Res(openRes);
    struct vg_stat fileInfo;
    VG_(fstat)(fd, &fileInfo);
    Long size = fileInfo.size;
    /* Slurp the whole file into a NUL-terminated buffer. */
    Char* buf = (Char*) VG_(malloc)("buf", size + 1);
    VG_(read)(fd, buf, size);
    buf[size] = '\0';
    VG_(close)(fd);
    Char* str = buf;
    Char** endptr = &str;
    XArray* curfilter = VG_(newXA)(VG_(malloc), "chunk", VG_(free), sizeof(offsetPair));
    for (;;) {
        /* Skip whitespace; each newline closes the current filter line. */
        while (VG_(isspace)(*str)) {
            while (VG_(isspace)(*str) && (*str != '\n')) {
                str++;
            }
            if (*str == '\n') {
                VG_(addToXA)(inputFilter, &curfilter);
                curfilter = VG_(newXA)(VG_(malloc), "chunk", VG_(free), sizeof(offsetPair));
                str++;
            }
        }
        Char* str0 = str;
        Long p1 = VG_(strtoll16)(str, endptr);
        if ((*endptr == str0) && (p1 == 0)) {
            /* No number here: end-of-buffer or end-of-line is fine,
               anything else is a parse error. */
            str = *endptr;
            if (*str == '\0') {
                break;
            }
            while (VG_(isspace)(*str) && (*str != '\n')) {
                str++;
            }
            if (*str != '\n') {
                VG_(free)(buf);   /* fix: don't leak the file buffer */
                return False;
            } else {
                VG_(addToXA)(inputFilter, &curfilter);
                curfilter = VG_(newXA)(VG_(malloc), "chunk", VG_(free), sizeof(offsetPair));
                str++;
                continue;
            }
        }
        Long p2 = p1;
        str = *endptr;
        while (VG_(isspace)(*str) && (*str != '\n')) {
            str++;
        }
        if (*str == '-') {
            /* Range "p1-p2": parse the upper bound. */
            str++;
            str0 = str;
            p2 = VG_(strtoll16)(str, endptr);
            if ((*endptr == str0) && (p2 == 0)) {
                VG_(free)(buf);   /* fix: don't leak the file buffer */
                return False;
            }
        }
        /* VG_(addToXA) copies the element, so a stack pair suffices.
           Fix: the old heap-allocated pair was never freed (leak). */
        offsetPair newPair;
        newPair.first = p1;
        newPair.last = p2;
        VG_(addToXA)(curfilter, &newPair);
        str = *endptr;
        while (VG_(isspace)(*str) && (*str != '\n')) {
            str++;
        }
        if (*str == '\n') {
            VG_(addToXA)(inputFilter, &curfilter);
            curfilter = VG_(newXA)(VG_(malloc), "chunk", VG_(free), sizeof(offsetPair));
            str++;
        }
        if (*str == '\0') {
            break;
        }
    }
    VG_(free)(buf);
    return True;
}
/* Read and validate the ELF header and program-header table of 'fd'.
   Returns a freshly allocated elfinfo on success, NULL on any failure
   (a diagnostic is printed first).  The caller owns the result,
   including the embedded phdr table in ->p. */
static struct elfinfo *readelf(Int fd, const char *filename)
{
   SysRes sr;
   Int phdr_bytes;
   struct elfinfo *info = VG_(malloc)("ume.re.1", sizeof(*info));

   vg_assert(info);
   info->fd = fd;

   sr = VG_(pread)(fd, &info->e, sizeof(info->e), 0);
   if (sr_isError(sr) || sr_Res(sr) != sizeof(info->e)) {
      VG_(printf)("valgrind: %s: can't read ELF header: %s\n",
                  filename, VG_(strerror)(sr_Err(sr)));
      goto bad;
   }

   /* Reject anything that is not a native executable / shared object. */
   if (VG_(memcmp)(&info->e.e_ident[0], ELFMAG, SELFMAG) != 0) {
      VG_(printf)("valgrind: %s: bad ELF magic number\n", filename);
      goto bad;
   }
   if (info->e.e_ident[EI_CLASS] != VG_ELF_CLASS) {
      VG_(printf)("valgrind: wrong ELF executable class "
                  "(eg. 32-bit instead of 64-bit)\n");
      goto bad;
   }
   if (info->e.e_ident[EI_DATA] != VG_ELF_DATA2XXX) {
      VG_(printf)("valgrind: executable has wrong endian-ness\n");
      goto bad;
   }
   if (!(info->e.e_type == ET_EXEC || info->e.e_type == ET_DYN)) {
      VG_(printf)("valgrind: this is not an executable\n");
      goto bad;
   }
   if (info->e.e_machine != VG_ELF_MACHINE) {
      VG_(printf)("valgrind: executable is not for "
                  "this architecture\n");
      goto bad;
   }
   if (info->e.e_phentsize != sizeof(ESZ(Phdr))) {
      VG_(printf)("valgrind: sizeof ELF Phdr wrong\n");
      goto bad;
   }

   /* Pull in the whole program-header table. */
   phdr_bytes = sizeof(ESZ(Phdr)) * info->e.e_phnum;
   info->p = VG_(malloc)("ume.re.2", phdr_bytes);
   vg_assert(info->p);

   sr = VG_(pread)(fd, info->p, phdr_bytes, info->e.e_phoff);
   if (sr_isError(sr) || sr_Res(sr) != phdr_bytes) {
      VG_(printf)("valgrind: can't read phdr: %s\n",
                  VG_(strerror)(sr_Err(sr)));
      VG_(free)(info->p);
      goto bad;
   }

   return info;

  bad:
   VG_(free)(info);
   return NULL;
}
/* Loads a Mach-O executable into memory, along with any threads,
   stacks, and dylinker.  Returns 0 on success, -1 on any failure.
   fd[offset..offset+size) is a Mach-O thin file.
   filetype is MH_EXECUTE or MH_DYLINKER.
   The mapped but empty stack is returned in *out_stack.
   The executable's Mach headers are returned in *out_text.
   The executable's entry point is returned in *out_entry.
   The dylinker's entry point (if any) is returned in *out_linker_entry.
   GrP fixme need to return whether dylinker was found - stack layout is
   different

   Fixes vs. the original:
     - "header larger than file" now actually fails (return was missing);
     - a duplicate LC_LOAD_DYLINKER is now an error (return was missing);
     - 'headers' is freed on every error path (was leaked);
     - a short read of the load commands is detected;
     - a zero cmdsize no longer loops forever. */
static int load_thin_file(int fd, vki_off_t offset, vki_off_t size,
                          unsigned long filetype, const char *filename,
                          vki_uint8_t **out_stack_start,
                          vki_uint8_t **out_stack_end,
                          vki_uint8_t **out_text,
                          vki_uint8_t **out_entry,
                          vki_uint8_t **out_linker_entry)
{
   struct MACH_HEADER mh;
   vki_uint8_t *headers;
   vki_uint8_t *headers_end;
   struct load_command *lc;
   struct load_command *lcend;
   struct SEGMENT_COMMAND *segcmd;
   struct thread_command *threadcmd;
   struct dylinker_command *dycmd;
   int err;
   SysRes res;
   vki_size_t len;

   vki_uint8_t *stack_start = NULL;   // allocated thread stack (hot end)
   vki_uint8_t *stack_end = NULL;     // allocated thread stack (cold end)
   vki_uint8_t *entry = NULL;         // static entry point
   vki_uint8_t *text = NULL;          // start of text segment
                                      // (i.e. the mach headers)
   vki_uint8_t *linker_entry = NULL;  // dylinker entry point

   // Read Mach-O header
   if (sizeof(mh) > size) {
      print("bad executable (no Mach-O header)\n");
      return -1;   // FIX: was missing; a truncated file was read anyway
   }
   res = VG_(pread)(fd, &mh, sizeof(mh), offset);
   if (sr_isError(res) || sr_Res(res) != sizeof(mh)) {
      print("bad executable (no Mach-O header)\n");
      return -1;
   }

   // Sanity-check the header itself
   if (mh.magic != MAGIC) {
      print("bad executable (no Mach-O magic)\n");
      return -1;
   }
   if (mh.filetype != filetype) {
      // expecting MH_EXECUTE or MH_DYLINKER
      print("bad executable (wrong file type)\n");
      return -1;
   }

   // Map all headers into memory
   len = sizeof(mh) + mh.sizeofcmds;
   if (len > size) {
      print("bad executable (missing load commands)\n");
      return -1;
   }
   headers = VG_(malloc)("ume.macho.headers", len);
   res = VG_(pread)(fd, headers, len, offset);
   if (sr_isError(res) || sr_Res(res) != len) {   // FIX: detect short read
      print("couldn't read load commands from executable\n");
      goto bad;
   }
   headers_end = headers + len;

   // Map some segments into client memory:
   // LC_SEGMENT    (text, data, etc)
   // UNIXSTACK     (stack)
   // LOAD_DYLINKER (dyld)
   lcend = (struct load_command *)(headers + mh.sizeofcmds + sizeof(mh));
   for (lc = (struct load_command *)(headers + sizeof(mh));
        lc < lcend;
        lc = (struct load_command *)(lc->cmdsize + (vki_uint8_t *)lc))
   {
      if ((vki_uint8_t *)lc < headers ||
          lc->cmdsize == 0 ||                      // FIX: avoid infinite loop
          lc->cmdsize + (vki_uint8_t *)lc > headers_end) {
         print("bad executable (invalid load commands)\n");
         goto bad;
      }

      switch (lc->cmd) {
      case LC_SEGMENT_CMD:
         if (lc->cmdsize < sizeof(struct SEGMENT_COMMAND)) {
            print("bad executable (invalid load commands)\n");
            goto bad;
         }
         segcmd = (struct SEGMENT_COMMAND *)lc;
         err = load_segment(fd, offset, size, &text, &stack_start,
                            segcmd, filename);
         if (err) goto bad;
         break;

      case LC_UNIXTHREAD:
         if (stack_end || entry) {
            print("bad executable (multiple thread commands)\n");
            goto bad;
         }
         if (lc->cmdsize < sizeof(struct thread_command)) {
            print("bad executable (invalid load commands)\n");
            goto bad;
         }
         threadcmd = (struct thread_command *)lc;
         err = load_unixthread(&stack_start, &stack_end, &entry, threadcmd);
         if (err) goto bad;
         break;

      case LC_LOAD_DYLINKER:
         if (filetype == MH_DYLINKER) {
            print("bad executable (dylinker needs a dylinker)\n");
            goto bad;
         }
         if (linker_entry) {
            print("bad executable (multiple dylinker commands)\n");
            goto bad;   // FIX: was missing; duplicate dylinker was accepted
         }
         if (lc->cmdsize < sizeof(struct dylinker_command)) {
            print("bad executable (invalid load commands)\n");
            goto bad;
         }
         dycmd = (struct dylinker_command *)lc;
         err = load_dylinker(&linker_entry, dycmd);
         if (err) goto bad;
         break;

      case LC_THREAD:
         if (filetype == MH_EXECUTE) {
            print("bad executable (stackless thread)\n");
            goto bad;
         }
         if (stack_end || entry) {
            print("bad executable (multiple thread commands)\n");
            goto bad;
         }
         if (lc->cmdsize < sizeof(struct thread_command)) {
            print("bad executable (invalid load commands)\n");
            goto bad;
         }
         threadcmd = (struct thread_command *)lc;
         err = load_thread(&entry, threadcmd);
         if (err) goto bad;
         break;

      default:
         break;
      }
   }

   // Done with the headers
   VG_(free)(headers);

   if (filetype == MH_EXECUTE) {
      // Verify the necessary pieces for an executable:
      // a stack
      // a text segment
      // an entry point (static or linker)
      if (!stack_end || !stack_start) {
         print("bad executable (no stack)\n");
         return -1;
      }
      if (!text) {
         print("bad executable (no text segment)\n");
         return -1;
      }
      if (!entry && !linker_entry) {
         print("bad executable (no entry point)\n");
         return -1;
      }
   }
   else if (filetype == MH_DYLINKER) {
      // Verify the necessary pieces for a dylinker:
      // an entry point
      if (!entry) {
         print("bad executable (no entry point)\n");
         return -1;
      }
   }

   if (out_stack_start)  *out_stack_start  = stack_start;
   if (out_stack_end)    *out_stack_end    = stack_end;
   if (out_text)         *out_text         = text;
   if (out_entry)        *out_entry        = entry;
   if (out_linker_entry) *out_linker_entry = linker_entry;
   return 0;

  bad:
   VG_(free)(headers);   // FIX: plug the leak on the error paths
   return -1;
}
/* Thin wrapper around the readlink syscall: fills 'buf' (up to 'bufsiz'
   bytes, NOT NUL-terminated) with the target of symlink 'path'.
   Returns the number of bytes placed in 'buf', or -1 on error. */
Int ML_(am_readlink)(HChar* path, HChar* buf, UInt bufsiz)
{
   SysRes sr = VG_(do_syscall3)(__NR_readlink,
                                (UWord)path, (UWord)buf, bufsiz);
   if (sr_isError(sr))
      return -1;
   return sr_Res(sr);
}
/* Load a fat Mach-O executable. */
/* Scans the fat (multi-architecture) table for the slice matching the
   architecture this Valgrind was built for and hands that thin slice to
   load_mach_file().  Returns 0 on success, -1 on any failure (after
   printing a diagnostic). */
static int load_fat_file(int fd, vki_off_t offset, vki_off_t size,
                         unsigned long filetype, const char *filename,
                         vki_uint8_t **out_stack_start,
                         vki_uint8_t **out_stack_end,
                         vki_uint8_t **out_text,
                         vki_uint8_t **out_entry,
                         vki_uint8_t **out_linker_entry)
{
   struct fat_header fh;
   vki_off_t arch_offset;
   int i;
   cpu_type_t good_arch;
   SysRes res;

   /* Select the CPU type corresponding to the build architecture. */
#if defined(VGA_ppc32)
   good_arch = CPU_TYPE_POWERPC;
#elif defined(VGA_ppc64)
   good_arch = CPU_TYPE_POWERPC64;
#elif defined(VGA_x86)
   good_arch = CPU_TYPE_I386;
#elif defined(VGA_amd64)
   good_arch = CPU_TYPE_X86_64;
#else
# error unknown architecture
#endif

   // Read fat header
   // All fat contents are BIG-ENDIAN
   if (size < sizeof(fh)) {
      print("bad executable (bad fat header)\n");
      return -1;
   }
   res = VG_(pread)(fd, &fh, sizeof(fh), offset);
   if (sr_isError(res) || sr_Res(res) != sizeof(fh)) {
      print("bad executable (bad fat header)\n");
      return -1;
   }

   // Scan arch headers looking for a good one
   arch_offset = offset + sizeof(fh);
   fh.nfat_arch = VG_(ntohl)(fh.nfat_arch);
   for (i = 0; i < fh.nfat_arch; i++) {
      struct fat_arch arch;
      /* NOTE(review): arch_offset includes the file-level 'offset' but is
         compared against 'size' (a length); this looks inconsistent for a
         fat image embedded at a nonzero offset — confirm against callers. */
      if (arch_offset + sizeof(arch) > size) {
         print("bad executable (corrupt fat archs)\n");
         return -1;
      }

      res = VG_(pread)(fd, &arch, sizeof(arch), arch_offset);
      arch_offset += sizeof(arch);
      if (sr_isError(res) || sr_Res(res) != sizeof(arch)) {
         /* NOTE(review): on a failed read 'arch.cputype' is uninitialised
            here, so the value printed is meaningless. */
         VG_(printf)("bad executable (corrupt fat arch) %x %llu\n",
                     arch.cputype, (ULong)arch_offset);
         return -1;
      }

      /* Fat arch entries are stored big-endian on disk. */
      arch.cputype = VG_(ntohl)(arch.cputype);
      arch.cpusubtype = VG_(ntohl)(arch.cpusubtype);
      arch.offset = VG_(ntohl)(arch.offset);
      arch.size = VG_(ntohl)(arch.size);
      arch.align = VG_(ntohl)(arch.align);

      if (arch.cputype == good_arch) {
         // use this arch
         if (arch.offset > size || arch.offset + arch.size > size) {
            print("bad executable (corrupt fat arch 2)\n");
            return -1;
         }
         /* Recurse into the matching thin slice. */
         return load_mach_file(fd, offset+arch.offset, arch.size, filetype,
                               filename, out_stack_start, out_stack_end,
                               out_text, out_entry, out_linker_entry);
      }
   }

   /* No slice matched the build architecture. */
   print("bad executable (can't run on this machine)\n");
   return -1;
}
/* Return the current process id via a direct syscall.  getpid cannot
   fail, so an error result trips an assertion. */
Int ML_(am_getpid)( void )
{
   SysRes sr = VG_(do_syscall0)(__NR_getpid);
   aspacem_assert(!sr_isError(sr));
   return sr_Res(sr);
}
/* Map a given fat or thin object aboard, find the thin part if
   necessary, do some checks, and write details of both the fat and
   thin parts into *ii.  Returns False (and leaves the file unmapped)
   on failure.  Guarantees to return pointers to a valid(ish) Mach-O
   image if it succeeds. */
static Bool map_image_aboard ( DebugInfo* di, /* only for err msgs */
                               /*OUT*/ImageInfo* ii, UChar* filename )
{
   VG_(memset)(ii, 0, sizeof(*ii));

   /* First off, try to map the thing in. */
   { SizeT  size;
     SysRes fd, sres;
     struct vg_stat stat_buf;

     /* Stat first, purely to learn the file size for the mmap below. */
     fd = VG_(stat)(filename, &stat_buf);
     if (sr_isError(fd)) {
        ML_(symerr)(di, True, "Can't stat image (to determine its size)?!");
        return False;
     }
     size = stat_buf.size;

     fd = VG_(open)(filename, VKI_O_RDONLY, 0);
     if (sr_isError(fd)) {
       ML_(symerr)(di, True, "Can't open image to read symbols?!");
       return False;
     }

     /* Map the whole file read-only into Valgrind's address space; the
        fd can be closed once the mapping exists. */
     sres = VG_(am_mmap_file_float_valgrind)
               ( size, VKI_PROT_READ, sr_Res(fd), 0 );
     if (sr_isError(sres)) {
        ML_(symerr)(di, True, "Can't mmap image to read symbols?!");
        return False;
     }
     VG_(close)(sr_Res(fd));

     ii->img = (UChar*)sr_Res(sres);
     ii->img_szB = size;
   }

   /* Now it's mapped in and we have .img and .img_szB set.  Look for
      the embedded Mach-O object.  If not findable, unmap and fail. */
   { struct fat_header* fh_be;
     struct fat_header fh;
     struct MACH_HEADER* mh;

     // Assume initially that we have a thin image, and update
     // these if it turns out to be fat.
     ii->macho_img = ii->img;
     ii->macho_img_szB = ii->img_szB;

     // Check for fat header.
     if (ii->img_szB < sizeof(struct fat_header)) {
        ML_(symerr)(di, True, "Invalid Mach-O file (0 too small).");
        goto unmap_and_fail;
     }

     // Fat header is always BIG-ENDIAN
     fh_be = (struct fat_header *)ii->img;
     fh.magic = VG_(ntohl)(fh_be->magic);
     fh.nfat_arch = VG_(ntohl)(fh_be->nfat_arch);
     if (fh.magic == FAT_MAGIC) {
        // Look for a good architecture.
        struct fat_arch *arch_be;
        struct fat_arch arch;
        Int f;
        /* NOTE(review): fh.nfat_arch comes from the (untrusted) file;
           the multiply below could overflow for a hostile header —
           confirm whether callers pre-validate the image. */
        if (ii->img_szB < sizeof(struct fat_header)
                          + fh.nfat_arch * sizeof(struct fat_arch)) {
           ML_(symerr)(di, True, "Invalid Mach-O file (1 too small).");
           goto unmap_and_fail;
        }
        for (f = 0, arch_be = (struct fat_arch *)(fh_be+1);
             f < fh.nfat_arch;
             f++, arch_be++) {
           Int cputype;
#          if defined(VGA_ppc)
           cputype = CPU_TYPE_POWERPC;
#          elif defined(VGA_ppc64)
           cputype = CPU_TYPE_POWERPC64;
#          elif defined(VGA_x86)
           cputype = CPU_TYPE_X86;
#          elif defined(VGA_amd64)
           cputype = CPU_TYPE_X86_64;
#          else
#            error "unknown architecture"
#          endif
           /* Fat arch entries are stored big-endian on disk. */
           arch.cputype    = VG_(ntohl)(arch_be->cputype);
           arch.cpusubtype = VG_(ntohl)(arch_be->cpusubtype);
           arch.offset     = VG_(ntohl)(arch_be->offset);
           arch.size       = VG_(ntohl)(arch_be->size);
           if (arch.cputype == cputype) {
              /* NOTE(review): arch.offset + arch.size could wrap for a
                 hostile header; the containment re-check further below
                 partially mitigates this — confirm. */
              if (ii->img_szB < arch.offset + arch.size) {
                 ML_(symerr)(di, True, "Invalid Mach-O file (2 too small).");
                 goto unmap_and_fail;
              }
              ii->macho_img = ii->img + arch.offset;
              ii->macho_img_szB = arch.size;
              break;
           }
        }
        if (f == fh.nfat_arch) {
           ML_(symerr)(di, True,
                       "No acceptable architecture found in fat file.");
           goto unmap_and_fail;
        }
     }

     /* Sanity check what we found. */

     /* assured by logic above */
     vg_assert(ii->img_szB >= sizeof(struct fat_header));

     if (ii->macho_img_szB < sizeof(struct MACH_HEADER)) {
        ML_(symerr)(di, True, "Invalid Mach-O file (3 too small).");
        goto unmap_and_fail;
     }

     if (ii->macho_img_szB > ii->img_szB) {
        ML_(symerr)(di, True, "Invalid Mach-O file (thin bigger than fat).");
        goto unmap_and_fail;
     }

     if (ii->macho_img >= ii->img
         && ii->macho_img + ii->macho_img_szB <= ii->img + ii->img_szB) {
        /* thin entirely within fat, as expected */
     } else {
        ML_(symerr)(di, True, "Invalid Mach-O file (thin not inside fat).");
        goto unmap_and_fail;
     }

     mh = (struct MACH_HEADER *)ii->macho_img;
     if (mh->magic != MAGIC) {
        ML_(symerr)(di, True, "Invalid Mach-O file (bad magic).");
        goto unmap_and_fail;
     }

     if (ii->macho_img_szB < sizeof(struct MACH_HEADER) + mh->sizeofcmds) {
        ML_(symerr)(di, True, "Invalid Mach-O file (4 too small).");
        goto unmap_and_fail;
     }
   }

   /* Postconditions promised to callers on the success path. */
   vg_assert(ii->img);
   vg_assert(ii->macho_img);
   vg_assert(ii->img_szB > 0);
   vg_assert(ii->macho_img_szB > 0);
   vg_assert(ii->macho_img >= ii->img);
   vg_assert(ii->macho_img + ii->macho_img_szB <= ii->img + ii->img_szB);

   return True; /* success */
   /*NOTREACHED*/

  unmap_and_fail:
   unmap_image(ii);
   return False; /* bah! */
}