static CF_RETURNS_RETAINED CFArrayRef
load_code_signatures(const char *path)
{
    bool fully_parsed_binary = false;
    CFMutableDictionaryRef result = NULL;
    CFMutableArrayRef results = CFArrayCreateMutable(kCFAllocatorDefault,
        0, &kCFTypeArrayCallBacks);

    /* Try the path as a bundle first, then as a flat file.  require()
     * (AssertMacros.h) jumps to the "out" label on failure. */
    FILE *binary = open_bundle(path, "r");
    if (!binary)
        binary = fopen(path, "r");
    require(binary, out);

    struct mach_header header;
    require(1 == fread(&header, sizeof(header), 1, binary), out);

    if ((header.magic == MH_MAGIC) || (header.magic == MH_MAGIC_64)) {
        /* Thin binary: a single mach-o image at offset 0.  Only the
         * fields shared with the 32-bit header are needed, so skip
         * the tail of a 64-bit header. */
        if (header.magic == MH_MAGIC_64)
            fseek(binary, sizeof(struct mach_header_64) -
                sizeof(struct mach_header), SEEK_CUR);
        result = load_code_signature(binary, 0 /*non fat*/);
        require(result, out);
        CFStringRef type = CFStringCreateWithFormat(kCFAllocatorDefault, NULL,
            CFSTR("CPU type: (%d,%d)"), header.cputype, header.cpusubtype);
        CFDictionarySetValue(result, CFSTR("ARCH"), type);
        CFRelease(type);
        CFArrayAppendValue(results, result);
        CFRelease(result);      /* the array holds its own reference */
    } else {
        /* Fat binary: walk every architecture slice.  The fat header and
         * arch table are big-endian on disk, hence the ntohl() calls. */
        struct fat_header fat;
        require(!fseek(binary, 0L, SEEK_SET), out);
        require(1 == fread(&fat, sizeof(fat), 1, binary), out);
        require(ntohl(fat.magic) == FAT_MAGIC, out);
        uint32_t slice, slices = ntohl(fat.nfat_arch);
        struct fat_arch *archs = calloc(slices, sizeof(struct fat_arch));
        require(slices == fread(archs, sizeof(struct fat_arch), slices,
            binary), out);
        for (slice = 0; slice < slices; slice++) {
            uint32_t slice_offset = ntohl(archs[slice].offset);
            require(!fseek(binary, slice_offset, SEEK_SET), out);
            require(1 == fread(&header, sizeof(header), 1, binary), out);
            require((header.magic == MH_MAGIC) ||
                (header.magic == MH_MAGIC_64), out);
            if (header.magic == MH_MAGIC_64)
                fseek(binary, sizeof(struct mach_header_64) -
                    sizeof(struct mach_header), SEEK_CUR);
            result = load_code_signature(binary, slice_offset);
            require(result, out);
            CFStringRef type = CFStringCreateWithFormat(kCFAllocatorDefault,
                NULL, CFSTR("CPU type: (%d,%d)"),
                header.cputype, header.cpusubtype);
            CFDictionarySetValue(result, CFSTR("ARCH"), type);
            CFRelease(type);
            CFArrayAppendValue(results, result);
            CFRelease(result);
        }
        free(archs);
    }
    fully_parsed_binary = true;
out:
    if (!fully_parsed_binary) {
        if (results) {
            CFRelease(results);
            results = NULL;
        }
    }
    if (binary)
        fclose(binary);
    return results;
}
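/*
 * A minimal, hypothetical usage sketch for load_code_signatures() above
 * (print_signature_archs() is not part of the original tool).  It relies
 * only on what the function shows: the result is a retained CFArray of
 * dictionaries, each carrying the "ARCH" string key set above, and NULL
 * means the file could not be parsed.
 */
#include <CoreFoundation/CoreFoundation.h>

static void
print_signature_archs(const char *path)
{
    CFArrayRef sigs = load_code_signatures(path);
    if (!sigs)
        return;                         /* unreadable or not mach-o/fat */

    CFIndex i, count = CFArrayGetCount(sigs);
    for (i = 0; i < count; i++) {
        CFDictionaryRef sig = CFArrayGetValueAtIndex(sigs, i);
        CFStringRef arch = CFDictionaryGetValue(sig, CFSTR("ARCH"));
        if (arch)
            CFShow(arch);               /* e.g. "CPU type: (16777223,3)" */
    }
    CFRelease(sigs);                    /* CF_RETURNS_RETAINED: we own it */
}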
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read the commands section into a
 * kernel buffer, then parse it to process the mach-o load_command
 * segment(s).  We are only interested in a subset of the total set of
 * possible commands.  If "map"==VM_MAP_NULL or "thread"==THREAD_NULL,
 * do not make permanent VM modifications, just preflight the parse.
 */
static load_return_t
parse_machfile(
    struct vnode        *vp,
    vm_map_t            map,
    thread_t            thread,
    struct mach_header  *header,
    off_t               file_offset,
    off_t               macho_size,
    int                 depth,
    int64_t             aslr_offset,
    load_result_t       *result
)
{
    uint32_t                ncmds;
    struct load_command     *lcp;
    struct dylinker_command *dlp = 0;
    struct uuid_command     *uulp = 0;
    integer_t               dlarchbits = 0;
    void                    *control;
    load_return_t           ret = LOAD_SUCCESS;
    caddr_t                 addr;
    void                    *kl_addr;
    vm_size_t               size, kl_size;
    size_t                  offset;
    size_t                  oldoffset;      /* for overflow check */
    int                     pass;
    proc_t                  p = current_proc();     /* XXXX */
    int                     error;
    int                     resid = 0;
    size_t                  mach_header_sz = sizeof(struct mach_header);
    boolean_t               abi64;
    boolean_t               got_code_signatures = FALSE;
    int64_t                 slide = 0;

    if (header->magic == MH_MAGIC_64 ||
        header->magic == MH_CIGAM_64) {
        mach_header_sz = sizeof(struct mach_header_64);
    }

    /*
     *  Break infinite recursion
     */
    if (depth > 6) {
        return(LOAD_FAILURE);
    }

    depth++;

    /*
     *  Check to see if right machine type.
     */
    if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
        !grade_binary(header->cputype,
            header->cpusubtype & ~CPU_SUBTYPE_MASK))
        return(LOAD_BADARCH);

    abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

    switch (header->filetype) {

    case MH_OBJECT:
    case MH_EXECUTE:
    case MH_PRELOAD:
        if (depth != 1) {
            return (LOAD_FAILURE);
        }
        break;

    case MH_FVMLIB:
    case MH_DYLIB:
        if (depth == 1) {
            return (LOAD_FAILURE);
        }
        break;

    case MH_DYLINKER:
        if (depth != 2) {
            return (LOAD_FAILURE);
        }
        break;

    default:
        return (LOAD_FAILURE);
    }

    /*
     *  Get the pager for the file.
     */
    control = ubc_getobject(vp, UBC_FLAGS_NONE);

    /*
     *  Map portion that must be accessible directly into
     *  kernel's map.
     */
    if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size)
        return(LOAD_BADMACHO);

    /*
     *  Round size of Mach-O commands up to page boundary.
     */
    size = round_page(mach_header_sz + header->sizeofcmds);
    if (size <= 0)
        return(LOAD_BADMACHO);

    /*
     * Map the load commands into kernel memory.
     */
    addr = 0;
    kl_size = size;
    kl_addr = kalloc(size);
    addr = (caddr_t)kl_addr;
    if (addr == NULL)
        return(LOAD_NOSPACE);

    error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
        UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p);
    if (error) {
        if (kl_addr)
            kfree(kl_addr, kl_size);
        return(LOAD_IOERROR);
    }

    /*
     *  For PIE and dyld, slide everything by the ASLR offset.
     */
    if ((header->flags & MH_PIE) || (header->filetype == MH_DYLINKER)) {
        slide = aslr_offset;
    }

    /*
     *  Scan through the commands, processing each one as necessary.
     */
    for (pass = 1; pass <= 3; pass++) {

        /*
         * Check that the entry point is contained in an executable segment
         */
        if ((pass == 3) && (result->validentry == 0)) {
            thread_state_initialize(thread);
            ret = LOAD_FAILURE;
            break;
        }

        /*
         * Loop through each of the load_commands indicated by the
         * Mach-O header; if an absurd value is provided, we just
         * run off the end of the reserved section by incrementing
         * the offset too far, so we are implicitly fail-safe.
         */
        offset = mach_header_sz;
        ncmds = header->ncmds;

        while (ncmds--) {
            /*
             *  Get a pointer to the command.
             */
            lcp = (struct load_command *)(addr + offset);
            oldoffset = offset;
            offset += lcp->cmdsize;

            /*
             * Perform prevalidation of the struct load_command
             * before we attempt to use its contents.  Invalid
             * values are ones which result in an overflow, or
             * which can not possibly be valid commands, or which
             * straddle or exist past the reserved section at the
             * start of the image.
             */
            if (oldoffset > offset ||
                lcp->cmdsize < sizeof(struct load_command) ||
                offset > header->sizeofcmds + mach_header_sz) {
                ret = LOAD_BADMACHO;
                break;
            }

            /*
             * Act on struct load_command's for which kernel
             * intervention is required.
             */
            switch(lcp->cmd) {
            case LC_SEGMENT:
            case LC_SEGMENT_64:
                if (pass != 2)
                    break;
                ret = load_segment(lcp,
                                   header->filetype,
                                   control,
                                   file_offset,
                                   macho_size,
                                   vp,
                                   map,
                                   slide,
                                   result);
                break;
            case LC_UNIXTHREAD:
                if (pass != 1)
                    break;
                ret = load_unixthread(
                         (struct thread_command *) lcp,
                         thread,
                         slide,
                         result);
                break;
            case LC_MAIN:
                if (pass != 1)
                    break;
                if (depth != 1)
                    break;
                ret = load_main(
                         (struct entry_point_command *) lcp,
                         thread,
                         slide,
                         result);
                break;
            case LC_LOAD_DYLINKER:
                if (pass != 3)
                    break;
                if ((depth == 1) && (dlp == 0)) {
                    dlp = (struct dylinker_command *)lcp;
                    dlarchbits = (header->cputype & CPU_ARCH_MASK);
                } else {
                    ret = LOAD_FAILURE;
                }
                break;
            case LC_UUID:
                if (pass == 1 && depth == 1) {
                    uulp = (struct uuid_command *)lcp;
                    memcpy(&result->uuid[0], &uulp->uuid[0],
                        sizeof(result->uuid));
                }
                break;
            case LC_CODE_SIGNATURE:
                /* CODE SIGNING */
                if (pass != 1)
                    break;
                /* pager -> uip ->
                   load signatures & store in uip
                   set VM object "signed_pages"
                */
                ret = load_code_signature(
                    (struct linkedit_data_command *) lcp,
                    vp,
                    file_offset,
                    macho_size,
                    header->cputype,
                    (depth == 1) ? result : NULL);
                if (ret != LOAD_SUCCESS) {
                    printf("proc %d: load code signature error %d "
                           "for file \"%s\"\n",
                           p->p_pid, ret, vp->v_name);
                    ret = LOAD_SUCCESS; /* ignore error */
                } else {
                    got_code_signatures = TRUE;
                }
                break;
#if CONFIG_CODE_DECRYPTION
#ifndef __arm__
            case LC_ENCRYPTION_INFO:
                if (pass != 3)
                    break;
                ret = set_code_unprotect(
                    (struct encryption_info_command *) lcp,
                    addr, map, slide, vp);
                if (ret != LOAD_SUCCESS) {
                    printf("proc %d: set_code_unprotect() error %d "
                           "for file \"%s\"\n",
                           p->p_pid, ret, vp->v_name);
                    /* Don't let the app run if it's
                     * encrypted but we failed to set up the
                     * decrypter */
                    psignal(p, SIGKILL);
                }
                break;
#endif
#endif
            default:
                /* Other commands are ignored by the kernel */
                ret = LOAD_SUCCESS;
                break;
            }
            if (ret != LOAD_SUCCESS)
                break;
        }
        if (ret != LOAD_SUCCESS)
            break;
    }

    if (ret == LOAD_SUCCESS) {
        if (!got_code_signatures) {
            struct cs_blob *blob;
            /* no embedded signatures: look for detached ones */
            blob = ubc_cs_blob_get(vp, -1, file_offset);
            if (blob != NULL) {
                /* get flags to be applied to the process */
                result->csflags |= blob->csb_flags;
            }
        }

        /* Make sure if we need dyld, we got it */
        if (result->needs_dynlinker && !dlp) {
            ret = LOAD_FAILURE;
        }

        if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
            /* load the dylinker, and always slide it by the ASLR
             * offset regardless of PIE */
            ret = load_dylinker(dlp, dlarchbits, map, thread, depth,
                aslr_offset, result);
        }

        if ((ret == LOAD_SUCCESS) && (depth == 1)) {
            if (result->thread_count == 0) {
                ret = LOAD_FAILURE;
            }
        }
    }

    if (kl_addr)
        kfree(kl_addr, kl_size);

    return(ret);
}
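/*
 * An illustrative user-space sketch, not from xnu: it repeats the same
 * load-command prevalidation the kernel loop above performs.  The
 * function name and the flat `buf` layout (mach_header followed by
 * sizeofcmds bytes of commands) are assumptions for the example; only
 * host-endian 32-bit images are handled.  Unlike the kernel, which can
 * rely on its page-rounded buffer to make a stray read harmless, this
 * version bounds-checks before dereferencing each command.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <mach-o/loader.h>

static bool
validate_load_commands(const uint8_t *buf, size_t buf_size)
{
    struct mach_header mh;

    if (buf_size < sizeof(mh))
        return false;
    memcpy(&mh, buf, sizeof(mh));
    if (mh.magic != MH_MAGIC)               /* host-endian, 32-bit only */
        return false;

    size_t offset = sizeof(struct mach_header);
    size_t end = offset + mh.sizeofcmds;    /* reserved command region */
    if (end > buf_size)
        return false;

    for (uint32_t i = 0; i < mh.ncmds; i++) {
        /* never read a load_command that straddles the region */
        if (offset + sizeof(struct load_command) > end)
            return false;
        const struct load_command *lcp =
            (const struct load_command *)(buf + offset);
        size_t oldoffset = offset;
        offset += lcp->cmdsize;
        /* the same three rejections as the kernel: arithmetic overflow,
         * an undersized cmdsize, or a command past the region */
        if (oldoffset > offset ||
            lcp->cmdsize < sizeof(struct load_command) ||
            offset > end)
            return false;
    }
    return true;
}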
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read the commands section into a
 * kernel buffer, then parse it to process the mach-o load_command
 * segment(s).  We are only interested in a subset of the total set of
 * possible commands.
 */
static load_return_t
parse_machfile(
    struct vnode        *vp,
    vm_map_t            map,
    thread_t            thread,
    struct mach_header  *header,
    off_t               file_offset,
    off_t               macho_size,
    int                 depth,
    load_result_t       *result
)
{
    uint32_t                ncmds;
    struct load_command     *lcp;
    struct dylinker_command *dlp = 0;
    integer_t               dlarchbits = 0;
    void                    *pager;
    load_return_t           ret = LOAD_SUCCESS;
    caddr_t                 addr;
    void                    *kl_addr;
    vm_size_t               size, kl_size;
    size_t                  offset;
    size_t                  oldoffset;      /* for overflow check */
    int                     pass;
    proc_t                  p = current_proc();     /* XXXX */
    int                     error;
    int                     resid = 0;
    task_t                  task;
    size_t                  mach_header_sz = sizeof(struct mach_header);
    boolean_t               abi64;
    boolean_t               got_code_signatures = FALSE;

    if (header->magic == MH_MAGIC_64 ||
        header->magic == MH_CIGAM_64) {
        mach_header_sz = sizeof(struct mach_header_64);
    }

    /*
     *  Break infinite recursion
     */
    if (depth > 6) {
        return(LOAD_FAILURE);
    }

    task = (task_t)get_threadtask(thread);

    depth++;

    /*
     *  Check to see if right machine type.
     */
    if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
        !grade_binary(header->cputype,
            header->cpusubtype & ~CPU_SUBTYPE_MASK))
        return(LOAD_BADARCH);

    abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

    switch (header->filetype) {

    case MH_OBJECT:
    case MH_EXECUTE:
    case MH_PRELOAD:
        if (depth != 1) {
            return (LOAD_FAILURE);
        }
        break;

    case MH_FVMLIB:
    case MH_DYLIB:
        if (depth == 1) {
            return (LOAD_FAILURE);
        }
        break;

    case MH_DYLINKER:
        if (depth != 2) {
            return (LOAD_FAILURE);
        }
        break;

    default:
        return (LOAD_FAILURE);
    }

    /*
     *  Get the pager for the file.
     */
    pager = (void *) ubc_getpager(vp);

    /*
     *  Map portion that must be accessible directly into
     *  kernel's map.
     */
    if ((mach_header_sz + header->sizeofcmds) > macho_size)
        return(LOAD_BADMACHO);

    /*
     *  Round size of Mach-O commands up to page boundary.
     */
    size = round_page(mach_header_sz + header->sizeofcmds);
    if (size <= 0)
        return(LOAD_BADMACHO);

    /*
     * Map the load commands into kernel memory.
     */
    addr = 0;
    kl_size = size;
    kl_addr = kalloc(size);
    addr = (caddr_t)kl_addr;
    if (addr == NULL)
        return(LOAD_NOSPACE);

    error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
        UIO_SYSSPACE32, 0, kauth_cred_get(), &resid, p);
    if (error) {
        if (kl_addr)
            kfree(kl_addr, kl_size);
        return(LOAD_IOERROR);
    }
    /* (void)ubc_map(vp, PROT_EXEC); */ /* NOT HERE */

    /*
     *  Scan through the commands, processing each one as necessary.
     */
    for (pass = 1; pass <= 2; pass++) {
        /*
         * Loop through each of the load_commands indicated by the
         * Mach-O header; if an absurd value is provided, we just
         * run off the end of the reserved section by incrementing
         * the offset too far, so we are implicitly fail-safe.
         */
        offset = mach_header_sz;
        ncmds = header->ncmds;
        while (ncmds--) {
            /*
             *  Get a pointer to the command.
             */
            lcp = (struct load_command *)(addr + offset);
            oldoffset = offset;
            offset += lcp->cmdsize;

            /*
             * Perform prevalidation of the struct load_command
             * before we attempt to use its contents.  Invalid
             * values are ones which result in an overflow, or
             * which can not possibly be valid commands, or which
             * straddle or exist past the reserved section at the
             * start of the image.
             */
            if (oldoffset > offset ||
                lcp->cmdsize < sizeof(struct load_command) ||
                offset > header->sizeofcmds + mach_header_sz) {
                ret = LOAD_BADMACHO;
                break;
            }

            /*
             * Act on struct load_command's for which kernel
             * intervention is required.
             */
            switch(lcp->cmd) {
            case LC_SEGMENT_64:
                if (pass != 1)
                    break;
                ret = load_segment_64(
                         (struct segment_command_64 *)lcp,
                         pager,
                         file_offset,
                         macho_size,
                         ubc_getsize(vp),
                         map,
                         result);
                break;
            case LC_SEGMENT:
                if (pass != 1)
                    break;
                ret = load_segment(
                         (struct segment_command *) lcp,
                         pager,
                         file_offset,
                         macho_size,
                         ubc_getsize(vp),
                         map,
                         result);
                break;
            case LC_THREAD:
                if (pass != 2)
                    break;
                ret = load_thread((struct thread_command *)lcp,
                         thread,
                         result);
                break;
            case LC_UNIXTHREAD:
                if (pass != 2)
                    break;
                ret = load_unixthread(
                         (struct thread_command *) lcp,
                         thread,
                         result);
                break;
            case LC_LOAD_DYLINKER:
                if (pass != 2)
                    break;
                if ((depth == 1) && (dlp == 0)) {
                    dlp = (struct dylinker_command *)lcp;
                    dlarchbits = (header->cputype & CPU_ARCH_MASK);
                } else {
                    ret = LOAD_FAILURE;
                }
                break;
            case LC_CODE_SIGNATURE:
                /* CODE SIGNING */
                if (pass != 2)
                    break;
                /* pager -> uip ->
                   load signatures & store in uip
                   set VM object "signed_pages"
                */
                ret = load_code_signature(
                    (struct linkedit_data_command *) lcp,
                    vp,
                    file_offset,
                    macho_size,
                    header->cputype,
                    (depth == 1) ? result : NULL);
                if (ret != LOAD_SUCCESS) {
                    printf("proc %d: load code signature error %d "
                           "for file \"%s\"\n",
                           p->p_pid, ret, vp->v_name);
                    ret = LOAD_SUCCESS; /* ignore error */
                } else {
                    got_code_signatures = TRUE;
                }
                break;
            default:
                /* Other commands are ignored by the kernel */
                ret = LOAD_SUCCESS;
                break;
            }
            if (ret != LOAD_SUCCESS)
                break;
        }
        if (ret != LOAD_SUCCESS)
            break;
    }

    if (ret == LOAD_SUCCESS) {
        if (!got_code_signatures) {
            struct cs_blob *blob;
            /* no embedded signatures: look for detached ones */
            blob = ubc_cs_blob_get(vp, -1, file_offset);
            if (blob != NULL) {
                /* get flags to be applied to the process */
                result->csflags |= blob->csb_flags;
            }
        }

        if (dlp != 0)
            ret = load_dylinker(dlp, dlarchbits, map, thread,
                depth, result, abi64);

        if (depth == 1) {
            if (result->thread_count == 0) {
                ret = LOAD_FAILURE;
            } else if (abi64) {
#ifdef __ppc__
                /* Map in 64-bit commpage */
                /* LP64todo - make this clean */
                /*
                 * PPC51: ppc64 is limited to 51-bit addresses.
                 * Memory above that limit is handled specially
                 * at the pmap level.
                 */
                pmap_map_sharedpage(current_task(), get_map_pmap(map));
#endif /* __ppc__ */
            }
        }
    }

    if (kl_addr)
        kfree(kl_addr, kl_size);

    if (ret == LOAD_SUCCESS)
        (void)ubc_map(vp, PROT_READ | PROT_EXEC);

    return(ret);
}
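/*
 * Both versions of parse_machfile() above reject an image whose cputype
 * fails the grade_binary()/CPU_ARCH_MASK check; picking the right slice
 * out of a fat file happens before either is called.  A minimal
 * user-space sketch of that selection step, with a hypothetical
 * fat_slice_offset() helper: scan the big-endian fat_arch table for a
 * wanted cpu type and return its file offset (0, the fat header's own
 * offset, doubles as "not found", since no slice can start there).
 */
#include <stdio.h>
#include <arpa/inet.h>          /* ntohl(): fat structures are big-endian */
#include <mach-o/fat.h>

static uint32_t
fat_slice_offset(FILE *f, cpu_type_t wanted)
{
    struct fat_header fh;
    uint32_t i, nslices;

    if (fseek(f, 0L, SEEK_SET) || 1 != fread(&fh, sizeof(fh), 1, f))
        return 0;
    if (ntohl(fh.magic) != FAT_MAGIC)
        return 0;

    /* the fat_arch table follows the fat header directly */
    nslices = ntohl(fh.nfat_arch);
    for (i = 0; i < nslices; i++) {
        struct fat_arch fa;
        if (1 != fread(&fa, sizeof(fa), 1, f))
            return 0;
        if ((cpu_type_t)ntohl(fa.cputype) == wanted)
            return ntohl(fa.offset);
    }
    return 0;                   /* no matching architecture */
}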