Example #1
/* Load a Segment Dynamically */
static
int
dynamic_load_segment(struct vnode *v, off_t offset, vaddr_t vaddr, 
	     size_t memsize, size_t filesize,
	     int is_executable, int permissions)
{
	/*
	 * If the segment is split between two pages, we need an extra page.
	 * For example, if vaddr = 0x444970 and size = 0x7f2, we need the
	 * bytes from 0x444970 up to 0x445162, so we need pages 0x444000
	 * AND 0x445000 -- TWO PAGES. Simply aligning to a page boundary
	 * gets us 0x444000, but not the 0x162 bytes that spill into the
	 * next page. So we need to account for that now:
	 */
	int result;
	if(((vaddr & SUB_FRAME) + filesize > PAGE_SIZE))
	{
		DEBUG(DB_DEMAND, "Spillover Detected\n");
		/* Copy the first part: allocate the page that vaddr lives on */
		bool lock = get_coremap_lock();
		struct page *page = page_alloc(curthread->t_addrspace, vaddr & PAGE_FRAME, permissions);
		release_coremap_lock(lock);
		/* Amount that fits on the first page */
		size_t amt_to_copy = PAGE_SIZE - (vaddr & SUB_FRAME);
		//Copy the rest	
		result = load_segment(v,offset,vaddr,amt_to_copy,amt_to_copy,is_executable);
		KASSERT(page->state == LOCKED);
		page->state = DIRTY;
		if(result) { return result; }
		/* Copy the second part of the page */
		vaddr_t spillover_va = ((vaddr & PAGE_FRAME) + PAGE_SIZE);
		lock = get_coremap_lock();
		page = page_alloc(curthread->t_addrspace, spillover_va, permissions);
		release_coremap_lock(lock);
		result = load_segment(v, offset + amt_to_copy, spillover_va, memsize - amt_to_copy, filesize - amt_to_copy, is_executable);
		KASSERT(page->state == LOCKED);
		page->state = DIRTY;
		if(result) { return result; }
	}
	else
	{
		bool lock = get_coremap_lock();
		struct page *page = page_alloc(curthread->t_addrspace,vaddr & PAGE_FRAME,permissions);
		release_coremap_lock(lock);
		result = load_segment(v,offset,vaddr,memsize,filesize,is_executable);
		KASSERT(page->state == LOCKED);
		page->state = DIRTY;
		return result;
	}
	return 0;
}
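
A sketch of how a VM fault handler might drive dynamic_load_segment() above. The struct segment fields and the as_find_segment() helper are illustrative assumptions, not part of the original example:

/* Hypothetical fault-handler fragment: find the faulting segment and
 * demand-load the single page covering the fault address. */
static int
demand_fault(struct addrspace *as, vaddr_t faultaddress)
{
	struct segment *seg = as_find_segment(as, faultaddress); /* assumed helper */
	if (seg == NULL) {
		return EFAULT;
	}
	vaddr_t page_va = faultaddress & PAGE_FRAME;
	off_t file_off = seg->file_offset + (page_va - seg->vbase);
	size_t in_file = (page_va - seg->vbase) < seg->filesize ?
			seg->filesize - (page_va - seg->vbase) : 0;

	return dynamic_load_segment(seg->v, file_off, page_va, PAGE_SIZE,
			in_file < PAGE_SIZE ? in_file : PAGE_SIZE,
			seg->is_exec, seg->perms);
}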
Example #2
File: elfload.c  Project: nmav/cspim
/**
 * Iterate program headers and copy PT_LOAD segments at their proper
 * locations.  If any segment type other than PT_NULL or PT_LOAD is
 * encountered, return an error.
 */
static int load_segments(struct mips_cpu *pcpu)
{
	const char *elf   = pcpu->elf;
	size_t      elfsz = pcpu->elfsz;
	Elf32_Ehdr *eh    = (Elf32_Ehdr*)elf;
	unsigned    i;
	
	for(i = 0; i < eh->e_phnum; i++) {
		Elf32_Phdr *ph = get_phdr(elf, elfsz, i);
		
		if(!ph)
			return -1;
		switch(ph->p_type) {
		case PT_NULL:
			break;
		case PT_LOAD:
			if(load_segment(pcpu, ph))
				return -1;
			break;
		default:
			return -1;
		}
	}
	return 0;
}
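
load_segments() above leans on a get_phdr() helper that is not shown; a bounds-checked version might look like the following sketch (an assumption for illustration, not the actual cspim code):

/* Return the i-th program header, or NULL if the header table would
 * run past the end of the in-memory ELF image. */
static Elf32_Phdr *get_phdr(const char *elf, size_t elfsz, unsigned i)
{
	Elf32_Ehdr *eh  = (Elf32_Ehdr*)elf;
	size_t      off = eh->e_phoff + (size_t)i * eh->e_phentsize;

	if (eh->e_phentsize < sizeof(Elf32_Phdr) ||
	    off + sizeof(Elf32_Phdr) > elfsz)
		return NULL;
	return (Elf32_Phdr*)(elf + off);
}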
Example #3
int seg_ondemand_load(struct segment *seg, int idx){
	DEBUG(DB_VM,"On-demanding page loading on index %d\n", idx);
	int result = uframe_alloc1(&seg->pagetable[idx].pfn, curproc->pid, idx+ADDR_MAPPING_NUM(seg->vbase));
	if (result) return result;
	seg->pagetable[idx].alloc = true;

	DEBUG(DB_VM,"\tDone allocation, segment %p loading on demand? %d\n", seg, seg->ondemand);

	if (!seg->ondemand) return 0; // nothing to read from the file; let the process write the page

	int idx_offset = idx * PAGE_SIZE;
	int fsize = seg->ph.p_filesz - idx_offset;
	DEBUG(DB_VM,"\tAbout to read vnode %p, offset:%x, size:%d\n", seg->as->v, idx_offset, fsize);
	if (fsize <= 0) return 0; // page lies entirely past the file contents (bss)
	if (fsize > PAGE_SIZE) fsize = PAGE_SIZE; // read at most one page

	// text/data segment -- load on demand
	result = load_segment(seg->as,
		seg->as->v,
		seg->ph.p_offset + idx_offset,
		seg->vbase + idx_offset,
		PAGE_SIZE,
		fsize,
		seg->ph.p_flags & PF_X);
	return result;
}
Example #4
File: elf.c  Project: B-Rich/serialice
static unsigned int
load_segments(unsigned long *file_addr)
{
	struct ehdr *ehdr = (struct ehdr *) file_addr;
	/* Calculate program header address */
	struct phdr *phdr =
	    (struct phdr *) (((unsigned char *) file_addr) + ehdr->e_phoff);
	int i;
	/* loop e_phnum times */
	for (i = 0; i < ehdr->e_phnum; i++) {
		/* PT_LOAD ? */
		if (phdr->p_type == 1) {
			/* copy segment */
			load_segment(file_addr, phdr);
		}
		/* step to next header */
		phdr =
		    (struct phdr *) (((unsigned char *) phdr) +
				     ehdr->e_phentsize);
	}
	return ehdr->e_entry;
}
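
The return value is the image's entry address, so a caller would typically cast it to a function pointer and jump to it; a minimal, hypothetical sketch:

/* Hypothetical boot-stub fragment using load_segments() above. */
unsigned int entry = load_segments(file_addr);
((void (*)(void)) entry)();	/* transfer control to the loaded image */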
static int load_image(struct pil_device *pil)
{
	int i, ret;
	char fw_name[30];
	struct elf32_hdr *ehdr;
	const struct elf32_phdr *phdr;
	const struct firmware *fw;
	unsigned long proxy_timeout = pil->desc->proxy_timeout;

	down_read(&pil_pm_rwsem);
	snprintf(fw_name, sizeof(fw_name), "%s.mdt", pil->desc->name);
	ret = request_firmware(&fw, fw_name, &pil->dev);
	if (ret) {
		dev_err(&pil->dev, "Failed to locate %s\n", fw_name);
		goto out;
	}

	if (fw->size < sizeof(*ehdr)) {
		dev_err(&pil->dev, "Not big enough to be an elf header\n");
		ret = -EIO;
		goto release_fw;
	}

	ehdr = (struct elf32_hdr *)fw->data;
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
		dev_err(&pil->dev, "Not an elf header\n");
		ret = -EIO;
		goto release_fw;
	}

	if (ehdr->e_phnum == 0) {
		dev_err(&pil->dev, "No loadable segments\n");
		ret = -EIO;
		goto release_fw;
	}
	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
	    sizeof(struct elf32_hdr) > fw->size) {
		dev_err(&pil->dev, "Program headers not within mdt\n");
		ret = -EIO;
		goto release_fw;
	}

	ret = pil->desc->ops->init_image(pil->desc, fw->data, fw->size);
	if (ret) {
		dev_err(&pil->dev, "Invalid firmware metadata\n");
		goto release_fw;
	}

	phdr = (const struct elf32_phdr *)(fw->data + sizeof(struct elf32_hdr));
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (!segment_is_loadable(phdr))
			continue;

		ret = load_segment(phdr, i, pil);
		if (ret) {
			dev_err(&pil->dev, "Failed to load segment %d\n",
					i);
			goto release_fw;
		}
	}

	ret = pil_proxy_vote(pil);
	if (ret) {
		dev_err(&pil->dev, "Failed to proxy vote\n");
		goto release_fw;
	}

	ret = pil->desc->ops->auth_and_reset(pil->desc);
	if (ret) {
		dev_err(&pil->dev, "Failed to bring out of reset\n");
		proxy_timeout = 0; /* Remove proxy vote immediately on error */
		goto err_boot;
	}
	dev_info(&pil->dev, "brought %s out of reset\n", pil->desc->name);
err_boot:
	pil_proxy_unvote(pil, proxy_timeout);
release_fw:
	release_firmware(fw);
out:
	up_read(&pil_pm_rwsem);
	return ret;
}
Example #6
/*
 * Load an ELF executable user program into the current address space.
 *
 * Returns the entry point (initial PC) for the program in ENTRYPOINT.
 */
int
load_elf(struct vnode *v, vaddr_t *entrypoint)
{
	Elf_Ehdr eh;   /* Executable header */
	Elf_Phdr ph;   /* "Program header" = segment header */
	int result, i;
	struct iovec iov;
	struct uio ku;
	struct addrspace *as;

	as = curproc_getas();

	/*
	 * Read the executable header from offset 0 in the file.
	 */

	uio_kinit(&iov, &ku, &eh, sizeof(eh), 0, UIO_READ);
	result = VOP_READ(v, &ku);
	if (result) {
		return result;
	}

	if (ku.uio_resid != 0) {
		/* short read; problem with executable? */
		kprintf("ELF: short read on header - file truncated?\n");
		return ENOEXEC;
	}

	/*
	 * Check to make sure it's a 32-bit ELF-version-1 executable
	 * for our processor type. If it's not, we can't run it.
	 *
	 * Ignore EI_OSABI and EI_ABIVERSION - properly, we should
	 * define our own, but that would require tinkering with the
	 * linker to have it emit our magic numbers instead of the
	 * default ones. (If the linker even supports these fields,
	 * which were not in the original elf spec.)
	 */

	if (eh.e_ident[EI_MAG0] != ELFMAG0 ||
	    eh.e_ident[EI_MAG1] != ELFMAG1 ||
	    eh.e_ident[EI_MAG2] != ELFMAG2 ||
	    eh.e_ident[EI_MAG3] != ELFMAG3 ||
	    eh.e_ident[EI_CLASS] != ELFCLASS32 ||
	    eh.e_ident[EI_DATA] != ELFDATA2MSB ||
	    eh.e_ident[EI_VERSION] != EV_CURRENT ||
	    eh.e_version != EV_CURRENT ||
	    eh.e_type!=ET_EXEC ||
	    eh.e_machine!=EM_MACHINE) {
		return ENOEXEC;
	}

	/*
	 * Go through the list of segments and set up the address space.
	 *
	 * Ordinarily there will be one code segment, one read-only
	 * data segment, and one data/bss segment, but there might
	 * conceivably be more. You don't need to support such files
	 * if it's unduly awkward to do so.
	 *
	 * Note that the expression eh.e_phoff + i*eh.e_phentsize is 
	 * mandated by the ELF standard - we use sizeof(ph) to load,
	 * because that's the structure we know, but the file on disk
	 * might have a larger structure, so we must use e_phentsize
	 * to find where the phdr starts.
	 */

	for (i=0; i<eh.e_phnum; i++) {
		off_t offset = eh.e_phoff + i*eh.e_phentsize;
		uio_kinit(&iov, &ku, &ph, sizeof(ph), offset, UIO_READ);

		result = VOP_READ(v, &ku);
		if (result) {
			return result;
		}

		if (ku.uio_resid != 0) {
			/* short read; problem with executable? */
			kprintf("ELF: short read on phdr - file truncated?\n");
			return ENOEXEC;
		}

		switch (ph.p_type) {
		    case PT_NULL: /* skip */ continue;
		    case PT_PHDR: /* skip */ continue;
		    case PT_MIPS_REGINFO: /* skip */ continue;
		    case PT_LOAD: break;
		    default:
			kprintf("loadelf: unknown segment type %d\n", 
				ph.p_type);
			return ENOEXEC;
		}

		result = as_define_region(as,
					  ph.p_vaddr, ph.p_memsz,
					  ph.p_flags & PF_R,
					  ph.p_flags & PF_W,
					  ph.p_flags & PF_X);
		if (result) {
			return result;
		}
	}

	result = as_prepare_load(as);
	if (result) {
		return result;
	}

	/*
	 * Now actually load each segment.
	 */

	for (i=0; i<eh.e_phnum; i++) {
		off_t offset = eh.e_phoff + i*eh.e_phentsize;
		uio_kinit(&iov, &ku, &ph, sizeof(ph), offset, UIO_READ);

		result = VOP_READ(v, &ku);
		if (result) {
			return result;
		}

		if (ku.uio_resid != 0) {
			/* short read; problem with executable? */
			kprintf("ELF: short read on phdr - file truncated?\n");
			return ENOEXEC;
		}

		switch (ph.p_type) {
		    case PT_NULL: /* skip */ continue;
		    case PT_PHDR: /* skip */ continue;
		    case PT_MIPS_REGINFO: /* skip */ continue;
		    case PT_LOAD: break;
		    default:
			kprintf("loadelf: unknown segment type %d\n", 
				ph.p_type);
			return ENOEXEC;
		}

		result = load_segment(as, v, ph.p_offset, ph.p_vaddr, 
				      ph.p_memsz, ph.p_filesz,
				      ph.p_flags & PF_X);
		if (result) {
			return result;
		}
	}

	result = as_complete_load(as);
	if (result) {
		return result;
	}

	*entrypoint = eh.e_entry;

	return 0;
}
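
For context, a stock OS/161 caller (runprogram-style) drives load_elf() roughly like this, assuming the new address space has already been created and activated and progname is the executable's path:

	struct vnode *v;
	vaddr_t entrypoint;
	int result;

	result = vfs_open(progname, O_RDONLY, 0, &v);
	if (result) {
		return result;
	}
	result = load_elf(v, &entrypoint);
	vfs_close(v);
	if (result) {
		return result;
	}
	/* ... set up the user stack, then enter_new_process(..., entrypoint) */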
Example #7
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands. If "map"==VM_MAP_NULL or
 * "thread"==THREAD_NULL, do not make permament VM modifications,
 * just preflight the parse.
 */
static
load_return_t
parse_machfile(
	uint8_t 		*vp,       
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	int			depth,
	int64_t			aslr_offset,
	int64_t			dyld_aslr_offset,
	load_result_t		*result
)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	integer_t		dlarchbits = 0;
	void *			control = NULL;	/* no pager object in this port */
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	vm_size_t		size,kl_size;
	size_t			offset;
	size_t			oldoffset;	/* for overflow check */
	int			pass;
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;
	boolean_t		got_code_signatures = FALSE;
	int64_t			slide = 0;

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
	    	mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 *	Break infinite recursion
	 */
	if (depth > 6) {
        printf("parse_machfile 1: %s\n", load_to_string(LOAD_FAILURE));
		return(LOAD_FAILURE);
	}

	depth++;

	/*
	 *	Check to see if right machine type.
	 */
    // this should be implemented by qemu somehow.
	/*if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) ||
	    !grade_binary(header->cputype, 
	    	header->cpusubtype & ~CPU_SUBTYPE_MASK))
		return(LOAD_BADARCH);*/
		
	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
    
	switch (header->filetype) {
	
	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1) {
            printf("parse_machfile 2: %s\n", load_to_string(LOAD_FAILURE));
			return (LOAD_FAILURE);
		}
		break;
		
	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1) {
            printf("parse_machfile 2: %s\n", load_to_string(LOAD_FAILURE));
			return (LOAD_FAILURE);
		}
		break;

	case MH_DYLINKER:
		if (depth != 2) {
            printf("parse_machfile 3: %s\n", load_to_string(LOAD_FAILURE));
			return (LOAD_FAILURE);
		}
		break;
		
	default:
        printf("parse_machfile 4: %s, header->filetype = %d\n", load_to_string(LOAD_FAILURE), header->filetype);
		return (LOAD_FAILURE);
	}

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
    if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size) {
        printf("parse_machfile 5: %s header->sizeofcmds: %d macho_size %d\n", load_to_string(LOAD_BADMACHO), header->sizeofcmds, macho_size);
		return(LOAD_BADMACHO);
    }

	/*
	 *	Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(mach_header_sz + header->sizeofcmds);
    if (size <= 0) {
        printf("parse_machfile 6: %s\n", load_to_string(LOAD_BADMACHO));
		return(LOAD_BADMACHO);
    }

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	addr = (caddr_t)(vp);
    if (addr == NULL) {
        printf("parse_machfile 7: %s\n", load_to_string(LOAD_NOSPACE));
		return(LOAD_NOSPACE);
    }

	/*
	 *	For PIE and dyld, slide everything by the ASLR offset.
	 */
	if ((header->flags & MH_PIE) || (header->filetype == MH_DYLINKER)) {
		slide = aslr_offset;
	}

	/*
	 *  Scan through the commands, processing each one as necessary.
	 *  We parse in three passes through the headers:
	 *  1: thread state, uuid, code signature
	 *  2: segments
	 *  3: dyld, encryption, check entry point
	 */
	
	for (pass = 1; pass <= 3; pass++) {

		/*
		 * Check that the entry point is contained in an executable segment
		 */ 
		if ((pass == 3) && (result->validentry == 0)) {
			ret = LOAD_FAILURE;
			break;
		}

		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;

		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;
			offset += lcp->cmdsize;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents.  Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (oldoffset > offset ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > header->sizeofcmds + mach_header_sz) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 */
            printf("Command: %s\n", command_to_string(lcp->cmd));
			switch(lcp->cmd) {
			case LC_SEGMENT:
				if (pass != 2)
					break;

				if (abi64) {
					/*
					 * Having an LC_SEGMENT command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
				                   header->filetype,
				                   control,
				                   file_offset,
				                   macho_size,
				                   vp,
				                   slide,
				                   result);
				break;
			case LC_SEGMENT_64:
				if (pass != 2)
					break;

				if (!abi64) {
					/*
					 * Having an LC_SEGMENT_64 command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
				                   header->filetype,
				                   control,
				                   file_offset,
				                   macho_size,
				                   vp,
				                   slide,
				                   result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 1)
					break;
				ret = load_unixthread(
						 (struct thread_command *) lcp,
						 result);
				break;
			case LC_MAIN:
				if (pass != 1)
					break;
				if (depth != 1)
					break;
				ret = load_main(
						 (struct entry_point_command *) lcp,
						 result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 3)
					break;
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
					dlarchbits = (header->cputype & CPU_ARCH_MASK);
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			case LC_UUID:
				if (pass == 1 && depth == 1) {
					ret = load_uuid((struct uuid_command *) lcp,
							(char *)addr + mach_header_sz + header->sizeofcmds,
							result);
				}
				break;
			case LC_CODE_SIGNATURE:
				/* CODE SIGNING */
				if (pass != 1)
					break;
				/* pager -> uip ->
				   load signatures & store in uip
				   set VM object "signed_pages"
				*/
				/*ret = load_code_signature(
					(struct linkedit_data_command *) lcp,
					vp,
					file_offset,
					macho_size,
					header->cputype,
					result);*/
				if (ret != LOAD_SUCCESS) {
					printf("proc: load code signature error %d ", ret);
					ret = LOAD_SUCCESS; /* ignore error */
				} else {
					got_code_signatures = TRUE;
				}
				break;
#if CONFIG_CODE_DECRYPTION
			case LC_ENCRYPTION_INFO:
			case LC_ENCRYPTION_INFO_64:
				if (pass != 3)
					break;
				ret = set_code_unprotect(
					(struct encryption_info_command *) lcp,
					addr, map, slide, vp,
					header->cputype, header->cpusubtype);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: set_code_unprotect() error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					/* 
					 * Don't let the app run if it's 
					 * encrypted but we failed to set up the
					 * decrypter. If the keys are missing it will
					 * return LOAD_DECRYPTFAIL.
					 */
					 if (ret == LOAD_DECRYPTFAIL) {
						/* failed to load due to missing FP keys */
						proc_lock(p);
						p->p_lflag |= P_LTERM_DECRYPTFAIL;
						proc_unlock(p);
					}
					 psignal(p, SIGKILL);
				}
				break;
#endif
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
            
            printf("parse_machfile 9: ret %s\n", load_to_string(ret));

			if (ret != LOAD_SUCCESS)
				break;
		}
        
		if (ret != LOAD_SUCCESS)
			break;
	}

    if (ret == LOAD_SUCCESS) {
	    if (! got_code_signatures) {
		    //struct cs_blob *blob;
		    /* no embedded signatures: look for detached ones */
		    //blob = ubc_cs_blob_get(vp, -1, file_offset);
		    //if (blob != NULL) {
			//unsigned int cs_flag_data = blob->csb_flags;
			//if(0 != ubc_cs_generation_check(vp)) {
			//	if (0 != ubc_cs_blob_revalidate(vp, blob)) {
			//		/* clear out the flag data if revalidation fails */
			//		cs_flag_data = 0;
			//		result->csflags &= ~CS_VALID;
			//	}
			//}
			/* get flags to be applied to the process */
			//result->csflags |= cs_flag_data;
		    //}
	    }

		/* Make sure if we need dyld, we got it */
		if (result->needs_dynlinker && !dlp) {
			ret = LOAD_FAILURE;
		}

	    if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
			/*
		 	* load the dylinker, and slide it by the independent DYLD ASLR
		 	* offset regardless of the PIE-ness of the main binary.
		 	*/
			ret = load_dylinker(dlp, dlarchbits, depth, dyld_aslr_offset, result);
		}

	    if((ret == LOAD_SUCCESS) && (depth == 1)) {
			if (result->thread_count == 0) {
				ret = LOAD_FAILURE;
			}
	    }
    }
    
    printf("parse_machfile 8: %s\n", load_to_string(ret));

	return(ret);
}
static int load_image(struct pil_device *pil)
{
	int i, ret;
	char fw_name[30];
	struct elf32_hdr *ehdr;
	const struct elf32_phdr *phdr;
	const struct firmware *fw;
	unsigned long proxy_timeout = pil->desc->proxy_timeout;
#ifdef CONFIG_SEC_DEBUG
	static int load_count;
#endif
#ifdef CONFIG_SEC_PERIPHERAL_SECURE_CHK
	static int load_count_fwd;
	static int load_count_auth;
#endif

	down_read(&pil_pm_rwsem);
	snprintf(fw_name, sizeof(fw_name), "%s.mdt", pil->desc->name);
	ret = request_firmware(&fw, fw_name, &pil->dev);
	if (ret) {
		dev_err(&pil->dev, "%s: Failed to locate %s\n",
				pil->desc->name, fw_name);
#ifdef CONFIG_SEC_DEBUG
		load_count++;
		if (load_count > 10 && check_power_off_and_restart() == 0)
			panic("Failed to load %s image!", fw_name);
#endif
		goto out;
	}

	if (fw->size < sizeof(*ehdr)) {
		dev_err(&pil->dev, "%s: Not big enough to be an elf header\n",
				pil->desc->name);
		ret = -EIO;
		goto release_fw;
	}

	ehdr = (struct elf32_hdr *)fw->data;
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
		dev_err(&pil->dev, "%s: Not an elf header\n", pil->desc->name);
		ret = -EIO;
		goto release_fw;
	}

	if (ehdr->e_phnum == 0) {
		dev_err(&pil->dev, "%s: No loadable segments\n",
				pil->desc->name);
		ret = -EIO;
		goto release_fw;
	}
	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
	    sizeof(struct elf32_hdr) > fw->size) {
		dev_err(&pil->dev, "%s: Program headers not within mdt\n",
				pil->desc->name);
		ret = -EIO;
		goto release_fw;
	}

	ret = pil->desc->ops->init_image(pil->desc, fw->data, fw->size);
	if (ret) {
		dev_err(&pil->dev, "%s: Invalid firmware metadata %d\n",
				pil->desc->name, ret);
#ifdef CONFIG_SEC_PERIPHERAL_SECURE_CHK
		load_count_fwd++;
		if (load_count_fwd > 10) {
			release_firmware(fw);
			up_read(&pil_pm_rwsem);
			sec_peripheral_secure_check_fail();
		} else {
			goto release_fw;
		}
#else
		goto release_fw;
#endif
	}

	phdr = (const struct elf32_phdr *)(fw->data + sizeof(struct elf32_hdr));
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (!segment_is_loadable(phdr))
			continue;

		ret = load_segment(phdr, i, pil);
		if (ret) {
			dev_err(&pil->dev, "%s: Failed to load segment %d\n",
					pil->desc->name, i);
			goto release_fw;
		}
	}

	ret = pil_proxy_vote(pil);
	if (ret) {
		dev_err(&pil->dev, "%s: Failed to proxy vote\n",
					pil->desc->name);
		goto release_fw;
	}

	ret = pil->desc->ops->auth_and_reset(pil->desc);
	if (ret) {
		dev_err(&pil->dev, "%s: Failed to bring out of reset %d\n",
				pil->desc->name, ret);
		proxy_timeout = 0; /* Remove proxy vote immediately on error */
#ifdef CONFIG_SEC_PERIPHERAL_SECURE_CHK
		load_count_auth++;
		if (load_count_auth > 10) {
			release_firmware(fw);
			up_read(&pil_pm_rwsem);
			sec_peripheral_secure_check_fail();
		} else {
			goto release_fw;
		}
#else
		goto err_boot;
#endif
	}
	dev_info(&pil->dev, "%s: Brought out of reset\n", pil->desc->name);
#ifndef CONFIG_SEC_PERIPHERAL_SECURE_CHK
err_boot:
#endif
	pil_proxy_unvote(pil, proxy_timeout);
release_fw:
	release_firmware(fw);
out:
	up_read(&pil_pm_rwsem);
	return ret;
}
Example #9
File: spawn.c  Project: donguoxing/Lab3
// Spawn a child process from a program image.
// In Lab 4, the image is loaded from the kernel.
// In Lab 5, you will allow images to be loaded from the file system.
// prog: the name of the program to run.
// argv: pointer to null-terminated array of pointers to strings,
// 	 which will be passed to the child as its command-line arguments.
// Returns child envid on success, < 0 on failure.
envid_t
spawn(const char* progname, const char** argv)
{
	uint8_t elf_header_buf[512];
	ssize_t elf_size;
	struct Trapframe child_tf; 	/* Hint!! */

	// Insert your code, following approximately this procedure:
	//
	//   - Look up the program using sys_program_lookup.
	//     Return an error code if no such program exists.
	//
	envid_t child, envid;
	int pid, err;
	pid = sys_program_lookup(progname, strlen(progname));
	if(pid < 0)
		return pid;
	//   - Set 'elf_size' to the program's ELF binary size using
	//     sys_program_size.
	//
	elf_size = sys_program_size(pid);
	//   - Map the program's first page at UTEMP using the map_page helper.
	//
	envid = sys_getenvid();
	map_page(envid, pid, 0, (void *)UTEMP, PTE_U|PTE_P);

	//   - Copy the 512-byte ELF header from UTEMP into elf_header_buf.
	//
	memcpy(elf_header_buf, (void *)UTEMP, 512);
	//   - Read the ELF header, as you have before, and sanity check its
	//     magic number.  (Check out your load_elf for hints!)
	//
	struct Elf *elfbin = (struct Elf *)elf_header_buf;
	if (elfbin->e_magic != ELF_MAGIC)
		return -E_INVAL;
	//   - Use sys_exofork() to create a new environment.
	//
	child = sys_exofork();
	if (child < 0)
		panic("sys_exofork: %e", child);
	if (child == 0) {
		return 0;
	}

	//   - Set child_tf to an initial struct Trapframe for the child.
	//     Hint: The sys_exofork() system call has already created
	//     a good starting point.  It is accessible at
	//     envs[ENVX(child)].env_tf.
	//     Hint: You must do something with the program's entry point.
	//     What?  (See load_elf!)
	//
	child_tf = envs[ENVX(child)].env_tf;
	//child_tf.tf_regs = envs[ENVX(child)].env_tf.tf_regs;
	child_tf.tf_eip = elfbin->e_entry;
	//sys_env_set_trapframe(child, &child_tf);
	
	//   - Call the init_stack() function to set up the initial stack
	//     page for the child environment.
	//
	if((err = init_stack(child,argv,&(child_tf.tf_esp))) < 0){
		sys_env_destroy(child);
		return err;
	}
	//   - Map all of the program's segments that are of p_type
	//     ELF_PROG_LOAD into the new environment's address space.
	//     Use the load_segment() helper function below.
	//     All the 'struct Proghdr' structures will be accessible
	//     within the first 512 bytes of the ELF.
	//
	// load each program segment (ignores ph flags)
	struct Proghdr *ph, *eph;
    ph = (struct Proghdr *) ((uint8_t *) elfbin + elfbin->e_phoff);
	eph = ph + elfbin->e_phnum;
	for(; ph < eph; ph++){
		if(ph->p_type != ELF_PROG_LOAD){
			continue;
		}
		assert(ph->p_filesz <= ph->p_memsz);
		load_segment(child, pid, ph, elfbin, elf_size);
	}
	//   - Call sys_env_set_trapframe(child, &child_tf) to set up the
	//     correct initial eip and esp values in the child.
	//
	if((err = sys_env_set_trapframe(child, &child_tf)) < 0){
		sys_env_destroy(child);
		return err;
	}
	//   - Start the child process running with sys_env_set_status().
	///
	if((err = sys_env_set_status(child,ENV_RUNNABLE)) < 0){
		sys_env_destroy(child);
		return err;
	}
	return child;
}
Example #10
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands. If "map"==VM_MAP_NULL or
 * "thread"==THREAD_NULL, do not make permament VM modifications,
 * just preflight the parse.
 */
static
load_return_t
parse_machfile(
	struct vnode 		*vp,       
	vm_map_t		map,
	thread_t		thread,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	int			depth,
	int64_t			aslr_offset,
	load_result_t		*result
)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	struct uuid_command	*uulp = 0;
	integer_t		dlarchbits = 0;
	void *			control;
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	void *			kl_addr;
	vm_size_t		size,kl_size;
	size_t			offset;
	size_t			oldoffset;	/* for overflow check */
	int			pass;
	proc_t			p = current_proc();		/* XXXX */
	int			error;
	int resid=0;
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;
	boolean_t		got_code_signatures = FALSE;
	int64_t			slide = 0;

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
	    	mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 *	Break infinite recursion
	 */
	if (depth > 6) {
		return(LOAD_FAILURE);
	}

	depth++;

	/*
	 *	Check to see if right machine type.
	 */
	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
	    !grade_binary(header->cputype, 
	    	header->cpusubtype & ~CPU_SUBTYPE_MASK))
		return(LOAD_BADARCH);
		
	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
		
	switch (header->filetype) {
	
	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1) {
			return (LOAD_FAILURE);
		}
		break;
		
	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1) {
			return (LOAD_FAILURE);
		}
		break;

	case MH_DYLINKER:
		if (depth != 2) {
			return (LOAD_FAILURE);
		}
		break;
		
	default:
		return (LOAD_FAILURE);
	}

	/*
	 *	Get the pager for the file.
	 */
	control = ubc_getobject(vp, UBC_FLAGS_NONE);

	/*
	 *	Map portion that must be accessible directly into
	 *	kernel's map.
	 */
	if ((off_t)(mach_header_sz + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 *	Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(mach_header_sz + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = (caddr_t)kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);

	error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
	    UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p);
	if (error) {
		if (kl_addr )
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}

	/*
	 *	For PIE and dyld, slide everything by the ASLR offset.
	 */
	aslr_offset = 0;	/* this build deliberately disables ASLR before computing the slide */
	if ((header->flags & MH_PIE) || (header->filetype == MH_DYLINKER)) {
		slide = aslr_offset;
	}

	/*
	 *	Scan through the commands, processing each one as necessary.
	 */
	for (pass = 1; pass <= 3; pass++) {

		/*
		 * Check that the entry point is contained in an executable segment
		 */ 
		if ((pass == 3) && (result->validentry == 0)) {
			thread_state_initialize(thread);
			ret = LOAD_FAILURE;
			break;
		}

		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;

		while (ncmds--) {
			/*
			 *	Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;
			offset += lcp->cmdsize;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents.  Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (oldoffset > offset ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > header->sizeofcmds + mach_header_sz) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT:
			case LC_SEGMENT_64:
				if (pass != 2)
					break;
				ret = load_segment(lcp,
				    		   header->filetype,
						   control,
						   file_offset,
						   macho_size,
						   vp,
						   map,
						   slide,
						   result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 1)
					break;
				ret = load_unixthread(
						 (struct thread_command *) lcp,
						 thread,
						 slide,
						 result);
				break;
			case LC_MAIN:
				if (pass != 1)
					break;
				if (depth != 1)
					break;
				ret = load_main(
						 (struct entry_point_command *) lcp,
						 thread,
						 slide,
						 result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 3)
					break;
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
					dlarchbits = (header->cputype & CPU_ARCH_MASK);
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			case LC_UUID:
				if (pass == 1 && depth == 1) {
					uulp = (struct uuid_command *)lcp;
					memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
				}
				break;
			case LC_CODE_SIGNATURE:
				/* CODE SIGNING */
				if (pass != 1)
					break;
				/* pager -> uip ->
				   load signatures & store in uip
				   set VM object "signed_pages"
				*/
				ret = load_code_signature(
					(struct linkedit_data_command *) lcp,
					vp,
					file_offset,
					macho_size,
					header->cputype,
					(depth == 1) ? result : NULL);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: load code signature error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					ret = LOAD_SUCCESS; /* ignore error */
				} else {
					got_code_signatures = TRUE;
				}
				break;
#if CONFIG_CODE_DECRYPTION
#ifndef __arm__
			case LC_ENCRYPTION_INFO:
				if (pass != 3)
					break;
				ret = set_code_unprotect(
					(struct encryption_info_command *) lcp,
					addr, map, slide, vp);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: set_code_unprotect() error %d "
					       "for file \"%s\"\n",
					       p->p_pid, ret, vp->v_name);
					/* Don't let the app run if it's 
					 * encrypted but we failed to set up the
					 * decrypter */
					 psignal(p, SIGKILL);
				}
				break;
#endif
#endif
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
	if (ret == LOAD_SUCCESS) { 
	    if (! got_code_signatures) {
		    struct cs_blob *blob;
		    /* no embedded signatures: look for detached ones */
		    blob = ubc_cs_blob_get(vp, -1, file_offset);
		    if (blob != NULL) {
			    /* get flags to be applied to the process */
			    result->csflags |= blob->csb_flags;
		    }
	    }

		/* Make sure if we need dyld, we got it */
		if (result->needs_dynlinker && !dlp) {
			ret = LOAD_FAILURE;
		}

	    if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
		    /* load the dylinker, and always slide it by the ASLR
		     * offset regardless of PIE */
		    ret = load_dylinker(dlp, dlarchbits, map, thread, depth, aslr_offset, result);
	    }

	    if((ret == LOAD_SUCCESS) && (depth == 1)) {
			if (result->thread_count == 0) {
				ret = LOAD_FAILURE;
			}
	    }
	}

	if (kl_addr )
		kfree(kl_addr, kl_size);

	return(ret);
}
Example #11
/*
  Loads a Mach-O executable into memory, along with any threads, 
  stacks, and dylinker.
  Returns 0 on success, -1 on any failure.
  fd[offset..offset+size) is a Mach-O thin file.
  filetype is MH_EXECUTE or MH_DYLINKER.
  The mapped but empty stack is returned in *out_stack.
  The executable's Mach headers are returned in *out_text.
  The executable's entry point is returned in *out_entry.
  The dylinker's entry point (if any) is returned in *out_linker_entry.
  GrP fixme need to return whether dylinker was found - stack layout is different
*/
static int 
load_thin_file(int fd, vki_off_t offset, vki_off_t size, unsigned long filetype, 
               const char *filename, 
               vki_uint8_t **out_stack_start, vki_uint8_t **out_stack_end, 
               vki_uint8_t **out_text, vki_uint8_t **out_entry, vki_uint8_t **out_linker_entry)
{
   struct MACH_HEADER mh;
   vki_uint8_t *headers;
   vki_uint8_t *headers_end;
   struct load_command *lc;
   struct load_command *lcend;
   struct SEGMENT_COMMAND *segcmd;
   struct thread_command *threadcmd;
   struct dylinker_command *dycmd;
   int err;
   SysRes res;
   vki_size_t len;

   vki_uint8_t *stack_start = NULL;   // allocated thread stack (hot end)
   vki_uint8_t *stack_end = NULL;   // allocated thread stack (cold end)
   vki_uint8_t *entry = NULL;   // static entry point
   vki_uint8_t *text = NULL;    // start of text segment (i.e. the mach headers)
   vki_uint8_t *linker_entry = NULL; // dylinker entry point

   // Read Mach-O header
   if (sizeof(mh) > size) {
      print("bad executable (no Mach-O header)\n");
      return -1;
   }
   res = VG_(pread)(fd, &mh, sizeof(mh), offset);
   if (sr_isError(res)  ||  sr_Res(res) != sizeof(mh)) {
      print("bad executable (no Mach-O header)\n");
      return -1;
   }
   

   // Sanity-check the header itself
   if (mh.magic != MAGIC) {
      print("bad executable (no Mach-O magic)\n");
      return -1;
   }

   if (mh.filetype != filetype) {
      // expecting MH_EXECUTE or MH_DYLINKER
      print("bad executable (wrong file type)\n");
      return -1;
   }


   // Map all headers into memory
   len = sizeof(mh) + mh.sizeofcmds;
   if (len > size) {
      print("bad executable (missing load commands)\n");
      return -1;
   }

   headers = VG_(malloc)("ume.macho.headers", len);
   res = VG_(pread)(fd, headers, len, offset);
   if (sr_isError(res)) {
      print("couldn't read load commands from executable\n");
      return -1;
   }
   headers_end = headers + len;

   
   // Map some segments into client memory:
   // LC_SEGMENT    (text, data, etc)
   // UNIXSTACK     (stack)
   // LOAD_DYLINKER (dyld)
   lcend = (struct load_command *)(headers + mh.sizeofcmds + sizeof(mh));
   for (lc = (struct load_command *)(headers + sizeof(mh)); 
        lc < lcend; 
        lc = (struct load_command *)(lc->cmdsize + (vki_uint8_t *)lc))
   {
      if ((vki_uint8_t *)lc < headers  ||  
          lc->cmdsize+(vki_uint8_t *)lc > headers_end) {
          print("bad executable (invalid load commands)\n");
          return -1;
      }

      switch (lc->cmd) {
      case LC_SEGMENT_CMD:
         if (lc->cmdsize < sizeof(struct SEGMENT_COMMAND)) {
            print("bad executable (invalid load commands)\n");
            return -1;
         }
         segcmd = (struct SEGMENT_COMMAND *)lc;
         err = load_segment(fd, offset, size, &text, &stack_start, 
                            segcmd, filename);
         if (err) return -1;
          
         break;

      case LC_UNIXTHREAD:
         if (stack_end  ||  entry) {
            print("bad executable (multiple thread commands)\n");
            return -1;
         }
         if (lc->cmdsize < sizeof(struct thread_command)) {
            print("bad executable (invalid load commands)\n");
            return -1;
         }
         threadcmd = (struct thread_command *)lc;
         err = load_unixthread(&stack_start, &stack_end, &entry, threadcmd);
         if (err) return -1;
         break;

      case LC_LOAD_DYLINKER:
         if (filetype == MH_DYLINKER) {
            print("bad executable (dylinker needs a dylinker)\n");
            return -1;
         }
         if (linker_entry) {
            print("bad executable (multiple dylinker commands)\n");
            return -1;
         }
         if (lc->cmdsize < sizeof(struct dylinker_command)) {
            print("bad executable (invalid load commands)\n");
            return -1;
         }
         dycmd = (struct dylinker_command *)lc;
         err = load_dylinker(&linker_entry, dycmd);
         if (err) return -1;
         break;

      case LC_THREAD:
         if (filetype == MH_EXECUTE) {
            print("bad executable (stackless thread)\n");
            return -1;
         }
         if (stack_end  ||  entry) {
            print("bad executable (multiple thread commands)\n");
            return -1;
         }
         if (lc->cmdsize < sizeof(struct thread_command)) {
            print("bad executable (invalid load commands)\n");
            return -1;
         }
         threadcmd = (struct thread_command *)lc;
         err = load_thread(&entry, threadcmd);
         if (err) return -1;
         break;

      default:
         break;
      }
   }


   // Done with the headers
   VG_(free)(headers);

   if (filetype == MH_EXECUTE) {
      // Verify the necessary pieces for an executable:
      // a stack
      // a text segment
      // an entry point (static or linker)
      if (!stack_end || !stack_start) {
         print("bad executable (no stack)\n");
         return -1;
      }
      if (!text) {
         print("bad executable (no text segment)\n");
         return -1;
      }
      if (!entry  &&  !linker_entry) {
         print("bad executable (no entry point)\n");
         return -1;
      }
   }
   else if (filetype == MH_DYLINKER) {
      // Verify the necessary pieces for a dylinker:
      // an entry point
      if (!entry) {
         print("bad executable (no entry point)\n");
         return -1;
      }
   }

   if (out_stack_start) *out_stack_start = stack_start;
   if (out_stack_end) *out_stack_end = stack_end;
   if (out_text)  *out_text = text;
   if (out_entry) *out_entry = entry;
   if (out_linker_entry) *out_linker_entry = linker_entry;
   
   return 0;
}
Example #12
File: vm.cpp  Project: Reisen/Vitiate
int
main() {
    /*
     * ------------------------------------------------------------------------
     * Allocate VM Memory.
     *
     * First 4096 Bytes: VM Itself
     * Next 16384 Bytes: Instruction Cache and Shadow Instruction Cache
     * Final 8192 Bytes: VM Stack.
     * ------------------------------------------------------------------------
     */
    auto buffer = (uint8_t*)mmap
        ( nullptr
        , 28672
        , PROT_EXEC | PROT_READ | PROT_WRITE
        , MAP_PRIVATE | MAP_ANONYMOUS
        , -1
        , 0
        );
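
    // Illustrative addition (an assumption, not in the original project):
    // mmap reports failure via MAP_FAILED, so bail out before writing
    // through the pointer.
    if (buffer == (uint8_t*)MAP_FAILED) {
        perror("mmap");
        return 1;
    }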

    /* Initialize Virtual Machine. */
    load_segment("vm_a.bin", buffer);
    load_segment("cache.bin", buffer + 4096);

    /* Load, Create, and Scramble Instruction Index. */
    struct Instruction {
        size_t index;
        const char *name;

        bool operator==(Instruction const& other) const {
            return strncmp(name, other.name, strlen(name)) == 0;
        }
    };

    Instruction
    instruction_index[] = {
        #include "instruction_cache.hpp"
    };

    __builtin_printf("--- Shuffling Instructions--------------\n");
    Instruction
    shuffling_index[] = {
        #include "instruction_cache.hpp"
    };

    // Copy the Index to keep the original intact.
    std::copy(std::begin(instruction_index), std::end(instruction_index), std::begin(shuffling_index));

    // Randomly Shuffle the Index
    srand(time(0));
    std::random_shuffle
        ( std::begin(shuffling_index)
        , std::end(shuffling_index) - 1
        );

    // Keeps Track of Write Position in Shadow Instruction Cache.
    size_t shuffle_offset = 0;

    // Iterate Shuffled Index and Create that in the Shadow Cache.
    for(auto &entry : shuffling_index) {
        __builtin_printf("Index(%02d): %s\n", ((size_t)&entry - (size_t)shuffling_index) / sizeof(Instruction), entry.name);

        // Find instruction in the old index, in order to find its size.
        auto instruction = std::find
            ( std::begin(instruction_index)
            , std::end(instruction_index) - 1
            , entry
            );

        // Find size of instruction.
        signed difference = (instruction + 1)->index - instruction->index + 1;

        // Ignore the last dummy instruction (always has a negative offset)
        if(difference > 0) {
            __builtin_printf
                ( "Distance: (%d - %d) = %d\n"
                , (instruction + 1)->index
                , instruction->index
                , difference
                );

            // Write the instruction into the Shadow Cache at the next free offset.
            memcpy
                ( buffer + 8192 + shuffle_offset
                , buffer + 4096 + entry.index
                , difference
                );

            // Update the Index.
            entry.index = shuffle_offset;
            shuffle_offset += difference;

        }
    }
    __builtin_printf("----------------------------------------\n\n");

    /*
     * ------------------------------------------------------------------------
     * Display instruction confirmations.
     * ------------------------------------------------------------------------
     */
    for(auto &entry : instruction_index) {
        auto shuffled = std::find
            ( std::begin(shuffling_index)
            , std::end(shuffling_index)
            , entry
            );

        __builtin_printf
            ( "%12s: 0x%x 0x%x\n"
            , entry.name
            , *(buffer + 4096 + entry.index)
            , *(buffer + 8192 + shuffled->index)
            );
    }


    /*
     * ------------------------------------------------------------------------
     * Compiling VM assembly.
     * ------------------------------------------------------------------------
     */
    size_t program[] = {
        2,
        (size_t)buffer + 4096,
        (size_t)buffer + 4096,
        (size_t)buffer + 4096,
        0,
        (size_t)buffer + 4096,
        (size_t)buffer + 4096,
        (size_t)buffer + 4096,
        0
    };

    int result = Metamorph(buffer,program);
    __builtin_printf("Result: %d\n\n", result);
}
Example #13
File: load.c  Project: Drakenhielm/Pintos
/* Loads an ELF executable from FILE_NAME into the current thread.
   Stores the executable's entry point into *EIP
   and its initial stack pointer into *ESP.
   Returns true if successful, false otherwise. */
bool
load (const char *file_name, void (**eip) (void), void **esp) 
{
  struct thread *t = thread_current ();
  struct Elf32_Ehdr ehdr;
  struct file *file = NULL;
  off_t file_ofs;
  bool success = false;
  int i;

  /* Allocate and activate page directory. */
  t->pagedir = pagedir_create ();
  if (t->pagedir == NULL) 
    goto done;
  process_activate ();

  /* Set up stack. */
  if (!setup_stack (esp)){
    goto done;
  }

  /* Open executable file. */
  file = filesys_open (file_name);
  if (file == NULL) 
    {
      printf ("load: %s: open failed\n", file_name);
      goto done; 
    }

  /* Read and verify executable header. */
  if (file_read (file, &ehdr, sizeof ehdr) != sizeof ehdr
      || memcmp (ehdr.e_ident, "\177ELF\1\1\1", 7)
      || ehdr.e_type != 2
      || ehdr.e_machine != 3
      || ehdr.e_version != 1
      || ehdr.e_phentsize != sizeof (struct Elf32_Phdr)
      || ehdr.e_phnum > 1024) 
    {
      printf ("load: %s: error loading executable\n", file_name);
      goto done; 
    }

  /* Read program headers. */
  file_ofs = ehdr.e_phoff;
  for (i = 0; i < ehdr.e_phnum; i++) 
    {
      struct Elf32_Phdr phdr;

      if (file_ofs < 0 || file_ofs > file_length (file))
        goto done;
      file_seek (file, file_ofs);

      if (file_read (file, &phdr, sizeof phdr) != sizeof phdr)
        goto done;
      file_ofs += sizeof phdr;
      switch (phdr.p_type) 
        {
        case PT_NULL:
        case PT_NOTE:
        case PT_PHDR:
        case PT_STACK:
        default:
          /* Ignore this segment. */
          break;
        case PT_DYNAMIC:
        case PT_INTERP:
        case PT_SHLIB:
          goto done;
        case PT_LOAD:
          if (validate_segment (&phdr, file)) 
            {
              bool writable = (phdr.p_flags & PF_W) != 0;
              uint32_t file_page = phdr.p_offset & ~PGMASK;
              uint32_t mem_page = phdr.p_vaddr & ~PGMASK;
              uint32_t page_offset = phdr.p_vaddr & PGMASK;
              uint32_t read_bytes, zero_bytes;
              if (phdr.p_filesz > 0)
                {
                  /* Normal segment.
                     Read initial part from disk and zero the rest. */
                  read_bytes = page_offset + phdr.p_filesz;
                  zero_bytes = (ROUND_UP (page_offset + phdr.p_memsz, PGSIZE)
                                - read_bytes);
                }
              else 
                {
                  /* Entirely zero.
                     Don't read anything from disk. */
                  read_bytes = 0;
                  zero_bytes = ROUND_UP (page_offset + phdr.p_memsz, PGSIZE);
                }
              if (!load_segment (file, file_page, (void *) mem_page,
                                 read_bytes, zero_bytes, writable))
                goto done;
            }
          else
            goto done;
          break;
        }
    }

  /* Start address. */
  *eip = (void (*) (void)) ehdr.e_entry;

  success = true;

 done:
  /* We arrive here whether the load is successful or not. */
  file_close (file);
  return success;
}
Example #14
int locate_segments(int table, segment_t *segment_table, long int size_ptr, int *ptr)
{
	int start_key, i;
	int segment_num;
	char *compress_ptr;
	
	compress_ptr = memalign(ALIGN, 2*size_ptr*sizeof(int));
	
	for(segment_num=0; segment_num<NUM_SEGMENTS; segment_num++) {
		// Fill the segment
		#ifdef USE_FILE_DB
		if (table == R)
			load_segment(segment_table[segment_num].size, ptr, R_DB);
		if (table == S)
			load_segment(segment_table[segment_num].size, ptr, S_DB);
		#else
			if(segment_num == 0) {
				start_key = 0;
			}
			else {
				start_key = start_key + segment_table[segment_num-1].size;
			}
			gen_segment(table, segment_num, segment_table[segment_num].size, ptr, start_key);
			#ifdef CREATE_FILE_DB
				if (table == R)
					save_segment(segment_table[segment_num].size, ptr, R_DB);
				if (table == S)
					save_segment(segment_table[segment_num].size, ptr, S_DB);
			#endif
		#endif
		
		// Print the segment
		#ifdef USE_DEBUG_PRINT
			printf("\t\t");
			for(i=0; i<segment_table[segment_num].size; i++)
				printf(" (%d, %d)", ptr[db_index_key(segment_table[segment_num].size, i)], ptr[db_index_value(segment_table[segment_num].size, i)]);
			printf("\n");
		#endif
		
		// Compress the segment
		#ifdef USE_COMPRESS
			segment_table[segment_num].compress_size = size_ptr;
			segment_compress(ptr, 2*sizeof(int)*segment_table[segment_num].size, compress_ptr, &segment_table[segment_num].compress_size);
		#else
			segment_table[segment_num].compress_size = 2*sizeof(int)*segment_table[segment_num].size;
		#endif
		
		// Allocate memory for permanent storage of the segment
		segment_table[segment_num].ptr = (char *)memalign(ALIGN, segment_table[segment_num].compress_size+1);
		
		if(segment_table[segment_num].ptr == NULL) {
			printf("ERROR: could not allocate memmory for %dth segment!\n", segment_num);
			return -1;
		}
		
		// Copy the segment to its permanent storage location
		#ifdef USE_COMPRESS
			str_copy(compress_ptr, segment_table[segment_num].ptr, segment_table[segment_num].compress_size+1);
		#else
			str_copy(ptr, segment_table[segment_num].ptr, segment_table[segment_num].compress_size+1);
		#endif
		
		// Print the compressed segment
		#ifdef USE_DEBUG_PRINT
			printf("\t\t segment was compressed to %ld bytes (%s)\n", segment_table[segment_num].compress_size, segment_table[segment_num].ptr);
		#endif
	}
	
	free(compress_ptr);
	
	return 0;
}
Example #15
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s).  We are only interested in a subset of
 * the total set of possible commands.
 */
static
load_return_t
parse_machfile(
    struct vnode 		*vp,
    vm_map_t		map,
    thread_t		thread,
    struct mach_header	*header,
    off_t			file_offset,
    off_t			macho_size,
    int			depth,
    load_result_t		*result
)
{
    uint32_t		ncmds;
    struct load_command	*lcp;
    struct dylinker_command	*dlp = 0;
    integer_t		dlarchbits = 0;
    void *			pager;
    load_return_t		ret = LOAD_SUCCESS;
    caddr_t			addr;
    void *			kl_addr;
    vm_size_t		size,kl_size;
    size_t			offset;
    size_t			oldoffset;	/* for overflow check */
    int			pass;
    proc_t			p = current_proc();		/* XXXX */
    int			error;
    int resid=0;
    task_t task;
    size_t			mach_header_sz = sizeof(struct mach_header);
    boolean_t		abi64;
    boolean_t		got_code_signatures = FALSE;

    if (header->magic == MH_MAGIC_64 ||
            header->magic == MH_CIGAM_64) {
        mach_header_sz = sizeof(struct mach_header_64);
    }

    /*
     *	Break infinite recursion
     */
    if (depth > 6) {
        return(LOAD_FAILURE);
    }

    task = (task_t)get_threadtask(thread);

    depth++;

    /*
     *	Check to see if right machine type.
     */
    if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
            !grade_binary(header->cputype,
                          header->cpusubtype & ~CPU_SUBTYPE_MASK))
        return(LOAD_BADARCH);

    abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

    switch (header->filetype) {

    case MH_OBJECT:
    case MH_EXECUTE:
    case MH_PRELOAD:
        if (depth != 1) {
            return (LOAD_FAILURE);
        }
        break;

    case MH_FVMLIB:
    case MH_DYLIB:
        if (depth == 1) {
            return (LOAD_FAILURE);
        }
        break;

    case MH_DYLINKER:
        if (depth != 2) {
            return (LOAD_FAILURE);
        }
        break;

    default:
        return (LOAD_FAILURE);
    }

    /*
     *	Get the pager for the file.
     */
    pager = (void *) ubc_getpager(vp);

    /*
     *	Map portion that must be accessible directly into
     *	kernel's map.
     */
    if ((mach_header_sz + header->sizeofcmds) > macho_size)
        return(LOAD_BADMACHO);

    /*
     *	Round size of Mach-O commands up to page boundary.
     */
    size = round_page(mach_header_sz + header->sizeofcmds);
    if (size <= 0)
        return(LOAD_BADMACHO);

    /*
     * Map the load commands into kernel memory.
     */
    addr = 0;
    kl_size = size;
    kl_addr = kalloc(size);
    addr = (caddr_t)kl_addr;
    if (addr == NULL)
        return(LOAD_NOSPACE);

    error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
                    UIO_SYSSPACE32, 0, kauth_cred_get(), &resid, p);
    if (error) {
        if (kl_addr )
            kfree(kl_addr, kl_size);
        return(LOAD_IOERROR);
    }
    /* (void)ubc_map(vp, PROT_EXEC); */ /* NOT HERE */

    /*
     *	Scan through the commands, processing each one as necessary.
     */
    for (pass = 1; pass <= 2; pass++) {
        /*
         * Loop through each of the load_commands indicated by the
         * Mach-O header; if an absurd value is provided, we just
         * run off the end of the reserved section by incrementing
         * the offset too far, so we are implicitly fail-safe.
         */
        offset = mach_header_sz;
        ncmds = header->ncmds;
        while (ncmds--) {
            /*
             *	Get a pointer to the command.
             */
            lcp = (struct load_command *)(addr + offset);
            oldoffset = offset;
            offset += lcp->cmdsize;

            /*
             * Perform prevalidation of the struct load_command
             * before we attempt to use its contents.  Invalid
             * values are ones which result in an overflow, or
             * which can not possibly be valid commands, or which
             * straddle or exist past the reserved section at the
             * start of the image.
             */
            if (oldoffset > offset ||
                    lcp->cmdsize < sizeof(struct load_command) ||
                    offset > header->sizeofcmds + mach_header_sz) {
                ret = LOAD_BADMACHO;
                break;
            }

            /*
             * Act on struct load_command's for which kernel
             * intervention is required.
             */
            switch(lcp->cmd) {
            case LC_SEGMENT_64:
                if (pass != 1)
                    break;
                ret = load_segment_64(
                          (struct segment_command_64 *)lcp,
                          pager,
                          file_offset,
                          macho_size,
                          ubc_getsize(vp),
                          map,
                          result);
                break;
            case LC_SEGMENT:
                if (pass != 1)
                    break;
                ret = load_segment(
                          (struct segment_command *) lcp,
                          pager,
                          file_offset,
                          macho_size,
                          ubc_getsize(vp),
                          map,
                          result);
                break;
            case LC_THREAD:
                if (pass != 2)
                    break;
                ret = load_thread((struct thread_command *)lcp,
                                  thread,
                                  result);
                break;
            case LC_UNIXTHREAD:
                if (pass != 2)
                    break;
                ret = load_unixthread(
                          (struct thread_command *) lcp,
                          thread,
                          result);
                break;
            case LC_LOAD_DYLINKER:
                if (pass != 2)
                    break;
                if ((depth == 1) && (dlp == 0)) {
                    dlp = (struct dylinker_command *)lcp;
                    dlarchbits = (header->cputype & CPU_ARCH_MASK);
                } else {
                    ret = LOAD_FAILURE;
                }
                break;
            case LC_CODE_SIGNATURE:
                /* CODE SIGNING */
                if (pass != 2)
                    break;
                /* pager -> uip ->
                   load signatures & store in uip
                   set VM object "signed_pages"
                */
                ret = load_code_signature(
                          (struct linkedit_data_command *) lcp,
                          vp,
                          file_offset,
                          macho_size,
                          header->cputype,
                          (depth == 1) ? result : NULL);
                if (ret != LOAD_SUCCESS) {
                    printf("proc %d: load code signature error %d "
                           "for file \"%s\"\n",
                           p->p_pid, ret, vp->v_name);
                    ret = LOAD_SUCCESS; /* ignore error */
                } else {
                    got_code_signatures = TRUE;
                }
                break;
            default:
                /* Other commands are ignored by the kernel */
                ret = LOAD_SUCCESS;
                break;
            }
            if (ret != LOAD_SUCCESS)
                break;
        }
        if (ret != LOAD_SUCCESS)
            break;
    }
    if (ret == LOAD_SUCCESS) {
        if (! got_code_signatures) {
            struct cs_blob *blob;
            /* no embedded signatures: look for detached ones */
            blob = ubc_cs_blob_get(vp, -1, file_offset);
            if (blob != NULL) {
                /* get flags to be applied to the process */
                result->csflags |= blob->csb_flags;
            }
        }

        if (dlp != 0)
            ret = load_dylinker(dlp, dlarchbits, map, thread, depth, result, abi64);

        if(depth == 1) {
            if (result->thread_count == 0) {
                ret = LOAD_FAILURE;
            } else if ( abi64 ) {
#ifdef __ppc__
                /* Map in 64-bit commpage */
                /* LP64todo - make this clean */
                /*
                 * PPC51: ppc64 is limited to 51-bit addresses.
                 * Memory above that limit is handled specially
                 * at the pmap level.
                 */
                pmap_map_sharedpage(current_task(), get_map_pmap(map));
#endif /* __ppc__ */
            }
        }
    }

    if (kl_addr )
        kfree(kl_addr, kl_size);

    if (ret == LOAD_SUCCESS)
        (void)ubc_map(vp, PROT_READ | PROT_EXEC);

    return(ret);
}