Example #1
/*
 * This routine returns a pointer to the section structure of the named
 * section in the named segment if it exists in the currently executing
 * kernel, which it is presumed to be linked into.  Otherwise it returns NULL.
 */
kernel_section_t *
getsectbyname(
    const char *segname,
    const char *sectname)
{
	return(getsectbynamefromheader(
		(kernel_mach_header_t *)&_mh_execute_header, segname, sectname));
}
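A minimal caller sketch (not part of the original source): it assumes the kernel-side declarations above (kernel_section_t, getsectbyname) are in scope and simply logs where the running kernel's __TEXT,__text section sits.

/* Illustrative only: this hypothetical helper is not from XNU. */
static void
log_kernel_text_section(void)
{
	kernel_section_t *sect = getsectbyname("__TEXT", "__text");

	if (sect == NULL) {
		printf("__TEXT,__text not found in the running kernel\n");
		return;
	}
	printf("__TEXT,__text at 0x%llx, %llu bytes\n",
	    (unsigned long long)sect->addr, (unsigned long long)sect->size);
}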
Example #2
GSList*
mono_w32process_get_modules (pid_t pid)
{
	GSList *ret = NULL;
	MonoW32ProcessModule *mod;
	guint32 count;
	int i = 0;

	if (pid != getpid ())
		return NULL;

	count = _dyld_image_count ();
	for (i = 0; i < count; i++) {
#if SIZEOF_VOID_P == 8
		const struct mach_header_64 *hdr;
		const struct section_64 *sec;
#else
		const struct mach_header *hdr;
		const struct section *sec;
#endif
		const char *name;

		name = _dyld_get_image_name (i);
#if SIZEOF_VOID_P == 8
		hdr = (const struct mach_header_64*)_dyld_get_image_header (i);
		sec = getsectbynamefromheader_64 (hdr, SEG_DATA, SECT_DATA);
#else
		hdr = _dyld_get_image_header (i);
		sec = getsectbynamefromheader (hdr, SEG_DATA, SECT_DATA);
#endif

		/* Some dynlibs do not have data sections on osx (#533893) */
		if (sec == 0)
			continue;

		mod = g_new0 (MonoW32ProcessModule, 1);
		mod->address_start = GINT_TO_POINTER (sec->addr);
		mod->address_end = GINT_TO_POINTER (sec->addr+sec->size);
		mod->perms = g_strdup ("r--p");
		mod->address_offset = 0;
		mod->device = makedev (0, 0);
		mod->inode = i;
		mod->filename = g_strdup (name);

		if (g_slist_find_custom (ret, mod, mono_w32process_module_equals) == NULL) {
			ret = g_slist_prepend (ret, mod);
		} else {
			mono_w32process_module_free (mod);
		}
	}

	return g_slist_reverse (ret);
}
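A hypothetical consumer of the list built above, added for illustration: it relies only on the fields and helpers that already appear in the example plus standard GLib calls, and assumes the same Mono-internal headers are included.

/* Not part of Mono: list this process's modules, then release them. */
static void
dump_own_modules (void)
{
	GSList *mods, *l;

	mods = mono_w32process_get_modules (getpid ());
	for (l = mods; l != NULL; l = l->next) {
		MonoW32ProcessModule *mod = (MonoW32ProcessModule *) l->data;
		g_print ("%p-%p %s\n", mod->address_start, mod->address_end, mod->filename);
	}
	g_slist_free_full (mods, (GDestroyNotify) mono_w32process_module_free);
}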
Example #3
static void on_remove_image( const struct mach_header* h, intptr_t slide )
{
    int i;

    for( i = 0; i < NUM_DATA_SEGS; ++i )
    {
        const struct section* sect = getsectbynamefromheader( h,
                                        data_segs[i].seg,
                                        data_segs[i].sect );
        if( sect == NULL || sect->size == 0 )
            continue;
        gc_removeRange( (void*) sect->addr + slide );
    }
}
Example #4
const struct section *
getsectbyname(
    const char *segname,
    const char *sectname)
{
#ifndef __OPENSTEP__
    struct mach_header *mhp = _NSGetMachExecuteHeader();
#else /* defined(__OPENSTEP__) */
    static struct mach_header *mhp = NULL;
    DECLARE_VAR(_mh_execute_header, struct mach_header);
    SETUP_VAR(_mh_execute_header);
    mhp = (struct mach_header *)(& USE_VAR(_mh_execute_header));
#endif /* __OPENSTEP__ */
    return(getsectbynamefromheader(mhp, segname, sectname));
}
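An illustrative caller for this userland variant, assuming the 32-bit prototype shown above; on LP64 builds the section_64 flavour of the API applies instead.

#include <stdio.h>
#include <mach-o/loader.h>      /* SEG_TEXT, SECT_TEXT */
#include <mach-o/getsect.h>

int
main(void)
{
    /* Look up this executable's own __TEXT,__text section. */
    const struct section *sect = getsectbyname(SEG_TEXT, SECT_TEXT);

    if (sect == NULL) {
        fprintf(stderr, "__TEXT,__text not found\n");
        return 1;
    }
    printf("__TEXT,__text: addr 0x%lx, size %lu\n",
        (unsigned long)sect->addr, (unsigned long)sect->size);
    return 0;
}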
Example #5
// This routine returns a pointer to the data for the named section in the
// named segment if it exists in the mach header passed to it.  It also returns
// the size of the section data indirectly through the size pointer.  Otherwise
// it returns zero for both the pointer and the size.
char*
getsectdatafromheader(struct mach_header *mhp, const char *segname, const char *sectname, unsigned long *size)
{
	const struct section	*sp;

	//printk("getsectdatafromheader\n");
	sp = getsectbynamefromheader(mhp, segname, sectname);
	if(sp == (struct section*)0) {
		*size = 0;
		return((char*)0);
	}
	*size = sp->size;
	return( (char*)(sp->addr) );
}
Example #6
/*
 * This routine returns a pointer to the data for the named section in the
 * named segment if it exists in the mach header passed to it.  It also returns
 * the size of the section data indirectly through the size pointer.  Otherwise
 * it returns zero for both the pointer and the size.
 */
char *
getsectdatafromheader(
    struct mach_header *mhp,
    const char *segname,
    const char *sectname,
    unsigned long *size)
{
    const struct section *sp;

    sp = getsectbynamefromheader(mhp, segname, sectname);
    if(sp == NULL) {
        *size = 0;
        return(NULL);
    }
    *size = sp->size;
    return((char *)((uintptr_t)(sp->addr)));
}
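A caller sketch against the prototype defined above, for illustration only; note that the returned pointer is the vmaddr recorded in the load command, so for an ASLR-slid image the caller would still have to add the image's slide.

#include <stdio.h>
#include <mach-o/loader.h>      /* SEG_DATA, SECT_DATA */
#include <mach-o/ldsyms.h>      /* _mh_execute_header */

static void
dump_data_section(void)
{
    unsigned long size = 0;
    char *data = getsectdatafromheader(
        (struct mach_header *)&_mh_execute_header, SEG_DATA, SECT_DATA, &size);

    if (data == NULL)
        printf("__DATA,__data not present\n");
    else
        printf("__DATA,__data: %lu bytes at un-slid address %p\n", size, (void *)data);
}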
Example #7
static void on_add_image( const struct mach_header* h, intptr_t slide )
{
    int i;

    for( i = 0; i < NUM_DATA_SEGS; ++i )
    {
#ifdef __LP64__
        const struct section_64* sect = getsectbynamefromheader_64( h,
#else
        const struct section* sect = getsectbynamefromheader( h,
#endif
                                        data_segs[i].seg,
                                        data_segs[i].sect );
        if( sect == NULL || sect->size == 0 )
            continue;
        gc_addRange( (void*) sect->addr + slide, sect->size );
    }
}
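Callbacks with this signature, such as on_add_image above and on_remove_image in Example #3, are typically registered with dyld as sketched below; the two registration functions come from <mach-o/dyld.h>, while the initializer name is made up for the example.

#include <mach-o/dyld.h>

static void
gc_hook_dyld(void)
{
    /* dyld fires the add callback once for every image already loaded,
     * then again for each future load; the remove callback fires on unload. */
    _dyld_register_func_for_add_image(on_add_image);
    _dyld_register_func_for_remove_image(on_remove_image);
}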
Example #8
/*
 * This routine returns a pointer to the data for the named section in the
 * named segment if it exists in the mach header passed to it.  It also returns
 * the size of the section data indirectly through the size pointer.  Otherwise
 * it returns zero for both the pointer and the size.
 *
 * This routine can operate against any kernel mach header.
 */
void *
getsectdatafromheader(
    kernel_mach_header_t *mhp,
    const char *segname,
    const char *sectname,
    unsigned long *size)
{		
	const kernel_section_t *sp;
	void *result;

	sp = getsectbynamefromheader(mhp, segname, sectname);
	if(sp == (kernel_section_t *)0){
	    *size = 0;
	    return((char *)0);
	}
	*size = sp->size;
	result = (void *)sp->addr; 
	return result;
}
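An illustrative kext-side caller (not from XNU), reusing the kernel's own Mach-O header the same way Example #1 does.

static void
log_kernel_const_section(void)
{
	unsigned long size = 0;
	void *data = getsectdatafromheader(
	    (kernel_mach_header_t *)&_mh_execute_header, "__TEXT", "__const", &size);

	if (data != NULL)
		printf("__TEXT,__const: %lu bytes at %p\n", size, data);
}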
Example #9
/*
 * This routine returns a pointer to the data for the named section in the
 * named segment if it exists in the named Framework.  It also returns the size
 * of the section data indirectly through the size pointer.  Otherwise it
 * returns zero for the pointer and the size.  The last component of the path
 * of the Framework is passed as FrameworkName.
 */
void *
getsectdatafromFramework(
    const char *FrameworkName,
    const char *segname,
    const char *sectname,
    unsigned long *size)
{
    uint32_t i, n;
    uintptr_t vmaddr_slide;
#ifndef __LP64__
    struct mach_header *mh;
    const struct section *s;
#else /* defined(__LP64__) */
    struct mach_header_64 *mh;
    const struct section_64 *s;
#endif /* defined(__LP64__) */
    char *name, *p;

    n = _dyld_image_count();
    for(i = 0; i < n ; i++) {
        name = _dyld_get_image_name(i);
        p = strrchr(name, '/');
        if(p != NULL && p[1] != '\0')
            name = p + 1;
        if(strcmp(name, FrameworkName) != 0)
            continue;
        mh = _dyld_get_image_header(i);
        vmaddr_slide = _dyld_get_image_vmaddr_slide(i);
#ifndef __LP64__
        s = getsectbynamefromheader(mh, segname, sectname);
#else /* defined(__LP64__) */
        s = getsectbynamefromheader_64(mh, segname, sectname);
#endif /* defined(__LP64__) */
        if(s == NULL) {
            *size = 0;
            return(NULL);
        }
        *size = s->size;
        return((void *)(s->addr + vmaddr_slide));
    }
    *size = 0;
    return(NULL);
}
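A hedged usage sketch: the FrameworkName argument is just the last path component of an already-loaded image, so "AppKit" below is purely an example and the call only succeeds if that framework is in the process; headers are assumed as in the caller sketches above.

static void
probe_framework_text(void)
{
    unsigned long size = 0;
    void *data = getsectdatafromFramework("AppKit", "__TEXT", "__text", &size);

    if (data == NULL)
        printf("section not found (is the framework loaded into this process?)\n");
    else
        printf("AppKit __TEXT,__text: %lu bytes at %p\n", size, data);
}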
Example #10
/* A function called through the vtable when a particular module
   should be unloaded.  */
static int
vm_close (lt_user_data loader_data, lt_module module)
{
  int errors = 0;

  if (module != (lt_module) -1)
    {
      const mach_header *mh = (const mach_header *) module;
      int flags = 0;
      if (mh->magic == LT__MAGIC)
	{
	  lt_dlseterror (dyld_cannot_close);
	  ++errors;
	}
      else
	{
	  /* Currently, if a module contains c++ static destructors and it
	     is unloaded, we get a segfault in atexit() due to compiler and
	     dynamic loader differences of opinion; this works around that.  */
	  if ((const struct section *) NULL !=
	      getsectbynamefromheader (lt__nsmodule_get_header (module),
				       "__DATA", "__mod_term_func"))
	    {
	      flags |= NSUNLINKMODULE_OPTION_KEEP_MEMORY_MAPPED;
	    }
#if defined(__ppc__)
	  flags |= NSUNLINKMODULE_OPTION_RESET_LAZY_REFERENCES;
#endif
	  if (!NSUnLinkModule (module, flags))
	    {
	      DYLD__SETERROR (CANNOT_CLOSE);
	      ++errors;
	    }
	}
    }

  return errors;
}
Example #11
/*
 * _dyld_init() is the starting point for the dynamic link editor.  It is
 * called before any part of an executable program runs.  This is done either
 * in the executable runtime startoff or by the kernel as a result of an exec(2)
 * system call (which goes through __dyld_start to get here).
 *
 * This routine causes the dynamic shared libraries an executable uses to be
 * mapped, sets up the executable and the libraries to call the dynamic link
 * editor when a lazy reference to a symbol is first used, resolves all non-lazy
 * symbol references needed to start running the program and then returns to
 * the executable program to start up the program.
 */
unsigned long
_dyld_init(
struct mach_header *mh,
unsigned long argc,
char **argv,
char **envp)
{
    unsigned int count;
    kern_return_t r;
    unsigned long entry_point;
    mach_port_t my_mach_host_self;
#ifndef __MACH30__
    struct section *s;
#endif
#ifdef MALLOC_DEBUG
    extern void cthread_init(void);

	cthread_init();
#endif
	
	/* set lock for dyld data structures */
	set_lock();

	/*
	 * Get the cputype and cpusubtype of the machine we're running on.
	 */
	count = HOST_BASIC_INFO_COUNT;
	my_mach_host_self = mach_host_self();
	if((r = host_info(my_mach_host_self, HOST_BASIC_INFO, (host_info_t)
			  (&host_basic_info), &count)) != KERN_SUCCESS){
	    mach_port_deallocate(mach_task_self(), my_mach_host_self);
	    mach_error(r, "can't get host basic info");
	}
	mach_port_deallocate(mach_task_self(), my_mach_host_self);
#if defined(__GONZO_BUNSEN_BEAKER__) && defined(__ppc__)
	if(host_basic_info.cpu_type == CPU_TYPE_POWERPC &&
	   (host_basic_info.cpu_subtype == CPU_SUBTYPE_POWERPC_7400 ||
	    host_basic_info.cpu_subtype == CPU_SUBTYPE_POWERPC_7450 ||
	    host_basic_info.cpu_subtype == CPU_SUBTYPE_POWERPC_970))
	    processor_has_vec = TRUE;
#endif

	/*
	 * Pickup the environment variables for the dynamic link editor.
	 */
	pickup_environment_variables(envp);
	
	/*
	 * Make initial trace entry if requested.
	 */
	DYLD_TRACE_INIT_START(0);

	/*
	 * Create the executable's path from the exec_path and the current
	 * working directory (if needed).  If we did not pick up the exec_path
	 * (we are running with an old kernel) use argv[0] if it has a slash in it
	 * as it is a path relative to the current working directory.  Of course
	 * argv[0] may not have anything to do with the filename being executed
	 * in all cases but it is likely to be right.
	 */
	if(exec_path != NULL)
	    create_executables_path(exec_path);
	else if(strchr(argv[0], '/') != NULL)
	    create_executables_path(argv[0]);
	if(dyld_executable_path_debug == TRUE)
	    printf("executables_path = %s\n",
		   executables_path == NULL ? "NULL" : executables_path);

#ifdef DYLD_PROFILING
	s = (struct section *) getsectbynamefromheader(
	    &_mh_dylinker_header, SEG_TEXT, SECT_TEXT);
	monstartup((char *)(s->addr + dyld_image_vmaddr_slide),
		   (char *)(s->addr + dyld_image_vmaddr_slide + s->size));
#endif

#ifndef __MACH30__
	/*
	 * See if the profile server for shared pcsample buffers exists.
	 * Then if so try to setup a pcsample buffer for dyld itself.
	 */
	profile_server = profile_server_exists();
	if(profile_server == TRUE){
	    s = (struct section *) getsectbynamefromheader(
		&_mh_dylinker_header, SEG_TEXT, SECT_TEXT);
	    shared_pcsample_buffer("/usr/lib/dyld", s, dyld_image_vmaddr_slide);
	}
#endif /* __MACH30__ */

	/*
	 * Start off by loading the executable image as the first object image
	 * that make up the program.  This in turn will load the dynamic shared
	 * libraries the executable uses and the libraries those libraries use
	 * to the list of library images that make up the program.
	 */
	if((mh->flags & MH_FORCE_FLAT) != 0 ||
	   dyld_force_flat_namespace == TRUE)
	    force_flat_namespace = TRUE;
	if((mh->flags & MH_NOFIXPREBINDING) == MH_NOFIXPREBINDING)
	    dyld_no_fix_prebinding = TRUE;
	executable_prebound = (mh->flags & MH_PREBOUND) == MH_PREBOUND;
	load_executable_image(argv[0], mh, &entry_point);

	/*
	 * If the prebinding set is still set then try to setup this program to
	 * use the prebound state in its images.  If any of these fail then
	 * undo any prebinding and bind as usual.
	 */
	if((mh->flags & MH_PREBOUND) != MH_PREBOUND){
	    /*
	     * The executable is not prebound but if the libraries are setup
	     * for prebinding and the executable when built had no undefined
	     * symbols then try to use the prebound libraries.  This is for
	     * the flat namespace case (and only some sub cases, see the
	     * comments in try_to_use_prebound_libraries()).  If this fails
	     * then the two-level namespace cases are handled by the routine
	     * find_twolevel_prebound_lib_subtrees() which is called below.
	     */
	    if(prebinding == TRUE){
		if((mh->flags & MH_NOUNDEFS) == MH_NOUNDEFS){
		    try_to_use_prebound_libraries();
		}
		else{
		    if(dyld_prebind_debug != 0)
			print("dyld: %s: prebinding disabled because "
			       "executable not marked with MH_NOUNDEFS\n",
				argv[0]);
		    prebinding = FALSE;
		}
	    }
	}
	else if(prebinding == TRUE){
	    set_images_to_prebound();
	}
	if(prebinding == FALSE){
	    /*
	     * The program was not fully prebound but if we are not forcing
	     * flat namespace semantics we can still use any sub trees of
	     * libraries that are all two-level namespace and prebound.
	     */
	    if(force_flat_namespace == FALSE)
		find_twolevel_prebound_lib_subtrees();

	    /*
	     * First undo any images that were prebound.
	     */
	    undo_prebound_images(FALSE);

	    /*
	     * Build the initial list of non-lazy symbol references based on the
	     * executable.
	     */
	    if((mh->flags & MH_BINDATLOAD) != 0 || dyld_bind_at_launch == TRUE)
		executable_bind_at_load = TRUE;
	    setup_initial_undefined_list(FALSE);

	    /*
	     * With the undefined list set up link in the needed modules.
	     */
	    link_in_need_modules(FALSE, FALSE, NULL);
	}
	else{
	    if(dyld_prebind_debug != 0){
		if((mh->flags & MH_PREBOUND) != MH_PREBOUND)
		    print("dyld: %s: prebinding enabled using only prebound "
			   "libraries\n", argv[0]);
		else
		    print("dyld: %s: prebinding enabled\n", argv[0]);
	    }
	}
	/*
	 * Now with the program about to be launched set
	 * all_twolevel_modules_prebound to TRUE if all libraries are two-level,
	 * prebound and all modules in them are linked.
	 */
	set_all_twolevel_modules_prebound();
	launched = TRUE;

	/*
	 * If DYLD_EBADEXEC_ONLY is set then print a message as the program
	 * will launch.
	 */
	if(dyld_ebadexec_only == TRUE){
	    error("executable: %s will be launched (DYLD_EBADEXEC_ONLY set, "
		"program not started)", argv[0]);
	    link_edit_error(DYLD_FILE_ACCESS, EBADEXEC, argv[0]);
	}
	
	if(dyld_print_libraries_post_launch == TRUE)
	    dyld_print_libraries = TRUE;

	/* release lock for dyld data structures */
	release_lock();
	
	DYLD_TRACE_INIT_END(0);

	/*
	 * Return the address of the executable's entry point which is used if
	 * this routine was called from __dyld_start.  Otherwise this was called
	 * from the runtime startoff of the executable and this return value is
	 * ignored.
	 */
	return(entry_point);
}
Example #12
NXStream *NXGetStreamOnSection(const char *fileName, const char *segmentName, const char *sectionName)
{
    int             fd;
    struct stat     info;
    NXStream 	   *s = NULL;
    struct fat_header *fh;
    struct mach_header *mh;
    const struct section *sect;
    vm_offset_t mh_page, sect_page;
    unsigned long archOffset;
    unsigned int cnt = HOST_BASIC_INFO_COUNT;
    struct host_basic_info hbi;

    if (host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)(&hbi), &cnt) != KERN_SUCCESS)
      return NULL;

    fd = open(fileName, O_RDONLY, 0444);
    if (fd < 0 || fstat(fd, &info) < 0)
    	return NULL;

    if (((info.st_mode & S_IFMT) != S_IFREG) || (info.st_size < sizeof(*fh))) {
	close(fd);
	return NULL;
    }

    if (map_fd(fd, 0, (vm_offset_t *)&fh, TRUE, (vm_size_t)info.st_size) != KERN_SUCCESS) {
	close(fd);
	return NULL;
    }

#ifdef __BIG_ENDIAN__
    if (fh->magic == FAT_MAGIC) {
#endif /* __BIG_ENDIAN__ */
#ifdef __LITTLE_ENDIAN__
    if (fh->magic == NXSwapLong(FAT_MAGIC)) {
#endif /* __LITTLE_ENDIAN__ */
	int i;
	struct fat_arch *fa = (struct fat_arch*)(fh + 1);
#ifdef __LITTLE_ENDIAN__
	enum NXByteOrder host_byte_sex = NXHostByteOrder();
	swap_fat_header(fh, host_byte_sex);
#endif /* __LITTLE_ENDIAN__ */
	if ((fh->nfat_arch <= 0) || (info.st_size < sizeof(*fh)+sizeof(*fa)*fh->nfat_arch)) {
		vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size);
		close(fd);
		return NULL;
	}
#ifdef __LITTLE_ENDIAN__
	swap_fat_arch(fa, fh->nfat_arch, host_byte_sex);
#endif /* __LITTLE_ENDIAN__ */
	for (i = 0; i < fh->nfat_arch; i++, fa++) {
		if (fa->cputype == hbi.cpu_type) {
//****		** check for best cpu_subtype here ** (fa->cpusubtype == hbi.cpu_subtype)
			break;	// for now, accept all subtypes
		}
	}
	if (i >= fh->nfat_arch) {
		vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size);
		close(fd);
 		return NULL;
	}
	archOffset = fa->offset;
	mh = (struct mach_header*)((char*)fh + archOffset);
    } else {
	archOffset = 0L;
    	mh = (struct mach_header*)fh;
    }
    
    if ((info.st_size < archOffset + sizeof(*mh)) ||
	(mh->magic != MH_MAGIC) || (mh->cputype != hbi.cpu_type) ||
	(info.st_size < archOffset + sizeof(*mh) + mh->sizeofcmds) ||
	!check_wellformed_header(mh, info.st_size - archOffset, NO)) { // bug#21223
	vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size);
	close(fd);
 	return NULL;
    }

    /*
     * Get the section data.
     */
    sect = getsectbynamefromheader(mh, segmentName, sectionName);
    if (sect == NULL || sect->size == 0 ||
	(info.st_size < archOffset + sect->offset + sect->size)) {
	vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size);
	close(fd);
	return NULL;
    }

    /*
     * Create the stream.
     */
    s = NXOpenMemory((char *)mh + sect->offset, sect->size,
	NX_READONLY);
    s->flags &= ~NX_USER_OWNS_BUF;

    /*
     * Throw away the parts of the file not needed.  Assert that all
     * pages that the file lives on are used only by the file.
     */
    sect_page = round_page((vm_offset_t)mh + sect->offset + sect->size);
    mh_page = round_page((vm_offset_t)fh + info.st_size);
    if (mh_page - sect_page)
	vm_deallocate(mach_task_self(), sect_page, mh_page - sect_page);

    mh_page = trunc_page((vm_offset_t)fh);
    sect_page = trunc_page((vm_offset_t)mh + sect->offset);
    if (sect_page - mh_page)
	vm_deallocate(mach_task_self(), mh_page, sect_page - mh_page);

    if (close(fd) < 0) {
	NXCloseMemory(s, NX_FREEBUFFER);
	s = NULL;
    }

    return s;
}



NXStream *NXGetStreamOnSectionForBestArchitecture(
	const char *fileName,
	const char *segmentName,
	const char *sectionName)
{
    int             fd;
    struct stat     info;
    NXStream 	   *s = NULL;
    struct fat_header *fh;
    struct mach_header *mh;
    const struct section *sect;
    vm_offset_t mh_page, sect_page;
    unsigned long archOffset;
    unsigned int cnt = HOST_BASIC_INFO_COUNT;
    struct host_basic_info hbi;
    int fSwap = NO;

    if (host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)(&hbi), &cnt) != KERN_SUCCESS)
      return NULL;

    fd = open(fileName, O_RDONLY, 0444);
    if (fd < 0 || fstat(fd, &info) < 0)
    	return NULL;

    if (((info.st_mode & S_IFMT) != S_IFREG) || (info.st_size < sizeof(*fh))) {
	close(fd);
	return NULL;
    }

    if (map_fd(fd, 0, (vm_offset_t *)&fh, TRUE, (vm_size_t)info.st_size) != KERN_SUCCESS) {
	close(fd);
	return NULL;
    }

#ifdef __BIG_ENDIAN__
    if (fh->magic == FAT_MAGIC) {
#endif /* __BIG_ENDIAN__ */
#ifdef __LITTLE_ENDIAN__
    if (fh->magic == NXSwapLong(FAT_MAGIC)) {
#endif /* __LITTLE_ENDIAN__ */
	int i;
	struct fat_arch *fa = (struct fat_arch*)(fh + 1);
#ifdef __LITTLE_ENDIAN__
	enum NXByteOrder host_byte_sex = NXHostByteOrder();
	swap_fat_header(fh, host_byte_sex);
#endif /* __LITTLE_ENDIAN__ */
	if ((fh->nfat_arch <= 0) || (info.st_size < sizeof(*fh)+sizeof(*fa)*fh->nfat_arch)) {
	    vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size);
	    close(fd);
	    return NULL;
	}
#ifdef __LITTLE_ENDIAN__
	swap_fat_arch(fa, fh->nfat_arch, host_byte_sex);
#endif /* __LITTLE_ENDIAN__ */
	for (i = 0; i < fh->nfat_arch; i++, fa++) {
	    if (fa->cputype == hbi.cpu_type) {
//****		** check for best cpu_subtype here ** (fa->cpusubtype == hbi.cpu_subtype)
		break;	// for now, accept all subtypes
	    }
	}
	if (i >= fh->nfat_arch) {
	    /*
	     * If do not have the correct cpu_type, just use the last type
	     * in file.
	     * NOTE: we could have a list passed in, and choose the best
	     *       based upon that list.
	     */
	    fa--;
	}
	archOffset = fa->offset;
	mh = (struct mach_header*)((char*)fh + archOffset);
    } else {
	archOffset = 0L;
    	mh = (struct mach_header*)fh;
    }
    
    if (info.st_size < archOffset + sizeof(*mh)) {
	vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size);
	close(fd);
 	return NULL;
    }
    
    /* 
     * Do we need to swap the header?  Header is always in byte-order of machine it
     * was compiled for.
     */
    if (mh->magic == NXSwapLong(MH_MAGIC)) {
	fSwap = YES;
#ifdef __LITTLE_ENDIAN__
	swap_mach_header(mh, NX_LittleEndian);
#else
	swap_mach_header(mh, NX_BigEndian);
#endif /* __LITTLE_ENDIAN__ */
    }

    
    if ((mh->magic != MH_MAGIC) ||
	(info.st_size < archOffset + sizeof(*mh) + mh->sizeofcmds) ||
	!check_wellformed_header(mh, info.st_size - archOffset, fSwap)) { // bug#21223
	vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size);
	close(fd);
 	return NULL;
    }

    /*
     * Get the section data.
     */
    sect = getsectbynamefromheaderwithswap(mh, segmentName, sectionName, fSwap);
    if (sect == NULL || sect->size == 0 ||
	(info.st_size < archOffset + sect->offset + sect->size)) {
	vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size);
	close(fd);
	return NULL;
    }

    /*
     * Create the stream.
     */
    s = NXOpenMemory((char *)mh + sect->offset, sect->size, NX_READONLY);
    s->flags &= ~NX_USER_OWNS_BUF;

    /*
     * Throw away the parts of the file not needed.  Assert that all
     * pages that the file lives on are used only by the file.
     */
    sect_page = round_page((vm_offset_t)mh + sect->offset + sect->size);
    mh_page = round_page((vm_offset_t)fh + info.st_size);
    if (mh_page - sect_page)
	vm_deallocate(mach_task_self(), sect_page, mh_page - sect_page);

    mh_page = trunc_page((vm_offset_t)fh);
    sect_page = trunc_page((vm_offset_t)mh + sect->offset);
    if (sect_page - mh_page)
	vm_deallocate(mach_task_self(), mh_page, sect_page - mh_page);

    if (close(fd) < 0) {
	NXCloseMemory(s, NX_FREEBUFFER);
	s = NULL;
    }

    return s;
}
Example #13
primitiveExecutableModulesAndOffsets(void)
{
    const struct mach_header *h;
    const struct mach_header_64 *h64;
    sqInt i;
    const char *name;
    char *nameObjData;
    sqInt nimages;
    sqInt resultObj;
    const struct section *s;
    const struct section_64 *s64;
    usqIntptr_t size;
    usqIntptr_t slide;
    usqIntptr_t start;
    sqInt valueObj;

	
#  if MAC_OS_X_VERSION_MIN_REQUIRED <= MAC_OS_X_VERSION_10_4

	/* _dyld_present was deprecated in 10.5 */
	if (!(_dyld_present())) {
		return primitiveFail();
	}

#  endif /* MAC_OS_X_VERSION_MIN_REQUIRED <= MAC_OS_X_VERSION_10_4 */

	nimages = _dyld_image_count();
	resultObj = instantiateClassindexableSize(classArray(), nimages * 4);
	if (resultObj == 0) {
		return primitiveFail();
	}
	pushRemappableOop(resultObj);
	for (i = 0; i < nimages; i += 1) {

		/* impossible start & size */
		start = (size = -1);
		name = _dyld_get_image_name(i);
		slide = _dyld_get_image_vmaddr_slide(i);
		
#    if __x86_64__
		h64 = (const struct mach_header_64 *)_dyld_get_image_header(i);
		if (!(h64 == null)) {
			s64 = getsectbynamefromheader_64(h64,SEG_TEXT,SECT_TEXT);
			if (!(s64 == null)) {
				start = s64->addr;
				size = s64->size;
			}
		}

#    else /* __x86_64__ */
		h = _dyld_get_image_header(i);
		if (!(h == null)) {
			s = getsectbynamefromheader(h,SEG_TEXT,SECT_TEXT);
			if (!(s == null)) {
				start = s->addr;
				size = s->size;
			}
		}

#    endif /* __x86_64__ */

		valueObj = instantiateClassindexableSize(classString(), strlen(name));
		if (failed()) {
			popRemappableOop();
			return primitiveFail();
		}
		storePointerofObjectwithValue(i * 4, topRemappableOop(), valueObj);
		nameObjData = arrayValueOf(valueObj);
		memcpy(nameObjData, name, strlen(name));
		valueObj = (BytesPerWord == 8
			? signed64BitIntegerFor(slide)
			: signed32BitIntegerFor(slide));
		if (failed()) {
			popRemappableOop();
			return primitiveFail();
		}
		storePointerofObjectwithValue((i * 4) + 1, topRemappableOop(), valueObj);
		/* begin positiveMachineIntegerFor: */
		valueObj = (BytesPerWord == 8
			? positive64BitIntegerFor(start)
			: positive32BitIntegerFor(start));
		if (failed()) {
			popRemappableOop();
			return primitiveFail();
		}
		storePointerofObjectwithValue((i * 4) + 2, topRemappableOop(), valueObj);
		/* begin positiveMachineIntegerFor: */
		valueObj = (BytesPerWord == 8
			? positive64BitIntegerFor(size)
			: positive32BitIntegerFor(size));
		if (failed()) {
			popRemappableOop();
			return primitiveFail();
		}
		storePointerofObjectwithValue((i * 4) + 3, topRemappableOop(), valueObj);
	}
	resultObj = popRemappableOop();
	return popthenPush(1, resultObj);
}
primitiveExecutableModulesAndOffsets(void) {
    const struct mach_header *h;
    sqInt i;
    const char *name;
    char *nameObjData;
    sqInt nimages;
    sqInt present;
    sqInt resultObj;
    const struct section *s;
    unsigned long size;
    sqInt slide;
    unsigned long start;
    sqInt valueObj;

	present = _dyld_present();
	if (!(present)) {
		return interpreterProxy->primitiveFail();
	}
	nimages = _dyld_image_count();
	resultObj = interpreterProxy->instantiateClassindexableSize(interpreterProxy->classArray(), nimages * 4);
	if (resultObj == 0) {
		return interpreterProxy->primitiveFail();
	}
	interpreterProxy->pushRemappableOop(resultObj);
	for (i = 0; i <= (nimages - 1); i += 1) {

		/* impossible start & size */

		start = size = -1;
		name = _dyld_get_image_name(i);
		slide = _dyld_get_image_vmaddr_slide(i);
		h = _dyld_get_image_header(i);
		if (h != null) {
			s = getsectbynamefromheader(h,SEG_TEXT,SECT_TEXT);
			if (s != null) {
				start = s->addr;
				size = s->size;
			}
		}
		valueObj = interpreterProxy->instantiateClassindexableSize(interpreterProxy->classString(), strlen(name));
		if (interpreterProxy->failed()) {
			interpreterProxy->pop(1);
			return interpreterProxy->primitiveFail();
		}
		interpreterProxy->storePointerofObjectwithValue(i * 4, interpreterProxy->topRemappableOop(), valueObj);
		nameObjData = interpreterProxy->arrayValueOf(valueObj);
		memcpy(nameObjData, name, strlen(name));
		valueObj = interpreterProxy->signed32BitIntegerFor(slide);
		if (interpreterProxy->failed()) {
			interpreterProxy->pop(1);
			return interpreterProxy->primitiveFail();
		}
		interpreterProxy->storePointerofObjectwithValue((i * 4) + 1, interpreterProxy->topRemappableOop(), valueObj);
		valueObj = interpreterProxy->positive32BitIntegerFor(start);
		if (interpreterProxy->failed()) {
			interpreterProxy->pop(1);
			return interpreterProxy->primitiveFail();
		}
		interpreterProxy->storePointerofObjectwithValue((i * 4) + 2, interpreterProxy->topRemappableOop(), valueObj);
		valueObj = interpreterProxy->positive32BitIntegerFor(size);
		if (interpreterProxy->failed()) {
			interpreterProxy->pop(1);
			return interpreterProxy->primitiveFail();
		}
		interpreterProxy->storePointerofObjectwithValue((i * 4) + 3, interpreterProxy->topRemappableOop(), valueObj);
	}
	resultObj = interpreterProxy->popRemappableOop();
	return interpreterProxy->popthenPush(1, resultObj);
}