NXStream *NXMapFile(const char *name, int mode) { int fd; char *buf; struct stat info; NXStream *s = NULL; fd = open(name, O_RDONLY, 0666); if (fd >= 0) { if (fstat(fd, &info) >= 0) { if (info.st_size > 0 || (info.st_mode & S_IFMT) == S_IFDIR) { if (map_fd(fd, 0, (vm_offset_t *)&buf, TRUE, (vm_size_t)info.st_size) == KERN_SUCCESS) { s = NXOpenMemory(buf, info.st_size, mode); s->flags &= ~NX_USER_OWNS_BUF; } } else { s = NXOpenMemory(NULL, 0, mode); } } if (close(fd) < 0) { NXCloseMemory(s, NX_FREEBUFFER); s = NULL; } } return s; }
/*
 * OpenError -- produce a fresh, empty write-only memory stream.
 *
 * Used as a fall-back stream when no better destination exists.
 * May return NULL if NXOpenMemory() fails.
 */
static NXStream *
OpenError()
{
    NXStream *errStream;

    errStream = NXOpenMemory(NULL, 0, NX_WRITEONLY);
    return errStream;
}
int TclpDlopen( Tcl_Interp *interp, /* Used for error reporting. */ Tcl_Obj *pathPtr, /* Name of the file containing the desired * code (UTF-8). */ Tcl_LoadHandle *loadHandle, /* Filled with token for dynamically loaded * file which will be passed back to * (*unloadProcPtr)() to unload the file. */ Tcl_FSUnloadFileProc **unloadProcPtr) /* Filled with address of Tcl_FSUnloadFileProc * function which should be used for this * file. */ { struct mach_header *header; char *fileName; char *files[2]; CONST char *native; int result = 1; NXStream *errorStream = NXOpenMemory(0,0,NX_READWRITE); fileName = Tcl_GetString(pathPtr); /* * First try the full path the user gave us. This is particularly * important if the cwd is inside a vfs, and we are trying to load using a * relative path. */ native = Tcl_FSGetNativePath(pathPtr); files = {native,NULL}; result = rld_load(errorStream, &header, files, NULL); if (!result) { /* * Let the OS loader examine the binary search path for whatever * string the user gave us which hopefully refers to a file on the * binary path */ Tcl_DString ds; native = Tcl_UtfToExternalDString(NULL, fileName, -1, &ds); files = {native,NULL}; result = rld_load(errorStream, &header, files, NULL); Tcl_DStringFree(&ds); } if (!result) { char *data; int len, maxlen; NXGetMemoryBuffer(errorStream,&data,&len,&maxlen); Tcl_AppendResult(interp, "couldn't load file \"", fileName, "\": ", data, NULL); NXCloseMemory(errorStream, NX_FREEBUFFER); return TCL_ERROR; } NXCloseMemory(errorStream, NX_FREEBUFFER); *loadHandle = (Tcl_LoadHandle)1; /* A dummy non-NULL value */ *unloadProcPtr = &TclpUnloadFile; return TCL_OK; }
NXStream *NXGetStreamOnSection(const char *fileName, const char *segmentName, const char *sectionName) { int fd; struct stat info; NXStream *s = NULL; struct fat_header *fh; struct mach_header *mh; const struct section *sect; vm_offset_t mh_page, sect_page; unsigned long archOffset; unsigned int cnt = HOST_BASIC_INFO_COUNT; struct host_basic_info hbi; if (host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)(&hbi), &cnt) != KERN_SUCCESS) return NULL; fd = open(fileName, O_RDONLY, 0444); if (fd < 0 || fstat(fd, &info) < 0) return NULL; if (((info.st_mode & S_IFMT) != S_IFREG) || (info.st_size < sizeof(*fh))) { close(fd); return NULL; } if (map_fd(fd, 0, (vm_offset_t *)&fh, TRUE, (vm_size_t)info.st_size) != KERN_SUCCESS) { close(fd); return NULL; } #ifdef __BIG_ENDIAN__ if (fh->magic == FAT_MAGIC) { #endif __BIG_ENDIAN__ #ifdef __LITTLE_ENDIAN__ if (fh->magic == NXSwapLong(FAT_MAGIC)) { #endif __LITTLE_ENDIAN__ int i; struct fat_arch *fa = (struct fat_arch*)(fh + 1); #ifdef __LITTLE_ENDIAN__ enum NXByteOrder host_byte_sex = NXHostByteOrder(); swap_fat_header(fh, host_byte_sex); #endif __LITTLE_ENDIAN__ if ((fh->nfat_arch <= 0) || (info.st_size < sizeof(*fh)+sizeof(*fa)*fh->nfat_arch)) { vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } #ifdef __LITTLE_ENDIAN__ swap_fat_arch(fa, fh->nfat_arch, host_byte_sex); #endif __LITTLE_ENDIAN__ for (i = 0; i < fh->nfat_arch; i++, fa++) { if (fa->cputype == hbi.cpu_type) { //**** ** check for best cpu_subtype here ** (fa->cpusubtype == hbi.cpu_subtype) break; // for now, accept all subtypes } } if (i >= fh->nfat_arch) { vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } archOffset = fa->offset; mh = (struct mach_header*)((char*)fh + archOffset); } else { archOffset = 0L; mh = (struct mach_header*)fh; } if ((info.st_size < archOffset + sizeof(*mh)) || (mh->magic != MH_MAGIC) || (mh->cputype != hbi.cpu_type) || (info.st_size < archOffset + 
sizeof(*mh) + mh->sizeofcmds) || !check_wellformed_header(mh, info.st_size - archOffset, NO)) { // bug#21223 vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } /* * Get the section data. */ sect = getsectbynamefromheader(mh, segmentName, sectionName); if (sect == NULL || sect->size == 0 || (info.st_size < archOffset + sect->offset + sect->size)) { vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } /* * Create the stream. */ s = NXOpenMemory((char *)mh + sect->offset, sect->size, NX_READONLY); s->flags &= ~NX_USER_OWNS_BUF; /* * Through away the parts of the file not needed. Assert that all * pages that the file lives on are used only by the file. */ sect_page = round_page((vm_offset_t)mh + sect->offset + sect->size); mh_page = round_page((vm_offset_t)fh + info.st_size); if (mh_page - sect_page) vm_deallocate(mach_task_self(), sect_page, mh_page - sect_page); mh_page = trunc_page((vm_offset_t)fh); sect_page = trunc_page((vm_offset_t)mh + sect->offset); if (sect_page - mh_page) vm_deallocate(mach_task_self(), mh_page, sect_page - mh_page); if (close(fd) < 0) { NXCloseMemory(s, NX_FREEBUFFER); s = NULL; } return s; } NXStream *NXGetStreamOnSectionForBestArchitecture( const char *fileName, const char *segmentName, const char *sectionName) { int fd; struct stat info; NXStream *s = NULL; struct fat_header *fh; struct mach_header *mh; const struct section *sect; vm_offset_t mh_page, sect_page; unsigned long archOffset; unsigned int cnt = HOST_BASIC_INFO_COUNT; struct host_basic_info hbi; int fSwap = NO; if (host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)(&hbi), &cnt) != KERN_SUCCESS) return NULL; fd = open(fileName, O_RDONLY, 0444); if (fd < 0 || fstat(fd, &info) < 0) return NULL; if (((info.st_mode & S_IFMT) != S_IFREG) || (info.st_size < sizeof(*fh))) { close(fd); return NULL; } if (map_fd(fd, 0, (vm_offset_t *)&fh, TRUE, (vm_size_t)info.st_size) != KERN_SUCCESS) { close(fd); return 
NULL; } #ifdef __BIG_ENDIAN__ if (fh->magic == FAT_MAGIC) { #endif __BIG_ENDIAN__ #ifdef __LITTLE_ENDIAN__ if (fh->magic == NXSwapLong(FAT_MAGIC)) { #endif __LITTLE_ENDIAN__ int i; struct fat_arch *fa = (struct fat_arch*)(fh + 1); #ifdef __LITTLE_ENDIAN__ enum NXByteOrder host_byte_sex = NXHostByteOrder(); swap_fat_header(fh, host_byte_sex); #endif __LITTLE_ENDIAN__ if ((fh->nfat_arch <= 0) || (info.st_size < sizeof(*fh)+sizeof(*fa)*fh->nfat_arch)) { vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } #ifdef __LITTLE_ENDIAN__ swap_fat_arch(fa, fh->nfat_arch, host_byte_sex); #endif __LITTLE_ENDIAN__ for (i = 0; i < fh->nfat_arch; i++, fa++) { if (fa->cputype == hbi.cpu_type) { //**** ** check for best cpu_subtype here ** (fa->cpusubtype == hbi.cpu_subtype) break; // for now, accept all subtypes } } if (i >= fh->nfat_arch) { /* * If do not have the correct cpu_type, just use the last type * in file. * NOTE: we could have a list passed in, and choose the best * based upon that list. */ fa--; } archOffset = fa->offset; mh = (struct mach_header*)((char*)fh + archOffset); } else { archOffset = 0L; mh = (struct mach_header*)fh; } if (info.st_size < archOffset + sizeof(*mh)) { vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } /* * Do we need to swap the header? Header is always in byte-order of machine it * was compiled for. */ if (mh->magic == NXSwapLong(MH_MAGIC)) { fSwap = YES; #ifdef __LITTLE_ENDIAN__ swap_mach_header(mh, NX_LittleEndian); #else swap_mach_header(mh, NX_BigEndian); #endif __LITTLE_ENDIAN__ } if ((mh->magic != MH_MAGIC) || (info.st_size < archOffset + sizeof(*mh) + mh->sizeofcmds) || !check_wellformed_header(mh, info.st_size - archOffset, fSwap)) { // bug#21223 vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } /* * Get the section data. 
*/ sect = getsectbynamefromheaderwithswap(mh, segmentName, sectionName, fSwap); if (sect == NULL || sect->size == 0 || (info.st_size < archOffset + sect->offset + sect->size)) { vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } /* * Create the stream. */ s = NXOpenMemory((char *)mh + sect->offset, sect->size, NX_READONLY); s->flags &= ~NX_USER_OWNS_BUF; /* * Through away the parts of the file not needed. Assert that all * pages that the file lives on are used only by the file. */ sect_page = round_page((vm_offset_t)mh + sect->offset + sect->size); mh_page = round_page((vm_offset_t)fh + info.st_size); if (mh_page - sect_page) vm_deallocate(mach_task_self(), sect_page, mh_page - sect_page); mh_page = trunc_page((vm_offset_t)fh); sect_page = trunc_page((vm_offset_t)mh + sect->offset); if (sect_page - mh_page) vm_deallocate(mach_task_self(), mh_page, sect_page - mh_page); if (close(fd) < 0) { NXCloseMemory(s, NX_FREEBUFFER); s = NULL; } return s; }