NXStream *NXMapFile(const char *name, int mode) { int fd; char *buf; struct stat info; NXStream *s = NULL; fd = open(name, O_RDONLY, 0666); if (fd >= 0) { if (fstat(fd, &info) >= 0) { if (info.st_size > 0 || (info.st_mode & S_IFMT) == S_IFDIR) { if (map_fd(fd, 0, (vm_offset_t *)&buf, TRUE, (vm_size_t)info.st_size) == KERN_SUCCESS) { s = NXOpenMemory(buf, info.st_size, mode); s->flags &= ~NX_USER_OWNS_BUF; } } else { s = NXOpenMemory(NULL, 0, mode); } } if (close(fd) < 0) { NXCloseMemory(s, NX_FREEBUFFER); s = NULL; } } return s; }
/* These are my mach based versions, untested and probably bad ... */
/*
 * my_mmap -- mmap(2) emulation on top of the Mach map_fd()/vm_protect()
 * primitives.  NOTE(review): the emulation is approximate -- map_fd() is
 * always asked to find space, so the `addr` hint and any MAP_FIXED-style
 * `flags` are ignored; confirm callers do not depend on them.
 * Returns the mapped address, or (caddr_t)0 on failure.
 */
caddr_t my_mmap(caddr_t addr, size_t len, int prot, int flags, int fildes, off_t off)
{
    kern_return_t ret_val;

    /* First map ... */
    ret_val = map_fd(fildes,                /* fd */
                     (vm_offset_t)off,      /* offset */
                     (vm_offset_t *)&addr,  /* address (out) */
                     TRUE,                  /* find_space */
                     (vm_size_t)len);       /* size */
    if (ret_val != KERN_SUCCESS) {
        mach_error("Error calling map_fd() in mmap", ret_val);
        return (caddr_t)0;
    }

    /* ... then narrow the protection to what the caller asked for. */
    ret_val = vm_protect(task_self(),        /* target_task */
                         (vm_address_t)addr, /* address */
                         (vm_size_t)len,     /* size */
                         FALSE,              /* set_maximum */
                         (vm_prot_t)prot);   /* new_protection */
    if (ret_val != KERN_SUCCESS) {
        mach_error("vm_protect in mmap()", ret_val);
        /* BUG FIX: release the mapping established above instead of
         * leaking it on the protection-change failure path */
        vm_deallocate(task_self(), (vm_address_t)addr, (vm_size_t)len);
        return (caddr_t)0;
    }
    return addr;
}
static void *map_it( const char *path, int fd, void *map_at, size_t len ) { kern_return_t rc; vm_offset_t addr; addr = (vm_offset_t)map_at; rc = vm_allocate( task_self(), &addr, len, /* anywhere */ FALSE ); if (rc != KERN_SUCCESS) { mach_error( "vm_allocate", rc ); fprintf( stderr, "%s: could not map at %08lx\n", path, (unsigned long)map_at ); return NULL; } rc = map_fd( fd, 0, &addr, /*find_space*/ FALSE, len ); if (rc != KERN_SUCCESS) { mach_error( "map_fd", rc ); fprintf( stderr, "%s: could not map at %08lx\n", path, (unsigned long)map_at ); return NULL; } return (void *)addr; }
main(int argc, char **argv) { int c, fd, ofd, filesize; kern_return_t r; char *infile, *outfile, *memfile, *oldstring, *os, *newstring; struct stat statbuf; if (argc != 5) usage(); infile = argv[1]; outfile = argv[2]; fd = open(infile, O_RDONLY); if (fd < 0) { perror("open infile"); exit(1); } if (fstat(fd, &statbuf) < 0) { perror("stat infile"); exit(1); } ofd = open(outfile, O_TRUNC|O_RDWR|O_CREAT, 0644); if (ofd < 0) { perror("open outfile"); exit(1); } filesize = statbuf.st_size; oldstring = strFromQuotedStr(argv[3]); newstring = strFromQuotedStr(argv[4]); if (strlen(newstring) > strlen(oldstring)) { fprintf(stderr, "Warning: new string is bigger than old string.\n"); } r = map_fd(fd, (vm_offset_t)0, (vm_offset_t *)&memfile, TRUE, (vm_size_t)filesize); if (r != KERN_SUCCESS) { mach_error("Error calling map_fd()", r); exit(1); } else { os = (char *)strnstr(memfile, oldstring, filesize); if (os == NULL) { fprintf(stderr, "String not found\n"); exit(1); } while (*newstring) *os++ = *newstring++; *os++ = *newstring++; lseek(fd, 0, 0); c = write(ofd, memfile, filesize); if (c < filesize) { perror("write outfile"); exit(2); } exit(0); } }
rs_bool mapf_open( const char *path ) { kern_return_t rc; if (strncmp(path,"sect:",5) == 0) { char *s, name[1000]; strcpy( name, path+5 ); s = strchr( name, ':' ); *s++ = 0; image_file = -1; image_mapping_addr = (vm_address_t)getsectdata( name, s, (int *)&image_mapping_size ); if (!image_mapping_addr) { fprintf( stderr, "could not map segment %s, section %s\n", name, s ); return NO; } } else { image_file = open( path, O_RDONLY, 0 ); if (image_file < 0) { perror( path ); return NO; } image_mapping_size = lseek( image_file, 0L, SEEK_END ); rc = map_fd( image_file, (vm_offset_t)0, &image_mapping_addr, /* find_space */ TRUE, image_mapping_size ); if (rc != KERN_SUCCESS) { fprintf( stderr, "could not map file: %s\n", path ); close( image_file ); return NO; } } image_mapping_ptr = (void *)image_mapping_addr; return YES; }
/*
 * readFile -- map the regular file at `path` read-only into this task.
 * On success *objAddr/*objSize describe the mapping (caller is expected
 * to vm_deallocate it) and kKXKextManagerErrorNone is returned; on any
 * failure both are zeroed, a diagnostic is printed to stderr, and
 * kKXKextManagerErrorFileAccess is returned.
 */
__private_extern__ KXKextManagerError
readFile(const char *path, vm_offset_t *objAddr, vm_size_t *objSize)
{
    KXKextManagerError err = kKXKextManagerErrorFileAccess;
    int fd = -1;
    int saved_errno;
    struct stat stat_buf;

    *objAddr = 0;
    *objSize = 0;
    do {
        if ((fd = open(path, O_RDONLY)) == -1)
            continue;
        if (fstat(fd, &stat_buf) == -1)
            continue;
        /* BUG FIX: mask with S_IFMT; `st_mode & S_IFREG` also accepted
         * sockets, whose type code contains the S_IFREG bit */
        if ((stat_buf.st_mode & S_IFMT) != S_IFREG)
            continue;
        *objSize = stat_buf.st_size;
        if (KERN_SUCCESS != map_fd(fd, 0, objAddr, TRUE, *objSize)) {
            *objAddr = 0;
            *objSize = 0;
            continue;
        }
        err = kKXKextManagerErrorNone;
    } while (false);

    /* BUG FIX: capture errno before close() can overwrite it */
    saved_errno = errno;
    if (-1 != fd) {
        close(fd);
    }
    if (kKXKextManagerErrorNone != err) {
        fprintf(stderr, "couldn't read %s: %s\n", path, strerror(saved_errno));
    }
    return (err);
}
//int exec (struct replay_command* to_exec, int *exec_rvalue, int *pids_to_fd_pairs[]) { int exec (struct replay_command* to_exec, int *exec_rvalue, struct replay* rpl) { assert (to_exec != NULL); Parms* args = to_exec->params; int current_session_id = to_exec->session_id; switch (to_exec->command) { case MKDIR_OP: { *exec_rvalue = mkdir (args[0].argm->cprt_val, args[1].argm->i_val); } break; case STAT_OP: { struct stat sb; *exec_rvalue = stat(args[0].argm->cprt_val, &sb); } break; case OPEN_OP: { int replayed_fd = open (args[0].argm->cprt_val, args[1].argm->i_val, args[2].argm->i_val); *exec_rvalue = replayed_fd; if (rpl->session_enabled) { set_session_fd (current_session_id, replayed_fd, rpl); } else { int traced_fd = to_exec->expected_retval; if (traced_fd > 0) { map_fd (to_exec->caller->pid, traced_fd, replayed_fd, rpl->pids_to_fd_pairs); } } } break; case READ_OP: { int traced_fd = args[1].argm->i_val; int repl_fd = (rpl->session_enabled) ? session_fd (current_session_id, rpl) : replayed_fd (to_exec->caller->pid, traced_fd, rpl->pids_to_fd_pairs); //FIXME should share a big bufer to avoid malloc'ing time wasting ? int read_count = args[2].argm->i_val; char* buf = (char*) malloc (sizeof (char) * read_count); *exec_rvalue = read (repl_fd, buf, read_count); } break; case PREAD_OP: { //args": ["/local2/bigdata.dat", "22857", "4096", "902141250"] //ssize_t pread(int fd, void *buf, size_t count, off_t offset); int traced_fd = args[1].argm->i_val; int repl_fd = (rpl->session_enabled) ? session_fd (current_session_id, rpl) : replayed_fd (to_exec->caller->pid, traced_fd, rpl->pids_to_fd_pairs); //FIXME should share a big bufer to avoid malloc'ing time wasting ? int read_count = args[2].argm->i_val; char* buf = (char*) malloc (sizeof (char) * read_count); int offset = args[3].argm->i_val; *exec_rvalue = pread (repl_fd, buf, read_count, offset); } break; case PWRITE_OP: { int traced_fd = args[1].argm->i_val; int repl_fd = (rpl->session_enabled) ? 
session_fd (current_session_id, rpl) : replayed_fd (to_exec->caller->pid, traced_fd, rpl->pids_to_fd_pairs); //FIXME should share a big bufer to avoid malloc'ing time wasting ? int write_count = args[2].argm->i_val; char* buf = (char*) malloc (sizeof (char) * write_count); int offset = args[3].argm->i_val; *exec_rvalue = pwrite (repl_fd, buf, write_count, offset); } break; case WRITE_OP: { int traced_fd = args[1].argm->i_val; int repl_fd = (rpl->session_enabled) ? session_fd (current_session_id, rpl) : replayed_fd (to_exec->caller->pid, traced_fd, rpl->pids_to_fd_pairs); //FIXME should share a big bufer to avoid malloc'ing time wasting ? int write_count = args[2].argm->i_val; char* buf = (char*) malloc (sizeof (char) * write_count); *exec_rvalue = write (repl_fd, buf, write_count); } break; case CLOSE_OP: { int traced_fd = args[0].argm->i_val; int repl_fd = (rpl->session_enabled) ? session_fd (current_session_id, rpl) : replayed_fd (to_exec->caller->pid, traced_fd, rpl->pids_to_fd_pairs); *exec_rvalue = close (repl_fd); //FIXME should we set the fd mapping to something impossible as -1 //i think i this way we do not mask programming errors } break; case FSTAT_OP: { struct stat sb; int traced_fd = args[1].argm->i_val; int repl_fd = (rpl->session_enabled) ? session_fd (current_session_id, rpl) : replayed_fd (to_exec->caller->pid, traced_fd, rpl->pids_to_fd_pairs); *exec_rvalue = fstat(repl_fd, &sb); } break; case RMDIR_OP: { *exec_rvalue = unlink (args[0].argm->cprt_val); } break; case LLSEEK_OP: { int traced_fd = args[1].argm->i_val; int repl_fd = (rpl->session_enabled) ? session_fd (current_session_id, rpl) : replayed_fd (to_exec->caller->pid, traced_fd, rpl->pids_to_fd_pairs); long high = (long) args[2].argm->l_val; long low = (long) args[3].argm->l_val; off_t offset = (off_t) (high << 32) | low; int whence = args[3].argm->i_val; *exec_rvalue = lseek (repl_fd, offset, whence); } break; default: return -1; } return REPLAY_SUCCESS; }
NXStream *NXGetStreamOnSection(const char *fileName, const char *segmentName, const char *sectionName) { int fd; struct stat info; NXStream *s = NULL; struct fat_header *fh; struct mach_header *mh; const struct section *sect; vm_offset_t mh_page, sect_page; unsigned long archOffset; unsigned int cnt = HOST_BASIC_INFO_COUNT; struct host_basic_info hbi; if (host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)(&hbi), &cnt) != KERN_SUCCESS) return NULL; fd = open(fileName, O_RDONLY, 0444); if (fd < 0 || fstat(fd, &info) < 0) return NULL; if (((info.st_mode & S_IFMT) != S_IFREG) || (info.st_size < sizeof(*fh))) { close(fd); return NULL; } if (map_fd(fd, 0, (vm_offset_t *)&fh, TRUE, (vm_size_t)info.st_size) != KERN_SUCCESS) { close(fd); return NULL; } #ifdef __BIG_ENDIAN__ if (fh->magic == FAT_MAGIC) { #endif __BIG_ENDIAN__ #ifdef __LITTLE_ENDIAN__ if (fh->magic == NXSwapLong(FAT_MAGIC)) { #endif __LITTLE_ENDIAN__ int i; struct fat_arch *fa = (struct fat_arch*)(fh + 1); #ifdef __LITTLE_ENDIAN__ enum NXByteOrder host_byte_sex = NXHostByteOrder(); swap_fat_header(fh, host_byte_sex); #endif __LITTLE_ENDIAN__ if ((fh->nfat_arch <= 0) || (info.st_size < sizeof(*fh)+sizeof(*fa)*fh->nfat_arch)) { vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } #ifdef __LITTLE_ENDIAN__ swap_fat_arch(fa, fh->nfat_arch, host_byte_sex); #endif __LITTLE_ENDIAN__ for (i = 0; i < fh->nfat_arch; i++, fa++) { if (fa->cputype == hbi.cpu_type) { //**** ** check for best cpu_subtype here ** (fa->cpusubtype == hbi.cpu_subtype) break; // for now, accept all subtypes } } if (i >= fh->nfat_arch) { vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } archOffset = fa->offset; mh = (struct mach_header*)((char*)fh + archOffset); } else { archOffset = 0L; mh = (struct mach_header*)fh; } if ((info.st_size < archOffset + sizeof(*mh)) || (mh->magic != MH_MAGIC) || (mh->cputype != hbi.cpu_type) || (info.st_size < archOffset + 
sizeof(*mh) + mh->sizeofcmds) || !check_wellformed_header(mh, info.st_size - archOffset, NO)) { // bug#21223 vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } /* * Get the section data. */ sect = getsectbynamefromheader(mh, segmentName, sectionName); if (sect == NULL || sect->size == 0 || (info.st_size < archOffset + sect->offset + sect->size)) { vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } /* * Create the stream. */ s = NXOpenMemory((char *)mh + sect->offset, sect->size, NX_READONLY); s->flags &= ~NX_USER_OWNS_BUF; /* * Through away the parts of the file not needed. Assert that all * pages that the file lives on are used only by the file. */ sect_page = round_page((vm_offset_t)mh + sect->offset + sect->size); mh_page = round_page((vm_offset_t)fh + info.st_size); if (mh_page - sect_page) vm_deallocate(mach_task_self(), sect_page, mh_page - sect_page); mh_page = trunc_page((vm_offset_t)fh); sect_page = trunc_page((vm_offset_t)mh + sect->offset); if (sect_page - mh_page) vm_deallocate(mach_task_self(), mh_page, sect_page - mh_page); if (close(fd) < 0) { NXCloseMemory(s, NX_FREEBUFFER); s = NULL; } return s; } NXStream *NXGetStreamOnSectionForBestArchitecture( const char *fileName, const char *segmentName, const char *sectionName) { int fd; struct stat info; NXStream *s = NULL; struct fat_header *fh; struct mach_header *mh; const struct section *sect; vm_offset_t mh_page, sect_page; unsigned long archOffset; unsigned int cnt = HOST_BASIC_INFO_COUNT; struct host_basic_info hbi; int fSwap = NO; if (host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)(&hbi), &cnt) != KERN_SUCCESS) return NULL; fd = open(fileName, O_RDONLY, 0444); if (fd < 0 || fstat(fd, &info) < 0) return NULL; if (((info.st_mode & S_IFMT) != S_IFREG) || (info.st_size < sizeof(*fh))) { close(fd); return NULL; } if (map_fd(fd, 0, (vm_offset_t *)&fh, TRUE, (vm_size_t)info.st_size) != KERN_SUCCESS) { close(fd); return 
NULL; } #ifdef __BIG_ENDIAN__ if (fh->magic == FAT_MAGIC) { #endif __BIG_ENDIAN__ #ifdef __LITTLE_ENDIAN__ if (fh->magic == NXSwapLong(FAT_MAGIC)) { #endif __LITTLE_ENDIAN__ int i; struct fat_arch *fa = (struct fat_arch*)(fh + 1); #ifdef __LITTLE_ENDIAN__ enum NXByteOrder host_byte_sex = NXHostByteOrder(); swap_fat_header(fh, host_byte_sex); #endif __LITTLE_ENDIAN__ if ((fh->nfat_arch <= 0) || (info.st_size < sizeof(*fh)+sizeof(*fa)*fh->nfat_arch)) { vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } #ifdef __LITTLE_ENDIAN__ swap_fat_arch(fa, fh->nfat_arch, host_byte_sex); #endif __LITTLE_ENDIAN__ for (i = 0; i < fh->nfat_arch; i++, fa++) { if (fa->cputype == hbi.cpu_type) { //**** ** check for best cpu_subtype here ** (fa->cpusubtype == hbi.cpu_subtype) break; // for now, accept all subtypes } } if (i >= fh->nfat_arch) { /* * If do not have the correct cpu_type, just use the last type * in file. * NOTE: we could have a list passed in, and choose the best * based upon that list. */ fa--; } archOffset = fa->offset; mh = (struct mach_header*)((char*)fh + archOffset); } else { archOffset = 0L; mh = (struct mach_header*)fh; } if (info.st_size < archOffset + sizeof(*mh)) { vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } /* * Do we need to swap the header? Header is always in byte-order of machine it * was compiled for. */ if (mh->magic == NXSwapLong(MH_MAGIC)) { fSwap = YES; #ifdef __LITTLE_ENDIAN__ swap_mach_header(mh, NX_LittleEndian); #else swap_mach_header(mh, NX_BigEndian); #endif __LITTLE_ENDIAN__ } if ((mh->magic != MH_MAGIC) || (info.st_size < archOffset + sizeof(*mh) + mh->sizeofcmds) || !check_wellformed_header(mh, info.st_size - archOffset, fSwap)) { // bug#21223 vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } /* * Get the section data. 
*/ sect = getsectbynamefromheaderwithswap(mh, segmentName, sectionName, fSwap); if (sect == NULL || sect->size == 0 || (info.st_size < archOffset + sect->offset + sect->size)) { vm_deallocate(mach_task_self(), (vm_offset_t)fh, info.st_size); close(fd); return NULL; } /* * Create the stream. */ s = NXOpenMemory((char *)mh + sect->offset, sect->size, NX_READONLY); s->flags &= ~NX_USER_OWNS_BUF; /* * Through away the parts of the file not needed. Assert that all * pages that the file lives on are used only by the file. */ sect_page = round_page((vm_offset_t)mh + sect->offset + sect->size); mh_page = round_page((vm_offset_t)fh + info.st_size); if (mh_page - sect_page) vm_deallocate(mach_task_self(), sect_page, mh_page - sect_page); mh_page = trunc_page((vm_offset_t)fh); sect_page = trunc_page((vm_offset_t)mh + sect->offset); if (sect_page - mh_page) vm_deallocate(mach_task_self(), mh_page, sect_page - mh_page); if (close(fd) < 0) { NXCloseMemory(s, NX_FREEBUFFER); s = NULL; } return s; }
static int copy_file( const char *file, const char *copy) { int fd1, fd2, retval; char *buf; struct stat info; kern_return_t ret; fd1 = open(file, O_RDONLY, 0666); if (fd1 < 0) { perror("open"); return -1; } if (fstat(fd1, &info) < 0) { perror("fstat"); return -1; } if (info.st_size < 0 || (info.st_mode & S_IFMT) != S_IFREG) { return -1; } if ((ret = map_fd(fd1, 0, (vm_offset_t *)&buf, TRUE, info.st_size)) != KERN_SUCCESS) { mach_error("map_fd", ret); return -1; } fd2 = open(copy, O_WRONLY | O_CREAT, 0666); if (fd2 < 0) { perror("open"); return -1; } retval = write(fd2, buf, info.st_size); if (retval < 0) { perror("write"); return retval; } retval = fsync(fd2); if (retval < 0) { perror("fsync"); return retval; } retval = close(fd2); if (retval < 0) { perror("close"); return retval; } retval = close(fd1); if (retval < 0) { perror("close"); return retval; } ret = vm_deallocate(mach_task_self(), (vm_offset_t)buf, (vm_size_t)info.st_size); if (ret != KERN_SUCCESS) { mach_error("vm_deallocate", ret); return ret; } return 0; }
long do_mach_syscall(void *cpu_env, int num, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5, uint32_t arg6, uint32_t arg7, uint32_t arg8) { extern uint32_t mach_reply_port(); long ret = 0; arg1 = tswap32(arg1); arg2 = tswap32(arg2); arg3 = tswap32(arg3); arg4 = tswap32(arg4); arg5 = tswap32(arg5); arg6 = tswap32(arg6); arg7 = tswap32(arg7); arg8 = tswap32(arg8); DPRINTF("mach syscall %d : " , num); switch(num) { /* see xnu/osfmk/mach/syscall_sw.h */ case -26: DPRINTF("mach_reply_port()\n"); ret = mach_reply_port(); break; case -27: DPRINTF("mach_thread_self()\n"); ret = mach_thread_self(); break; case -28: DPRINTF("mach_task_self()\n"); ret = mach_task_self(); break; case -29: DPRINTF("mach_host_self()\n"); ret = mach_host_self(); break; case -31: DPRINTF("mach_msg_trap(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", arg1, arg2, arg3, arg4, arg5, arg6, arg7); ret = target_mach_msg_trap((mach_msg_header_t *)arg1, arg2, arg3, arg4, arg5, arg6, arg7); break; /* may need more translation if target arch is different from host */ #if (defined(TARGET_I386) && defined(__i386__)) || (defined(TARGET_PPC) && defined(__ppc__)) case -33: DPRINTF("semaphore_signal_trap(0x%x)\n", arg1); ret = semaphore_signal_trap(arg1); break; case -34: DPRINTF("semaphore_signal_all_trap(0x%x)\n", arg1); ret = semaphore_signal_all_trap(arg1); break; case -35: DPRINTF("semaphore_signal_thread_trap(0x%x)\n", arg1, arg2); ret = semaphore_signal_thread_trap(arg1,arg2); break; #endif case -36: DPRINTF("semaphore_wait_trap(0x%x)\n", arg1); extern int semaphore_wait_trap(int); // XXX: is there any header for that? 
ret = semaphore_wait_trap(arg1); break; /* may need more translation if target arch is different from host */ #if (defined(TARGET_I386) && defined(__i386__)) || (defined(TARGET_PPC) && defined(__ppc__)) case -37: DPRINTF("semaphore_wait_signal_trap(0x%x, 0x%x)\n", arg1, arg2); ret = semaphore_wait_signal_trap(arg1,arg2); break; #endif case -43: DPRINTF("map_fd(0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", arg1, arg2, arg3, arg4, arg5); ret = map_fd(arg1, arg2, (void*)arg3, arg4, arg5); tswap32s((uint32_t*)arg3); break; /* may need more translation if target arch is different from host */ #if (defined(TARGET_I386) && defined(__i386__)) || (defined(TARGET_PPC) && defined(__ppc__)) case -61: DPRINTF("syscall_thread_switch(0x%x, 0x%x, 0x%x)\n", arg1, arg2, arg3); ret = syscall_thread_switch(arg1, arg2, arg3); // just a hint to the scheduler; can drop? break; #endif case -89: DPRINTF("mach_timebase_info(0x%x)\n", arg1); struct mach_timebase_info info; ret = mach_timebase_info(&info); if(!is_error(ret)) { struct mach_timebase_info *outInfo = (void*)arg1; outInfo->numer = tswap32(info.numer); outInfo->denom = tswap32(info.denom); } break; case -90: DPRINTF("mach_wait_until()\n"); extern int mach_wait_until(uint64_t); // XXX: is there any header for that? ret = mach_wait_until(((uint64_t)arg2<<32) | (uint64_t)arg1); break; case -91: DPRINTF("mk_timer_create()\n"); extern int mk_timer_create(); // XXX: is there any header for that? ret = mk_timer_create(); break; case -92: DPRINTF("mk_timer_destroy()\n"); extern int mk_timer_destroy(int); // XXX: is there any header for that? ret = mk_timer_destroy(arg1); break; case -93: DPRINTF("mk_timer_create()\n"); extern int mk_timer_arm(int, uint64_t); // XXX: is there any header for that? ret = mk_timer_arm(arg1, ((uint64_t)arg3<<32) | (uint64_t)arg2); break; case -94: DPRINTF("mk_timer_cancel()\n"); extern int mk_timer_cancel(int, uint64_t *); // XXX: is there any header for that? 
ret = mk_timer_cancel(arg1, (uint64_t *)arg2); if((!is_error(ret)) && arg2) tswap64s((uint64_t *)arg2); break; default: gemu_log("qemu: Unsupported mach syscall: %d(0x%x)\n", num, num); gdb_handlesig (cpu_env, SIGTRAP); exit(0); break; } return ret; }