/* Perform a KDP connect transaction on connection C, recording the
   session key on success.

   Returns RR_SUCCESS when connected, RR_CONNECT when the remote stub
   rejected the connection, or the transaction's own error code.  */
kdp_return_t
kdp_connect (kdp_connection *c)
{
  kdp_return_t ret;

  CHECK_FATAL (kdp_is_bound (c));
  CHECK_FATAL (! kdp_is_connected (c));

  /* Do a connect transaction.  */
  c->request->connect_req.hdr.request = KDP_CONNECT;
  c->request->connect_req.req_reply_port = c->reqport;
  c->request->connect_req.exc_note_port = c->excport;
  /* Send an empty greeting; ensure the fixed-size buffer is
     NUL-terminated.  */
  strncpy (c->request->connect_req.greeting, "", 64);
  c->request->connect_req.greeting[63] = '\0';

  ret = kdp_transaction (c, c->request, c->response, "remote_connect");
  if (ret != RR_SUCCESS)
    {
      return ret;
    }

  /* Bug fix: examine the reply through the connect_reply member, the
     same member used in the log message below (the original read
     writemem_reply.error here).  */
  if (c->response->connect_reply.error)
    {
      c->logger (KDP_LOG_ERROR, "kdp_connect: %s\n",
                 kdp_return_string (c->response->connect_reply.error));
      return RR_CONNECT;
    }

  c->session_key = c->response->hdr.key;
  c->connected = 1;

  return RR_SUCCESS;
}
// Initialise the multi-threaded CPU backend: split the global work size
// (gszx x gszy) into work-groups of local size (lszx x lszy) and spawn one
// worker thread per group (gn = gnx * gny threads in total).
// NOTE(review): assumes the local sizes divide the global sizes exactly;
// integer division silently truncates otherwise -- TODO confirm callers.
void mt_cpu_mt_init(int _gszx,int _gszy,int _lszx,int _lszy)
{
    int i,j,s;  // NOTE(review): 's' is declared but never used.
    printf("mt_cpu_mt_init, hw concurency %d\n",std::thread::hardware_concurrency());
    gszx=_gszx;gszy=_gszy;
    lszx=_lszx;lszy=_lszy;
    // Number of groups per dimension and total group/thread count.
    gnx=gszx/lszx;gny=gszy/lszy;
    gn=gnx*gny;
    printf("mt_cpu_mt_init, gsz(%d,%d) lsz(%d,%d) gn(%d,%d)=%d\n",gszx,gszy,lszx,lszy,gnx,gny,gn);
    // Barrier sized to the full thread count; workers rendezvous on it.
    sBarrier=new SpinningBarrier(gn);
    JSB::nwait_=0;
    JSB::step_=0;
    // All workers start in the "not running" state.
    for(i=0;i<gn;i++){
        thread_run[i].store(0);
        // step_run[i].store(0);
    }
    // Launch one processing thread per (group-x, group-y) pair; gifs[]
    // carries each worker's 2-D group id.
    for(i=0;i<gnx;i++) for(j=0;j<gny;j++){
        gifs[i*gny+j].id0=i;
        gifs[i*gny+j].id1=j;
        ths[i*gny+j]=new std::thread(processing_thread,&(gifs[i*gny+j]));
        // NOTE(review): operator new throws std::bad_alloc on failure
        // rather than returning NULL, so this check can never fire.
        CHECK_FATAL(ths[i*gny+j]==NULL,"std::thread failed.");
    } // for
}
/* Reply to the Mach exception message in EREQUEST, reporting retcode
   KERN_SUCCESS so the kernel considers the exception handled.  The
   inferior must already belong to INFERIOR (checked below).  */
static void
next_exception_internal_reply (struct next_inferior_status *inferior,
                               struct next_exception_data *erequest)
{
  kern_return_t ret;
  struct next_exception_reply ereply;

  CHECK_FATAL (erequest->task == inferior->task);

  /* Build the reply header.  The reply's msg_id is the request's
     msg_id plus 100, and it goes back to the request's remote port.  */
  ereply.header.msg_unused = 0;
  ereply.header.msg_simple = 1;
  ereply.header.msg_size = sizeof (struct next_exception_reply);
  ereply.header.msg_type = erequest->header.msg_type;
  ereply.header.msg_local_port = PORT_NULL;
  ereply.header.msg_remote_port = erequest->header.msg_remote_port;
  ereply.header.msg_id = erequest->header.msg_id + 100;

  /* Describe the single inline 32-bit integer carried by the reply.  */
  ereply.retcode_type.msg_type_name = MSG_TYPE_INTEGER_32;
  ereply.retcode_type.msg_type_size = sizeof (int) * 8;
  ereply.retcode_type.msg_type_number = 1;
  ereply.retcode_type.msg_type_inline = 1;
  ereply.retcode_type.msg_type_longform = 0;
  ereply.retcode_type.msg_type_deallocate = 0;
  ereply.retcode_type.msg_type_unused = 0;

  ereply.retcode = KERN_SUCCESS;

  inferior_debug ("sending exception reply\n");

  /* A send failure is only warned about, not fatal.  */
  ret = msg_send (&ereply.header, MSG_OPTION_NONE, 0);
  MACH_WARN_ERROR (ret);
}
static void info_mach_ports_command (char *args, int from_tty) { port_name_array_t port_names_data; port_type_array_t port_types_data; unsigned int name_count, type_count; kern_return_t result; int index; task_t task; CHECK_ARGS ("Task", args); sscanf (args, "0x%x", &task); result = port_names (task, &port_names_data, &name_count, &port_types_data, &type_count); MACH_CHECK_ERROR (result); CHECK_FATAL (name_count == type_count); printf_unfiltered ("Ports for task %#x:\n", task); for (index = 0; index < name_count; ++index) { printf_unfiltered ("port name: %#x, type %#x\n", port_names_data[index], port_types_data[index]); } vm_deallocate (task_self (), (vm_address_t) port_names_data, (name_count * sizeof (mach_port_t))); vm_deallocate (task_self (), (vm_address_t) port_types_data, (type_count * sizeof (mach_port_type_t))); }
// Exercise Ardb::Keys glob matching: create one key of every value type
// under the "my" prefix, then match with two different patterns.
void test_keys(Ardb& db)
{
    DBID dbid = 0;
    db.HSet(dbid, "myhash_v0", "field", "100");
    db.SAdd(dbid, "myset_v0", "field");
    db.LPush(dbid, "mylist", "122");
    db.ZAdd(dbid, "myzset", ValueData((int64) 3), "v0");
    db.Set(dbid, "mykey", "12312");

    StringArray found;
    // "my*" should match all five keys created above.
    db.Keys(dbid, "my*", "", 100, found);
    CHECK_FATAL(found.size() < 5, "keys my* size error:%zu", found.size());

    // "*set*" should match at least the set and sorted-set keys.
    found.clear();
    db.Keys(dbid, "*set*", "", 100, found);
    CHECK_FATAL(found.size() < 2, "keys *set* size error:%zu", found.size());
}
// Release the mutex, decrementing the recursion/lock counter first
// (the mirror image of lock(), which increments after acquiring).
void zMutex::unlock()
{
    --_lockedCount;
#if defined(_WIN32)
    LeaveMutex((LPCRITICAL_SECTION) _cs);
#else
    const int rc = pthread_mutex_unlock(&_mutex);
    CHECK_FATAL(rc, "pthread_mutex_unlock");
#endif
}
void zMutex::lock() { #if defined(_WIN32) EnterMutex((LPCRITICAL_SECTION)_cs); #else int res = pthread_mutex_lock(&_mutex); CHECK_FATAL(res, "pthread_mutex_lock"); #endif _lockedCount++; }
// Tear down the mutex; sync() runs first to settle any outstanding
// holders before the underlying primitive is destroyed.
zMutex::~zMutex()
{
    sync();
#if defined(_WIN32)
    DeleteMutex((LPCRITICAL_SECTION) _cs);
    delete (CRITICAL_SECTION*) _cs;
#else
    const int rc = pthread_mutex_destroy(&_mutex);
    CHECK_FATAL(rc, "pthread_mutex_destroy");
#endif
}
/* Translate the Mach exception MESSAGE received for INFERIOR into a gdb
   wait STATUS.  Exceptions belonging to another task (e.g. a forked
   child) are forwarded unmodified and leave STATUS untouched.  */
void
next_handle_exception (struct next_inferior_status *inferior,
                       msg_header_t *message,
                       struct target_waitstatus *status)
{
  next_exception_data *msg = (next_exception_data *) message;

  CHECK_FATAL (inferior != NULL);
  next_debug_exception (msg);

  if (msg->task != inferior->task)
    {
      inferior_debug ("ignoring exception forwarded from subprocess\n");
      next_exception_forwarded_reply (inferior, msg);
      return;
    }

  inferior->last_thread = msg->thread;

  if (inferior_handle_exceptions_flag)
    {
      /* Suspend the inferior and acknowledge the exception before
         reporting the stop to gdb.  */
      next_inferior_suspend_mach (inferior);
      next_exception_internal_reply (inferior, msg);
      status->kind = TARGET_WAITKIND_STOPPED;
      switch (msg->exception)
        {
        case EXC_BAD_ACCESS:
          status->value.sig = TARGET_EXC_BAD_ACCESS;
          break;
        case EXC_BAD_INSTRUCTION:
          status->value.sig = TARGET_EXC_BAD_INSTRUCTION;
          break;
        case EXC_ARITHMETIC:
          status->value.sig = TARGET_EXC_ARITHMETIC;
          break;
        case EXC_EMULATION:
          status->value.sig = TARGET_EXC_EMULATION;
          break;
        case EXC_SOFTWARE:
          status->value.sig = TARGET_EXC_SOFTWARE;
          break;
        case EXC_BREAKPOINT:
          /* Bug fix: the original assigned TARGET_EXC_BREAKPOINT and then
             immediately overwrote it with TARGET_SIGNAL_TRAP; the dead
             store is removed.  Breakpoints are reported as SIGTRAP.  */
          status->value.sig = TARGET_SIGNAL_TRAP;
          break;
        default:
          status->value.sig = TARGET_SIGNAL_UNKNOWN;
          break;
        }
    }
  else
    {
      /* Not handling exceptions ourselves: pass it along.  */
      next_exception_forwarded_reply (inferior, msg);
    }
}
/* Body of the signal-wait thread.  Loops forever calling waitpid on the
   inferior pid and forwarding each (pid, status) pair to the debugger
   through the pipe set up by next_signal_thread_create.  The thread
   exits only via pthread cancellation (pthread_testcancel below).  */
static void
next_signal_thread (void *arg)
{
  next_signal_thread_status *s = (next_signal_thread_status *) arg;
  CHECK_FATAL (s != NULL);

  for (;;)
    {
      next_signal_thread_message msg;
      WAITSTATUS status = 0;
      pid_t pid = 0;

      /* Cancellation point: allows the debugger to shut this thread
         down between waits.  */
      pthread_testcancel ();

      sigthread_debug_re ("next_signal_thread: waiting for signals for pid %d\n",
                          s->inferior_pid);
      pid = waitpid (s->inferior_pid, &status, 0);
      sigthread_debug_re ("next_signal_thread: done waiting for signals for pid %d\n",
                          s->inferior_pid);

      /* No children left: spin (yielding) until we are cancelled.  */
      if ((pid < 0) && (errno == ECHILD))
        {
          sigthread_debug_re ("next_signal_thread: no children present: waiting for parent\n");
          for (;;)
            {
              pthread_testcancel ();
              sched_yield ();
            }
        }

      /* Interrupted by a signal: just retry the wait.  */
      if ((pid < 0) && (errno == EINTR))
        {
          sigthread_debug_re ("next_signal_thread: wait interrupted; continuing\n");
          continue;
        }

      /* Any other waitpid failure is unrecoverable.  */
      if (pid < 0)
        {
          fprintf (sigthread_stderr_re,
                   "next_signal_thread: unexpected error: %s\n",
                   strerror (errno));
          abort ();
        }

      if (sigthread_debugflag)
        {
          sigthread_debug_re ("next_signal_thread: got status %d for pid %d (expected inferior is %d)\n",
                              status, pid, s->inferior_pid);
          sigthread_debug_re ("next_signal_thread: got signal ");
          next_signal_thread_debug_status (sigthread_stderr_re, status);
        }

      /* We only ever wait on the inferior; any other pid is a bug.  */
      if (pid != s->inferior_pid)
        {
          fprintf (sigthread_stderr_re,
                   "next_signal_thread: got status value %d for unexpected pid %d\n",
                   status, pid);
          abort ();
        }

      msg.pid = pid;
      msg.status = status;

      /* NOTE(review): the write result is ignored; a short write or
         EPIPE here would silently drop the status -- confirm whether
         the reader side guarantees this cannot happen.  */
      write (s->transmit_fd, &msg, sizeof (msg));
    }
}
/* Acknowledge the exception packet RESPONSE on connection C by echoing
   its sequence number and session key back to the remote stub.  */
static kdp_return_t
kdp_exception_reply (kdp_connection *c, kdp_pkt_t *response)
{
  kdp_exception_ack_t ack;
  kdp_return_t kdpret;

  CHECK_FATAL (kdp_is_connected (c));
  CHECK_FATAL (kdp_is_bound (c));

  ack.hdr.request = KDP_EXCEPTION;
  ack.hdr.is_reply = 1;
  ack.hdr.seq = response->exception.hdr.seq;
  ack.hdr.key = response->exception.hdr.key;

  kdpret = kdp_transmit_exception (c, (kdp_pkt_t *) &ack);
  if (kdpret != RR_SUCCESS)
    c->logger (KDP_LOG_ERROR,
               "kdp_exception_reply: unable to acknowledge exception: %s\n",
               kdp_return_string (kdpret));

  return kdpret;
}
// Construct a recursive mutex.  On POSIX every pthread call is checked
// fatally; on Windows a critical section with a spin count is used.
zMutex::zMutex(void)
{
    _lockedCount = 0;
    _isDestroying = false;
#if defined(_WIN32)
    _cs = new CRITICAL_SECTION;
    InitializeMutexAndSpinCount((LPCRITICAL_SECTION) _cs, 500);
#else
    pthread_mutexattr_t attr;
    int rc = pthread_mutexattr_init(&attr);
    CHECK_FATAL(rc, "pthread_mutexattr_init");
    // Recursive so the same thread may lock() multiple times.
    rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    CHECK_FATAL(rc, "pthread_mutexattr_settype PTHREAD_MUTEX_RECURSIVE");
    rc = pthread_mutex_init(&_mutex, &attr);
    CHECK_FATAL(rc, "pthread_mutex_init");
    // The attribute object is no longer needed once the mutex exists.
    rc = pthread_mutexattr_destroy(&attr);
    CHECK_FATAL(rc, "pthread_mutexattr_destroy");
#endif
}
/* Start the signal-wait thread for PID.  A pipe carries wait statuses
   from the thread (transmit end) back to the debugger (receive end).  */
void
next_signal_thread_create (next_signal_thread_status *s, int pid)
{
  int pipe_fds[2];
  int result;

  result = pipe (pipe_fds);
  CHECK_FATAL (result == 0);

  s->receive_fd = pipe_fds[0];
  s->transmit_fd = pipe_fds[1];
  s->inferior_pid = pid;

  s->signal_thread =
    gdb_thread_fork ((gdb_thread_fn_t) &next_signal_thread, s);
}
/* Tear down the KDP session on connection C.  The connection is marked
   disconnected only if the transaction succeeded.  */
kdp_return_t
kdp_disconnect (kdp_connection *c)
{
  kdp_return_t ret;

  CHECK_FATAL (kdp_is_connected (c));

  c->request->disconnect_req.hdr.request = KDP_DISCONNECT;

  ret = kdp_transaction (c, c->request, c->response, "kdp_disconnect");
  if (ret == RR_SUCCESS)
    c->connected = 0;

  return ret;
}
/* Return nonzero if THREAD is still among TASK's threads, zero
   otherwise (including when the task itself has gone away).  */
int
macosx_thread_valid (task_t task, thread_t thread)
{
  thread_array_t thread_list;
  unsigned int thread_count = 0;
  unsigned int found = 0;
  unsigned int i;
  kern_return_t kret;

  CHECK_FATAL (task != TASK_NULL);

  kret = task_threads (task, &thread_list, &thread_count);
#ifdef DEBUG_MACOSX_MUTILS
  mutils_debug ("macosx_thread_valid - task_threads (%d, %p, %d) returned 0x%lx\n",
                task, &thread_list, thread_count, kret);
#endif
  /* A dead or inaccessible task answers with one of these errors;
     treat the thread as no longer valid rather than failing hard.  */
  if ((kret == KERN_INVALID_ARGUMENT)
      || (kret == MACH_SEND_INVALID_RIGHT)
      || (kret == MACH_RCV_INVALID_NAME))
    return 0;
  MACH_CHECK_ERROR (kret);

  for (i = 0; i < thread_count; i++)
    if (thread_list[i] == thread)
      found = 1;

  /* task_threads allocated the list in our address space; free it.  */
  kret = vm_deallocate (mach_task_self (), (vm_address_t) thread_list,
                        (vm_size_t) (thread_count * sizeof (thread_t)));
  MACH_CHECK_ERROR (kret);

#ifdef DEBUG_MACOSX_MUTILS
  if (!found)
    {
      mutils_debug ("thread 0x%lx no longer valid for task 0x%lx\n",
                    (unsigned long) thread, (unsigned long) task);
    }
#endif
  return found;
}
/* Ask the remote KDP stub to re-attach using this connection's request
   reply port; marks the connection as connected on success.  */
kdp_return_t
kdp_reattach (kdp_connection *c)
{
  kdp_return_t ret;

  CHECK_FATAL (!kdp_is_connected (c));

  c->request->reattach_req.hdr.request = KDP_REATTACH;
  c->request->reattach_req.req_reply_port = c->reqport;

  ret = kdp_transaction (c, c->request, c->response, "kdp_reattach");
  if (ret == RR_SUCCESS)
    c->connected = 1;

  return ret;
}
/* Ask the remote host to reboot.  Only a single transmit attempt is
   made (the target will not be around to answer retries), and the
   connection is marked disconnected whatever the outcome.  */
kdp_return_t
kdp_hostreboot (kdp_connection *c)
{
  kdp_return_t ret;

  CHECK_FATAL (kdp_is_connected (c));

  c->request->hostreboot_req.hdr.request = KDP_HOSTREBOOT;
  c->retries = 1;

  ret = kdp_transaction (c, c->request, c->response, "kdp_hostreboot");
  c->connected = 0;

  return ret;
}
// Verify Ardb::Type classifies every value kind correctly: create one
// key per kind and immediately check its reported meta type.
void test_type(Ardb& db)
{
    DBID dbid = 0;

    db.SAdd(dbid, "myset", "123");
    CHECK_FATAL(db.Type(dbid, "myset") != SET_META, "type failed.");

    db.LPush(dbid, "mylist", "value0");
    CHECK_FATAL(db.Type(dbid, "mylist") != LIST_META, "type failed.");

    db.ZAdd(dbid, "myzset1", ValueData((int64) 1), "one");
    CHECK_FATAL(db.Type(dbid, "myzset1") != ZSET_META, "type failed.");

    db.HSet(dbid, "myhash", "field1", "value1");
    CHECK_FATAL(db.Type(dbid, "myhash") != HASH_META, "type failed.");

    db.Set(dbid, "skey", "abc");
    CHECK_FATAL(db.Type(dbid, "skey") != STRING_META, "type failed.");

    db.SetBit(dbid, "mybits", 1, 1);
    CHECK_FATAL(db.Type(dbid, "mybits") != BITSET_META, "type failed.");
}
/* Transfer LEN bytes (all within one page) between MYADDR and MEMADDR
   in TASK.  Reads copy out of a vm_read snapshot of the containing
   page; writes go straight through mach_vm_write.  Returns LEN on
   success, 0 on any failure.  */
static int
mach_xfer_memory_remainder (CORE_ADDR memaddr, char *myaddr,
                            int len, int write, task_t task)
{
  vm_size_t pagesize = child_get_pagesize ();
  vm_offset_t mempointer;       /* local copy of inferior's memory */
  mach_msg_type_number_t memcopied;     /* for vm_read to use */
  CORE_ADDR pageaddr = memaddr - (memaddr % pagesize);
  kern_return_t kret;

  /* The whole [memaddr, memaddr+len) range must lie in the page that
     starts at pageaddr.  */
  CHECK_FATAL (((memaddr + len - 1) - ((memaddr + len - 1) % pagesize))
               == pageaddr);

  if (!write)
    {
      /* Read a snapshot of the whole containing page into our address
         space, then copy the requested slice out of it.  */
      kret = mach_vm_read (task, pageaddr, pagesize, &mempointer, &memcopied);
      if (kret != KERN_SUCCESS)
        {
#ifdef DEBUG_MACOSX_MUTILS
          mutils_debug ("Unable to read page for region at 0x%8.8llx with length %lu from inferior: %s (0x%lx)\n",
                        (uint64_t) pageaddr, (unsigned long) len,
                        MACH_ERROR_STRING (kret), kret);
#endif
          return 0;
        }
      if (memcopied != pagesize)
        {
          /* Partial read: free whatever was delivered and fail.  */
          kret = vm_deallocate (mach_task_self (), mempointer, memcopied);
          if (kret != KERN_SUCCESS)
            {
              warning ("Unable to deallocate memory used by failed read from inferior: %s (0x%lx)",
                       MACH_ERROR_STRING (kret), (unsigned long) kret);
            }
#ifdef DEBUG_MACOSX_MUTILS
          mutils_debug ("Unable to read region at 0x%8.8llx with length %lu from inferior: "
                        "vm_read returned %lu bytes instead of %lu\n",
                        (uint64_t) pageaddr, (unsigned long) pagesize,
                        (unsigned long) memcopied, (unsigned long) pagesize);
#endif
          return 0;
        }
      memcpy (myaddr, ((unsigned char *) 0) + mempointer + (memaddr - pageaddr), len);
      /* NOTE(review): the "%ulx" in this warning format looks like a
         typo for "%lx" (it prints as unsigned followed by literal
         "lx").  Left untouched in this comment-only pass.  */
      kret = vm_deallocate (mach_task_self (), mempointer, memcopied);
      if (kret != KERN_SUCCESS)
        {
          warning ("Unable to deallocate memory used to read from inferior: %s (0x%ulx)",
                   MACH_ERROR_STRING (kret), kret);
          return 0;
        }
    }
  else
    {
      /* We used to read in a whole page, then modify the page contents,
         then write that page back out.  I bet we did that so we didn't
         break up page maps or something like that.  However, in Leopard
         there's a bug in the shared cache implementation, such that if
         we write into it with whole pages the maximum page protections
         don't get set properly and we can no longer reset the execute
         bit.  In 64 bit Leopard apps, the execute bit has to be set or
         we can't run code from there.  If we figure out that not
         writing whole pages causes problems of it's own, then we will
         have to revisit this.  */
#if defined (TARGET_POWERPC)
      vm_machine_attribute_val_t flush = MATTR_VAL_CACHE_FLUSH;
      /* This vm_machine_attribute only works on PPC, so no reason
         to keep failing on x86...  */
      /* NOTE(review): mempointer is never assigned on the write path
         (only vm_read sets it), so this call reads an uninitialized
         value when TARGET_POWERPC is defined -- confirm before use.  */
      kret = vm_machine_attribute (mach_task_self (), mempointer,
                                   pagesize, MATTR_CACHE, &flush);
#ifdef DEBUG_MACOSX_MUTILS
      if (kret != KERN_SUCCESS)
        {
          mutils_debug ("Unable to flush GDB's address space after memcpy prior to vm_write: %s (0x%lx)\n",
                        MACH_ERROR_STRING (kret), kret);
        }
#endif
#endif
      kret = mach_vm_write (task, memaddr, (pointer_t) myaddr, len);
      if (kret != KERN_SUCCESS)
        {
#ifdef DEBUG_MACOSX_MUTILS
          mutils_debug ("Unable to write region at 0x%8.8llx with length %lu to inferior: %s (0x%lx)\n",
                        (uint64_t) memaddr, (unsigned long) len,
                        MACH_ERROR_STRING (kret), kret);
#endif
          return 0;
        }
    }
  return len;
}
// Exercise Ardb::Sort on a sorted set: default ordering, LIMIT,
// BY-pattern weights, and every GET/AGGREGATE combination.
void test_sort_zset(Ardb& db)
{
    DBID dbid = 0;
    db.ZClear(dbid, "myzset");
    db.ZAdd(dbid, "myzset", ValueData((int64) 0), "v0");
    db.ZAdd(dbid, "myzset", ValueData((int64) 10), "v10");
    db.ZAdd(dbid, "myzset", ValueData((int64) 3), "v3");
    db.ZAdd(dbid, "myzset", ValueData((int64) 5), "v5");

    // No args: members come back ordered by their zset score.
    StringArray args;
    ValueDataArray vs;
    db.Sort(dbid, "myzset", args, vs);
    std::string str;
    CHECK_FATAL(vs.size() != 4, "sort result size error:%zu", vs.size());
    CHECK_FATAL(vs[0].ToString(str) != "v0", "sort result[0]:%s", str.c_str());
    CHECK_FATAL(vs[1].ToString(str) != "v3", "sort result[1]:%s", str.c_str());
    CHECK_FATAL(vs[2].ToString(str) != "v5", "sort result[2]:%s", str.c_str());
    CHECK_FATAL(vs[3].ToString(str) != "v10", "sort result[3]:%s", str.c_str());

    // LIMIT offset=1 count=2 -> the middle two members.
    vs.clear();
    string_to_string_array("limit 1 2", args);
    db.Sort(dbid, "myzset", args, vs);
    CHECK_FATAL(vs.size() != 2, "sort result size error:%zu", vs.size());
    CHECK_FATAL(vs[0].ToString(str) != "v3", "sort result[0]:%s", str.c_str());
    CHECK_FATAL(vs[1].ToString(str) != "v5", "sort result[1]:%s", str.c_str());

    // BY weight_* keys reverse the order (weights descend as scores ascend).
    vs.clear();
    args.clear();
    string_to_string_array("by weight_*", args);
    db.Set(dbid, "weight_v0", "1000");
    db.Set(dbid, "weight_v3", "900");
    db.Set(dbid, "weight_v5", "800");
    db.Set(dbid, "weight_v10", "700");
    db.Sort(dbid, "myzset", args, vs);
    CHECK_FATAL(vs.size() != 4, "sort result size error:%zu", vs.size());
    CHECK_FATAL(vs[0].ToString(str) != "v10", "sort result[0]:%s", str.c_str());
    CHECK_FATAL(vs[1].ToString(str) != "v5", "sort result[1]:%s", str.c_str());
    CHECK_FATAL(vs[2].ToString(str) != "v3", "sort result[2]:%s", str.c_str());
    CHECK_FATAL(vs[3].ToString(str) != "v0", "sort result[3]:%s", str.c_str());

    // GET hash-field plus AGGREGATE: 100+10+9+1000 = 1119 etc.
    // NOTE(review): vs/args are not cleared between the aggregate cases
    // below -- presumably Sort/string_to_string_array reset their output
    // arguments; confirm against their implementations.
    db.HSet(dbid, "myhash_v0", "field", "100");
    db.HSet(dbid, "myhash_v3", "field", "10");
    db.HSet(dbid, "myhash_v5", "field", "9");
    db.HSet(dbid, "myhash_v10", "field", "1000");
    string_to_string_array("by weight_* get myhash_*->field aggregate sum", args);
    db.Sort(dbid, "myzset", args, vs);
    CHECK_FATAL(vs.size() != 1, "sort result size error:%zu", vs.size());
    CHECK_FATAL(vs[0].ToString(str) != "1119", "sort result[0]:%s", str.c_str());

    string_to_string_array("by weight_* get myhash_*->field aggregate min", args);
    db.Sort(dbid, "myzset", args, vs);
    CHECK_FATAL(vs.size() != 1, "sort result size error:%zu", vs.size());
    CHECK_FATAL(vs[0].ToString(str) != "9", "sort result[0]:%s", str.c_str());

    string_to_string_array("by weight_* get myhash_*->field aggregate max", args);
    db.Sort(dbid, "myzset", args, vs);
    CHECK_FATAL(vs.size() != 1, "sort result size error:%zu", vs.size());
    CHECK_FATAL(vs[0].ToString(str) != "1000", "sort result[0]:%s", str.c_str());

    string_to_string_array("by weight_* get myhash_*->field aggregate avg", args);
    db.Sort(dbid, "myzset", args, vs);
    CHECK_FATAL(vs.size() != 1, "sort result size error:%zu", vs.size());
    CHECK_FATAL(vs[0].ToString(str) != "279.75", "sort result[0]:%s", str.c_str());

    string_to_string_array("by weight_* get myhash_*->field aggregate count", args);
    db.Sort(dbid, "myzset", args, vs);
    CHECK_FATAL(vs.size() != 1, "sort result size error:%zu", vs.size());
    CHECK_FATAL(vs[0].ToString(str) != "4", "sort result[0]:%s", str.c_str());
}
// Exercise Ardb::Sort on a plain set: lexical ordering, LIMIT,
// BY-pattern weights, and multiple GET clauses (field plus "#").
void test_sort_set(Ardb& db)
{
    DBID dbid = 0;
    db.SClear(dbid, "myset");
    db.SAdd(dbid, "myset", "ab3");
    db.SAdd(dbid, "myset", "ab2");
    db.SAdd(dbid, "myset", "ab1");
    db.SAdd(dbid, "myset", "ab4");

    // No args: members come back in lexical order.
    StringArray args;
    ValueDataArray vs;
    db.Sort(dbid, "myset", args, vs);
    std::string str;
    CHECK_FATAL(vs.size() != 4, "sort result size error:%zu", vs.size());
    CHECK_FATAL(vs[0].ToString(str) != "ab1", "sort result[0]:%s", str.c_str());
    CHECK_FATAL(vs[1].ToString(str) != "ab2", "sort result[1]:%s", str.c_str());
    CHECK_FATAL(vs[2].ToString(str) != "ab3", "sort result[2]:%s", str.c_str());
    CHECK_FATAL(vs[3].ToString(str) != "ab4", "sort result[3]:%s", str.c_str());

    // LIMIT offset=1 count=2 -> the middle two members.
    vs.clear();
    string_to_string_array("limit 1 2", args);
    db.Sort(dbid, "myset", args, vs);
    CHECK_FATAL(vs.size() != 2, "sort result size error:%zu", vs.size());
    CHECK_FATAL(vs[0].ToString(str) != "ab2", "sort result[0]:%s", str.c_str());
    CHECK_FATAL(vs[1].ToString(str) != "ab3", "sort result[1]:%s", str.c_str());

    // BY weight_* keys reverse the lexical order.
    vs.clear();
    args.clear();
    string_to_string_array("by weight_*", args);
    db.Set(dbid, "weight_ab1", "1000");
    db.Set(dbid, "weight_ab2", "900");
    db.Set(dbid, "weight_ab3", "800");
    db.Set(dbid, "weight_ab4", "700");
    db.Sort(dbid, "myset", args, vs);
    CHECK_FATAL(vs.size() != 4, "sort result size error:%zu", vs.size());
    CHECK_FATAL(vs[0].ToString(str) != "ab4", "sort result[0]:%s", str.c_str());
    CHECK_FATAL(vs[1].ToString(str) != "ab3", "sort result[1]:%s", str.c_str());
    CHECK_FATAL(vs[2].ToString(str) != "ab2", "sort result[2]:%s", str.c_str());
    CHECK_FATAL(vs[3].ToString(str) != "ab1", "sort result[3]:%s", str.c_str());

    // Two GET clauses per element: the hash field value then the element
    // itself ("#"), so 4 elements produce 8 interleaved results.
    db.HSet(dbid, "myhash_ab1", "field", "hash100");
    db.HSet(dbid, "myhash_ab2", "field", "hash10");
    db.HSet(dbid, "myhash_ab3", "field", "hash9");
    db.HSet(dbid, "myhash_ab4", "field", "hash1000");
    args.clear();
    string_to_string_array("by weight_* get myhash_*->field get #", args);
    vs.clear();
    db.Sort(dbid, "myset", args, vs);
    CHECK_FATAL(vs.size() != 8, "sort result size error:%zu", vs.size());
    CHECK_FATAL(vs[0].ToString(str) != "hash1000", "sort result[0]:%s", str.c_str());
    CHECK_FATAL(vs[2].ToString(str) != "hash9", "sort result[2]:%s", str.c_str());
    CHECK_FATAL(vs[4].ToString(str) != "hash10", "sort result[4]:%s", str.c_str());
    CHECK_FATAL(vs[6].ToString(str) != "hash100", "sort result[6]:%s", str.c_str());
    CHECK_FATAL(vs[1].ToString(str) != "ab4", "sort result[1]:%s", str.c_str());
    CHECK_FATAL(vs[3].ToString(str) != "ab3", "sort result[3]:%s", str.c_str());
    CHECK_FATAL(vs[5].ToString(str) != "ab2", "sort result[5]:%s", str.c_str());
    CHECK_FATAL(vs[7].ToString(str) != "ab1", "sort result[7]:%s", str.c_str());
}
CORE_ADDR ppc_parse_instructions (CORE_ADDR start, CORE_ADDR end, ppc_function_properties * props) { CORE_ADDR pc = start; CORE_ADDR last_recognized_insn = start; int unrecognized_insn_count = 0; /* We want to allow some unrecognized instructions, but we don't want to keep scanning forever. So this is the number of unrecognized instructions before we bail from the prologue scanning. */ int max_insn = 6; /* If we don't recognize an instruction, keep going at least this long. This is supposed to handle the case where instructions we don't recognize get inserted into the prologue. */ int saw_pic_base_setup = 0; unsigned int lr_reg = 0xffffffff; /* temporary cookies that we use to tell us that we have seen the lr moved into a gpr. * Set to 0xffffffff at start. * Set to the stw instruction for the register we see in the mflr. * Set to 0 when we see the lr get stored on the stack. */ unsigned int lr_64_reg = 0xffffffff; /* temporary cookies that we use to tell us that we have seen the lr moved into a gpr. This version is for std - used for 64 bit PPC. * Set to 0xffffffff at start. * Set to the stw instruction for the register we see in the mflr. * Set to 0 when we see the lr get stored on the stack. */ unsigned int cr_reg = 0xffffffff; /* Same as lr_reg but for condition reg. */ int offset2 = 0; /* This seems redundant to me, but I am not going to bother to take it out right now. 
*/ CHECK_FATAL (props != NULL); ppc_clear_function_properties (props); CHECK_FATAL (start != INVALID_ADDRESS); /* instructions must be word-aligned */ CHECK_FATAL ((start % 4) == 0); /* instructions must be word-aligned */ CHECK_FATAL ((end == INVALID_ADDRESS) || (end % 4) == 0); CHECK_FATAL ((end >= start) || (end == INVALID_ADDRESS)); for (pc = start; (end == INVALID_ADDRESS) || (pc < end); pc += 4) { ULONGEST op = 0; int insn_recognized = 1; if (!safe_read_memory_unsigned_integer (pc, 4, &op)) { ppc_debug ("ppc_parse_instructions: Got an error reading at 0x%s", paddr_nz (pc)); /* We got an error reading the PC, so let's get out of here... */ return last_recognized_insn; } /* This bcl is part of the sequence: mflr r0 (optional - only if leaf function) bcl .+4 mflr r31 mtlr r0 (do this if you stored it at the top) */ if ((op & 0xfe000005) == 0x42000005) /* bcl .+4 another relocatable way to access global data */ { props->lr_invalid = pc; saw_pic_base_setup = 1; props->pic_base_address = pc + 4; goto processed_insn; } /* This mr r31,r12 is part of an ObjC selector prologue like this: mflr r0 stmw r30,-8(r1) mr r31,r12 (the PIC base was in r12, put it in r31) But don't get tricked into using this expression if we've already seen a normal pic base mflr insn. Note: By convention, the address of the start of the function is placed in R12 when calling an ObjC selector, so we stuff the START address we were given in to pic_base_address on the hope that START was actually the start of the function. */ if (!saw_pic_base_setup && (op == 0x7d9f6378 || op == 0x7d9e6378)) { saw_pic_base_setup = 1; props->pic_base_reg = (op & 0x1f0000) >> 16; props->pic_base_address = start; goto processed_insn; } /* Look at other branch instructions. There are a couple of MacOS X Special purpose routines that are used in function prologues. These are: * saveWorld: used to be used in user code to set up info for C++ exceptions, though now it is only used in throw itself. 
* saveFP: saves the FP registers AND the lr * saveVec: saves the AltiVec registers. If the bl is not one of these, we are probably not in a prologue, and we should get out... */ else if ((op & 0xfc000003) == 0x48000001) /* bl <FN> */
/* (Re)initialize INFERIOR to debug TASK / PID: reset its state, set up
   the dyld search paths, allocate the notify / signal / dyld /
   exception ports, and optionally take over the task's exception port
   (saving the originals for restoration later).  */
void
next_create_inferior_for_task (struct next_inferior_status *inferior,
                               task_t task, int pid)
{
  kern_return_t ret;

  CHECK_FATAL (inferior != NULL);

  /* Discard any previous inferior state before adopting the new task.  */
  next_inferior_destroy (inferior);
  next_inferior_reset (inferior);

  inferior->task = task;
  inferior->pid = pid;

  inferior->attached_in_ptrace = 0;
  inferior->stopped_in_ptrace = 0;
  inferior->suspend_count = 0;

  dyld_init_paths (&inferior->dyld_status.path_info);

  /* get notification messages for current task */
  ret = port_allocate (task_self (), &inferior->notify_port);
  MACH_CHECK_ERROR (ret);
  ret = port_set_backlog (task_self (), inferior->notify_port, PORT_BACKLOG_MAX);
  MACH_CHECK_ERROR (ret);
  if (inferior_bind_notify_port_flag)
    {
      ret = task_set_notify_port (task_self (), inferior->notify_port);
      MACH_CHECK_ERROR (ret);
    }

  /* initialize signal port */
  ret = port_allocate (task_self (), &inferior->signal_port);
  MACH_CHECK_ERROR (ret);
  ret = port_set_backlog (task_self (), inferior->signal_port, PORT_BACKLOG_MAX);
  MACH_CHECK_ERROR (ret);

  /* initialize dyld port */
  /* NOTE(review): this allocation uses MACH_WARN_ERROR while every
     sibling uses MACH_CHECK_ERROR -- confirm whether a dyld port
     failure is really meant to be non-fatal.  */
  ret = port_allocate (task_self (), &inferior->dyld_port);
  MACH_WARN_ERROR (ret);
  ret = port_set_backlog (task_self (), inferior->dyld_port, PORT_BACKLOG_MAX);
  MACH_CHECK_ERROR (ret);

  /* initialize gdb exception port */
  ret = port_allocate (task_self (), &inferior->exception_port);
  MACH_CHECK_ERROR (ret);
  ret = port_set_backlog (task_self (), inferior->exception_port, PORT_BACKLOG_MAX);
  MACH_CHECK_ERROR (ret);

  ret = port_allocate (task_self (), &inferior->exception_reply_port);
  MACH_CHECK_ERROR (ret);
  ret = port_set_backlog (task_self (), inferior->exception_reply_port, PORT_BACKLOG_MAX);
  MACH_CHECK_ERROR (ret);

  /* commandeer inferior exception port */
  if (inferior_bind_exception_port_flag)
    {
      /* Save the task's current exception ports so they can be
         restored when we detach.  */
      next_save_exception_ports (inferior->task, &inferior->saved_exceptions);
      ret = task_set_exception_port (task, inferior->exception_port);
      MACH_CHECK_ERROR (ret);
    }

  inferior->last_thread = next_primary_thread_of_task (inferior->task);
}
// Set up the camera pipeline (camera stream, marker detector, background/
// foreground detector and UI), run the blocking UI loop until m_bRun is
// cleared, then stop the worker threads.
// NOTE(review): the return type is bool but the function returns 0
// (false) after a normal run -- confirm what callers expect.
// NOTE(review): m_pMD, m_pBgFgD, m_pShow, m_pMat, m_pMat2 and
// m_pUIMonitor are allocated here and never deleted.
bool ExtCamControl::start(JSON* pJson)
{
	g_pExtCamControl = this;

	//Init Camera
	m_pCamFront = new _CamStream();
	CHECK_FATAL(m_pCamFront->init(pJson, "FRONTL"));

	//Init Marker Detector
	m_pMD = new _MarkerDetector();
	CHECK_FATAL(m_pMD->init(pJson, "RED_CIRCLE"));
	m_pMD->m_pCamStream = m_pCamFront;
	m_pCamFront->m_bHSV = true;

	//Init BgFg Detector
	m_pBgFgD = new _BgFgDetector();
	CHECK_FATAL(m_pBgFgD->init(pJson, ""));
	m_pBgFgD->m_pCamStream = m_pCamFront;

	//Init Autopilot
	/*
	m_pAP = new _AutoPilot();
	CHECK_FATAL(m_pAP->setup(&m_Json, ""));
	m_pAP->init();
	m_pAP->setCamStream(m_pCamFront, CAM_FRONT);
	m_pAP->m_pOD = m_pOD;
	m_pAP->m_pFD = m_pFD;
	// m_pMD = m_pAP->m_pCamStream[CAM_FRONT].m_pCam->m_pMarkerDetect;
	*/

	//Connect to Mavlink
	/*
	m_pMavlink = new _MavlinkInterface();
	CHECK_FATAL(m_pMavlink->setup(&m_Json, "FC"));
	CHECK_INFO(m_pMavlink->open());
	*/

	//Main window
	m_pShow = new CamFrame();
	m_pMat = new CamFrame();
	m_pMat2 = new CamFrame();

	//Init UI Monitor
	m_pUIMonitor = new UIMonitor();
	m_pUIMonitor->init("OpenKAI demo", pJson);
	m_pUIMonitor->addFullFrame(m_pShow);

	//Start threads
	m_pCamFront->start();
	// m_pMavlink->start();
	// m_pDF->start();
	m_pMD->start();
	// m_pAP->start();
	m_pBgFgD->start();

	//UI thread
	m_bRun = true;
	namedWindow(APP_NAME, CV_WINDOW_NORMAL);
	setWindowProperty(APP_NAME, CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
	setMouseCallback(APP_NAME, onMouseExtCamControl, NULL);

	while (m_bRun)
	{
		// Mavlink_Messages mMsg;
		// mMsg = m_pMavlink->current_messages;
		// m_pCamFront->m_pCamL->m_bGimbal = true;
		// m_pCamFront->m_pCamL->setAttitude(mMsg.attitude.roll, 0, mMsg.time_stamps.attitude);

		// Prefer the marker view; fall back to the BgFg view if the
		// marker screen reports nothing to show.
		if(!showScreenMarkerDetector())
		{
			showScreenBgFgDetector();
		}

		//Handle key input
		m_key = waitKey(30);
		handleKey(m_key);
	}

	// m_pAP->stop();
	m_pMD->stop();
	// m_pMavlink->stop();
	m_pBgFgD->stop();

	m_pMD->complete();
	m_pBgFgD->complete();
	// m_pDF->complete();
	// m_pAP->complete();
	// m_pCamFront->complete();
	// m_pMavlink->complete();
	// m_pMavlink->close();

	// delete m_pAP;
	// delete m_pMavlink;
	// delete m_pDF;
	delete m_pCamFront;

	return 0;
}
/* Run one request/response transaction on connection C, retrying
   transmission up to c->retries times on receive timeout.  NAME is
   used only for log messages.

   On repeated timeout the connection is flagged timed_out and
   RR_RECV_TIMEOUT is returned; a subsequent transaction probes with a
   short (1 second) wait to detect the host coming back.  The reply is
   validated against the request (session key, reply flag, request
   type) before RR_SUCCESS is returned.  */
kdp_return_t
kdp_transaction (kdp_connection *c,
                 kdp_pkt_t *request, kdp_pkt_t *response, char *name)
{
  kdp_return_t rtn;
  int retries = c->retries;

  CHECK_FATAL (kdp_is_bound (c));
  CHECK_FATAL (request != NULL);
  CHECK_FATAL (response != NULL);
  CHECK_FATAL (name != NULL);

  request->hdr.seq = c->seqno;
  request->hdr.key = c->session_key;
  request->hdr.is_reply = 0;

  rtn = RR_RECV_TIMEOUT;

  while (retries--)
    {
      rtn = kdp_transmit_debug (c, request);
      if (rtn != RR_SUCCESS)
        {
          break;
        }
      if (c->timed_out)
        {
          /* The host was unresponsive last time: probe with a short
             wait instead of the full receive timeout.  */
          rtn = kdp_reply_wait (c, response, 1);
          if (rtn == RR_RECV_TIMEOUT)
            {
              return rtn;
            }
          c->logger (KDP_LOG_INFO, "kdp_transaction (%s): "
                     "host responding; continuing transactions\n", name);
        }
      else
        {
          rtn = kdp_reply_wait (c, response, c->receive_timeout);
        }
      if (rtn == RR_RECV_TIMEOUT)
        {
          /* Bug fix: this message read "transation timed out".  */
          c->logger (KDP_LOG_INFO,
                     "kdp_transaction (%s): transaction timed out\n", name);
          c->logger (KDP_LOG_INFO,
                     "kdp_transaction (%s): re-sending transaction\n", name);
          continue;
        }
      break;
    }

  if (rtn == RR_RECV_TIMEOUT)
    {
      c->logger (KDP_LOG_INFO,
                 "kdp_transaction (%s): host not responding; will retry\n",
                 name);
      c->timed_out = 1;
      return rtn;
    }
  if (rtn != RR_SUCCESS)
    {
      return rtn;
    }

  /* Check for correct session key (except for CONNECT requests).  */
  if ((request->hdr.request != KDP_CONNECT)
      && (response->hdr.key != c->session_key))
    {
      c->logger (KDP_LOG_ERROR,
                 "kdp_transaction (%s): invalid session key %d (expected %d)\n",
                 name, response->hdr.key, c->session_key);
      return RR_BAD_ACK;
    }
  if (! response->hdr.is_reply)
    {
      c->logger (KDP_LOG_ERROR, "kdp_transaction (%s): "
                 "response was not tagged as a reply packet\n", name);
      return RR_BAD_ACK;
    }
  if (response->hdr.request != request->hdr.request)
    {
      /* Bug fix: a space was missing after the colon in this message
         ("...):packet type...").  */
      c->logger (KDP_LOG_ERROR, "kdp_transaction (%s): "
                 "packet type of request (%d) does not match packet type of reply (%d)\n",
                 name, request->hdr.request, response->hdr.request);
      return RR_BAD_ACK;
    }

  return RR_SUCCESS;
}
/* Transfer LEN bytes between MYADDR (gdb's address space) and MEMADDR
   in TASK, reading or writing according to WRITE.  Works region by
   region: for writes, page protections are widened (VM_PROT_COPY as a
   fallback) and restored afterwards.  Returns the number of bytes
   actually transferred, or the negated distance to the first available
   address when nothing exists at MEMADDR but memory starts nearby.  */
int
mach_xfer_memory (CORE_ADDR memaddr, char *myaddr, int len,
                  int write, task_t task)
{
  mach_vm_address_t r_start = 0;
  mach_vm_address_t r_end = 0;
  mach_vm_size_t r_size = 0;

  vm_prot_t orig_protection = 0;
  vm_prot_t max_orig_protection = 0;

  CORE_ADDR cur_memaddr;
  char *cur_myaddr;
  int cur_len;

  vm_size_t pagesize = child_get_pagesize ();
  kern_return_t kret;
  int ret;

  /* check for out-of-range address: the round-trip through
     mach_vm_address_t must be lossless.  */
  r_start = memaddr;
  if (r_start != memaddr)
    {
      errno = EINVAL;
      return 0;
    }

  if (len == 0)
    {
      return 0;
    }

  CHECK_FATAL (myaddr != NULL);
  errno = 0;

  /* check for case where memory available only at address greater than
     address specified */
  {
    kret = macosx_get_region_info (task, memaddr, &r_start, &r_size,
                                   &orig_protection, &max_orig_protection);
    if (kret != KERN_SUCCESS)
      {
        return 0;
      }

    if (r_start > memaddr)
      {
        if ((r_start - memaddr) <= MINUS_INT_MIN)
          {
#ifdef DEBUG_MACOSX_MUTILS
            mutils_debug ("First available address near 0x%8.8llx is at 0x%8.8llx; returning\n",
                          (uint64_t) memaddr, (uint64_t) r_start);
#endif
            /* Negative return encodes the gap to the first mapped
               address.  */
            return -(r_start - memaddr);
          }
        else
          {
#ifdef DEBUG_MACOSX_MUTILS
            mutils_debug ("First available address near 0x%8.8llx is at 0x%8.8llx"
                          "(too far; returning 0)\n",
                          (uint64_t) memaddr, (uint64_t) r_start);
#endif
            return 0;
          }
      }
  }

  cur_memaddr = memaddr;
  cur_myaddr = myaddr;
  cur_len = len;

  while (cur_len > 0)
    {
      int changed_protections = 0;

      /* We want the inner-most map containing our address, so set the
         recurse depth to some high value, and call
         mach_vm_region_recurse.  */
      kret = macosx_get_region_info (task, cur_memaddr, &r_start, &r_size,
                                     &orig_protection, &max_orig_protection);

      if (r_start > cur_memaddr)
        {
#ifdef DEBUG_MACOSX_MUTILS
          mutils_debug ("Next available region for address at 0x%8.8llx is 0x%8.8llx\n",
                        (uint64_t) cur_memaddr, (uint64_t) r_start);
#endif
          break;
        }

      if (write)
        {
          /* Keep the execute permission if we modify protections.  */
          vm_prot_t new_prot = VM_PROT_READ | VM_PROT_WRITE;

          /* Do we need to modify our protections?  */
          if (orig_protection & VM_PROT_WRITE)
            {
              /* We don't need to modify our protections.  */
              kret = KERN_SUCCESS;
#ifdef DEBUG_MACOSX_MUTILS
              mutils_debug ("We already have write access to the region "
                            "containing: 0x%8.8llx, skipping permission modification.\n",
                            (uint64_t) cur_memaddr);
#endif
            }
          else
            {
              changed_protections = 1;
              mach_vm_size_t prot_size;

              /* NOTE(review): the else branch below sets prot_size to
                 the offset into the region rather than the remaining
                 size; it looks like it was meant to be
                 r_size - (cur_memaddr - r_start) -- confirm before
                 changing.  */
              if (cur_len < r_size - (cur_memaddr - r_start))
                prot_size = cur_len;
              else
                prot_size = cur_memaddr - r_start;

              kret = macosx_vm_protect (task, r_start, r_size,
                                        cur_memaddr, prot_size, new_prot, 0);

              if (kret != KERN_SUCCESS)
                {
#ifdef DEBUG_MACOSX_MUTILS
                  mutils_debug ("Without COPY failed: %s (0x%lx)\n",
                                MACH_ERROR_STRING (kret), kret);
#endif
                  /* Retry with VM_PROT_COPY, which forces a
                     copy-on-write mapping we are allowed to write.  */
                  kret = macosx_vm_protect (task, r_start, r_size,
                                            cur_memaddr, prot_size,
                                            VM_PROT_COPY | new_prot, 0);
                }

              if (kret != KERN_SUCCESS)
                {
#ifdef DEBUG_MACOSX_MUTILS
                  mutils_debug ("Unable to add write access to region at 0x8.8llx: %s (0x%lx)\n",
                                (uint64_t) r_start, MACH_ERROR_STRING (kret), kret);
#endif
                  break;
                }
            }
        }

      r_end = r_start + r_size;

      CHECK_FATAL (r_start <= cur_memaddr);
      CHECK_FATAL (r_end >= cur_memaddr);
      CHECK_FATAL ((r_start % pagesize) == 0);
      CHECK_FATAL ((r_end % pagesize) == 0);
      CHECK_FATAL (r_end >= (r_start + pagesize));

      /* Transfer up to a page boundary, whole pages, or the tail,
         whichever applies at the current address.  */
      if ((cur_memaddr % pagesize) != 0)
        {
          int max_len = pagesize - (cur_memaddr % pagesize);
          int op_len = cur_len;
          if (op_len > max_len)
            {
              op_len = max_len;
            }
          ret = mach_xfer_memory_remainder (cur_memaddr, cur_myaddr,
                                            op_len, write, task);
        }
      else if (cur_len >= pagesize)
        {
          int max_len = r_end - cur_memaddr;
          int op_len = cur_len;
          if (op_len > max_len)
            {
              op_len = max_len;
            }
          op_len -= (op_len % pagesize);
          ret = mach_xfer_memory_block (cur_memaddr, cur_myaddr,
                                        op_len, write, task);
        }
      else
        {
          ret = mach_xfer_memory_remainder (cur_memaddr, cur_myaddr,
                                            cur_len, write, task);
        }

      if (write)
        {
          /* This vm_machine_attribute isn't supported on i386, so let's
             not try.  */
#if defined (TARGET_POWERPC)
          vm_machine_attribute_val_t flush = MATTR_VAL_CACHE_FLUSH;
          kret = vm_machine_attribute (task, r_start, r_size,
                                       MATTR_CACHE, &flush);
          if (kret != KERN_SUCCESS)
            {
              static int nwarn = 0;
              nwarn++;
              if (nwarn <= MAX_INSTRUCTION_CACHE_WARNINGS)
                {
                  /* NOTE(review): this passes 'ret' to
                     MACH_ERROR_STRING; 'kret' (the failing call's
                     result) looks intended.  */
                  warning ("Unable to flush data/instruction cache for region at 0x%8.8llx: %s",
                           (uint64_t) r_start, MACH_ERROR_STRING (ret));
                }
              if (nwarn == MAX_INSTRUCTION_CACHE_WARNINGS)
                {
                  warning ("Support for flushing the data/instruction cache on this "
                           "machine appears broken");
                  warning ("No further warning messages will be given.");
                }
            }
#endif
          /* Try and restore permissions on the minimal address range.  */
          if (changed_protections)
            {
              mach_vm_size_t prot_size;
              /* NOTE(review): same suspicious else branch as above.  */
              if (cur_len < r_size - (cur_memaddr - r_start))
                prot_size = cur_len;
              else
                prot_size = cur_memaddr - r_start;

              kret = macosx_vm_protect (task, r_start, r_size,
                                        cur_memaddr, prot_size,
                                        orig_protection, 0);
              if (kret != KERN_SUCCESS)
                {
                  warning ("Unable to restore original permissions for region at 0x%8.8llx",
                           (uint64_t) r_start);
                }
            }
        }

      cur_memaddr += ret;
      cur_myaddr += ret;
      cur_len -= ret;

      if (ret == 0)
        {
          break;
        }
    }

  return len - cur_len;
}
void test_sort_list(Ardb& db) { DBID dbid = 0; db.LClear(dbid, "mylist"); db.RPush(dbid, "mylist", "100"); db.RPush(dbid, "mylist", "10"); db.RPush(dbid, "mylist", "9"); db.RPush(dbid, "mylist", "1000"); StringArray args; ValueDataArray vs; db.Sort(dbid, "mylist", args, vs); CHECK_FATAL(vs.size() != 4, "sort result size error:%zu", vs.size()); CHECK_FATAL(vs[0].integer_value != 9, "sort result[0]:%"PRId64, vs[0].integer_value); CHECK_FATAL(vs[1].integer_value != 10, "sort result[0]:%"PRId64, vs[1].integer_value); CHECK_FATAL(vs[2].integer_value != 100, "sort result[0]:%"PRId64, vs[2].integer_value); CHECK_FATAL(vs[3].integer_value != 1000, "sort result[0]:%"PRId64, vs[3].integer_value); vs.clear(); args.clear(); string_to_string_array("limit 1 2", args); db.Sort(dbid, "mylist", args, vs); CHECK_FATAL(vs.size() != 2, "sort result size error:%zu", vs.size()); CHECK_FATAL(vs[0].integer_value != 10, "sort result[0]:%"PRId64, vs[0].integer_value); CHECK_FATAL(vs[1].integer_value != 100, "sort result[0]:%"PRId64, vs[1].integer_value); vs.clear(); args.clear(); string_to_string_array("by weight_*", args); db.Set(dbid, "weight_100", "1000"); db.Set(dbid, "weight_10", "900"); db.Set(dbid, "weight_9", "800"); db.Set(dbid, "weight_1000", "700"); db.Sort(dbid, "mylist", args, vs); CHECK_FATAL(vs.size() != 4, "sort result size error:%zu", vs.size()); CHECK_FATAL(vs[0].integer_value != 1000, "sort result[0]:%"PRId64, vs[0].integer_value); CHECK_FATAL(vs[1].integer_value != 9, "sort result[0]:%"PRId64, vs[1].integer_value); CHECK_FATAL(vs[2].integer_value != 10, "sort result[0]:%"PRId64, vs[2].integer_value); CHECK_FATAL(vs[3].integer_value != 100, "sort result[0]:%"PRId64, vs[3].integer_value); db.HSet(dbid, "myhash", "field_100", "hash100"); db.HSet(dbid, "myhash", "field_10", "hash10"); db.HSet(dbid, "myhash", "field_9", "hash9"); db.HSet(dbid, "myhash", "field_1000", "hash1000"); args.clear(); string_to_string_array("by weight_* get myhash->field_* get #", args); vs.clear(); 
db.Sort(dbid, "mylist", args, vs); std::string str; CHECK_FATAL(vs.size() != 8, "sort result size error:%zu", vs.size()); CHECK_FATAL(vs[0].ToString(str) != "hash1000", "sort result[0]:%s", str.c_str()); CHECK_FATAL(vs[2].ToString(str) != "hash9", "sort result[2]:%s", str.c_str()); CHECK_FATAL(vs[4].ToString(str) != "hash10", "sort result[4]:%s", str.c_str()); CHECK_FATAL(vs[6].ToString(str) != "hash100", "sort result[6]:%s", str.c_str()); CHECK_FATAL(vs[1].integer_value != 1000, "sort result[1]:%"PRId64, vs[1].integer_value); CHECK_FATAL(vs[3].integer_value != 9, "sort result[3]:%"PRId64, vs[3].integer_value); CHECK_FATAL(vs[5].integer_value != 10, "sort result[5]:%"PRId64, vs[5].integer_value); CHECK_FATAL(vs[7].integer_value != 100, "sort result[7]:%"PRId64, vs[7].integer_value); }
static int mach_xfer_memory_block (CORE_ADDR memaddr, char *myaddr, int len, int write, task_t task) { vm_size_t pagesize = child_get_pagesize (); vm_offset_t mempointer; /* local copy of inferior's memory */ mach_msg_type_number_t memcopied; /* for vm_read to use */ kern_return_t kret; CHECK_FATAL ((memaddr % pagesize) == 0); CHECK_FATAL ((len % pagesize) == 0); if (!write) { kret = mach_vm_read (task, memaddr, len, &mempointer, &memcopied); if (kret != KERN_SUCCESS) { #ifdef DEBUG_MACOSX_MUTILS mutils_debug ("Unable to read region at 0x%8.8llx with length %lu from inferior: %s (0x%lx)\n", (uint64_t) memaddr, (unsigned long) len, MACH_ERROR_STRING (kret), kret); #endif return 0; } if (memcopied != len) { kret = vm_deallocate (mach_task_self (), mempointer, memcopied); if (kret != KERN_SUCCESS) { warning ("Unable to deallocate memory used by failed read from inferior: %s (0x%ux)", MACH_ERROR_STRING (kret), kret); } #ifdef DEBUG_MACOSX_MUTILS mutils_debug ("Unable to read region at 0x%8.8llx with length %lu from inferior: " "vm_read returned %lu bytes instead of %lu\n", (uint64_t) memaddr, (unsigned long) len, (unsigned long) memcopied, (unsigned long) len); #endif return 0; } memcpy (myaddr, ((unsigned char *) 0) + mempointer, len); kret = vm_deallocate (mach_task_self (), mempointer, memcopied); if (kret != KERN_SUCCESS) { warning ("Unable to deallocate memory used by read from inferior: %s (0x%ulx)", MACH_ERROR_STRING (kret), kret); return 0; } } else { kret = mach_vm_write (task, memaddr, (pointer_t) myaddr, len); if (kret != KERN_SUCCESS) { #ifdef DEBUG_MACOSX_MUTILS mutils_debug ("Unable to write region at 0x%8.8llx with length %lu from inferior: %s (0x%lx)\n", (uint64_t) memaddr, (unsigned long) len, MACH_ERROR_STRING (kret), kret); #endif return 0; } } return len; }