/*
 * use_hwinfo
 * Scan the syspage hwinfo section for the RTC device and fill *chip
 * with its location: physical base address, register shift, access
 * type and century register offset.
 *
 * Returns the RTC device's name string, or NULL if no RTC device is
 * found or its location tag names a bad address space.
 */
static char *
use_hwinfo(struct chip_loc *chip) {
	char				*name;
	hwi_tag				*dev_tag;
	hwi_tag				*tag;
	unsigned			off;
	unsigned			loc;
	struct asinfo_entry	*as;

	off = HWI_NULL_OFF;
	for( ;; ) {
		/* Visit each "device" tag until one identifies itself as an RTC. */
		off = hwi_find_tag(off, 0, HWI_TAG_NAME_device);
		if(off == HWI_NULL_OFF) return(NULL);
		dev_tag = hwi_off2tag(off);
		if(dev_tag->item.owner == HWI_NULL_OFF) continue;
		/* Old-style hwinfo: the owning item carries the device class name. */
		tag = hwi_off2tag(dev_tag->item.owner);
		if(strcmp(__hwi_find_string(tag->item.itemname), HWI_ITEM_DEVCLASS_RTC) == 0) break;
		/* for the new hwinfo structure */
		if(dev_tag->device.pnpid == hwi_devclass_RTC) break;
	}
	tag = hwi_off2tag(off);
	name = __hwi_find_string(tag->item.itemname);

	/* The location tag supplies register base/shift and the address space. */
	loc = hwi_find_tag(off, 1, HWI_TAG_NAME_location);
	if(loc != HWI_NULL_OFF) {
		tag = hwi_off2tag(loc);
		chip->phys = tag->location.base;
		chip->reg_shift = tag->location.regshift;
		if(tag->location.addrspace == 0xffff) {
			/* 0xffff: no valid asinfo reference for this device. */
			fprintf(stderr, "\nWrong address space for RTC.\n");
			return NULL;
		}
		/* addrspace is a byte offset into the asinfo section array. */
		as = &SYSPAGE_ENTRY(asinfo)[tag->location.addrspace/sizeof(*as)];
		/* access_type is 1 for memory-mapped access, 0 otherwise. */
		chip->access_type = (strcmp(__hwi_find_string(as->name), "memory") == 0);
	}

	/* Optional "regname" tag gives the century register offset. */
	off = hwi_find_tag(off, 1, HWI_TAG_NAME_regname);
	if(off != HWI_NULL_OFF) {
		tag = hwi_off2tag(off);
		chip->century_reg = tag->regname.offset;
	}
	return(name);
}
/*
 * cache_iterate
 * Walk the linked list of cacheattr entries starting at cache_idx and
 * apply cache_one() at each level, stopping at the end of the list or
 * at an entry carrying term_flag.  Levels flagged CACHE_FLAG_SUBSET are
 * deferred: only the last pending subset level gets a control call,
 * right before the next non-subset level (or at the very end).
 */
static void
cache_iterate(int cache_idx, uintptr_t base, size_t len, int flags, unsigned term_flag) {
	struct cacheattr_entry	*cache_base;
	struct cacheattr_entry	*cache;
	struct cacheattr_entry	*subset_cache;	/* last deferred subset level, if any */

	subset_cache = NULL;
	cache_base = SYSPAGE_ENTRY(cacheattr);
	for(;;) {
		if(cache_idx == CACHE_LIST_END) break;
		cache = &cache_base[cache_idx];
		if(cache->flags & term_flag) break;
		if(cache->flags & CACHE_FLAG_SUBSET) {
			//We'll get to it later
			subset_cache = cache;
		} else {
			if(subset_cache != NULL) {
				//
				// We've skipped issuing control functions to some cache levels
				// because they obey the 'subset' property. Have to issue one
				// to the last level now.
				//
				cache_one(subset_cache, base, len, flags);
			}
			cache_one(cache, base, len, flags);
			subset_cache = NULL;
		}
		cache_idx = cache->next;
	}
	if(subset_cache != NULL) {
		//
		// We've skipped issuing control functions to some cache levels
		// because they obey the 'subset' property. Have to issue one
		// to the last level now.
		//
		cache_one(subset_cache, base, len, flags);
	}
}
int main() { int i; int id1, id2; struct _pulse pulse; // Request I/O privileges ThreadCtl( _NTO_TCTL_IO, 0 ); chid = ChannelCreate( 0 ); coid = ConnectAttach( 0, 0, chid, _NTO_SIDE_CHANNEL, 0 ); SIGEV_PULSE_INIT( &event1, coid, getprio(0), MY_PULSE_CODE1, 0 ); id1=InterruptAttach( SYSPAGE_ENTRY(qtime)->intr, &handler1, NULL, 0, 0 ); SIGEV_PULSE_INIT( &event2, coid, getprio(0), MY_PULSE_CODE2, 0 ); id2=InterruptAttach( SYSPAGE_ENTRY(qtime)->intr, &handler2, NULL, 0, 0 ); for( i = 0; i < 10; ++i ) { // Wait for ISR to wake us up MsgReceivePulse( chid, &pulse, sizeof( pulse ), NULL ); if(pulse.code == MY_PULSE_CODE1) { printf( "1000 events\n" ); } else if(pulse.code == MY_PULSE_CODE2) { printf( "2500 events\n" ); } } // Disconnect the ISR handler InterruptDetach(id1); InterruptDetach(id2); return 0; }
/*
 * vx_capability_check
 * Verify that startup configured the MMU flags the kernel requires;
 * crash() immediately if either ARMv6 flag is missing.
 */
void vx_capability_check(void) {
	unsigned const flags = SYSPAGE_ENTRY(cpuinfo)->flags;

	/*
	 * Check compatibility with startup MMU configuration
	 */
	if((flags & ARM_CPU_FLAG_V6) == 0) {
		kprintf("startup did not set ARM_CPU_FLAG_V6\n");
		crash();
	}
	if((flags & ARM_CPU_FLAG_V6_ASID) == 0) {
		kprintf("startup did not set ARM_CPU_FLAG_V6_ASID\n");
		crash();
	}
}
double get_cpu_frequency() { static double freq = -1; if (freq != -1) return freq; #ifdef _WIN32 LARGE_INTEGER li; QueryPerformanceFrequency(&li); freq = (double)li.QuadPart; #else # ifdef __QNX__ freq = SYSPAGE_ENTRY( qtime )->cycles_per_sec; # else std::ifstream ifs("/proc/cpuinfo"); std::string token; while(!ifs.eof()){ ifs >> token; if (token == "cpu"){ ifs >> token; if (token == "MHz"){ ifs >> token; ifs >> freq; freq *= 1e6; break; } }
int SampleLoopTask::Init(char *name, double rate, double *actual_sample_rate, int priority){ PeriodicTask::Init(name, rate, actual_sample_rate, priority); SampleRate = *actual_sample_rate; printf("actual sample rate %lf\n", *actual_sample_rate); for(int i=0; i<MATLAB_NET_NUM_CH; i++){ MNET->AddSignal(i, &(num[i])); } for(int i=0; i<VISION_NET_NUM_CH; i++){ VNET->AddSignal(i, &(vnum[i])); } // initialize gains Kp = 350; //150.0; //150.0; //0.2; //200; //20; //300.; Kd = 1.0; //0.9; //0.01; //10; //50; //10.; Ki = 0.0; pCmd = 0.; iCmd = 0.; currentI = 0.; handTheta_prev = 0.; handVel_prev = 0.; done = 0; ncycles_prev = 0; cps = SYSPAGE_ENTRY(qtime)->cycles_per_sec; lp.go = 0; }
int TraceEvent(int code, ...) { int data[10]; // Can't have more than 1+7+2 elements va_list arg; unsigned i; unsigned count=_TRACE_GET_COUNT((unsigned)code); data[0] = code; va_start(arg, code); for(i=1; i<count; ++i) { data[i] = va_arg(arg, int); } va_end(arg); if(in_interrupt()) { int r_v; if((r_v = SYSPAGE_ENTRY(callin)->trace_event(data))) { errno = r_v; return (-1); } return (0); } else { return (__traceevent(data)); } }
_Uint32t cpu_perfreg_id(void) { static int perfreg_id = 0; struct cpuinfo_entry *cpu_p; unsigned type; if ( perfreg_id != 0 ) { return perfreg_id; } /* NYI: Shouldn't be looking for CPU type here. Should be moved to the init_cpu<family>.c file to localize CPU type checks. */ cpu_p = SYSPAGE_ENTRY(cpuinfo); type = PPC_GET_FAM_MEMBER(cpu_p->cpu); switch(type) { case PPC_7450: case PPC_7455: perfreg_id = PERFREGS_MAKEID(PERFREGS_CPUID_PPC,PERFREGS_PARTID_7450); break; default: perfreg_id = -1; break; } return perfreg_id; }
/* Report whether the CPU advertises hardware integer divide support.
 * Always false when the build headers don't even define the flag. */
static bool isIntegerDivSupported() {
#ifdef ARM_CPU_FLAG_IDIV
	const unsigned cpu_flags = SYSPAGE_ENTRY(cpuinfo)->flags;
	return (cpu_flags & ARM_CPU_FLAG_IDIV) != 0;
#else
	return false;
#endif
}
int walk_asinfo(const char *name, int (*func)(struct asinfo_entry *, char *, void *), void *data) { char *str = SYSPAGE_ENTRY(strings)->data; struct asinfo_entry *as = SYSPAGE_ENTRY(asinfo); char *curr; unsigned num; num = _syspage_ptr->asinfo.entry_size / sizeof(*as); for( ;; ) { if(num == 0) return 1; curr = &str[as->name]; if(name == 0 || strcmp(curr, name) == 0) { if(!func(as, curr, data)) return 0; } ++as; --num; } }
/*
 * cpu_start_ap(void)
 *	CPU specific stuff for getting next AP in an SMP system initialized
 *	and into the kernel.  "start" is the kernel entry address the AP
 *	should jump to; it is published through whichever smp syspage
 *	section this startup provided.
 */
void
cpu_start_ap(uintptr_t start) {
	if(_syspage_ptr->smp.entry_size > 0) {
		/* New CPU-independent smp section is present: use it. */
		SYSPAGE_ENTRY(smp)->start_addr = (void *)start;
	} else if(_syspage_ptr->un.x86.smpinfo.entry_size > 0) {
		//
		// This code can be removed later, after all the startup's have
		// been updated to use the new CPU independent smp section.
		//
		SYSPAGE_CPU_ENTRY(x86, smpinfo)->ap_start_addr = (void *)start;
	}
}
/*
 * async_check_init
 * If the debug channel's callout provides a break_detect routine,
 * interpose outside_timer_reload in front of the existing timer_reload
 * callout, saving the original (or dummy_timer_reload when none was
 * installed) so the chain can still be called.
 * NOTE(review): presumably this lets break conditions be polled on
 * every timer reload — confirm against outside_timer_reload.
 */
void
async_check_init(unsigned channel) {
	struct callout_entry	*callout;

	callout = SYSPAGE_ENTRY(callout);
	break_detect = callout->debug[channel].break_detect;
	if(break_detect != NULL) {
		old_timer_reload = callout->timer_reload;
		/* Always keep something callable in old_timer_reload. */
		if(old_timer_reload == NULL) old_timer_reload = dummy_timer_reload;
		callout->timer_reload = outside_timer_reload;
	}
}
/* Convert the free-running ClockCycles() counter to seconds. */
double get_tick_count_ha() {
	uint64_t snapshot = ClockCycles( );
	uint64_t cycles_per_sec = SYSPAGE_ENTRY(qtime)->cycles_per_sec;

	return (double)snapshot / cycles_per_sec;
}
/*
 * find_rtc_name
 * Return the RTC chip name: the RTC_ENV_NAME environment variable wins;
 * otherwise scan the NUL-separated syspage string table for an
 * "RTC_SYSPAGE_NAME=value" entry and return the value part.
 * Returns NULL when neither source has it.
 */
static char *
find_rtc_name() {
	char	*scan;

	/* Environment variable overrides the syspage setting. */
	scan = getenv(RTC_ENV_NAME);
	if(scan != NULL) return(scan);

	/* sizeof(RTC_SYSPAGE_NAME) == strlen(RTC_SYSPAGE_NAME "="),
	   so this compares the name and the '=' in one memcmp. */
	scan = SYSPAGE_ENTRY(strings)->data;
	while(scan[0] != '\0') {
		if(memcmp(scan, RTC_SYSPAGE_NAME "=", sizeof(RTC_SYSPAGE_NAME)) == 0) {
			return(&scan[sizeof(RTC_SYSPAGE_NAME)]);
		}
		scan += strlen(scan) + 1;
	}
	return(NULL);
}
// This implementation works on x86 where taking the address of the // first parameter gives you access to the rest as a linear array. int TraceEvent(int code, ...) { if(in_interrupt()) { int r_v; if((r_v = SYSPAGE_ENTRY(callin)->trace_event(&code))) { errno = r_v; return (-1); } return (0); } else { return (__traceevent(&code)); } }
/*
 * init_smp
 * Per-CPU kernel-side SMP bring-up: finish memory manager init, force
 * nohalt, switch to the per-CPU cpupage pointer, hook the IPI callout,
 * set up this CPU's exception save areas, and enter the idle thread
 * through __ker_exit.
 */
void init_smp() {
	unsigned	cpu = RUNCPU;

	/*
	 * Unmap the 1-1 mapping startup built to enable the MMU
	 */
	memmgr.init_mem(2);

	/*
	 * FIXME: need to set nohalt so loop does not halt CPU whilst in
	 *        its Ring0(kerext_idle) code. This will cause other CPUs
	 *        spin waiting for the syscall to return.
	 *
	 *        This wastes power - should try to figure out how to get
	 *        SMP idle to actually use a wait-for-interrupt etc. to
	 *        halt the cpu?
	 */
	nohalt = 1;

	/*
	 * Proc threads must access the user cpupage pointer so they get the
	 * correct cpupage for the CPU they are running on.
	 */
	_cpupage_ptr = privateptr->user_cpupageptr;

	/*
	 * Attach the callout send_ipi routine
	 */
	send_ipi_rtn = SYSPAGE_ENTRY(smp)->send_ipi;

	/*
	 * Set up exception save areas for this cpu
	 */
	smp_exc_save(&und_save[cpu], &abt_save[cpu], &irq_save[cpu], &fiq_save[cpu]);

	/*
	 * Set initial MMU domain register value for first __ker_exit
	 */
	mmu_set_domain(0);

	/*
	 * Return through __ker_exit into this cpu's idle thread
	 */
	ker_start();
}
/*
 * cache_one
 * Apply the cache control operation "flags" to one cache level over the
 * virtual range [base, base+len).  The start is aligned down to a cache
 * line.  For levels controlled by physical address the range is
 * translated piecewise with vaddrinfo(); unmapped pages are skipped a
 * page at a time.
 */
static void
cache_one(struct cacheattr_entry *cache, uintptr_t base, size_t len, int flags) {
	uintptr_t	vaddr;
	size_t		cache_len;

	/* Align the start down to a cache line; grow the length to match. */
	vaddr = base & ~(cache->line_size-1);
	cache_len = len + (base - vaddr);
	while(cache_len != 0) {
		unsigned	lines;
		paddr_t		addr;
		size_t		valid_len;

		valid_len = cache_len;
		addr = vaddr;
		if(cache->flags & CACHE_FLAG_CTRL_PHYS) {
			/* This level wants physical addresses: translate vaddr,
			   clipping valid_len to the contiguous run. */
			if(vaddrinfo(NULL, vaddr, &addr, &valid_len) == PROT_NONE) {
				unsigned page = SYSPAGE_ENTRY(system_private)->pagesize;

				/* make vaddr skip to the next page boundary */
				addr = (vaddr + page) & (~(page-1));
				if(cache_len < (addr - vaddr)) {
					cache_len = 0;
				} else {
					cache_len -= (addr - vaddr);
					vaddr = addr;
				}
				continue;
			}
		}
		/* NOTE(review): vaddr is not advanced after a successful pass
		   when valid_len < cache_len — looks like it should advance by
		   valid_len; confirm against vaddrinfo()'s contract. */
		lines = (valid_len + cache->line_size - 1) / cache->line_size;
		while(lines != 0) {
			unsigned	done_lines;
			size_t		done_size;

			//NYI: need to make handle 64 bit paddrs....
			done_lines = cache->control((paddr32_t)addr, lines, flags, cache, _syspage_ptr);
			if(done_lines == 0) return; /* whole cache handled */
			done_size = done_lines * cache->line_size;
			addr += done_size;
			lines -= done_lines;
		}
		cache_len -= valid_len;
	}
}
/*
 * map_ifs
 * walk_asinfo() callback: mount the image filesystem found in the given
 * address range and, when it holds boot programs, record the mount and
 * the offset of proc's image in the boot_data passed through "d".
 * Always returns 1 so the asinfo walk continues.
 */
static int
map_ifs(struct asinfo_entry *as, char *name, void *d) {
	struct image_header				*image;
	struct system_private_entry		*spp;
	struct boot_data				*data = d;

	image = imagefs_mount(as->start, (as->end - as->start) + 1, 0, _RESMGR_FLAG_AFTER, sysmgr_prp->root, 0);
	/* imagefs_mount() returns (void *)-1 on failure; also require a boot inode. */
	if((image != (void *)-1) && (((struct image_header *)image)->boot_ino[0])) {
		data->boot = image;
		spp = SYSPAGE_ENTRY(system_private);
		if(image->flags & IMAGE_FLAGS_INO_BITS) {
			/* NOTE(review): -1U apparently means "offset encoded in
			   inode bits" — confirm with proc_offset's consumer. */
			data->proc_offset = -1U;
		} else {
			data->proc_offset = spp->boot_pgm[spp->boot_idx].base - as->start;
		}
	}
	return 1;
}
/****************************************************************************
REMARKS:
Function to execute a service at ring 0. This is done using the clock
interrupt handler since the code we attach to it will always run at ring 0.

The service code is placed in the global _PM_R0 block; the attached ISR
(_PM_ring0_isr) performs it and sets _PM_R0.service to -1 when done.
NOTE(review): the busy-wait below assumes _PM_R0.service is declared
volatile (or otherwise not cached by the compiler) — confirm.
****************************************************************************/
static void CallRing0(void)
{
#ifdef __QNXNTO__
	uint clock_intno = SYSPAGE_ENTRY(qtime)->intr;
#else
	uint clock_intno = 0; /* clock irq */
#endif
	int intrid;

#ifdef __QNXNTO__
	/* Lock the request block in memory and get I/O privileges before
	   attaching the interrupt handler. */
	mlock((void*)&_PM_R0, sizeof(_PM_R0));
	ThreadCtl(_NTO_TCTL_IO, 0);
#endif
#ifdef __QNXNTO__
	if ((intrid = InterruptAttach(_NTO_INTR_CLASS_EXTERNAL | clock_intno, _PM_ring0_isr, (void*)&_PM_R0, sizeof(_PM_R0), _NTO_INTR_FLAGS_END)) == -1) {
#else
	if ((intrid = qnx_hint_attach(clock_intno, _PM_ring0_isr, FP_SEG(&_PM_R0))) == -1) {
#endif
		perror("Attach");
		exit(-1);
	}
	/* Spin until the ISR reports completion. */
	while (_PM_R0.service != -1)
		;
#ifdef __QNXNTO__
	InterruptDetach(intrid);
#else
	qnx_hint_detach(intrid);
#endif
}

/****************************************************************************
REMARKS:
Flush the translation lookaside buffer.
****************************************************************************/
void PMAPI PM_flushTLB(void)
{
	_PM_R0.service = R0_FLUSH_TLB;
	CallRing0();
}
/* called by mt_buffer_init, writes ltt_subbuffer_header
 * at pt0, which must point to a traceset's begining
 * returns a pointer to the end of the header */
void * mt_write_header(void *pt0)
{
	/* NOTE(review): ts appears to be a file-scope struct timeval,
	   filled in here — confirm. */
	gettimeofday(&ts, NULL);
	ltt_subbuffer_header_t *ltt_hd;
	ltt_hd = (ltt_subbuffer_header_t *) pt0;
	ltt_hd->cycle_count_begin = 0;	/* Cycle count at subbuffer start */
	ltt_hd->cycle_count_end = 0;	/* Cycle count at subbuffer end */
	ltt_hd->magic_number = 0x00D6B7ED;	/*
						 * Trace magic number.
						 * contains endianness information.
						 */
	ltt_hd->major_version = 2;		/* LTT format version 2.3 */
	ltt_hd->minor_version = 3;
	ltt_hd->arch_size = 0;		/* Architecture pointer size */
	ltt_hd->alignment = _MT_ALIGNMENT;	/* LTT data alignment */
	ltt_hd->start_time_sec = ts.tv_sec;	/* NTP-corrected start time */
	ltt_hd->start_time_usec = ts.tv_usec;
	ltt_hd->start_freq = SYSPAGE_ENTRY( qtime )->cycles_per_sec;
						/*
						 * Frequency at trace start,
						 * used all along the trace.
						 */
	ltt_hd->freq_scale = 0;		/* Frequency scaling (divisor) */
	ltt_hd->lost_size = 0;		/* Size unused at end of subbuffer */
	ltt_hd->buf_size = 0xFFFFFFFF;	/* Size of this subbuffer */
	ltt_hd->events_lost = 0;		/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
	ltt_hd->subbuf_corrupt = 0;	/*
						 * Corrupted (lost) subbuffers since
						 * the begginig of the trace.
						 * (may overflow)
						 */
	return (void *) ltt_hd->header_end;
}
static void transfer_aps(void) { unsigned i; struct syspage_entry *sp; volatile uint32_t *pending; sp = lsp.syspage.p; pending = &SYSPAGE_ENTRY(smp)->pending; for(i = 1; i < lsp.syspage.p->num_cpu; ++i) { //Get one AP into the syspage spin callout *pending = 1; mem_barrier(); #ifdef __MIPS__ setcp0_count(0); #endif syspage_available = i; do { mem_barrier(); // give the bus a break. } while(*pending != 0); } }
/** * Log all of the schedule data collected at runtime to the kernel * event stream and stdout. */ void Task::logData() { uint64_t cps; float realTime = 0; float realTransitionTime = 0; char data[256]; // Determine the clock rate cps = SYSPAGE_ENTRY(qtime)->cycles_per_sec; // Calculate the real compute time period realTime = ((float)((float)realComputeTime / (float)cps) * 1000); realTransitionTime = (float)((float)computeTransitionTime / (float)cps) * 1000; // Log the data sprintf(data, "TDATA %d,%d,%d,%d,%d,%d,%f,%f,%f", uid, deadlineEvents, deadlinesMissed, totalComputationTimeMissed, totalComputationTime / NS_PER_MS, totalComputationCycles, realTransitionTime / realTime, realTime, ((totalComputationTime / NS_PER_MS) - realTime) / (totalComputationTime / NS_PER_MS)); TraceEvent(_NTO_TRACE_INSERTUSRSTREVENT, EVENT_PROXY_DATA, data); cout << data << endl; }
int cache_control(uintptr_t base, size_t len, int flags) { struct cpuinfo_entry *cpu; int data_flags; int code_flags; //NYI: deal with SMP issues cpu = &SYSPAGE_ENTRY(cpuinfo)[0]; data_flags = flags & (MS_SYNC|MS_ASYNC|MS_INVALIDATE); if(data_flags != 0) { cache_iterate(cpu->data_cache, base, len, data_flags, 0); } code_flags = flags & (MS_INVALIDATE|MS_INVALIDATE_ICACHE); if(code_flags != 0) { if(data_flags == 0) { //Push data towards main memory until we run into a unified //cache. This is so that when the icache(s) refill, they'll //get up-to-date data. cache_iterate(cpu->data_cache, base, len, MS_SYNC, CACHE_FLAG_INSTR); } cache_iterate(cpu->ins_cache, base, len, code_flags, 0); } return(0); }
// Global subsystem singletons; constructed before main() runs.
clsCTL _ctl;
clsSVO _svo;
clsCMM _cmm;
clsDLG _dlg;
clsNET _net;
clsURG _urg;
clsCoop _coop;
clsPTU _ptu;
clsMain _main;
clsUser _user;
clsParser _parser;	// parser is used to load system parameters

//global
uint64_t _cycle0;	// initial clock cycle as the application start
// Cycle-counter frequency, read from the QNX syspage at static-init time;
// _cpms derives cycles-per-millisecond from it.
uint64_t _cps = SYSPAGE_ENTRY(qtime)->cycles_per_sec;	//cycles in one second
uint64_t _cpms = (uint64_t)(0.001*_cps);

//inline
// Seconds elapsed since _cycle0 was captured.
double GetTime() { return (double)(ClockCycles()-_cycle0)/_cps; }

//path
clsPath _path[MAX_PATH];	// 20 paths
int _nPath = 0;

clsTmpPath _pathTmp;	//for dynamically created path

// Maps a numeric command code to its textual name.
struct COMMANDENTRY {
	short code;
	const char *pszName;
};
/*
 * mdriver_init
 * Cache the syspage mini-driver table pointer and entry count, then run
 * the sanity check.  The parenthesized name suppresses expansion in case
 * mdriver_init is also defined as a function-like macro.
 */
void
(mdriver_init)(void) {
	mdriver_ptr = SYSPAGE_ENTRY(mdriver);
	/* entry_size is the whole section size; divide to get the count. */
	mdriver_num = _syspage_ptr->mdriver.entry_size / sizeof(*mdriver_ptr);
	mdriver_check();
}
/*
 * getTime
 * Read the current wall-clock time into "time".  On QNX the first call
 * sets up a timer-interrupt-driven TSC/clock correlation (tData); later
 * calls extrapolate from the last interrupt tick using ClockCycles().
 * Returns TRUE on success, FALSE on failure.
 *
 * BUG FIX: the first-call QNX path ended with a bare "return;" in a
 * function declared to return Boolean; it now returns TRUE (that path
 * has successfully filled in "time").
 */
static Boolean
getTime (ClockDriver *self, TimeInternal *time) {
#ifdef __QNXNTO__
	static TimerIntData tmpData;
	int ret;
	uint64_t delta;
	double tick_delay;
	uint64_t clock_offset;
	struct timespec tp;

	if(!tDataUpdated) {
		/* One-time setup: attach to the timer interrupt so tData keeps
		   tracking cycle counter vs. CLOCK_REALTIME. */
		memset(&tData, 0, sizeof(TimerIntData));
		if(ThreadCtl(_NTO_TCTL_IO, 0) == -1) {
			ERROR(THIS_COMPONENT"QNX: could not give process I/O privileges");
			return FALSE;
		}
		tData.cps = SYSPAGE_ENTRY(qtime)->cycles_per_sec;
		tData.ns_per_tick = 1000000000.0 / tData.cps;
		tData.prev_tsc = ClockCycles();
		clock_gettime(CLOCK_REALTIME, &tp);
		tData.last_clock = timespec2nsec(&tp);
		ret = InterruptAttach(0, timerIntHandler, &tData, sizeof(TimerIntData), _NTO_INTR_FLAGS_END | _NTO_INTR_FLAGS_TRK_MSK);
		if(ret == -1) {
			ERROR(THIS_COMPONENT"QNX: could not attach to timer interrupt");
			return FALSE;
		}
		tDataUpdated = TRUE;
		time->seconds = tp.tv_sec;
		time->nanoseconds = tp.tv_nsec;
		return TRUE;
	}

	/* Snapshot the shared data, then extrapolate from the last tick. */
	memcpy(&tmpData, &tData, sizeof(TimerIntData));
	delta = ClockCycles() - tmpData.prev_tsc;

	/* compute time since last clock update */
	tick_delay = (double)delta / (double)tmpData.filtered_delta;
	clock_offset = (uint64_t)(tick_delay * tmpData.ns_per_tick * (double)tmpData.filtered_delta);

	/* not filtered yet */
	if(tData.counter < 2) {
		clock_offset = 0;
	}

	DBGV("QNX getTime cps: %lld tick interval: %.09f, time since last tick: %lld\n",
		tmpData.cps, tmpData.filtered_delta * tmpData.ns_per_tick, clock_offset);

	nsec2timespec(&tp, tmpData.last_clock + clock_offset);

	time->seconds = tp.tv_sec;
	time->nanoseconds = tp.tv_nsec;
	return TRUE;
#else

#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0)
	struct timespec tp;
	if (clock_gettime(CLOCK_REALTIME, &tp) < 0) {
		PERROR(THIS_COMPONENT"clock_gettime() failed, exiting.");
		exit(0);
	}
	time->seconds = tp.tv_sec;
	time->nanoseconds = tp.tv_nsec;
#else
	struct timeval tv;
	gettimeofday(&tv, 0);
	time->seconds = tv.tv_sec;
	time->nanoseconds = tv.tv_usec * 1000;
#endif /* _POSIX_TIMERS */

#endif /* __QNXNTO__ */

	return TRUE;
}
int main( int argc, char *argv[] ) { db_clt_typ *pclt = NULL; char hostname[MAXHOSTNAMELEN+1]; db_data_typ db_data; posix_timer_typ *ptmr; int recv_type; int millisec = 5000; trig_info_typ trig_info; uint64_t ticksPerMilliSec = SYSPAGE_ENTRY(qtime)->cycles_per_sec / 1000000; unsigned count = 0; unsigned total_diff = 0; /* Initialize the database. */ get_local_name(hostname, MAXHOSTNAMELEN); if( (pclt = database_init(argv[0], hostname, DEFAULT_SERVICE, COMM_QNX6_XPORT )) == NULL ) { fprintf(stderr, "Database initialization error in ids_io\n"); veh_done( pclt ); exit( EXIT_FAILURE ); } /* Initialize the timer. */ if ((ptmr = timer_init(millisec, DB_CHANNEL(pclt))) == NULL) { printf("timer_init failed\n"); exit( EXIT_FAILURE ); } print_timer(ptmr); if( setjmp( exit_env ) != 0 ) { printf("average timediff = %u\n", total_diff / count); veh_done( pclt ); exit( EXIT_SUCCESS ); } else sig_ign( sig_list, sig_hand ); for( ;; ) { /* Now wait for a trigger. */ recv_type= clt_ipc_receive(pclt, &trig_info, sizeof(trig_info)); if (recv_type == DB_TIMER) { printf("received timer alarm\n"); } else if(DB_TRIG_VAR(&trig_info) == 200) { fflush(stdout); /* Read DB_DII_OUT_VAR and send DII control * to the hardware. */ if( clt_read( pclt, 200, 200, &db_data ) == FALSE) { fprintf( stderr, "clt_read( DB_DII_OUT_VAR ).\n" ); } else { uint64_t *incoming_time = (uint64_t*) db_data.value.user; uint64_t timediff = ClockCycles() - *incoming_time; timediff /= ticksPerMilliSec; total_diff += timediff; ++count; } } else printf("Unknown trigger, recv_type %d\n", recv_type); } }
/* * This function does all command procesing for interfacing to gdb. */ static boolean do_gdb_interface(struct kdebug_entry *entry, CPU_REGISTERS *ctx, ulong_t signal) { int length; struct kdebug_info *kinfo; const struct kdebug_private *kprivate; THREAD *thread; /* * Indicate that we've gone back to debug mode */ for (length = 0; length < 4; length++) dbg_putc('|'); if(protocol == 0) { // generic GDB 4.16 wants the response to the continue/step // command sent before it transmits anything else. ksprintf(outbuf,"S%02xk", (unsigned)signal); putpacket(); } while(getpacket()) { connected = TRUE; outbuf[0] = 0; #ifdef DEBUG_GDB kprintf("Processing packet '%s'\n", inbuf); #endif switch(inbuf[0]) { /* Tell the gdb client our signal number */ case '?' : if(gdb_test_reloc_sem()) { paddr_t base; char *str = SYSPAGE_ENTRY(strings)->data; struct asinfo_entry *as = SYSPAGE_ENTRY(asinfo); while(strcmp(&str[as->name], "imagefs") != 0) { ++as; } base = gdb_image_base(as->start); gdb_clear_reloc_sem(); ksprintf(outbuf,"N%02x%P;%P;%P", (unsigned)signal, base, base, (paddr_t)(base + as->end - as->start + 1)); } else { ksprintf(outbuf,"S%02xk", (unsigned)signal); } for(length=1;outbuf[length];length++) { if((outbuf[length] >= 'A') && (outbuf[length] <='Z')) outbuf[length]=outbuf[length]+('a'-'A'); } if(gdb_debug) gdb_printf("%s", outbuf); break; /* toggle debug flag */ case 'd' : gdb_debug = !(gdb_debug); break; /* return the value of the CPU registers */ case 'g' : /* temp solution, need to add an offset item in kdebug_private for fpu data */ if((kinfo = private->kdebug_info)== NULL || (kprivate = kinfo->kdbg_private) == NULL || (thread = ((void **)kprivate->actives)[0]) == NULL) { gdb_get_cpuregs(ctx,NULL); } else { gdb_get_cpuregs(ctx,thread->fpudata); } break; /* set the value of the CPU registers - return OK */ case 'G' : /* temp solution, need to add an offset item in kdebug_private for fpu data */ if((kinfo = private->kdebug_info)== NULL || (kprivate = kinfo->kdbg_private) == 
NULL || (thread = ((void **)kprivate->actives)[0]) == NULL) { gdb_set_cpuregs(ctx,NULL); } else { gdb_set_cpuregs(ctx,thread->fpudata); } strcpy(outbuf,"OK"); break; /* get target information */ case 'i': gdb_get_info(); break; /* mAA..AA,LLLL Read LLLL bytes at address AA..AA */ case 'm' : gdb_read_membytes(ctx); break; /* MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK */ case 'M' : gdb_write_membytes(ctx); break; /* cAA..AA Continue at address AA..AA(optional) */ case 'c' : gdb_proc_continue(ctx, 0); /* continue the process */ return(TRUE); /* sAA..AA Step one instruction from AA..AA(optional) */ case 's' : gdb_proc_continue(ctx, 1); /* step one instruction */ return(TRUE); /* q???? Generic query */ case 'q': if(memcmp(&inbuf[1], "Rcmd,", 5) == 0) { // remote command char *p; p = &inbuf[6]; hex2mem(p, scratch, strlen(p)); #define MEM_CMD "mem " if(memcmp(scratch, MEM_CMD, sizeof(MEM_CMD)-1) == 0) { monitor_mem(&scratch[sizeof(MEM_CMD)-1]); } } break; /* k Kill program */ case 'k' : putpacket(); /*ACK the packet early (since we're going bye-bye) */ gdb_prep_reboot(); SYSPAGE_ENTRY(callout)->reboot(_syspage_ptr, 0); break; /* D Detach from host */ case 'D' : connected = FALSE; return(FALSE); } /* switch */
int main(int argc, char *argv[]) { messip_channel_t *ch; #if !defined(ONEWAY_MESSAGE) char rec_buff[80]; #endif char snd_buff[80]; int32_t answer; int i; double d; #if defined(__QNXNTO__) uint64_t cps, cycle1, cycle2, ncycles; #else /*!__QNXNTO__ */ struct timespec before, after; #endif /* __QNXNTO__ */ int q = 0; #if 1 struct sched_param param; if ((param.sched_priority=sched_get_priority_max(SCHED_FIFO)) == -1) fprintf (stderr, "sched_get_priority_max(): %s\n", strerror (errno)); if (( sched_setscheduler(0, SCHED_FIFO, ¶m)) == -1) fprintf (stderr, "sched_setscheduler(): %s\n", strerror (errno)); #endif #if defined(__QNXNTO__) /* find out how many cycles per second */ cps = SYSPAGE_ENTRY(qtime)->cycles_per_sec; printf( "This system has %lld cycles/sec.\n",cps ); #endif /* __QNXNTO__ */ do { // Locate the channel where to send message to ch = messip_channel_connect(NULL, (argc < 2) ? "one" : argv[1], MESSIP_NOTIMEOUT); assert(ch != NULL); #if 1 // Send messages for (i = 0; i < SEND_TIMES; i++) { sprintf(snd_buff, "%d", i); #if defined(__QNXNTO__) cycle1=ClockCycles(); #else /*!__QNXNTO__ */ clock_gettime(CLOCK_REALTIME, &before); #endif /* __QNXNTO__ */ #if defined(ONEWAY_MESSAGE) messip_send(ch, 100, 200, (void *) snd_buff, strlen(snd_buff)+1, // Type=100 Subtype=200 &answer, NULL, -1, MESSIP_NOTIMEOUT); #else messip_send(ch, 100, 200, (void *) snd_buff, strlen(snd_buff)+1, // Type=100 Subtype=200 &answer, rec_buff, sizeof(rec_buff), MESSIP_NOTIMEOUT); #endif #if defined(__QNXNTO__) cycle2=ClockCycles(); ncycles=cycle2-cycle1; d=(double)ncycles/cps; usleep(1000); #else /*!__QNXNTO__ */ clock_gettime(CLOCK_REALTIME, &after); d = (after.tv_sec+after.tv_nsec/1e9) -(before.tv_sec+before.tv_nsec/1e9); #endif /* __QNXNTO__ */ #if defined(ONEWAY_MESSAGE) printf("%.9f\n", d); #else printf("%s: %.9f\n", rec_buff, d); #endif } if (i < SEND_TIMES) { perror("messip_send()"); } #endif messip_channel_disconnect(ch, MESSIP_NOTIMEOUT); } while(q++ < 3); return 0; }
int elfcore(int fd, FILE *fp, const char *path, long coresize) { procfs_sysinfo *sysinfo; int sysinfo_len; procfs_info info; procfs_status status; int ret; procfs_mapinfo *mem = NULL, *mapinfos = NULL, *ldd_infos = NULL; int numnote=0, num, i, j, seg = 0, err, n_ldd_infos = 0; Elf32_Ehdr elf; Elf32_Phdr phdr; struct memelfnote notes[20], thread_note; off_t offset = 0, dataoff; uint64_t cur_tid_base = 0, cur_tid_size = 0; if (nodumpmem) { if (-1 == get_ldd_mapinfos(fd, &ldd_infos, &n_ldd_infos)) { /* should we bail out here? */ n_ldd_infos = 0; } } if((ret = devctl(fd, DCMD_PROC_SYSINFO, 0, 0, &sysinfo_len)) != EOK) { errno = ret; goto bailout; } if(sysinfo = alloca(sysinfo_len)) { if((ret = devctl(fd, DCMD_PROC_SYSINFO, sysinfo, sysinfo_len, 0)) != EOK) { errno = ret; goto bailout; } } if((ret = devctl(fd, DCMD_PROC_INFO, &info, sizeof info, 0)) != EOK) { errno = ret; goto bailout; } pagesize = sysconf( _SC_PAGESIZE ); if ( membuf == NULL && ((membuf = malloc( pagesize )) == NULL) ) { goto bailout; } // write elf header memcpy(elf.e_ident, ELFMAG, SELFMAG); elf.e_ident[EI_CLASS] = ELFCLASS32; elf.e_ident[EI_DATA] = ELFDATANATIVE; elf.e_ident[EI_VERSION] = EV_CURRENT; #if defined (__ARM__) elf.e_ident[EI_OSABI] = ELFOSABI_ARM; #endif memset(elf.e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD); if((ret = devctl(fd, DCMD_PROC_PAGEDATA, NULL, 0, &num)) != EOK) { errno = ret; goto bailout; } mapinfos = malloc( num * sizeof *mem ); if ( mapinfos == NULL ) { goto bailout; } if((ret = devctl(fd, DCMD_PROC_PAGEDATA, mapinfos, num*sizeof(*mapinfos), &num)) != EOK) { errno = ret; goto bailout; } mem = malloc( (n_ldd_infos + num) * sizeof(*mem) ); if ( mem == NULL ) { goto bailout; } /* find the offending thread */ for(status.tid = 1; devctl(fd, DCMD_PROC_TIDSTATUS, &status, sizeof status, 0) == EOK; status.tid++) { dprintf(("thread %d.flags is %#x\n", status.tid, status.flags )); if(status.why == _DEBUG_WHY_SIGNALLED) { // This is the faulting thread... 
dprintf(("thread %d is was SIGNALLED\n", status.tid )); cur_tid = status.tid; cur_tid_base = status.stkbase; cur_tid_size = status.stksize; } dprintf(("thread %d.why is %#x\n", status.tid, status.why )); } if(cur_tid == 0) { /* can't find the faulting thread then we need to dump all stack information */ cur_tid_only = 0; } for(seg = 0, i = 0; i < num; i++) { if(!(mapinfos[i].flags & PG_HWMAPPED) || (nodumpmem && !(mapinfos[i].flags & MAP_STACK)) ) { continue; } if ( (nodumpphys && (mapinfos[i].flags & MAP_PHYS) && !(mapinfos[i].flags & MAP_ANON)) ) { continue; } /* if we only want to dump the offending tid's stack */ if(cur_tid_only && (mapinfos[i].flags & MAP_STACK) && !OFFENDING_THREAD(cur_tid_base, cur_tid_size, &mapinfos[i])) { continue; } memcpy(&mem[seg], &mapinfos[i], sizeof(*mem)); seg++; } dprintf(("ldd mapinfos:\n")); for(i = 0; i < n_ldd_infos; i++) { dprintf(("%svaddr=%#llx, offset=%#llx, size=%#llx, flags=%#x\n", ldd_infos[i].flags & PG_HWMAPPED?"*":"", ldd_infos[i].vaddr, ldd_infos[i].offset, ldd_infos[i].size, ldd_infos[i].flags )); memcpy( &mem[seg], &ldd_infos[i], sizeof(*mem));; seg++; } free(mapinfos); mapinfos = NULL; if(n_ldd_infos) { free(ldd_infos); ldd_infos = NULL; } num = seg; elf.e_type = ET_CORE; elf.e_machine = EM_NATIVE; elf.e_version = EV_CURRENT; elf.e_entry = 0; elf.e_phoff = sizeof(elf); elf.e_shoff = 0; #ifdef __SH__ { struct cpuinfo_entry *cpu; cpu = SYSPAGE_ENTRY(cpuinfo); switch ( SH4_PVR_FAM(cpu[0].cpu) ) { case SH4_PVR_SH4A: dprintf(("Noting SH4-A CPU\n")); elf.e_flags = EF_SH4A; break; case SH4_PVR_SH4: default: elf.e_flags = EF_SH4; break; } } #else elf.e_flags = 0; #endif elf.e_ehsize = sizeof(elf); elf.e_phentsize = sizeof(phdr); elf.e_phnum = seg+1; /* xxxx */ elf.e_shentsize = 0; elf.e_shnum = 0; elf.e_shstrndx = 0; if(dump_write( fp, &elf, sizeof elf, &coresize ) == -1) goto bailout; offset += sizeof elf; offset += (elf.e_phnum) * sizeof phdr; if(sysinfo) { // write QNT_CORE_SYSINFO note memset( notes, 0, sizeof notes 
); notes[numnote].name = QNX_NOTE_NAME; notes[numnote].type = QNT_CORE_SYSINFO; notes[numnote].datasz = roundup(sysinfo_len, sizeof (Elf32_Word)); notes[numnote].data = sysinfo; numnote++; } // write QNT_CORE_INFO note notes[numnote].name = QNX_NOTE_NAME; notes[numnote].type = QNT_CORE_INFO; notes[numnote].datasz = sizeof(info); notes[numnote].data = &info; numnote++; /* Write notes phdr entry */ { int sz = 0; memset( &phdr, 0, sizeof phdr ); for(i = 0; i < numnote; i++) sz += notesize(¬es[i]); for(status.tid = 1; devctl(fd, DCMD_PROC_TIDSTATUS, &status, sizeof status, 0) == EOK; status.tid++) { procfs_greg greg; procfs_fpreg fpreg; int size; if ( (err = devctl(fd, DCMD_PROC_CURTHREAD, &status.tid, sizeof status.tid, 0 )) != EOK ) { continue; } if (cur_tid_only && (cur_tid != status.tid)) { continue; } fixup_stack_boundary( &status, mem, seg ); thread_note.name = QNX_NOTE_NAME; thread_note.type = QNT_CORE_STATUS; thread_note.datasz = sizeof(status); thread_note.data = &status; sz += notesize( &thread_note ); if(devctl(fd, DCMD_PROC_GETGREG, &greg, sizeof greg, &size) == EOK) { thread_note.name = QNX_NOTE_NAME; thread_note.type = QNT_CORE_GREG; thread_note.datasz = size; thread_note.data = &greg; sz += notesize( &thread_note ); } if(devctl(fd, DCMD_PROC_GETFPREG, &fpreg, sizeof fpreg, &size) == EOK) { thread_note.name = QNX_NOTE_NAME; thread_note.type = QNT_CORE_FPREG; thread_note.datasz = size; thread_note.data = &fpreg; sz += notesize( &thread_note ); } } phdr.p_type = PT_NOTE; phdr.p_offset = offset; phdr.p_vaddr = 0; phdr.p_paddr = 0; phdr.p_filesz = sz; phdr.p_memsz = 0; phdr.p_flags = 0; phdr.p_align = 0; offset += phdr.p_filesz; if(dump_write( fp, &phdr, sizeof(phdr), &coresize) == -1) goto bailout; } /* Page-align dumped data */ dataoff = offset = roundup(offset, pagesize); for ( i = 0; i < seg; i++ ) { memset( &phdr, 0, sizeof phdr ); phdr.p_type = PT_LOAD; phdr.p_offset = offset; phdr.p_vaddr = mem[i].vaddr; phdr.p_paddr = 0; phdr.p_memsz = mem[i].size; 
phdr.p_flags = PF_W|PF_R; if ( mem[i].flags & MAP_ELF ) phdr.p_flags |= PF_X; phdr.p_align = pagesize; phdr.p_filesz = phdr.p_memsz; offset += phdr.p_filesz; if(dump_write( fp, &phdr, sizeof(phdr), &coresize) == -1) goto bailout; } for(i = 0; i < numnote; i++) { if (!writenote(¬es[i], fp, &coresize )) goto bailout; } for(status.tid = 1; devctl(fd, DCMD_PROC_TIDSTATUS, &status, sizeof status, 0) == EOK; status.tid++) { procfs_greg greg; procfs_fpreg fpreg; int size; if ( devctl(fd, DCMD_PROC_CURTHREAD, &status.tid, sizeof status.tid, 0 ) != EOK ) { continue; } if ( cur_tid == 0 ) cur_tid = status.tid; if (cur_tid_only && (cur_tid != status.tid)) { continue; } else if ( status.tid == cur_tid ) { dprintf(("thread %d is current thread!\n", status.tid )); slog_tid( &status, path ); status.flags |= _DEBUG_FLAG_CURTID; } // write QNT_CORE_STATUS note thread_note.name = QNX_NOTE_NAME; thread_note.type = QNT_CORE_STATUS; thread_note.datasz = sizeof(status); thread_note.data = &status; if ( !writenote( &thread_note, fp, &coresize ) ) goto bailout; if(devctl(fd, DCMD_PROC_GETGREG, &greg, sizeof greg, &size) == EOK) { // write QNT_CORE_GREG note thread_note.name = QNX_NOTE_NAME; thread_note.type = QNT_CORE_GREG; thread_note.datasz = size; thread_note.data = &greg; if ( !writenote( &thread_note, fp, &coresize ) ) goto bailout; } if(devctl(fd, DCMD_PROC_GETFPREG, &fpreg, sizeof fpreg, &size) == EOK) { // write QNT_CORE_FPREG note thread_note.name = QNX_NOTE_NAME; thread_note.type = QNT_CORE_FPREG; thread_note.datasz = size; thread_note.data = &fpreg; if ( !writenote( &thread_note, fp, &coresize ) ) goto bailout; } } dump_seek( fp, dataoff ); for ( j = 0; j < seg; j++ ) { if ( lseek( fd, mem[j].vaddr, SEEK_SET ) == -1 ) goto bailout; if ( mem[j].flags & MAP_STACK ) dump_stack_memory( fd, fp, &mem[j], &coresize ); else if (!nodumpmem) dump_memory( fd, fp, &mem[j], &coresize ); } // Return EOK when accually writing ELF files free(mem); return EOK; bailout: if ( mapinfos != NULL ) { 
free(mapinfos); } if ( ldd_infos != NULL ) { free(ldd_infos); } if ( mem != NULL ) { free(mem); } return errno; }