/**
 * Initialize the deterministic RNG backend by seeding the real
 * (host) PRNG with the configured seed.
 */
void _native_rng_init_det(void)
{
    DEBUG("_native_rng_init_det\n");

    _native_syscall_enter();
    real_srandom(_native_rng_seed);
    _native_syscall_leave();
}
unsigned long hwtimer_arch_now(void) { struct timespec t; DEBUG("hwtimer_arch_now()\n"); _native_syscall_enter(); #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); t.tv_sec = mts.tv_sec; t.tv_nsec = mts.tv_nsec; #else if (real_clock_gettime(CLOCK_MONOTONIC, &t) == -1) { err(EXIT_FAILURE, "hwtimer_arch_now: clock_gettime"); } #endif _native_syscall_leave(); native_hwtimer_now = ts2ticks(&t) - time_null; struct timeval tv; ticks2tv(native_hwtimer_now, &tv); DEBUG("hwtimer_arch_now(): it is now %lu s %lu us\n", (unsigned long)tv.tv_sec, (unsigned long)tv.tv_usec); DEBUG("hwtimer_arch_now(): returning %lu\n", native_hwtimer_now); return native_hwtimer_now; }
/**
 * Accept a pending connection on the UART listening socket and
 * redirect stdin/stdout to the connected peer.
 *
 * Exits the process on any failure. On success the connected
 * descriptor is stored in _native_uart_conn.
 */
void handle_uart_sock(void) /* was: handle_uart_sock() — use a proper prototype */
{
    int s;
    socklen_t t;
    struct sockaddr remote;

    t = sizeof(remote);

    _native_syscall_enter();

    if ((s = accept(_native_uart_sock, &remote, &t)) == -1) {
        err(EXIT_FAILURE, "handle_uart_sock: accept");
    }
    else {
        warnx("handle_uart_sock: successfully accepted socket");
    }

    /* distinguish the two dup2 failure sites in the error message */
    if (dup2(s, STDOUT_FILENO) == -1) {
        err(EXIT_FAILURE, "handle_uart_sock: dup2(STDOUT)");
    }
    if (dup2(s, STDIN_FILENO) == -1) {
        err(EXIT_FAILURE, "handle_uart_sock: dup2(STDIN)");
    }

    _native_syscall_leave();

    _native_uart_conn = s;
}
/** * empty signal mask */ int unregister_interrupt(int sig) { DEBUG("XXX: unregister_interrupt()\n"); _native_syscall_enter(); if (sigaddset(&_native_sig_set, sig) == -1) { err(EXIT_FAILURE, "unregister_interrupt: sigaddset"); } native_irq_handlers[sig] = NULL; /* reset signal handler for sig */ struct sigaction sa; sa.sa_handler = SIG_IGN; /* there may be late signals, so we need to ignore those */ sa.sa_mask = _native_sig_set_dint; sa.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK; if (sigaction(sig, &sa, NULL)) { err(EXIT_FAILURE, "unregister_interrupt: sigaction"); } /* change sigmask for remaining signal handlers */ sa.sa_sigaction = native_isr_entry; for (int i = 0; i < 255; i++) { if (native_irq_handlers[i] != NULL) { if (sigaction(sig, &sa, NULL)) { err(EXIT_FAILURE, "register_interrupt: sigaction"); } } } _native_syscall_leave(); return 0; }
/**
 * Return the current value of the given timer in ticks.
 *
 * Out-of-range timer devices read as 0. The host's monotonic clock is
 * sampled (Mach clock service on OS X, clock_gettime elsewhere) and
 * converted to ticks relative to time_null.
 */
unsigned int timer_read(tim_t dev)
{
    if (dev >= TIMER_NUMOF) {
        return 0;
    }

    struct timespec ts;

    DEBUG("timer_read()\n");

    _native_syscall_enter();
#ifdef __MACH__
    clock_serv_t clk;
    mach_timespec_t mt;
    host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &clk);
    clock_get_time(clk, &mt);
    mach_port_deallocate(mach_task_self(), clk);
    ts.tv_sec = mt.tv_sec;
    ts.tv_nsec = mt.tv_nsec;
#else
    if (real_clock_gettime(CLOCK_MONOTONIC, &ts) == -1) {
        err(EXIT_FAILURE, "timer_read: clock_gettime");
    }
#endif
    _native_syscall_leave();

    return ts2ticks(&ts) - time_null;
}
/**
 * unblock signals
 *
 * Restores the runtime signal mask (_native_sig_set), allowing pending
 * "interrupts" (signals) to be delivered again.
 *
 * @return the previous IRQ-enabled state (0 or 1)
 */
unsigned enableIRQ(void)
{
    unsigned int prev_state;

    /* warn when IRQs are re-enabled from inside an ISR; DEVELHELP builds
     * write directly to stderr so the message survives even if DEBUG is off */
    if (_native_in_isr == 1) {
#ifdef DEVELHELP
        real_write(STDERR_FILENO, "enableIRQ + _native_in_isr\n", 27);
#else
        DEBUG("enableIRQ + _native_in_isr\n");
#endif
    }

    _native_syscall_enter();
    DEBUG("enableIRQ()\n");

    /* Mark the IRQ as enabled first since sigprocmask could call the handler
     * before returning to userspace.
     */
    prev_state = native_interrupts_enabled;
    native_interrupts_enabled = 1;

    if (sigprocmask(SIG_SETMASK, &_native_sig_set, NULL) == -1) {
        err(EXIT_FAILURE, "enableIRQ: sigprocmask");
    }

    _native_syscall_leave();

    DEBUG("enableIRQ(): return\n");

    return prev_state;
}
/**
 * register signal/interrupt handler for signal sig
 *
 * The signal is removed from the blocked set so it can be delivered
 * while IRQs are enabled, and every registered handler is re-installed
 * with the current dINT signal mask.
 *
 * TODO: use appropriate data structure for signal
 * handlers.
 *
 * @return 0 on success (exits the process on failure)
 */
int register_interrupt(int sig, _native_callback_t handler)
{
    DEBUG("register_interrupt()\n");

    _native_syscall_enter();

    if (sigdelset(&_native_sig_set, sig)) {
        err(EXIT_FAILURE, "register_interrupt: sigdelset");
    }
    native_irq_handlers[sig] = handler;

    /* set current dINT sigmask for all signals */
    struct sigaction sa;
    sa.sa_sigaction = native_isr_entry;
    sa.sa_mask = _native_sig_set_dint;
    sa.sa_flags = SA_RESTART | SA_SIGINFO | SA_ONSTACK;
    for (int i = 0; i < 255; i++) {
        if (native_irq_handlers[i] != NULL) {
            /* BUGFIX: was sigaction(sig, ...) — each registered signal i
             * must be (re-)installed, not the new signal 255 times */
            if (sigaction(i, &sa, NULL)) {
                err(EXIT_FAILURE, "register_interrupt: sigaction");
            }
        }
    }

    _native_syscall_leave();

    return 0;
}
/** * unblock signals */ unsigned enableIRQ(void) { unsigned int prev_state; if (_native_in_isr == 1) { #if DEVELHELP real_write(STDERR_FILENO, "enableIRQ + _native_in_isr\n", 27); #else DEBUG("enableIRQ + _native_in_isr\n"); #endif } _native_syscall_enter(); DEBUG("enableIRQ()\n"); if (sigprocmask(SIG_SETMASK, &_native_sig_set, NULL) == -1) { err(EXIT_FAILURE, "enableIRQ(): sigprocmask()"); } prev_state = native_interrupts_enabled; native_interrupts_enabled = 1; _native_syscall_leave(); DEBUG("enableIRQ(): return\n"); return prev_state; }
/**
 * realloc() override: forwards to the host's real_realloc inside a
 * syscall-safe region.
 */
void *realloc(void *ptr, size_t size)
{
    _native_syscall_enter();
    void *res = real_realloc(ptr, size);
    _native_syscall_leave();
    return res;
}
/**
 * malloc() override: forwards to the host's real_malloc inside a
 * syscall-safe region.
 */
void *malloc(size_t size)
{
    _native_syscall_enter();
    void *res = real_malloc(size);
    _native_syscall_leave();
    return res;
}
/**
 * set next_timer to the next lowest enabled timer index
 *
 * Scans the native hwtimer slots, selects the soonest pending one and
 * arms the host interval timer (ITIMER_REAL) for it. When no timer is
 * pending, the host timer is disarmed instead.
 */
void schedule_timer(void)
{
    /* try to find *an active* timer */
    next_timer = -1;
    for (int i = 0; i < HWTIMER_MAXTIMERS; i++) {
        if (native_hwtimer_isset[i] == 1) {
            next_timer = i;
            break;
        }
    }
    if (next_timer == -1) {
        DEBUG("schedule_timer(): no valid timer found - nothing to schedule\n");
        /* nothing pending: disarm the host interval timer entirely */
        struct itimerval null_timer;
        null_timer.it_interval.tv_sec = 0;
        null_timer.it_interval.tv_usec = 0;
        null_timer.it_value.tv_sec = 0;
        null_timer.it_value.tv_usec = 0;
        if (real_setitimer(ITIMER_REAL, &null_timer, NULL) == -1) {
            err(EXIT_FAILURE, "schedule_timer: setitimer");
        }
        return;
    }

    /* find the next pending timer (next_timer now points to *a* valid pending timer) */
    for (int i = 0; i < HWTIMER_MAXTIMERS; i++) {
        if (
                (native_hwtimer_isset[i] == 1) &&
                (tv2ticks(&(native_hwtimer[i].it_value)) < tv2ticks(&(native_hwtimer[next_timer].it_value)))
           ) {
            /* timer in slot i is active and the timeout is more recent than next_timer */
            next_timer = i;
        }
    }

    /* next pending timer is in slot next_timer */
    struct timeval now;
    hwtimer_arch_now(); /* update timer (refreshes native_hwtimer_now) */
    ticks2tv(native_hwtimer_now, &now);

    struct itimerval result;
    memset(&result, 0, sizeof(result));

    /* compute remaining time until the selected timer's deadline;
     * timeval_subtract returns nonzero when the deadline already passed */
    int retval = timeval_subtract(&result.it_value, &native_hwtimer[next_timer].it_value, &now);
    if (retval || (tv2ticks(&result.it_value) < HWTIMERMINOFFSET)) {
        DEBUG("\033[31mschedule_timer(): timer is already due (%i), mitigating.\033[0m\n", next_timer);
        /* overdue (or too close to schedule accurately): fire as soon as
         * possible — setitimer with an all-zero value would disarm instead */
        result.it_value.tv_sec = 0;
        result.it_value.tv_usec = 1;
    }

    _native_syscall_enter();
    if (real_setitimer(ITIMER_REAL, &result, NULL) == -1) {
        err(EXIT_FAILURE, "schedule_timer: setitimer");
    }
    else {
        DEBUG("schedule_timer(): set next timer (%i).\n", next_timer);
    }
    _native_syscall_leave();
}
/**
 * writev() wrapper: performs the real scatter-gather write inside a
 * syscall-safe region.
 */
ssize_t _native_writev(int fd, const struct iovec *iov, int iovcnt)
{
    _native_syscall_enter();
    ssize_t written = real_writev(fd, iov, iovcnt);
    _native_syscall_leave();
    return written;
}
/**
 * Initialize the high-quality RNG backend by opening /dev/random.
 * Exits the process if the device cannot be opened; the descriptor is
 * kept in the global dev_random.
 */
void _native_rng_init_hq(void)
{
    DEBUG("_native_rng_init_hq\n");

    _native_syscall_enter();
    dev_random = real_open("/dev/random", O_RDONLY);
    if (dev_random == -1) {
        err(EXIT_FAILURE, "_native_rng_init_hq: open(/dev/random)");
    }
    _native_syscall_leave();
}
/**
 * write() wrapper: performs the real write inside a syscall-safe region.
 */
ssize_t _native_write(int fd, const void *buf, size_t count)
{
    _native_syscall_enter();
    ssize_t written = real_write(fd, buf, count);
    _native_syscall_leave();
    return written;
}
/**
 * read() wrapper: performs the real read inside a syscall-safe region.
 */
ssize_t _native_read(int fd, void *buf, size_t count)
{
    _native_syscall_enter();
    ssize_t nread = real_read(fd, buf, count);
    _native_syscall_leave();
    return nread;
}
/** * Add or remove handler for signal * * To be called with interrupts disabled * */ void set_signal_handler(int sig, bool add) { struct sigaction sa; int ret; /* update the signal mask so enableIRQ()/disableIRQ() will be aware */ if (add) { _native_syscall_enter(); ret = sigdelset(&_native_sig_set, sig); _native_syscall_leave(); } else { _native_syscall_enter(); ret = sigaddset(&_native_sig_set, sig); _native_syscall_leave(); } if (ret == -1) { err(EXIT_FAILURE, "set_signal_handler: sigdelset"); } memset(&sa, 0, sizeof(sa)); /* Disable other signal during execution of the handler for this signal. */ memcpy(&sa.sa_mask, &_native_sig_set_dint, sizeof(sa.sa_mask)); /* restart interrupted systems call and custom signal stack */ sa.sa_flags = SA_RESTART | SA_ONSTACK; if (add) { sa.sa_flags |= SA_SIGINFO; /* sa.sa_sigaction is used */ sa.sa_sigaction = native_isr_entry; } else { sa.sa_handler = SIG_IGN; } _native_syscall_enter(); if (sigaction(sig, &sa, NULL)) { err(EXIT_FAILURE, "set_signal_handler: sigaction"); } _native_syscall_leave(); }
/**
 * Fill *time with the host's wall-clock time (only when the native RTC
 * is enabled) and return its seconds component.
 *
 * NOTE(review): when the RTC is disabled, *time is left untouched and
 * its tv_sec is returned as-is — callers must pass an initialized struct.
 */
time_t rtc_time(struct timeval *time)
{
    if (native_rtc_enabled == 1) {
        _native_syscall_enter();
        if (gettimeofday(time, NULL) == -1) {
            err(EXIT_FAILURE, "rtc_time: gettimeofday");
        }
        _native_syscall_leave();
    }

    return time->tv_sec;
}
/**
 * Fill buf with num pseudo-random bytes from the host's seeded PRNG.
 *
 * Each byte is produced by one real_random() call performed inside its
 * own syscall-safe region.
 *
 * @return the number of bytes written (always num)
 */
unsigned _native_rng_read_det(char *buf, unsigned num)
{
    DEBUG("_native_rng_read_det\n");

    unsigned pos = 0;
    while (pos < num) {
        _native_syscall_enter();
        buf[pos] = (char)real_random();
        _native_syscall_leave();
        pos++;
    }

    return num;
}
/**
 * Fill *localt with the host's current local time (broken-down form).
 * Does nothing when the native RTC is disabled.
 */
void rtc_get_localtime(struct tm *localt)
{
    if (native_rtc_enabled != 1) {
        return;
    }

    _native_syscall_enter();
    time_t now = time(NULL);
    if (localtime_r(&now, localt) == NULL) {
        err(EXIT_FAILURE, "rtc_get_localtime: localtime_r");
    }
    _native_syscall_leave();
}
/**
 * calloc() override: forwards to the host's real_calloc inside a
 * syscall-safe region.
 */
void *calloc(size_t nmemb, size_t size)
{
    /* XXX: This is a dirty hack to enable old dlsym versions to run.
     * Throw it out when Ubuntu 12.04 support runs out (in 2017-04)! */
    if (real_calloc == NULL) {
        return NULL;
    }

    _native_syscall_enter();
    void *res = real_calloc(nmemb, size);
    _native_syscall_leave();
    return res;
}
/**
 * Fill buf with num bytes read from /dev/random (opened by
 * _native_rng_init_hq). Loops over short reads; exits the process on
 * a read error.
 *
 * @return the number of bytes written (always num on success)
 */
unsigned _native_rng_read_hq(char *buf, unsigned num)
{
    DEBUG("_native_rng_read_hq\n");

    unsigned offset = 0;

    while (num > 0) {
        _native_syscall_enter();
        /* was: int r — read(2) returns ssize_t */
        ssize_t r = real_read(dev_random, (buf + offset), num);
        _native_syscall_leave();
        if (r == -1) {
            err(EXIT_FAILURE, "_native_rng_read_hq: read");
        }
        num -= r;
        offset += r;
    }

    return offset;
}
/**
 * Arm the host interval timer to fire once after offset microseconds.
 * Offsets below NATIVE_TIMER_MIN_RES (but nonzero) are rounded up to
 * the minimum resolution; the value is stored in the global itv.
 */
static void do_timer_set(unsigned int offset)
{
    DEBUG("%s\n", __func__);

    if (offset && offset < NATIVE_TIMER_MIN_RES) {
        offset = NATIVE_TIMER_MIN_RES;
    }

    memset(&itv, 0, sizeof(itv));
    itv.it_value.tv_sec = (offset / 1000000);
    itv.it_value.tv_usec = offset % 1000000;

    DEBUG("timer_set(): setting %u.%06u\n",
          (unsigned)itv.it_value.tv_sec, (unsigned)itv.it_value.tv_usec);

    _native_syscall_enter();
    if (real_setitimer(ITIMER_REAL, &itv, NULL) == -1) {
        err(EXIT_FAILURE, "timer_arm: setitimer");
    }
    _native_syscall_leave();
}
/**
 * block signals
 *
 * Installs the dINT signal mask so no further "interrupts" (signals)
 * are delivered until enableIRQ() is called.
 *
 * @return the previous IRQ-enabled state (0 or 1)
 */
unsigned disableIRQ(void)
{
    unsigned int prev_state;

    _native_syscall_enter();
    DEBUG("disableIRQ()\n");

    if (_native_in_isr == 1) {
        DEBUG("disableIRQ + _native_in_isr\n");
    }

    /* block first, then clear the flag — once the mask is installed no
     * handler can observe an inconsistent state */
    if (sigprocmask(SIG_SETMASK, &_native_sig_set_dint, NULL) == -1) {
        err(EXIT_FAILURE, "disableIRQ: sigprocmask");
    }
    prev_state = native_interrupts_enabled;
    native_interrupts_enabled = 0;

    DEBUG("disableIRQ(): return\n");
    _native_syscall_leave();

    return prev_state;
}
/**
 * calloc() override: forwards to the host's real_calloc inside a
 * syscall-safe region, resolving the real symbol lazily on first use.
 */
void *calloc(size_t nmemb, size_t size)
{
    /* dynamically load calloc when it's needed - this is necessary to
     * support profiling as it uses calloc before startup runs */
    if (!real_calloc) {
        if (_native_in_calloc) {
            /* XXX: This is a dirty hack to enable old dlsym versions to run.
             * Throw it out when Ubuntu 12.04 support runs out (in 2017-04)!
             * (dlsym itself may call calloc; the reentrancy guard breaks the
             * recursion by returning NULL for that inner allocation) */
            return NULL;
        }
        else {
            /* guard against recursion while dlsym resolves the symbol */
            _native_in_calloc = 1;
            *(void **)(&real_calloc) = dlsym(RTLD_NEXT, "calloc");
            _native_in_calloc = 0;
        }
    }

    void *r;
    _native_syscall_enter();
    r = real_calloc(nmemb, size);
    _native_syscall_leave();
    return r;
}
/**
 * malloc() override: forwards to the host's real_malloc inside a
 * syscall-safe region, resolving the real symbol lazily on first use.
 */
void *malloc(size_t size)
{
    /* dynamically load malloc when it's needed - this is necessary to
     * support g++ 5.2.0 as it uses malloc before startup runs */
    if (!real_malloc) {
        if (_native_in_malloc) {
            /* XXX: This is a dirty hack for behaviour that came along
             * with g++ 5.2.0.
             * Throw it out when whatever made it necessary it is fixed.
             * (dlsym may call malloc; the reentrancy guard breaks the
             * recursion by returning NULL for that inner allocation) */
            return NULL;
        }
        else {
            /* guard against recursion while dlsym resolves the symbol */
            _native_in_malloc = 1;
            *(void **)(&real_malloc) = dlsym(RTLD_NEXT, "malloc");
            _native_in_malloc = 0;
        }
    }

    void *r;
    _native_syscall_enter();
    r = real_malloc(size);
    _native_syscall_leave();
    return r;
}
/**
 * free() override: releases memory via the host's real_free inside a
 * syscall-safe region.
 */
void free(void *ptr)
{
    _native_syscall_enter();
    real_free(ptr);
    _native_syscall_leave();
}