// Sets the processor affinity
//
// Mathilda works by breaking up sets of Instructions to be
// executed by child processes. This function is called
// by child processes to bind them to a specific cpu
//
// @param[in] CPU number to bind to
// @return Returns OK or ERR from sched_setaffinity
int MathildaFork::set_affinity(uint32_t c) {
	if(c >= cores) {
		c = 0;
	}

	cpu_set_t cpus;
	CPU_ZERO(&cpus);
	CPU_SET(c, &cpus);

	int ret = sched_setaffinity(0, sizeof(cpus), &cpus);

	core = c;

#ifdef DEBUG
	if(ret == ERR) {
		fprintf(stdout, "[MathildaFork] Failed to bind process %d to CPU %d. Cache invalidation may occur!\n", getpid(), c);
	} else {
		fprintf(stdout, "[MathildaFork] Child (pid: %d) successfully bound to CPU %d\n", getpid(), c);
	}
#endif

	return ret;
}
/**
 * set_affinity
 *
 * When loading or unloading a system-wide context, we must pin the pfmsetup
 * process to that CPU before making the system call. Also, get the current
 * affinity and return it to the caller so we can change it back later.
 **/
static int set_affinity(int cpu, cpu_set_t *old_cpu_set)
{
    cpu_set_t new_cpu_set;
    int rc;

    rc = sched_getaffinity(0, sizeof(*old_cpu_set), old_cpu_set);
    if (rc) {
        rc = errno;
        LOG_ERROR("Can't get current process affinity mask: %d\n", rc);
        return rc;
    }

    CPU_ZERO(&new_cpu_set);
    CPU_SET(cpu, &new_cpu_set);

    rc = sched_setaffinity(0, sizeof(new_cpu_set), &new_cpu_set);
    if (rc) {
        rc = errno;
        LOG_ERROR("Can't set process affinity to CPU %d: %d\n", cpu, rc);
        return rc;
    }

    return 0;
}
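The example above depends on pfmsetup's LOG_ERROR macro. Below is a minimal standalone sketch of the same save-pin-restore pattern, assuming only glibc (_GNU_SOURCE for the CPU_* macros and sched_getcpu()); the run_pinned and work names are invented for illustration and error handling is reduced to perror.

/* Minimal standalone sketch (not pfmsetup code): pin to one CPU, run a
 * callback, then restore the original affinity mask. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int run_pinned(int cpu, void (*work)(void))
{
    cpu_set_t old_set, new_set;

    /* Remember the current mask so it can be restored afterwards. */
    if (sched_getaffinity(0, sizeof(old_set), &old_set) < 0) {
        perror("sched_getaffinity");
        return -1;
    }

    CPU_ZERO(&new_set);
    CPU_SET(cpu, &new_set);
    if (sched_setaffinity(0, sizeof(new_set), &new_set) < 0) {
        perror("sched_setaffinity");
        return -1;
    }

    printf("now running on CPU %d\n", sched_getcpu());
    work();

    /* Put the original affinity back. */
    return sched_setaffinity(0, sizeof(old_set), &old_set);
}

static void work(void) { /* placeholder for the CPU-bound section */ }

int main(void) { return run_pinned(0, work) < 0; }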
PETSC_EXTERN PetscErrorCode PetscThreadCommCreate_OpenMP(PetscThreadComm tcomm)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscStrcpy(tcomm->type,OPENMP);CHKERRQ(ierr);
  tcomm->ops->runkernel = PetscThreadCommRunKernel_OpenMP;
  tcomm->ops->getrank   = PetscThreadCommGetRank_OpenMP;
#pragma omp parallel num_threads(tcomm->nworkThreads) shared(tcomm)
  {
#if defined(PETSC_HAVE_SCHED_CPU_SET_T)
    cpu_set_t mset;
    PetscInt  ncores, icorr, trank;
    PetscGetNCores(&ncores);
    CPU_ZERO(&mset);
    trank = omp_get_thread_num();
    icorr = tcomm->affinities[trank]%ncores;
    CPU_SET(icorr,&mset);
    sched_setaffinity(0,sizeof(cpu_set_t),&mset);
#endif
  }
  PetscFunctionReturn(0);
}
int set_cpu_affinity(pid_t pid, unsigned long new_mask)
{
    unsigned long cur_mask;
    unsigned int len = sizeof(new_mask);

    if (sched_getaffinity(pid, len, (cpu_set_t *) &cur_mask) < 0) {
        perror("sched_getaffinity");
        return -1;
    }
    printf("pid %d's old affinity: %08lx\n", pid, cur_mask);

    if (sched_setaffinity(pid, len, (cpu_set_t *) &new_mask)) {
        perror("sched_setaffinity");
        return -1;
    }

    if (sched_getaffinity(pid, len, (cpu_set_t *) &cur_mask) < 0) {
        perror("sched_getaffinity");
        return -1;
    }
    printf(" pid %d's new affinity: %08lx\n", pid, cur_mask);

    return 0;
}
static void PtyReader_28979140(PtyReader_28979140_Arg* arg) {
  arg->finished = false;
  cpu_set_t cpus;
  ASSERT_EQ(0, sched_getaffinity(0, sizeof(cpu_set_t), &cpus));
  CPU_CLR(arg->main_cpu_id, &cpus);
  ASSERT_EQ(0, sched_setaffinity(0, sizeof(cpu_set_t), &cpus));

  uint32_t counter = 0;
  while (counter <= arg->data_count) {
    char buf[4096];  // Use big buffer to read to hit the bug more easily.
    size_t to_read = std::min(sizeof(buf), (arg->data_count + 1 - counter) * sizeof(uint32_t));
    ASSERT_TRUE(android::base::ReadFully(arg->slave_fd, buf, to_read));
    size_t num_of_value = to_read / sizeof(uint32_t);
    uint32_t* p = reinterpret_cast<uint32_t*>(buf);
    while (num_of_value-- > 0) {
      if (*p++ != counter++) {
        arg->matched = false;
      }
    }
  }
  close(arg->slave_fd);
  arg->finished = true;
}
static void __binding_cpu(void)
{
    int curr_cpu_max = __cpus_nums();

    srand(time(NULL));
    int num = rand() % curr_cpu_max;
    while(!num) {
        num = rand() % curr_cpu_max;
    }
    log_debug("CPU: %d\n", num);

    cpu_set_t mask;
    __CPU_ZERO(&mask);
    __CPU_SET(num, &mask);

    sched_setaffinity(0, sizeof(cpu_set_t), &mask);
}
int main(int argc, char **argv)
{
    cpu_set_t mask;
    timespec start, end;

    list = NULL;

    CPU_ZERO(&mask);
    CPU_SET(0, &mask);
    sched_setaffinity(0, sizeof(mask), &mask);

    pgsz = sysconf(_SC_PAGESIZE);
    allocatedsize = 0;
    counter = 0;

    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &start);
    allocate();
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &end);

    print_time_diff(start, end);

    return 0;
}
int main() {
  int64_t value = -1;

  // Lock to cpu0
  cpu_set_t mask;
  CPU_ZERO(&mask);
  CPU_SET(0, &mask);
  assert(!sched_setaffinity(0, sizeof(mask), &mask));

  while (1) {
    int fd = start_counter();
    timing_computation();
    ssize_t nread = read(fd, &value, sizeof(value));
    assert(nread == sizeof(value));
    close(fd);
    printf("%lld\n", value);
  }
  return 0;
}
RhsRobotBase::RhsRobotBase()    //Constructor
{
    cpu_set_t mask;
    //struct sched_param param;

    printf("\n\t\t%s \"%s\"\n\tVersion %s built %s at %s\n\n",
           ROBOT_NAME, ROBOT_NICKNAME, ROBOT_VERSION, __DATE__, __TIME__);

    // run our code on the second core
    CPU_ZERO(&mask);
    CPU_SET(1, &mask);
    sched_setaffinity(0, sizeof(mask), &mask);

    //param.sched_priority = 10;
    //printf("did this work %d\n", sched_setscheduler(0, SCHED_FIFO, &param));
    // what are our priority limits?

    previousRobotState = ROBOT_STATE_UNKNOWN;
    currentRobotState = ROBOT_STATE_UNKNOWN;

    SmartDashboard::init();

    loop = 0;    //Initializes the loop counter
}
void
__upc_affinity_set (upc_info_p u, int thread_id)
{
  const upc_thread_info_p tinfo = &u->thread_info[thread_id];
  switch (u->sched_policy)
    {
    case GUPCR_SCHED_POLICY_CPU:
    case GUPCR_SCHED_POLICY_CPU_STRICT:
      {
        const int sched_affinity = tinfo->sched_affinity;
        cpu_set_t set;
        CPU_ZERO (&set);
        CPU_SET (sched_affinity, &set);
        if (sched_setaffinity (0, sizeof (set), &set))
          {
            __upc_fatal ("Scheduling cannot be set");
          }
      }
      break;
    case GUPCR_SCHED_POLICY_NODE:
      __upc_numa_sched_set (u, thread_id);
      break;
    default:
      /* auto - no scheduling support */
      break;
    }
  /* set memory policy only if we are not AUTO scheduling */
  if ((u->sched_policy != GUPCR_SCHED_POLICY_AUTO)
      && (u->mem_policy != GUPCR_MEM_POLICY_AUTO))
    __upc_numa_memory_affinity_set (u, thread_id);
#ifdef DEBUG_AFFINITY
  printf ("affinity: %d (%s,%s) scheduling (%d,%d)\n", thread_id,
          upc_sched_policy_to_string (u->sched_policy),
          upc_mem_policy_to_string (u->mem_policy),
          tinfo->sched_affinity, tinfo->mem_affinity);
#endif /* DEBUG_AFFINITY */
}
int worker(int cpuid)
{
    /***********************************************/
    /*   Ensure we are running on the cpu          */
    /*   specified.                                */
    /***********************************************/
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(cpuid, &mask);
    if (sched_setaffinity(getpid(), 64, &mask) < 0) {
        perror("sched_setaffinity");
        // exit(1);
    }

    /***********************************************/
    /*   Now we are locked to a cpu.               */
    /***********************************************/

    /***********************************************/
    /*   Wait for master to give us the "go"       */
    /*   signal.                                   */
    /***********************************************/
    while (data[cpuid] == 0)
        ;

    /***********************************************/
    /*   Let master know we saw it.                */
    /***********************************************/
    data[cpuid] = 2;

    /***********************************************/
    /*   Wait for master to see our response.      */
    /***********************************************/
    while (data[cpuid] == 2)
        ;

    exit(0);
}
void *thread_fn(void *param)
{
	struct thread_param *tp = param;
	struct timespec ts;
	int rc;
	unsigned long mask = 1 << tp->cpu;

	rc = sched_setaffinity(0, sizeof(mask), &mask);
	if (rc < 0) {
		EPRINTF("UNRESOLVED: Thread %s index %d: Can't set affinity: "
			"%d %s", tp->name, tp->index, rc, strerror(rc));
		exit(UNRESOLVED);
	}

	test_set_priority(pthread_self(), SCHED_FIFO, tp->priority);
	DPRINTF(stdout, "#EVENT %f %s Thread Started\n",
		seconds_read() - base_time, tp->name);
	tp->progress = 0;
	ts.tv_sec = 0;
	ts.tv_nsec = tp->sleep_ms * 1000 * 1000;

	while (!tp->stop) {
		do_work(5, &tp->progress);
		if (tp->sleep_ms == 0)
			continue;
		rc = nanosleep(&ts, NULL);
		if (rc < 0) {
			EPRINTF("UNRESOLVED: Thread %s %d: nanosleep returned "
				"%d %s \n", tp->name, tp->index, rc, strerror(rc));
			exit(UNRESOLVED);
		}
	}

	DPRINTF(stdout, "#EVENT %f %s Thread Stopped\n",
		seconds_read() - base_time, tp->name);
	return NULL;
}
void *incrementer(void *arg)
{
	int i;
	int proc_num = *(int *)arg;
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(proc_num, &set);

	if (sched_setaffinity(gettid(), sizeof(cpu_set_t), &set)) {
		perror("sched_setaffinity");
		return NULL;
	}

	for (i = 0; i < INC_TO; i++) {
#if defined _USE_ATOMIC
		__sync_fetch_and_add(&global_int, 1);
#elif defined _USE_GTM
		__transaction_atomic {
			global_int++;
		}
#elif defined _USE_MUTEX
		pthread_mutex_lock(&mutex);
		global_int++;
		pthread_mutex_unlock(&mutex);
#elif defined _USE_SPIN
		pthread_spin_lock(&spinlock);
		global_int++;
		pthread_spin_unlock(&spinlock);
#else
		global_int++;
#endif
	}

	return NULL;
}
// Bind the current process to the CPU cores whose bits are set to 1 in cpu_affinity.
// cpu_affinity[in]: 1 binds to CPU 0, 2 binds to CPU 1, 4 binds to CPU 2, and so on.
// Multiple CPUs can be selected at once: for example, 3 binds to CPUs 0 and 1,
// and 5 binds to CPUs 0 and 2.
void
ngx_setaffinity(uint64_t cpu_affinity, ngx_log_t *log)
{
    cpu_set_t   mask;
    ngx_uint_t  i;

    ngx_log_error(NGX_LOG_NOTICE, log, 0,
                  "sched_setaffinity(0x%08Xl)", cpu_affinity);

    CPU_ZERO(&mask);
    i = 0;
    do {
        if (cpu_affinity & 1) {
            CPU_SET(i, &mask);
        }
        i++;
        cpu_affinity >>= 1;
    } while (cpu_affinity);

    if (sched_setaffinity(0, sizeof(cpu_set_t), &mask) == -1) {
        ngx_log_error(NGX_LOG_ALERT, log, ngx_errno,
                      "sched_setaffinity() failed");
    }
}
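For reference, ngx_setaffinity() takes a plain bitmask: a value of 0x5, for example, selects CPUs 0 and 2. Below is a standalone sketch of the same bitmask-to-cpu_set_t conversion outside of nginx; the bind_mask helper name is invented for illustration and error handling is reduced to perror.

/* Hypothetical standalone helper mirroring ngx_setaffinity()'s loop:
 * every set bit i in `mask` selects CPU i. Not part of nginx. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <stdio.h>

static int bind_mask(uint64_t mask)
{
    cpu_set_t set;
    unsigned i = 0;

    CPU_ZERO(&set);
    do {
        if (mask & 1)
            CPU_SET(i, &set);
        i++;
        mask >>= 1;
    } while (mask);

    return sched_setaffinity(0, sizeof(set), &set);
}

int main(void)
{
    /* 0x5 = binary 101: allow CPUs 0 and 2 only. */
    if (bind_mask(0x5) < 0)
        perror("sched_setaffinity");
    return 0;
}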
void *threadFunction(void *arg)
{
    int threadID = *((int*)arg);

#ifdef USE_AFFINITY
    int cpu_id = threadID % 4;
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(cpu_id, &mask);
    sched_setaffinity(0, sizeof(mask), &mask);
#endif

    int matrixNum = 0;
    while (1) {
        HMap detectedEvents[4];
        AMap cops;
        AMap criminals;

        pthread_mutex_lock(&matrixFileMutex);
        matrixNum = fMatrixReader(detectedEvents);
        pthread_mutex_unlock(&matrixFileMutex);

        if (!matrixNum) {
            pthread_exit(0);
        }

        fGetMovement(detectedEvents[0], detectedEvents[1], cops);
        fGetMovement(detectedEvents[2], detectedEvents[3], criminals);
        fDetectCrucialEvent(cops, criminals, matrixNum);
    }
    pthread_exit(0);
}
/**
 * Try to pin the process to a specific CPU. Returns the CPU we are
 * currently running on.
 */
static int pin_to_cpu(int run_cpu)
{
	cpu_set_t *cpusetp;
	size_t size;
	int num_cpus;

	num_cpus = CPU_SETSIZE; /* take default, currently 1024 */
	cpusetp = CPU_ALLOC(num_cpus);
	if (cpusetp == NULL)
		return sched_getcpu();

	size = CPU_ALLOC_SIZE(num_cpus);

	CPU_ZERO_S(size, cpusetp);
	CPU_SET_S(run_cpu, size, cpusetp);
	if (sched_setaffinity(0, size, cpusetp) < 0) {
		CPU_FREE(cpusetp);
		return sched_getcpu();
	}

	/* figure out on which cpus we actually run */
	CPU_FREE(cpusetp);
	return run_cpu;
}
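The example above sizes the dynamic set with the CPU_SETSIZE default (currently 1024). A variant could size it from the configured processor count instead; the sketch below assumes glibc's CPU_ALLOC family and sysconf(_SC_NPROCESSORS_CONF), and the pin_to_cpu_dynamic name is made up for illustration.

/* Sketch: size the dynamic CPU set from the configured processor count
 * instead of the fixed CPU_SETSIZE default. Not from the project above. */
#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>

int pin_to_cpu_dynamic(int run_cpu)
{
    long num_cpus = sysconf(_SC_NPROCESSORS_CONF);
    if (num_cpus < 1)
        num_cpus = CPU_SETSIZE;        /* fall back to the static limit */

    cpu_set_t *setp = CPU_ALLOC(num_cpus);
    if (setp == NULL)
        return -1;

    size_t size = CPU_ALLOC_SIZE(num_cpus);
    CPU_ZERO_S(size, setp);
    CPU_SET_S(run_cpu, size, setp);

    int rc = sched_setaffinity(0, size, setp);
    CPU_FREE(setp);
    return rc;
}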
/*
******************************************************************************
 SUBROUTINE: set_affinity

 Set this process to run on the input processor number.
 The processor numbers start with 0 going to N-1 processors.
******************************************************************************
*/
int set_affinity(int processor)
{
    extern int sched_getaffinity();
    extern int sched_setaffinity();
    unsigned long new_mask;
    unsigned int len = sizeof(new_mask);
    unsigned long cur_mask;
    pid_t p = 0;
    int ret;

    new_mask = 1<<(processor);
    //printf("set_affinity: %ld\n", new_mask);

    ret = sched_getaffinity(p, len, &cur_mask);
    // printf("sched_getaffinity = %d, cur_mask = %08lx\n", ret, cur_mask);
    if (ret != 0) abort();

    ret = sched_setaffinity(p, len, &new_mask);
    // printf("sched_setaffinity = %d, new_mask = %08lx\n", ret, new_mask);
    if (ret != 0) abort();

    ret = sched_getaffinity(p, len, &cur_mask);
    // printf("sched_getaffinity = %d, cur_mask = %08lx\n", ret, cur_mask);
    if (ret != 0) abort();

    if (cur_mask != new_mask) {
        printf("affinity did not get set! exiting\n");
        exit(-1);
    }

    fflush(stdout);
    return 0;
}
/*----------------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
    int core = 0;

    if (argc >= 2) {
        core = atoi(argv[1]);
        printf("affinity to core %d \n", core);
    } else {
        printf("affinity to 0 core by default \n");
    }

    /* initialize thread bind cpu */
    cpu_set_t cpus;
    CPU_ZERO(&cpus);
    CPU_SET((unsigned)core, &cpus);
    sched_setaffinity(0, sizeof(cpus), &cpus);

    RunServerThread();
}
void timing_enter_max_prio(void)
{
	int res;
	int new_scheduler = SCHED_FIFO;
	struct sched_param new_sched_params;
	cpu_set_t new_affinity;

	if (in_max_prio)
		return;

	/* remember old scheduler settings */
	res = sched_getaffinity(0, sizeof(affinity), &affinity);
	if (res < 0)
		return;

	scheduler = sched_getscheduler(0);
	if (scheduler < 0)
		return;

	res = sched_getparam(0, &sched_params);
	if (res < 0)
		return;

	/* set high prio */
	CPU_ZERO(&new_affinity);
	CPU_SET(0, &new_affinity);
	res = sched_setaffinity(0, sizeof(new_affinity), &new_affinity);
	if (res < 0)
		return;

	new_scheduler = SCHED_FIFO;
	new_sched_params = sched_params;
	new_sched_params.sched_priority = sched_get_priority_max(new_scheduler);
	res = sched_setscheduler(0, new_scheduler, &new_sched_params);
	if (res < 0)
		return;

	in_max_prio = 1;
}
int set_process_affinity(int cpu)
{
    int retval = -1;

#if defined(CPU_ZERO)
    cpu_set_t cpu_mask;

    CPU_ZERO(&cpu_mask);
    if (cpu >= 0 && cpu <= CPU_SETSIZE) {
        CPU_SET(cpu, &cpu_mask);
    } else {
        fprintf (stderr, "Wrong cpu id: %d\n", cpu);
        return -1;
    }
    retval = sched_setaffinity(0, sizeof(cpu_mask), &cpu_mask);
#elif defined(cpuset_create)
    cpuset_t *cpu_mask = cpuset_create();

    cpuset_zero(cpu_mask);
    if (cpu >= 0 && cpu <= cpuset_size(cpu_mask)) {
        cpuset_set(cpu, cpu_mask);
    } else {
        fprintf (stderr, "Wrong cpu id: %d\n", cpu);
        return -1;
    }
    retval = _sched_setaffinity(0, 0, cpuset_size(cpu_mask), cpu_mask);
    cpuset_destroy(cpu_mask);
#else
#error "no cpuset"
#endif

    if (retval == -1)
        perror("Error at sched_setaffinity()");

    return retval;
}
void Sys_SetProcessorAffinity( void ) {
#if defined(__linux__)
	uint32_t cores;

	if ( sscanf( com_affinity->string, "%X", &cores ) != 1 )
		cores = 1; // set to first core only

	const long numCores = sysconf( _SC_NPROCESSORS_ONLN );
	if ( !cores )
		cores = (1 << numCores) - 1; // use all cores

	cpu_set_t set;
	CPU_ZERO( &set );
	for ( int i = 0; i < numCores; i++ ) {
		if ( cores & (1<<i) ) {
			CPU_SET( i, &set );
		}
	}

	sched_setaffinity( 0, sizeof( set ), &set );
#elif defined(MACOS_X)
	//TODO: Apple's APIs for this are weird but exist on a per-thread level. Good enough for us.
#endif
}
static PyObject *
set_process_affinity_mask(PyObject *self, PyObject *args)
{
    unsigned long new_mask;
    unsigned long cur_mask;
    unsigned int len = sizeof(new_mask);
    pid_t pid;

    if (!PyArg_ParseTuple(args, "il:set_process_affinity_mask", &pid, &new_mask))
        return NULL;

    if (sched_getaffinity(pid, len, (cpu_set_t *)&cur_mask) < 0) {
        PyErr_SetFromErrno(PyExc_ValueError);
        return NULL;
    }
    if (sched_setaffinity(pid, len, (cpu_set_t *)&new_mask)) {
        PyErr_SetFromErrno(PyExc_ValueError);
        return NULL;
    }

    return Py_BuildValue("l", cur_mask);
}
int main(int ac, char **av)
{
	size_t len;
	size_t range;
	size_t size;
	int i;
	unsigned long mask = 0;
	unsigned int masklen = sizeof(mask);
	char *addr;
	cpu_set_t core;
	int num_cpus = 1;

	if (ac > 1)
		num_cpus = atoi(av[1]);

	for (i = 0; i < num_cpus; i++) {
		CPU_ZERO(&core);
		CPU_SET(i, &core);
		if (sched_setaffinity(0, sizeof(core), &core) < 0) {
			perror("sched_setaffinity");
			exit(-1);
		}

		len = 128 * 1024 * 1024;
		addr = (char *)malloc(len);

		fprintf(stderr, "\"stride=%d\n", STRIDE);
		for (range = LOWER; range <= len; range = step(range)) {
			loads(addr, range, STRIDE, i);
		}
		free(addr);
	}
	return (0);
}
//##################################################################################################
static void *magma_dapplyQ_parallel_section(void *arg)
{
    magma_int_t my_core_id     = ((magma_dapplyQ_id_data*)arg) -> id;
    magma_dapplyQ_data* data   = ((magma_dapplyQ_id_data*)arg) -> data;

    magma_int_t allcores_num   = data -> threads_num;
    magma_int_t n              = data -> n;
    magma_int_t ne             = data -> ne;
    magma_int_t n_gpu          = data -> n_gpu;
    magma_int_t nb             = data -> nb;
    magma_int_t Vblksiz        = data -> Vblksiz;
    double *E                  = data -> E;
    magma_int_t lde            = data -> lde;
    double *V                  = data -> V;
    magma_int_t ldv            = data -> ldv;
    double *TAU                = data -> TAU;
    double *T                  = data -> T;
    magma_int_t ldt            = data -> ldt;
    double *dE                 = data -> dE;
    magma_int_t ldde           = data -> ldde;
    pthread_barrier_t* barrier = &(data -> barrier);

    magma_int_t info;

#ifdef ENABLE_TIMER
    real_Double_t timeQcpu=0.0, timeQgpu=0.0;
#endif

    magma_int_t n_cpu = ne - n_gpu;

    // with MKL and when using omp_set_num_threads instead of mkl_set_num_threads
    // it need that all threads setting it to 1.
    magma_set_lapack_numthreads(1);

#ifdef MAGMA_SETAFFINITY
//#define PRINTAFFINITY
#ifdef PRINTAFFINITY
    affinity_set print_set;
    print_set.print_affinity(my_core_id, "starting affinity");
#endif
    cpu_set_t old_set, new_set;
    //store current affinity
    CPU_ZERO(&old_set);
    sched_getaffinity( 0, sizeof(old_set), &old_set);
    //set new affinity
    // bind threads
    CPU_ZERO(&new_set);
    CPU_SET(my_core_id, &new_set);
    sched_setaffinity( 0, sizeof(new_set), &new_set);
#ifdef PRINTAFFINITY
    print_set.print_affinity(my_core_id, "set affinity");
#endif
#endif

    if (my_core_id == 0) {
        //=============================================
        //   on GPU on thread 0:
        //    - apply V2*Z(:,1:N_GPU)
        //=============================================
#ifdef ENABLE_TIMER
        timeQgpu = magma_wtime();
#endif
        magma_dsetmatrix(n, n_gpu, E, lde, dE, ldde);
        magma_dbulge_applyQ_v2(MagmaLeft, n_gpu, n, nb, Vblksiz, dE, ldde, V, ldv, T, ldt, &info);
        magma_device_sync();
#ifdef ENABLE_TIMER
        timeQgpu = magma_wtime()-timeQgpu;
        printf("  Finish Q2_GPU GGG timing= %f\n", timeQgpu);
#endif
    } else {
        //=============================================
        //   on CPU on threads 1:allcores_num-1:
        //    - apply V2*Z(:,N_GPU+1:NE)
        //=============================================
#ifdef ENABLE_TIMER
        if (my_core_id == 1)
            timeQcpu = magma_wtime();
#endif
        magma_int_t n_loc = magma_ceildiv(n_cpu, allcores_num-1);
        double* E_loc = E + (n_gpu+ n_loc * (my_core_id-1))*lde;
        n_loc = min(n_loc, n_cpu - n_loc * (my_core_id-1));

        magma_dtile_bulge_applyQ(my_core_id, MagmaLeft, n_loc, n, nb, Vblksiz, E_loc, lde, V, ldv, TAU, T, ldt);
        pthread_barrier_wait(barrier);

#ifdef ENABLE_TIMER
        if (my_core_id == 1) {
            timeQcpu = magma_wtime()-timeQcpu;
            printf("  Finish Q2_CPU CCC timing= %f\n", timeQcpu);
        }
#endif
    } // END if my_core_id

#ifdef MAGMA_SETAFFINITY
    //restore old affinity
    sched_setaffinity(0, sizeof(old_set), &old_set);
#ifdef PRINTAFFINITY
    print_set.print_affinity(my_core_id, "restored_affinity");
#endif
#endif

    return 0;
}
int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
	struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we use the auto allocation it will allocate just for 1 cpu,
	 * as we start by cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu],
				 perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	perf_evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__put(threads);
	return err;
}
/*
 *  stress_tlb_shootdown()
 *	stress out TLB shootdowns
 */
static int stress_tlb_shootdown(const args_t *args)
{
	const size_t page_size = args->page_size;
	const size_t mmap_size = page_size * MMAP_PAGES;
	pid_t pids[MAX_TLB_PROCS];
	cpu_set_t proc_mask_initial;

	if (sched_getaffinity(0, sizeof(proc_mask_initial), &proc_mask_initial) < 0) {
		pr_fail_err("could not get CPU affinity");
		return EXIT_FAILURE;
	}

	do {
		uint8_t *mem, *ptr;
		int retry = 128;
		cpu_set_t proc_mask;
		int32_t tlb_procs, i;
		const int32_t max_cpus = stress_get_processors_configured();

		CPU_ZERO(&proc_mask);
		CPU_OR(&proc_mask, &proc_mask_initial, &proc_mask);

		tlb_procs = max_cpus;
		if (tlb_procs > MAX_TLB_PROCS)
			tlb_procs = MAX_TLB_PROCS;
		if (tlb_procs < MIN_TLB_PROCS)
			tlb_procs = MIN_TLB_PROCS;

		for (;;) {
			mem = mmap(NULL, mmap_size, PROT_WRITE | PROT_READ,
				MAP_SHARED | MAP_ANONYMOUS, -1, 0);
			if ((void *)mem == MAP_FAILED) {
				if ((errno == EAGAIN) ||
				    (errno == ENOMEM) ||
				    (errno == ENFILE)) {
					if (--retry < 0)
						return EXIT_NO_RESOURCE;
				} else {
					pr_fail_err("mmap");
				}
			} else {
				break;
			}
		}
		(void)memset(mem, 0, mmap_size);

		for (i = 0; i < tlb_procs; i++)
			pids[i] = -1;

		for (i = 0; i < tlb_procs; i++) {
			int32_t j, cpu = -1;

			for (j = 0; j < max_cpus; j++) {
				if (CPU_ISSET(j, &proc_mask)) {
					cpu = j;
					CPU_CLR(j, &proc_mask);
					break;
				}
			}
			if (cpu == -1)
				break;

			pids[i] = fork();
			if (pids[i] < 0)
				break;
			if (pids[i] == 0) {
				cpu_set_t mask;
				char buffer[page_size];

				(void)setpgid(0, g_pgrp);
				stress_parent_died_alarm();

				/* Make sure this is killable by OOM killer */
				set_oom_adjustment(args->name, true);

				CPU_ZERO(&mask);
				CPU_SET(cpu % max_cpus, &mask);
				(void)sched_setaffinity(args->pid, sizeof(mask), &mask);

				for (ptr = mem; ptr < mem + mmap_size; ptr += page_size) {
					/* Force tlb shoot down on page */
					(void)mprotect(ptr, page_size, PROT_READ);
					(void)memcpy(buffer, ptr, page_size);
					(void)munmap(ptr, page_size);
				}
				_exit(0);
			}
		}

		for (i = 0; i < tlb_procs; i++) {
			if (pids[i] != -1) {
				int status, ret;

				ret = shim_waitpid(pids[i], &status, 0);
				if ((ret < 0) && (errno == EINTR)) {
					int j;

					/*
					 * We got interrupted, so assume
					 * it was the alarm (timedout) or
					 * SIGINT so force terminate
					 */
					for (j = i; j < tlb_procs; j++) {
						if (pids[j] != -1)
							(void)kill(pids[j], SIGKILL);
					}

					/* re-wait on the failed wait */
					(void)shim_waitpid(pids[i], &status, 0);

					/* and continue waitpid on the pids */
				}
			}
		}
		(void)munmap(mem, mmap_size);
		(void)sched_setaffinity(0, sizeof(proc_mask_initial), &proc_mask_initial);
		inc_counter(args);
	} while (keep_stressing());

	return EXIT_SUCCESS;
}
/* The main CPU accumulator thread */
void guppi_accum_thread(void *_args) {

    float **accumulator;      //indexed accumulator[accum_id][chan][subband][stokes]
    char accum_dirty[NUM_SW_STATES];
    struct sdfits_data_columns data_cols[NUM_SW_STATES];
    int payload_type;
    int i, j, k, rv;

    /* Get arguments */
    struct guppi_thread_args *args = (struct guppi_thread_args *)_args;

    /* Set cpu affinity */
    cpu_set_t cpuset, cpuset_orig;
    sched_getaffinity(0, sizeof(cpu_set_t), &cpuset_orig);
    //CPU_ZERO(&cpuset);
    CPU_CLR(13, &cpuset);
    CPU_SET(9, &cpuset);
    rv = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
    if (rv<0) {
        guppi_error("guppi_accum_thread", "Error setting cpu affinity.");
        perror("sched_setaffinity");
    }

    /* Set priority */
    rv = setpriority(PRIO_PROCESS, 0, args->priority);
    if (rv<0) {
        guppi_error("guppi_accum_thread", "Error setting priority level.");
        perror("set_priority");
    }

    /* Attach to status shared mem area */
    struct guppi_status st;
    rv = guppi_status_attach(&st);
    if (rv!=GUPPI_OK) {
        guppi_error("guppi_accum_thread",
                "Error attaching to status shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_status_detach, &st);
    pthread_cleanup_push((void *)set_exit_status, &st);
    pthread_cleanup_push((void *)guppi_thread_set_finished, args);

    /* Init status */
    guppi_status_lock_safe(&st);
    hputs(st.buf, STATUS_KEY, "init");
    guppi_status_unlock_safe(&st);

    /* Read in general parameters */
    struct guppi_params gp;
    struct sdfits sf;
    pthread_cleanup_push((void *)guppi_free_sdfits, &sf);

    /* Attach to databuf shared mem */
    struct guppi_databuf *db_in, *db_out;
    db_in = guppi_databuf_attach(args->input_buffer);
    char errmsg[256];
    if (db_in==NULL) {
        sprintf(errmsg,
                "Error attaching to input databuf(%d) shared memory.",
                args->input_buffer);
        guppi_error("guppi_accum_thread", errmsg);
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db_in);
    db_out = guppi_databuf_attach(args->output_buffer);
    if (db_out==NULL) {
        sprintf(errmsg,
                "Error attaching to output databuf(%d) shared memory.",
                args->output_buffer);
        guppi_error("guppi_accum_thread", errmsg);
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db_out);

    /* Determine high/low bandwidth mode */
    char bw_mode[16];
    if (hgets(st.buf, "BW_MODE", 16, bw_mode)) {
        if(strncmp(bw_mode, "high", 4) == 0)
            payload_type = INT_PAYLOAD;
        else if(strncmp(bw_mode, "low", 3) == 0)
            payload_type = FLOAT_PAYLOAD;
        else
            guppi_error("guppi_net_thread", "Unsupported bandwidth mode");
    }
    else
        guppi_error("guppi_net_thread", "BW_MODE not set");

    /* Read nchan and nsubband from status shared memory */
    guppi_read_obs_params(st.buf, &gp, &sf);

    /* Allocate memory for vector accumulators */
    create_accumulators(&accumulator, sf.hdr.nchan, sf.hdr.nsubband);
    pthread_cleanup_push((void *)destroy_accumulators, accumulator);

    /* Clear the vector accumulators */
    for(i = 0; i < NUM_SW_STATES; i++) accum_dirty[i] = 1;
    reset_accumulators(accumulator, data_cols, accum_dirty, sf.hdr.nsubband, sf.hdr.nchan);

    /* Loop */
    int curblock_in=0, curblock_out=0;
    int first=1;
    float reqd_exposure=0;
    double accum_time=0;
    int integ_num;
    float pfb_rate;
    int heap, accumid, struct_offset, array_offset;
    char *hdr_in=NULL, *hdr_out=NULL;
    struct databuf_index *index_in, *index_out;
    int nblock_int=0, npacket=0, n_pkt_drop=0, n_heap_drop=0;

    signal(SIGINT,cc);
    while (run) {

        /* Note waiting status */
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "waiting");
        guppi_status_unlock_safe(&st);

        /* Wait for buf to have data */
        rv = guppi_databuf_wait_filled(db_in, curblock_in);
        if (rv!=0) continue;

        /* Note waiting status and current block*/
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "accumulating");
        hputi4(st.buf, "ACCBLKIN", curblock_in);
        guppi_status_unlock_safe(&st);

        /* Read param struct for this block */
        hdr_in = guppi_databuf_header(db_in, curblock_in);
        if (first)
            guppi_read_obs_params(hdr_in, &gp, &sf);
        else
            guppi_read_subint_params(hdr_in, &gp, &sf);

        /* Do any first time stuff: first time code runs, not first time process this block */
        if (first) {

            /* Set up first output header. This header is copied from block to block
               each time a new block is created */
            hdr_out = guppi_databuf_header(db_out, curblock_out);
            memcpy(hdr_out, guppi_databuf_header(db_in, curblock_in), GUPPI_STATUS_SIZE);

            /* Read required exposure and PFB rate from status shared memory */
            reqd_exposure = sf.data_columns.exposure;
            pfb_rate = sf.hdr.efsampfr / (2 * sf.hdr.nchan);

            /* Initialise the index in the output block */
            index_out = (struct databuf_index*)guppi_databuf_index(db_out, curblock_out);
            index_out->num_datasets = 0;
            index_out->array_size = sf.hdr.nsubband * sf.hdr.nchan * NUM_STOKES * 4;

            first=0;
        }

        /* Loop through each spectrum (heap) in input buffer */
        index_in = (struct databuf_index*)guppi_databuf_index(db_in, curblock_in);

        for(heap = 0; heap < index_in->num_heaps; heap++) {

            /* If invalid, record it and move to next heap */
            if(!index_in->cpu_gpu_buf[heap].heap_valid) {
                n_heap_drop++;
                continue;
            }

            /* Read in heap from buffer */
            char* heap_addr = (char*)(guppi_databuf_data(db_in, curblock_in) +
                                sizeof(struct freq_spead_heap) * heap);
            struct freq_spead_heap* freq_heap = (struct freq_spead_heap*)(heap_addr);

            char* payload_addr = (char*)(guppi_databuf_data(db_in, curblock_in) +
                                sizeof(struct freq_spead_heap) * MAX_HEAPS_PER_BLK +
                                (index_in->heap_size - sizeof(struct freq_spead_heap)) * heap );
            int *i_payload = (int*)(payload_addr);
            float *f_payload = (float*)(payload_addr);

            accumid = freq_heap->status_bits & 0x7;

            /*Debug: print heap */
            /* printf("%d, %d, %d, %d, %d, %d\n", freq_heap->time_cntr, freq_heap->spectrum_cntr,
                freq_heap->integ_size, freq_heap->mode, freq_heap->status_bits,
                freq_heap->payload_data_off); */

            /* If we have accumulated for long enough, write vectors to output block */
            if(accum_time >= reqd_exposure) {

                for(i = 0; i < NUM_SW_STATES; i++) {

                    /*If a particular accumulator is dirty, write it to output buffer */
                    if(accum_dirty[i]) {

                        /*If insufficient space, first mark block as filled and request new block*/
                        index_out = (struct databuf_index*)(guppi_databuf_index(db_out, curblock_out));

                        if( (index_out->num_datasets+1) *
                            (index_out->array_size + sizeof(struct sdfits_data_columns)) >
                            db_out->block_size) {

                            printf("Accumulator finished with output block %d\n", curblock_out);

                            /* Write block number to status buffer */
                            guppi_status_lock_safe(&st);
                            hputi4(st.buf, "ACCBLKOU", curblock_out);
                            guppi_status_unlock_safe(&st);

                            /* Update packet count and loss fields in output header */
                            hputi4(hdr_out, "NBLOCK", nblock_int);
                            hputi4(hdr_out, "NPKT", npacket);
                            hputi4(hdr_out, "NPKTDROP", n_pkt_drop);
                            hputi4(hdr_out, "NHPDROP", n_heap_drop);

                            /* Close out current integration */
                            guppi_databuf_set_filled(db_out, curblock_out);

                            /* Wait for next output buf */
                            curblock_out = (curblock_out + 1) % db_out->n_block;
                            guppi_databuf_wait_free(db_out, curblock_out);

                            while ((rv=guppi_databuf_wait_free(db_out, curblock_out)) != GUPPI_OK) {
                                if (rv==GUPPI_TIMEOUT) {
                                    guppi_warn("guppi_accum_thread", "timeout while waiting for output block");
                                    continue;
                                } else {
                                    guppi_error("guppi_accum_thread", "error waiting for free databuf");
                                    run=0;
                                    pthread_exit(NULL);
                                    break;
                                }
                            }

                            hdr_out = guppi_databuf_header(db_out, curblock_out);
                            memcpy(hdr_out, guppi_databuf_header(db_in, curblock_in), GUPPI_STATUS_SIZE);

                            /* Initialise the index in new output block */
                            index_out = (struct databuf_index*)guppi_databuf_index(db_out, curblock_out);
                            index_out->num_datasets = 0;
                            index_out->array_size = sf.hdr.nsubband * sf.hdr.nchan * NUM_STOKES * 4;

                            nblock_int=0;
                            npacket=0;
                            n_pkt_drop=0;
                            n_heap_drop=0;
                        }

                        /*Update index for output buffer*/
                        index_out = (struct databuf_index*)(guppi_databuf_index(db_out, curblock_out));

                        if(index_out->num_datasets == 0)
                            struct_offset = 0;
                        else
                            struct_offset = index_out->disk_buf[index_out->num_datasets-1].array_offset +
                                            index_out->array_size;

                        array_offset = struct_offset + sizeof(struct sdfits_data_columns);
                        index_out->disk_buf[index_out->num_datasets].struct_offset = struct_offset;
                        index_out->disk_buf[index_out->num_datasets].array_offset = array_offset;

                        /*Copy sdfits_data_columns struct to disk buffer */
                        memcpy(guppi_databuf_data(db_out, curblock_out) + struct_offset,
                                &data_cols[i], sizeof(struct sdfits_data_columns));

                        /*Copy data array to disk buffer */
                        memcpy(guppi_databuf_data(db_out, curblock_out) + array_offset,
                                accumulator[i], index_out->array_size);

                        /*Update SDFITS data_columns pointer to data array */
                        ((struct sdfits_data_columns*)
                            (guppi_databuf_data(db_out, curblock_out) + struct_offset))->data =
                            (unsigned char*)(guppi_databuf_data(db_out, curblock_out) + array_offset);

                        index_out->num_datasets = index_out->num_datasets + 1;
                    }
                }

                accum_time = 0;
                integ_num += 1;

                reset_accumulators(accumulator, data_cols, accum_dirty,
                                sf.hdr.nsubband, sf.hdr.nchan);
            }

            /* Only add spectrum to accumulator if blanking bit is low */
            if((freq_heap->status_bits & 0x08) == 0) {

                /* Fill in data columns header fields */
                if(!accum_dirty[accumid]) {

                    /*Record SPEAD header fields*/
                    data_cols[accumid].time = index_in->cpu_gpu_buf[heap].heap_rcvd_mjd;
                    data_cols[accumid].time_counter = freq_heap->time_cntr;
                    data_cols[accumid].integ_num = integ_num;
                    data_cols[accumid].sttspec = freq_heap->spectrum_cntr;
                    data_cols[accumid].accumid = accumid;

                    /* Fill in rest of fields from status buffer */
                    strcpy(data_cols[accumid].object, sf.data_columns.object);
                    data_cols[accumid].azimuth = sf.data_columns.azimuth;
                    data_cols[accumid].elevation = sf.data_columns.elevation;
                    data_cols[accumid].bmaj = sf.data_columns.bmaj;
                    data_cols[accumid].bmin = sf.data_columns.bmin;
                    data_cols[accumid].bpa = sf.data_columns.bpa;
                    data_cols[accumid].centre_freq_idx = sf.data_columns.centre_freq_idx;
                    data_cols[accumid].ra = sf.data_columns.ra;
                    data_cols[accumid].dec = sf.data_columns.dec;
                    data_cols[accumid].exposure = 0.0;

                    for(i = 0; i < NUM_SW_STATES; i++)
                        data_cols[accumid].centre_freq[i] = sf.data_columns.centre_freq[i];

                    accum_dirty[accumid] = 1;
                }

                data_cols[accumid].exposure += (float)(freq_heap->integ_size)/pfb_rate;
                data_cols[accumid].stpspec = freq_heap->spectrum_cntr;

                /* Add spectrum to appropriate vector accumulator (high-bw mode) */
                if(payload_type == INT_PAYLOAD) {

                    for(i = 0; i < sf.hdr.nchan; i++) {
                        for(j = 0; j < sf.hdr.nsubband; j++) {
                            for(k = 0; k < NUM_STOKES; k++) {
                                accumulator[accumid]
                                    [i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k] +=
                                    (float)i_payload[i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k];
                            }
                        }
                    }
                }

                /* Add spectrum to appropriate vector accumulator (low-bw mode) */
                else {

                    for(i = 0; i < sf.hdr.nchan; i++) {
                        for(j = 0; j < sf.hdr.nsubband; j++) {
                            for(k = 0; k < NUM_STOKES; k++) {
                                accumulator[accumid]
                                    [i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k] +=
                                    f_payload[i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k];
                            }
                        }
                    }
                }
            }

            accum_time += (double)freq_heap->integ_size / pfb_rate;
        }

        /* Update packet count and loss fields from input header */
        nblock_int++;
        npacket += gp.num_pkts_rcvd;
        n_pkt_drop += gp.num_pkts_dropped;

        /* Done with current input block */
        guppi_databuf_set_free(db_in, curblock_in);
        curblock_in = (curblock_in + 1) % db_in->n_block;

        /* Check for cancel */
        pthread_testcancel();
    }

    pthread_exit(NULL);

    pthread_cleanup_pop(0); /* Closes set_exit_status */
    pthread_cleanup_pop(0); /* Closes set_finished */
    pthread_cleanup_pop(0); /* Closes guppi_free_sdfits */
    pthread_cleanup_pop(0); /* Closes ? */
    pthread_cleanup_pop(0); /* Closes destroy_accumulators */
    pthread_cleanup_pop(0); /* Closes guppi_status_detach */
    pthread_cleanup_pop(0); /* Closes guppi_databuf_detach */
}
int main(int argc, char **argv)
{
    int ret;
    baseRateInfo_t info;
    pthread_attr_t attr;
    pthread_t baseRateThread;
    size_t stackSize;
    unsigned long cpuMask = 0x1;
    unsigned int len = sizeof(cpuMask);

    printf("**starting the model**\n");
    fflush(stdout);
    rtmSetErrorStatus(beagleboard_communication_M, 0);

    /* Unused arguments */
    (void)(argc);
    (void)(argv);

    /* All threads created by this process must run on a single CPU */
    ret = sched_setaffinity(0, len, (cpu_set_t *) &cpuMask);
    CHECK_STATUS(ret, "sched_setaffinity");

    /* Initialize semaphore used for thread synchronization */
    ret = sem_init(&stopSem, 0, 0);
    CHECK_STATUS(ret, "sem_init:stopSem");

    /* Create threads executing the Simulink model */
    pthread_attr_init(&attr);
    ret = pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
    CHECK_STATUS(ret, "pthread_attr_setinheritsched");
    ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    CHECK_STATUS(ret, "pthread_attr_setdetachstate");

    /* PTHREAD_STACK_MIN is the minimum stack size required to start a thread */
    stackSize = 64000 + PTHREAD_STACK_MIN;
    ret = pthread_attr_setstacksize(&attr, stackSize);
    CHECK_STATUS(ret, "pthread_attr_setstacksize");

    /* Block signal used for timer notification */
    info.period = 0.01;
    info.signo = SIGRTMIN;
    sigemptyset(&info.sigMask);
    MW_blockSignal(info.signo, &info.sigMask);
    signal(SIGTERM, MW_exitHandler);   /* kill */
    signal(SIGHUP, MW_exitHandler);    /* kill -HUP */
    signal(SIGINT, MW_exitHandler);    /* Interrupt from keyboard */
    signal(SIGQUIT, MW_exitHandler);   /* Quit from keyboard */
    beagleboard_communication_initialize();

    /* Create base rate task */
    ret = pthread_create(&baseRateThread, &attr, (void *) baseRateTask, (void *) &info);
    CHECK_STATUS(ret, "pthread_create");
    pthread_attr_destroy(&attr);

    /* Wait for a stopping condition. */
    MW_sem_wait(&stopSem);

    /* Received stop signal */
    printf("**stopping the model**\n");
    if (rtmGetErrorStatus(beagleboard_communication_M) != NULL) {
        printf("\n**%s**\n", rtmGetErrorStatus(beagleboard_communication_M));
    }

    /* Disable rt_OneStep() here */

    /* Terminate model */
    beagleboard_communication_terminate();
    return 0;
}
int main(int argc, char **argv)
{
    int ret;
    baseRateInfo_t info;
    pthread_attr_t attr;
    pthread_t baseRateThread;
    size_t stackSize;
    unsigned long cpuMask = 0x1;
    unsigned int len = sizeof(cpuMask);

    printf("**starting the model**\n");
    fflush(stdout);
    rtmSetErrorStatus(raspberrypi_audioequalizer_M, 0);
    rtExtModeParseArgs(argc, (const char_T **)argv, NULL);

    /* All threads created by this process must run on a single CPU */
    ret = sched_setaffinity(0, len, (cpu_set_t *) &cpuMask);
    CHECK_STATUS(ret, "sched_setaffinity");

    /* Initialize semaphore used for thread synchronization */
    ret = sem_init(&stopSem, 0, 0);
    CHECK_STATUS(ret, "sem_init:stopSem");

    /* Create threads executing the Simulink model */
    pthread_attr_init(&attr);
    ret = pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
    CHECK_STATUS(ret, "pthread_attr_setinheritsched");
    ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    CHECK_STATUS(ret, "pthread_attr_setdetachstate");

    /* PTHREAD_STACK_MIN is the minimum stack size required to start a thread */
    stackSize = 64000 + PTHREAD_STACK_MIN;
    ret = pthread_attr_setstacksize(&attr, stackSize);
    CHECK_STATUS(ret, "pthread_attr_setstacksize");

    /* Block signal used for timer notification */
    info.period = 0.1;
    info.signo = SIGRTMIN;
    sigemptyset(&info.sigMask);
    MW_blockSignal(info.signo, &info.sigMask);
    signal(SIGTERM, MW_exitHandler);   /* kill */
    signal(SIGHUP, MW_exitHandler);    /* kill -HUP */
    signal(SIGINT, MW_exitHandler);    /* Interrupt from keyboard */
    signal(SIGQUIT, MW_exitHandler);   /* Quit from keyboard */
    raspberrypi_audioequalizer_initialize();

    /* External mode */
    rtSetTFinalForExtMode(&rtmGetTFinal(raspberrypi_audioequalizer_M));
    rtExtModeCheckInit(1);

    {
        boolean_T rtmStopReq = FALSE;
        rtExtModeWaitForStartPkt(raspberrypi_audioequalizer_M->extModeInfo, 1, &rtmStopReq);
        if (rtmStopReq) {
            rtmSetStopRequested(raspberrypi_audioequalizer_M, TRUE);
        }
    }

    rtERTExtModeStartMsg();

    /* Create base rate task */
    ret = pthread_create(&baseRateThread, &attr, (void *) baseRateTask, (void *) &info);
    CHECK_STATUS(ret, "pthread_create");
    pthread_attr_destroy(&attr);

    /* Wait for a stopping condition. */
    MW_sem_wait(&stopSem);

    /* Received stop signal */
    printf("**stopping the model**\n");
    if (rtmGetErrorStatus(raspberrypi_audioequalizer_M) != NULL) {
        printf("\n**%s**\n", rtmGetErrorStatus(raspberrypi_audioequalizer_M));
    }

    /* External mode shutdown */
    rtExtModeShutdown(1);

    /* Disable rt_OneStep() here */

    /* Terminate model */
    raspberrypi_audioequalizer_terminate();
    return 0;
}
void julia_init(char *imageFile)
{
    jl_page_size = jl_getpagesize();
    jl_find_stack_bottom();
    jl_dl_handle = jl_load_dynamic_library(NULL, JL_RTLD_DEFAULT);
#ifdef __WIN32__
    uv_dlopen("ntdll.dll", jl_ntdll_handle);       // bypass julia's pathchecking for system dlls
    uv_dlopen("Kernel32.dll", jl_kernel32_handle);
    uv_dlopen("msvcrt.dll", jl_crtdll_handle);
    uv_dlopen("Ws2_32.dll", jl_winsock_handle);
    _jl_exe_handle.handle = GetModuleHandleA(NULL);
#endif
    jl_io_loop = uv_default_loop();  // this loop will handle internal events (spawning processes, etc.)
    init_stdio();

#if defined(__linux__)
    int ncores = jl_cpu_cores();
    if (ncores > 1) {
        cpu_set_t cpumask;
        CPU_ZERO(&cpumask);
        for(int i=0; i < ncores; i++) {
            CPU_SET(i, &cpumask);
        }
        sched_setaffinity(0, sizeof(cpu_set_t), &cpumask);
    }
#endif

#ifdef JL_GC_MARKSWEEP
    jl_gc_init();
    jl_gc_disable();
#endif
    jl_init_frontend();
    jl_init_types();
    jl_init_tasks(jl_stack_lo, jl_stack_hi-jl_stack_lo);
    jl_init_codegen();
    jl_an_empty_cell = (jl_value_t*)jl_alloc_cell_1d(0);
    jl_init_serializer();

    if (!imageFile) {
        jl_main_module = jl_new_module(jl_symbol("Main"));
        jl_main_module->parent = jl_main_module;
        jl_core_module = jl_new_module(jl_symbol("Core"));
        jl_core_module->parent = jl_main_module;
        jl_set_const(jl_main_module, jl_symbol("Core"),
                     (jl_value_t*)jl_core_module);
        jl_module_using(jl_main_module, jl_core_module);
        jl_current_module = jl_core_module;
        jl_init_intrinsic_functions();
        jl_init_primitives();
        jl_load("boot.jl");
        jl_get_builtin_hooks();
        jl_boot_file_loaded = 1;
        jl_init_box_caches();
    }

    if (imageFile) {
        JL_TRY {
            jl_restore_system_image(imageFile);
        }
        JL_CATCH {
            JL_PRINTF(JL_STDERR, "error during init:\n");
            jl_show(jl_stderr_obj(), jl_exception_in_transit);
            JL_PRINTF(JL_STDOUT, "\n");
            jl_exit(1);
        }
    }

    // set module field of primitive types
    int i;
    void **table = jl_core_module->bindings.table;
    for(i=1; i < jl_core_module->bindings.size; i+=2) {
        if (table[i] != HT_NOTFOUND) {
            jl_binding_t *b = (jl_binding_t*)table[i];
            if (b->value && jl_is_datatype(b->value)) {
                jl_datatype_t *tt = (jl_datatype_t*)b->value;
                tt->name->module = jl_core_module;
            }
        }
    }

    // the Main module is the one which is always open, and set as the
    // current module for bare (non-module-wrapped) toplevel expressions.
    // it does "using Base" if Base is available.
    if (jl_base_module != NULL) {
        jl_add_standard_imports(jl_main_module);
    }
    // eval() uses Main by default, so Main.eval === Core.eval
    jl_module_import(jl_main_module, jl_core_module, jl_symbol("eval"));
    jl_current_module = jl_main_module;

#ifndef __WIN32__
    struct sigaction actf;
    memset(&actf, 0, sizeof(struct sigaction));
    sigemptyset(&actf.sa_mask);
    actf.sa_handler = fpe_handler;
    actf.sa_flags = 0;
    if (sigaction(SIGFPE, &actf, NULL) < 0) {
        JL_PRINTF(JL_STDERR, "sigaction: %s\n", strerror(errno));
        jl_exit(1);
    }

    stack_t ss;
    ss.ss_flags = 0;
    ss.ss_size = SIGSTKSZ;
    ss.ss_sp = malloc(ss.ss_size);
    if (sigaltstack(&ss, NULL) < 0) {
        JL_PRINTF(JL_STDERR, "sigaltstack: %s\n", strerror(errno));
        jl_exit(1);
    }

    struct sigaction act;
    memset(&act, 0, sizeof(struct sigaction));
    sigemptyset(&act.sa_mask);
    act.sa_sigaction = segv_handler;
    act.sa_flags = SA_ONSTACK | SA_SIGINFO;
    if (sigaction(SIGSEGV, &act, NULL) < 0) {
        JL_PRINTF(JL_STDERR, "sigaction: %s\n", strerror(errno));
        jl_exit(1);
    }
#else
    if (signal(SIGFPE, (void (__cdecl *)(int))fpe_handler) == SIG_ERR) {
        JL_PRINTF(JL_STDERR, "Couldn't set SIGFPE\n");
        jl_exit(1);
    }
#endif

#ifdef JL_GC_MARKSWEEP
    jl_gc_enable();
#endif
}