float get_perm_lrt(subject **s, int nsub, par_info *pi)
{
    float lrt;
    FILE *fo;
    gc_res *case_res, *cont_res, *all_res;

    assert((case_res = (gc_res *)malloc(sizeof(gc_res))) != 0);
    assert((cont_res = (gc_res *)malloc(sizeof(gc_res))) != 0);
    assert((all_res = (gc_res *)malloc(sizeof(gc_res))) != 0);

    /* Fit cases and controls pooled together ("permall")... */
    run_gc("permall", s, nsub, 2, 0, pi);
    assert((fo = fopen("permall.gco", "r")) != 0);
    get_likes(all_res, fo);
    fclose(fo);

    /* ...then cases alone ("permcase")... */
    run_gc("permcase", s, nsub, 1, 0, pi);
    assert((fo = fopen("permcase.gco", "r")) != 0);
    get_likes(case_res, fo);
    fclose(fo);

    /* ...and controls alone ("permcont"). */
    run_gc("permcont", s, nsub, 0, 0, pi);
    assert((fo = fopen("permcont.gco", "r")) != 0);
    get_likes(cont_res, fo);
    fclose(fo);

    /* Likelihood ratio test: -2 * (lnL(pooled) - lnL(cases) - lnL(controls)) */
    lrt = -2 * (all_res->ll_h1 - case_res->ll_h1 - cont_res->ll_h1);

    free(case_res);
    free(cont_res);
    free(all_res);
    return lrt;
}
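For context, a minimal sketch of how get_perm_lrt() might be driven as a permutation test. Everything here except get_perm_lrt() itself is a hypothetical name invented for illustration; in particular, permute_cc_labels() stands in for whatever shuffles case/control status in the real harness.

/* Hypothetical driver, not part of the original source. */
float perm_p_value(subject **s, int nsub, par_info *pi, int n_perm)
{
    float observed = get_perm_lrt(s, nsub, pi); /* LRT on the real labels */
    int i, n_ge = 0;

    for (i = 0; i < n_perm; ++i) {
        permute_cc_labels(s, nsub);   /* hypothetical label shuffle */
        if (get_perm_lrt(s, nsub, pi) >= observed)
            ++n_ge;
    }
    /* Empirical p-value with the usual +1 correction. */
    return (n_ge + 1.0f) / (n_perm + 1.0f);
}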
/* This is called when a thread hits an interrupt at a GC safe point. This means
 * that another thread is already trying to start a GC run, so we don't need to
 * try and do that, just enlist in the run. */
void MVM_gc_enter_from_interrupt(MVMThreadContext *tc) {
    MVMuint8 decr = 0;
    AO_t curr;

    tc->gc_work_count = 0;
    add_work(tc, tc);

    /* grab our child */
    signal_child(tc);

    /* Count us in to the GC run. Wait for a vote to steal. */
    GCORCH_LOG(tc, "Thread %d run %d : Entered from interrupt\n");
    while ((curr = tc->instance->gc_start) < 2
            || !MVM_trycas(&tc->instance->gc_start, curr, curr - 1)) {
        /* apr_sleep(1);
        apr_thread_yield();*/
    }

    /* Wait for all threads to indicate readiness to collect. */
    while (tc->instance->gc_start) {
        /* apr_sleep(1);
        apr_thread_yield();*/
    }
    run_gc(tc, MVMGCWhatToDo_NoInstance);
}
void fill_res(char *root, gc_res *res, subject **s, int nsub, int cc, int gr,
              par_info *pi, int maxhap)
{
    char fname[100];
    FILE *fo;

    run_gc(root, s, nsub, cc, gr, pi);
    sprintf(fname, "%s.gco", root);
    assert((fo = fopen(fname, "r")) != 0);
    get_likes(res, fo);
    get_haps(res, fo, pi->n_loci_to_use, maxhap);
    fclose(fo);
}
/**
 * make_free_space - make more free space on the file-system.
 * @c: UBIFS file-system description object
 *
 * This function is called when an operation cannot be budgeted because there
 * is supposedly no free space. But in most cases there is some free space:
 *   o budgeting is pessimistic, so it always budgets more than is actually
 *     needed, so shrinking the liability is one way to make free space - the
 *     cached data will take less space than was budgeted for;
 *   o GC may turn some dark space into free space (budgeting treats dark space
 *     as not available);
 *   o commit may free some LEB, i.e., turn freeable LEBs into free LEBs.
 *
 * So this function tries to do the above. Returns %-EAGAIN if some free space
 * was presumably made and the caller has to re-try budgeting the operation.
 * Returns %-ENOSPC if it couldn't make more free space, and other negative
 * error codes on failures.
 */
static int make_free_space(struct ubifs_info *c)
{
    int err, retries = 0;
    long long liab1, liab2;

    do {
        liab1 = get_liability(c);
        /*
         * We probably have some dirty pages or inodes (liability), try
         * to write them back.
         */
        dbg_budg("liability %lld, run write-back", liab1);
        shrink_liability(c, NR_TO_WRITE);

        liab2 = get_liability(c);
        if (liab2 < liab1)
            return -EAGAIN;

        dbg_budg("new liability %lld (not shrunk)", liab2);

        /* Liability did not shrink again, try GC */
        dbg_budg("Run GC");
        err = run_gc(c);
        if (!err)
            return -EAGAIN;

        if (err != -EAGAIN && err != -ENOSPC)
            /* Some real error happened */
            return err;

        dbg_budg("Run commit (retries %d)", retries);
        err = ubifs_run_commit(c);
        if (err)
            return err;
    } while (retries++ < MAX_MKSPC_RETRIES);

    return -ENOSPC;
}
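A minimal sketch of how a caller might consume this function's -EAGAIN/-ENOSPC contract; do_budget() and budget_with_retries() are hypothetical stand-ins, not the real UBIFS budgeting entry points.

/* Hypothetical caller loop: -ENOSPC from the budget attempt triggers
 * make_free_space(); -EAGAIN from that means space was presumably
 * freed, so budgeting should simply be retried. */
static int budget_with_retries(struct ubifs_info *c)
{
    int err;

    while (1) {
        err = do_budget(c);        /* hypothetical budgeting attempt */
        if (err != -ENOSPC)
            return err;            /* success, or a hard error */
        err = make_free_space(c);
        if (err != -EAGAIN)
            return err;            /* -ENOSPC or another failure */
    }
}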
/* This is called when a thread hits an interrupt at a GC safe point. This means
 * that another thread is already trying to start a GC run, so we don't need to
 * try and do that, just enlist in the run. */
void MVM_gc_enter_from_interrupt(MVMThreadContext *tc) {
    AO_t curr;

    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entered from interrupt\n");
    MVM_telemetry_timestamp(tc, "gc_enter_from_interrupt");

    /* If profiling, record that GC is starting. */
    if (tc->instance->profiling)
        MVM_profiler_log_gc_start(tc, is_full_collection(tc));

    /* We'll certainly take care of our own work. */
    tc->gc_work_count = 0;
    add_work(tc, tc);

    /* Indicate that we're ready to GC. Only want to decrement it if it's 2
     * or greater (0 should never happen; 1 means the coordinator is still
     * counting up how many threads will join in, so we should wait until it
     * decides to decrement). */
    while ((curr = MVM_load(&tc->instance->gc_start)) < 2
            || !MVM_trycas(&tc->instance->gc_start, curr, curr - 1)) {
        /* MVM_platform_thread_yield();*/
    }

    /* Wait for all threads to indicate readiness to collect. */
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Waiting for other threads\n");
    while (MVM_load(&tc->instance->gc_start)) {
        /* MVM_platform_thread_yield();*/
    }

    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entering run_gc\n");
    run_gc(tc, MVMGCWhatToDo_NoInstance);
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : GC complete\n");

    /* If profiling, record that GC is over. */
    if (tc->instance->profiling)
        MVM_profiler_log_gc_end(tc);
}
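The vote-taking loop above is a guarded compare-and-swap; here is a self-contained sketch of the same pattern using C11 atomics rather than MoarVM's MVM_load/MVM_trycas (the names are illustrative, not MoarVM API).

#include <stdatomic.h>
#include <sched.h>

/* A joiner may only take its start vote once the counter is >= 2:
 * 0 should never be seen here, and 1 means the coordinator is still
 * counting threads in, so we must wait. */
static void take_start_vote(atomic_size_t *gc_start)
{
    size_t curr;
    for (;;) {
        curr = atomic_load(gc_start);
        if (curr >= 2 &&
                atomic_compare_exchange_weak(gc_start, &curr, curr - 1))
            break;
        sched_yield(); /* spin politely until the coordinator is done */
    }
}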
/**
 * make_free_space - make more free space on the file-system.
 * @c: UBIFS file-system description object
 * @ri: information about previous invocations of this function
 *
 * This function is called when an operation cannot be budgeted because there
 * is supposedly no free space. But in most cases there is some free space:
 *   o budgeting is pessimistic, so it always budgets more than is actually
 *     needed, so shrinking the liability is one way to make free space - the
 *     cached data will take less space than was budgeted for;
 *   o GC may turn some dark space into free space (budgeting treats dark space
 *     as not available);
 *   o commit may free some LEB, i.e., turn freeable LEBs into free LEBs.
 *
 * So this function tries to do the above. Returns %-EAGAIN if some free space
 * was presumably made and the caller has to re-try budgeting the operation.
 * Returns %-ENOSPC if it couldn't make more free space, and other negative
 * error codes on failures.
 */
static int make_free_space(struct ubifs_info *c, struct retries_info *ri)
{
    int err;

    /*
     * If we have some dirty pages and inodes (liability), try to write
     * them back unless this was tried too many times without effect
     * already.
     */
    if (ri->shrink_retries < MAX_SHRINK_RETRIES && !ri->try_gc) {
        long long liability;

        spin_lock(&c->space_lock);
        liability = c->budg_idx_growth + c->budg_data_growth +
                    c->budg_dd_growth;
        spin_unlock(&c->space_lock);

        if (ri->prev_liability >= liability) {
            /* Liability does not shrink, next time try GC then */
            ri->shrink_retries += 1;
            if (ri->gc_retries < MAX_GC_RETRIES)
                ri->try_gc = 1;
            dbg_budg("liability did not shrink: retries %d of %d",
                     ri->shrink_retries, MAX_SHRINK_RETRIES);
        }

        dbg_budg("force write-back (count %d)", ri->shrink_cnt);
        shrink_liability(c, NR_TO_WRITE + ri->shrink_cnt);

        ri->prev_liability = liability;
        ri->shrink_cnt += 1;
        return -EAGAIN;
    }

    /*
     * Try to run garbage collector unless it was already tried too many
     * times.
     */
    if (ri->gc_retries < MAX_GC_RETRIES) {
        ri->gc_retries += 1;
        dbg_budg("run GC, retries %d of %d",
                 ri->gc_retries, MAX_GC_RETRIES);

        ri->try_gc = 0;
        err = run_gc(c);
        if (!err)
            return -EAGAIN;

        if (err == -EAGAIN) {
            dbg_budg("GC asked to commit");
            err = ubifs_run_commit(c);
            if (err)
                return err;
            return -EAGAIN;
        }

        if (err != -ENOSPC)
            return err;

        /*
         * GC could not make any progress. If this is the first time,
         * then it makes sense to try to commit, because it might make
         * some dirty space.
         */
        dbg_budg("GC returned -ENOSPC, retries %d", ri->nospc_retries);
        if (ri->nospc_retries >= MAX_NOSPC_RETRIES)
            return err;
        ri->nospc_retries += 1;
    }

    /* Neither GC nor write-back helped, try to commit */
    if (ri->cmt_retries < MAX_CMT_RETRIES) {
        ri->cmt_retries += 1;
        dbg_budg("run commit, retries %d of %d",
                 ri->cmt_retries, MAX_CMT_RETRIES);
        err = ubifs_run_commit(c);
        if (err)
            return err;
        return -EAGAIN;
    }
    return -ENOSPC;
}
/* This is called when the allocator finds it has run out of memory and wants
 * to trigger a GC run. In this case, it's possible (probable, really) that it
 * will need to do that triggering, notifying other running threads that the
 * time has come to GC. */
void MVM_gc_enter_from_allocator(MVMThreadContext *tc) {
    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Entered from allocate\n");

    /* Try to start the GC run. */
    if (MVM_trycas(&tc->instance->gc_start, 0, 1)) {
        MVMThread *last_starter = NULL;
        MVMuint32 num_threads = 0;
        MVMuint32 is_full;

        /* Need to wait for other threads to reset their gc_status. */
        while (MVM_load(&tc->instance->gc_ack)) {
            GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : waiting for other thread's gc_ack\n");
            MVM_platform_thread_yield();
        }

        /* We are the winner of the GC starting race. This gives us some
         * extra responsibilities as well as doing the usual things.
         * First, increment GC sequence number. */
        MVM_incr(&tc->instance->gc_seq_number);
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE,
            "Thread %d run %d : GC thread elected coordinator: starting gc seq %d\n",
            (int)MVM_load(&tc->instance->gc_seq_number));

        /* Decide if it will be a full collection. */
        is_full = is_full_collection(tc);

        /* If profiling, record that GC is starting. */
        if (tc->instance->profiling)
            MVM_profiler_log_gc_start(tc, is_full);

        /* Ensure our stolen list is empty. */
        tc->gc_work_count = 0;

        /* Flag that we didn't agree on this run that all the in-trays are
         * cleared (a responsibility of the co-ordinator). */
        MVM_store(&tc->instance->gc_intrays_clearing, 1);

        /* We'll take care of our own work. */
        add_work(tc, tc);

        /* Find other threads, and signal or steal. */
        do {
            MVMThread *threads = (MVMThread *)MVM_load(&tc->instance->threads);
            if (threads && threads != last_starter) {
                MVMThread *head = threads;
                MVMuint32 add;
                while ((threads = (MVMThread *)MVM_casptr(&tc->instance->threads, head, NULL)) != head) {
                    head = threads;
                }

                add = signal_all_but(tc, head, last_starter);
                last_starter = head;
                if (add) {
                    GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Found %d other threads\n", add);
                    MVM_add(&tc->instance->gc_start, add);
                    num_threads += add;
                }
            }

            /* If there's an event loop thread, wake it up to participate. */
            if (tc->instance->event_loop_wakeup)
                uv_async_send(tc->instance->event_loop_wakeup);
        } while (MVM_load(&tc->instance->gc_start) > 1);

        /* Sanity checks. */
        if (!MVM_trycas(&tc->instance->threads, NULL, last_starter))
            MVM_panic(MVM_exitcode_gcorch, "threads list corrupted\n");
        if (MVM_load(&tc->instance->gc_finish) != 0)
            MVM_panic(MVM_exitcode_gcorch, "Finish votes was %"MVM_PRSz"\n",
                MVM_load(&tc->instance->gc_finish));

        /* gc_ack gets an extra so the final acknowledger
         * can also free the STables. */
        MVM_store(&tc->instance->gc_finish, num_threads + 1);
        MVM_store(&tc->instance->gc_ack, num_threads + 2);
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : finish votes is %d\n",
            (int)MVM_load(&tc->instance->gc_finish));

        /* Now we're ready to start, zero promoted since last full collection
         * counter if this is a full collect. */
        if (is_full)
            MVM_store(&tc->instance->gc_promoted_bytes_since_last_full, 0);

        /* Signal to the rest to start. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : coordinator signalling start\n");
        if (MVM_decr(&tc->instance->gc_start) != 1)
            MVM_panic(MVM_exitcode_gcorch, "Start votes was %"MVM_PRSz"\n",
                MVM_load(&tc->instance->gc_start));

        /* Start collecting. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : coordinator entering run_gc\n");
        run_gc(tc, MVMGCWhatToDo_All);

        /* Free any STables that have been marked for deletion. It's okay for
         * us to muck around in another thread's fromspace while it's mutating
         * tospace, really. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Freeing STables if needed\n");
        MVM_gc_collect_free_stables(tc);

        /* If profiling, record that GC is over. */
        if (tc->instance->profiling)
            MVM_profiler_log_gc_end(tc);

        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : GC complete (coordinator)\n");
    }
    else {
        /* Another thread beat us to starting the GC sync process. Thus, act as
         * if we were interrupted to GC. */
        GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, "Thread %d run %d : Lost coordinator election\n");
        MVM_gc_enter_from_interrupt(tc);
    }
}
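The coordinator election here (and in the older revision below) reduces to a single compare-and-swap of gc_start from 0 to 1; a self-contained illustration of the idiom using C11 atomics (hypothetical names, not MoarVM's API).

#include <stdatomic.h>
#include <stdbool.h>

/* Whichever thread flips the flag from 0 to 1 is elected coordinator;
 * every other thread behaves as if interrupted at a safe point and
 * merely enlists in the run. */
static bool try_become_coordinator(atomic_uint *gc_start)
{
    unsigned expected = 0;
    return atomic_compare_exchange_strong(gc_start, &expected, 1);
}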
/* This is called when the allocator finds it has run out of memory and wants
 * to trigger a GC run. In this case, it's possible (probable, really) that it
 * will need to do that triggering, notifying other running threads that the
 * time has come to GC. */
void MVM_gc_enter_from_allocator(MVMThreadContext *tc) {
    GCORCH_LOG(tc, "Thread %d run %d : Entered from allocate\n");

    /* Try to start the GC run. */
    if (MVM_trycas(&tc->instance->gc_start, 0, 1)) {
        MVMThread *last_starter = NULL;
        MVMuint32 num_threads = 0;

        /* We are the winner of the GC starting race. This gives us some
         * extra responsibilities as well as doing the usual things.
         * First, increment GC sequence number. */
        tc->instance->gc_seq_number++;
        GCORCH_LOG(tc, "Thread %d run %d : GC thread elected coordinator: starting gc seq %d\n",
            tc->instance->gc_seq_number);

        /* Ensure our stolen list is empty. */
        tc->gc_work_count = 0;

        /* need to wait for other threads to reset their gc_status. */
        while (tc->instance->gc_ack)
            apr_thread_yield();

        add_work(tc, tc);

        /* grab our child */
        signal_child(tc);

        do {
            if (tc->instance->threads && tc->instance->threads != last_starter) {
                MVMThread *head;
                MVMuint32 add;
                while (!MVM_trycas(&tc->instance->threads, (head = tc->instance->threads), NULL));
                add = signal_all_but(tc, head, last_starter);
                last_starter = head;
                if (add) {
                    GCORCH_LOG(tc, "Thread %d run %d : Found %d other threads\n", add);
                    MVM_atomic_add(&tc->instance->gc_start, add);
                    num_threads += add;
                }
            }
        } while (tc->instance->gc_start > 1);

        if (!MVM_trycas(&tc->instance->threads, NULL, last_starter))
            MVM_panic(MVM_exitcode_gcorch, "threads list corrupted\n");
        if (tc->instance->gc_finish != 0)
            MVM_panic(MVM_exitcode_gcorch, "finish votes was %d\n", tc->instance->gc_finish);

        tc->instance->gc_ack = tc->instance->gc_finish = num_threads + 1;
        GCORCH_LOG(tc, "Thread %d run %d : finish votes is %d\n", (int)tc->instance->gc_finish);

        /* signal to the rest to start */
        if (MVM_atomic_decr(&tc->instance->gc_start) != 1)
            MVM_panic(MVM_exitcode_gcorch, "start votes was %d\n", tc->instance->gc_start);

        run_gc(tc, MVMGCWhatToDo_All);
    }
    else {
        /* Another thread beat us to starting the GC sync process. Thus, act as
         * if we were interrupted to GC. */
        GCORCH_LOG(tc, "Thread %d run %d : Lost coordinator election\n");
        MVM_gc_enter_from_interrupt(tc);
    }
}
void aggr_source_discovery::gc() { run_gc(m_cache); }
int main(int argc, char* argv[])
{
    printf("\n\n\n\n----- Phantom exec test v. 0.5\n\n");

    run_init_functions( INIT_LEVEL_PREPARE );
    run_init_functions( INIT_LEVEL_INIT ); // before video

    //drv_video_win32.mouse = mouse_callback;
    //video_drv = &drv_video_win32;
    //video_drv = &drv_video_x11;

    args(argc, argv);

    pvm_bulk_init( bulk_seek_f, bulk_read_f );

    pvm_video_init();
    video_drv->mouse = mouse_callback;
    drv_video_init_windows();
    init_main_event_q();
    init_new_windows();
    scr_mouse_set_cursor(drv_video_get_default_mouse_bmp());

    mem = malloc(size + 1024*10);
    setDiffMem( mem, malloc(size + 1024*10), size );
    hal_init( mem, size );
    //pvm_alloc_threaded_init(); // no threads yet - no lock

    run_init_functions( INIT_LEVEL_LATE );

#if 0
    videotest();
    //getchar();
    exit(0);
#endif

#if 0
    new_videotest();
    getchar();
    exit(0);
#endif

    char *dir = getenv("PHANTOM_HOME");
    char *rest = "plib/bin/classes";

    if( dir == NULL )
    {
        dir = "pcode";
        rest = "classes";
    }

    char fn[1024];
    snprintf( fn, 1024, "%s/%s", dir, rest );

    if( load_code( &bulk_code, &bulk_size, fn ) ) //"pcode/classes") )
    {
        printf("No bulk classes file '%s'\n", fn );
        exit(22);
    }
    bulk_read_pos = bulk_code;

    pvm_root_init();

    // TODO use stray catcher in pvm_test too
    //stray();

#if 0
    //ui_loop( argc, argv, "test");
    printf("\nPhantom code finished\n" );
    //getchar();
    //{ char c; read( 0, &c, 1 ); }
    sleep(100);
#else
    dbg_init();
    kernel_debugger();
#endif

#if 0
    pvm_memcheck();
    printf("will run GC\n" );
    run_gc();
    printf("press enter\n" );
    // getchar();
    pvm_memcheck();
    save_mem(mem, size);
#endif

    return 0;
}