/* free() replacement installed by the malloc wrapper.
   Recovers the malloc_wrapper_header_t stored immediately before the
   user pointer and either recycles the chunk into the current worker's
   size-class freelist or hands it back to the system free().  */
static void myth_malloc_wrapper_free(void *ptr)
{
#ifdef MYTH_WRAP_MALLOC_RUNTIME
  /* before wrapping completed, we simply forget about it.
     (real_free not available yet, so we cannot call it.
     the problem may be deeper. the ptr may have been allocated
     by yet another function (not the original system malloc),
     so even passing it to real_free may not be the right action */
  if (!g_wrap_malloc_completed) {
    /* leak */
    return;
  }
  if (!g_wrap_malloc) {
    /* we call real_free, except for region we have allocated
       before wrapping is complete */
    /* NOTE(review): this branch assumes real_free is already resolved
       once g_wrap_malloc_completed is set — TODO confirm; the lazy
       dlsym below runs only after this point. */
    if (!sys_alloc_region(ptr)) {
      return real_free(ptr);
    }
  }
#endif
  if (!ptr)return;
  if (!real_free){
    /* lazily resolve the system free() the first time we need it */
    real_free=dlsym(RTLD_NEXT,"free");
    assert(real_free);
  }
#ifdef MYTH_WRAP_MALLOC_DLSYM_ENABLED
  //do nothing if in dlsym region
  /* pointers carved out of the static bump area used while dlsym itself
     was allocating must not be freed — they were never heap chunks */
  intptr_t s,e;
  s=(intptr_t)s_malloc_dlsym_region;
  e=s+MYTH_WRAP_MALLOC_DLSYM_SIZE;
  if (s<=((intptr_t)ptr) && ((intptr_t)ptr)<e)return;
#endif
  /* step back to the wrapper header our malloc placed just before ptr */
  malloc_wrapper_header_t rptr=(malloc_wrapper_header_t)ptr;
  rptr--;
  uint64_t idx=rptr->s.fl_index;
  if (idx>=FREE_LIST_NUM){
    /* oversized or pre-worker allocation (fl_index==FREE_LIST_NUM):
       not freelist-managed, release the original block directly */
    //fprintf(stderr,"free A,%p,%d\n",rptr->s.org_ptr,(int)idx);
    real_free(rptr->s.org_ptr);
    return;
  }
  if (g_worker_thread_num && (g_alloc_hook_ok==g_worker_thread_num)){
    /* all workers have initialized their freelists: recycle the chunk
       into this worker's per-rank freelist for size class idx */
    myth_running_env_t env;
    env=myth_get_current_env();
    int rank=env->rank;
    myth_freelist_push(g_myth_malloc_wrapper_fl[rank][idx],(void**)rptr);
    return ;
  }
  /* workers not (fully) up: fall back to the system allocator */
  //fprintf(stderr,"free B,%p,%d\n",rptr->s.org_ptr,(int)idx);
  real_free(rptr->s.org_ptr);
}
/* (garbled extraction residue: "示例#2" / "0" — not part of the original source) */
//Termination
/* Shut the runtime down from worker 0: write log sentinels, leave the
   scheduler start point, join the remaining workers, then finalize. */
void myth_fini_body(void)
{
	int w;
	/* Emit a context switch to the terminator sentinel in every
	   worker's log so the log stream has a well-defined end marker. */
	for (w = 0; w < g_worker_thread_num; w++) {
		myth_log_add_context_switch(&g_envs[w], THREAD_PTR_SCHED_TERM);
	}
	/* Return from the scheduler back to the original context. */
	myth_startpoint_exit_ex_body(0);
	/* Only worker 0 is expected to run the teardown below. */
	int rank = myth_get_current_env()->rank;
	assert(rank == 0);
	(void)rank; /* silence unused-variable warning under NDEBUG */
	//Wait for other worker threads
	for (w = 1; w < g_worker_thread_num; w++) {
		real_pthread_join(g_envs[w].worker, NULL);
	}
	myth_fini_ex_body();
}
/* Per-worker initialization of the malloc wrapper: allocates and fills
   the worker's array of size-class freelists, then signals readiness by
   incrementing g_alloc_hook_ok (the wrapper fast path activates once
   g_alloc_hook_ok == g_worker_thread_num). */
void myth_malloc_wrapper_init_worker(int rank)
{
#ifdef MYTH_WRAP_MALLOC_RUNTIME
  /* is it possible to come here before myth_malloc_wrapper_init is called? */
  if (!g_wrap_malloc) return;
#endif
  int i;
  //allocate freelist
  assert(real_malloc);
  g_myth_malloc_wrapper_fl[rank]=real_malloc(sizeof(myth_freelist_t)*FREE_LIST_NUM);
  //initialize
#if FIX_FALSE_SHARING3
  /* pre-populate small size classes with chunk batches so that
     allocations from different workers do not share cache lines */
  for (i=0;i<FREE_LIST_NUM;i++){
    size_t realsize=MYTH_MALLOC_INDEX_TO_RSIZE(i);
    size_t reqsize=realsize+sizeof(malloc_wrapper_header);
    if (reqsize <= MYTH_WRAP_MALLOC_MIN_MALLOC_SZ) {
      g_myth_malloc_wrapper_fl[rank][i] =
	make_chunks(reqsize,
		    MYTH_WRAP_MALLOC_MIN_MALLOC_SZ);
    } else {
      /* class too large to batch: start empty, filled lazily on demand */
      g_myth_malloc_wrapper_fl[rank][i] = 0;
    }
  }
#else
  for (i=0;i<FREE_LIST_NUM;i++){myth_freelist_init(g_myth_malloc_wrapper_fl[rank][i]);}
#endif
  /* atomically announce this worker's freelists are ready */
  __sync_fetch_and_add(&g_alloc_hook_ok,1);

#if 0
  /* debug dumps, disabled */
  printf("g_myth_malloc_wrapper_fl = %ld\n", 
	 (long)g_myth_malloc_wrapper_fl);

  printf("env[%d] = %ld\n", rank, (long)myth_get_current_env());
  printf("g_myth_malloc_wrapper_fl[%d] = %ld\n", 
	 rank, (long)g_myth_malloc_wrapper_fl[rank]);
#elif 0
  printf("0 0 %d %ld A\n", rank, (long)g_myth_malloc_wrapper_fl);
  printf("0 0 %d %ld F\n", rank, (long)g_myth_malloc_wrapper_fl[rank]);
  printf("0 0 %d %ld E\n", rank, (long)&g_envs[rank]);
#endif
}
/* (garbled extraction residue: "示例#4" / "0" — not part of the original source) */
/* Work-stealing API: pop one thread from the calling worker's own run
   queue. Returns whatever myth_queue_pop yields (presumably NULL when
   the queue is empty — TODO confirm against myth_queue_pop). */
myth_thread_t myth_wsapi_runqueue_pop(void)
{
    return myth_queue_pop(&myth_get_current_env()->runnable_q);
}
/* (garbled extraction residue: "示例#5" / "0" — not part of the original source) */
/* Work-stealing API: push thread th onto the calling worker's own run
   queue. */
void myth_wsapi_runqueue_push(myth_thread_t th)
{
    myth_running_env_t e = myth_get_current_env();
    myth_queue_push(&e->runnable_q, th);
}
/* malloc() replacement installed by the malloc wrapper.
   Prepends a malloc_wrapper_header recording the size-class index and
   the original allocation pointer, and returns the address just past
   that header. Small requests are served from the current worker's
   per-rank freelists once all workers are initialized; everything else
   falls through to the system malloc. */
static void * myth_malloc_wrapper_malloc(size_t size)
{
#ifdef MYTH_WRAP_MALLOC_RUNTIME
  /* fall back to the bump allocator before wrapping completed */
  if (!g_wrap_malloc_completed) {
    void *ptr = sys_alloc_align(16, size);
    return ptr;
  }
  /* no wrap. call the real one */
  if (!g_wrap_malloc) {
    return real_malloc(size);
  }
#endif

  //fprintf(stderr,"malloc %d\n",size);
  malloc_wrapper_header_t ptr;
  size_t realsize;
  int idx;
  /* clamp to the smallest size class (also normalizes size==0) */
  if (size<16)size=16;
  if (!real_malloc){
    /* lazily resolve the system malloc; the protect flag breaks the
       recursion when dlsym itself calls malloc — the nested call gets
       NULL instead of re-entering dlsym */
    static int load_malloc_protect=0;
    if (load_malloc_protect==0){
      load_malloc_protect=1;
      real_malloc=dlsym(RTLD_NEXT,"malloc");
    }
    else return NULL;
    assert(real_malloc);
  }
  /* slow path: workers not (fully) initialized, or request too large
     for any freelist size class */
  if ((!g_worker_thread_num) || (g_alloc_hook_ok!=g_worker_thread_num) || (size>MYTH_MALLOC_FLSIZE_MAX)){
    ptr=real_malloc(size+sizeof(malloc_wrapper_header));
    if (!ptr){
      fprintf(stderr,"size=%llu\n",(unsigned long long)size);
    }
    assert(ptr);
    /* fl_index==FREE_LIST_NUM marks "not freelist-managed" for free() */
    ptr->s.fl_index=FREE_LIST_NUM;
    ptr->s.org_ptr=ptr;
    //fprintf(stderr,"malloc A,%p,%d\n",ptr,FREE_LIST_NUM);
    return (void*)(ptr+1);
  }
  /* fast path: map the request to a size class and round up */
  idx=MYTH_MALLOC_SIZE_TO_INDEX(size);
  realsize=MYTH_MALLOC_INDEX_TO_RSIZE(idx);
  void **fl_ptr;
  myth_running_env_t env;
  env=myth_get_current_env();
  int rank=env->rank;
  /* NOTE(review): myth_freelist_pop is presumably a macro that assigns
     fl_ptr (NULL when the list is empty) — TODO confirm; fl_ptr is
     never initialized here otherwise. */
  myth_freelist_pop(g_myth_malloc_wrapper_fl[rank][idx],fl_ptr);

#if FIX_FALSE_SHARING2
  if (!fl_ptr) {
    /* empty: refill the class with a batch of chunks, then retry */
    g_myth_malloc_wrapper_fl[rank][idx] =
      make_chunks(realsize+sizeof(malloc_wrapper_header),
		  MYTH_WRAP_MALLOC_MIN_MALLOC_SZ);
    myth_freelist_pop(g_myth_malloc_wrapper_fl[rank][idx],fl_ptr);
    assert(fl_ptr);
  } 
  ptr=(malloc_wrapper_header_t)fl_ptr;
#else
  if (!fl_ptr){
    //Freelist is empty, allocate
    ptr=real_malloc(realsize+sizeof(malloc_wrapper_header));
    //fprintf(stderr,"malloc B,%p,%d\n",ptr,idx);
    assert(ptr);
  }
  else{
    ptr=(malloc_wrapper_header_t)fl_ptr;
  }
#endif
  /* stamp the header so free() can route the chunk back to class idx */
  ptr->s.fl_index=idx;
  ptr->s.org_ptr=ptr;
  return (void*)(ptr+1);
}