Example no. 1
0
uint64_t Lock_AtomicAcquire(int isShared)
{
    Lock_Atomic64_t *alock;
    Lock_Atomic64_t *olock;
    if (isShared)
    {
        alock   = &(NodeState.FutexLockShared);
        olock   = &(NodeState.FutexOrderLockShared);
    }
    else
    {
        AppProcess_t *proc = GetMyProcess();
        alock   = &(proc->FutexLockPrivate);
        olock   = &(proc->FutexOrderLockPrivate);
    }
    uint64_t my_turn;

    my_turn = fetch_and_add64(alock, 1);

    if (ATOMIC_READ( olock ) != my_turn)
    {
        // lower our priority while we wait for the lock to become available
        ThreadPriority_Low();
        while ( ATOMIC_READ( olock ) != my_turn )
        {
        }
        ThreadPriority_Medium(); // restore priority
    }

    return my_turn;
}
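The function above is the acquire half of a ticket lock: each caller takes a ticket from alock and spins until olock reaches its turn. The matching release routine is not included in this example; below is a minimal sketch of what it plausibly looks like, reusing the field names and the fetch_and_add64 primitive from the acquire path (treat it as an assumption, not the actual implementation).

// Hypothetical counterpart to Lock_AtomicAcquire(): releasing a ticket
// lock means advancing the order lock so that the next waiter's
// ATOMIC_READ(olock) == my_turn test succeeds.
void Lock_AtomicRelease(int isShared)
{
    Lock_Atomic64_t *olock;
    if (isShared)
    {
        olock = &(NodeState.FutexOrderLockShared);
    }
    else
    {
        AppProcess_t *proc = GetMyProcess();
        olock = &(proc->FutexOrderLockPrivate);
    }
    fetch_and_add64(olock, 1); // hand the lock to the next ticket holder
}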
Example no. 2
0
int rqueue_isempty(rqueue_t *rb)
{
    rqueue_page_t *head = ATOMIC_READ(rb->head);
    rqueue_page_t *commit = ATOMIC_READ(rb->commit);
    rqueue_page_t *tail = ATOMIC_READ(rb->tail);
    return ((rb->reader == commit || (head == tail && commit != tail) || ATOMIC_READ(rb->writes) == 0));
}
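rqueue_isempty() only tells a reader that there is currently nothing to consume; actually taking items is done with rqueue_read(), shown in a later example. Below is a small consumer sketch combining the two, assuming the rqueue header is included and with consume() as a hypothetical callback.

/* Hypothetical single-consumer drain loop: rqueue_read() (see the later
 * example) returns NULL when nothing could be read. */
static void drain_queue(rqueue_t *rb, void (*consume)(void *item))
{
    while (!rqueue_isempty(rb)) {
        void *item = rqueue_read(rb);
        if (item)
            consume(item);
    }
}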
Example no. 3
0
void
arc_get_size(arc_t *cache, size_t *mru_size, size_t *mfu_size, size_t *mrug_size, size_t *mfug_size)
{
    *mru_size = ATOMIC_READ(cache->mru.size);
    *mfu_size = ATOMIC_READ(cache->mfu.size);
    *mrug_size = ATOMIC_READ(cache->mrug.size);
    *mfug_size = ATOMIC_READ(cache->mfug.size);
}
Example no. 4
void rtw_mstat_update(const enum mstat_f flags, const MSTAT_STATUS status, u32 sz)
{
	static u32 update_time = 0;
	int peak, alloc;
	int i;

	/* initialization */
	if(!update_time) {
		for(i=0;i<mstat_tf_idx(MSTAT_TYPE_MAX);i++) {
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_err_cnt), 0);
		}
		for(i=0;i<mstat_ff_idx(MSTAT_FUNC_MAX);i++) {
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_err_cnt), 0);
		}
	}

	switch(status) {
		case MSTAT_ALLOC_SUCCESS:
			ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
			alloc = ATOMIC_ADD_RETURN(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
			peak=ATOMIC_READ(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak));
			if (peak<alloc)
				ATOMIC_SET(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak), alloc);

			ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
			alloc = ATOMIC_ADD_RETURN(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
			peak=ATOMIC_READ(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak));
			if (peak<alloc)
				ATOMIC_SET(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak), alloc);
			break;

		case MSTAT_ALLOC_FAIL:
			ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_err_cnt));

			ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_err_cnt));
			break;

		case MSTAT_FREE:
			ATOMIC_DEC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
			ATOMIC_SUB(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);

			ATOMIC_DEC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
			ATOMIC_SUB(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
			break;
	}

	//if (rtw_get_passing_time_ms(update_time) > 5000) {
	//	rtw_mstat_dump();
		update_time=rtw_get_current_time();
	//}
}
Example no. 5
0
void rtw_update_mem_stat(u8 flag, u32 sz)
{
	static u32 update_time = 0;
	int peak, alloc;

	if(!update_time) {
		ATOMIC_SET(&rtw_dbg_mem_stat.vir_alloc,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.vir_peak,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.vir_alloc_err,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.phy_alloc,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.phy_peak,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.phy_alloc_err,0);
	}
		
	switch(flag) {
		case MEM_STAT_VIR_ALLOC_SUCCESS:
			alloc = ATOMIC_ADD_RETURN(&rtw_dbg_mem_stat.vir_alloc, sz);
			peak=ATOMIC_READ(&rtw_dbg_mem_stat.vir_peak);
			if (peak<alloc)
				ATOMIC_SET(&rtw_dbg_mem_stat.vir_peak, alloc);
			break;
			
		case MEM_STAT_VIR_ALLOC_FAIL:
			ATOMIC_INC(&rtw_dbg_mem_stat.vir_alloc_err);
			break;
			
		case MEM_STAT_VIR_FREE:
			alloc = ATOMIC_SUB_RETURN(&rtw_dbg_mem_stat.vir_alloc, sz);
			break;
			
		case MEM_STAT_PHY_ALLOC_SUCCESS:
			alloc = ATOMIC_ADD_RETURN(&rtw_dbg_mem_stat.phy_alloc, sz);
			peak=ATOMIC_READ(&rtw_dbg_mem_stat.phy_peak);
			if (peak<alloc)
				ATOMIC_SET(&rtw_dbg_mem_stat.phy_peak, alloc);
			break;

		case MEM_STAT_PHY_ALLOC_FAIL:
			ATOMIC_INC(&rtw_dbg_mem_stat.phy_alloc_err);
			break;
		
		case MEM_STAT_PHY_FREE:
			alloc = ATOMIC_SUB_RETURN(&rtw_dbg_mem_stat.phy_alloc, sz);
			break;
	}

	if (rtw_get_passing_time_ms(update_time) > 5000) {
		rtw_dump_mem_stat();
		update_time=rtw_get_current_time();
	}
}
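Both memory-stat updaters above track the peak with an ATOMIC_READ followed by an ATOMIC_SET, so two allocations racing on the same counter can publish a stale peak. Below is a hedged sketch of a compare-and-swap alternative; the ATOMIC_T type name and an ATOMIC_CAS(ptr, old, new) macro returning nonzero on success are assumptions, since neither appears in these excerpts.

/* Raise *peak to alloc unless another thread already raised it higher. */
static void rtw_update_peak(ATOMIC_T *peak, int alloc)
{
	int cur = ATOMIC_READ(peak);

	while (alloc > cur) {
		if (ATOMIC_CAS(peak, cur, alloc))
			break;			/* we published the new peak */
		cur = ATOMIC_READ(peak);	/* lost the race; re-check */
	}
}

The SUCCESS branches above could then call rtw_update_peak() on the relevant peak counter instead of the read-then-set sequence.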
Example no. 6
0
char *rqueue_stats(rqueue_t *rb) {
    char *buf = malloc(1024);
    if (!buf)
        return NULL;

    snprintf(buf, 1024,
           "reader:      %p \n"
           "head:        %p \n"
           "tail:        %p \n"
           "commit:      %p \n"
           "commit_next: %p \n"
           "reads:       %"PRId64" \n"
           "writes:      %"PRId64" \n"
           "mode:        %s \n",
           ATOMIC_READ(rb->reader),
           ATOMIC_READ(rb->head),
           ATOMIC_READ(rb->tail),
           ATOMIC_READ(rb->commit),
           ATOMIC_READ(ATOMIC_READ(rb->commit)->next),
           ATOMIC_READ(rb->reads),
           ATOMIC_READ(rb->writes),
           rb->mode == RQUEUE_MODE_BLOCKING ? "blocking" : "overwrite");

    return buf;
}
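The buffer returned by rqueue_stats() is heap-allocated, so ownership passes to the caller. A short usage sketch (the rqueue header is assumed to be included):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper: print the stats snapshot and release the buffer. */
static void dump_queue_stats(rqueue_t *rb)
{
    char *stats = rqueue_stats(rb);
    if (stats) {
        fputs(stats, stderr);
        free(stats);
    }
}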
Example no. 7
0
taskID dag_get_task(CSOUND *csound)
{
    int i;
    int morework = 0;
    int active = csound->dag_num_active;
    volatile enum state *task_status = csound->dag_task_status;
    //printf("**GetTask from %d\n", csound->dag_num_active);
    for (i=0; i<active; i++) {
      if (ATOMIC_CAS(&(task_status[i]), AVAILABLE, INPROGRESS)) {
        return (taskID)i;
      }
      //else if (ATOMIC_READ(task_status[i])==WAITING)
      //  printf("**%d waiting\n", i);
      //else if (ATOMIC_READ(task_status[i])==INPROGRESS)
      //  printf("**%d active\n", i);
      else if (ATOMIC_READ(task_status[i])==DONE) {
        //printf("**%d done\n", i);
        morework++;
      }
    }
    //dag_print_state(csound);
    if (morework==active) return (taskID)INVALID;
    //printf("taskstodo=%d)\n", morework);
    return (taskID)WAIT;
}
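dag_get_task() either hands out a claimable task, or reports WAIT (nothing runnable right now) or INVALID (everything is done). Below is a hedged sketch of a worker loop built on it together with dag_end_task() from the next example; perform_task() is a hypothetical placeholder for whatever actually executes a task.

/* Hypothetical worker loop: keep claiming tasks until the DAG is drained. */
static void dag_worker(CSOUND *csound)
{
    for (;;) {
      taskID t = dag_get_task(csound);
      if (t == (taskID)INVALID)
        break;                    /* every task is DONE */
      if (t == (taskID)WAIT)
        continue;                 /* nothing claimable yet; try again */
      perform_task(csound, t);    /* hypothetical execution step */
      dag_end_task(csound, t);    /* notify watchers (see next example) */
    }
}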
Example no. 8
0
void dag_end_task(CSOUND *csound, taskID i)
{
    watchList *to_notify, *next;
    int canQueue;
    int j, k;
    watchList * volatile *task_watch = csound->dag_task_watch;
    ATOMIC_WRITE(csound->dag_task_status[i], DONE); /* as DONE is zero */
    {                                      /* ATOMIC_SWAP */
      do {
        to_notify = ATOMIC_READ(task_watch[i]);
      } while (!ATOMIC_CAS(&task_watch[i],to_notify,&DoNotRead));
    } //to_notify = ATOMIC_SWAP(task_watch[i], &DoNotRead);
    //printf("Ending task %d\n", i);
    next = to_notify;
    while (to_notify) {         /* walk the list of watchers */
      next = to_notify->next;
      j = to_notify->id;
      //printf("%d notifying task %d it finished\n", i, j);
      canQueue = 1;
      for (k=0; k<j; k++) {     /* seek next watch */
        if (csound->dag_task_dep[j][k]==0) continue;
        //printf("investigating task %d (%d)\n", k, csound->dag_task_status[k]);
        if (ATOMIC_READ(csound->dag_task_status[k]) != DONE) {
          //printf("found task %d to watch %d status %d\n",
          //       k, j, csound->dag_task_status[k]);
          if (moveWatch(csound, &task_watch[k], to_notify)) {
            //printf("task %d now watches %d\n", j, k);
            canQueue = 0;
            break;
          }
          else {
            /* assert csound->dag_task_status[j] == DONE and we are in a race */
            //printf("Racing status %d %d %d %d\n",
            //       csound->dag_task_status[j], i, j, k);
          }
        }
        //else { printf("not %d\n", k); }
      }
      if (canQueue) {           /*  could use monitor here */
        csound->dag_task_status[j] = AVAILABLE;
      }
      to_notify = next;
    }
    //dag_print_state(csound);
    return;
}
Example no. 9
0
u8 rtw_set_802_11_bssid_list_scan(_adapter* padapter)
{	
	_irqL	irqL;
	struct	mlme_priv		*pmlmepriv;
	u8	res = _TRUE;
	
_func_enter_;

	/* validate the adapter before touching any of its members */
	if (padapter == NULL) {
		res = _FALSE;
		goto exit;
	}

	pmlmepriv = &padapter->mlmepriv;

	RT_TRACE(_module_rtl871x_ioctl_set_c_,_drv_err_,("+rtw_set_802_11_bssid_list_scan(), fw_state=%x\n", get_fwstate(pmlmepriv)));

	if (padapter->hw_init_completed==_FALSE){
		res = _FALSE;
		RT_TRACE(_module_rtl871x_ioctl_set_c_,_drv_err_,("\n===rtw_set_802_11_bssid_list_scan:hw_init_completed==_FALSE===\n"));
		goto exit;
	}
	
	if ((check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == _TRUE) ||
		(pmlmepriv->LinkDetectInfo.bBusyTraffic == _TRUE))
	{
		// Scan or linking is in progress, do nothing.
		RT_TRACE(_module_rtl871x_ioctl_set_c_,_drv_err_,("rtw_set_802_11_bssid_list_scan fail since fw_state = %x\n", get_fwstate(pmlmepriv)));
		res = _TRUE;

		if(check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING))== _TRUE){
			RT_TRACE(_module_rtl871x_ioctl_set_c_,_drv_err_,("\n###_FW_UNDER_SURVEY|_FW_UNDER_LINKING\n\n"));
		} else {
			RT_TRACE(_module_rtl871x_ioctl_set_c_,_drv_err_,("\n###pmlmepriv->sitesurveyctrl.traffic_busy==_TRUE\n\n"));
		}
	} else {
		NDIS_802_11_SSID ssid;
		
		#ifdef CONFIG_SET_SCAN_DENY_TIMER
		if(ATOMIC_READ(&pmlmepriv->set_scan_deny)==1){
			DBG_871X("%s:%d CONFIG_SET_SCAN_DENY_TIMER deny scan\n", __FUNCTION__, __LINE__);
			indicate_wx_scan_complete_event(padapter);
			return _SUCCESS;
		}
		#endif
		
		_enter_critical_bh(&pmlmepriv->lock, &irqL);		
		
		_rtw_memset((unsigned char*)&ssid, 0, sizeof(NDIS_802_11_SSID));
		
		res = rtw_sitesurvey_cmd(padapter, &ssid);
		
		_exit_critical_bh(&pmlmepriv->lock, &irqL);
	}
exit:
	
_func_exit_;

	return res;	
}
Example no. 10
0
void
arc_update_resource_size(arc_t *cache, arc_resource_t res, size_t size)
{
    arc_object_t *obj = (arc_object_t *)res;
    if (obj) {
        MUTEX_LOCK(&cache->lock);
        arc_state_t *state = ATOMIC_READ(obj->state);
        if (LIKELY(state == &cache->mru || state == &cache->mfu)) {
            ATOMIC_DECREASE(state->size, obj->size);
            obj->size = ARC_OBJ_BASE_SIZE(obj) + cache->cos + size;
            ATOMIC_INCREASE(state->size, obj->size);
        }
        ATOMIC_INCREMENT(cache->needs_balance);
        MUTEX_UNLOCK(&cache->lock);
    }
}
Example no. 11
0
void *rqueue_read(rqueue_t *rb) {
    int i;
    void *v = NULL;

    for (i = 0; i < RQUEUE_MAX_RETRIES; i++) {

        if (__builtin_expect(ATOMIC_CAS(rb->read_sync, 0, 1), 1)) {
            rqueue_page_t *head = ATOMIC_READ(rb->head);

            rqueue_page_t *commit = ATOMIC_READ(rb->commit);
            rqueue_page_t *tail = ATOMIC_READ(rb->tail);
            rqueue_page_t *next = ATOMIC_READ(head->next);
            rqueue_page_t *old_next = ATOMIC_READ(rb->reader->next);

            if (rb->reader == commit || (head == tail && commit != tail) || ATOMIC_READ(rb->writes) == 0)
            { // nothing to read
                ATOMIC_CAS(rb->read_sync, 1, 0);
                break;
            }

            if (ATOMIC_CAS(rb->reader->next, old_next, RQUEUE_FLAG_ON(next, RQUEUE_FLAG_HEAD))) {
                rb->reader->prev = head->prev;

                if (ATOMIC_CAS(head->prev->next, RQUEUE_FLAG_ON(head, RQUEUE_FLAG_HEAD), rb->reader)) {
                    ATOMIC_CAS(rb->head, head, next);
                    next->prev = rb->reader;
                    rb->reader = head;
                    /*
                    rb->reader->next = next;
                    rb->reader->prev = next->prev;
                    */
                    v = ATOMIC_READ(rb->reader->value);
                    ATOMIC_CAS(rb->reader->value, v, NULL);
                    ATOMIC_INCREMENT(rb->reads);
                    ATOMIC_CAS(rb->read_sync, 1, 0);
                    break;
                } else {
                    fprintf(stderr, "head swap failed\n");
                }
            } else {
                fprintf(stderr, "reader->next swap failed\n");
            }
            ATOMIC_CAS(rb->read_sync, 1, 0);
        }
    }
    return v;
}
Example no. 12
0
static void wurfld_get_capabilities(char *useragent, fbuf_t *output) {
    wurfl_device_handle device = wurfl_lookup_useragent(ATOMIC_READ(wurfl), useragent); 
    if (device) {
        fbuf_printf(output, "{\"match_type\":\"%d\",\"matcher_name\":\"%s\",\"device\":\"%s\",",
                wurfl_device_get_match_type(device), wurfl_device_get_matcher_name(device), wurfl_device_get_id(device) );

        wurfl_device_capability_enumerator_handle enumerator = wurfl_device_get_capability_enumerator(device);
        wurfld_fill_capabilities(enumerator, "capabilities", output);
        wurfl_device_capability_enumerator_destroy(enumerator);
        fbuf_add(output, ",");
        enumerator = wurfl_device_get_virtual_capability_enumerator(device);
        wurfld_fill_capabilities(enumerator, "virtual_capabilities", output);
        wurfl_device_capability_enumerator_destroy(enumerator);

        fbuf_add(output, "}\n");
        wurfl_device_destroy(device);
    }
}
Example no. 13
0
int _rtw_mstat_dump(char *buf, int len)
{
	int cnt = 0;
	int i;
	int value_t[4][mstat_tf_idx(MSTAT_TYPE_MAX)];
	int value_f[4][mstat_ff_idx(MSTAT_FUNC_MAX)];
	
	for(i=0;i<mstat_tf_idx(MSTAT_TYPE_MAX);i++) {
		value_t[0][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc));
		value_t[1][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].peak));
		value_t[2][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_cnt));
		value_t[3][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_err_cnt));
	}

	#if 0
	for(i=0;i<mstat_ff_idx(MSTAT_FUNC_MAX);i++) {
		value_f[0][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc));
		value_f[1][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].peak));
		value_f[2][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_cnt));
		value_f[3][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_err_cnt));
	}
	#endif

	cnt += snprintf(buf+cnt, len-cnt, "===================== MSTAT =====================\n");
	cnt += snprintf(buf+cnt, len-cnt, "%4s %10s %10s %10s %10s\n", "TAG", "alloc", "peak", "aloc_cnt", "err_cnt");
	cnt += snprintf(buf+cnt, len-cnt, "-------------------------------------------------\n");
	for(i=0;i<mstat_tf_idx(MSTAT_TYPE_MAX);i++) {
		cnt += snprintf(buf+cnt, len-cnt, "%4s %10d %10d %10d %10d\n", MSTAT_TYPE_str[i], value_t[0][i], value_t[1][i], value_t[2][i], value_t[3][i]);
	}
	#if 0
	cnt += snprintf(buf+cnt, len-cnt, "-------------------------------------------------\n");
	for(i=0;i<mstat_ff_idx(MSTAT_FUNC_MAX);i++) {
		cnt += snprintf(buf+cnt, len-cnt, "%4s %10d %10d %10d %10d\n", MSTAT_FUNC_str[i], value_f[0][i], value_f[1][i], value_f[2][i], value_f[3][i]);
	}
	#endif

	return cnt;
}
Example no. 14
0
static void wurfl_init() {
    NOTICE("Initializing WURFL");
    wurfl_handle new_wurfl = wurfl_create(); 
    wurfl_set_engine_target(new_wurfl, WURFL_ENGINE_TARGET_HIGH_PERFORMANCE);
    wurfl_set_cache_provider(new_wurfl, WURFL_CACHE_PROVIDER_DOUBLE_LRU, "10000,3000");
    wurfl_set_root(new_wurfl, wurfl_file);
    wurfl_error err = wurfl_load(new_wurfl);
    if (err != WURFL_OK) {
        WARN("Can't initialize wurfl %s", wurfl_get_error_message(new_wurfl));
        exit(-1);
    }
    wurfl_handle old_wurfl;
    do {
        old_wurfl = ATOMIC_READ(wurfl);
    } while (!ATOMIC_CMPXCHG(wurfl, old_wurfl, new_wurfl));
    if (old_wurfl)
        wurfl_destroy(old_wurfl);
    NOTICE("DONE");
}
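main() in a later example registers wurfld_reload() as the SIGHUP handler, but its body is not included here. A plausible sketch, purely an assumption, is that it rebuilds the engine through wurfl_init(), which already publishes the new handle with ATOMIC_CMPXCHG and destroys the old one:

// Hypothetical SIGHUP handler: rebuild the WURFL engine and rely on
// wurfl_init() to atomically swap the global handle.
static void wurfld_reload(int signum)
{
    (void)signum;
    wurfl_init();
}

In a production handler one would more likely just set a flag and run the reload outside signal context, since wurfl_init() allocates and logs.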
Example no. 15
0
inline static int moveWatch(CSOUND *csound, watchList * volatile *w,
                            watchList *t)
{
    watchList *local=*w;
    t->next = NULL;
    //printf("moveWatch\n");
    do {
      //dag_print_state(csound);
      local = ATOMIC_READ(*w);
      if (local==&DoNotRead) {
        //printf("local is DoNotRead\n");
        return 0;//was no & earlier
      }
      else t->next = local;
    } while (!ATOMIC_CAS(w,local,t));
    //dag_print_state(csound);
    //printf("moveWatch done\n");
    return 1;
}
Example no. 16
0
void rtw_mstat_dump(void *sel)
{
	int i;
	int value_t[4][mstat_tf_idx(MSTAT_TYPE_MAX)];
#ifdef RTW_MEM_FUNC_STAT
	int value_f[4][mstat_ff_idx(MSTAT_FUNC_MAX)];
#endif

	for(i=0;i<mstat_tf_idx(MSTAT_TYPE_MAX);i++) {
		value_t[0][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc));
		value_t[1][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].peak));
		value_t[2][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_cnt));
		value_t[3][i] = ATOMIC_READ(&(rtw_mem_type_stat[i].alloc_err_cnt));
	}

	#ifdef RTW_MEM_FUNC_STAT
	for(i=0;i<mstat_ff_idx(MSTAT_FUNC_MAX);i++) {
		value_f[0][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc));
		value_f[1][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].peak));
		value_f[2][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_cnt));
		value_f[3][i] = ATOMIC_READ(&(rtw_mem_func_stat[i].alloc_err_cnt));
	}
	#endif

	DBG_871X_SEL_NL(sel, "===================== MSTAT =====================\n");
	DBG_871X_SEL_NL(sel, "%4s %10s %10s %10s %10s\n", "TAG", "alloc", "peak", "aloc_cnt", "err_cnt");
	DBG_871X_SEL_NL(sel, "-------------------------------------------------\n");
	for(i=0;i<mstat_tf_idx(MSTAT_TYPE_MAX);i++) {
		DBG_871X_SEL_NL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_TYPE_str[i], value_t[0][i], value_t[1][i], value_t[2][i], value_t[3][i]);
	}
	#ifdef RTW_MEM_FUNC_STAT
	DBG_871X_SEL_NL(sel, "-------------------------------------------------\n");
	for(i=0;i<mstat_ff_idx(MSTAT_FUNC_MAX);i++) {
		DBG_871X_SEL_NL(sel, "%4s %10d %10d %10d %10d\n", MSTAT_FUNC_str[i], value_f[0][i], value_f[1][i], value_f[2][i], value_f[3][i]);
	}
	#endif
}
Example no. 17
0
char CORD_lf_func(size_t i, void * client_data)
{
    register lf_state * state = (lf_state *)client_data;
    register cache_line * volatile * cl_addr =
        &(state -> lf_cache[DIV_LINE_SZ(MOD_CACHE_SZ(i))]);
    register cache_line * cl = (cache_line *)ATOMIC_READ(cl_addr);

    if (cl == 0 || cl -> tag != DIV_LINE_SZ(i)) {
        /* Cache miss */
        refill_data rd;

        rd.state = state;
        rd.file_pos =  i;
        rd.new_cache = GC_NEW_ATOMIC(cache_line);
        if (rd.new_cache == 0) OUT_OF_MEMORY;
        return((char)(GC_word)
              GC_call_with_alloc_lock((GC_fn_type) refill_cache, &rd));
    }
    return(cl -> data[MOD_LINE_SZ(i)]);
}
Example no. 18
0
void rtw_dump_mem_stat (void)
{
	int vir_alloc, vir_peak, vir_alloc_err, phy_alloc, phy_peak, phy_alloc_err;
	
	vir_alloc=ATOMIC_READ(&rtw_dbg_mem_stat.vir_alloc);
	vir_peak=ATOMIC_READ(&rtw_dbg_mem_stat.vir_peak);
	vir_alloc_err=ATOMIC_READ(&rtw_dbg_mem_stat.vir_alloc_err);

	phy_alloc=ATOMIC_READ(&rtw_dbg_mem_stat.phy_alloc);
	phy_peak=ATOMIC_READ(&rtw_dbg_mem_stat.phy_peak);
	phy_alloc_err=ATOMIC_READ(&rtw_dbg_mem_stat.phy_alloc_err);

	DBG_871X("vir_alloc:%d, vir_peak:%d,vir_alloc_err:%d, phy_alloc:%d, phy_peak:%d, phy_alloc_err:%d\n"
		, vir_alloc, vir_peak, vir_alloc_err
		, phy_alloc, phy_peak, phy_alloc_err
	);
}
Example no. 19
0
/* Balance the lists so that the objects held in the cache fit within its
 * target size (cache->c). */
static inline void
arc_balance(arc_t *cache)
{
    if (!ATOMIC_READ(cache->needs_balance))
        return;

    MUTEX_LOCK(&cache->lock);
    /* First move objects from MRU/MFU to their respective ghost lists. */
    while (cache->mru.size + cache->mfu.size > cache->c) {
        if (cache->mru.size > cache->p) {
            arc_object_t *obj = arc_state_lru(&cache->mru);
            arc_move(cache, obj, &cache->mrug);
        } else if (cache->mfu.size > cache->c - cache->p) {
            arc_object_t *obj = arc_state_lru(&cache->mfu);
            arc_move(cache, obj, &cache->mfug);
        } else {
            break;
        }
    }

    /* Then start removing objects from the ghost lists. */
    while (cache->mrug.size + cache->mfug.size > cache->c) {
        if (cache->mfug.size > cache->p) {
            arc_object_t *obj = arc_state_lru(&cache->mfug);
            arc_move(cache, obj, NULL);
        } else if (cache->mrug.size > cache->c - cache->p) {
            arc_object_t *obj = arc_state_lru(&cache->mrug);
            arc_move(cache, obj, NULL);
        } else {
            break;
        }
    }

    ATOMIC_SET(cache->needs_balance, 0);
    MUTEX_UNLOCK(&cache->lock);
}
Example no. 20
0
int main(int argc, char **argv) {

    int option_index = 0;
    int foreground = 0;
    int loglevel = WURFL_LOGLEVEL_DEFAULT;
    char *listen_address = WURFL_ADDRESS_DEFAULT;
    uint16_t listen_port = WURFL_PORT_DEFAULT;

    static struct option long_options[] = {
        {"debug", 2, 0, 'd'},
        {"foreground", 0, 0, 'f'},
        {"listen", 2, 0, 'l'},
        {"port", 2, 0, 'p'},
        {"wurfl_file", 1, 0, 'w'},
        {"nohttp", 0, 0, 'n'},
        {"singlethread", 0, 0, 's'},
        {"help", 0, 0, 'h'},
        {0, 0, 0, 0}
    };

    int c;   // getopt_long() returns an int (may be -1)
    while ((c = getopt_long (argc, argv, "d:fhl:np:sw:?", long_options, &option_index))) {
        if (c == -1) {
            break;
        }
        switch (c) {
            case 'd':
                loglevel = optarg ? atoi(optarg) : 1;
                break;
            case 'f':
                foreground = 1;
                break;
            case 'l':
                listen_address = optarg;
                break;
            case 'p':
                listen_port = atoi(optarg);
                break;
            case 's':
                single_thread = 1;
                break;
            case 'w':
                wurfl_file = optarg;
                break;
            case 'n':
                use_http = 0;
                break;
            case 'h':
            case '?':
                usage(argv[0], NULL);
                break;
            default:
                break;
        }
    }

    if (!foreground)
        daemon(0, 0);

    log_init("wurfld", loglevel);

    wurfl_init();

    signal(SIGHUP, wurfld_reload);
    signal(SIGINT, wurfld_stop);
    signal(SIGQUIT, wurfld_stop);
    signal(SIGPIPE, wurfld_do_nothing);

    // initialize the callbacks descriptor
    iomux_callbacks_t wurfld_callbacks = {
        .mux_connection = wurfld_connection_handler,
        .mux_input = wurfld_input_handler,
        .mux_eof = wurfld_eof_handler,
        .mux_output = NULL,
        .mux_timeout = NULL,
        .priv = &wurfld_callbacks
    };

    iomux = iomux_create();

    int listen_fd = open_socket(listen_address, listen_port);    
    if (listen_fd < 0) {
        ERROR("Can't bind address %s:%d - %s",
                listen_address, listen_port, strerror(errno));
        exit(-1);
    }
    NOTICE("Listening on %s:%d", listen_address, listen_port);

    iomux_add(iomux, listen_fd, &wurfld_callbacks);
    iomux_listen(iomux, listen_fd);

    // this takes over the runloop and handle incoming connections
    iomux_loop(iomux, 0);

    // if we are here, iomux has exited the loop
    NOTICE("exiting");
    iomux_destroy(iomux);
    wurfl_destroy(ATOMIC_READ(wurfl));
    close(listen_fd);
    
    exit(0);
}
Example no. 21
0
// the returned object is retained, the caller must call arc_release_resource(obj) to release it
arc_resource_t 
arc_lookup(arc_t *cache, const void *key, size_t len, void **valuep, int async)
{
    // NOTE: this is an atomic operation ensured by the hashtable implementation;
    //       our callback doesn't do any real copy, it just increases the
    //       refcount of the object (if found)
    arc_object_t *obj = ht_get_deep_copy(cache->hash, (void *)key, len, NULL, retain_obj_cb, cache);
    if (obj) {
        if (!ATOMIC_READ(cache->mode) || UNLIKELY(ATOMIC_READ(obj->state) != &cache->mfu)) {
            if (UNLIKELY(arc_move(cache, obj, &cache->mfu) == -1)) {
                fprintf(stderr, "Can't move the object into the cache\n");
                return NULL;
            }
            arc_balance(cache);
        }

        if (valuep)
            *valuep = obj->ptr;

        return obj;
    }

    obj = arc_object_create(cache, key, len);
    if (!obj)
        return NULL;

    // let our cache user initialize the underlying object
    cache->ops->init(key, len, async, (arc_resource_t)obj, obj->ptr, cache->ops->priv);
    obj->async = async;

    retain_ref(cache->refcnt, obj->node);
    // NOTE: atomicity here is ensured by the hashtable implementation
    int rc = ht_set_if_not_exists(cache->hash, (void *)key, len, obj, sizeof(arc_object_t));
    switch(rc) {
        case -1:
            fprintf(stderr, "Can't set the new value in the internal hashtable\n");
            release_ref(cache->refcnt, obj->node);
            break;
        case 1:
            // the object has been created in the meanwhile
            release_ref(cache->refcnt, obj->node);
            // XXX - yes, we have to release it twice
            release_ref(cache->refcnt, obj->node);
            return arc_lookup(cache, key, len, valuep, async);
        case 0:
            /* New objects are always moved to the MRU list. */
            rc  = arc_move(cache, obj, &cache->mru);
            if (rc >= 0) {
                arc_balance(cache);
                *valuep = obj->ptr;
                return obj;
            }
            break;
        default:
            fprintf(stderr, "Unknown return code from ht_set_if_not_exists() : %d\n", rc);
            release_ref(cache->refcnt, obj->node);
            break;
    } 
    release_ref(cache->refcnt, obj->node);
    return NULL;
}
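The comment above arc_lookup() states the contract: the returned resource is retained and the caller must release it with arc_release_resource(). A hedged usage sketch follows; use_value() is a hypothetical consumer and the single-argument release call simply mirrors the wording of that comment, since the real signature is not shown in this excerpt.

/* Hypothetical caller: look up a key, use the cached value, drop the ref. */
static int fetch_and_use(arc_t *cache, const void *key, size_t klen)
{
    void *value = NULL;
    arc_resource_t res = arc_lookup(cache, key, klen, &value, 0);
    if (!res)
        return -1;              // not found or could not be loaded

    use_value(value);           // hypothetical consumer of the cached data

    arc_release_resource(res);  // release the reference arc_lookup() retained
    return 0;
}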
Example no. 22
0
/* Move the object to the given state. If the state transition requires,
* fetch, evict or destroy the object. */
static inline int
arc_move(arc_t *cache, arc_object_t *obj, arc_state_t *state)
{
    // The first condition checks whether the object is locked, which means
    // someone is fetching its value and we must not interfere: whoever is
    // fetching will also take care of moving it to one of the lists
    // (or dropping it).
    // NOTE: while the object is being fetched it doesn't belong to any
    //       list, so there is no point in going ahead; arc_balance() never
    //       walks over it either (it is in none of the lists), so it won't
    //       be affected. The only call that would silently fail is
    //       arc_remove(), but whether a fetched object needs to be removed
    //       is determined by whoever is fetching it, or by the next call to
    //       arc_balance() (which happens anyway if the fetcher puts the
    //       object into the cache).
    //
    // The second condition handles a specific corner case which happens
    // when concurrent threads access an item that has just been fetched but
    // also dropped (so its state is NULL). If a thread entering arc_lookup()
    // manages to get the object out of the hashtable before it is deleted,
    // it will try to put the object on the mfu list without first checking
    // whether it was already in a list (new objects should first be moved to
    // the mru list, not the mfu one).
    if (UNLIKELY(obj->locked || (state == &cache->mfu && ATOMIC_READ(obj->state) == NULL)))
        return 0;

    MUTEX_LOCK(&cache->lock);

    arc_state_t *obj_state = ATOMIC_READ(obj->state);

    if (LIKELY(obj_state != NULL)) {

        if (LIKELY(obj_state == state)) {
            // short path for recurring keys
            // (those in the mfu list being hit again)
            if (LIKELY(state->head.next != &obj->head))
                arc_list_move_to_head(&obj->head, &state->head);
            MUTEX_UNLOCK(&cache->lock);
            return 0;
        }

        // if the new state is not NULL
        // (i.e. the object is not being removed)
        // adjust the ^ (p) marker
        if (LIKELY(state != NULL)) {
            if (obj_state == &cache->mrug) {
                size_t csize = cache->mrug.size
                             ? (cache->mfug.size / cache->mrug.size)
                             : cache->mfug.size / 2;
                cache->p = MIN(cache->c, cache->p + MAX(csize, 1));
            } else if (obj_state == &cache->mfug) {
                size_t csize = cache->mfug.size
                             ? (cache->mrug.size / cache->mfug.size)
                             : cache->mrug.size / 2;
                cache->p = MAX(0, cache->p - MAX(csize, 1));
            }
        }

        ATOMIC_DECREASE(obj_state->size, obj->size);
        arc_list_remove(&obj->head);
        ATOMIC_DECREMENT(obj_state->count);
        ATOMIC_SET(obj->state, NULL);
    }

    if (state == NULL) {
        if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
            release_ref(cache->refcnt, obj->node);
    } else if (state == &cache->mrug || state == &cache->mfug) {
        obj->async = 0;
        arc_list_prepend(&obj->head, &state->head);
        ATOMIC_INCREMENT(state->count);
        ATOMIC_SET(obj->state, state);
        ATOMIC_INCREASE(state->size, obj->size);
    } else if (obj_state == NULL) {

        obj->locked = 1;
        
        // unlock the cache while the backend is fetching the data
        // (the object has been locked while being fetched so nobody
        // will change its state)
        MUTEX_UNLOCK(&cache->lock);
        size_t size = 0;
        int rc = cache->ops->fetch(obj->ptr, &size, cache->ops->priv);
        switch (rc) {
            case 1:
            case -1:
            {
                if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
                    release_ref(cache->refcnt, obj->node);
                return rc;
            }
            default:
            {
                if (size >= cache->c) {
                    // the (single) object doesn't fit in the cache, let's return it
                    // to the getter without (re)adding it to the cache
                    if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
                        release_ref(cache->refcnt, obj->node);
                    return 1;
                }
                MUTEX_LOCK(&cache->lock);
                obj->size = ARC_OBJ_BASE_SIZE(obj) + cache->cos + size;
                arc_list_prepend(&obj->head, &state->head);
                ATOMIC_INCREMENT(state->count);
                ATOMIC_SET(obj->state, state);
                ATOMIC_INCREASE(state->size, obj->size);
                ATOMIC_INCREMENT(cache->needs_balance);
                break;
            }
        }
        // since this object is going to be put back into the cache,
        // we need to unmark it so that it won't be ignored next time
        // it's going to be moved to another list
        obj->locked = 0;
    } else {
        arc_list_prepend(&obj->head, &state->head);
        ATOMIC_INCREMENT(state->count);
        ATOMIC_SET(obj->state, state);
        ATOMIC_INCREASE(state->size, obj->size);
    }
    MUTEX_UNLOCK(&cache->lock);
    return 0;
}
Example no. 23
0
size_t
arc_mfug_size(arc_t *cache)
{
    return ATOMIC_READ(cache->mfug.size);
}
Example no. 24
0
size_t
arc_mru_size(arc_t *cache)
{
    return ATOMIC_READ(cache->mru.size);
}
Example no. 25
0
uint64_t
arc_count(arc_t *cache)
{
    return ATOMIC_READ(cache->mru.count) + ATOMIC_READ(cache->mfu.count) +
           ATOMIC_READ(cache->mrug.count) + ATOMIC_READ(cache->mfug.count);
}
Example no. 26
0
uint32_t rqueue_read_count(rqueue_t *rb) {
    return ATOMIC_READ(rb->reads);
}
Example no. 27
0
uint32_t rqueue_write_count(rqueue_t *rb) {
    return ATOMIC_READ(rb->writes);
}
Example no. 28
0
int rqueue_write(rqueue_t *rb, void *value) {
    int retries = 0;
    int did_update = 0;
    int did_move_head = 0;

    rqueue_page_t *temp_page = NULL;
    rqueue_page_t *next_page = NULL;
    rqueue_page_t *tail = NULL;
    rqueue_page_t *head = NULL;
    rqueue_page_t *commit;
    ATOMIC_INCREMENT(rb->num_writers);
    do {
        temp_page = ATOMIC_READ(rb->tail);
        commit = ATOMIC_READ(rb->commit);
        next_page = RQUEUE_FLAG_OFF(ATOMIC_READ(temp_page->next), RQUEUE_FLAG_ALL);
        head = ATOMIC_READ(rb->head);
        if (rb->mode == RQUEUE_MODE_BLOCKING) {
            if (temp_page == commit && next_page == head) {
                if (ATOMIC_READ(rb->writes) - ATOMIC_READ(rb->reads) != 0) {
                    //fprintf(stderr, "No buffer space\n");
                    if (ATOMIC_READ(rb->num_writers) == 1)
                        ATOMIC_CAS(rb->commit, ATOMIC_READ(rb->commit), ATOMIC_READ(rb->tail));
                    ATOMIC_DECREMENT(rb->num_writers);
                    return -2;
                }
            } else if (next_page == head) {
                if (ATOMIC_READ(rb->num_writers) == 1) {
                    tail = temp_page;
                    break;
                } else {
                    if (ATOMIC_READ(rb->num_writers) == 1)
                        ATOMIC_CAS(rb->commit, ATOMIC_READ(rb->commit), ATOMIC_READ(rb->tail));
                    ATOMIC_DECREMENT(rb->num_writers);
                    return -2;
                }
            }
        }
        tail = ATOMIC_CAS_RETURN(rb->tail, temp_page, next_page);
    } while (tail != temp_page && !(RQUEUE_CHECK_FLAG(ATOMIC_READ(tail->next), RQUEUE_FLAG_UPDATE)) && retries++ < RQUEUE_MAX_RETRIES);

    if (!tail) {
        if (ATOMIC_READ(rb->num_writers) == 1)
            ATOMIC_CAS(rb->commit, ATOMIC_READ(rb->commit), ATOMIC_READ(rb->tail));
        ATOMIC_DECREMENT(rb->num_writers);
        return -1;
    } 

    rqueue_page_t *nextp = RQUEUE_FLAG_OFF(ATOMIC_READ(tail->next), RQUEUE_FLAG_ALL);

    if (ATOMIC_CAS(tail->next, RQUEUE_FLAG_ON(nextp, RQUEUE_FLAG_HEAD), RQUEUE_FLAG_ON(nextp, RQUEUE_FLAG_UPDATE))) {
        did_update = 1;
        //fprintf(stderr, "Did update head pointer\n");
        if (rb->mode == RQUEUE_MODE_OVERWRITE) {
            // we need to advance the head if in overwrite mode ...otherwise we must stop
            //fprintf(stderr, "Will advance head and overwrite old data\n");
            rqueue_page_t *nextpp = RQUEUE_FLAG_OFF(ATOMIC_READ(nextp->next), RQUEUE_FLAG_ALL);
            if (ATOMIC_CAS(nextp->next, nextpp, RQUEUE_FLAG_ON(nextpp, RQUEUE_FLAG_HEAD))) {
                if (ATOMIC_READ(rb->tail) != next_page) {
                    ATOMIC_CAS(nextp->next, RQUEUE_FLAG_ON(nextpp, RQUEUE_FLAG_HEAD), nextpp);
                } else {
                    ATOMIC_CAS(rb->head, head, nextpp);
                    did_move_head = 1;
                }
            }
        }
    }

    void *old_value = ATOMIC_READ(tail->value);
    ATOMIC_CAS(tail->value, old_value, value);
    if (old_value && rb->free_value_cb)
        rb->free_value_cb(old_value);



    if (did_update) {
        //fprintf(stderr, "Try restoring head pointer\n");

        ATOMIC_CAS(tail->next,
                       RQUEUE_FLAG_ON(nextp, RQUEUE_FLAG_UPDATE),
                       did_move_head
                       ? RQUEUE_FLAG_OFF(nextp, RQUEUE_FLAG_ALL)
                       : RQUEUE_FLAG_ON(nextp, RQUEUE_FLAG_HEAD));

        //fprintf(stderr, "restored head pointer\n");
    }

    ATOMIC_INCREMENT(rb->writes);
    if (ATOMIC_READ(rb->num_writers) == 1)
        ATOMIC_CAS(rb->commit, ATOMIC_READ(rb->commit), tail);
    ATOMIC_DECREMENT(rb->num_writers);
    return 0;
}
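rqueue_write() reports -2 when a blocking queue has no buffer space and -1 when the tail CAS keeps failing, leaving the retry policy to the caller. Below is a hedged producer sketch that retries the "full" case after yielding (the rqueue header is assumed to be included).

#include <sched.h>

/* Hypothetical producer for a blocking queue: retry while full, give up on
 * persistent tail contention. */
static int enqueue_blocking(rqueue_t *rb, void *item)
{
    for (;;) {
        int rc = rqueue_write(rb, item);
        if (rc == 0)
            return 0;       /* item stored */
        if (rc == -1)
            return -1;      /* tail CAS kept failing; give up */
        sched_yield();      /* rc == -2: queue full, let readers drain it */
    }
}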
Example no. 29
void rtw_dump_mem_stat (void)
{
	int vir_alloc, vir_peak, vir_alloc_err, phy_alloc, phy_peak, phy_alloc_err;
	int tx_alloc, tx_peak, tx_alloc_err, rx_alloc, rx_peak, rx_alloc_err;
	
	vir_alloc=ATOMIC_READ(&rtw_dbg_mem_stat.vir_alloc);
	vir_peak=ATOMIC_READ(&rtw_dbg_mem_stat.vir_peak);
	vir_alloc_err=ATOMIC_READ(&rtw_dbg_mem_stat.vir_alloc_err);

	phy_alloc=ATOMIC_READ(&rtw_dbg_mem_stat.phy_alloc);
	phy_peak=ATOMIC_READ(&rtw_dbg_mem_stat.phy_peak);
	phy_alloc_err=ATOMIC_READ(&rtw_dbg_mem_stat.phy_alloc_err);

	tx_alloc=ATOMIC_READ(&rtw_dbg_mem_stat.tx_alloc);
	tx_peak=ATOMIC_READ(&rtw_dbg_mem_stat.tx_peak);
	tx_alloc_err=ATOMIC_READ(&rtw_dbg_mem_stat.tx_alloc_err);
	
	rx_alloc=ATOMIC_READ(&rtw_dbg_mem_stat.rx_alloc);
	rx_peak=ATOMIC_READ(&rtw_dbg_mem_stat.rx_peak);
	rx_alloc_err=ATOMIC_READ(&rtw_dbg_mem_stat.rx_alloc_err);

	DBG_871X(	"vir_alloc:%d, vir_peak:%d, vir_alloc_err:%d\n"
				"phy_alloc:%d, phy_peak:%d, phy_alloc_err:%d\n"
				"tx_alloc:%d, tx_peak:%d, tx_alloc_err:%d\n"
				"rx_alloc:%d, rx_peak:%d, rx_alloc_err:%d\n"
		, vir_alloc, vir_peak, vir_alloc_err
		, phy_alloc, phy_peak, phy_alloc_err
		, tx_alloc, tx_peak, tx_alloc_err
		, rx_alloc, rx_peak, rx_alloc_err
	);
}