// Working flow of evolution void GaMultithreadingAlgorithm::WorkFlow() { // give ID to worker thread int workerId = ATOMIC_INC( _workerIdCounter ) - 1; while( 1 ) { // wait for command from control thread LockSemaphore( _workerForkSync ); // stop the thread to apply parameter change if( _parametersChange ) break; // execute work step if the algorithm is not stopped if( _state == GAS_RUNNING ) WorkStep( workerId ); // only the last worker will releast the others to continue if( !ATOMIC_DEC( _workersThreadIn ) ) UnlockSemaphore( _workerJoinSync, _numberOfThreads - 1 ); // wait for the last worker to reach this point before notifying control thread LockSemaphore( _workerJoinSync ); // the last worker thread to exit notifies control thread that work step is done if( !ATOMIC_DEC( _workersThreadOut ) ) SignalEvent( _controlSync ); // algorithm is stopped if( _state != GAS_RUNNING ) break; } }
/*
 * rtw_mstat_update() - update driver memory-allocation statistics.
 * @flags:  encodes both the memory type index (mstat_tf_idx) and the
 *          calling-function index (mstat_ff_idx) to update.
 * @status: MSTAT_ALLOC_SUCCESS / MSTAT_ALLOC_FAIL / MSTAT_FREE.
 * @sz:     size in bytes of the allocation or free.
 *
 * Maintains current-usage, peak-usage and count counters per type and
 * per function. The peak update (read then set) is not a single atomic
 * operation, so a concurrent racer may record a slightly stale peak;
 * this matches the original best-effort design.
 */
void rtw_mstat_update(const enum mstat_f flags, const MSTAT_STATUS status, u32 sz)
{
	static u32 update_time = 0;
	int peak, alloc;
	int i;

	/* one-time initialization of all counters */
	if (!update_time) {
		for (i = 0; i < mstat_tf_idx(MSTAT_TYPE_MAX); i++) {
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_err_cnt), 0);
		}
		for (i = 0; i < mstat_ff_idx(MSTAT_FUNC_MAX); i++) {
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_err_cnt), 0);
		}
		/*
		 * BUG FIX: update_time was only assigned inside the commented-out
		 * periodic-dump code below, so the guard above never flipped and
		 * every call re-zeroed all statistics. Mark initialization done.
		 */
		update_time = 1;
	}

	switch (status) {
	case MSTAT_ALLOC_SUCCESS:
		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
		peak = ATOMIC_READ(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak));
		if (peak < alloc)
			ATOMIC_SET(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak), alloc);

		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
		alloc = ATOMIC_ADD_RETURN(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
		peak = ATOMIC_READ(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak));
		if (peak < alloc)
			ATOMIC_SET(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak), alloc);
		break;

	case MSTAT_ALLOC_FAIL:
		ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_err_cnt));
		ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_err_cnt));
		break;

	case MSTAT_FREE:
		ATOMIC_DEC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
		ATOMIC_SUB(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
		ATOMIC_DEC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
		ATOMIC_SUB(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
		break;

	default:
		/* unknown status: nothing to record */
		break;
	}

	//if (rtw_get_passing_time_ms(update_time) > 5000) {
	//	rtw_mstat_dump();
	//	update_time=rtw_get_current_time();
	//}
}
/*
 * Pop the lowest-numbered pending signal from the global signal buffer.
 * Returns the signal number, or 0 when no signal is buffered.
 */
int rb_get_next_signal(void)
{
    int sig = 0;

    if (signal_buff.size != 0) {
        int signo;
        for (signo = 1; signo < RUBY_NSIG; signo++) {
            if (signal_buff.cnt[signo] <= 0)
                continue;
            /* consume one occurrence of this signal */
            ATOMIC_DEC(signal_buff.cnt[signo]);
            ATOMIC_DEC(signal_buff.size);
            sig = signo;
            break;
        }
    }
    return sig;
}
/*
 * Pop the lowest-numbered pending signal from the global signal buffer,
 * decrementing its counter with interrupts disabled.
 * Returns the signal number, or 0 when nothing is pending.
 */
int rb_get_next_signal(void)
{
    int sig = 0;
    int signo;

    for (signo = 1; signo < RUBY_NSIG; signo++) {
        if (signal_buff.cnt[signo] <= 0)
            continue;
        /* consume one occurrence while interrupts cannot re-enter */
        rb_disable_interrupt();
        ATOMIC_DEC(signal_buff.cnt[signo]);
        ATOMIC_DEC(signal_buff.size);
        rb_enable_interrupt();
        sig = signo;
        break;
    }
    return sig;
}
/*
 * Pop the lowest-numbered pending signal from the VM's signal buffer,
 * decrementing its counter with interrupts disabled.
 * Returns the signal number, or 0 when nothing is pending.
 */
int rb_get_next_signal(rb_vm_t *vm)
{
    int sig = 0;
    int signo;

    for (signo = 1; signo < RUBY_NSIG; signo++) {
        if (vm->signal_buff[signo] <= 0)
            continue;
        /* consume one occurrence while interrupts cannot re-enter */
        rb_disable_interrupt();
        ATOMIC_DEC(vm->signal_buff[signo]);
        ATOMIC_DEC(vm->buffered_signal_size);
        rb_enable_interrupt();
        sig = signo;
        break;
    }
    return sig;
}
/*
 * Drain the context's free-mbuf queue, returning every cached buffer
 * to the allocator and keeping the free-buffer statistic in step.
 */
void mbuf_destroy(struct context *ctx)
{
    for (;;) {
        struct mbuf *cur;

        if (TAILQ_EMPTY(&ctx->free_mbufq))
            break;
        cur = TAILQ_FIRST(&ctx->free_mbufq);
        TAILQ_REMOVE(&ctx->free_mbufq, cur, next);
        mbuf_free(ctx, cur);
        ATOMIC_DEC(ctx->mstats.free_buffers, 1);
    }
}
/*
 * rtw_mi_update_fwstate() - keep the multi-interface (mi) per-state
 * reference counters in sync when an adapter's fw state changes.
 * @pmlmepriv: mlme_priv embedded in the adapter whose state changed.
 * @state:     the fw state bits being set or cleared.
 * @bset:      non-zero when the bits are being set, zero when cleared.
 *
 * Only _FW_LINKED, _FW_UNDER_LINKING and WIFI_UNDER_WPS affect the
 * counters; any other state change returns immediately. After the
 * counters are adjusted, _rtw_mi_status() recomputes the aggregate
 * iface_state snapshot.
 */
inline void rtw_mi_update_fwstate(struct mlme_priv *pmlmepriv, sint state, u8 bset)
{
	_adapter *adapter = container_of(pmlmepriv, _adapter, mlmepriv);
	struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
	struct mi_state *iface_state = &dvobj->iface_state;
	struct mlme_ext_priv *mlmeext = &adapter->mlmeextpriv;

	/* only these state bits are tracked */
	if (!(state & (_FW_LINKED | _FW_UNDER_LINKING | WIFI_UNDER_WPS)))
		return;

	if (mlmeext_msr(mlmeext) == WIFI_FW_STATION_STATE) {
		/*ATOMIC_INC(&(iface_state->sta_num_ret));*/
		if (state & _FW_LINKED)
			(bset) ? ATOMIC_INC(&(iface_state->ld_sta_num_ret)) : ATOMIC_DEC(&(iface_state->ld_sta_num_ret));
		if (state & _FW_UNDER_LINKING)
			(bset) ? ATOMIC_INC(&(iface_state->lg_sta_num_ret)) : ATOMIC_DEC(&(iface_state->lg_sta_num_ret));
	}

	if (mlmeext_msr(mlmeext) == WIFI_FW_AP_STATE
	    && check_fwstate(&adapter->mlmepriv, _FW_LINKED) == _TRUE) {
		/*ATOMIC_INC(&(iface_state->ap_num_ret));*/
		/*
		 * BUG FIX: the original executed a bare 'ld_ap_num_ret++' on an
		 * undeclared identifier and ignored bset. Follow the counter
		 * pattern of the other branches: atomically inc/dec the
		 * iface_state field. NOTE(review): the asoc_sta_count > 2
		 * threshold is kept as-is — confirm against the vendor tree.
		 */
		if (adapter->stapriv.asoc_sta_count > 2)
			(bset) ? ATOMIC_INC(&(iface_state->ld_ap_num_ret)) : ATOMIC_DEC(&(iface_state->ld_ap_num_ret));
	}

	if (state & WIFI_UNDER_WPS)
		(bset) ? ATOMIC_INC(&(iface_state->uw_num_ret)) : ATOMIC_DEC(&(iface_state->uw_num_ret));

	/* recompute the aggregate interface-state snapshot */
	_rtw_mi_status(adapter, &iface_state->sta_num, &iface_state->ld_sta_num, &iface_state->lg_sta_num
		, &iface_state->ap_num, &iface_state->ld_ap_num, &iface_state->uwps_num, 1);
}
/*
 * Return a no-longer-needed mbuf to the context: cache it on the free
 * queue for reuse, unless the cache is already above RECYCLE_LENGTH,
 * in which case the buffer is released outright.
 */
void mbuf_recycle(struct context *ctx, struct mbuf *mbuf)
{
    ATOMIC_DEC(ctx->mstats.buffers, 1);

    /* cache full -> give the memory back instead of hoarding it */
    if (ATOMIC_GET(ctx->mstats.free_buffers) > RECYCLE_LENGTH) {
        mbuf_free(ctx, mbuf);
    } else {
        TAILQ_NEXT(mbuf, next) = NULL;
        TAILQ_INSERT_HEAD(&ctx->free_mbufq, mbuf, next);
        ATOMIC_INC(ctx->mstats.free_buffers, 1);
    }
}
/*
 * Obtain an mbuf: reuse one from the context's free queue when
 * available, otherwise allocate a fresh backing buffer and place the
 * mbuf header at ctx->mbuf_offset within it.
 * Returns NULL when allocation fails.
 */
static struct mbuf *_mbuf_get(struct context *ctx)
{
    struct mbuf *mbuf = NULL;

    if (TAILQ_EMPTY(&ctx->free_mbufq)) {
        /* no cached buffer: allocate a new one */
        uint8_t *raw = malloc(config.bufsize);
        if (raw != NULL)
            mbuf = (struct mbuf *)(raw + ctx->mbuf_offset);
    } else {
        mbuf = TAILQ_FIRST(&ctx->free_mbufq);
        TAILQ_REMOVE(&ctx->free_mbufq, mbuf, next);
        ATOMIC_DEC(ctx->mstats.free_buffers, 1);
    }
    return mbuf;
}
/*
 * Release a block previously handed out by the mempool.
 * Recovers the easy_mempool_buf_t header in front of ptr, validates its
 * magic number, marks it freed, and returns the memory either to the
 * underlying allocator (direct allocations) or to its pool page.
 * NULL pool or NULL ptr is a no-op.
 */
void easy_mempool_free(easy_mempool_t *pool, void *ptr)
{
    easy_mempool_buf_t *buf;
    int64_t size;

    if (NULL == pool || NULL == ptr)
        return;

    /* the header sits immediately before the user pointer */
    buf = (easy_mempool_buf_t *)((char *)ptr - sizeof(easy_mempool_buf_t));
    if (EASY_MEMPOOL_BUF_MAGIC_NUM != buf->magic_num)
        return;

    size = buf->size;
    buf->magic_num = EASY_MEMPOOL_BUF_FREE_FLAG; /* guard against double free */

    if (EASY_MEMPOOL_DIRECT_ALLOC == buf->alloc_type) {
        pool->allocator->free(buf);
        ATOMIC_DEC(&(pool->direct_alloc_cnt));
    } else {
        easy_mempool_deref_page_(pool, buf->page_pos);
    }
    ATOMIC_SUB(&(pool->mem_total), size);
}
/**
 * \brief Free an image.
 *
 * Decrement the image's reference count and deallocate the associated
 * memory once no references remain. Sub-images release their reference
 * to the base image instead of freeing pixel data directly.
 *
 * \param im image to free (NULL is a no-op)
 */
void kvz_image_free(kvz_picture *const im)
{
  if (im == NULL) return;

  // Drop our reference; bail out while other holders remain.
  if (ATOMIC_DEC(&(im->refcount)) > 0) return;

  if (im->base_image == im) {
    // This is the base image: it owns the pixel storage.
    free(im->fulldata);
  } else {
    // Sub-image: release our reference to the base image instead.
    kvz_image_free(im->base_image);
  }

  // Poison the pointers so any stale use is caught quickly.
  im->base_image = NULL;
  im->fulldata = NULL;
  im->y = im->u = im->v = NULL;
  im->data[COLOR_Y] = im->data[COLOR_U] = im->data[COLOR_V] = NULL;
  free(im);
}
void usb_read_port_complete(struct urb *purb, struct pt_regs *regs) { struct recv_buf *precvbuf = (struct recv_buf *)purb->context; _adapter *padapter =(_adapter *)precvbuf->adapter; struct recv_priv *precvpriv = &padapter->recvpriv; RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_read_port_complete!!!\n")); ATOMIC_DEC(&(precvpriv->rx_pending_cnt)); if(RTW_CANNOT_RX(padapter)) { RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_read_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n", padapter->bDriverStopped, padapter->bSurpriseRemoved)); DBG_8192C("%s() RX Warning! bDriverStopped(%d) OR bSurpriseRemoved(%d) \n", __FUNCTION__,padapter->bDriverStopped, padapter->bSurpriseRemoved); goto exit; } if(purb->status==0) { //SUCCESS if ((purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)) { RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_read_port_complete: (purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)\n")); rtw_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); DBG_8192C("%s()-%d: RX Warning!\n", __FUNCTION__, __LINE__); } else { rtw_reset_continual_io_error(adapter_to_dvobj(padapter)); precvbuf->transfer_len = purb->actual_length; skb_put(precvbuf->pskb, purb->actual_length); skb_queue_tail(&precvpriv->rx_skb_queue, precvbuf->pskb); #ifndef CONFIG_FIX_NR_BULKIN_BUFFER if (skb_queue_len(&precvpriv->rx_skb_queue)<=1) #endif tasklet_schedule(&precvpriv->recv_tasklet); precvbuf->pskb = NULL; rtw_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); } } else { RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_read_port_complete : purb->status(%d) != 0 \n", purb->status)); DBG_8192C("###=> usb_read_port_complete => urb status(%d)\n", purb->status); if(rtw_inc_and_chk_continual_io_error(adapter_to_dvobj(padapter)) == _TRUE ) { padapter->bSurpriseRemoved = _TRUE; } switch(purb->status) { case -EINVAL: case -EPIPE: case -ENODEV: case -ESHUTDOWN: //padapter->bSurpriseRemoved=_TRUE; 
//RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_read_port_complete:bSurpriseRemoved=TRUE\n")); case -ENOENT: padapter->bDriverStopped=_TRUE; RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_read_port_complete:bDriverStopped=TRUE\n")); break; case -EPROTO: case -EILSEQ: case -ETIME: case -ECOMM: case -EOVERFLOW: #ifdef DBG_CONFIG_ERROR_DETECT { HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter); pHalData->srestpriv.Wifi_Error_Status = USB_READ_PORT_FAIL; } #endif rtw_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); break; case -EINPROGRESS: DBG_8192C("ERROR: URB IS IN PROGRESS!/n"); break; default: break; } } exit: _func_exit_; }
void usb_read_port_complete(struct urb *purb, struct pt_regs *regs) { struct recv_buf *precvbuf = (struct recv_buf *)purb->context; _adapter *padapter = (_adapter *)precvbuf->adapter; struct recv_priv *precvpriv = &padapter->recvpriv; ATOMIC_DEC(&(precvpriv->rx_pending_cnt)); if (RTW_CANNOT_RX(padapter)) { RTW_INFO("%s() RX Warning! bDriverStopped(%s) OR bSurpriseRemoved(%s)\n" , __func__ , rtw_is_drv_stopped(padapter) ? "True" : "False" , rtw_is_surprise_removed(padapter) ? "True" : "False"); goto exit; } if (purb->status == 0) { if ((purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)) { RTW_INFO("%s()-%d: urb->actual_length:%u, MAX_RECVBUF_SZ:%u, RXDESC_SIZE:%u\n" , __FUNCTION__, __LINE__, purb->actual_length, MAX_RECVBUF_SZ, RXDESC_SIZE); rtw_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); } else { rtw_reset_continual_io_error(adapter_to_dvobj(padapter)); precvbuf->transfer_len = purb->actual_length; skb_put(precvbuf->pskb, purb->actual_length); skb_queue_tail(&precvpriv->rx_skb_queue, precvbuf->pskb); #ifndef CONFIG_FIX_NR_BULKIN_BUFFER if (skb_queue_len(&precvpriv->rx_skb_queue) <= 1) #endif tasklet_schedule(&precvpriv->recv_tasklet); precvbuf->pskb = NULL; rtw_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); } } else { RTW_INFO("###=> usb_read_port_complete => urb.status(%d)\n", purb->status); if (rtw_inc_and_chk_continual_io_error(adapter_to_dvobj(padapter)) == _TRUE) rtw_set_surprise_removed(padapter); switch (purb->status) { case -EINVAL: case -EPIPE: case -ENODEV: case -ESHUTDOWN: case -ENOENT: rtw_set_drv_stopped(padapter); break; case -EPROTO: case -EILSEQ: case -ETIME: case -ECOMM: case -EOVERFLOW: #ifdef DBG_CONFIG_ERROR_DETECT { HAL_DATA_TYPE *pHalData = GET_HAL_DATA(padapter); pHalData->srestpriv.Wifi_Error_Status = USB_READ_PORT_FAIL; } #endif rtw_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); break; case -EINPROGRESS: RTW_INFO("ERROR: URB IS IN 
PROGRESS!/n"); break; default: break; } } exit: return; }
/* {{{ apc_cache_release */
/*
 * Drop one reference from a cache entry obtained earlier.
 * Only decrements the ref count; reclamation of unreferenced entries
 * is handled elsewhere in the cache.
 */
PHP_APCU_API void apc_cache_release(apc_cache_t* cache, apc_cache_entry_t* entry)
{
	ATOMIC_DEC(cache, entry->ref_count);
}
// Drop one reference. Returns true when this call released the last
// reference (count reached zero), i.e. the caller should clean up.
bool deref()
{
	if (ATOMIC_DEC(refCount) == 0)
		return true;
	return false;
}