Example #1
static inline apc_cache_entry_t* apc_cache_find_internal(apc_cache_t *cache, zend_string *key, time_t t, zend_bool lock) {
	apc_cache_slot_t** slot;
	zend_ulong h, s;

    volatile apc_cache_entry_t* value = NULL;

	if (lock)
		APC_RLOCK(cache->header);
    
	/* calculate hash and slot */
	apc_cache_hash_slot(cache, key, &h, &s);

	/* find head */
	slot = &cache->slots[s];

	while (*slot) {
		/* check for a matching key by hash and identifier */
		if ((h == ZSTR_HASH((*slot)->key.str)) && 
			memcmp(ZSTR_VAL((*slot)->key.str), ZSTR_VAL(key), ZSTR_LEN(key)) == SUCCESS) {

			/* Check to make sure this entry isn't expired by a hard TTL */
			if((*slot)->value->ttl && (time_t) ((*slot)->ctime + (*slot)->value->ttl) < t) {
				/* increment misses on cache */
				ATOMIC_INC(cache, cache->header->nmisses);

				if (lock)
					APC_RUNLOCK(cache->header);
				return NULL;
			}
			
			/* set cache num hits */
			ATOMIC_INC(cache, cache->header->nhits);

			/* grab value */
			value = (*slot)->value;

			(*slot)->atime = t;

			/* Otherwise we are fine, increase counters and return the cache entry */
			ATOMIC_INC(cache, (*slot)->nhits);
			ATOMIC_INC(cache, (*slot)->value->ref_count);

			if (lock)
				APC_RUNLOCK(cache->header);

			return (apc_cache_entry_t*)value;
		}

		/* next */
		slot = &(*slot)->next;		
	}

	/* not found, so increment misses */
	ATOMIC_INC(cache, cache->header->nmisses);

	if (lock)
		APC_RUNLOCK(cache->header);

	return NULL;
}
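
A note on the macro itself: each example on this page uses the ATOMIC_INC supplied by its own project, and the signatures differ (the APCu variant above takes the cache as an extra argument). Purely as an illustration, and assuming GCC/Clang __atomic builtins rather than any project's real header, an increment that returns the new value could be sketched as:

#include <stdint.h>

/* Illustrative sketch only, not APCu's (or any other project's) real definition:
 * an atomic increment that returns the new value, built on GCC/Clang builtins. */
#define EXAMPLE_ATOMIC_INC(ptr)  __atomic_add_fetch((ptr), 1, __ATOMIC_SEQ_CST)

static uint64_t nhits;

static void record_hit(void)
{
	EXAMPLE_ATOMIC_INC(&nhits);	/* atomically bumps the hit counter */
}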
Example #2
/* replaces all inc with add 1, dec with sub 1
 * if cannot replace (eflags constraints), leaves original instruction alone
 */
static dr_emit_flags_t
event_trace(void *drcontext, void *tag, instrlist_t *trace, bool translating)
{
    instr_t *instr, *next_instr;
    int opcode;

    if (!enable)
	return DR_EMIT_DEFAULT;

#ifdef VERBOSE
    dr_printf("in dynamorio_trace(tag="PFX")\n", tag);
    instrlist_disassemble(drcontext, tag, trace, STDOUT);
#endif

    for (instr = instrlist_first(trace); instr != NULL; instr = next_instr) {
	/* grab next now so we don't go over instructions we insert */
	next_instr = instr_get_next(instr);
	opcode = instr_get_opcode(instr);
	if (opcode == OP_inc || opcode == OP_dec) {
            if (!translating)
                ATOMIC_INC(num_examined);
	    if (replace_inc_with_add(drcontext, instr, trace)) {
                if (!translating)
                    ATOMIC_INC(num_converted);
            }
	}
    }

#ifdef VERBOSE
    dr_printf("after dynamorio_trace(tag="PFX"):\n", tag);
    instrlist_disassemble(drcontext, tag, trace, STDOUT);
#endif

    return DR_EMIT_DEFAULT;
}
Example #3
static RETSIGTYPE
sighandler(int sig)
{
    ATOMIC_INC(signal_buff.cnt[sig]);
    ATOMIC_INC(signal_buff.size);
#if !defined(BSD_SIGNAL) && !defined(POSIX_SIGNAL)
    ruby_signal(sig, sighandler);
#endif
}
Example #4
void rtw_mstat_update(const enum mstat_f flags, const MSTAT_STATUS status, u32 sz)
{
	static u32 update_time = 0;
	int peak, alloc;
	int i;

	/* initialization */
	if(!update_time) {
		for(i=0;i<mstat_tf_idx(MSTAT_TYPE_MAX);i++) {
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_type_stat[i].alloc_err_cnt), 0);
		}
		for(i=0;i<mstat_ff_idx(MSTAT_FUNC_MAX);i++) {
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].peak), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_cnt), 0);
			ATOMIC_SET(&(rtw_mem_func_stat[i].alloc_err_cnt), 0);
		}
	}

	switch(status) {
		case MSTAT_ALLOC_SUCCESS:
			ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
			alloc = ATOMIC_ADD_RETURN(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);
			peak=ATOMIC_READ(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak));
			if (peak<alloc)
				ATOMIC_SET(&(rtw_mem_type_stat[mstat_tf_idx(flags)].peak), alloc);

			ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
			alloc = ATOMIC_ADD_RETURN(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
			peak=ATOMIC_READ(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak));
			if (peak<alloc)
				ATOMIC_SET(&(rtw_mem_func_stat[mstat_ff_idx(flags)].peak), alloc);
			break;

		case MSTAT_ALLOC_FAIL:
			ATOMIC_INC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_err_cnt));

			ATOMIC_INC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_err_cnt));
			break;

		case MSTAT_FREE:
			ATOMIC_DEC(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc_cnt));
			ATOMIC_SUB(&(rtw_mem_type_stat[mstat_tf_idx(flags)].alloc), sz);

			ATOMIC_DEC(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc_cnt));
			ATOMIC_SUB(&(rtw_mem_func_stat[mstat_ff_idx(flags)].alloc), sz);
			break;
	};

	//if (rtw_get_passing_time_ms(update_time) > 5000) {
	//	rtw_mstat_dump();
		update_time=rtw_get_current_time();
	//}
}
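
One detail in the peak bookkeeping above: the ATOMIC_READ / compare / ATOMIC_SET sequence is not a single atomic step, so concurrent allocations can lose a peak update. A race-free alternative, sketched here with C11 atomics rather than the driver's own primitives, retries with compare-and-swap:

#include <stdatomic.h>

/* Sketch only: lock-free peak tracking with a CAS loop, shown as an
 * alternative to the read/compare/set sequence used in the example above. */
static void update_peak(_Atomic int *peak, int alloc)
{
    int cur = atomic_load(peak);
    while (cur < alloc &&
           !atomic_compare_exchange_weak(peak, &cur, alloc)) {
        /* on failure, cur is reloaded with the current peak; retry */
    }
}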
Example #5
int conn_read(struct connection *conn, struct mbuf *buf)
{
    int n = socket_read(conn->fd, buf);
    if (n == 0) return CORVUS_EOF;
    if (n == CORVUS_ERR) return CORVUS_ERR;
    if (n == CORVUS_AGAIN) return CORVUS_AGAIN;
    ATOMIC_INC(conn->ctx->stats.recv_bytes, n);
    ATOMIC_INC(conn->info->recv_bytes, n);
    return CORVUS_OK;
}
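
In the corvus examples ATOMIC_INC takes an explicit amount, i.e. it acts as an atomic add. A self-contained sketch of that two-argument form, using an assumed definition for illustration rather than corvus's actual header:

#include <stdatomic.h>
#include <stdint.h>

/* Sketch: a two-argument ATOMIC_INC(counter, n), modeled as a C11 atomic add.
 * The real corvus macro may differ; this only mirrors how it is called above. */
#define EXAMPLE_ATOMIC_INC(counter, n)  atomic_fetch_add(&(counter), (n))

struct stats { _Atomic uint64_t recv_bytes; };

static void account_read(struct stats *s, int n)
{
    if (n > 0)
        EXAMPLE_ATOMIC_INC(s->recv_bytes, n);   /* add n bytes to the counter */
}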
Example #6
static void *easy_mempool_alloc_(easy_mempool_t *pool, uint32_t size, uint32_t align_size)
{
    void                    *ret = NULL;
    int32_t                 alloc_size = size + sizeof(easy_mempool_buf_t);
    alloc_size = easy_mempool_align(alloc_size, align_size);

    if (NULL != pool) {
        if ((pool->mem_total + size) > pool->mem_limit) {
            // memory over limit
        } else if (pool->page_size < alloc_size) {
            easy_mempool_buf_t      *buf = (easy_mempool_buf_t *)pool->allocator->memalign(align_size, alloc_size);

            if (NULL != buf) {
                buf->magic_num = EASY_MEMPOOL_BUF_MAGIC_NUM;
                buf->alloc_type = EASY_MEMPOOL_DIRECT_ALLOC;
                buf->size = size;
                ret = (char *)buf + sizeof(easy_mempool_buf_t);
                ATOMIC_INC(&(pool->direct_alloc_cnt));
            }
        } else {
            easy_mempool_page_t     *page = NULL;
            easy_mempool_buf_t      *buf = NULL;
            int32_t                 page_pos = -1;

            while (1) {
                if (NULL == (page = easy_mempool_get_cur_page_(pool, alloc_size, &page_pos))) {
                    break;
                }

                buf = (easy_mempool_buf_t *)easy_mempool_align_ptr(easy_mempool_alloc_from_page_(page, pool->page_size, alloc_size), align_size);

                if (NULL != buf) {
                    ATOMIC_INC(&(pool->page_metas[page_pos].ref_cnt));
                }

                easy_mempool_deref_page_(pool, page_pos);

                if (NULL != buf) {
                    buf->magic_num = EASY_MEMPOOL_BUF_MAGIC_NUM;
                    buf->alloc_type = EASY_MEMPOOL_ALLOC;
                    buf->page_pos = page_pos;
                    buf->size = size;
                    ret = (char *)buf + sizeof(easy_mempool_buf_t);
                    break;
                }
            }
        }

        if (NULL != ret) {
            ATOMIC_ADD(&(pool->mem_total), size);
        }
    }

    return ret;
}
Example #7
File: signal.c Project: genki/ruby
static RETSIGTYPE
sighandler(int sig)
{
    rb_vm_t *vm = GET_VM(); /* fix me for Multi-VM */
    ATOMIC_INC(vm->signal_buff[sig]);
    ATOMIC_INC(vm->buffered_signal_size);

#if !defined(BSD_SIGNAL) && !defined(POSIX_SIGNAL)
    ruby_signal(sig, sighandler);
#endif
}
Example #8
void rtw_update_mem_stat(u8 flag, u32 sz)
{
	static u32 update_time = 0;
	int peak, alloc;

	if(!update_time) {
		ATOMIC_SET(&rtw_dbg_mem_stat.vir_alloc,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.vir_peak,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.vir_alloc_err,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.phy_alloc,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.phy_peak,0);
		ATOMIC_SET(&rtw_dbg_mem_stat.phy_alloc_err,0);
	}
		
	switch(flag) {
		case MEM_STAT_VIR_ALLOC_SUCCESS:
			alloc = ATOMIC_ADD_RETURN(&rtw_dbg_mem_stat.vir_alloc, sz);
			peak=ATOMIC_READ(&rtw_dbg_mem_stat.vir_peak);
			if (peak<alloc)
				ATOMIC_SET(&rtw_dbg_mem_stat.vir_peak, alloc);
			break;
			
		case MEM_STAT_VIR_ALLOC_FAIL:
			ATOMIC_INC(&rtw_dbg_mem_stat.vir_alloc_err);
			break;
			
		case MEM_STAT_VIR_FREE:
			alloc = ATOMIC_SUB_RETURN(&rtw_dbg_mem_stat.vir_alloc, sz);
			break;
			
		case MEM_STAT_PHY_ALLOC_SUCCESS:
			alloc = ATOMIC_ADD_RETURN(&rtw_dbg_mem_stat.phy_alloc, sz);
			peak=ATOMIC_READ(&rtw_dbg_mem_stat.phy_peak);
			if (peak<alloc)
				ATOMIC_SET(&rtw_dbg_mem_stat.phy_peak, alloc);
			break;

		case MEM_STAT_PHY_ALLOC_FAIL:
			ATOMIC_INC(&rtw_dbg_mem_stat.phy_alloc_err);
			break;
		
		case MEM_STAT_PHY_FREE:
			alloc = ATOMIC_SUB_RETURN(&rtw_dbg_mem_stat.phy_alloc, sz);
			
	};

	if (rtw_get_passing_time_ms(update_time) > 5000) {
		rtw_dump_mem_stat();
		update_time=rtw_get_current_time();
	}
	
	
}
Example #9
	// Working flow of evolution
	void GaMultithreadingAlgorithm::WorkFlow()
	{
		// give ID to worker thread
		int workerId = ATOMIC_INC( _workerIdCounter ) - 1;

		while( 1 )
		{
			// wait for command from control thread
			LockSemaphore( _workerForkSync );

			// stop the thread to apply parameter change
			if( _parametersChange )
				break;

			// execute work step if the algorithm is not stopped
			if( _state == GAS_RUNNING )
				WorkStep( workerId );

			// only the last worker will release the others to continue
			if( !ATOMIC_DEC( _workersThreadIn ) )
				UnlockSemaphore( _workerJoinSync,  _numberOfThreads - 1 );

			// wait for the last worker to reach this point before notifying control thread
			LockSemaphore( _workerJoinSync );

			// the last worker thread to exit notifies control thread that work step is done
			if( !ATOMIC_DEC( _workersThreadOut ) )
				SignalEvent( _controlSync );

			// algorithm is stopped
			if( _state != GAS_RUNNING )
				break;
		}
	}
Example #10
static Atom
reserveAtom()
{ size_t index;
#ifdef O_ATOMGC				/* try to find a hole! */
  int i;
  int last = FALSE;
  Atom a;
  unsigned int ref;
  int idx;

  for(index=GD->atoms.no_hole_before, i=MSB(index); !last; i++)
  { size_t upto = (size_t)2<<i;
    Atom b = GD->atoms.array.blocks[i];

    if ( upto >= GD->atoms.highest )
    { upto = GD->atoms.highest;
      last = TRUE;
    }

    for(; index<upto; index++)
    {
      a = b + index;
      ref = a->references;

      if ( ATOM_IS_FREE(ref) &&
	   COMPARE_AND_SWAP(&a->references, ref, ATOM_RESERVED_REFERENCE) )
      { GD->atoms.no_hole_before = index+1;
        a->atom = (index<<LMASK_BITS)|TAG_ATOM;

	return a;
      }
    }
  }
  GD->atoms.no_hole_before = index+1;
#endif /*O_ATOMGC*/

redo:

  index = GD->atoms.highest;
  idx = MSB(index);
  assert(index >= 0);

  if ( !GD->atoms.array.blocks[idx] )
  { allocateAtomBlock(idx);
  }

  a = &GD->atoms.array.blocks[idx][index];
  ref = a->references;

  if ( ATOM_IS_FREE(ref) &&
       COMPARE_AND_SWAP(&a->references, ref, ATOM_RESERVED_REFERENCE) )
  { ATOMIC_INC(&GD->atoms.highest);
    a->atom = (index<<LMASK_BITS)|TAG_ATOM;

    return a;
  }

  goto redo;
}
Example #11
/**
 * \brief Get a new pointer to an image.
 *
 * Increment reference count and return the image.
 */
kvz_picture *kvz_image_copy_ref(kvz_picture *im)
{
  // The caller should have had another reference.
  assert(im->refcount > 0);
  ATOMIC_INC(&(im->refcount));

  return im;
}
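
The function above is only the acquire side of the reference-counting pattern. For illustration, a generic acquire/release pair sketched with C11 atomics (not kvazaar's actual code) shows the matching release that frees the object when the last reference is dropped:

#include <stdatomic.h>
#include <stdlib.h>

/* Generic sketch of the acquire/release refcount pattern used above
 * (not kvazaar's code): the last release frees the object. */
struct refcounted { _Atomic int refcount; /* payload ... */ };

static struct refcounted *obj_copy_ref(struct refcounted *o)
{
    atomic_fetch_add(&o->refcount, 1);   /* acquire: bump the count */
    return o;
}

static void obj_release(struct refcounted *o)
{
    if (atomic_fetch_sub(&o->refcount, 1) == 1)  /* we dropped the last ref */
        free(o);
}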
Example #12
/* {{{ apc_cache_exists */
PHP_APCU_API apc_cache_entry_t* apc_cache_exists(apc_cache_t* cache, zend_string *key, time_t t)
{
    if(apc_cache_busy(cache))
    {
        /* cache cleanup in progress */ 
        return NULL;
    }

	/* we only declare volatiles we need */
	{
		apc_cache_slot_t** slot;
	
		volatile apc_cache_entry_t* value = NULL;
		zend_ulong h, s;

        /* get hash and slot */
		apc_cache_hash_slot(cache, key, &h, &s);

        /* read lock header */
		APC_RLOCK(cache->header);	

		/* find head */
		slot = &cache->slots[s];

		while (*slot) {
			/* check for match by hash and identifier */
			if ((h == ZSTR_HASH((*slot)->key.str)) &&
				memcmp(ZSTR_VAL((*slot)->key.str), ZSTR_VAL(key), ZSTR_LEN(key)) == SUCCESS) {

				/* Check to make sure this entry isn't expired by a hard TTL */
				if((*slot)->value->ttl && (time_t) ((*slot)->ctime + (*slot)->value->ttl) < t) {
                    /* marked as a miss */
                    ATOMIC_INC(cache, cache->header->nmisses);

					/* unlock header */
					APC_RUNLOCK(cache->header);

					return NULL;
				}

				/* Return the cache entry ptr */
				value = (*slot)->value;
			
				/* unlock header */
				APC_RUNLOCK(cache->header);
					
				return (apc_cache_entry_t*)value;
			}

			slot = &(*slot)->next;  
		}

		/* unlock header */
		APC_RUNLOCK(cache->header);
	}

    return NULL;
}
Example #13
word
intern_indirect(indirect_table *tab, word val, int create ARG_LD)
{ Word	 idata     = addressIndirect(val);	/* points at header */
  size_t isize     = wsizeofInd(*idata);	/* include header */
  unsigned int key = MurmurHashAligned2(idata+1, isize*sizeof(word), MURMUR_SEED);
  indirect_buckets *buckets;

  for(;;)
  { buckets = acquire_itable_buckets(tab);
    unsigned int ki = key & (buckets->size-1);
    indirect *head = buckets->buckets[ki];
    indirect *h;

    acquire_itable_bucket(&buckets->buckets[ki]);
    for(h=buckets->buckets[ki]; h; h = h->next)
    { unsigned int ref = h->references;

      if ( INDIRECT_IS_VALID(ref) &&
	   idata[0] == h->header &&
	   memcmp(idata+1, h->data, isize*sizeof(word)) == 0 )
      { if ( bump_ref(h, ref) )
	{ release_itable_buckets();
	  return h->handle;
	}
      }
    }

    if ( TIGHT(buckets, tab) )
    { simpleMutexLock(&tab->mutex);
      rehash_indirect_table(tab);
      simpleMutexUnlock(&tab->mutex);
    }

    if ( buckets != tab->table || head != buckets->buckets[ki] )
      continue;				/* try again */

    if ( create )
    { indirect *h = reserve_indirect(tab, val PASS_LD);

      h->next = buckets->buckets[ki];
      if ( !COMPARE_AND_SWAP(&buckets->buckets[ki], head, h) ||
	   buckets != tab->table )
      { PL_free(h->data);
	h->references = 0;
	continue;			/* try again */
      }

      h->references = 1 | INDIRECT_VALID_REFERENCE | INDIRECT_RESERVED_REFERENCE;
      ATOMIC_INC(&tab->count);
      release_itable_buckets();

      return h->handle;
    } else
    { release_itable_buckets();
      return 0;
    }
  }
}
Example #14
functor_t
lookupFunctorDef(atom_t atom, size_t arity)
{ GET_LD
  int v;
  FunctorDef *table;
  int buckets;
  FunctorDef f, head;

redo:

  acquire_functor_table(table, buckets);

  v = (int)pointerHashValue(atom, buckets);
  head = table[v];

  DEBUG(9, Sdprintf("Lookup functor %s/%d = ", stringAtom(atom), arity));
  for(f = table[v]; f; f = f->next)
  { if (atom == f->name && f->arity == arity)
    { DEBUG(9, Sdprintf("%p (old)\n", f));
      if ( !FUNCTOR_IS_VALID(f->flags) )
      { goto redo;
      }
      release_functor_table();
      return f->functor;
    }
  }

  if ( functorDefTable->buckets * 2 < GD->statistics.functors )
  { LOCK();
    rehashFunctors();
    UNLOCK();
  }

  if ( !( head == table[v] && table == functorDefTable->table ) )
    goto redo;

  f = (FunctorDef) allocHeapOrHalt(sizeof(struct functorDef));
  f->functor = 0L;
  f->name    = atom;
  f->arity   = arity;
  f->flags   = 0;
  f->next    = table[v];
  if ( !( COMPARE_AND_SWAP(&table[v], head, f) &&
          table == functorDefTable->table) )
  { PL_free(f);
    goto redo;
  }
  registerFunctor(f);

  ATOMIC_INC(&GD->statistics.functors);
  PL_register_atom(atom);

  DEBUG(9, Sdprintf("%p (new)\n", f));

  release_functor_table();

  return f->functor;
}
Example #15
u32 usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
{		
	int err;
	unsigned int pipe;
	u32 ret = _SUCCESS;
	PURB purb = NULL;	
	struct recv_buf	*precvbuf = (struct recv_buf *)rmem;
	_adapter		*adapter = pintfhdl->padapter;
	struct dvobj_priv	*pdvobj = adapter_to_dvobj(adapter);
	struct pwrctrl_priv *pwrctl = dvobj_to_pwrctl(pdvobj);
	struct recv_priv	*precvpriv = &adapter->recvpriv;
	struct usb_device	*pusbd = pdvobj->pusbdev;

_func_enter_;
	
	if (RTW_CANNOT_RX(adapter) || (precvbuf == NULL))
	{
		RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_read_port:( RTW_CANNOT_RX ) || precvbuf == NULL!!!\n"));
		return _FAIL;
	}

	usb_init_recvbuf(adapter, precvbuf);

	if(precvbuf->pbuf)
	{			
		ATOMIC_INC(&(precvpriv->rx_pending_cnt));
		purb = precvbuf->purb;		

		//translate DMA FIFO addr to pipehandle
		pipe = ffaddr2pipehdl(pdvobj, addr);	

		usb_fill_bulk_urb(purb, pusbd, pipe, 
					precvbuf->pbuf,
            				MAX_RECVBUF_SZ,
            				usb_read_port_complete,
            				precvbuf);//context is precvbuf

		purb->transfer_dma = precvbuf->dma_transfer_addr;
		purb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;								

		err = usb_submit_urb(purb, GFP_ATOMIC);	
		if((err) && (err != (-EPERM)))
		{
			RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("cannot submit rx in-token(err=0x%.8x), URB_STATUS =0x%.8x", err, purb->status));
			DBG_8192C("cannot submit rx in-token(err = 0x%08x),urb_status = %d\n",err,purb->status);
			ret = _FAIL;
		}
		
	}

_func_exit_;

	return ret;
}
Example #16
inline void rtw_mi_update_fwstate(struct mlme_priv *pmlmepriv, sint state, u8 bset)
{
    _adapter *adapter = container_of(pmlmepriv, _adapter, mlmepriv);

    struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
    struct mi_state *iface_state = &dvobj->iface_state;
    struct mlme_ext_priv *mlmeext = &adapter->mlmeextpriv;

    if (!(state & (_FW_LINKED | _FW_UNDER_LINKING | WIFI_UNDER_WPS)))
        return;

    if (mlmeext_msr(mlmeext) == WIFI_FW_STATION_STATE) {
        /*ATOMIC_INC(&(iface_state->sta_num_ret));*/

        if (state & _FW_LINKED)
            (bset)	? ATOMIC_INC(&(iface_state->ld_sta_num_ret))
            : ATOMIC_DEC(&(iface_state->ld_sta_num_ret));

        if (state & _FW_UNDER_LINKING)
            (bset)	? ATOMIC_INC(&(iface_state->lg_sta_num_ret))
            : ATOMIC_DEC(&(iface_state->lg_sta_num_ret));
    }

    if (mlmeext_msr(mlmeext) == WIFI_FW_AP_STATE
            && check_fwstate(&adapter->mlmepriv, _FW_LINKED) == _TRUE
       ) {
        /*ATOMIC_INC(&(iface_state->ap_num_ret));*/
        if (adapter->stapriv.asoc_sta_count > 2)
            ld_ap_num_ret++;
    }

    if (state & WIFI_UNDER_WPS)
        (bset)	? ATOMIC_INC(&(iface_state->uw_num_ret))
        : ATOMIC_DEC(&(iface_state->uw_num_ret));

    _rtw_mi_status(adapter, &iface_state->sta_num, &iface_state->ld_sta_num, &iface_state->lg_sta_num
                   , &iface_state->ap_num, &iface_state->ld_ap_num, &iface_state->uwps_num, 1);
}
Example #17
void mbuf_recycle(struct context *ctx, struct mbuf *mbuf)
{
    ATOMIC_DEC(ctx->mstats.buffers, 1);

    if (ATOMIC_GET(ctx->mstats.free_buffers) > RECYCLE_LENGTH) {
        mbuf_free(ctx, mbuf);
        return;
    }

    TAILQ_NEXT(mbuf, next) = NULL;
    TAILQ_INSERT_HEAD(&ctx->free_mbufq, mbuf, next);

    ATOMIC_INC(ctx->mstats.free_buffers, 1);
}
Example #18
static indirect *
reserve_indirect(indirect_table *tab, word val ARG_LD)
{ size_t index;
  int i;
  int last = FALSE;

  for(index=tab->no_hole_before, i=MSB(index); !last; i++)
  { size_t upto = (size_t)2<<i;
    indirect *b = tab->array.blocks[i];

    if ( upto >= tab->highest )
    { upto = tab->highest;
      last = TRUE;
    }

    for(; index<upto; index++)
    { indirect *a = b + index;
      unsigned int refs = a->references;

      if ( INDIRECT_IS_FREE(refs) &&
	   COMPARE_AND_SWAP(&a->references, refs, INDIRECT_RESERVED_REFERENCE) )
      { tab->no_hole_before = index+1;
	return create_indirect(a, index, val PASS_LD);
      }
    }
  }
  tab->no_hole_before = tab->highest;

  for(;;)
  { int idx;
    indirect *a;
    unsigned int refs;

    index = tab->highest;
    idx = MSB(index);

    if ( !tab->array.blocks[idx] )
      allocate_indirect_block(tab, idx);

    a = &tab->array.blocks[idx][index];
    refs = a->references;

    if ( INDIRECT_IS_FREE(refs) &&
	 COMPARE_AND_SWAP(&a->references, refs, INDIRECT_RESERVED_REFERENCE) )
    { ATOMIC_INC(&tab->highest);
      return create_indirect(a, index, val PASS_LD);
    }
  }
}
Example #19
int CursorLeft(struct Tracker_Windows *window,struct WBlocks *wblock){
	if(window->curr_track>0 || (0==window->curr_track && window->curr_track_sub>=0)){

		window->curr_track_sub--;

		if(window->curr_track_sub==-2){
                  do{
                    ATOMIC_INC(window->curr_track, -1);
                    R_ASSERT_RETURN_IF_FALSE2(window->curr_track >= 0, 0);
                    ATOMIC_WRITE(wblock->wtrack, ListFindElement1(&wblock->wtracks->l,window->curr_track));
                  }while(wblock->wtrack==NULL);
                  int num_subtracks = GetNumSubtracks(wblock->wtrack);
                  window->curr_track_sub=num_subtracks-1;
		}

		if(
			window->curr_track<wblock->left_track ||
			(window->curr_track==wblock->left_track && window->curr_track_sub<wblock->left_subtrack)
		){
                        wblock->left_subtrack=-1;//window->curr_track_sub;
			wblock->left_track=window->curr_track;
                        printf("   left_track: %d, left_subtrack: %d. curr_track: %d\n",wblock->left_track, wblock->left_subtrack,window->curr_track);
			//UpdateAllWTracksCoordinates(window,wblock);
                        UpdateWBlockCoordinates(window,wblock);
			return 2;
		}else{
                        printf("   left_track: %d, left_subtrack: %d, curr_track: %d\n",wblock->left_track, wblock->left_subtrack,window->curr_track);
			return 1;
		}
	}else{
                
                if (window->curr_track==TEMPOTRACK)
                  return 0;
                
		ATOMIC_INC(window->curr_track, -1);

                if (window->curr_track==TEMPONODETRACK && window->show_reltempo_track==false)
                  ATOMIC_INC(window->curr_track, -1);

                if (window->curr_track==LINENUMBTRACK)
                  ATOMIC_INC(window->curr_track, -1);

                if (window->curr_track==SIGNATURETRACK && window->show_signature_track==false)
                  ATOMIC_INC(window->curr_track, -1);
                
                if (window->curr_track==LPBTRACK && window->show_lpb_track==false)
                  ATOMIC_INC(window->curr_track, -1);
                
                if (window->curr_track==TEMPOTRACK && window->show_bpm_track==false)
                  set_curr_track_to_leftmost_legal_track(window);

		return 1;
	}
}
Example #20
static
dr_signal_action_t event_signal(void *drcontext, dr_siginfo_t *info)
{
    ATOMIC_INC(num_signals);

    if (info->sig == SIGTERM) {
        /* Ignore TERM */
        return DR_SIGNAL_SUPPRESS;
    } else if (info->sig == SIGSEGV) {
        /* Skip the faulting instruction.  This is a sample only! */
        app_pc pc = decode_next_pc(drcontext, info->mcontext->xip);
        if (pc != NULL)
            info->mcontext->xip = pc;
        return DR_SIGNAL_REDIRECT;
    }

    return DR_SIGNAL_DELIVER;
}
Example #21
static void
registerFunctor(FunctorDef fd)
{
  size_t index;
  int idx, amask;

  index = ATOMIC_INC(&GD->functors.highest) - 1;
  idx = MSB(index);

  if ( !GD->functors.array.blocks[idx] )
  { allocateFunctorBlock(idx);
  }

  amask = (fd->arity < F_ARITY_MASK ? fd->arity : F_ARITY_MASK);
  fd->functor = MK_FUNCTOR(index, amask);
  GD->functors.array.blocks[idx][index] = fd;
  fd->flags |= VALID_F;

  DEBUG(CHK_SECURE, assert(fd->arity == arityFunctor(fd->functor)));
}
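
Here ATOMIC_INC matters for its return value: the post-increment counter, minus one, is a freshly reserved slot index. The same idiom in stand-alone form, sketched with C11 atomics rather than SWI-Prolog's macros:

#include <stdatomic.h>
#include <stddef.h>

/* Sketch: reserving a unique array index with fetch-and-add.
 * GD->functors.highest plays the role of next_index in the example above. */
static _Atomic size_t next_index;

static size_t reserve_index(void)
{
    /* atomic_fetch_add returns the value *before* the add, which is exactly
     * the slot being reserved; equivalent to "ATOMIC_INC(&next_index) - 1"
     * when ATOMIC_INC is assumed to return the post-increment value. */
    return atomic_fetch_add(&next_index, 1);
}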
Example #22
int conn_write(struct connection *conn, int clear)
{
    ssize_t remain = 0, status, bytes = 0, count = 0;
    struct conn_info *info = conn->info;

    int i, n = 0;
    struct iovec *vec = info->iov.data + info->iov.cursor;
    struct mbuf **bufs = info->iov.buf_ptr + info->iov.cursor;

    while (n < info->iov.len - info->iov.cursor) {
        if (n >= CORVUS_IOV_MAX || bytes >= SSIZE_MAX) break;
        bytes += vec[n++].iov_len;
    }

    status = socket_write(conn->fd, vec, n);
    if (status == CORVUS_AGAIN || status == CORVUS_ERR) return status;

    ATOMIC_INC(conn->ctx->stats.send_bytes, status);

    if (status < bytes) {
        for (i = 0; i < n; i++) {
            count += vec[i].iov_len;
            if (count > status) {
                remain = vec[i].iov_len - (count - status);
                vec[i].iov_base = (char*)vec[i].iov_base + remain;
                vec[i].iov_len -= remain;
                break;
            }
        }
        n = i;
    }

    info->iov.cursor += n;

    if (clear) {
        mbuf_decref(conn->ctx, bufs, n);
    }

    return status;
}
Example #23
int server_write(struct connection *server)
{
    struct conn_info *info = server->info;
    if (!STAILQ_EMPTY(&info->ready_queue)) {
        server_make_iov(info);
    }
    if (info->iov.len <= 0) {
        cmd_iov_reset(&info->iov);
        return CORVUS_OK;
    }

    int status = conn_write(server, 0);

    if (status == CORVUS_ERR) {
        LOG(ERROR, "server_write: server %d fail to write iov", server->fd);
        return CORVUS_ERR;
    }
    if (status == CORVUS_AGAIN) return CORVUS_OK;

    ATOMIC_INC(info->send_bytes, status);

    if (info->iov.cursor >= info->iov.len) {
        cmd_iov_free(&info->iov);
    }

    if (!STAILQ_EMPTY(&info->ready_queue) || info->iov.cursor < info->iov.len) {
        if (conn_register(server) == CORVUS_ERR) {
            LOG(ERROR, "server_write: fail to reregister server %d", server->fd);
            return CORVUS_ERR;
        }
    }

    info->last_active = time(NULL);

    return CORVUS_OK;
}
Example #24
struct mbuf *mbuf_get(struct context *ctx)
{
    struct mbuf *mbuf;
    uint8_t *buf;

    mbuf = _mbuf_get(ctx);
    if (mbuf == NULL) {
        return NULL;
    }

    buf = (uint8_t *)mbuf - ctx->mbuf_offset;
    mbuf->start = buf;
    mbuf->end = buf + ctx->mbuf_offset;

    mbuf->pos = mbuf->start;
    mbuf->last = mbuf->start;
    mbuf->queue = NULL;
    mbuf->refcount = 0;
    TAILQ_NEXT(mbuf, next) = NULL;

    ATOMIC_INC(ctx->mstats.buffers, 1);

    return mbuf;
}
Example #25
File: signal.c Project: yugui/ruby
static void
signal_enque(int sig)
{
    ATOMIC_INC(signal_buff.cnt[sig]);
    ATOMIC_INC(signal_buff.size);
}
Example #26
static easy_mempool_page_t *easy_mempool_get_cur_page_(easy_mempool_t *pool, int32_t ensure_size, int32_t *page_pos)
{
    easy_mempool_page_t     *ret = NULL;

    if (NULL != pool) {
        volatile int32_t        oldv = -1;
        volatile int32_t        newv = -1;
        volatile int32_t        cmpv = -1;
        easy_mempool_page_t     *cur_page = NULL;

        while (oldv != pool->cur_page_pos) {
            oldv = pool->cur_page_pos;
            newv = oldv;
            cmpv = oldv;
            ATOMIC_INC(&(pool->page_metas[oldv].ref_cnt));

            if (NULL == pool->page_metas[oldv].page) {
                easy_mempool_page_t     *tmp_page = easy_mempool_alloc_page_(pool);

                if (NULL != tmp_page) {
                    if (NULL != ATOMIC_CAS(&(pool->page_metas[oldv].page), NULL, tmp_page)) {
                        easy_mempool_free_page_(pool, tmp_page);
                    }
                }
            }

            if (NULL == (cur_page = pool->page_metas[oldv].page)) {
                easy_mempool_deref_page_(pool, oldv);
                break;
            }

            if ((pool->page_size - cur_page->base) < ensure_size) {
                int32_t                 base = cur_page->base;
                easy_mempool_deref_page_(pool, oldv);

                if (0 == base) {
                    break;
                }

                int32_t                 counter = 0;

                while (++counter < pool->page_num) {
                    newv = (newv + 1) % pool->page_num;

                    if (0 == ATOMIC_CAS(&(pool->page_metas[newv].ref_cnt), 0, 1)) {
                        if (oldv == ATOMIC_CAS(&(pool->cur_page_pos), cmpv, newv)) {
                            easy_mempool_deref_page_(pool, oldv);
                        } else {
                            easy_mempool_deref_page_(pool, newv);
                        }

                        break;
                    }
                }
            } else {
                *page_pos = oldv;
                ret = cur_page;
                break;
            }
        }
    }

    return ret;
}
Example #27
/* handler called when a function is entered. This function creates a new
 * funcDB on the heap if the passed-in pointer is NULL.
 */
int dbgEntrFunc(dbgFuncDB_t **ppFuncDB, const char *file, const char *func, int line)
{
	int iStackPtr = 0; /* TODO: find some better default, this one hurts the least, but it is not clean */
	dbgThrdInfo_t *pThrd;
	dbgFuncDBListEntry_t *pFuncDBListEntry;
	unsigned int i;
	dbgFuncDB_t *pFuncDB;

	assert(ppFuncDB != NULL);
	assert(file != NULL);
	assert(func != NULL);
	pFuncDB = *ppFuncDB;
	assert((pFuncDB == NULL) || (pFuncDB->magic == dbgFUNCDB_MAGIC));

	pThrd = dbgGetThrdInfo(); /* we must do this AFTER the mutexes are initialized! */

	if(pFuncDB == NULL) {
		/* we do not yet have a funcDB and need to create a new one. We also add it
		 * to the linked list of funcDBs. Please note that when a module is unloaded and
		 * then reloaded again, we currently do not try to find its previous funcDB but
		 * instead create a duplicate. While finding the past one is straightforward, it
		 * opens up the question what to do with e.g. mutex data left in it. We do not
		 * yet see any need to handle these questions, so duplication seems to be the right
		 * thing to do. -- rgerhards, 2008-03-10
		 */
		/* dbgprintf("%s:%d:%s: called first time, initializing FuncDB\n", pFuncDB->file, pFuncDB->line, pFuncDB->func); */
		/* get a new funcDB and add it to the list (all of this is protected by the mutex) */
		pthread_mutex_lock(&mutFuncDBList);
		if((pFuncDBListEntry = calloc(1, sizeof(dbgFuncDBListEntry_t))) == NULL) {
			dbgprintf("Error %d allocating memory for FuncDB List entry, not adding\n", errno);
			pthread_mutex_unlock(&mutFuncDBList);
			goto exit_it;
		} else {
			if((pFuncDB = calloc(1, sizeof(dbgFuncDB_t))) == NULL) {
				dbgprintf("Error %d allocating memory for FuncDB, not adding\n", errno);
				free(pFuncDBListEntry);
				pthread_mutex_unlock(&mutFuncDBList);
				goto exit_it;
			} else {
				pFuncDBListEntry->pFuncDB = pFuncDB;
				pFuncDBListEntry->pNext = pFuncDBListRoot;
				pFuncDBListRoot = pFuncDBListEntry;
			}
		}
		/* now initialize the funcDB
		 * note that we duplicate the strings, because the address provided may go away
		 * if a loadable module is unloaded!
		 */
		pFuncDB->magic = dbgFUNCDB_MAGIC;
		pFuncDB->file = strdup(file);
		pFuncDB->func = strdup(func);
		pFuncDB->line = line;
		pFuncDB->nTimesCalled = 0;
		for(i = 0 ; i < sizeof(pFuncDB->mutInfo)/sizeof(dbgFuncDBmutInfoEntry_t) ; ++i) {
			pFuncDB->mutInfo[i].lockLn = -1; /* set to not Locked */
		}

		/* a round of safety checks... */
		if(pFuncDB->file == NULL || pFuncDB->func == NULL) {
			dbgprintf("Error %d allocating memory for FuncDB, not adding\n", errno);
			/* do a little bit of cleanup */
			if(pFuncDB->file != NULL)
				free(pFuncDB->file);
			if(pFuncDB->func != NULL)
				free(pFuncDB->func);
			free(pFuncDB);
			free(pFuncDBListEntry);
			pthread_mutex_unlock(&mutFuncDBList);
			goto exit_it;
		}

		/* done mutex-protected operations */
		pthread_mutex_unlock(&mutFuncDBList);

		*ppFuncDB = pFuncDB; /* all went well, so we can update the caller */
	}

	/* when we reach this point, we have a fully-initialized FuncDB! */
	ATOMIC_INC(pFuncDB->nTimesCalled);
	if(bLogFuncFlow && dbgPrintNameIsInList((const uchar*)pFuncDB->file, printNameFileRoot))
		dbgprintf("%s:%d: %s: enter\n", pFuncDB->file, pFuncDB->line, pFuncDB->func);
	if(pThrd->stackPtr >= (int) (sizeof(pThrd->callStack) / sizeof(dbgFuncDB_t*))) {
		dbgprintf("%s:%d: %s: debug module: call stack for this thread full, suspending call tracking\n",
			  pFuncDB->file, pFuncDB->line, pFuncDB->func);
		iStackPtr = pThrd->stackPtr;
	} else {
		iStackPtr = pThrd->stackPtr++;
		if(pThrd->stackPtr > pThrd->stackPtrMax)
			pThrd->stackPtrMax = pThrd->stackPtr;
		pThrd->callStack[iStackPtr] = pFuncDB;
		pThrd->lastLine[iStackPtr] = line;
	}
	
exit_it:
	return iStackPtr;
}
Example #28
u32 usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
{
	int err;
	unsigned int pipe;
	u32 ret = _FAIL;
	PURB purb = NULL;
	struct recv_buf	*precvbuf = (struct recv_buf *)rmem;
	_adapter		*adapter = pintfhdl->padapter;
	struct dvobj_priv	*pdvobj = adapter_to_dvobj(adapter);
	//struct pwrctrl_priv *pwrctl = dvobj_to_pwrctl(pdvobj);
	struct recv_priv	*precvpriv = &adapter->recvpriv;
	struct usb_device	*pusbd = pdvobj->pusbdev;

	_func_enter_;

	if (RTW_CANNOT_RX(adapter) || (precvbuf == NULL)) {
		RT_TRACE(_module_hci_ops_os_c_,_drv_err_,("usb_read_port:( RTW_CANNOT_RX ) || precvbuf == NULL!!!\n"));
		goto exit;
	}

	usb_init_recvbuf(adapter, precvbuf);

	if (precvbuf->pskb == NULL) {
		SIZE_PTR tmpaddr = 0;
		SIZE_PTR alignment = 0;

		if (NULL != (precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue)))
			goto recv_buf_hook;

#ifndef CONFIG_FIX_NR_BULKIN_BUFFER
		precvbuf->pskb = rtw_skb_alloc(MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
#endif

		if (precvbuf->pskb == NULL) {
			if (0)
				DBG_871X("usb_read_port() enqueue precvbuf=%p \n", precvbuf);
			/* enqueue precvbuf and wait for free skb */
			rtw_enqueue_recvbuf(precvbuf, &precvpriv->recv_buf_pending_queue);
			goto exit;
		}

		tmpaddr = (SIZE_PTR)precvbuf->pskb->data;
		alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
		skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
	}

recv_buf_hook:
	precvbuf->phead = precvbuf->pskb->head;
	precvbuf->pdata = precvbuf->pskb->data;
	precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
	precvbuf->pend = skb_end_pointer(precvbuf->pskb);
	precvbuf->pbuf = precvbuf->pskb->data;

	purb = precvbuf->purb;

	/* translate DMA FIFO addr to pipehandle */
	pipe = ffaddr2pipehdl(pdvobj, addr);

	usb_fill_bulk_urb(purb, pusbd, pipe,
	                  precvbuf->pbuf,
	                  MAX_RECVBUF_SZ,
	                  usb_read_port_complete,
	                  precvbuf);

	err = usb_submit_urb(purb, GFP_ATOMIC);
	if (err && err != (-EPERM)) {
		DBG_871X("cannot submit rx in-token(err = 0x%08x),urb_status = %d\n"
		         , err, purb->status);
		goto exit;
	}

	ATOMIC_INC(&(precvpriv->rx_pending_cnt));
	ret = _SUCCESS;

exit:

	_func_exit_;

	return ret;
}
Example #29
static bool
event_pre_syscall(void *drcontext, int sysnum)
{
    ATOMIC_INC(num_syscalls);
#ifdef LINUX
    if (sysnum == SYS_execve) {
        /* our stats will be re-set post-execve so display now */
        show_results();
# ifdef SHOW_RESULTS
        dr_fprintf(STDERR, "<---- execve ---->\n");
# endif
    }
#endif
#ifdef SHOW_RESULTS
    dr_fprintf(STDERR, "[%d] "PFX" "PFX" "PFX"\n",
               sysnum, 
               dr_syscall_get_param(drcontext, 0),
               dr_syscall_get_param(drcontext, 1),
               dr_syscall_get_param(drcontext, 2));
#endif
    if (sysnum == write_sysnum) {
        /* store params for access post-syscall */
        int i;
        per_thread_t *data = (per_thread_t *) drmgr_get_cls_field(drcontext, tcls_idx);
#ifdef WINDOWS
        /* stderr and stdout are identical in our cygwin rxvt shell so for
         * our example we suppress output starting with 'H' instead
         */
        byte *output = (byte *) dr_syscall_get_param(drcontext, 5);
        byte first;
        size_t read;
        bool ok = dr_safe_read(output, 1, &first, &read);
        if (!ok || read != 1)
            return true; /* data unreadable: execute normally */
        if (dr_is_wow64()) {
            /* store the xcx emulation parameter for wow64 */
            dr_mcontext_t mc = {sizeof(mc),DR_MC_INTEGER/*only need xcx*/};
            dr_get_mcontext(drcontext, &mc);
            data->xcx = mc.xcx;
        }
#endif
        for (i = 0; i < SYS_MAX_ARGS; i++)
            data->param[i] = dr_syscall_get_param(drcontext, i);
        /* suppress stderr */
        if (dr_syscall_get_param(drcontext, 0) == (reg_t) STDERR
#ifdef WINDOWS
            && first == 'H'
#endif
            ) {
            /* pretend it succeeded */
#ifdef LINUX
            /* return the #bytes == 3rd param */
            dr_syscall_set_result(drcontext, dr_syscall_get_param(drcontext, 2));
#else
            /* we should also set the IO_STATUS_BLOCK.Information field */
            dr_syscall_set_result(drcontext, 0);
#endif
#ifdef SHOW_RESULTS
            dr_fprintf(STDERR, "  [%d] => skipped\n", sysnum);
#endif
            return false; /* skip syscall */
        } else if (dr_syscall_get_param(drcontext, 0) == (reg_t) STDOUT) {
            if (!data->repeat) {
                /* redirect stdout to stderr (unless it's our repeat) */
#ifdef SHOW_RESULTS
                dr_fprintf(STDERR, "  [%d] STDOUT => STDERR\n", sysnum);
#endif
                dr_syscall_set_param(drcontext, 0, (reg_t) STDERR);
            }
            /* we're going to repeat this syscall once */
            data->repeat = !data->repeat;
        }
    }
    return true; /* execute normally */
}
Example #30
int CursorRight(struct Tracker_Windows *window,struct WBlocks *wblock){
	struct WTracks *wtrack=wblock->wtrack;
	struct WTracks *leftwtrack;
	struct WTracks *rightwtrack;
	int update=0;
	int x2;
        
	if(window->curr_track>=0){

		window->curr_track_sub++;
                int num_subtracks = GetNumSubtracks(wtrack);

		if(window->curr_track_sub>=num_subtracks){
			window->curr_track++;
			if(NextWTrack(wtrack)==NULL){
				window->curr_track--;
				window->curr_track_sub--;
				return 0;
			}else{
				window->curr_track_sub= -1;
				ATOMIC_WRITE(wblock->wtrack, NextWTrack(wtrack));
			}
		}

		while(
			window->curr_track>wblock->right_track
			||
			(
			 window->curr_track==wblock->right_track
			 && window->curr_track_sub>wblock->right_subtrack
			 )
		){
			leftwtrack=ListFindElement1(&wblock->wtracks->l,wblock->left_track);
                        int num_subtracks = GetNumSubtracks(leftwtrack);
			wblock->left_subtrack++;
			if(wblock->left_subtrack>=num_subtracks){
                          if (wblock->left_track < wblock->block->num_tracks-1) {
                            wblock->left_subtrack= -1;
                            wblock->left_track++;
                            //return 0;
                          } else {
                            UpdateAllWTracksCoordinates(window,wblock);
                            wblock->left_subtrack--;
                            return 1;
                          }
			}
			leftwtrack=ListFindElement1(&wblock->wtracks->l,wblock->left_track);
			if(
				wblock->left_track==wblock->block->num_tracks-1 &&
				wblock->left_subtrack==num_subtracks-1
			){
                                UpdateAllWTracksCoordinates(window,wblock);
				return 2;
			}
			UpdateAllWTracksCoordinates(window,wblock);
			update=1;
		}
		for(;;){
		  rightwtrack=ListFindElement1(&wblock->wtracks->l,window->curr_track);
                  int num_subtracks = GetNumSubtracks(rightwtrack);
		  x2=GetXSubTrack2(rightwtrack,window->curr_track_sub);
		  if(x2>wblock->a.x2){
			leftwtrack=ListFindElement1(&wblock->wtracks->l,wblock->left_track);
			wblock->left_subtrack++;
			if(wblock->left_subtrack>=num_subtracks){
                          if (wblock->left_track < wblock->block->num_tracks-1) {
				wblock->left_subtrack= -1;
				wblock->left_track++;
                          } else {
                            wblock->left_subtrack--;
                            UpdateAllWTracksCoordinates(window,wblock);
                            return 1;
                          }
			}
			leftwtrack=ListFindElement1(&wblock->wtracks->l,wblock->left_track);
			UpdateAllWTracksCoordinates(window,wblock);
			update=1;
		  }else{
		    break;
		  }
		}
	
	}else{
                ATOMIC_INC(window->curr_track, 1);

                if (window->curr_track==LPBTRACK && window->show_lpb_track==false)
                  ATOMIC_INC(window->curr_track, 1);

                if (window->curr_track==SIGNATURETRACK && window->show_signature_track==false)
                  ATOMIC_INC(window->curr_track, 1);
                
                if (window->curr_track==LINENUMBTRACK)
                  ATOMIC_INC(window->curr_track, 1);

                if (window->curr_track==TEMPONODETRACK && window->show_reltempo_track==false)
                  ATOMIC_INC(window->curr_track, 1);

		if (0==window->curr_track)
                  window->curr_track_sub= -1;
                
                
	}
	if(update==1){
		return 2;
	}else{
		return 1;
	}
}