Example #1
static int async_del(long id)
{
    int i;
    /* scan all queues for an entry with async_id == 'id' */

    for (i = 0; i < erts_async_max_threads; i++) {
        ErlAsync* a;
        erts_mtx_lock(&async_q[i].mtx);

        a = async_q[i].head;
        while(a != NULL) {
            if (a->async_id == id) {
                if (a->prev != NULL)
                    a->prev->next = a->next;
                else
                    async_q[i].head = a->next;
                if (a->next != NULL)
                    a->next->prev = a->prev;
                else
                    async_q[i].tail = a->prev;
                async_q[i].len--;
                erts_mtx_unlock(&async_q[i].mtx);
                if (a->async_free != NULL)
                    a->async_free(a->async_data);
                async_detach(a->hndl);
                erts_free(ERTS_ALC_T_ASYNC, a);
                return 1;
            }
            a = a->next;
        }
        erts_mtx_unlock(&async_q[i].mtx);
    }
    return 0;
}
Example #2
static void
map_stat_free(ErtsAlcType_t n, void *extra, void *ptr)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    void *mptr;

    erts_mtx_lock(&instr_x_mutex);
    erts_mtx_lock(&instr_mutex);

    if (ptr) {
	MapStatBlock_t *mb;

	mptr = (void *) (((char *) ptr) - MAP_STAT_BLOCK_HEADER_SIZE);
	mb = (MapStatBlock_t *) mptr;

	stat_upd_free(n, mb->size);

	/* unlink the block from the global mem_anchor list */
	if (mb->prev)
	    mb->prev->next = mb->next;
	else
	    mem_anchor = mb->next;
	if (mb->next)
	    mb->next->prev = mb->prev;
    }
    else {
	mptr = NULL;
    }

    (*real_af->free)(n, real_af->extra, mptr);

    erts_mtx_unlock(&instr_mutex);
    erts_mtx_unlock(&instr_x_mutex);

}
Example #3
static void
release_update_permission(int release_updater)
{
    erts_mtx_lock(&update_table_permission_mtx);
    ASSERT(updater_process != NULL);

    if (release_updater) {
        erts_proc_lock(updater_process, ERTS_PROC_LOCK_STATUS);
        if (!ERTS_PROC_IS_EXITING(updater_process)) {
            erts_resume(updater_process, ERTS_PROC_LOCK_STATUS);
        }
        erts_proc_unlock(updater_process, ERTS_PROC_LOCK_STATUS);
    }
    updater_process = NULL;

    while (update_queue != NULL) { /* Unleash the entire herd */
	struct update_queue_item* qitem = update_queue;
	erts_proc_lock(qitem->p, ERTS_PROC_LOCK_STATUS);
	if (!ERTS_PROC_IS_EXITING(qitem->p)) {
	    erts_resume(qitem->p, ERTS_PROC_LOCK_STATUS);
	}
	erts_proc_unlock(qitem->p, ERTS_PROC_LOCK_STATUS);
	update_queue = qitem->next;
	erts_proc_dec_refc(qitem->p);
	erts_free(ERTS_ALC_T_PERSISTENT_LOCK_Q, qitem);
    }
    erts_mtx_unlock(&update_table_permission_mtx);
}
Example #4
static HashTable*
next_to_delete(void)
{
    HashTable* table;

    erts_mtx_lock(&delete_queue_mtx);
    table = delete_queue_head;
    if (table) {
        if (erts_atomic_read_nob(&table->refc)) {
            /*
             * This hash table is still referenced. Hash tables
             * must be deleted in order, so we return a NULL
             * pointer.
             */
            table = NULL;
        } else {
            /*
             * Remove the first hash table from the queue.
             */
            delete_queue_head = table->delete_next;
            if (delete_queue_head == NULL) {
                delete_queue_tail = &delete_queue_head;
            }
        }
    }
    erts_mtx_unlock(&delete_queue_mtx);
    return table;
}
Example #5
static void async_add(ErlAsync* a, AsyncQueue* q)
{
    /* XXX:PaN Is this still necessary when ports lock drivers? */
    if (is_internal_port(a->port)) {
        ERTS_LC_ASSERT(erts_drvportid2port(a->port));
        /* make sure the driver will stay around */
        driver_lock_driver(internal_port_index(a->port));
    }

    erts_mtx_lock(&q->mtx);

    if (q->len == 0) {
        q->head = a;
        q->tail = a;
        q->len = 1;
        erts_cnd_signal(&q->cv);
    }
    else { /* no need to signal (since the worker is working) */
        a->next = q->head;
        q->head->prev = a;
        q->head = a;
        q->len++;
    }
    erts_mtx_unlock(&q->mtx);
}
Example #6
File: erl_poll.c Project: basho/otp
void poll_debug_set_active_fd(ErtsSysFdType fd)
{
    erts_mtx_lock(&save_ops_mtx);
    active_debug_fd_set = 1;
    active_debug_fd = fd;
    erts_mtx_unlock(&save_ops_mtx);
}
Example #7
static ERTS_INLINE void async_reply(ErtsAsync *a, ErtsThrQPrepEnQ_t *prep_enq)
{
#if ERTS_USE_ASYNC_READY_Q
    ErtsAsyncReadyQ *arq;

#if ERTS_ASYNC_PRINT_JOB
    erts_fprintf(stderr, "=>> %ld\n", a->async_id);
#endif

    arq = async_ready_q(a->sched_id);

#if ERTS_USE_ASYNC_READY_ENQ_MTX
    erts_mtx_lock(&arq->x.data.enq_mtx);
#endif

    erts_thr_q_enqueue_prepared(&arq->thr_q, (void *) a, prep_enq);

#if ERTS_USE_ASYNC_READY_ENQ_MTX
    erts_mtx_unlock(&arq->x.data.enq_mtx);
#endif

#else /* ERTS_USE_ASYNC_READY_Q */

    call_async_ready(a);
    erts_free(ERTS_ALC_T_ASYNC, (void *) a);

#endif /* ERTS_USE_ASYNC_READY_Q */
}
Example #8
static void *
stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    Uint ssize;
    void *res;

    if (!erts_is_allctr_wrapper_prelocked()) {
	erts_mtx_lock(&instr_mutex);
    }

    ssize = size + STAT_BLOCK_HEADER_SIZE;
    res = (*real_af->alloc)(n, real_af->extra, ssize);
    if (res) {
	stat_upd_alloc(n, size);
	((StatBlock_t *) res)->size = size;
#ifdef VALGRIND
	/* Suppress "possibly leaks" by storing an actual dummy pointer
	   to the _start_ of the allocated block.*/
	((StatBlock_t *) res)->valgrind_leak_suppressor = res;
#endif
	res = (void *) ((StatBlock_t *) res)->mem;
    }

    if (!erts_is_allctr_wrapper_prelocked()) {
	erts_mtx_unlock(&instr_mutex);
    }

    return res;
}
Example #9
int check_async_ready(void)
{
    ErlAsync* a;
    int count = 0;

    erts_mtx_lock(&async_ready_mtx);
    a = async_ready_list;
    async_ready_list = NULL;
    erts_mtx_unlock(&async_ready_mtx);

    while(a != NULL) {
	ErlAsync* a_next = a->next;
	/* Look up the port; it may no longer be alive */
	Port *p = erts_id2port_sflgs(a->port,
				     NULL,
				     0,
				     ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
	if (!p) {
	    if (a->async_free)
		(*a->async_free)(a->async_data);
	}
	else {
	    count++;
	    if (async_ready(p, a->async_data)) {
		if (a->async_free != NULL)
		    (*a->async_free)(a->async_data);
	    }
	    async_detach(a->hndl);
	    erts_port_release(p);
	}
	erts_free(ERTS_ALC_T_ASYNC, (void *) a);
	a = a_next;
    }
    return count;
}
Example #10
static erts_tse_t *async_thread_init(ErtsAsyncQ *aq)
{
    ErtsThrQInit_t qinit = ERTS_THR_Q_INIT_DEFAULT;
    erts_tse_t *tse = erts_tse_fetch();
#ifdef ERTS_SMP
    ErtsThrPrgrCallbacks callbacks;

    callbacks.arg = (void *) tse;
    callbacks.wakeup = async_wakeup;
    callbacks.prepare_wait = NULL;
    callbacks.wait = NULL;

    erts_thr_progress_register_unmanaged_thread(&callbacks);
#endif

    qinit.live.queue = ERTS_THR_Q_LIVE_LONG;
    qinit.live.objects = ERTS_THR_Q_LIVE_SHORT;
    qinit.arg = (void *) tse;
    qinit.notify = async_wakeup;
#if ERTS_USE_ASYNC_READY_Q
    qinit.auto_finalize_dequeue = 0;
#endif

    erts_thr_q_initialize(&aq->thr_q, &qinit);

    /* Inform main thread that we are done initializing... */
    erts_mtx_lock(&async->init.data.mtx);
    async->init.data.no_initialized++;
    erts_cnd_signal(&async->init.data.cnd);
    erts_mtx_unlock(&async->init.data.mtx);

    return tse;
}
Example #11
static void
stat_free(ErtsAlcType_t n, void *extra, void *ptr)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    void *sptr;

    if (!erts_is_allctr_wrapper_prelocked()) {
	erts_mtx_lock(&instr_mutex);
    }

    if (ptr) {
	sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE);
	stat_upd_free(n, ((StatBlock_t *) sptr)->size);
    }
    else {
	sptr = NULL;
    }

    (*real_af->free)(n, real_af->extra, sptr);

    if (!erts_is_allctr_wrapper_prelocked()) {
	erts_mtx_unlock(&instr_mutex);
    }

}
Example #12
static void *
map_stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    Uint msize;
    void *res;

    erts_mtx_lock(&instr_mutex);

    msize = size + MAP_STAT_BLOCK_HEADER_SIZE;
    res = (*real_af->alloc)(n, real_af->extra, msize);
    if (res) {
	MapStatBlock_t *mb = (MapStatBlock_t *) res;
	stat_upd_alloc(n, size);

	mb->size = size;
	mb->type_no = n;
	mb->pid = erts_get_current_pid();

	/* link the new block first in the global mem_anchor list */
	mb->prev = NULL;
	mb->next = mem_anchor;
	if (mem_anchor)
	    mem_anchor->prev = mb;
	mem_anchor = mb;

	res = (void *) mb->mem;
    }

    erts_mtx_unlock(&instr_mutex);

    return res;
}
Example #13
static void *
stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    Uint old_size;
    Uint ssize;
    void *sptr;
    void *res;

    erts_mtx_lock(&instr_mutex);

    if (ptr) {
	sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE);
	old_size = ((StatBlock_t *) sptr)->size;
    }
    else {
	sptr = NULL;
	old_size = 0;
    }

    ssize = size + STAT_BLOCK_HEADER_SIZE;
    res = (*real_af->realloc)(n, real_af->extra, sptr, ssize);
    if (res) {
	stat_upd_realloc(n, size, old_size);
	((StatBlock_t *) res)->size = size;
	res = (void *) ((StatBlock_t *) res)->mem;
    }

    erts_mtx_unlock(&instr_mutex);

    return res;
}
Example #14
static int
try_seize_update_permission(Process* c_p)
{
    int success;

    ASSERT(!erts_thr_progress_is_blocking()); /* to avoid deadlock */
    ASSERT(c_p != NULL);

    erts_mtx_lock(&update_table_permission_mtx);
    ASSERT(updater_process != c_p);
    success = (updater_process == NULL);
    if (success) {
        updater_process = c_p;
    } else {
        struct update_queue_item* qitem;
        qitem = erts_alloc(ERTS_ALC_T_PERSISTENT_LOCK_Q, sizeof(*qitem));
        qitem->p = c_p;
        erts_proc_inc_refc(c_p);
        qitem->next = update_queue;
        update_queue = qitem;
        erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
    }
    erts_mtx_unlock(&update_table_permission_mtx);
    return success;
}
Example #15
static void
append_to_delete_queue(HashTable* table)
{
    erts_mtx_lock(&delete_queue_mtx);
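    /* delete_queue_tail points at the tail's 'next' field (or at
     * delete_queue_head when the queue is empty), so appending is O(1) */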
    table->delete_next = NULL;
    *delete_queue_tail = table;
    delete_queue_tail = &table->delete_next;
    erts_mtx_unlock(&delete_queue_mtx);
}
Example #16
int erts_register_async_ready_callback(void (*funcp)(void))
{
    ErtsAsyncReadyCallback *cb = erts_alloc(ERTS_ALC_T_NIF,
                                            sizeof(ErtsAsyncReadyCallback));
    cb->callback = funcp;
    erts_mtx_lock(&async_ready_mtx);
    cb->next = callbacks;	/* link under the lock to avoid racing other registrations */
    callbacks = cb;
    erts_mtx_unlock(&async_ready_mtx);
    return async_handle;
}
Example #17
File: erl_poll.c Project: basho/otp
static void do_save_op(ErtsSysFdType fd, int op, int xdata)
{
    erts_mtx_lock(&save_ops_mtx);
    if (fd == active_debug_fd && num_debug_save_ops < 1024) {
	int x = num_debug_save_ops++;
	debug_save_ops[x].op = op;
	debug_save_ops[x].active = fd;
	debug_save_ops[x].xdata = xdata;
    }
    erts_mtx_unlock(&save_ops_mtx);
}
Example #18
static void
suspend_updater(Process* c_p)
{
#ifdef DEBUG
    ASSERT(c_p != NULL);
    erts_mtx_lock(&update_table_permission_mtx);
    ASSERT(updater_process == c_p);
    erts_mtx_unlock(&update_table_permission_mtx);
#endif
    erts_suspend(c_p, ERTS_PROC_LOCK_MAIN, NULL);
}
Example #19
void
erts_mtrace_exit(Uint32 exit_value)
{
    ASSERT(!erts_is_allctr_wrapper_prelocked());
    erts_mtx_lock(&mtrace_op_mutex);
    erts_mtx_lock(&mtrace_buf_mutex);
    if (erts_mtrace_enabled) {
	Uint32 ti = get_time_inc();
    
	if (ti != INVALID_TIME_INC
	    && MAKE_TBUF_SZ(UI8_SZ + UI16_SZ + 2*UI32_SZ)) {
	    byte *hdrp;
	    Uint16 hdr;
	    int ti_n, exit_value_n;

	    *(tracep++) = ERTS_MT_EXIT_BDY_TAG;

	    hdrp = tracep;
	    tracep += 2;

	    PUT_VSZ_UI32(tracep, exit_value_n,  exit_value);
	    PUT_VSZ_UI32(tracep, ti_n,  ti);

	    hdr = ti_n;

	    hdr <<= UI32_MSB_EHF_SZ;
	    hdr |= exit_value_n;

	    WRITE_UI16(hdrp, hdr);

	    if(send_trace_buffer()) {
		erts_mtrace_enabled = 0;
		erts_sock_close(socket_desc);
		socket_desc = ERTS_SOCK_INVALID_SOCKET;
	    }
	}
    }
    erts_mtx_unlock(&mtrace_buf_mutex);
    erts_mtx_unlock(&mtrace_op_mutex);
}
Example #20
static void erts_msacc_reset(ErtsMsAcc *msacc) {
  int i;
  if (msacc->unmanaged) erts_mtx_lock(&msacc->mtx);

  for (i = 0; i < ERTS_MSACC_STATE_COUNT; i++) {
      msacc->perf_counters[i] = 0;
#ifdef ERTS_MSACC_STATE_COUNTERS
      msacc->state_counters[i] = 0;
#endif
  }

  if (msacc->unmanaged) erts_mtx_unlock(&msacc->mtx);
}
Example #21
static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
    ErtsSchedulerData *esdp = erts_get_scheduler_data();
    Process *rp = msaccrp->proc;
    ErtsMessage *msgp = NULL;
    Eterm **hpp, *hp;
    Eterm ref_copy = NIL, msg;
    Uint sz, *szp;
    ErlOffHeap *ohp = NULL;
    ErtsProcLocks rp_locks = (esdp && msaccrp->req_sched == esdp->no
                              ? ERTS_PROC_LOCK_MAIN : 0);

    sz = 0;
    hpp = NULL;
    szp = &sz;

    if (msacc->unmanaged) erts_mtx_lock(&msacc->mtx);

    while (1) {
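	/* Two passes: the first (hpp == NULL) only adds up the required heap
	   size in sz; the second builds the message in the allocated heap. */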
	if (hpp)
            ref_copy = STORE_NC(hpp, ohp, msaccrp->ref);
	else
            *szp += REF_THING_SIZE;

	if (msaccrp->action != ERTS_MSACC_GATHER)
            msg = ref_copy;
	else {
            msg = erts_msacc_gather_stats(msacc, hpp, szp);
            msg = erts_bld_tuple(hpp, szp, 2, ref_copy, msg);
	}
	if (hpp)
            break;

	msgp = erts_alloc_message_heap(rp, &rp_locks, sz, &hp, &ohp);
        hpp = &hp;
	szp = NULL;
    }

    if (msacc->unmanaged) erts_mtx_unlock(&msacc->mtx);

    erts_queue_message(rp, rp_locks, msgp, msg, am_system);

    if (esdp && msaccrp->req_sched == esdp->no)
	rp_locks &= ~ERTS_PROC_LOCK_MAIN;

    if (rp_locks)
	erts_smp_proc_unlock(rp, rp_locks);

}
Example #22
static void
mtrace_free(ErtsAlcType_t n, void *extra, void *ptr)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;

    if (!erts_is_allctr_wrapper_prelocked()) {
	erts_mtx_lock(&mtrace_op_mutex);
    }

    (*real_af->free)(n, real_af->extra, ptr);
    if (!erts_is_allctr_wrapper_prelocked()) {
	write_free_entry(ERTS_MT_FREE_BDY_TAG, n, 0, ptr);
    }

    if (!erts_is_allctr_wrapper_prelocked()) {
	erts_mtx_unlock(&mtrace_op_mutex);
    }
}
Example #23
void
erl_drv_tsd_key_destroy(ErlDrvTSDKey key)
{
    erts_mtx_lock(&tsd_mtx);

    if (key < 0 || max_used_tsd_key < key || !used_tsd_keys[key])
	fatal_error(EINVAL, "erl_drv_tsd_key_destroy()");

    if (used_tsd_keys[key] != no_name)
	erts_free(ERTS_ALC_T_DRV_TSD, used_tsd_keys[key]);

    used_tsd_keys[key] = NULL;
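    /* let the next key allocation start scanning at the lowest freed slot */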
    if (next_tsd_key < 0 || key < next_tsd_key)
	next_tsd_key = key;

    erts_mtx_unlock(&tsd_mtx);
}
Example #24
int check_async_ready(void)
{
#ifdef USE_THREADS
    ErtsAsyncReadyCallback *cbs;
#endif
    ErlAsync* a;
    int count = 0;

    erts_mtx_lock(&async_ready_mtx);
    a = async_ready_list;
    async_ready_list = NULL;
#ifdef USE_THREADS
    cbs = callbacks;
#endif
    erts_mtx_unlock(&async_ready_mtx);

    while(a != NULL) {
        ErlAsync* a_next = a->next;
        /* Look up the port; it may no longer be alive */
        Port *p = erts_id2port_sflgs(a->port,
                                     NULL,
                                     0,
                                     ERTS_PORT_SFLGS_INVALID_DRIVER_LOOKUP);
        if (!p) {
            if (a->async_free)
                (*a->async_free)(a->async_data);
        }
        else {
            count++;
            if (async_ready(p, a->async_data)) {
                if (a->async_free != NULL)
                    (*a->async_free)(a->async_data);
            }
            async_detach(a->hndl);
            erts_port_release(p);
        }
        erts_free(ERTS_ALC_T_ASYNC, (void *) a);
        a = a_next;
    }
#ifdef USE_THREADS
    for (; cbs; cbs = cbs->next)
        (*cbs->callback)();
#endif
    return count;
}
Example #25
static void *
mtrace_alloc(ErtsAlcType_t n, void *extra, Uint size)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    void *res;

    if (!erts_is_allctr_wrapper_prelocked()) {
	erts_mtx_lock(&mtrace_op_mutex);
    }

    res = (*real_af->alloc)(n, real_af->extra, size);
    write_alloc_entry(ERTS_MT_ALLOC_BDY_TAG, res, n, 0, size);

    if (!erts_is_allctr_wrapper_prelocked()) {
	erts_mtx_unlock(&mtrace_op_mutex);
    }

    return res;
}
Example #26
static void *
stat_alloc(ErtsAlcType_t n, void *extra, Uint size)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    Uint ssize;
    void *res;

    erts_mtx_lock(&instr_mutex);

    ssize = size + STAT_BLOCK_HEADER_SIZE;
    res = (*real_af->alloc)(n, real_af->extra, ssize);
    if (res) {
	stat_upd_alloc(n, size);
	((StatBlock_t *) res)->size = size;
	res = (void *) ((StatBlock_t *) res)->mem;
    }

    erts_mtx_unlock(&instr_mutex);

    return res;
}
Example #27
static void *
stat_realloc(ErtsAlcType_t n, void *extra, void *ptr, Uint size)
{
    ErtsAllocatorFunctions_t *real_af = (ErtsAllocatorFunctions_t *) extra;
    Uint old_size;
    Uint ssize;
    void *sptr;
    void *res;

    if (!erts_is_allctr_wrapper_prelocked()) {
	erts_mtx_lock(&instr_mutex);
    }

    if (ptr) {
	sptr = (void *) (((char *) ptr) - STAT_BLOCK_HEADER_SIZE);
	old_size = ((StatBlock_t *) sptr)->size;
    }
    else {
	sptr = NULL;
	old_size = 0;
    }

    ssize = size + STAT_BLOCK_HEADER_SIZE;
    res = (*real_af->realloc)(n, real_af->extra, sptr, ssize);
    if (res) {
	stat_upd_realloc(n, size, old_size);
	((StatBlock_t *) res)->size = size;
#ifdef VALGRIND
	((StatBlock_t *) res)->valgrind_leak_suppressor = res;
#endif
	res = (void *) ((StatBlock_t *) res)->mem;
    }

    if (!erts_is_allctr_wrapper_prelocked()) {
	erts_mtx_unlock(&instr_mutex);
    }

    return res;
}
Example #28
File: erl_msacc.c Project: hawk/otp
static void send_reply(ErtsMsAcc *msacc, ErtsMSAccReq *msaccrp) {
    ErtsSchedulerData *esdp = erts_get_scheduler_data();
    Process *rp = msaccrp->proc;
    ErtsMessage *msgp = NULL;
    Eterm *hp;
    Eterm ref_copy = NIL, msg;
    ErtsProcLocks rp_locks = (esdp && msaccrp->req_sched == esdp->no
                              ? ERTS_PROC_LOCK_MAIN : 0);
    ErtsHeapFactory factory;

    if (msaccrp->action == ERTS_MSACC_GATHER) {

        msgp = erts_factory_message_create(&factory, rp, &rp_locks, DEFAULT_MSACC_MSG_SIZE);

        if (msacc->unmanaged) erts_mtx_lock(&msacc->mtx);

        hp = erts_produce_heap(&factory, REF_THING_SIZE + 3 /* tuple */, 0);
        ref_copy = STORE_NC(&hp, &msgp->hfrag.off_heap, msaccrp->ref);
        msg = erts_msacc_gather_stats(msacc, &factory);
        msg = TUPLE2(hp, ref_copy, msg);

        if (msacc->unmanaged) erts_mtx_unlock(&msacc->mtx);

        erts_factory_close(&factory);
    } else {
        ErlOffHeap *ohp = NULL;
        msgp = erts_alloc_message_heap(rp, &rp_locks, REF_THING_SIZE, &hp, &ohp);
        msg = STORE_NC(&hp, &msgp->hfrag.off_heap, msaccrp->ref);
    }

    erts_queue_message(rp, rp_locks, msgp, msg, am_system);

    if (esdp && msaccrp->req_sched == esdp->no)
	rp_locks &= ~ERTS_PROC_LOCK_MAIN;

    if (rp_locks)
	erts_smp_proc_unlock(rp, rp_locks);

}
Example #29
static ErlAsync* async_get(AsyncQueue* q)
{
    ErlAsync* a;

    erts_mtx_lock(&q->mtx);
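    /* block until a job is available; async_add() inserts at the head,
       so the consumer takes the oldest job from the tail */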
    while((a = q->tail) == NULL) {
        erts_cnd_wait(&q->cv, &q->mtx);
    }
#ifdef ERTS_SMP
    ASSERT(a && q->tail == a);
#endif
    if (q->head == q->tail) {
        q->head = q->tail = NULL;
        q->len = 0;
    }
    else {
        q->tail->prev->next = NULL;
        q->tail = q->tail->prev;
        q->len--;
    }
    erts_mtx_unlock(&q->mtx);
    return a;
}
Example #30
static void
dump_stat_to_stream(FILE *fp, int begin_max_period)
{
    ErtsAlcType_t i, a_max, a_min;

    erts_mtx_lock(&instr_mutex);

    fprintf(fp,
	    "{instr_vsn,%lu}.\n",
	    (unsigned long) ERTS_INSTR_VSN);
    
    update_max_ever_values(&stats->tot, 0, 0);

    fprintf(fp,
	    "{total,[{total,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}]}.\n",
	    (UWord) stats->tot.size,
	    (UWord) stats->tot.max_size,
	    (UWord) stats->tot.max_size_ever,
	    (UWord) stats->tot.blocks,
	    (UWord) stats->tot.max_blocks,
	    (UWord) stats->tot.max_blocks_ever);

    a_max = 0;
    a_min = ~0;
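    /* find the smallest and largest enabled allocator numbers */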
    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
	if (erts_allctrs_info[i].enabled) {
	    if (a_min > i)
		a_min = i;
	    if (a_max < i)
		a_max = i;
	}
    }

    ASSERT(ERTS_ALC_A_MIN <= a_min && a_min <= ERTS_ALC_A_MAX);
    ASSERT(ERTS_ALC_A_MIN <= a_max && a_max <= ERTS_ALC_A_MAX);
    ASSERT(a_min <= a_max);

    update_max_ever_values(stats->a, a_min, a_max);

    for (i = ERTS_ALC_A_MIN; i <= ERTS_ALC_A_MAX; i++) {
	if (erts_allctrs_info[i].enabled) {
	    fprintf(fp,
		    "%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
		    i == a_min ? "{allocators,\n [" : "  ",
		    ERTS_ALC_A2AD(i),
		    (UWord) stats->a[i].size,
		    (UWord) stats->a[i].max_size,
		    (UWord) stats->a[i].max_size_ever,
		    (UWord) stats->a[i].blocks,
		    (UWord) stats->a[i].max_blocks,
		    (UWord) stats->a[i].max_blocks_ever,
		    i == a_max ? "]}.\n" : ",\n");
	}
    }

    update_max_ever_values(stats->c, ERTS_ALC_C_MIN, ERTS_ALC_C_MAX);

    for (i = ERTS_ALC_C_MIN; i <= ERTS_ALC_C_MAX; i++) {
	fprintf(fp,
		"%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
		i == ERTS_ALC_C_MIN ? "{classes,\n [" : "  ",
		ERTS_ALC_C2CD(i),
		(UWord) stats->c[i].size,
		(UWord) stats->c[i].max_size,
		(UWord) stats->c[i].max_size_ever,
		(UWord) stats->c[i].blocks,
		(UWord) stats->c[i].max_blocks,
		(UWord) stats->c[i].max_blocks_ever,
		i == ERTS_ALC_C_MAX ? "]}.\n" :  ",\n" );
    }

    update_max_ever_values(stats->n, ERTS_ALC_N_MIN, ERTS_ALC_N_MAX);

    for (i = ERTS_ALC_N_MIN; i <= ERTS_ALC_N_MAX; i++) {
	fprintf(fp,
		"%s{%s,[{sizes,%lu,%lu,%lu},{blocks,%lu,%lu,%lu}]}%s",
		i == ERTS_ALC_N_MIN ? "{types,\n [" : "  ",
		ERTS_ALC_N2TD(i),
		(UWord) stats->n[i].size,
		(UWord) stats->n[i].max_size,
		(UWord) stats->n[i].max_size_ever,
		(UWord) stats->n[i].blocks,
		(UWord) stats->n[i].max_blocks,
		(UWord) stats->n[i].max_blocks_ever,
		i == ERTS_ALC_N_MAX ? "]}.\n" :  ",\n" );
    }

    if (begin_max_period) {
	begin_new_max_period(&stats->tot, 0, 0);
	begin_new_max_period(stats->a, a_min, a_max);
	begin_new_max_period(stats->c, ERTS_ALC_C_MIN, ERTS_ALC_C_MAX);
	begin_new_max_period(stats->n, ERTS_ALC_N_MIN, ERTS_ALC_N_MAX);
    }

    erts_mtx_unlock(&instr_mutex);

}