Example #1
void
erts_cleanup_port_data(Port *prt)
{
    ASSERT(erts_atomic32_read_nob(&prt->state) & ERTS_PORT_SFLGS_INVALID_LOOKUP);
    cleanup_old_port_data(erts_smp_atomic_read_nob(&prt->data));
    erts_smp_atomic_set_nob(&prt->data, (erts_aint_t) THE_NON_VALUE);
}
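The _nob suffix on these atomic operations stands for "no barrier": the read and the store are atomic but impose no memory ordering. A minimal standalone sketch of the same read-then-invalidate pattern, written with C11 relaxed atomics for illustration (cleanup_data() and INVALID_DATA are made-up stand-ins, not ERTS names):

#include <stdatomic.h>
#include <stdint.h>

#define INVALID_DATA ((intptr_t) 0)        /* stand-in for THE_NON_VALUE */

static void cleanup_data(intptr_t data) { (void) data; /* release resources */ }

void cleanup_port_data(atomic_intptr_t *data)
{
    /* memory_order_relaxed corresponds to the _nob ("no barrier") variants */
    cleanup_data(atomic_load_explicit(data, memory_order_relaxed));
    atomic_store_explicit(data, INVALID_DATA, memory_order_relaxed);
}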
Example #2
static void 
bin_check(void)
{
    Process  *rp;
    struct erl_off_heap_header* hdr;
    int i, printed = 0, max = erts_ptab_max(&erts_proc);

    for (i=0; i < max; i++) {
	rp = erts_pix2proc(i);
	if (!rp)
	    continue;
	for (hdr = rp->off_heap.first; hdr; hdr = hdr->next) {
	    if (hdr->thing_word == HEADER_PROC_BIN) {
		ProcBin *bp = (ProcBin*) hdr;
		if (!printed) {
		    erts_printf("Process %T holding binary data \n", rp->common.id);
		    printed = 1;
		}
		erts_printf("%p orig_size: %bpd, norefs = %bpd\n",
			    bp->val, 
			    bp->val->orig_size, 
			    erts_smp_atomic_read_nob(&bp->val->refc));
	    }
	}
	if (printed) {
	    erts_printf("--------------------------------------\n");
	    printed = 0;
	}
    }
    /* db_bin_check() has to be rewritten for the AVL trees... */
    /*db_bin_check();*/ 
}
Example #3
static void
resume_after_block(void *vd)
{
    ErtsPortTaskExeBlockData *d = (ErtsPortTaskExeBlockData *) vd;
    erts_smp_runq_lock(d->runq);
    if (d->resp)
	*d->resp = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
		    != (erts_aint_t) 0);
}
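Note that resume_after_block() deliberately returns with the run-queue lock held: it is the second half of a prepare/resume pair, where the matching "prepare" callback is expected to release the lock before the scheduler thread blocks. A standalone sketch of that pattern using plain pthreads (all names here are illustrative, not the ERTS callback API):

#include <pthread.h>

typedef struct {
    pthread_mutex_t *lock;
} BlockData;

static void prepare_for_block(void *vd)
{
    BlockData *d = (BlockData *) vd;
    pthread_mutex_unlock(d->lock);   /* never hold the lock while blocked */
}

static void resume_after_block(void *vd)
{
    BlockData *d = (BlockData *) vd;
    pthread_mutex_lock(d->lock);     /* caller continues with the lock held */
}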
Example #4
ErtsMigrateResult
erts_port_migrate(Port *prt, int *prt_locked,
		  ErtsRunQueue *from_rq, int *from_locked,
		  ErtsRunQueue *to_rq, int *to_locked)
{
    ERTS_SMP_LC_ASSERT(*from_locked);
    ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
    ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);

    ASSERT(!erts_common_run_queue);

    if (!*from_locked || !*to_locked) {
	if (from_rq < to_rq) {
	    if (!*to_locked) {
		if (!*from_locked)
		    erts_smp_runq_lock(from_rq);
		erts_smp_runq_lock(to_rq);
	    }
	    else if (erts_smp_runq_trylock(from_rq) == EBUSY) {
		erts_smp_runq_unlock(to_rq);
		erts_smp_runq_lock(from_rq);
		erts_smp_runq_lock(to_rq);
	    }
	}
	else {
	    if (!*from_locked) {
		if (!*to_locked)
		    erts_smp_runq_lock(to_rq);
		erts_smp_runq_lock(from_rq);
	    }
	    else if (erts_smp_runq_trylock(to_rq) == EBUSY) {
		erts_smp_runq_unlock(from_rq);
		erts_smp_runq_lock(to_rq);
		erts_smp_runq_lock(from_rq);
	    }
	}
	*to_locked = *from_locked = 1;
    }
    ERTS_SMP_LC_CHK_RUNQ_LOCK(from_rq, *from_locked);
    ERTS_SMP_LC_CHK_RUNQ_LOCK(to_rq, *to_locked);

    /* Refuse to migrate to a suspended run queue */
    if (to_rq->flags & ERTS_RUNQ_FLG_SUSPENDED)
	return ERTS_MIGRATE_FAILED_RUNQ_SUSPENDED;
    if (from_rq != (ErtsRunQueue *) erts_smp_atomic_read_nob(&prt->run_queue))
	return ERTS_MIGRATE_FAILED_RUNQ_CHANGED;
    if (!ERTS_PORT_IS_IN_RUNQ(from_rq, prt))
	return ERTS_MIGRATE_FAILED_NOT_IN_RUNQ;
    dequeue_port(from_rq, prt);
    erts_smp_atomic_set_nob(&prt->run_queue, (erts_aint_t) to_rq);
    enqueue_port(to_rq, prt);
    return ERTS_MIGRATE_SUCCESS;
}
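The locking dance at the top avoids deadlock between two run queues by imposing a global acquisition order based on the queues' addresses (lower address first), falling back to unlock-and-relock when a trylock fails. A standalone sketch of the address-ordered idiom, assuming pthreads (names are illustrative):

#include <pthread.h>

/* Take two locks without risking deadlock: always acquire the
   lower-addressed mutex first, the same rule every thread follows. */
static void lock_both(pthread_mutex_t *a, pthread_mutex_t *b)
{
    if (a == b) {
        pthread_mutex_lock(a);
    } else if (a < b) {
        pthread_mutex_lock(a);
        pthread_mutex_lock(b);
    } else {
        pthread_mutex_lock(b);
        pthread_mutex_lock(a);
    }
}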
Example #5
int 
erts_is_count_break(BeamInstr *pc, Sint *count_ret) {
    BpDataCount *bdc = 
	(BpDataCount *) is_break(pc, (BeamInstr) BeamOp(op_i_count_breakpoint));
    
    if (bdc) {
	if (count_ret) {
	    *count_ret = (Sint) erts_smp_atomic_read_nob(&bdc->acount);
	}
	return !0;
    }
    return 0;
}
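A hypothetical caller, to show how the out-parameter is used (report_count_break() is made up for illustration; erts_printf() is the ERTS formatted-print routine seen in Example #2):

void
report_count_break(BeamInstr *pc)
{
    Sint count;
    if (erts_is_count_break(pc, &count))
        erts_printf("count breakpoint at %p: %ld hits\n",
                    (void *) pc, (long) count);
}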
Example #6
File: atom.c Project: ericmj/otp
/*
 * Print info about atom tables
 */
void atom_info(int to, void *to_arg)
{
    int lock = !ERTS_IS_CRASH_DUMPING;
    if (lock)
	atom_read_lock();
    index_info(to, to_arg, &erts_atom_table);
#ifdef ERTS_ATOM_PUT_OPS_STAT
    erts_print(to, to_arg, "atom_put_ops: %ld\n",
	       erts_smp_atomic_read_nob(&atom_put_ops));
#endif

    if (lock)
	atom_read_unlock();
}
Example #7
int
erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
{
    int port_was_enqueued = 0;
    Port *pp;
    ErtsPortTaskQueue *ptqp;
    ErtsPortTask *ptp;
    int res = 0;
    int reds = ERTS_PORT_REDS_EXECUTE;
    erts_aint_t io_tasks_executed = 0;
    int fpe_was_unmasked;
    ErtsPortTaskExeBlockData blk_data = {runq, NULL};

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PT_CHK_PORTQ(runq);

    pp = pop_port(runq);
    if (!pp) {
	res = 0;
	goto done;
    }

    ERTS_PORT_NOT_IN_RUNQ(pp);

    *curr_port_pp = pp;

    ASSERT(pp->sched.taskq);
    ASSERT(pp->sched.taskq->first);
    ptqp = pp->sched.taskq;
    pp->sched.taskq = NULL;

    ASSERT(!pp->sched.exe_taskq);
    pp->sched.exe_taskq = ptqp;

    if (erts_smp_port_trylock(pp) == EBUSY) {
	erts_smp_runq_unlock(runq);
	erts_smp_port_lock(pp);
	erts_smp_runq_lock(runq);
    }
    
    if (erts_sched_stat.enabled) {
	ErtsSchedulerData *esdp = erts_get_scheduler_data();
	Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no);
	int migrated = old && old != esdp->no;

	erts_smp_spin_lock(&erts_sched_stat.lock);
	erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_executed++;
	erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].executed++;
	if (migrated) {
	    erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_migrated++;
	    erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].migrated++;
	}
	erts_smp_spin_unlock(&erts_sched_stat.lock);
    }

    /* trace port scheduling, in */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
	trace_sched_ports(pp, am_in);
    }

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);
    ptp = pop_task(ptqp);

    fpe_was_unmasked = erts_block_fpe();

    while (ptp) {
	ASSERT(pp->sched.taskq != pp->sched.exe_taskq);

	reset_handle(ptp);
	erts_smp_runq_unlock(runq);

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
	ERTS_SMP_CHK_NO_PROC_LOCKS;
	ASSERT(pp->drv_ptr);

	switch (ptp->type) {
	case ERTS_PORT_TASK_FREE: /* May be pushed in q at any time */
	    reds += ERTS_PORT_REDS_FREE;
	    erts_smp_runq_lock(runq);

	    erts_unblock_fpe(fpe_was_unmasked);
	    ASSERT(pp->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
	    if (ptqp->first || (pp->sched.taskq && pp->sched.taskq->first))
		handle_remaining_tasks(runq, pp);
	    ASSERT(!ptqp->first
		   && (!pp->sched.taskq || !pp->sched.taskq->first));
#ifdef ERTS_SMP
	    erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
	    ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
#else
	    erts_port_status_bor_set(pp, ERTS_PORT_SFLG_FREE);	    
#endif

	    port_task_free(ptp);
	    if (pp->sched.taskq)
		port_taskq_free(pp->sched.taskq);
	    pp->sched.taskq = NULL;

	    goto tasks_done;
	case ERTS_PORT_TASK_TIMEOUT:
	    reds += ERTS_PORT_REDS_TIMEOUT;
	    if (!(pp->status & ERTS_PORT_SFLGS_DEAD))
		(*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
	    break;
	case ERTS_PORT_TASK_INPUT:
	    reds += ERTS_PORT_REDS_INPUT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    /* NOTE some windows drivers use ->ready_input for input and output */
	    (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data, ptp->event);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_OUTPUT:
	    reds += ERTS_PORT_REDS_OUTPUT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data, ptp->event);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_EVENT:
	    reds += ERTS_PORT_REDS_EVENT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data, ptp->event, ptp->event_data);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_DIST_CMD:
	    reds += erts_dist_command(pp, CONTEXT_REDS-reds);
	    break;
	default:
	    erl_exit(ERTS_ABORT_EXIT,
		     "Invalid port task type: %d\n",
		     (int) ptp->type);
	    break;
	}

	if ((pp->status & ERTS_PORT_SFLG_CLOSING)
	    && erts_is_port_ioq_empty(pp)) {
	    reds += ERTS_PORT_REDS_TERMINATE;
	    erts_terminate_port(pp);
	}

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

#ifdef ERTS_SMP
	if (pp->xports)
	    erts_smp_xports_unlock(pp);
	ASSERT(!pp->xports);
#endif

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

	port_task_free(ptp);

	erts_smp_runq_lock(runq);

	ptp = pop_task(ptqp);
    }

 tasks_done:

    erts_unblock_fpe(fpe_was_unmasked);

    if (io_tasks_executed) {
	ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
	       >= io_tasks_executed);
	erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
				 -1*io_tasks_executed);
    }

    *curr_port_pp = NULL;

#ifdef ERTS_SMP
    ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
#endif

    if (!pp->sched.taskq) {
	ASSERT(pp->sched.exe_taskq);
	pp->sched.exe_taskq = NULL;
    }
    else {
#ifdef ERTS_SMP
	ErtsRunQueue *xrunq;
#endif

	ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
	ASSERT(pp->sched.taskq->first);

#ifdef ERTS_SMP
	xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
	if (!xrunq) {
#endif
	    enqueue_port(runq, pp);
	    ASSERT(pp->sched.exe_taskq);
	    pp->sched.exe_taskq = NULL;
	    /* No need to notify ourselves about inc in runq. */
#ifdef ERTS_SMP
	}
	else {
	    /* Port emigrated ... */
	    erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
	    enqueue_port(xrunq, pp);
	    ASSERT(pp->sched.exe_taskq);
	    pp->sched.exe_taskq = NULL;
	    erts_smp_runq_unlock(xrunq);
	    erts_smp_notify_inc_runq(xrunq);
	}
#endif
	port_was_enqueued = 1;
    }

    res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
	   != (erts_aint_t) 0);

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    port_taskq_free(ptqp);

    if (erts_system_profile_flags.runnable_ports && (port_was_enqueued != 1)) {
    	profile_runnable_port(pp, am_inactive);
    }

    /* trace port scheduling, out */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
    	trace_sched_ports(pp, am_out);
    }
#ifndef ERTS_SMP
    erts_port_release(pp);
#else
    {
	erts_aint_t refc;
	erts_smp_mtx_unlock(pp->lock);
	refc = erts_smp_atomic_dec_read_nob(&pp->refc);
	ASSERT(refc >= 0);
	if (refc == 0) {
	    erts_smp_runq_unlock(runq);
	    erts_port_cleanup(pp); /* Might acquire runq lock */
	    erts_smp_runq_lock(runq);
	    res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
		   != (erts_aint_t) 0);
	}
    }
#endif

 done:
    blk_data.resp = &res;

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds);

    return res;
}
Example #8
void
erts_port_task_free_port(Port *pp)
{
    ErtsRunQueue *runq;
    int port_is_dequeued = 0;

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
    ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
    runq = erts_port_runq(pp);
    ASSERT(runq);
    ERTS_PT_CHK_PRES_PORTQ(runq, pp);
    if (pp->sched.exe_taskq) {
	/* I (this thread) am currently executing this port, free it
	   when scheduled out... */
	ErtsPortTask *ptp = port_task_alloc();
	erts_smp_port_state_lock(pp);
	pp->status &= ~ERTS_PORT_SFLG_CLOSING;
	pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED;
	erts_may_save_closed_port(pp);
	erts_smp_port_state_unlock(pp);
	ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 1);
	ptp->type = ERTS_PORT_TASK_FREE;
	ptp->event = (ErlDrvEvent) -1;
	ptp->event_data = NULL;
	set_handle(ptp, NULL);
	push_task(pp->sched.exe_taskq, ptp);
	ERTS_PT_CHK_PRES_PORTQ(runq, pp);
	erts_smp_runq_unlock(runq);
    }
    else {
	ErtsPortTaskQueue *ptqp = pp->sched.taskq;
	if (ptqp) {
	    dequeue_port(runq, pp);
	    ERTS_PORT_NOT_IN_RUNQ(pp);
	    port_is_dequeued = 1;
	}
	erts_smp_port_state_lock(pp);
	pp->status &= ~ERTS_PORT_SFLG_CLOSING;
	pp->status |= ERTS_PORT_SFLG_FREE_SCHEDULED;
	erts_may_save_closed_port(pp);
	erts_smp_port_state_unlock(pp);
#ifdef ERTS_SMP
	erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
#endif
	ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
	handle_remaining_tasks(runq, pp); /* May release runq lock */
	ASSERT(!pp->sched.exe_taskq && (!ptqp || !ptqp->first));
	pp->sched.taskq = NULL;
	ERTS_PT_CHK_PRES_PORTQ(runq, pp);
#ifndef ERTS_SMP
	ASSERT(pp->status & ERTS_PORT_SFLG_PORT_DEBUG);
	erts_port_status_set(pp, ERTS_PORT_SFLG_FREE);	
#endif
	erts_smp_runq_unlock(runq);

	if (erts_system_profile_flags.runnable_ports && port_is_dequeued) {
    	    profile_runnable_port(pp, am_inactive);
    	}

	if (ptqp)
	    port_taskq_free(ptqp);
    }
}
Example #9
int
erts_port_task_abort(Eterm id, ErtsPortTaskHandle *pthp)
{
    ErtsRunQueue *runq;
    ErtsPortTaskQueue *ptqp;
    ErtsPortTask *ptp;
    Port *pp;
    int port_is_dequeued = 0;

    pp = &erts_port[internal_port_index(id)];
    runq = erts_port_runq(pp);

    ptp = handle2task(pthp);

    if (!ptp) {
	erts_smp_runq_unlock(runq);
	return 1;
    }

    ASSERT(ptp->handle == pthp);
    ptqp = ptp->queue;
    ASSERT(pp == ptqp->port);

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);
    ASSERT(ptqp);
    ASSERT(ptqp->first);

    dequeue_task(ptp);
    reset_handle(ptp);

    switch (ptp->type) {
    case ERTS_PORT_TASK_INPUT:
    case ERTS_PORT_TASK_OUTPUT:
    case ERTS_PORT_TASK_EVENT:
	ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) > 0);
	erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
	break;
    default:
	break;
    }

    ASSERT(ptqp == pp->sched.taskq || ptqp == pp->sched.exe_taskq);

    if (ptqp->first || pp->sched.taskq != ptqp)
	ptqp = NULL;
    else {
	pp->sched.taskq = NULL;
	if (!pp->sched.exe_taskq) {
	    dequeue_port(runq, pp);
	    ERTS_PORT_NOT_IN_RUNQ(pp);
	    port_is_dequeued = 1;
	}
    }

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    erts_smp_runq_unlock(runq);
    
    if (erts_system_profile_flags.runnable_ports && port_is_dequeued) {
    	profile_runnable_port(pp, am_inactive);
    }

    port_task_free(ptp);
    if (ptqp)
	port_taskq_free(ptqp);

    return 0;
}
Example #10
static ERTS_INLINE ErtsPortTask *
handle2task(ErtsPortTaskHandle *pthp)
{
    return (ErtsPortTask *) erts_smp_atomic_read_nob(pthp);
}
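The handle itself is an atomic word holding a pointer to the task. The matching writers (set_handle()/reset_handle(), used in Examples #8 and #9 above) presumably publish and clear that pointer with the corresponding no-barrier store; a hedged sketch of their likely shape (the actual definitions in erl_port_task.c may differ in detail):

static ERTS_INLINE void
set_handle(ErtsPortTask *ptp, ErtsPortTaskHandle *pthp)
{
    ptp->handle = pthp;
    if (pthp)
        erts_smp_atomic_set_nob(pthp, (erts_aint_t) ptp);
}

static ERTS_INLINE void
reset_handle(ErtsPortTask *ptp)
{
    if (ptp->handle)
        erts_smp_atomic_set_nob(ptp->handle, (erts_aint_t) NULL);
    ptp->handle = NULL;
}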
Example #11
/*
 * Entry point called by the trace wrap functions in erl_bif_wrap.c
 *
 * The trace wrap functions are themselves called through the export
 * entries instead of the original BIF functions.
 */
Eterm
erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
{
    Eterm result;
    Eterm (*func)(Process*, Eterm*, BeamInstr*);
    Export* ep = bif_export[bif_index];
    Uint32 flags = 0, flags_meta = 0;
    ErtsTracer meta_tracer = erts_tracer_nil;
    int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
					   * is actually in the
					   * export entry */
    BeamInstr *cp = p->cp;
    GenericBp* g;
    GenericBpData* bp = NULL;
    Uint bp_flags = 0;

    ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);

    g = (GenericBp *) ep->fake_op_func_info_for_hipe[1];
    if (g) {
	bp = &g->data[erts_active_bp_ix()];
	bp_flags = bp->flags;
    }

    /*
     * Make continuation pointer OK, it is not during direct BIF calls,
     * but it is correct during apply of bif.
     */
    if (!applying) {
	p->cp = I;
    }
    if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
	IS_TRACED_FL(p, F_TRACE_CALLS)) {
	int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
	flags = erts_call_trace(p, ep->code, bp->local_ms, args,
				local, &ERTS_TRACER(p));
    }
    if (bp_flags & ERTS_BPF_META_TRACE) {
	ErtsTracer old_tracer;

        meta_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
        old_tracer = meta_tracer;
	flags_meta = erts_call_trace(p, ep->code, bp->meta_ms, args,
				     0, &meta_tracer);

	if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) {
            ErtsTracer new_tracer = erts_tracer_nil;
            erts_tracer_update(&new_tracer, meta_tracer);
	    if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
                    &bp->meta_tracer->tracer,
                    (erts_aint_t)new_tracer,
                    (erts_aint_t)old_tracer)) {
                ERTS_TRACER_CLEAR(&old_tracer);
            } else {
                ERTS_TRACER_CLEAR(&new_tracer);
            }
	}
    }
    if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
	IS_TRACED_FL(p, F_TRACE_CALLS)) {
	BeamInstr *pc = (BeamInstr *)ep->code+3;
	erts_trace_time_call(p, pc, bp->time);
    }

    /* Restore original continuation pointer (if changed). */
    p->cp = cp;

    func = bif_table[bif_index].f;

    result = func(p, args, I);

    if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
	BeamInstr i_return_trace      = beam_return_trace[0];
	BeamInstr i_return_to_trace   = beam_return_to_trace[0];
	BeamInstr i_return_time_trace = beam_return_time_trace[0];
	Eterm *cpp;
	/* Maybe advance cp to skip trace stack frames */
	for (cpp = p->stop;  ;  cp = cp_val(*cpp++)) {
	    if (*cp == i_return_trace) {
		/* Skip stack frame variables */
		while (is_not_CP(*cpp)) cpp++;
		cpp += 2; /* Skip return_trace parameters */
	    } else if (*cp == i_return_time_trace) {
		/* Skip stack frame variables */
		while (is_not_CP(*cpp)) cpp++;
		cpp += 1; /* Skip return_time_trace parameters */
	    } else if (*cp == i_return_to_trace) {
		/* A return_to trace message is going to be generated
		 * by normal means, so we do not have to.
		 */
		cp = NULL;
		break;
	    } else break;
	}
    }

    /* Try to get these in the order
     * they usually appear in normal code... */
    if (is_non_value(result)) {
	Uint reason = p->freason;
	if (reason != TRAP) {
	    Eterm class;
	    Eterm value = p->fvalue;
	    /* Expand error value like in handle_error() */
	    if (reason & EXF_ARGLIST) {
		Eterm *tp;
		ASSERT(is_tuple(value));
		tp = tuple_val(value);
		value = tp[1];
	    }
	    if ((reason & EXF_THROWN) && (p->catches <= 0)) {
                Eterm *hp = HAlloc(p, 3);
		value = TUPLE2(hp, am_nocatch, value);
		reason = EXC_ERROR;
	    }
	    /* Note: expand_error_value() could theoretically
	     * allocate on the heap, but not for any error
	     * returned by a BIF, and it would do no harm,
	     * just be annoying.
	     */
	    value = expand_error_value(p, reason, value);
	    class = exception_tag[GET_EXC_CLASS(reason)];

	    if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
		erts_trace_exception(p, ep->code, class, value,
				     &meta_tracer);
	    }
	    if (flags & MATCH_SET_EXCEPTION_TRACE) {
		erts_trace_exception(p, ep->code, class, value,
				     &ERTS_TRACER(p));
	    }
	    /* ... remainder of this example truncated in the source listing ... */
Example #12
BeamInstr
erts_generic_breakpoint(Process* c_p, BeamInstr* I, Eterm* reg)
{
    GenericBp* g;
    GenericBpData* bp;
    Uint bp_flags;
    ErtsBpIndex ix = erts_active_bp_ix();

    g = (GenericBp *) I[-4];
    bp = &g->data[ix];
    bp_flags = bp->flags;
    ASSERT((bp_flags & ~ERTS_BPF_ALL) == 0);
    if (bp_flags & (ERTS_BPF_LOCAL_TRACE|
		    ERTS_BPF_GLOBAL_TRACE|
		    ERTS_BPF_TIME_TRACE_ACTIVE) &&
	!IS_TRACED_FL(c_p, F_TRACE_CALLS)) {
	bp_flags &= ~(ERTS_BPF_LOCAL_TRACE|
		      ERTS_BPF_GLOBAL_TRACE|
		      ERTS_BPF_TIME_TRACE|
		      ERTS_BPF_TIME_TRACE_ACTIVE);
	if (bp_flags == 0) {	/* Quick exit */
	    return g->orig_instr;
	}
    }

    if (bp_flags & ERTS_BPF_LOCAL_TRACE) {
	ASSERT((bp_flags & ERTS_BPF_GLOBAL_TRACE) == 0);
	(void) do_call_trace(c_p, I, reg, 1, bp->local_ms, erts_tracer_true);
    } else if (bp_flags & ERTS_BPF_GLOBAL_TRACE) {
	(void) do_call_trace(c_p, I, reg, 0, bp->local_ms, erts_tracer_true);
    }

    if (bp_flags & ERTS_BPF_META_TRACE) {
	ErtsTracer old_tracer, new_tracer;

	old_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);

	new_tracer = do_call_trace(c_p, I, reg, 1, bp->meta_ms, old_tracer);
	if (!ERTS_TRACER_COMPARE(new_tracer, old_tracer)) {
            if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
                    &bp->meta_tracer->tracer,
                    (erts_aint_t)new_tracer,
                    (erts_aint_t)old_tracer)) {
                ERTS_TRACER_CLEAR(&old_tracer);
            } else {
                ERTS_TRACER_CLEAR(&new_tracer);
            }
	}
    }

    if (bp_flags & ERTS_BPF_COUNT_ACTIVE) {
	erts_smp_atomic_inc_nob(&bp->count->acount);
    }

    if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE) {
	Eterm w;
	erts_trace_time_call(c_p, I, bp->time);
	w = (BeamInstr) *c_p->cp;
	if (! (w == (BeamInstr) BeamOp(op_i_return_time_trace) ||
	       w == (BeamInstr) BeamOp(op_return_trace) ||
	       w == (BeamInstr) BeamOp(op_i_return_to_trace)) ) {
	    Eterm* E = c_p->stop;
	    ASSERT(c_p->htop <= E && E <= c_p->hend);
	    if (E - 2 < c_p->htop) {
		(void) erts_garbage_collect(c_p, 2, reg, I[-1]);
		ERTS_VERIFY_UNUSED_TEMP_ALLOC(c_p);
	    }
	    E = c_p->stop;

	    ASSERT(c_p->htop <= E && E <= c_p->hend);

	    E -= 2;
	    E[0] = make_cp(I);
	    E[1] = make_cp(c_p->cp);     /* original return address */
	    c_p->cp = beam_return_time_trace;
	    c_p->stop = E;
	}
    }

    if (bp_flags & ERTS_BPF_DEBUG) {
	return (BeamInstr) BeamOp(op_i_debug_breakpoint);
    } else {
	return g->orig_instr;
    }
}
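Both this function and the erts_bif_trace examples update the meta tracer with a publish-or-discard compare-and-swap: the new tracer is installed only if the slot still holds the value that was read, and whichever tracer loses the race has its reference dropped. A standalone sketch of the idiom in C11 atomics (tracer_release() is a made-up stand-in for ERTS_TRACER_CLEAR()):

#include <stdatomic.h>
#include <stdint.h>

static void tracer_release(intptr_t t) { (void) t; /* drop one reference */ }

void publish_tracer(atomic_intptr_t *slot, intptr_t old_tracer, intptr_t new_tracer)
{
    intptr_t expected = old_tracer;
    /* acquire on success mirrors the _acqb ("acquire barrier") cmpxchg */
    if (atomic_compare_exchange_strong_explicit(
            slot, &expected, new_tracer,
            memory_order_acquire, memory_order_relaxed)) {
        tracer_release(old_tracer);   /* we replaced it: release the old one */
    } else {
        tracer_release(new_tracer);   /* lost the race: release our copy */
    }
}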
Example #13
UWord
erts_ranges_sz(void)
{
    return erts_smp_atomic_read_nob(&mem_used) * sizeof(Range);
}
Example #14
Uint
erts_tot_link_lh_size(void)
{
    return (Uint) erts_smp_atomic_read_nob(&tot_link_lh_size);
}
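Accessors like erts_ranges_sz() and erts_tot_link_lh_size() only read statistics counters that other code maintains with the matching atomic add. A hedged sketch of the maintenance side (the update sites and function names here are illustrative, not the actual OTP code):

static erts_smp_atomic_t tot_link_lh_size;

static void init_link_lh_stats(void)
{
    erts_smp_atomic_init_nob(&tot_link_lh_size, 0);
}

static void link_lh_alloced(Uint size)
{
    erts_smp_atomic_add_nob(&tot_link_lh_size, (erts_aint_t) size);
}

static void link_lh_freed(Uint size)
{
    erts_smp_atomic_add_nob(&tot_link_lh_size, -((erts_aint_t) size));
}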
Example #15
/*
 * Entry point called by the trace wrap functions in erl_bif_wrap.c
 *
 * The trace wrap functions are themselves called through the export
 * entries instead of the original BIF functions.
 */
Eterm
erts_bif_trace(int bif_index, Process* p, Eterm* args, BeamInstr* I)
{
    Eterm result;
    Eterm (*func)(Process*, Eterm*, BeamInstr*);
    Export* ep = bif_export[bif_index];
    Uint32 flags = 0, flags_meta = 0;
    ErtsTracer meta_tracer = erts_tracer_nil;
    int applying = (I == ep->beam); /* Yup, the apply code for a bif
                                      * is actually in the
                                      * export entry */
    BeamInstr *cp = p->cp;
    GenericBp* g;
    GenericBpData* bp = NULL;
    Uint bp_flags = 0;

    ERTS_SMP_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p);

    g = ep->info.u.gen_bp;
    if (g) {
	bp = &g->data[erts_active_bp_ix()];
	bp_flags = bp->flags;
    }

    /*
     * Make continuation pointer OK, it is not during direct BIF calls,
     * but it is correct during apply of bif.
     */
    if (!applying) {
	p->cp = I;
    }
    if (bp_flags & (ERTS_BPF_LOCAL_TRACE|ERTS_BPF_GLOBAL_TRACE) &&
	IS_TRACED_FL(p, F_TRACE_CALLS)) {
	int local = !!(bp_flags & ERTS_BPF_LOCAL_TRACE);
	flags = erts_call_trace(p, &ep->info, bp->local_ms, args,
				local, &ERTS_TRACER(p));
    }
    if (bp_flags & ERTS_BPF_META_TRACE) {
	ErtsTracer old_tracer;

        meta_tracer = erts_smp_atomic_read_nob(&bp->meta_tracer->tracer);
        old_tracer = meta_tracer;
	flags_meta = erts_call_trace(p, &ep->info, bp->meta_ms, args,
				     0, &meta_tracer);

	if (!ERTS_TRACER_COMPARE(old_tracer, meta_tracer)) {
            ErtsTracer new_tracer = erts_tracer_nil;
            erts_tracer_update(&new_tracer, meta_tracer);
	    if (old_tracer == erts_smp_atomic_cmpxchg_acqb(
                    &bp->meta_tracer->tracer,
                    (erts_aint_t)new_tracer,
                    (erts_aint_t)old_tracer)) {
                ERTS_TRACER_CLEAR(&old_tracer);
            } else {
                ERTS_TRACER_CLEAR(&new_tracer);
            }
	}
    }
    if (bp_flags & ERTS_BPF_TIME_TRACE_ACTIVE &&
	IS_TRACED_FL(p, F_TRACE_CALLS)) {
	erts_trace_time_call(p, &ep->info, bp->time);
    }

    /* Restore original continuation pointer (if changed). */
    p->cp = cp;

    func = bif_table[bif_index].f;

    result = func(p, args, I);

    if (erts_nif_export_check_save_trace(p, result,
					 applying, ep,
					 cp, flags,
					 flags_meta, I,
					 meta_tracer)) {
	/*
	 * erts_bif_trace_epilogue() will be called
	 * later when appropriate via the NIF export
	 * scheduling functionality...
	 */
	return result;
    }

    return erts_bif_trace_epilogue(p, result, applying, ep, cp,
				   flags, flags_meta, I,
				   meta_tracer);
}
Example #16
Process *
erts_pid2proc_opt(Process *c_p,
		  ErtsProcLocks c_p_have_locks,
		  Eterm pid,
		  ErtsProcLocks pid_need_locks,
		  int flags)
{
    Process *dec_refc_proc = NULL;
    int need_ptl;
    ErtsProcLocks need_locks;
    Uint pix;
    Process *proc;
#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
    ErtsProcLocks lcnt_locks;
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
    if (c_p) {
	ErtsProcLocks might_unlock = c_p_have_locks & pid_need_locks;
	if (might_unlock)
	    erts_proc_lc_might_unlock(c_p, might_unlock);
    }
#endif

    if (is_not_internal_pid(pid))
	return NULL;
    pix = internal_pid_index(pid);

    ERTS_LC_ASSERT((pid_need_locks & ERTS_PROC_LOCKS_ALL) == pid_need_locks);
    need_locks = pid_need_locks;

    if (c_p && c_p->id == pid) {
	ASSERT(c_p->id != ERTS_INVALID_PID);
	ASSERT(c_p == erts_pix2proc(pix));

	if (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
	    && ERTS_PROC_IS_EXITING(c_p))
	    return NULL;
	need_locks &= ~c_p_have_locks;
	if (!need_locks) {
	    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
		erts_smp_proc_inc_refc(c_p);
	    return c_p;
	}
    }

    need_ptl = !erts_get_scheduler_id();

    if (need_ptl)
	erts_smp_rwmtx_rwlock(&erts_proc_tab_rwmtx);

    proc = (Process *) erts_smp_atomic_read_ddrb(&erts_proc.tab[pix]);

    if (proc) {
	if (proc->id != pid)
	    proc = NULL;
	else if (!need_locks) {
	    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
		erts_smp_proc_inc_refc(proc);
	}
	else {
	    int busy;

#if ERTS_PROC_LOCK_OWN_IMPL
#ifdef ERTS_ENABLE_LOCK_COUNT
	    lcnt_locks = need_locks;
	    if (!(flags & ERTS_P2P_FLG_TRY_LOCK)) {
	    	erts_lcnt_proc_lock(&proc->lock, need_locks);
	    }
#endif

#ifdef ERTS_ENABLE_LOCK_CHECK
	    /* Make sure erts_pid2proc_safelock() is enough to handle
	       a potential lock order violation situation... */
	    busy = erts_proc_lc_trylock_force_busy(proc, need_locks);
	    if (!busy)
#endif
#endif /* ERTS_PROC_LOCK_OWN_IMPL */
	    {
		/* Try a quick trylock to grab all the locks we need. */
		busy = (int) erts_smp_proc_raw_trylock__(proc, need_locks);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_CHECK)
		erts_proc_lc_trylock(proc, need_locks, !busy);
#endif
#ifdef ERTS_PROC_LOCK_DEBUG
		if (!busy)
		    erts_proc_lock_op_debug(proc, need_locks, 1);
#endif
	    }

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
	    if (flags & ERTS_P2P_FLG_TRY_LOCK)
		erts_lcnt_proc_trylock(&proc->lock, need_locks,
				       busy ? EBUSY : 0);
#endif

	    if (!busy) {
		if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
		    erts_smp_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
	    	/* all is great */
	    	if (!(flags & ERTS_P2P_FLG_TRY_LOCK))
		    erts_lcnt_proc_lock_post_x(&proc->lock, lcnt_locks,
					       __FILE__, __LINE__);
#endif

	    }
	    else {
		if (flags & ERTS_P2P_FLG_TRY_LOCK)
		    proc = ERTS_PROC_LOCK_BUSY;
		else {
		    if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
			erts_smp_proc_inc_refc(proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_ENABLE_LOCK_COUNT)
		    erts_lcnt_proc_lock_unaquire(&proc->lock, lcnt_locks);
#endif

		    if (need_ptl) {
			erts_smp_proc_inc_refc(proc);
			dec_refc_proc = proc;
			erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);
			need_ptl = 0;
		    }

		    proc_safelock(!need_ptl,
				  c_p,
				  c_p_have_locks,
				  c_p_have_locks,
				  proc,
				  0,
				  need_locks);
		}
	    }
        }
    }

    if (need_ptl)
	erts_smp_rwmtx_rwunlock(&erts_proc_tab_rwmtx);

    if (need_locks
	&& proc
	&& proc != ERTS_PROC_LOCK_BUSY
	&& (!(flags & ERTS_P2P_FLG_ALLOW_OTHER_X)
	    ? ERTS_PROC_IS_EXITING(proc)
	    : (proc
	       != (Process *) erts_smp_atomic_read_nob(&erts_proc.tab[pix])))) {

	erts_smp_proc_unlock(proc, need_locks);

	if (flags & ERTS_P2P_FLG_SMP_INC_REFC)
	    dec_refc_proc = proc;
	proc = NULL;

    }

    if (dec_refc_proc)
	erts_smp_proc_dec_refc(dec_refc_proc);

#if ERTS_PROC_LOCK_OWN_IMPL && defined(ERTS_PROC_LOCK_DEBUG)
    ERTS_LC_ASSERT(!proc
		   || proc == ERTS_PROC_LOCK_BUSY
		   || (pid_need_locks ==
		       (ERTS_PROC_LOCK_FLGS_READ_(&proc->lock)
			& pid_need_locks)));
#endif

    return proc;
}