Example #1
File: erl_nif.c Project: a5an0/otp
void erts_pre_nif(ErlNifEnv* env, Process* p, struct erl_module_nif* mod_nif)
{
    env->mod_nif = mod_nif;
    env->proc = p;
    /* Term allocation window: the free space on the process heap */
    env->hp = HEAP_TOP(p);
    env->hp_end = HEAP_LIMIT(p);
    env->heap_frag = NULL;
    /* Block FP exceptions; erts_block_fpe() returns the previous state */
    env->fpe_was_unmasked = erts_block_fpe();
    env->tmp_obj_list = NULL;
}
Example #2
File: erl_nif.c Project: a5an0/otp
static void pre_nif_noproc(ErlNifEnv* env, struct erl_module_nif* mod_nif)
{
    env->mod_nif = mod_nif;
    /* No associated process: no process heap to allocate on */
    env->proc = NULL;
    env->hp = NULL;
    env->hp_end = NULL;
    env->heap_frag = NULL;
    env->fpe_was_unmasked = erts_block_fpe();
    env->tmp_obj_list = NULL;
}
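
Both initializers above save the return value of erts_block_fpe(), and the later examples restore it with erts_unblock_fpe() around calls into native driver or NIF code. A minimal sketch of that pairing, with a hypothetical do_native_work() standing in for the actual callback (not part of OTP):

{
    /* Block FP exceptions, remembering whether they were unmasked before */
    int fpe_was_unmasked = erts_block_fpe();
    do_native_work();                     /* hypothetical native callback */
    /* Restore the previous FP exception state */
    erts_unblock_fpe(fpe_was_unmasked);
}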
Example #3
static int do_unload_driver_entry(DE_Handle *dh, Eterm *save_name)
{
    erts_driver_t *q, *p = driver_list;

    assert_drv_list_rwlocked();

    while (p != NULL) {
	if (p->handle == dh) {
	    /* Unlink this entry from the doubly linked driver_list */
	    q = p;
	    if (p->prev == NULL) {
		driver_list = p->next;
	    } else {
		p->prev->next = p->next;
	    }
	    if (p->next != NULL) {
		p->next->prev = p->prev;
	    }

	    if (save_name != NULL) {
		*save_name = mkatom(q->name);
	    } 
	    /* Future locking problems? Don't dare to let go of the
	       driver_list lock here! */
	    if (q->finish) {
		int fpe_was_unmasked = erts_block_fpe();
		DTRACE1(driver_finish, q->name);
                LTTNG1(driver_finish, q->name);
		(*(q->finish))();
		erts_unblock_fpe(fpe_was_unmasked);
	    }
	    erts_sys_ddll_close(dh->handle);
	    erts_destroy_driver(q);
	    return 1;
	}
	p = p->next;
    }
    return 0;
}
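
do_unload_driver_entry invokes the driver's finish callback, if present, with FP exceptions blocked and while still holding the driver_list lock. On the driver side this is the global cleanup hook, which takes no arguments; a hedged sketch, assuming the erl_driver.h callback shape (my_finish and the freed state are illustrative only):

/* Global cleanup, run once when the driver is unloaded */
static void my_finish(void)
{
    /* e.g. release resources shared by all ports of this driver
       (illustrative; depends on what the driver allocated at load time) */
}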
Example #4
int
erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
{
    int port_was_enqueued = 0;
    Port *pp;
    ErtsPortTaskQueue *ptqp;
    ErtsPortTask *ptp;
    int res = 0;
    int reds = ERTS_PORT_REDS_EXECUTE;
    erts_aint_t io_tasks_executed = 0;
    int fpe_was_unmasked;
    ErtsPortTaskExeBlockData blk_data = {runq, NULL};

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PT_CHK_PORTQ(runq);

    pp = pop_port(runq);
    if (!pp) {
	res = 0;
	goto done;
    }

    ERTS_PORT_NOT_IN_RUNQ(pp);

    *curr_port_pp = pp;

    ASSERT(pp->sched.taskq);
    ASSERT(pp->sched.taskq->first);
    ptqp = pp->sched.taskq;
    pp->sched.taskq = NULL;

    ASSERT(!pp->sched.exe_taskq);
    pp->sched.exe_taskq = ptqp;

    if (erts_smp_port_trylock(pp) == EBUSY) {
	erts_smp_runq_unlock(runq);
	erts_smp_port_lock(pp);
	erts_smp_runq_lock(runq);
    }
    
    if (erts_sched_stat.enabled) {
	ErtsSchedulerData *esdp = erts_get_scheduler_data();
	Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no);
	int migrated = old && old != esdp->no;

	erts_smp_spin_lock(&erts_sched_stat.lock);
	erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_executed++;
	erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].executed++;
	if (migrated) {
	    erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_migrated++;
	    erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].migrated++;
	}
	erts_smp_spin_unlock(&erts_sched_stat.lock);
    }

    /* trace port scheduling, in */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
	trace_sched_ports(pp, am_in);
    }

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);
    ptp = pop_task(ptqp);

    fpe_was_unmasked = erts_block_fpe();

    while (ptp) {
	ASSERT(pp->sched.taskq != pp->sched.exe_taskq);

	reset_handle(ptp);
	erts_smp_runq_unlock(runq);

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
	ERTS_SMP_CHK_NO_PROC_LOCKS;
	ASSERT(pp->drv_ptr);

	switch (ptp->type) {
	case ERTS_PORT_TASK_FREE: /* May be pushed in q at any time */
	    reds += ERTS_PORT_REDS_FREE;
	    erts_smp_runq_lock(runq);

	    erts_unblock_fpe(fpe_was_unmasked);
	    ASSERT(pp->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
	    if (ptqp->first || (pp->sched.taskq && pp->sched.taskq->first))
		handle_remaining_tasks(runq, pp);
	    ASSERT(!ptqp->first
		   && (!pp->sched.taskq || !pp->sched.taskq->first));
#ifdef ERTS_SMP
	    erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
	    ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
#else
	    erts_port_status_bor_set(pp, ERTS_PORT_SFLG_FREE);	    
#endif

	    port_task_free(ptp);
	    if (pp->sched.taskq)
		port_taskq_free(pp->sched.taskq);
	    pp->sched.taskq = NULL;

	    goto tasks_done;
	case ERTS_PORT_TASK_TIMEOUT:
	    reds += ERTS_PORT_REDS_TIMEOUT;
	    if (!(pp->status & ERTS_PORT_SFLGS_DEAD))
		(*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
	    break;
	case ERTS_PORT_TASK_INPUT:
	    reds += ERTS_PORT_REDS_INPUT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    /* NOTE some windows drivers use ->ready_input for input and output */
	    (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data, ptp->event);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_OUTPUT:
	    reds += ERTS_PORT_REDS_OUTPUT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data, ptp->event);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_EVENT:
	    reds += ERTS_PORT_REDS_EVENT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data, ptp->event, ptp->event_data);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_DIST_CMD:
	    reds += erts_dist_command(pp, CONTEXT_REDS-reds);
	    break;
	default:
	    erl_exit(ERTS_ABORT_EXIT,
		     "Invalid port task type: %d\n",
		     (int) ptp->type);
	    break;
	}

	if ((pp->status & ERTS_PORT_SFLG_CLOSING)
	    && erts_is_port_ioq_empty(pp)) {
	    reds += ERTS_PORT_REDS_TERMINATE;
	    erts_terminate_port(pp);
	}

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

#ifdef ERTS_SMP
	if (pp->xports)
	    erts_smp_xports_unlock(pp);
	ASSERT(!pp->xports);
#endif

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

	port_task_free(ptp);

	erts_smp_runq_lock(runq);

	ptp = pop_task(ptqp);
    }

 tasks_done:

    erts_unblock_fpe(fpe_was_unmasked);

    if (io_tasks_executed) {
	ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
	       >= io_tasks_executed);
	erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
				 -1*io_tasks_executed);
    }

    *curr_port_pp = NULL;

#ifdef ERTS_SMP
    ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
#endif

    if (!pp->sched.taskq) {
	ASSERT(pp->sched.exe_taskq);
	pp->sched.exe_taskq = NULL;
    }
    else {
#ifdef ERTS_SMP
	ErtsRunQueue *xrunq;
#endif

	ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
	ASSERT(pp->sched.taskq->first);

#ifdef ERTS_SMP
	xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
	if (!xrunq) {
#endif
	    enqueue_port(runq, pp);
	    ASSERT(pp->sched.exe_taskq);
	    pp->sched.exe_taskq = NULL;
	    /* No need to notify ourselves about inc in runq. */
#ifdef ERTS_SMP
	}
	else {
	    /* Port emigrated ... */
	    erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
	    enqueue_port(xrunq, pp);
	    ASSERT(pp->sched.exe_taskq);
	    pp->sched.exe_taskq = NULL;
	    erts_smp_runq_unlock(xrunq);
	    erts_smp_notify_inc_runq(xrunq);
	}
#endif
	port_was_enqueued = 1;
    }

    res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
	   != (erts_aint_t) 0);

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    port_taskq_free(ptqp);

    if (erts_system_profile_flags.runnable_ports && (port_was_enqueued != 1)) {
    	profile_runnable_port(pp, am_inactive);
    }

    /* trace port scheduling, out */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
    	trace_sched_ports(pp, am_out);
    }
#ifndef ERTS_SMP
    erts_port_release(pp);
#else
    {
	erts_aint_t refc;
	erts_smp_mtx_unlock(pp->lock);
	refc = erts_smp_atomic_dec_read_nob(&pp->refc);
	ASSERT(refc >= 0);
	if (refc == 0) {
	    erts_smp_runq_unlock(runq);
	    erts_port_cleanup(pp); /* Might acquire runq lock */
	    erts_smp_runq_lock(runq);
	    res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
		   != (erts_aint_t) 0);
	}
    }
#endif

 done:
    blk_data.resp = &res;

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds);

    return res;
}
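
The switch inside the task loop above dispatches each task type to the port driver's callbacks: timeout, ready_input, ready_output, and event, all invoked with FP exceptions blocked. A hedged sketch of what these look like on the driver side, assuming the public erl_driver.h signatures; the my_* names and the per-port state struct are illustrative only:

#include "erl_driver.h"

typedef struct { int fd; } my_drv_data;   /* hypothetical per-port state */

/* ERTS_PORT_TASK_TIMEOUT: runs after a driver_set_timer() deadline expires */
static void my_timeout(ErlDrvData handle)
{
    my_drv_data *d = (my_drv_data *) handle;
    (void) d;                 /* e.g. retry or abort a pending operation */
}

/* ERTS_PORT_TASK_INPUT: the selected event became readable */
static void my_ready_input(ErlDrvData handle, ErlDrvEvent event)
{
    my_drv_data *d = (my_drv_data *) handle;
    (void) d; (void) event;   /* e.g. read from the descriptor behind event */
}

/* ERTS_PORT_TASK_OUTPUT: the selected event became writable */
static void my_ready_output(ErlDrvData handle, ErlDrvEvent event)
{
    my_drv_data *d = (my_drv_data *) handle;
    (void) d; (void) event;   /* e.g. flush queued output */
}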
Example #5
static BIF_RETTYPE
port_call(Process* c_p, Eterm arg1, Eterm arg2, Eterm arg3)
{
    Uint op;
    Port *p;
    Uint size;
    byte *bytes;
    byte *endp;
    ErlDrvSizeT real_size;
    erts_driver_t *drv;
    byte port_input[256];	/* Default input buffer to encode in */
    byte port_result[256];	/* Buffer for result from port. */
    byte* port_resp;		/* Pointer to result buffer. */
    char *prc;
    ErlDrvSSizeT ret;
    Eterm res;
    Sint result_size;
    Eterm *hp;
    Eterm *hp_end;              /* To satisfy hybrid heap architecture */
    unsigned ret_flags = 0U;
    int fpe_was_unmasked;

    bytes = &port_input[0];
    port_resp = port_result;
    /* trace of port scheduling with virtual process descheduling
     * lock wait 
     */
    if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
	trace_virtual_sched(c_p, am_out);
    }

    if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
	profile_runnable_proc(c_p, am_inactive);
    }

    p = id_or_name2port(c_p, arg1);
    if (!p) {
    error:
	if (port_resp != port_result && 
	    !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) {
	    driver_free(port_resp);
	}
	if (bytes != &port_input[0])
	    erts_free(ERTS_ALC_T_PORT_CALL_BUF, bytes);
	/* Need to virtual schedule in the process if there
	 * was an error.
	 */
	if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
	    trace_virtual_sched(c_p, am_in);
    	}

	if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
	    profile_runnable_proc(c_p, am_active);
    	}

	if (p)
	    erts_port_release(p);
#ifdef ERTS_SMP
	ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN);
#else
	ERTS_BIF_CHK_EXITED(c_p);
#endif
	BIF_ERROR(c_p, BADARG);
    }

    if ((drv = p->drv_ptr) == NULL) {
	goto error;
    }
    if (drv->call == NULL) {
	goto error;
    }
    if (!term_to_Uint(arg2, &op)) {
	goto error;
    }
    p->caller = c_p->id;
    
    /* Lock taken, virtual schedule of port */
    if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
    	trace_sched_ports_where(p, am_in, am_call);
    }
    
    if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
    	profile_runnable_port(p, am_active);
    }
    size = erts_encode_ext_size(arg3);
    if (size > sizeof(port_input))
	bytes = erts_alloc(ERTS_ALC_T_PORT_CALL_BUF, size);

    endp = bytes;
    erts_encode_ext(arg3, &endp);

    real_size = endp - bytes;
    if (real_size > size) {
	erl_exit(1, "%s, line %d: buffer overflow: %d word(s)\n",
		 __FILE__, __LINE__, endp - (bytes + size));
    }
    erts_smp_proc_unlock(c_p, ERTS_PROC_LOCK_MAIN);
#ifdef USE_VM_PROBES
    if (DTRACE_ENABLED(driver_call)) {
        DTRACE_CHARBUF(process_str, DTRACE_TERM_BUF_SIZE);
        DTRACE_CHARBUF(port_str, DTRACE_TERM_BUF_SIZE);

        dtrace_pid_str(p->connected, process_str);
        dtrace_port_str(p, port_str);
        DTRACE5(driver_call, process_str, port_str, p->name, op, real_size);
    }
#endif
    prc  = (char *) port_resp;
    fpe_was_unmasked = erts_block_fpe();
    ret = drv->call((ErlDrvData)p->drv_data, 
		    (unsigned) op,
		    (char *) bytes, 
		    (int) real_size,
		    &prc, 
		    (int) sizeof(port_result),
		    &ret_flags);
    erts_unblock_fpe(fpe_was_unmasked);
    if (IS_TRACED_FL(p, F_TRACE_SCHED_PORTS)) {
    	trace_sched_ports_where(p, am_out, am_call);
    }
    
    if (erts_system_profile_flags.runnable_ports && !erts_port_is_scheduled(p)) {
    	profile_runnable_port(p, am_inactive);
    }
   
    port_resp = (byte *) prc;
    p->caller = NIL;
    erts_smp_proc_lock(c_p, ERTS_PROC_LOCK_MAIN);
#ifdef HARDDEBUG
    { 
	ErlDrvSizeT z;
	printf("real_size = %ld,%d, ret = %ld,%d\r\n", (unsigned long) real_size,
	       (int) real_size, (unsigned long)ret, (int) ret);
	printf("[");
	for(z = 0; z < real_size; ++z) {
	    printf("%d, ",(int) bytes[z]);
	}
	printf("]\r\n");
	printf("[");
	for(z = 0; z < ret; ++z) {
	    printf("%d, ",(int) port_resp[z]);
	}
	printf("]\r\n");
    }
#endif
    if (ret <= 0 || port_resp[0] != VERSION_MAGIC) { 
	/* Error or a binary without magic/ with wrong magic */
	goto error;
    }
    result_size = erts_decode_ext_size(port_resp, ret);
    if (result_size < 0) {
	goto error;
    }
    hp = HAlloc(c_p, result_size);
    hp_end = hp + result_size;
    endp = port_resp;
    res = erts_decode_ext(&hp, &MSO(c_p), &endp);
    if (res == THE_NON_VALUE) {
	goto error;
    }
    HRelease(c_p, hp_end, hp);
    if (port_resp != port_result && !(ret_flags & DRIVER_CALL_KEEP_BUFFER)) {
	driver_free(port_resp);
    }
    if (bytes != &port_input[0])
	erts_free(ERTS_ALC_T_PORT_CALL_BUF, bytes);
    if (p)
	erts_port_release(p);
#ifdef ERTS_SMP
    ERTS_SMP_BIF_CHK_PENDING_EXIT(c_p, ERTS_PROC_LOCK_MAIN);
#else
    ERTS_BIF_CHK_EXITED(c_p);
#endif
    if (IS_TRACED_FL(c_p, F_TRACE_SCHED_PROCS)) {
	trace_virtual_sched(c_p, am_in);
    }

    if (erts_system_profile_flags.runnable_procs && erts_system_profile_flags.exclusive) {
	profile_runnable_proc(c_p, am_active);
    }
  
    return res;
}
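
port_call encodes arg3 into external term format, hands the buffer to the driver's call callback with FP exceptions blocked, and then decodes the reply, which must again start with VERSION_MAGIC. A hedged sketch of a driver-side call callback that simply echoes the encoded request back, assuming the erl_driver.h signature for call; the my_call name and the echo behaviour are illustrative only:

#include <string.h>
#include "erl_driver.h"

/* Echo the externally encoded input term back as the reply.
 * Returning a buffer other than rbuf makes the emulator driver_free()
 * it afterwards, unless DRIVER_CALL_KEEP_BUFFER is set in *flags
 * (see the cleanup paths in port_call above). */
static ErlDrvSSizeT my_call(ErlDrvData handle, unsigned int command,
                            char *buf, ErlDrvSizeT len,
                            char **rbuf, ErlDrvSizeT rlen,
                            unsigned int *flags)
{
    (void) handle; (void) command; (void) flags;
    if (len > rlen) {
        char *out = driver_alloc(len);   /* emulator frees this copy */
        if (out == NULL)
            return -1;                   /* ret <= 0 makes port_call fail */
        *rbuf = out;
    }
    memcpy(*rbuf, buf, len);             /* still starts with VERSION_MAGIC */
    return (ErlDrvSSizeT) len;
}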