/*
 * Schedule a task on a port. Returns 0 on success; -1 if the port
 * is dead or otherwise invalid (in which case no task is scheduled).
 */
int
erts_port_task_schedule(Eterm id,
                        ErtsPortTaskHandle *pthp,
                        ErtsPortTaskType type,
                        ErlDrvEvent event,
                        ErlDrvEventData event_data)
{
    ErtsRunQueue *runq;
    Port *pp;
    ErtsPortTask *ptp;
    int enq_port = 0;

    /*
     * NOTE: We might not have the port lock here. We are only
     *       allowed to access the 'sched', 'tab_status',
     *       and 'id' fields of the port struct while
     *       tasks_lock is held.
     */

    if (pthp && erts_port_task_is_scheduled(pthp)) {
        ASSERT(0);
        erts_port_task_abort(id, pthp);
    }

    ptp = port_task_alloc();

    ASSERT(is_internal_port(id));
    pp = &erts_port[internal_port_index(id)];
    runq = erts_port_runq(pp);

    if (!runq || ERTS_PORT_TASK_INVALID_PORT(pp, id)) {
        if (runq)
            erts_smp_runq_unlock(runq);
        return -1;
    }

    ASSERT(!erts_port_task_is_scheduled(pthp));

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    if (!pp->sched.taskq) {
        pp->sched.taskq = port_taskq_init(port_taskq_alloc(), pp);
        enq_port = !pp->sched.exe_taskq;
    }

#ifdef ERTS_SMP
    if (enq_port) {
        ErtsRunQueue *xrunq = erts_check_emigration_need(runq,
                                                         ERTS_PORT_PRIO_LEVEL);
        if (xrunq) {
            /* Port emigrated ... */
            erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
            erts_smp_runq_unlock(runq);
            runq = xrunq;
        }
    }
#endif

    ASSERT(!enq_port || !(runq->flags & ERTS_RUNQ_FLG_SUSPENDED));
    ASSERT(pp->sched.taskq);
    ASSERT(ptp);

    ptp->type = type;
    ptp->event = event;
    ptp->event_data = event_data;

    set_handle(ptp, pthp);

    switch (type) {
    case ERTS_PORT_TASK_FREE:
        erl_exit(ERTS_ABORT_EXIT,
                 "erts_port_task_schedule(): Cannot schedule free task\n");
        break;
    case ERTS_PORT_TASK_INPUT:
    case ERTS_PORT_TASK_OUTPUT:
    case ERTS_PORT_TASK_EVENT:
        erts_smp_atomic_inc_relb(&erts_port_task_outstanding_io_tasks);
        /* Fall through... */
    default:
        enqueue_task(pp->sched.taskq, ptp);
        break;
    }

#ifndef ERTS_SMP
    /*
     * When (!enq_port && !pp->sched.exe_taskq) is true in the smp case,
     * the port might not be in the run queue. If this is the case, another
     * thread is in the process of enqueueing the port. This very seldom
     * occurs, but it does occur and is a valid scenario. Debug info showing
     * this enqueue in progress must be introduced before we can enable
     * (modified versions of these) assertions in the smp case again.
     */
#if defined(HARD_DEBUG)
    if (pp->sched.exe_taskq || enq_port)
        ERTS_PT_CHK_NOT_IN_PORTQ(runq, pp);
    else
        ERTS_PT_CHK_IN_PORTQ(runq, pp);
#elif defined(DEBUG)
    if (!enq_port && !pp->sched.exe_taskq) {
        /* We should be in port run q */
        ASSERT(pp->sched.prev || runq->ports.start == pp);
    }
#endif
#endif

    if (!enq_port) {
        ERTS_PT_CHK_PRES_PORTQ(runq, pp);
        erts_smp_runq_unlock(runq);
    }
    else {
        enqueue_port(runq, pp);
        ERTS_PT_CHK_PRES_PORTQ(runq, pp);

        if (erts_system_profile_flags.runnable_ports) {
            profile_runnable_port(pp, am_active);
        }

        erts_smp_runq_unlock(runq);

        erts_smp_notify_inc_runq(runq);
    }
    return 0;
}
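/*
 * Example (illustrative sketch only; 'pth' and 'fd' are hypothetical
 * names, not defined in this file): when the check-I/O layer sees a
 * file descriptor become readable, scheduling the corresponding input
 * task looks roughly like this:
 *
 *     res = erts_port_task_schedule(id, &pth, ERTS_PORT_TASK_INPUT,
 *                                   (ErlDrvEvent) fd, NULL);
 *     if (res != 0) {
 *         ... port is dead or invalid; stop selecting on fd ...
 *     }
 *
 * A zero return value means the task was enqueued and the port is in
 * (or on its way into) a run queue; -1 means the port was invalid and
 * no task was scheduled.
 */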
/*
 * Execute the scheduled tasks of the first port in the run queue.
 * Returns nonzero while outstanding I/O tasks remain on any run queue.
 */
int
erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
{
    int port_was_enqueued = 0;
    Port *pp;
    ErtsPortTaskQueue *ptqp;
    ErtsPortTask *ptp;
    int res = 0;
    int reds = ERTS_PORT_REDS_EXECUTE;
    erts_aint_t io_tasks_executed = 0;
    int fpe_was_unmasked;
    ErtsPortTaskExeBlockData blk_data = {runq, NULL};

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PT_CHK_PORTQ(runq);

    pp = pop_port(runq);
    if (!pp) {
        res = 0;
        goto done;
    }

    ERTS_PORT_NOT_IN_RUNQ(pp);

    *curr_port_pp = pp;

    ASSERT(pp->sched.taskq);
    ASSERT(pp->sched.taskq->first);
    ptqp = pp->sched.taskq;
    pp->sched.taskq = NULL;

    ASSERT(!pp->sched.exe_taskq);
    pp->sched.exe_taskq = ptqp;

    if (erts_smp_port_trylock(pp) == EBUSY) {
        erts_smp_runq_unlock(runq);
        erts_smp_port_lock(pp);
        erts_smp_runq_lock(runq);
    }

    if (erts_sched_stat.enabled) {
        ErtsSchedulerData *esdp = erts_get_scheduler_data();
        Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no);
        int migrated = old && old != esdp->no;

        erts_smp_spin_lock(&erts_sched_stat.lock);
        erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_executed++;
        erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].executed++;
        if (migrated) {
            erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_migrated++;
            erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].migrated++;
        }
        erts_smp_spin_unlock(&erts_sched_stat.lock);
    }

    /* trace port scheduling, in */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
        trace_sched_ports(pp, am_in);
    }

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);
    ptp = pop_task(ptqp);

    fpe_was_unmasked = erts_block_fpe();

    while (ptp) {
        ASSERT(pp->sched.taskq != pp->sched.exe_taskq);

        reset_handle(ptp);
        erts_smp_runq_unlock(runq);

        ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
        ERTS_SMP_CHK_NO_PROC_LOCKS;
        ASSERT(pp->drv_ptr);

        switch (ptp->type) {
        case ERTS_PORT_TASK_FREE: /* May be pushed onto the queue at any time */
            reds += ERTS_PORT_REDS_FREE;
            erts_smp_runq_lock(runq);

            erts_unblock_fpe(fpe_was_unmasked);
            ASSERT(pp->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
            if (ptqp->first || (pp->sched.taskq && pp->sched.taskq->first))
                handle_remaining_tasks(runq, pp);
            ASSERT(!ptqp->first
                   && (!pp->sched.taskq || !pp->sched.taskq->first));
#ifdef ERTS_SMP
            erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
            ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
#else
            erts_port_status_bor_set(pp, ERTS_PORT_SFLG_FREE);
#endif

            port_task_free(ptp);
            if (pp->sched.taskq)
                port_taskq_free(pp->sched.taskq);
            pp->sched.taskq = NULL;

            goto tasks_done;
        case ERTS_PORT_TASK_TIMEOUT:
            reds += ERTS_PORT_REDS_TIMEOUT;
            if (!(pp->status & ERTS_PORT_SFLGS_DEAD))
                (*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
            break;
        case ERTS_PORT_TASK_INPUT:
            reds += ERTS_PORT_REDS_INPUT;
            ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
            /* NOTE: some Windows drivers use ->ready_input
               for both input and output */
            (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data,
                                        ptp->event);
            io_tasks_executed++;
            break;
        case ERTS_PORT_TASK_OUTPUT:
            reds += ERTS_PORT_REDS_OUTPUT;
            ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
            (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data,
                                         ptp->event);
            io_tasks_executed++;
            break;
        case ERTS_PORT_TASK_EVENT:
            reds += ERTS_PORT_REDS_EVENT;
            ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
            (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data,
                                  ptp->event,
                                  ptp->event_data);
            io_tasks_executed++;
            break;
        case ERTS_PORT_TASK_DIST_CMD:
            reds += erts_dist_command(pp, CONTEXT_REDS-reds);
            break;
        default:
            erl_exit(ERTS_ABORT_EXIT,
                     "Invalid port task type: %d\n",
                     (int) ptp->type);
            break;
        }

        if ((pp->status & ERTS_PORT_SFLG_CLOSING)
            && erts_is_port_ioq_empty(pp)) {
            reds += ERTS_PORT_REDS_TERMINATE;
            erts_terminate_port(pp);
        }

        ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

#ifdef ERTS_SMP
        if (pp->xports)
            erts_smp_xports_unlock(pp);
        ASSERT(!pp->xports);
#endif

        ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

        port_task_free(ptp);

        erts_smp_runq_lock(runq);

        ptp = pop_task(ptqp);
    }

 tasks_done:

    erts_unblock_fpe(fpe_was_unmasked);

    if (io_tasks_executed) {
        ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
               >= io_tasks_executed);
        erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
                                 -1*io_tasks_executed);
    }

    *curr_port_pp = NULL;

#ifdef ERTS_SMP
    ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
#endif

    if (!pp->sched.taskq) {
        ASSERT(pp->sched.exe_taskq);
        pp->sched.exe_taskq = NULL;
    }
    else {
#ifdef ERTS_SMP
        ErtsRunQueue *xrunq;
#endif

        ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
        ASSERT(pp->sched.taskq->first);

#ifdef ERTS_SMP
        xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
        if (!xrunq) {
#endif
            enqueue_port(runq, pp);
            ASSERT(pp->sched.exe_taskq);
            pp->sched.exe_taskq = NULL;
            /* No need to notify ourselves about inc in runq. */
#ifdef ERTS_SMP
        }
        else {
            /* Port emigrated ... */
            erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
            enqueue_port(xrunq, pp);
            ASSERT(pp->sched.exe_taskq);
            pp->sched.exe_taskq = NULL;
            erts_smp_runq_unlock(xrunq);
            erts_smp_notify_inc_runq(xrunq);
        }
#endif
        port_was_enqueued = 1;
    }

    res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
           != (erts_aint_t) 0);

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    port_taskq_free(ptqp);

    if (erts_system_profile_flags.runnable_ports && (port_was_enqueued != 1)) {
        profile_runnable_port(pp, am_inactive);
    }

    /* trace port scheduling, out */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
        trace_sched_ports(pp, am_out);
    }

#ifndef ERTS_SMP
    erts_port_release(pp);
#else
    {
        erts_aint_t refc;
        erts_smp_mtx_unlock(pp->lock);
        refc = erts_smp_atomic_dec_read_nob(&pp->refc);
        ASSERT(refc >= 0);
        if (refc == 0) {
            erts_smp_runq_unlock(runq);
            erts_port_cleanup(pp); /* Might acquire runq lock */
            erts_smp_runq_lock(runq);
            res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
                   != (erts_aint_t) 0);
        }
    }
#endif

 done:
    blk_data.resp = &res;

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds);

    return res;
}
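/*
 * Usage sketch (assumed caller context; 'esdp' and 'have_outstanding_io'
 * are illustrative names, not defined in this excerpt): the scheduler's
 * main loop is expected to call erts_port_task_execute() with the run
 * queue locked and to use the result to decide whether schedulers
 * waiting for I/O work may go to sleep, roughly:
 *
 *     erts_smp_runq_lock(runq);
 *     ...
 *     have_outstanding_io = erts_port_task_execute(runq,
 *                                                  &esdp->current_port);
 *
 * The return value is nonzero exactly while
 * erts_port_task_outstanding_io_tasks is nonzero, i.e. while unhandled
 * input, output, or event tasks remain on some run queue.
 */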