/* Fetch a thread-specific-event structure with a proc-lock queues
 * structure attached; grab one from the free list, or allocate a
 * zeroed one, if the tse does not already carry one. */
static ERTS_INLINE erts_tse_t *
tse_fetch(erts_pix_lock_t *pix_lock)
{
    erts_tse_t *tse = erts_tse_fetch();
    if (!tse->udata) {
        erts_proc_lock_queues_t *qs;
#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
        if (pix_lock)
            erts_pix_unlock(pix_lock);
#endif
        erts_smp_spin_lock(&qs_lock);
        qs = queue_free_list;
        if (qs) {
            queue_free_list = queue_free_list->next;
            erts_smp_spin_unlock(&qs_lock);
        }
        else {
            erts_smp_spin_unlock(&qs_lock);
            qs = erts_alloc(ERTS_ALC_T_PROC_LCK_QS,
                            sizeof(erts_proc_lock_queues_t));
            sys_memcpy((void *) qs,
                       (void *) &zeroqs,
                       sizeof(erts_proc_lock_queues_t));
        }
        tse->udata = qs;
#if ERTS_PROC_LOCK_SPINLOCK_IMPL && !ERTS_PROC_LOCK_ATOMIC_IMPL
        if (pix_lock)
            erts_pix_lock(pix_lock);
#endif
    }
    tse->uflgs = 0;
    return tse;
}
static ERTS_INLINE void
tse_return(erts_tse_t *tse, int force_free_q)
{
    CHECK_UNUSED_TSE(tse);
    if (force_free_q || erts_tse_is_tmp(tse)) {
        erts_proc_lock_queues_t *qs = tse->udata;
        ASSERT(qs);
        erts_smp_spin_lock(&qs_lock);
        qs->next = queue_free_list;
        queue_free_list = qs;
        erts_smp_spin_unlock(&qs_lock);
        tse->udata = NULL;
    }
    erts_tse_return(tse);
}
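/*
 * For context, a minimal sketch (not part of the original source) of how
 * callers pair tse_fetch()/tse_return() around a blocking wait on a
 * process lock. The wait loop itself is elided and the function name is
 * hypothetical; only the fetch/return protocol is illustrated.
 */
static void
wait_on_proc_lock_sketch(erts_pix_lock_t *pix_lock)
{
    erts_tse_t *tse = tse_fetch(pix_lock); /* udata now holds lock queues,
                                              uflgs cleared */
    /* ... enqueue this thread on the lock's wait queue and block on
       the tse until woken by the thread releasing the lock ... */
    tse_return(tse, 0); /* queues recycled only if tse is temporary */
}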
/*
 * Run all scheduled tasks for the first port in the run queue. The
 * run-queue lock must be held by the caller on entry and is held
 * again on return.
 */
int
erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
{
    int port_was_enqueued = 0;
    Port *pp;
    ErtsPortTaskQueue *ptqp;
    ErtsPortTask *ptp;
    int res = 0;
    int reds = ERTS_PORT_REDS_EXECUTE;
    erts_aint_t io_tasks_executed = 0;
    int fpe_was_unmasked;
    ErtsPortTaskExeBlockData blk_data = {runq, NULL};

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PT_CHK_PORTQ(runq);

    pp = pop_port(runq);
    if (!pp) {
        res = 0;
        goto done;
    }

    ERTS_PORT_NOT_IN_RUNQ(pp);

    *curr_port_pp = pp;

    /* Steal the scheduled task queue; it becomes the execution queue */
    ASSERT(pp->sched.taskq);
    ASSERT(pp->sched.taskq->first);
    ptqp = pp->sched.taskq;
    pp->sched.taskq = NULL;

    ASSERT(!pp->sched.exe_taskq);
    pp->sched.exe_taskq = ptqp;

    if (erts_smp_port_trylock(pp) == EBUSY) {
        erts_smp_runq_unlock(runq);
        erts_smp_port_lock(pp);
        erts_smp_runq_lock(runq);
    }

    if (erts_sched_stat.enabled) {
        ErtsSchedulerData *esdp = erts_get_scheduler_data();
        Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no);
        int migrated = old && old != esdp->no;

        erts_smp_spin_lock(&erts_sched_stat.lock);
        erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_executed++;
        erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].executed++;
        if (migrated) {
            erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_migrated++;
            erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].migrated++;
        }
        erts_smp_spin_unlock(&erts_sched_stat.lock);
    }

    /* trace port scheduling, in */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
        trace_sched_ports(pp, am_in);
    }

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);
    ptp = pop_task(ptqp);

    fpe_was_unmasked = erts_block_fpe();

    while (ptp) {
        ASSERT(pp->sched.taskq != pp->sched.exe_taskq);

        reset_handle(ptp);
        erts_smp_runq_unlock(runq);

        ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
        ERTS_SMP_CHK_NO_PROC_LOCKS;
        ASSERT(pp->drv_ptr);

        switch (ptp->type) {
        case ERTS_PORT_TASK_FREE: /* May be pushed in q at any time */
            reds += ERTS_PORT_REDS_FREE;
            erts_smp_runq_lock(runq);

            erts_unblock_fpe(fpe_was_unmasked);
            ASSERT(pp->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
            if (ptqp->first || (pp->sched.taskq && pp->sched.taskq->first))
                handle_remaining_tasks(runq, pp);
            ASSERT(!ptqp->first
                   && (!pp->sched.taskq || !pp->sched.taskq->first));
#ifdef ERTS_SMP
            erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
            ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
#else
            erts_port_status_bor_set(pp, ERTS_PORT_SFLG_FREE);
#endif

            port_task_free(ptp);
            if (pp->sched.taskq)
                port_taskq_free(pp->sched.taskq);
            pp->sched.taskq = NULL;

            goto tasks_done;
        case ERTS_PORT_TASK_TIMEOUT:
            reds += ERTS_PORT_REDS_TIMEOUT;
            if (!(pp->status & ERTS_PORT_SFLGS_DEAD))
                (*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
            break;
        case ERTS_PORT_TASK_INPUT:
            reds += ERTS_PORT_REDS_INPUT;
            ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
            /* NOTE: some Windows drivers use ->ready_input
               for both input and output */
            (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data,
                                        ptp->event);
            io_tasks_executed++;
            break;
        case ERTS_PORT_TASK_OUTPUT:
            reds += ERTS_PORT_REDS_OUTPUT;
            ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
            (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data,
                                         ptp->event);
            io_tasks_executed++;
            break;
        case ERTS_PORT_TASK_EVENT:
            reds += ERTS_PORT_REDS_EVENT;
            ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
            (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data,
                                  ptp->event,
                                  ptp->event_data);
            io_tasks_executed++;
            break;
        case ERTS_PORT_TASK_DIST_CMD:
            reds += erts_dist_command(pp, CONTEXT_REDS-reds);
            break;
        default:
            erl_exit(ERTS_ABORT_EXIT,
                     "Invalid port task type: %d\n",
                     (int) ptp->type);
            break;
        }

        if ((pp->status & ERTS_PORT_SFLG_CLOSING)
            && erts_is_port_ioq_empty(pp)) {
            reds += ERTS_PORT_REDS_TERMINATE;
            erts_terminate_port(pp);
        }

        ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

#ifdef ERTS_SMP
        if (pp->xports)
            erts_smp_xports_unlock(pp);
        ASSERT(!pp->xports);
#endif

        ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

        port_task_free(ptp);

        erts_smp_runq_lock(runq);

        ptp = pop_task(ptqp);
    }

 tasks_done:

    erts_unblock_fpe(fpe_was_unmasked);

    if (io_tasks_executed) {
        ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
               >= io_tasks_executed);
        erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
                                 -1*io_tasks_executed);
    }

    *curr_port_pp = NULL;

#ifdef ERTS_SMP
    ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
#endif

    if (!pp->sched.taskq) {
        ASSERT(pp->sched.exe_taskq);
        pp->sched.exe_taskq = NULL;
    }
    else {
#ifdef ERTS_SMP
        ErtsRunQueue *xrunq;
#endif

        ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
        ASSERT(pp->sched.taskq->first);

#ifdef ERTS_SMP
        xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
        if (!xrunq) {
#endif
            enqueue_port(runq, pp);
            ASSERT(pp->sched.exe_taskq);
            pp->sched.exe_taskq = NULL;
            /* No need to notify ourselves about inc in runq. */
#ifdef ERTS_SMP
        }
        else {
            /* Port emigrated ... */
            erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
            enqueue_port(xrunq, pp);
            ASSERT(pp->sched.exe_taskq);
            pp->sched.exe_taskq = NULL;
            erts_smp_runq_unlock(xrunq);
            erts_smp_notify_inc_runq(xrunq);
        }
#endif
        port_was_enqueued = 1;
    }

    res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
           != (erts_aint_t) 0);

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    port_taskq_free(ptqp);

    if (erts_system_profile_flags.runnable_ports && (port_was_enqueued != 1)) {
        profile_runnable_port(pp, am_inactive);
    }

    /* trace port scheduling, out */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
        trace_sched_ports(pp, am_out);
    }

#ifndef ERTS_SMP
    erts_port_release(pp);
#else
    {
        erts_aint_t refc;
        erts_smp_mtx_unlock(pp->lock);
        refc = erts_smp_atomic_dec_read_nob(&pp->refc);
        ASSERT(refc >= 0);
        if (refc == 0) {
            erts_smp_runq_unlock(runq);
            erts_port_cleanup(pp); /* Might acquire runq lock */
            erts_smp_runq_lock(runq);
            res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
                   != (erts_aint_t) 0);
        }
    }
#endif

 done:
    blk_data.resp = &res;

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds);

    return res;
}
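/*
 * The dispatch above invokes callbacks supplied by the port driver via
 * its ErlDrvEntry. As a reference sketch (stub bodies, illustrative
 * names; signatures as declared in erl_driver.h), the driver side of
 * the ERTS_PORT_TASK_TIMEOUT/_INPUT/_OUTPUT cases looks like this;
 * ERTS_PORT_TASK_EVENT similarly maps to the entry's event callback.
 */
#include "erl_driver.h"

static void my_timeout(ErlDrvData d)                /* ERTS_PORT_TASK_TIMEOUT */
{
    (void) d; /* a driver would typically re-arm via driver_set_timer() */
}

static void my_ready_input(ErlDrvData d, ErlDrvEvent ev)  /* ..._INPUT */
{
    (void) d; (void) ev; /* the event (e.g. an fd) is ready for reading */
}

static void my_ready_output(ErlDrvData d, ErlDrvEvent ev) /* ..._OUTPUT */
{
    (void) d; (void) ev; /* the event is ready for writing */
}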
/*
** Schedule async_invoke on a worker thread
** NOTE: will be synchronous when threads are unsupported
** return values:
**  0 completed
**  -1 error
**  N handle value (used with async_cancel)
** arguments:
**  ix driver index
**  key pointer to schedule queue (NULL means round robin)
**  async_invoke function to run in thread
**  async_data data to pass to invoke function
**  async_free function to release async_data in case of failure
*/
long driver_async(ErlDrvPort ix, unsigned int* key,
                  void (*async_invoke)(void*), void* async_data,
                  void (*async_free)(void*))
{
    ErlAsync* a;
    Port* prt = erts_drvport2port(ix);
    long id;
    unsigned int qix;

    if (!prt)
        return -1;

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(prt));

    /* Allocate after the port check so an invalid port does not leak
     * the ErlAsync structure. */
    a = (ErlAsync*) erts_alloc(ERTS_ALC_T_ASYNC, sizeof(ErlAsync));
    a->next = NULL;
    a->prev = NULL;
    a->hndl = (DE_Handle*) prt->drv_ptr->handle;
    a->port = prt->id;
    a->pdl = NULL;
    a->async_data = async_data;
    a->async_invoke = async_invoke;
    a->async_free = async_free;

    /* Generate a wrapping, non-zero async id */
    erts_smp_spin_lock(&async_id_lock);
    async_id = (async_id + 1) & 0x7fffffff;
    if (async_id == 0)
        async_id++;
    id = async_id;
    erts_smp_spin_unlock(&async_id_lock);

    a->async_id = id;

    if (key == NULL) {
        qix = (erts_async_max_threads > 0)
            ? (id % erts_async_max_threads) : 0;
    }
    else {
        qix = (erts_async_max_threads > 0)
            ? (*key % erts_async_max_threads) : 0;
        *key = qix;
    }
#ifdef USE_THREADS
    if (erts_async_max_threads > 0) {
        if (prt->port_data_lock) {
            driver_pdl_inc_refc(prt->port_data_lock);
            a->pdl = prt->port_data_lock;
        }
        async_add(a, &async_q[qix]);
        return id;
    }
#endif
    /* No async threads: run the job synchronously on this thread */
    (*a->async_invoke)(a->async_data);

    if (async_ready(prt, a->async_data)) {
        if (a->async_free != NULL)
            (*a->async_free)(a->async_data);
    }
    erts_free(ERTS_ALC_T_ASYNC, (void *) a);
    return id;
}
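/*
 * Usage sketch (not part of the original source): a driver typically
 * calls driver_async() from a callback that holds the port lock,
 * offloading blocking work to an async thread; completion is reported
 * through the driver's ready_async callback. The job struct and all
 * names below are hypothetical.
 */
#include "erl_driver.h"

typedef struct {
    int input;
    int result;
} my_job_t;

static void do_job(void* data)     /* async_invoke: runs on async thread */
{
    my_job_t* job = (my_job_t*) data;
    job->result = job->input * 2;  /* stand-in for blocking work */
}

static void free_job(void* data)   /* async_free: job dropped on failure */
{
    driver_free(data);
}

static long start_job(ErlDrvPort port, int input)
{
    my_job_t* job = (my_job_t*) driver_alloc(sizeof(my_job_t));
    job->input = input;
    /* key == NULL: jobs are spread round-robin over the async queues */
    return driver_async(port, NULL, do_job, job, free_job);
}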