Example #1
/* closes and flushes `f->fd' and releases any memory allocated for `f' */
int fec_close(struct fec_handle *f)
{
    check(f);

    if (f->fd != -1) {
        if (f->mode & O_RDWR && fdatasync(f->fd) == -1) {
            warn("fdatasync failed: %s", strerror(errno));
        }

        TEMP_FAILURE_RETRY(close(f->fd));
    }

    if (f->verity.hash) {
        delete[] f->verity.hash;
    }
    if (f->verity.salt) {
        delete[] f->verity.salt;
    }
    if (f->verity.table) {
        delete[] f->verity.table;
    }

    pthread_mutex_destroy(&f->mutex);

    reset_handle(f);
    delete f;

    return 0;
}
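The check() macro used above (and again in fec_open in Example #5) is not defined in these excerpts. Below is a minimal sketch of what such an argument-validation helper could look like, assuming it fails the enclosing int-returning call with -1 and sets errno when its condition is false; the message and errno value are assumptions for illustration, not the library's actual definition:

/* Hypothetical sketch of an argument-validation macro: bail out of the
 * enclosing function (which returns int) when `cond' does not hold.
 * The errno value is an arbitrary choice for this sketch. */
#define check(cond)                               \
    do {                                          \
        if (unlikely(!(cond))) {                  \
            error("check failed: %s", #cond);     \
            errno = EINVAL;                       \
            return -1;                            \
        }                                         \
    } while (0)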
Example #2
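/*
 * Drain any tasks still queued on a port that is being freed: I/O tasks
 * are reported to the driver layer as stale select events; all other
 * task types are simply discarded.
 */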
static void
handle_remaining_tasks(ErtsRunQueue *runq, Port *pp)
{
    int i;
    ErtsPortTask *ptp;
    ErtsPortTaskQueue *ptqps[] = {pp->sched.exe_taskq, pp->sched.taskq};

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

    for (i = 0; i < sizeof(ptqps)/sizeof(ErtsPortTaskQueue *); i++) {
	if (!ptqps[i])
	    continue;

	ptp = pop_task(ptqps[i]);
	while (ptp) {
	    reset_handle(ptp);
	    erts_smp_runq_unlock(runq);

	    switch (ptp->type) {
	    case ERTS_PORT_TASK_FREE:
	    case ERTS_PORT_TASK_TIMEOUT:
		break;
	    case ERTS_PORT_TASK_INPUT:
		erts_stale_drv_select(pp->id, ptp->event, DO_READ, 1);
		break;
	    case ERTS_PORT_TASK_OUTPUT:
		erts_stale_drv_select(pp->id, ptp->event, DO_WRITE, 1);
		break;
	    case ERTS_PORT_TASK_EVENT:
		erts_stale_drv_select(pp->id, ptp->event, 0, 1);
		break;
	    case ERTS_PORT_TASK_DIST_CMD:
		break;
	    default:
		erl_exit(ERTS_ABORT_EXIT,
			 "Invalid port task type: %d\n",
			 (int) ptp->type);
	    }

	    port_task_free(ptp);

	    erts_smp_runq_lock(runq);
	    ptp = pop_task(ptqps[i]);
	}
    }

    ASSERT(!pp->sched.taskq || !pp->sched.taskq->first);
}
Example #3
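/*
 * Pop the next port from `runq' and execute its queued tasks with the port
 * lock held; `*curr_port_pp' points at the port while its tasks are running.
 * Returns nonzero as long as outstanding I/O port tasks remain in the system.
 */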
int
erts_port_task_execute(ErtsRunQueue *runq, Port **curr_port_pp)
{
    int port_was_enqueued = 0;
    Port *pp;
    ErtsPortTaskQueue *ptqp;
    ErtsPortTask *ptp;
    int res = 0;
    int reds = ERTS_PORT_REDS_EXECUTE;
    erts_aint_t io_tasks_executed = 0;
    int fpe_was_unmasked;
    ErtsPortTaskExeBlockData blk_data = {runq, NULL};

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PT_CHK_PORTQ(runq);

    pp = pop_port(runq);
    if (!pp) {
	res = 0;
	goto done;
    }

    ERTS_PORT_NOT_IN_RUNQ(pp);

    *curr_port_pp = pp;

    ASSERT(pp->sched.taskq);
    ASSERT(pp->sched.taskq->first);
    ptqp = pp->sched.taskq;
    pp->sched.taskq = NULL;

    ASSERT(!pp->sched.exe_taskq);
    pp->sched.exe_taskq = ptqp;

    if (erts_smp_port_trylock(pp) == EBUSY) {
	erts_smp_runq_unlock(runq);
	erts_smp_port_lock(pp);
	erts_smp_runq_lock(runq);
    }
    
    if (erts_sched_stat.enabled) {
	ErtsSchedulerData *esdp = erts_get_scheduler_data();
	Uint old = ERTS_PORT_SCHED_ID(pp, esdp->no);
	int migrated = old && old != esdp->no;

	erts_smp_spin_lock(&erts_sched_stat.lock);
	erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_executed++;
	erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].executed++;
	if (migrated) {
	    erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].total_migrated++;
	    erts_sched_stat.prio[ERTS_PORT_PRIO_LEVEL].migrated++;
	}
	erts_smp_spin_unlock(&erts_sched_stat.lock);
    }

    /* trace port scheduling, in */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
	trace_sched_ports(pp, am_in);
    }

    ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);
    ptp = pop_task(ptqp);

    fpe_was_unmasked = erts_block_fpe();

    while (ptp) {
	ASSERT(pp->sched.taskq != pp->sched.exe_taskq);

	reset_handle(ptp);
	erts_smp_runq_unlock(runq);

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));
	ERTS_SMP_CHK_NO_PROC_LOCKS;
	ASSERT(pp->drv_ptr);

	switch (ptp->type) {
	case ERTS_PORT_TASK_FREE: /* May be pushed in q at any time */
	    reds += ERTS_PORT_REDS_FREE;
	    erts_smp_runq_lock(runq);

	    erts_unblock_fpe(fpe_was_unmasked);
	    ASSERT(pp->status & ERTS_PORT_SFLG_FREE_SCHEDULED);
	    if (ptqp->first || (pp->sched.taskq && pp->sched.taskq->first))
		handle_remaining_tasks(runq, pp);
	    ASSERT(!ptqp->first
		   && (!pp->sched.taskq || !pp->sched.taskq->first));
#ifdef ERTS_SMP
	    erts_smp_atomic_dec_nob(&pp->refc); /* Not alive */
	    ERTS_SMP_LC_ASSERT(erts_smp_atomic_read_nob(&pp->refc) > 0); /* Lock */
#else
	    erts_port_status_bor_set(pp, ERTS_PORT_SFLG_FREE);	    
#endif

	    port_task_free(ptp);
	    if (pp->sched.taskq)
		port_taskq_free(pp->sched.taskq);
	    pp->sched.taskq = NULL;

	    goto tasks_done;
	case ERTS_PORT_TASK_TIMEOUT:
	    reds += ERTS_PORT_REDS_TIMEOUT;
	    if (!(pp->status & ERTS_PORT_SFLGS_DEAD))
		(*pp->drv_ptr->timeout)((ErlDrvData) pp->drv_data);
	    break;
	case ERTS_PORT_TASK_INPUT:
	    reds += ERTS_PORT_REDS_INPUT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    /* NOTE some windows drivers use ->ready_input for input and output */
	    (*pp->drv_ptr->ready_input)((ErlDrvData) pp->drv_data, ptp->event);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_OUTPUT:
	    reds += ERTS_PORT_REDS_OUTPUT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    (*pp->drv_ptr->ready_output)((ErlDrvData) pp->drv_data, ptp->event);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_EVENT:
	    reds += ERTS_PORT_REDS_EVENT;
	    ASSERT((pp->status & ERTS_PORT_SFLGS_DEAD) == 0);
	    (*pp->drv_ptr->event)((ErlDrvData) pp->drv_data, ptp->event, ptp->event_data);
	    io_tasks_executed++;
	    break;
	case ERTS_PORT_TASK_DIST_CMD:
	    reds += erts_dist_command(pp, CONTEXT_REDS-reds);
	    break;
	default:
	    erl_exit(ERTS_ABORT_EXIT,
		     "Invalid port task type: %d\n",
		     (int) ptp->type);
	    break;
	}

	if ((pp->status & ERTS_PORT_SFLG_CLOSING)
	    && erts_is_port_ioq_empty(pp)) {
	    reds += ERTS_PORT_REDS_TERMINATE;
	    erts_terminate_port(pp);
	}

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

#ifdef ERTS_SMP
	if (pp->xports)
	    erts_smp_xports_unlock(pp);
	ASSERT(!pp->xports);
#endif

	ERTS_SMP_LC_ASSERT(erts_lc_is_port_locked(pp));

	port_task_free(ptp);

	erts_smp_runq_lock(runq);

	ptp = pop_task(ptqp);
    }

 tasks_done:

    erts_unblock_fpe(fpe_was_unmasked);

    if (io_tasks_executed) {
	ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
	       >= io_tasks_executed);
	erts_smp_atomic_add_relb(&erts_port_task_outstanding_io_tasks,
				 -1*io_tasks_executed);
    }

    *curr_port_pp = NULL;

#ifdef ERTS_SMP
    ASSERT(runq == (ErtsRunQueue *) erts_smp_atomic_read_nob(&pp->run_queue));
#endif

    if (!pp->sched.taskq) {
	ASSERT(pp->sched.exe_taskq);
	pp->sched.exe_taskq = NULL;
    }
    else {
#ifdef ERTS_SMP
	ErtsRunQueue *xrunq;
#endif

	ASSERT(!(pp->status & ERTS_PORT_SFLGS_DEAD));
	ASSERT(pp->sched.taskq->first);

#ifdef ERTS_SMP
	xrunq = erts_check_emigration_need(runq, ERTS_PORT_PRIO_LEVEL);
	if (!xrunq) {
#endif
	    enqueue_port(runq, pp);
	    ASSERT(pp->sched.exe_taskq);
	    pp->sched.exe_taskq = NULL;
	    /* No need to notify ourselves about inc in runq. */
#ifdef ERTS_SMP
	}
	else {
	    /* Port emigrated ... */
	    erts_smp_atomic_set_nob(&pp->run_queue, (erts_aint_t) xrunq);
	    enqueue_port(xrunq, pp);
	    ASSERT(pp->sched.exe_taskq);
	    pp->sched.exe_taskq = NULL;
	    erts_smp_runq_unlock(xrunq);
	    erts_smp_notify_inc_runq(xrunq);
	}
#endif
	port_was_enqueued = 1;
    }

    res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
	   != (erts_aint_t) 0);

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    port_taskq_free(ptqp);

    if (erts_system_profile_flags.runnable_ports && (port_was_enqueued != 1)) {
    	profile_runnable_port(pp, am_inactive);
    }

    /* trace port scheduling, out */
    if (IS_TRACED_FL(pp, F_TRACE_SCHED_PORTS)) {
    	trace_sched_ports(pp, am_out);
    }
#ifndef ERTS_SMP
    erts_port_release(pp);
#else
    {
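	/* Release the port lock and drop our reference to the port; if it
	 * was the last reference, clean the port up with the run queue
	 * lock released around the cleanup. */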
	erts_aint_t refc;
	erts_smp_mtx_unlock(pp->lock);
	refc = erts_smp_atomic_dec_read_nob(&pp->refc);
	ASSERT(refc >= 0);
	if (refc == 0) {
	    erts_smp_runq_unlock(runq);
	    erts_port_cleanup(pp); /* Might acquire runq lock */
	    erts_smp_runq_lock(runq);
	    res = (erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks)
		   != (erts_aint_t) 0);
	}
    }
#endif

 done:
    blk_data.resp = &res;

    ERTS_SMP_LC_ASSERT(erts_smp_lc_runq_is_locked(runq));

    ERTS_PORT_REDUCTIONS_EXECUTED(runq, reds);

    return res;
}
Example #4
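/*
 * Abort the scheduled port task referred to by `pthp'.  Returns 0 if the
 * task was found and removed from its queue, and 1 if the handle no longer
 * refers to a scheduled task.
 */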
int
erts_port_task_abort(Eterm id, ErtsPortTaskHandle *pthp)
{
    ErtsRunQueue *runq;
    ErtsPortTaskQueue *ptqp;
    ErtsPortTask *ptp;
    Port *pp;
    int port_is_dequeued = 0;

    pp = &erts_port[internal_port_index(id)];
    runq = erts_port_runq(pp);

    ptp = handle2task(pthp);

    if (!ptp) {
	erts_smp_runq_unlock(runq);
	return 1;
    }

    ASSERT(ptp->handle == pthp);
    ptqp = ptp->queue;
    ASSERT(pp == ptqp->port);

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);
    ASSERT(ptqp);
    ASSERT(ptqp->first);

    dequeue_task(ptp);
    reset_handle(ptp);

    switch (ptp->type) {
    case ERTS_PORT_TASK_INPUT:
    case ERTS_PORT_TASK_OUTPUT:
    case ERTS_PORT_TASK_EVENT:
	ASSERT(erts_smp_atomic_read_nob(&erts_port_task_outstanding_io_tasks) > 0);
	erts_smp_atomic_dec_relb(&erts_port_task_outstanding_io_tasks);
	break;
    default:
	break;
    }

    ASSERT(ptqp == pp->sched.taskq || ptqp == pp->sched.exe_taskq);

    if (ptqp->first || pp->sched.taskq != ptqp)
	ptqp = NULL;
    else {
	pp->sched.taskq = NULL;
	if (!pp->sched.exe_taskq) {
	    dequeue_port(runq, pp);
	    ERTS_PORT_NOT_IN_RUNQ(pp);
	    port_is_dequeued = 1;
	}
    }

    ERTS_PT_CHK_PRES_PORTQ(runq, pp);

    erts_smp_runq_unlock(runq);
    
    if (erts_system_profile_flags.runnable_ports && port_is_dequeued) {
    	profile_runnable_port(pp, am_inactive);
    }

    port_task_free(ptp);
    if (ptqp)
	port_taskq_free(ptqp);

    return 0;
}
Example #5
/* opens `path' using given options and returns a fec_handle in `handle' if
   successful */
int fec_open(struct fec_handle **handle, const char *path, int mode, int flags,
        int roots)
{
    check(path);
    check(handle);
    check(roots > 0 && roots < FEC_RSM);

    debug("path = %s, mode = %d, flags = %d, roots = %d", path, mode, flags,
        roots);

    if (mode & (O_CREAT | O_TRUNC | O_EXCL | O_WRONLY)) {
        /* only reading and updating existing files is supported */
        error("failed to open '%s': (unsupported mode %d)", path, mode);
        errno = EACCES;
        return -1;
    }

    fec::handle f(new (std::nothrow) fec_handle, fec_close);

    if (unlikely(!f)) {
        error("failed to allocate file handle");
        errno = ENOMEM;
        return -1;
    }

    reset_handle(f.get());

    f->mode = mode;
    f->ecc.roots = roots;
    f->ecc.rsn = FEC_RSM - roots;
    f->flags = flags;

    int err = pthread_mutex_init(&f->mutex, NULL);

    if (unlikely(err != 0)) {
        /* pthread_mutex_init reports errors via its return value, not errno */
        error("failed to create a mutex: %s", strerror(err));
        errno = err;
        return -1;
    }

    f->fd = TEMP_FAILURE_RETRY(open(path, mode | O_CLOEXEC));

    if (f->fd == -1) {
        error("failed to open '%s': %s", path, strerror(errno));
        return -1;
    }

    if (get_size(f.get()) == -1) {
        error("failed to get size for '%s': %s", path, strerror(errno));
        return -1;
    }

    f->data_size = f->size; /* until ecc and/or verity are loaded */

    if (load_ecc(f.get()) == -1) {
        debug("error-correcting codes not found from '%s'", path);
    }

    if (load_verity(f.get()) == -1) {
        debug("verity metadata not found from '%s'", path);
    }

    *handle = f.release();
    return 0;
}
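fec::handle wraps the raw fec_handle pointer so that every early `return -1' above releases the partially initialized handle through fec_close (Example #1), and f.release() hands ownership to the caller only on success. Below is a sketch of how the wrapper and a typical caller might look, assuming the wrapper is a std::unique_ptr alias and that the declarations come from a public fec/io.h header; the alias definition, header path, and the caller are illustrative assumptions:

#include <fcntl.h>   /* O_RDONLY */
#include <memory>

#include <fec/io.h>  /* assumed header declaring struct fec_handle, fec_open, fec_close */

namespace fec {
    /* Assumed definition: unique ownership of a fec_handle, released via fec_close,
     * matching the `fec::handle f(new (std::nothrow) fec_handle, fec_close)' usage above. */
    using handle = std::unique_ptr<fec_handle, decltype(&fec_close)>;
}

/* Illustrative caller: open read-only, use the handle, then close it. */
int read_with_fec(const char *path)
{
    struct fec_handle *f = nullptr;

    /* roots must satisfy 0 < roots < FEC_RSM (see the check in fec_open) */
    if (fec_open(&f, path, O_RDONLY, 0, 2) == -1) {
        return -1;
    }

    /* ... read and error-correct data through `f' here ... */

    return fec_close(f);
}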
Example #6
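/* Main loop of the buffering thread: wait for commands on buffering_queue,
   start or continue filling the buffer when asked to, and keep it topped up
   whenever the amount of useful data drops below the configured watermark. */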
void buffering_thread(void)
{
    bool filling = false;
    struct queue_event ev;

    while (true)
    {
        if (!filling) {
            cancel_cpu_boost();
        }

        queue_wait_w_tmo(&buffering_queue, &ev, filling ? 5 : HZ/2);

        switch (ev.id)
        {
            case Q_START_FILL:
                LOGFQUEUE("buffering < Q_START_FILL %d", (int)ev.data);
                /* Call buffer callbacks here because this is one of two ways
                 * to begin a full buffer fill */
                send_event(BUFFER_EVENT_BUFFER_LOW, 0);
                shrink_buffer();
                queue_reply(&buffering_queue, 1);
                filling |= buffer_handle((int)ev.data);
                break;

            case Q_BUFFER_HANDLE:
                LOGFQUEUE("buffering < Q_BUFFER_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, 1);
                buffer_handle((int)ev.data);
                break;

            case Q_RESET_HANDLE:
                LOGFQUEUE("buffering < Q_RESET_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, 1);
                reset_handle((int)ev.data);
                break;

            case Q_CLOSE_HANDLE:
                LOGFQUEUE("buffering < Q_CLOSE_HANDLE %d", (int)ev.data);
                queue_reply(&buffering_queue, close_handle((int)ev.data));
                break;

            case Q_HANDLE_ADDED:
                LOGFQUEUE("buffering < Q_HANDLE_ADDED %d", (int)ev.data);
                /* A handle was added: the disk is spinning, so we can fill */
                filling = true;
                break;

            case Q_BASE_HANDLE:
                LOGFQUEUE("buffering < Q_BASE_HANDLE %d", (int)ev.data);
                base_handle_id = (int)ev.data;
                break;

#ifndef SIMULATOR
            case SYS_USB_CONNECTED:
                LOGFQUEUE("buffering < SYS_USB_CONNECTED");
                usb_acknowledge(SYS_USB_CONNECTED_ACK);
                usb_wait_for_disconnect(&buffering_queue);
                break;
#endif

            case SYS_TIMEOUT:
                LOGFQUEUE_SYS_TIMEOUT("buffering < SYS_TIMEOUT");
                break;
        }

        update_data_counters();

        /* If the buffer is low, call the callbacks to get new data */
        if (num_handles > 0 && data_counters.useful <= conf_watermark)
            send_event(BUFFER_EVENT_BUFFER_LOW, 0);

#if 0
        /* TODO: This needs to be fixed to use the idle callback, disable it
         * for simplicity until its done right */
#if MEM > 8
        /* If the disk is spinning, take advantage by filling the buffer */
        else if (storage_disk_is_active() && queue_empty(&buffering_queue))
        {
            if (num_handles > 0 && data_counters.useful <= high_watermark)
                send_event(BUFFER_EVENT_BUFFER_LOW, 0);

            if (data_counters.remaining > 0 && BUF_USED <= high_watermark)
            {
                /* This is a new fill, shrink the buffer up first */
                if (!filling)
                    shrink_buffer();
                filling = fill_buffer();
                update_data_counters();
            }
        }
#endif
#endif

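        /* No pending commands: keep filling while a fill is in progress;
           otherwise, on a timeout, start a new fill if the useful data has
           dropped to the watermark. */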
        if (queue_empty(&buffering_queue)) {
            if (filling) {
                if (data_counters.remaining > 0 && BUF_USED < buffer_len)
                    filling = fill_buffer();
                else if (data_counters.remaining == 0)
                    filling = false;
            }
            else if (ev.id == SYS_TIMEOUT)
            {
                if (data_counters.remaining > 0 &&
                    data_counters.useful <= conf_watermark) {
                    shrink_buffer();
                    filling = fill_buffer();
                }
            }
        }
    }
}