Example #1
static void
pmclog_schedule_io(struct pmc_owner *po)
{
	KASSERT(po->po_curbuf != NULL,
	    ("[pmclog,%d] schedule_io with null buffer po=%p", __LINE__, po));

	KASSERT(po->po_curbuf->plb_ptr >= po->po_curbuf->plb_base,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p base=%p", __LINE__,
		po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_base));
	KASSERT(po->po_curbuf->plb_ptr <= po->po_curbuf->plb_fence,
	    ("[pmclog,%d] buffer invariants po=%p ptr=%p fenc=%p", __LINE__,
		po, po->po_curbuf->plb_ptr, po->po_curbuf->plb_fence));

	PMCDBG(LOG,SIO, 1, "po=%p", po);

	mtx_assert(&po->po_mtx, MA_OWNED);

	/*
	 * Add the current buffer to the tail of the buffer list and
	 * wakeup the helper.
	 */
	TAILQ_INSERT_TAIL(&po->po_logbuffers, po->po_curbuf, plb_next);
	po->po_curbuf = NULL;
	wakeup_one(po);
}
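The buffers queued here are drained by pmclog's helper thread, which sleeps on `po` as its wait channel. Below is a minimal sketch of that consumer side, assuming the surrounding pmc(4) types; the real pmclog_loop() additionally handles shutdown flags and writes each buffer out with fo_write().

static void
pmclog_helper_sketch(struct pmc_owner *po)
{
	struct pmclog_buffer *lb;

	mtx_lock(&pmc_kthread_mtx);
	for (;;) {
		/* Dequeue the oldest buffer scheduled by pmclog_schedule_io(). */
		mtx_lock_spin(&po->po_mtx);
		lb = TAILQ_FIRST(&po->po_logbuffers);
		if (lb != NULL)
			TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
		mtx_unlock_spin(&po->po_mtx);

		if (lb == NULL) {
			/* Nothing queued: wait for wakeup_one(po) above. */
			msleep(po, &pmc_kthread_mtx, PWAIT, "pmcloop", 0);
			continue;
		}
		/* ... write out [lb->plb_base, lb->plb_ptr) and recycle lb ... */
	}
}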
Example #2
/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be directed
 *   to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
void
vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
{
	int hostcpu;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	hostcpu = vcpu->hostcpu;
	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
		if (hostcpu != curcpu) {
			if (lapic_intr) {
				vlapic_post_intr(vcpu->vlapic, hostcpu,
				    vmm_ipinum);
			} else {
				ipi_cpu(hostcpu, vmm_ipinum);
			}
		} else {
			/*
			 * If the 'vcpu' is running on 'curcpu' then it must
			 * be sending a notification to itself (e.g. SELF_IPI).
			 * The pending event will be picked up when the vcpu
			 * transitions back to guest context.
			 */
		}
	} else {
		KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
		    "with hostcpu %d", vcpu->state, hostcpu));
		if (vcpu->state == VCPU_SLEEPING)
			wakeup_one(vcpu);
	}
	vcpu_unlock(vcpu);
}
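For context, the sleeping case above pairs with a wait of roughly this shape in the vcpu's idle path. This is a hedged sketch, assuming the vcpu lock is the spin mutex `vcpu->mtx`; the real bhyve code lives in vm_handle_hlt() and changes state through vcpu_set_state().

static void
vcpu_wait_sketch(struct vcpu *vcpu)
{
	vcpu_lock(vcpu);
	vcpu->state = VCPU_SLEEPING;
	/* Woken by wakeup_one(vcpu) in vcpu_notify_event() above. */
	msleep_spin(vcpu, &vcpu->mtx, "vmidle", hz);
	/* ... transition back toward VCPU_RUNNING on wakeup ... */
	vcpu_unlock(vcpu);
}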
Example #3
int
pmclog_close(struct pmc_owner *po)
{

	PMCDBG(LOG,CLO,1, "po=%p", po);

	mtx_lock(&pmc_kthread_mtx);

	/*
	 * Schedule the current buffer.
	 */
	mtx_lock_spin(&po->po_mtx);
	if (po->po_curbuf)
		pmclog_schedule_io(po);
	else
		wakeup_one(po);
	mtx_unlock_spin(&po->po_mtx);

	/*
	 * Initiate shutdown: no new data queued,
	 * thread will close file on last block.
	 */
	po->po_flags |= PMC_PO_SHUTDOWN;

	mtx_unlock(&pmc_kthread_mtx);

	return (0);
}
Example #4
void
alq_flush(struct alq *alq)
{
    int needwakeup = 0;

    ALD_LOCK();
    ALQ_LOCK(alq);

    /*
     * Pull the lever iff there is data to flush and we're
     * not already in the middle of a flush operation.
     */
    if (HAS_PENDING_DATA(alq) && !(alq->aq_flags & AQ_FLUSHING)) {
        if (alq->aq_flags & AQ_ACTIVE)
            ald_deactivate(alq);

        ALD_UNLOCK();
        needwakeup = alq_doio(alq);
    } else
        ALD_UNLOCK();

    ALQ_UNLOCK(alq);

    if (needwakeup)
        wakeup_one(alq);
}
Example #5
static int
vtballoon_detach(device_t dev)
{
	struct vtballoon_softc *sc;

	sc = device_get_softc(dev);

	if (sc->vtballoon_td != NULL) {
		VTBALLOON_LOCK(sc);
		sc->vtballoon_flags |= VTBALLOON_FLAG_DETACH;
		wakeup_one(sc);
		msleep(sc->vtballoon_td, VTBALLOON_MTX(sc), 0, "vtbdth", 0);
		VTBALLOON_UNLOCK(sc);

		sc->vtballoon_td = NULL;
	}

	if (device_is_attached(dev)) {
		vtballoon_pop(sc);
		vtballoon_stop(sc);
	}

	if (sc->vtballoon_page_frames != NULL) {
		free(sc->vtballoon_page_frames, M_DEVBUF);
		sc->vtballoon_page_frames = NULL;
	}

	VTBALLOON_LOCK_DESTROY(sc);

	return (0);
}
Example #6
/*
 * iicbus_release_bus()
 *
 * Release the device allocated with iicbus_request_bus()
 */
int
iicbus_release_bus(device_t bus, device_t dev)
{
	struct iicbus_softc *sc = (struct iicbus_softc *)device_get_softc(bus);
	int error;

	IICBUS_LOCK(sc);

	if (sc->owner != dev) {
		IICBUS_UNLOCK(sc);
		return (EACCES);
	}

	/* 
	 * Drop the lock around the call to the bus driver. 
	 * This call should be allowed to sleep in the IIC_WAIT case.
	 * Drivers might also need to grab locks that would cause LOR
	 * if our lock is held.
	 */
	IICBUS_UNLOCK(sc);
	/* Ask the underlying layers if the release is ok */
	error = IICBUS_CALLBACK(device_get_parent(bus), IIC_RELEASE_BUS, NULL);

	if (error == 0) {
		IICBUS_LOCK(sc);
		sc->owner = NULL;

		/* wakeup a waiting thread */
		wakeup_one(sc);
		IICBUS_UNLOCK(sc);
	}

	return (error);
}
Example #7
int
taskq_next_work(struct taskq *tq, struct task *work, sleepfn tqsleep)
{
    struct task *next;

    mtx_enter(&tq->tq_mtx);
    while ((next = TAILQ_FIRST(&tq->tq_worklist)) == NULL) {
        if (tq->tq_state != TQ_S_RUNNING) {
            mtx_leave(&tq->tq_mtx);
            return (0);
        }

        tqsleep(tq, &tq->tq_mtx, PWAIT, "bored", 0);
    }

    TAILQ_REMOVE(&tq->tq_worklist, next, t_entry);
    CLR(next->t_flags, TASK_ONQUEUE);

    *work = *next; /* copy to caller to avoid races */

    next = TAILQ_FIRST(&tq->tq_worklist);
    mtx_leave(&tq->tq_mtx);

    if (next != NULL)
        wakeup_one(tq);

    return (1);
}
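A worker built on this function is essentially a loop over taskq_next_work(). The sketch below assumes OpenBSD's task layout (t_func/t_arg) and passes msleep as the sleep function; it is not OpenBSD's exact taskq_thread().

static void
taskq_worker_sketch(void *arg)
{
    struct taskq *tq = arg;
    struct task work;

    /* taskq_next_work() returns 0 once the queue is being torn down. */
    while (taskq_next_work(tq, &work, msleep))
        (*work.t_func)(work.t_arg);

    kthread_exit(0);
}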
Example #8
void
tws_passthru_complete(struct tws_request *req)
{
    req->state = TWS_REQ_STATE_COMPLETE;
    wakeup_one(req);
}
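The completion above pairs with a waiter blocked on the request pointer. A hypothetical sketch follows, assuming the driver's `gen_lock` mutex; the wmesg "twspt" and the retry timeout are illustrative, not the driver's exact code.

static int
tws_passthru_wait_sketch(struct tws_softc *sc, struct tws_request *req)
{
    int error = 0;

    mtx_lock(&sc->gen_lock);
    /* Sleep until tws_passthru_complete() flips the state and wakes us. */
    while (req->state != TWS_REQ_STATE_COMPLETE)
        error = mtx_sleep(req, &sc->gen_lock, 0, "twspt", 5 * hz);
    mtx_unlock(&sc->gen_lock);
    return (error);
}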
Example #9
void
alq_post_flags(struct alq *alq, struct ale *ale, int flags)
{
    int activate;
    void *waitchan;

    activate = 0;

    if (ale->ae_bytesused > 0) {
        if (!(alq->aq_flags & AQ_ACTIVE) &&
                !(flags & ALQ_NOACTIVATE)) {
            alq->aq_flags |= AQ_ACTIVE;
            activate = 1;
        }

        alq->aq_writehead += ale->ae_bytesused;
        alq->aq_freebytes -= ale->ae_bytesused;

        /* Wrap aq_writehead if we filled to the end of the buffer. */
        if (alq->aq_writehead == alq->aq_buflen)
            alq->aq_writehead = 0;

        KASSERT((alq->aq_writehead >= 0 &&
                 alq->aq_writehead < alq->aq_buflen),
                ("%s: aq_writehead < 0 || aq_writehead >= aq_buflen",
                 __func__));

        KASSERT((HAS_PENDING_DATA(alq)), ("%s: queue empty!", __func__));
    }

    /*
     * If there are waiters, we need to signal the waiting threads after we
     * complete our work. The alq ptr is used as a wait channel for threads
     * requiring resources to be freed up. In the AQ_ORDERED case, threads
     * are not allowed to concurrently compete for resources in the
     * alq_getn() while loop, so we use a different wait channel in this case.
     */
    if (alq->aq_waiters > 0) {
        if (alq->aq_flags & AQ_ORDERED)
            waitchan = &alq->aq_waiters;
        else
            waitchan = alq;
    } else
        waitchan = NULL;

    ALQ_UNLOCK(alq);

    if (activate) {
        ALD_LOCK();
        ald_activate(alq);
        ALD_UNLOCK();
    }

    /* NB: We rely on wakeup_one waking threads in a FIFO manner. */
    if (waitchan != NULL)
        wakeup_one(waitchan);
}
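The matching wait in alq_getn() sleeps on whichever channel is chosen above. Here is a minimal sketch, assuming alq's spin mutex `aq_mtx`; the real alq_getn() also handles ALQ_NOWAIT and recomputes the writable region, and the wmesg strings are illustrative.

static void
alq_wait_sketch(struct alq *alq, int len)
{
    ALQ_LOCK(alq);
    alq->aq_waiters++;
    while (alq->aq_freebytes < len) {
        /* Ordered queues serialize waiters on &alq->aq_waiters. */
        if (alq->aq_flags & AQ_ORDERED)
            msleep_spin(&alq->aq_waiters, &alq->aq_mtx, "alqord", 0);
        else
            msleep_spin(alq, &alq->aq_mtx, "alqres", 0);
    }
    alq->aq_waiters--;
    ALQ_UNLOCK(alq);
}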
Example #10
void
rumpuser_mutex_exit(struct rumpuser_mtx *mtx)
{

	assert(mtx->v > 0);
	if (--mtx->v == 0) {
		mtx->o = NULL;
		wakeup_one(&mtx->waiters);
	}
}
Example #11
void
rumpuser_mutex_exit(struct rumpuser_mtx *mtx)
{

	bmk_assert(mtx->v == 1);
	mtx->v = 0;
	mtx->o = NULL;
	mtx->bmk_o = NULL;
	wakeup_one(&mtx->waiters);
}
Example #12
void
tws_passthru_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
    wakeup_one(req);
    lockmgr(&sc->gen_lock, LK_RELEASE);
}
Example #13
static void
vtballoon_vq_intr(void *xsc)
{
	struct vtballoon_softc *sc;

	sc = xsc;

	VTBALLOON_LOCK(sc);
	wakeup_one(sc);
	VTBALLOON_UNLOCK(sc);
}
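Examples 5, 13 and 15 all wake the same balloon thread, which sleeps on the softc. A hedged sketch of that loop follows; the real thread also inflates or deflates the balloon toward the host's target before sleeping again.

static void
vtballoon_thread_sketch(void *xsc)
{
	struct vtballoon_softc *sc = xsc;

	VTBALLOON_LOCK(sc);
	for (;;) {
		if (sc->vtballoon_flags & VTBALLOON_FLAG_DETACH)
			break;
		/* ... adjust the balloon toward the host's target ... */

		/* Woken by the interrupt, config-change and detach paths. */
		msleep(sc, VTBALLOON_MTX(sc), 0, "vtbslp", 0);
	}
	VTBALLOON_UNLOCK(sc);
	/* Exiting wakes the msleep() in vtballoon_detach() above. */
	kthread_exit();
}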
Example #14
void
flowadv_add_entry(struct flowadv_fcentry *fce) {
    lck_mtx_lock_spin(&fadv_lock);
    STAILQ_INSERT_HEAD(&fadv_list, fce, fce_link);
    VERIFY(!STAILQ_EMPTY(&fadv_list));

    if (!fadv_active && fadv_thread != THREAD_NULL)
        wakeup_one((caddr_t)&fadv_list);

    lck_mtx_unlock(&fadv_lock);
}
Example #15
static int
vtballoon_config_change(device_t dev)
{
	struct vtballoon_softc *sc;

	sc = device_get_softc(dev);

	VTBALLOON_LOCK(sc);
	wakeup_one(sc);
	VTBALLOON_UNLOCK(sc);

	return (1);
}
Example #16
static void
nvme_ns_io_test_cb(void *arg, const struct nvme_completion *cpl)
{
	struct nvme_io_test_thread	*tth = arg;
	struct timeval			t;

	tth->io_completed++;

	if (nvme_completion_is_error(cpl)) {
		printf("%s: error occurred\n", __func__);
		wakeup_one(tth);
		return;
	}

	getmicrouptime(&t);
	timevalsub(&t, &tth->start);

	if (t.tv_sec >= tth->time) {
		wakeup_one(tth);
		return;
	}

	switch (tth->opc) {
	case NVME_OPC_WRITE:
		nvme_ns_cmd_write(tth->ns, tth->buf, tth->idx * 2048,
		    tth->size/nvme_ns_get_sector_size(tth->ns),
		    nvme_ns_io_test_cb, tth);
		break;
	case NVME_OPC_READ:
		nvme_ns_cmd_read(tth->ns, tth->buf, tth->idx * 2048,
		    tth->size/nvme_ns_get_sector_size(tth->ns),
		    nvme_ns_io_test_cb, tth);
		break;
	default:
		break;
	}
}
Example #17
void
soaio_enqueue(struct task *task)
{

	mtx_lock(&soaio_jobs_lock);
	MPASS(task->ta_pending == 0);
	task->ta_pending++;
	STAILQ_INSERT_TAIL(&soaio_jobs, task, ta_link);
	soaio_queued++;
	if (soaio_queued <= soaio_idle)
		wakeup_one(&soaio_idle);
	else if (soaio_num_procs < soaio_max_procs)
		taskqueue_enqueue(taskqueue_thread, &soaio_kproc_task);
	mtx_unlock(&soaio_jobs_lock);
}
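On the other side, an idle AIO worker advertises itself through soaio_idle and sleeps on &soaio_idle, the channel used above. The structure and wmesg below are illustrative; the real soaio_kproc_loop() also retires workers after an idle timeout.

static void
soaio_idle_wait_sketch(void)
{
	mtx_lock(&soaio_jobs_lock);
	while (STAILQ_EMPTY(&soaio_jobs)) {
		soaio_idle++;
		/* Woken by wakeup_one(&soaio_idle) in soaio_enqueue(). */
		mtx_sleep(&soaio_idle, &soaio_jobs_lock, PWAIT, "aioidl", 0);
		soaio_idle--;
	}
	mtx_unlock(&soaio_jobs_lock);
}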
Example #18
/*
 * Detach mapped page and release resources back to the system.
 *
 * Remove a reference from the given sf_buf, adding it to the free
 * list when its reference count reaches zero. A freed sf_buf still,
 * however, retains its virtual-to-physical mapping until it is
 * recycled or reactivated by sf_buf_alloc(9).
 */
void
sf_buf_free(struct sf_buf *sf)
{

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;

		if (sf_buf_alloc_want > 0)
			wakeup_one(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}
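The allocation side this wakeup serves looks roughly like the sketch below, assuming the same lock and free list; the real sf_buf_alloc(9) also honors SFB_CATCH/SFB_NOWAIT flags and installs the virtual-to-physical mapping.

static struct sf_buf *
sf_buf_alloc_sketch(void)
{
	struct sf_buf *sf;

	mtx_lock(&sf_buf_lock);
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		/* Record our interest so sf_buf_free() knows to wake us. */
		sf_buf_alloc_want++;
		msleep(&sf_buf_freelist, &sf_buf_lock, PVM, "sfbufa", 0);
		sf_buf_alloc_want--;
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	nsfbufsused++;
	mtx_unlock(&sf_buf_lock);
	return (sf);
}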
Example #19
static void
pmclog_stop_kthread(struct pmc_owner *po)
{

	mtx_lock(&pmc_kthread_mtx);
	po->po_flags &= ~PMC_PO_OWNS_LOGFILE;
	if (po->po_kthread != NULL) {
		PROC_LOCK(po->po_kthread);
		kern_psignal(po->po_kthread, SIGHUP);
		PROC_UNLOCK(po->po_kthread);
	}
	wakeup_one(po);
	while (po->po_kthread)
		msleep(po->po_kthread, &pmc_kthread_mtx, PPAUSE, "pmckstp", 0);
	mtx_unlock(&pmc_kthread_mtx);
}
Example #20
void
flowadv_add(struct flowadv_fclist *fcl)
{
	if (STAILQ_EMPTY(fcl))
		return;

	lck_mtx_lock_spin(&fadv_lock);

	STAILQ_CONCAT(&fadv_list, fcl);
	VERIFY(!STAILQ_EMPTY(&fadv_list));

	if (!fadv_active && fadv_thread != THREAD_NULL)
		wakeup_one((caddr_t)&fadv_list);

	lck_mtx_unlock(&fadv_lock);
}
Example #21
void
_cv_signal(struct cv *c, int broadcast)
{
	spin_lock(&c->cv_lock);
	if (c->cv_waiters == 0) {
		spin_unlock(&c->cv_lock);
	} else if (broadcast) {
		c->cv_waiters = 0;
		spin_unlock(&c->cv_lock);	/* must unlock first */
		wakeup(c);
	} else {
		c->cv_waiters--;
		spin_unlock(&c->cv_lock);	/* must unlock first */
		wakeup_one(c);
	}
}
Example #22
void
soisconnected(struct socket *so)
{
	struct socket *head = so->so_head;

	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTED;
	if (head && soqremque(so, 0)) {
		soqinsque(head, so, 1);
		sorwakeup(head);
		wakeup_one(&head->so_timeo);
	} else {
		wakeup(&so->so_timeo);
		sorwakeup(so);
		sowwakeup(so);
	}
}
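The wakeup_one(&head->so_timeo) above unblocks an accept(2)-style wait of roughly this shape. This is a 4.4BSD-flavored sketch to match the snippet; interrupt protection and non-blocking handling are omitted.

static int
accept_wait_sketch(struct socket *head)
{
	int error = 0;

	while (head->so_qlen == 0 && head->so_error == 0) {
		/* Woken by soisconnected() when a connection completes. */
		error = tsleep(&head->so_timeo, PSOCK | PCATCH, "netcon", 0);
		if (error)
			break;
	}
	return (error);
}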
Example #23
void
rumpuser_rw_exit(struct rumpuser_rw *rw)
{

	if (rw->o) {
		rw->o = NULL;
	} else {
		rw->v--;
	}

	/* standard procedure, don't let readers starve out writers */
	if (!TAILQ_EMPTY(&rw->wwait)) {
		if (rw->o == NULL)
			wakeup_one(&rw->wwait);
	} else if (!TAILQ_EMPTY(&rw->rwait) && rw->o == NULL) {
		wakeup_all(&rw->rwait);
	}
}
Example #24
void
fdata_set_dead(struct fuse_data *data)
{
	debug_printf("data=%p\n", data);

	FUSE_LOCK();
	if (fdata_get_dead(data)) {
		FUSE_UNLOCK();
		return;
	}
	fuse_lck_mtx_lock(data->ms_mtx);
	data->dataflags |= FSESS_DEAD;
	wakeup_one(data);
	selwakeuppri(&data->ks_rsel, PZERO + 1);
	wakeup(&data->ticketer);
	fuse_lck_mtx_unlock(data->ms_mtx);
	FUSE_UNLOCK();
}
Example #25
/*
 * Detach mapped page and release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{
#ifndef ARM_USE_SMALL_ALLOC
	 mtx_lock(&sf_buf_lock);
	 sf->ref_count--;
	 if (sf->ref_count == 0) {
		 TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		 nsfbufsused--;
		 pmap_kremove(sf->kva);
		 sf->m = NULL;
		 LIST_REMOVE(sf, list_entry);
		 if (sf_buf_alloc_want > 0)
			 wakeup_one(&sf_buf_freelist);
	 }
	 mtx_unlock(&sf_buf_lock);
#endif
}
Example #26
static void
loadimage(void *arg, int npending)
{
#ifdef notyet
	struct thread *td = curthread;
#endif
	char *imagename = arg;
	struct priv_fw *fp;
	linker_file_t result;
	int error;

	/* synchronize with the thread that dispatched us */
	lockmgr(&firmware_lock, LK_EXCLUSIVE);
	lockmgr(&firmware_lock, LK_RELEASE);

/* JAT
	if (td->td_proc->p_fd->fd_rdir == NULL) {
		kprintf("%s: root not mounted yet, no way to load image\n",
		    imagename);
		goto done;
	}
*/
	error = linker_reference_module(imagename, NULL, &result);
	if (error != 0) {
		kprintf("%s: could not load firmware image, error %d\n",
		    imagename, error);
		goto done;
	}

	lockmgr(&firmware_lock, LK_EXCLUSIVE);
	fp = lookup(imagename, NULL);
	if (fp == NULL || fp->file != NULL) {
		lockmgr(&firmware_lock, LK_RELEASE);
		if (fp == NULL)
			kprintf("%s: firmware image loaded, "
			    "but did not register\n", imagename);
		(void) linker_release_module(imagename, NULL, NULL);
		goto done;
	}
	fp->file = result;	/* record the module identity */
	lockmgr(&firmware_lock, LK_RELEASE);
done:
	wakeup_one(imagename);		/* we're done */
}
Example #27
void
taskq_create_thread(void *arg)
{
    struct taskq *tq = arg;
    int rv;

    mtx_enter(&tq->tq_mtx);

    switch (tq->tq_state) {
    case TQ_S_DESTROYED:
        mtx_leave(&tq->tq_mtx);
        free(tq, M_DEVBUF, sizeof(*tq));
        return;

    case TQ_S_CREATED:
        tq->tq_state = TQ_S_RUNNING;
        break;

    default:
        panic("unexpected %s tq state %d", tq->tq_name, tq->tq_state);
    }

    do {
        tq->tq_running++;
        mtx_leave(&tq->tq_mtx);

        rv = kthread_create(taskq_thread, tq, NULL, tq->tq_name);

        mtx_enter(&tq->tq_mtx);
        if (rv != 0) {
            printf("unable to create thread for \"%s\" taskq\n",
                   tq->tq_name);

            tq->tq_running--;
            /* could have been destroyed during kthread_create */
            if (tq->tq_state == TQ_S_DESTROYED &&
                    tq->tq_running == 0)
                wakeup_one(&tq->tq_running);
            break;
        }
    } while (tq->tq_running < tq->tq_nthreads);

    mtx_leave(&tq->tq_mtx);
}
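The wakeup_one(&tq->tq_running) in the failure path pairs with a destroy-side wait like the sketch below. Names follow the snippet; OpenBSD's real taskq_destroy() differs in detail, and the wmesg is illustrative.

static void
taskq_destroy_wait_sketch(struct taskq *tq)
{
    mtx_enter(&tq->tq_mtx);
    tq->tq_state = TQ_S_DESTROYED;
    /* Rouse any workers parked in taskq_next_work()'s "bored" sleep. */
    wakeup(tq);
    while (tq->tq_running > 0)
        msleep(&tq->tq_running, &tq->tq_mtx, PWAIT, "tqdstry", 0);
    mtx_leave(&tq->tq_mtx);
}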
Example #28
void
fuse_insert_message(struct fuse_ticket *ftick)
{
	debug_printf("ftick=%p\n", ftick);

	if (ftick->tk_flag & FT_DIRTY) {
		panic("FUSE: ticket reused without being refreshed");
	}
	ftick->tk_flag |= FT_DIRTY;

	if (fdata_get_dead(ftick->tk_data)) {
		return;
	}
	fuse_lck_mtx_lock(ftick->tk_data->ms_mtx);
	fuse_ms_push(ftick);
	wakeup_one(ftick->tk_data);
	selwakeuppri(&ftick->tk_data->ks_rsel, PZERO + 1);
	fuse_lck_mtx_unlock(ftick->tk_data->ms_mtx);
}
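The reader this wakes sits in the /dev/fuse read path, sleeping on `data` until a ticket arrives or the session dies. A hedged sketch follows, assuming fuse_ms_pop() as the dequeue helper; real error and signal handling are omitted.

static struct fuse_ticket *
fuse_device_read_sketch(struct fuse_data *data)
{
	struct fuse_ticket *ftick;

	fuse_lck_mtx_lock(data->ms_mtx);
	while ((ftick = fuse_ms_pop(data)) == NULL) {
		if (fdata_get_dead(data))
			break;
		/* Woken by fuse_insert_message() or fdata_set_dead(). */
		msleep(data, &data->ms_mtx, PCATCH, "fu_msg", 0);
	}
	fuse_lck_mtx_unlock(data->ms_mtx);
	return (ftick);
}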
Example #29
static void
pmclog_stop_kthread(struct pmc_owner *po)
{
	/*
	 * Close the file to force the thread out of fo_write,
	 * unset flag, wakeup the helper thread,
	 * wait for it to exit
	 */

	if (po->po_file != NULL)
		fo_close(po->po_file, curthread);

	mtx_lock(&pmc_kthread_mtx);
	po->po_flags &= ~PMC_PO_OWNS_LOGFILE;
	wakeup_one(po);
	if (po->po_kthread)
		msleep(po->po_kthread, &pmc_kthread_mtx, PPAUSE, "pmckstp", 0);
	mtx_unlock(&pmc_kthread_mtx);
}
Example #30
static void
tws_reinit(void *arg)
{
    struct tws_softc *sc = (struct tws_softc *)arg;
    int timeout_val=0;
    int try=2;
    int done=0;


//  device_printf(sc->tws_dev,  "Waiting for Controller Ready\n");
    while ( !done && try ) {
        if ( tws_ctlr_ready(sc) ) {
            done = 1;
            break;
        } else {
            timeout_val += 5;
            if ( timeout_val >= TWS_RESET_TIMEOUT ) {
               timeout_val = 0;
               if ( try )
                   tws_assert_soft_reset(sc);
               try--;
            }
            mtx_sleep(sc, &sc->gen_lock, 0, "tws_reinit", 5*hz);
        }
    }

    if (!done) {
        device_printf(sc->tws_dev,  "FAILED to get Controller Ready!\n");
        return;
    }

    sc->obfl_q_overrun = false;
//  device_printf(sc->tws_dev,  "Sending initConnect\n");
    if ( tws_init_connect(sc, tws_queue_depth) ) {
        TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit);
    }
    tws_init_obfl_q(sc);

    tws_turn_on_interrupts(sc);

    wakeup_one(sc);
}