Example #1
0
static int
bcm2835_audio_detach(device_t dev)
{
	int r;
	struct bcm2835_audio_info *sc;
	sc = pcm_getdevinfo(dev);

	/* Stop worker thread */
	BCM2835_AUDIO_LOCK(sc);
	sc->worker_state = WORKER_STOPPING;
	cv_signal(&sc->worker_cv);
	/* Wait for thread to exit */
	while (sc->worker_state != WORKER_STOPPED)
		cv_wait_sig(&sc->worker_cv, &sc->lock);
	BCM2835_AUDIO_UNLOCK(sc);

	r = pcm_unregister(dev);
	if (r)
		return r;

	mtx_destroy(&sc->lock);
	cv_destroy(&sc->worker_cv);

	bcm2835_audio_release(sc);

	free(sc, M_DEVBUF);

	return 0;
}
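A minimal sketch of the worker-side half of this handshake, assuming the same WORKER_* states and worker_cv shown above (this part is not in the excerpt): before exiting, the worker acknowledges WORKER_STOPPING so the detach loop can stop waiting.

	/* Hypothetical worker-exit path; field names reuse those shown above. */
	BCM2835_AUDIO_LOCK(sc);
	if (sc->worker_state == WORKER_STOPPING) {
		sc->worker_state = WORKER_STOPPED;
		cv_signal(&sc->worker_cv);
	}
	BCM2835_AUDIO_UNLOCK(sc);
	kproc_exit(0);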
Example #2
0
static void
bcm2835_audio_update_controls(struct bcm2835_audio_info *sc)
{
	VC_AUDIO_MSG_T m;
	int ret, db;

	VCHIQ_VCHI_LOCK(sc);
	if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
		vchi_service_use(sc->vchi_handle);

		sc->msg_result = -1;

		m.type = VC_AUDIO_MSG_TYPE_CONTROL;
		m.u.control.dest = sc->dest;
		if (sc->volume > 99)
			sc->volume = 99;
		db = db_levels[sc->volume/5];
		m.u.control.volume = VCHIQ_AUDIO_VOLUME(db);

		ret = vchi_msg_queue(sc->vchi_handle,
		    &m, sizeof m, VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);

		if (ret != 0)
			printf("%s: vchi_msg_queue failed (err %d)\n", __func__, ret);

		mtx_lock(&sc->msg_avail_lock);
		cv_wait_sig(&sc->msg_avail_cv, &sc->msg_avail_lock);
		if (sc->msg_result)
			printf("%s failed: %d\n", __func__, sc->msg_result);
		mtx_unlock(&sc->msg_avail_lock);

		vchi_service_release(sc->vchi_handle);
	}
	VCHIQ_VCHI_UNLOCK(sc);
}
Example #3
0
static void
bcm2835_audio_worker(void *data)
{
	struct bcm2835_audio_info *sc = (struct bcm2835_audio_info *)data;
	struct bcm2835_audio_chinfo *ch = &sc->pch;
	mtx_lock(&sc->data_lock);
	while(1) {

		if (sc->unloading)
			break;

		if ((ch->playback_state == PLAYBACK_PLAYING) &&
		    (vchiq_unbuffered_bytes(ch) >= VCHIQ_AUDIO_PACKET_SIZE)
		    && (ch->free_buffer >= VCHIQ_AUDIO_PACKET_SIZE)) {
			bcm2835_audio_write_samples(ch);
		} else {
			if (ch->playback_state == PLAYBACK_STOPPING) {
				bcm2835_audio_reset_channel(&sc->pch);
				ch->playback_state = PLAYBACK_IDLE;
			}

			cv_wait_sig(&sc->data_cv, &sc->data_lock);

			if (ch->playback_state == PLAYBACK_STARTING) {
				/* Give it initial kick */
				chn_intr(sc->pch.channel);
				ch->playback_state = PLAYBACK_PLAYING;
			}
		}
	}
	mtx_unlock(&sc->data_lock);

	kproc_exit(0);
}
Example #4
0
/*ARGSUSED*/
static	int
logread(struct cdev *dev, struct uio *uio, int flag)
{
	char buf[128];
	struct msgbuf *mbp = msgbufp;
	int error = 0, l;

	mtx_lock(&msgbuf_lock);
	while (msgbuf_getcount(mbp) == 0) {
		if (flag & IO_NDELAY) {
			mtx_unlock(&msgbuf_lock);
			return (EWOULDBLOCK);
		}
		if ((error = cv_wait_sig(&log_wakeup, &msgbuf_lock)) != 0) {
			mtx_unlock(&msgbuf_lock);
			return (error);
		}
	}

	while (uio->uio_resid > 0) {
		l = imin(sizeof(buf), uio->uio_resid);
		l = msgbuf_getbytes(mbp, buf, l);
		if (l == 0)
			break;
		mtx_unlock(&msgbuf_lock);
		error = uiomove(buf, l, uio);
		if (error || uio->uio_resid == 0)
			return (error);
		mtx_lock(&msgbuf_lock);
	}
	mtx_unlock(&msgbuf_lock);
	return (error);
}
Example #5
0
/* ARGSUSED */
static int
cnread(dev_t dev, struct uio *uio, struct cred *cred)
{
	kcondvar_t	sleep_forever;
	kmutex_t	sleep_forever_mutex;

	if (rconsvp == NULL) {
		/*
		 * Go to sleep forever.  This seems like the least
		 * harmful thing to do if there's no console.
		 * EOF might be better if we're ending up in
		 * single-user mode.
		 */
		cv_init(&sleep_forever, NULL, CV_DRIVER, NULL);
		mutex_init(&sleep_forever_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_enter(&sleep_forever_mutex);
		(void) cv_wait_sig(&sleep_forever, &sleep_forever_mutex);
		mutex_exit(&sleep_forever_mutex);
		return (EIO);
	}

	if (rconsvp->v_stream != NULL)
		return (strread(rconsvp, uio, cred));
	else
		return (cdev_read(rconsdev, uio, cred));
}
Example #6
0
int
down_interruptible(struct semaphore *s)
{
	int ret ;

	ret = 0;

	mtx_lock(&s->mtx);

	while (s->value == 0) {
		s->waiters++;
		ret = cv_wait_sig(&s->cv, &s->mtx);
		s->waiters--;

		if (ret == EINTR) {
			mtx_unlock(&s->mtx);
			return (-EINTR);
		}

		if (ret == ERESTART)
			continue;
	}

	s->value--;
	mtx_unlock(&s->mtx);

	return (0);
}
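A caller-side sketch, assuming the usual semaphore API around this shim (the calling function and "up()" are illustrative, not taken from the source): the only outcomes are 0 and -EINTR, so the caller backs out when a signal interrupts the wait.

/* Hypothetical caller; names are placeholders. */
static int
example_do_work(struct semaphore *sem)
{
	if (down_interruptible(sem) != 0)
		return (EINTR);		/* interrupted; semaphore not taken */
	/* ... section protected by the semaphore ... */
	up(sem);			/* assumed release counterpart */
	return (0);
}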
Example #7
0
static void
bcm2835_audio_update_params(struct bcm2835_audio_info *sc, struct bcm2835_audio_chinfo *ch)
{
	VC_AUDIO_MSG_T m;
	int ret;

	VCHIQ_VCHI_LOCK(sc);
	if (sc->vchi_handle != VCHIQ_SERVICE_HANDLE_INVALID) {
		vchi_service_use(sc->vchi_handle);

		sc->msg_result = -1;

		m.type = VC_AUDIO_MSG_TYPE_CONFIG;
		m.u.config.channels = AFMT_CHANNEL(ch->fmt);
		m.u.config.samplerate = ch->spd;
		m.u.config.bps = AFMT_BIT(ch->fmt);

		ret = vchi_msg_queue(sc->vchi_handle,
		    &m, sizeof m, VCHI_FLAGS_BLOCK_UNTIL_QUEUED, NULL);

		if (ret != 0)
			printf("%s: vchi_msg_queue failed (err %d)\n", __func__, ret);

		mtx_lock(&sc->msg_avail_lock);
		cv_wait_sig(&sc->msg_avail_cv, &sc->msg_avail_lock);
		if (sc->msg_result)
			printf("%s failed: %d\n", __func__, sc->msg_result);
		mtx_unlock(&sc->msg_avail_lock);

		vchi_service_release(sc->vchi_handle);
	}
	VCHIQ_VCHI_UNLOCK(sc);
}
Example #8
/*
 * port_alloc_event_block() has the same functionality of port_alloc_event() +
 * - it blocks if not enough event slots are available and
 * - it blocks if not enough memory is available.
 * Currently port_dispatch() is using this function to increase the
 * reliability of event delivery for library event sources.
 */
int
port_alloc_event_block(port_t *pp, int source, int flags,
    port_kevent_t **pkevpp)
{
	port_kevent_t *pkevp =
	    kmem_cache_alloc(port_control.pc_cache, KM_SLEEP);

	mutex_enter(&pp->port_queue.portq_mutex);
	while (pp->port_curr >= pp->port_max_events) {
		if (!cv_wait_sig(&pp->port_cv, &pp->port_queue.portq_mutex)) {
			/* signal detected */
			mutex_exit(&pp->port_queue.portq_mutex);
			kmem_cache_free(port_control.pc_cache, pkevp);
			return (EINTR);
		}
	}
	pp->port_curr++;
	mutex_exit(&pp->port_queue.portq_mutex);

	bzero(pkevp, sizeof (port_kevent_t));
	mutex_init(&pkevp->portkev_lock, NULL, MUTEX_DEFAULT, NULL);
	pkevp->portkev_flags = flags;
	pkevp->portkev_port = pp;
	pkevp->portkev_source = source;
	pkevp->portkev_pid = curproc->p_pid;
	*pkevpp = pkevp;
	return (0);
}
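A hypothetical caller fragment (an assumption; "pp", "source" and "flags" stand in for values the caller already has): since the routine returns EINTR once cv_wait_sig() detects a pending signal, the caller simply propagates the error instead of retrying.

	port_kevent_t	*pkevp;
	int		error;

	error = port_alloc_event_block(pp, source, flags, &pkevp);
	if (error != 0)
		return (error);	/* EINTR: interrupted while waiting for a slot */
	/* ... fill in *pkevp and deliver the event ... */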
Example #9
0
/* ARGSUSED */
static void
port_close_sourcefd(void *arg, int port, pid_t pid, int lastclose)
{
	port_t		*pp = arg;
	port_fdcache_t	*pcp;
	portfd_t	**hashtbl;
	polldat_t	*pdp;
	polldat_t	*pdpnext;
	int		index;

	pcp = pp->port_queue.portq_pcp;
	if (pcp == NULL)
		/* no cache available -> nothing to do */
		return;

	mutex_enter(&pcp->pc_lock);
	/*
	 * Scan the cache and free all allocated portfd_t and port_kevent_t
	 * structures.
	 */
	hashtbl = pcp->pc_hash;
	for (index = 0; index < pcp->pc_hashsize; index++) {
		for (pdp = PFTOD(hashtbl[index]); pdp != NULL; pdp = pdpnext) {
			pdpnext = pdp->pd_hashnext;
			if (pid == pdp->pd_portev->portkev_pid) {
				/*
				 * remove polldat + port_event_t from cache
				 * only when current process did the
				 * association.
				 */
				port_remove_portfd(pdp, pcp);
			}
		}
	}
	if (lastclose) {
		/*
		 * Wait for all the portfd's to be freed.
		 * The remaining portfd_t's are the ones we did not
		 * free in port_remove_portfd since some other thread
		 * is closing the fd. These threads will free the portfd_t's
		 * once we drop the pc_lock mutex.
		 */
		while (pcp->pc_fdcount) {
			(void) cv_wait_sig(&pcp->pc_lclosecv, &pcp->pc_lock);
		}
		/* event port vnode will be destroyed -> remove everything */
		pp->port_queue.portq_pcp = NULL;
	}
	mutex_exit(&pcp->pc_lock);
	/*
	 * last close:
	 * pollwakeup() can not further interact with this cache
	 * (all polldat structs are removed from pollhead entries).
	 */
	if (lastclose)
		port_pcache_destroy(pcp);
}
Example #10
int
xb_read(void *data, unsigned len)
{
	volatile struct xenstore_domain_interface *intf =
	    xs_domain_interface(xb_addr);
	XENSTORE_RING_IDX cons, prod;
	extern int do_polled_io;

	while (len != 0) {
		unsigned int avail;
		const char *src;

		mutex_enter(&xb_wait_lock);
		while (intf->rsp_cons == intf->rsp_prod) {
			if (interrupts_unleashed && !do_polled_io) {
				if (cv_wait_sig(&xb_wait_cv,
				    &xb_wait_lock) == 0) {
					mutex_exit(&xb_wait_lock);
					return (EINTR);
				}
			} else { /* polled mode needed for early probes */
				(void) HYPERVISOR_yield();
			}
		}
		mutex_exit(&xb_wait_lock);
		/* Read indexes, then verify. */
		cons = intf->rsp_cons;
		prod = intf->rsp_prod;
		membar_enter();
		if (!check_indexes(cons, prod))
			return (EIO);

		src = get_input_chunk(cons, prod, (char *)intf->rsp, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		/* We must read header before we read data. */
		membar_consumer();

		(void) memcpy(data, src, avail);
		data = (void *)((uintptr_t)data + avail);
		len -= avail;

		/* Other side must not see free space until we've copied out */
		membar_enter();
		intf->rsp_cons += avail;

		/* Implies mb(): they will see new header. */
		ec_notify_via_evtchn(xen_info->store_evtchn);
	}

	return (0);
}
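A sketch of the index check used above, assuming the conventional XenStore ring invariant (the helper itself is not part of this excerpt): the producer may run at most one full ring ahead of the consumer.

static int
check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
{
	/* Anything larger than a full ring means the indexes are corrupt. */
	return ((prod - cons) <= XENSTORE_RING_SIZE);
}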
Example #11
0
/*
 * User-level interface: read, select.
 * (User cannot write an event queue.)
 */
int
ev_read(struct evvar *ev, struct uio *uio, int flags)
{
	int n, cnt, put, error;

	/*
	 * Make sure we can return at least 1.
	 */
	if (uio->uio_resid < sizeof(struct firm_event))
		return (EMSGSIZE);	/* ??? */
	mutex_enter(ev->ev_lock);
	while (ev->ev_get == ev->ev_put) {
		if (flags & IO_NDELAY) {
			mutex_exit(ev->ev_lock);
			return (EWOULDBLOCK);
		}
		ev->ev_wanted = true;
		error = cv_wait_sig(&ev->ev_cv, ev->ev_lock);
		if (error != 0) {
			mutex_exit(ev->ev_lock);
			return (error);
		}
	}
	/*
	 * Move firm_events from tail end of queue (there is at least one
	 * there).
	 */
	if (ev->ev_put < ev->ev_get)
		cnt = EV_QSIZE - ev->ev_get;	/* events in [get..QSIZE) */
	else
		cnt = ev->ev_put - ev->ev_get;	/* events in [get..put) */
	put = ev->ev_put;
	mutex_exit(ev->ev_lock);
	n = howmany(uio->uio_resid, sizeof(struct firm_event));
	if (cnt > n)
		cnt = n;
	error = uiomove((void *)&ev->ev_q[ev->ev_get],
	    cnt * sizeof(struct firm_event), uio);
	n -= cnt;
	/*
	 * If we do not wrap to 0, used up all our space, or had an error,
	 * stop.  Otherwise move from front of queue to put index, if there
	 * is anything there to move.
	 */
	if ((ev->ev_get = (ev->ev_get + cnt) % EV_QSIZE) != 0 ||
	    n == 0 || error || (cnt = put) == 0)
		return (error);
	if (cnt > n)
		cnt = n;
	error = uiomove((void *)&ev->ev_q[0],
	    cnt * sizeof(struct firm_event), uio);
	ev->ev_get = cnt;
	return (error);
}
Example #12
0
HgfsReq *
HgfsGetNewReq(HgfsSuperInfo *sip)       // IN: Superinfo containing free list
{
   HgfsReq *newReq;

   DEBUG(VM_DEBUG_REQUEST, "HgfsGetNewReq().\n");

   ASSERT(sip);

   /*
    * Here we atomically get the next free request from the free list and set
    * that request's state to ALLOCATED.
    */
   mutex_enter(&sip->reqFreeMutex);

   /* Wait for a request structure if there aren't any free */
   while (HgfsListIsEmpty(&sip->reqFreeList)) {
      /*
       * If the list is empty, we wait on the condition variable which is
       * unconditionally signaled whenever a request is destroyed.
       */
      if (cv_wait_sig(&sip->reqFreeCondVar, &sip->reqFreeMutex) == 0) {
         /*
          * We were interrupted while waiting for a request, so we must return
          * NULL and release the mutex.
          */
         newReq = NULL;
         goto out;
      }
   }

   newReq = HGFS_FREE_REQ_LIST_HEAD(sip);

   HgfsDebugPrintReq("HgfsGetNewReq", newReq);

   /* Failure of these indicates error in program's logic */
   ASSERT(newReq && newReq->state == HGFS_REQ_UNUSED);

   /* Take request off the free list and indicate it has been ALLOCATED */
   DblLnkLst_Unlink1(&newReq->listNode);
   newReq->state = HGFS_REQ_ALLOCATED;

   /* Clear packet of request before allocating to clients. */
   bzero(newReq->packet, sizeof newReq->packet);

   DEBUG(VM_DEBUG_LIST, "Dequeued from free list: %s", newReq->packet);
   HgfsDebugPrintReqList(&sip->reqFreeList);

out:
   mutex_exit(&sip->reqFreeMutex);

   DEBUG(VM_DEBUG_REQUEST, "HgfsGetNewReq() done.\n");
   return newReq;
}
Example #13
int
xb_write(const void *data, unsigned len)
{
	volatile struct xenstore_domain_interface *intf =
	    xs_domain_interface(xb_addr);
	XENSTORE_RING_IDX cons, prod;
	extern int do_polled_io;

	while (len != 0) {
		void *dst;
		unsigned int avail;

		mutex_enter(&xb_wait_lock);
		while ((intf->req_prod - intf->req_cons) ==
		    XENSTORE_RING_SIZE) {
			if (interrupts_unleashed && !do_polled_io) {
				if (cv_wait_sig(&xb_wait_cv,
				    &xb_wait_lock) == 0) {
					mutex_exit(&xb_wait_lock);
					return (EINTR);
				}
			} else { /* polled mode needed for early probes */
				(void) HYPERVISOR_yield();
			}
		}
		mutex_exit(&xb_wait_lock);
		/* Read indexes, then verify. */
		cons = intf->req_cons;
		prod = intf->req_prod;
		membar_enter();
		if (!check_indexes(cons, prod))
			return (EIO);

		dst = get_output_chunk(cons, prod, (char *)intf->req, &avail);
		if (avail == 0)
			continue;
		if (avail > len)
			avail = len;

		(void) memcpy(dst, data, avail);
		data = (void *)((uintptr_t)data + avail);
		len -= avail;

		/* Other side must not see new header until data is there. */
		membar_producer();
		intf->req_prod += avail;

		/* This implies mb() before other side sees interrupt. */
		ec_notify_via_evtchn(xen_info->store_evtchn);
	}

	return (0);
}
Example #14
0
/*
 * Generate some data from cprng.  Block or return zero bytes,
 * depending on flags & FNONBLOCK, if cprng was created without
 * CPRNG_REKEY_ANY.
 */
size_t
cprng_strong(struct cprng_strong *cprng, void *buffer, size_t bytes, int flags)
{
	size_t result;

	/* Caller must loop for more than CPRNG_MAX_LEN bytes.  */
	bytes = MIN(bytes, CPRNG_MAX_LEN);

	mutex_enter(&cprng->cs_lock);

	if (ISSET(cprng->cs_flags, CPRNG_REKEY_ANY)) {
		if (!cprng->cs_ready)
			cprng_strong_reseed(cprng);
	} else {
		while (!cprng->cs_ready) {
			if (ISSET(flags, FNONBLOCK) ||
			    !ISSET(cprng->cs_flags, CPRNG_USE_CV) ||
			    cv_wait_sig(&cprng->cs_cv, &cprng->cs_lock)) {
				result = 0;
				goto out;
			}
		}
	}

	/*
	 * Debit the entropy if requested.
	 *
	 * XXX Kludge for /dev/random `information-theoretic' properties.
	 */
	if (__predict_false(ISSET(cprng->cs_flags, CPRNG_HARD))) {
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
		if (bytes < cprng->cs_remaining) {
			cprng->cs_remaining -= bytes;
		} else {
			bytes = cprng->cs_remaining;
			cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
			cprng->cs_ready = false;
			rndsink_schedule(cprng->cs_rndsink);
		}
		KASSERT(bytes <= NIST_BLOCK_KEYLEN_BYTES);
		KASSERT(0 < cprng->cs_remaining);
		KASSERT(cprng->cs_remaining <= NIST_BLOCK_KEYLEN_BYTES);
	}

	cprng_strong_generate(cprng, buffer, bytes);
	result = bytes;

out:	mutex_exit(&cprng->cs_lock);
	return result;
}
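A caller-side sketch with illustrative names ("buf", "len"): because cprng_strong() caps each call at CPRNG_MAX_LEN and may return 0 when it would block or the wait is interrupted, callers loop until they have enough bytes.

	size_t have = 0, n;

	while (have < len) {
		n = cprng_strong(cprng, (uint8_t *)buf + have, len - have, flags);
		if (n == 0)
			break;		/* would block, or the wait was interrupted */
		have += n;
	}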
Example #15
0
/*
 * The handling of small unions, like the sigval argument to sigqueue,
 * is architecture dependent.  We have adopted the convention that the
 * value itself is passed in the storage which crosses the kernel
 * protection boundary.  This procedure will accept a scalar argument,
 * and store it in the appropriate value member of the sigsend_t structure.
 */
int
sigqueue(pid_t pid, int sig, /* union sigval */ void *value,
	int si_code, int block)
{
	int error;
	sigsend_t v;
	sigqhdr_t *sqh;
	proc_t *p = curproc;

	/* The si_code value must indicate the signal will be queued */
	if (pid <= 0 || !sigwillqueue(sig, si_code))
		return (set_errno(EINVAL));

	if ((sqh = p->p_sigqhdr) == NULL) {
		/* Allocate sigqueue pool first time */
		sqh = sigqhdralloc(sizeof (sigqueue_t), _SIGQUEUE_MAX);
		mutex_enter(&p->p_lock);
		if (p->p_sigqhdr == NULL) {
			/* hang the pool head on proc */
			p->p_sigqhdr = sqh;
		} else {
			/* another lwp allocated the pool, free ours */
			sigqhdrfree(sqh);
			sqh = p->p_sigqhdr;
		}
		mutex_exit(&p->p_lock);
	}

	do {
		bzero(&v, sizeof (v));
		v.sig = sig;
		v.checkperm = 1;
		v.sicode = si_code;
		v.value.sival_ptr = value;
		if ((error = sigqkill(pid, &v)) != EAGAIN || !block)
			break;
		/* block waiting for another chance to allocate a sigqueue_t */
		mutex_enter(&sqh->sqb_lock);
		while (sqh->sqb_count == 0) {
			if (!cv_wait_sig(&sqh->sqb_cv, &sqh->sqb_lock)) {
				error = EINTR;
				break;
			}
		}
		mutex_exit(&sqh->sqb_lock);
	} while (error == EAGAIN);

	if (error)
		return (set_errno(error));
	return (0);
}
Example #16
0
/* Acquire exclusive access to MCDI for the duration of a request. */
static void
sfxge_mcdi_acquire(struct sfxge_mcdi *mcdi)
{

	mtx_lock(&mcdi->lock);
	KASSERT(mcdi->state != SFXGE_MCDI_UNINITIALIZED,
	    ("MCDI not initialized"));

	while (mcdi->state != SFXGE_MCDI_INITIALIZED)
		(void)cv_wait_sig(&mcdi->cv, &mcdi->lock);
	mcdi->state = SFXGE_MCDI_BUSY;

	mtx_unlock(&mcdi->lock);
}
Example #17
int
cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks)
{
	struct timespec ts;
	extern int hz;
	int rv;

	if (ticks == 0) {
		rv = cv_wait_sig(cv, mtx);
	} else {
		ts.tv_sec = ticks / hz;
		ts.tv_nsec = (ticks % hz) * (1000000000/hz);
		rv = docvwait(cv, mtx, &ts);
	}

	return rv;
}
Example #18
0
int
afs_osi_SleepSig(void *event)
{
    struct afs_event *evp;
    int seq, code = 0;

    evp = afs_getevent(event);
    seq = evp->seq;
    while (seq == evp->seq) {
	AFS_ASSERT_GLOCK();
	if (cv_wait_sig(&evp->cond, &afs_global_lock) == 0) {
	    code = EINTR;
	    break;
	}
    }
    relevent(evp);
    return code;
}
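A hypothetical caller fragment ("event" is whatever address the caller sleeps on; not from the source): a nonzero return is EINTR, and interruptible callers hand it back up.

	int code;

	code = afs_osi_SleepSig(event);
	if (code != 0)
		return (code);		/* EINTR: the sleep was interrupted */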
Example #19
0
int
fuse_queue_request_wait(fuse_session_t *se, fuse_msg_node_t *req_p)
{
	int err = 0;
	int interrupted = 0;

	req_p->frd_on_request_complete = frd_on_request_complete_wakeup;
	fuse_queue_request_nowait(se, req_p);

	FUSE_SESSION_MUTEX_LOCK(se);

	while (req_p->fmn_state != FUSE_MSG_STATE_DONE) {
		if (cv_wait_sig(&req_p->fmn_cv, &se->session_mutx) != 0) {
			continue;
		} else {
			interrupted = 1;
			break;
		}
	}
	if (interrupted == 0) {
		req_p->opdata.outdata = req_p->opdata.iovbuf.base;
		FUSE_SESSION_MUTEX_UNLOCK(se);
		return (err);
	}

	DTRACE_PROBE3(fuse_queue_request_wait_err_no_response,
	    char *, "no response from daemon",
	    fuse_session_t *, se, fuse_msg_node_t *, req_p);

	if (req_p->fmn_state == FUSE_MSG_STATE_DONE) {
		goto err;
	}
	if (req_p->fmn_state != FUSE_MSG_STATE_QUEUE)
		req_p->fmn_state = FUSE_MSG_STATE_SIG;

	while (req_p->fmn_state != FUSE_MSG_STATE_DONE)
		cv_wait(&req_p->fmn_cv, &se->session_mutx);
err:
	req_p->opdata.outdata = NULL;
	err = EINTR;
	FUSE_SESSION_MUTEX_UNLOCK(se);

	return (err);
}
Example #20
/*
 * wusb_df_serialize_access:
 *    Get the serial synchronization object before returning.
 *
 * Arguments:
 *    wusb_dfp - Pointer to wusb_df state structure
 *    waitsig - Set to:
 *	WUSB_DF_SER_SIG - to wait such that a signal can interrupt
 *	WUSB_DF_SER_NOSIG - to wait such that a signal cannot interrupt
 */
static int
wusb_df_serialize_access(wusb_df_state_t *wusb_dfp, boolean_t waitsig)
{
	int rval = 1;

	ASSERT(mutex_owned(&wusb_dfp->wusb_df_mutex));

	while (wusb_dfp->wusb_df_serial_inuse) {
		if (waitsig == WUSB_DF_SER_SIG) {
			rval = cv_wait_sig(&wusb_dfp->wusb_df_serial_cv,
			    &wusb_dfp->wusb_df_mutex);
		} else {
			cv_wait(&wusb_dfp->wusb_df_serial_cv,
			    &wusb_dfp->wusb_df_mutex);
		}
	}
	wusb_dfp->wusb_df_serial_inuse = B_TRUE;

	return (rval);
}
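A sketch of the matching release step, assuming the usual shape of this acquire/release pattern (the real counterpart is not shown in this excerpt): the owner clears the in-use flag under the same mutex and wakes the waiters. Callers that pass WUSB_DF_SER_SIG should also check for a 0 return above, which reports that a signal arrived during the wait.

/* Hypothetical counterpart; the name is illustrative. */
static void
wusb_df_release_access_sketch(wusb_df_state_t *wusb_dfp)
{
	ASSERT(mutex_owned(&wusb_dfp->wusb_df_mutex));

	wusb_dfp->wusb_df_serial_inuse = B_FALSE;
	cv_broadcast(&wusb_dfp->wusb_df_serial_cv);
}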
Example #21
0
void
pcppi_bell_locked(pcppi_tag_t self, int pitch, int period, int slp)
{
	struct pcppi_softc *sc = self;

	if (sc->sc_bellactive) {
		if (sc->sc_timeout) {
			sc->sc_timeout = 0;
			callout_stop(&sc->sc_bell_ch);
		}
		cv_broadcast(&sc->sc_slp);
	}
	if (pitch == 0 || period == 0) {
		pcppi_bell_stop(sc);
		sc->sc_bellpitch = 0;
		return;
	}
	if (!sc->sc_bellactive || sc->sc_bellpitch != pitch) {
#if NATTIMER > 0
		if (sc->sc_timer != NULL)
			attimer_set_pitch(sc->sc_timer, pitch);
#endif
		/* enable speaker */
		bus_space_write_1(sc->sc_iot, sc->sc_ppi_ioh, 0,
			bus_space_read_1(sc->sc_iot, sc->sc_ppi_ioh, 0)
			| PIT_SPKR);
	}
	sc->sc_bellpitch = pitch;

	sc->sc_bellactive = 1;
	if (slp & PCPPI_BELL_POLL) {
		delay((period * 1000000) / hz);
		pcppi_bell_stop(sc);
	} else {
		sc->sc_timeout = 1;
		callout_schedule(&sc->sc_bell_ch, period);
		if (slp & PCPPI_BELL_SLEEP) {
			cv_wait_sig(&sc->sc_slp, &tty_lock);
		}
	}
}
Example #22
static int
lx_ptm_read_start(dev_t dev)
{
	lx_ptm_ops_t	*lpo = lx_ptm_lpo_lookup(DEVT_TO_INDEX(dev));

	mutex_enter(&lpo->lpo_rops_lock);
	ASSERT(lpo->lpo_rops >= 0);

	/* Wait for other read operations to finish */
	while (lpo->lpo_rops != 0) {
		if (cv_wait_sig(&lpo->lpo_rops_cv, &lpo->lpo_rops_lock) == 0) {
			mutex_exit(&lpo->lpo_rops_lock);
			return (-1);
		}
	}

	/* Start a read operation */
	VERIFY(++lpo->lpo_rops == 1);
	mutex_exit(&lpo->lpo_rops_lock);
	return (0);
}
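A sketch of the corresponding end-of-read step, assuming the usual shape of this start/end pairing (the real routine is not part of this excerpt): dropping the count back to zero and signalling releases the next reader blocked in the loop above.

/* Hypothetical counterpart; the name is illustrative. */
static void
lx_ptm_read_end_sketch(dev_t dev)
{
	lx_ptm_ops_t	*lpo = lx_ptm_lpo_lookup(DEVT_TO_INDEX(dev));

	mutex_enter(&lpo->lpo_rops_lock);
	ASSERT(lpo->lpo_rops > 0);
	lpo->lpo_rops--;
	cv_signal(&lpo->lpo_rops_cv);
	mutex_exit(&lpo->lpo_rops_lock);
}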
Example #23
0
int
wait_for_completion_interruptible(struct completion *c)
{
	int res = 0;

	mtx_lock(&c->lock);
	while (c->done == 0) {
		res = cv_wait_sig(&c->cv, &c->lock);
		if (res)
			goto out;
	}

	_completion_claim(c);

out:
	mtx_unlock(&c->lock);

	if ((res == EINTR) || (res == ERESTART))
		res = -ERESTART;
	return res;
}
Example #24
int
down_interruptible(struct semaphore *s)
{

	mutex_enter(&s->mtx);

	while (s->value == 0) {
		s->waiters++;
		int ret = cv_wait_sig(&s->cv, &s->mtx);
		s->waiters--;

		if (ret == EINTR || ret == ERESTART) {
			mutex_exit(&s->mtx);
			return -EINTR;
		}
	}

	s->value--;
	mutex_exit(&s->mtx);

	return 0;
}
Example #25
0
/*
 * wait until local tx buffer drains.
 * 'timeout' is in seconds, zero means wait forever
 */
static int
uftdi_wait_tx_drain(uftdi_state_t *uf, int timeout)
{
	clock_t	until;
	int over = 0;

	until = ddi_get_lbolt() + drv_usectohz(1000 * 1000 * timeout);

	while (uf->uf_tx_mp && !over) {
		if (timeout > 0) {
			/* whether timedout or signal pending */
			over = cv_timedwait_sig(&uf->uf_tx_cv,
			    &uf->uf_lock, until) <= 0;
		} else {
			/* whether a signal is pending */
			over = cv_wait_sig(&uf->uf_tx_cv,
			    &uf->uf_lock) == 0;
		}
	}

	return (uf->uf_tx_mp == NULL ? USB_SUCCESS : USB_FAILURE);
}
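A hypothetical call site (an assumption based on the cv waits above, which sleep on uf->uf_lock, so the caller must hold that mutex):

	int rval;

	mutex_enter(&uf->uf_lock);
	rval = uftdi_wait_tx_drain(uf, 10);	/* wait at most roughly 10 seconds */
	mutex_exit(&uf->uf_lock);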
Example #26
0
static int
md_wait_for_event(md_event_queue_t *event_queue, void *ioctl_in,
		md_event_ioctl_t *ioctl, size_t sz,
		int mode, IOLOCK *lockp)
{
	int rval = 0;

	while (event_queue->mdn_front == NULL) {
		event_queue->mdn_waiting++;
		(void) IOLOCK_RETURN(0, lockp);
		rval = cv_wait_sig(&event_queue->mdn_cv, &md_eventq_mx);
		event_queue->mdn_waiting--;
		if ((rval == 0) || (event_queue->mdn_flags &
					MD_EVENT_QUEUE_DESTROY)) {
			global_lock_wait_cnt++;
			mutex_exit(&md_eventq_mx);
			/* reenable single threading of ioctls */
			while (md_ioctl_lock_enter() == EINTR);

			(void) notify_fillin_empty_ioctl
			    ((void *)ioctl, ioctl_in, sz, mode);
			mutex_enter(&md_eventq_mx);
			global_lock_wait_cnt--;
			mutex_exit(&md_eventq_mx);
			return (EINTR);
		}
		/*
		 * reacquire single threading ioctls. Drop eventq_mutex
		 * since md_ioctl_lock_enter can sleep.
		 */
		global_lock_wait_cnt++;
		mutex_exit(&md_eventq_mx);
		while (md_ioctl_lock_enter() == EINTR);
		mutex_enter(&md_eventq_mx);
		global_lock_wait_cnt--;
	}
	return (0);
}
Example #27
0
int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourself.  XXX There is a short race here, as p_nrlwps is only
	 * incremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep. 
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and L_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}
Example #28
0
static int
ams_read(struct cdev *dev, struct uio *uio, int flag)
{
	struct adb_mouse_softc *sc;
	size_t len;
	int8_t outpacket[8];
	int error;

	sc = CDEV_GET_SOFTC(dev);
	if (sc == NULL)
		return (EIO);

	if (uio->uio_resid <= 0)
		return (0);

	mtx_lock(&sc->sc_mtx);

	if (!sc->packet_read_len) {
		if (sc->xdelta == 0 && sc->ydelta == 0 && 
		   sc->buttons == sc->last_buttons) {

			if (flag & O_NONBLOCK) {
				mtx_unlock(&sc->sc_mtx);
				return EWOULDBLOCK;
			}

	
			/* Otherwise, block on new data */
			error = cv_wait_sig(&sc->sc_cv, &sc->sc_mtx);
			if (error) {
				mtx_unlock(&sc->sc_mtx);
				return (error);
			}
		}

		sc->packet[0] = 1 << 7;
		sc->packet[0] |= (!(sc->buttons & 1)) << 2;
		sc->packet[0] |= (!(sc->buttons & 4)) << 1;
		sc->packet[0] |= (!(sc->buttons & 2));

		if (sc->xdelta > 127) {
			sc->packet[1] = 127;
			sc->packet[3] = sc->xdelta - 127;
		} else if (sc->xdelta < -127) {
			sc->packet[1] = -127;
			sc->packet[3] = sc->xdelta + 127;
		} else {
			sc->packet[1] = sc->xdelta;
			sc->packet[3] = 0;
		}

		if (sc->ydelta > 127) {
			sc->packet[2] = 127;
			sc->packet[4] = sc->ydelta - 127;
		} else if (sc->ydelta < -127) {
			sc->packet[2] = -127;
			sc->packet[4] = sc->ydelta + 127;
		} else {
			sc->packet[2] = sc->ydelta;
			sc->packet[4] = 0;
		}

		/* No Z movement */
		sc->packet[5] = 0;
		sc->packet[6] = 0; 

		sc->packet[7] = ~((uint8_t)(sc->buttons >> 3)) & 0x7f;


		sc->last_buttons = sc->buttons;
		sc->xdelta = 0;
		sc->ydelta = 0;

		sc->packet_read_len = sc->mode.packetsize;
	}

	len = (sc->packet_read_len > uio->uio_resid) ? 
		uio->uio_resid : sc->packet_read_len;

	memcpy(outpacket,sc->packet + 
		(sc->mode.packetsize - sc->packet_read_len),len);
	sc->packet_read_len -= len;

	mtx_unlock(&sc->sc_mtx);

	uiomove(outpacket,len,uio);

	return (0);
}
Example #29
int
nskernd_command(intptr_t arg, int mode, int *rvalp)
{
	struct nskernd *udata = NULL;
	uint64_t arg1, arg2;
	int rc;

	*rvalp = 0;
	rc = 0;

	udata = kmem_alloc(sizeof (*udata), KM_SLEEP);
	if (ddi_copyin((void *)arg, udata, sizeof (*udata), mode) < 0) {
		kmem_free(udata, sizeof (*udata));
		return (EFAULT);
	}

	switch (udata->command) {
	case NSKERND_START:		/* User program start */
		*rvalp = nskernd_start(udata->data1);
		break;

	case NSKERND_STOP:		/* User program requesting stop */
		mutex_enter(&nskernd_lock);
		nskernd_cleanup();
		mutex_exit(&nskernd_lock);
		break;

	case NSKERND_WAIT:
		mutex_enter(&nskernd_lock);

		bcopy(udata, &nskernd_kdata, sizeof (*udata));

		if (nskernd_ask > 0)
			cv_signal(&nskernd_ask_cv);

		nskernd_u_wait++;

		if (cv_wait_sig(&nskernd_u_cv, &nskernd_lock) != 0) {
			/*
			 * woken by cv_signal() or cv_broadcast()
			 */
			bcopy(&nskernd_kdata, udata, sizeof (*udata));
		} else {
			/*
			 * signal - the user process has blocked all
			 * signals except for SIGTERM and the
			 * uncatchables, so the process is about to die
			 * and we need to clean up.
			 */
			udata->command = NSKERND_STOP;
			udata->data1 = (uint64_t)1;	 /* cleanup done */

			nskernd_cleanup();
		}

		nskernd_u_wait--;

		mutex_exit(&nskernd_lock);

		if (ddi_copyout(udata, (void *)arg,
		    sizeof (*udata), mode) < 0) {
			rc = EFAULT;
			break;
		}

		break;

	case NSKERND_NEWLWP:
		/* save kmem by freeing the udata structure */
		arg1 = udata->data1;
		kmem_free(udata, sizeof (*udata));
		udata = NULL;
		nsc_runlwp(arg1);
		break;

	case NSKERND_LOCK:
		/* save kmem by freeing the udata structure */
		arg1 = udata->data1;
		arg2 = udata->data2;
		kmem_free(udata, sizeof (*udata));
		udata = NULL;
		nsc_lockchild(arg1, arg2);
		break;

	default:
		cmn_err(CE_WARN, "nskernd: unknown command %d", udata->command);
		rc = EINVAL;
		break;
	}

	if (udata != NULL) {
		kmem_free(udata, sizeof (*udata));
		udata = NULL;
	}

	return (rc);
}
Example #30
0
int
sys_semop(struct lwp *l, const struct sys_semop_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) semid;
		syscallarg(struct sembuf *) sops;
		syscallarg(size_t) nsops;
	} */
	struct proc *p = l->l_proc;
	int semid = SCARG(uap, semid), seq;
	size_t nsops = SCARG(uap, nsops);
	struct sembuf small_sops[SMALL_SOPS];
	struct sembuf *sops;
	struct semid_ds *semaptr;
	struct sembuf *sopptr = NULL;
	struct __sem *semptr = NULL;
	struct sem_undo *suptr = NULL;
	kauth_cred_t cred = l->l_cred;
	int i, error;
	int do_wakeup, do_undos;

	SEM_PRINTF(("call to semop(%d, %p, %zd)\n", semid, SCARG(uap,sops), nsops));

	if (__predict_false((p->p_flag & PK_SYSVSEM) == 0)) {
		mutex_enter(p->p_lock);
		p->p_flag |= PK_SYSVSEM;
		mutex_exit(p->p_lock);
	}

restart:
	if (nsops <= SMALL_SOPS) {
		sops = small_sops;
	} else if (nsops <= seminfo.semopm) {
		sops = kmem_alloc(nsops * sizeof(*sops), KM_SLEEP);
	} else {
		SEM_PRINTF(("too many sops (max=%d, nsops=%zd)\n",
		    seminfo.semopm, nsops));
		return (E2BIG);
	}

	error = copyin(SCARG(uap, sops), sops, nsops * sizeof(sops[0]));
	if (error) {
		SEM_PRINTF(("error = %d from copyin(%p, %p, %zd)\n", error,
		    SCARG(uap, sops), &sops, nsops * sizeof(sops[0])));
		if (sops != small_sops)
			kmem_free(sops, nsops * sizeof(*sops));
		return error;
	}

	mutex_enter(&semlock);
	/* In case of reallocation, we will wait for completion */
	while (__predict_false(sem_realloc_state))
		cv_wait(&sem_realloc_cv, &semlock);

	semid = IPCID_TO_IX(semid);	/* Convert back to zero origin */
	if (semid < 0 || semid >= seminfo.semmni) {
		error = EINVAL;
		goto out;
	}

	semaptr = &sema[semid];
	seq = IPCID_TO_SEQ(SCARG(uap, semid));
	if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
	    semaptr->sem_perm._seq != seq) {
		error = EINVAL;
		goto out;
	}

	if ((error = ipcperm(cred, &semaptr->sem_perm, IPC_W))) {
		SEM_PRINTF(("error = %d from ipaccess\n", error));
		goto out;
	}

	for (i = 0; i < nsops; i++)
		if (sops[i].sem_num >= semaptr->sem_nsems) {
			error = EFBIG;
			goto out;
		}

	/*
	 * Loop trying to satisfy the vector of requests.
	 * If we reach a point where we must wait, any requests already
	 * performed are rolled back and we go to sleep until some other
	 * process wakes us up.  At this point, we start all over again.
	 *
	 * This ensures that from the perspective of other tasks, a set
	 * of requests is atomic (never partially satisfied).
	 */
	do_undos = 0;

	for (;;) {
		do_wakeup = 0;

		for (i = 0; i < nsops; i++) {
			sopptr = &sops[i];
			semptr = &semaptr->_sem_base[sopptr->sem_num];

			SEM_PRINTF(("semop:  semaptr=%p, sem_base=%p, "
			    "semptr=%p, sem[%d]=%d : op=%d, flag=%s\n",
			    semaptr, semaptr->_sem_base, semptr,
			    sopptr->sem_num, semptr->semval, sopptr->sem_op,
			    (sopptr->sem_flg & IPC_NOWAIT) ?
			    "nowait" : "wait"));

			if (sopptr->sem_op < 0) {
				if ((int)(semptr->semval +
				    sopptr->sem_op) < 0) {
					SEM_PRINTF(("semop:  "
					    "can't do it now\n"));
					break;
				} else {
					semptr->semval += sopptr->sem_op;
					if (semptr->semval == 0 &&
					    semptr->semzcnt > 0)
						do_wakeup = 1;
				}
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			} else if (sopptr->sem_op == 0) {
				if (semptr->semval > 0) {
					SEM_PRINTF(("semop:  not zero now\n"));
					break;
				}
			} else {
				if (semptr->semncnt > 0)
					do_wakeup = 1;
				semptr->semval += sopptr->sem_op;
				if (sopptr->sem_flg & SEM_UNDO)
					do_undos = 1;
			}
		}

		/*
		 * Did we get through the entire vector?
		 */
		if (i >= nsops)
			goto done;

		/*
		 * No ... rollback anything that we've already done
		 */
		SEM_PRINTF(("semop:  rollback 0 through %d\n", i - 1));
		while (i-- > 0)
			semaptr->_sem_base[sops[i].sem_num].semval -=
			    sops[i].sem_op;

		/*
		 * If the request that we couldn't satisfy has the
		 * NOWAIT flag set then return with EAGAIN.
		 */
		if (sopptr->sem_flg & IPC_NOWAIT) {
			error = EAGAIN;
			goto out;
		}

		if (sopptr->sem_op == 0)
			semptr->semzcnt++;
		else
			semptr->semncnt++;

		sem_waiters++;
		SEM_PRINTF(("semop:  good night!\n"));
		error = cv_wait_sig(&semcv[semid], &semlock);
		SEM_PRINTF(("semop:  good morning (error=%d)!\n", error));
		sem_waiters--;

		/* Notify reallocator, if it is waiting */
		cv_broadcast(&sem_realloc_cv);

		/*
		 * Make sure that the semaphore still exists
		 */
		if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 ||
		    semaptr->sem_perm._seq != seq) {
			error = EIDRM;
			goto out;
		}

		/*
		 * The semaphore is still alive.  Readjust the count of
		 * waiting processes.
		 */
		semptr = &semaptr->_sem_base[sopptr->sem_num];
		if (sopptr->sem_op == 0)
			semptr->semzcnt--;
		else
			semptr->semncnt--;

		/* In case of such state, restart the call */
		if (sem_realloc_state) {
			mutex_exit(&semlock);
			goto restart;
		}

		/* Is it really morning, or was our sleep interrupted? */
		if (error != 0) {
			error = EINTR;
			goto out;
		}
		SEM_PRINTF(("semop:  good morning!\n"));
	}

done:
	/*
	 * Process any SEM_UNDO requests.
	 */
	if (do_undos) {
		for (i = 0; i < nsops; i++) {
			/*
			 * We only need to deal with SEM_UNDO's for non-zero
			 * op's.
			 */
			int adjval;

			if ((sops[i].sem_flg & SEM_UNDO) == 0)
				continue;
			adjval = sops[i].sem_op;
			if (adjval == 0)
				continue;
			error = semundo_adjust(p, &suptr, semid,
			    sops[i].sem_num, -adjval);
			if (error == 0)
				continue;

			/*
			 * Oh-Oh!  We ran out of either sem_undo's or undo's.
			 * Rollback the adjustments to this point and then
			 * rollback the semaphore ups and down so we can return
			 * with an error with all structures restored.  We
			 * rollback the undo's in the exact reverse order that
			 * we applied them.  This guarantees that we won't run
			 * out of space as we roll things back out.
			 */
			while (i-- > 0) {
				if ((sops[i].sem_flg & SEM_UNDO) == 0)
					continue;
				adjval = sops[i].sem_op;
				if (adjval == 0)
					continue;
				if (semundo_adjust(p, &suptr, semid,
				    sops[i].sem_num, adjval) != 0)
					panic("semop - can't undo undos");
			}

			for (i = 0; i < nsops; i++)
				semaptr->_sem_base[sops[i].sem_num].semval -=
				    sops[i].sem_op;

			SEM_PRINTF(("error = %d from semundo_adjust\n", error));
			goto out;
		} /* loop through the sops */
	} /* if (do_undos) */

	/* We're definitely done - set the sempid's */
	for (i = 0; i < nsops; i++) {
		sopptr = &sops[i];
		semptr = &semaptr->_sem_base[sopptr->sem_num];
		semptr->sempid = p->p_pid;
	}

	/* Update sem_otime */
	semaptr->sem_otime = time_second;

	/* Do a wakeup if any semaphore was up'd. */
	if (do_wakeup) {
		SEM_PRINTF(("semop:  doing wakeup\n"));
		cv_broadcast(&semcv[semid]);
		SEM_PRINTF(("semop:  back from wakeup\n"));
	}
	SEM_PRINTF(("semop:  done\n"));
	*retval = 0;

 out:
	mutex_exit(&semlock);
	if (sops != small_sops)
		kmem_free(sops, nsops * sizeof(*sops));
	return error;
}