Example #1
/**
 * \brief Allocate a physically contiguous DMA-accessible consistent 
 * memory block.
 */
drm_dma_handle_t *
drm_pci_alloc(struct drm_device *dev, size_t size,
	      size_t align, dma_addr_t maxaddr)
{
	drm_dma_handle_t *dmah;
	int ret;

	/* Need power-of-two alignment, so fail the allocation if it isn't. */
	if ((align & (align - 1)) != 0) {
		DRM_ERROR("drm_pci_alloc with non-power-of-two alignment %d\n",
		    (int)align);
		return NULL;
	}

	dmah = malloc(sizeof(drm_dma_handle_t), DRM_MEM_DMA, M_ZERO | M_NOWAIT);
	if (dmah == NULL)
		return NULL;

#if 0 /* HT XXX XXX XXX */
	/* Make sure we aren't holding locks here */
	mtx_assert(&dev->dev_lock, MA_NOTOWNED);
	if (mtx_owned(&dev->dev_lock))
	    DRM_ERROR("called while holding dev_lock\n");
	mtx_assert(&dev->dma_lock, MA_NOTOWNED);
	if (mtx_owned(&dev->dma_lock))
	    DRM_ERROR("called while holding dma_lock\n");
#endif

	ret = bus_dma_tag_create(NULL, align, 0, /* tag, align, boundary */
	    maxaddr, BUS_SPACE_MAXADDR, /* lowaddr, highaddr */
	    NULL, NULL, /* filtfunc, filtfuncargs */
	    size, 1, size, /* maxsize, nsegs, maxsegsize */
	    0, /* flags */
	    &dmah->tag);
	if (ret != 0) {
		free(dmah, DRM_MEM_DMA);
		return NULL;
	}

	/* XXX BUS_DMA_NOCACHE */
	ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmah->map);
	if (ret != 0) {
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, DRM_MEM_DMA);
		return NULL;
	}

	ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size,
	    drm_pci_busdma_callback, dmah, BUS_DMA_NOWAIT);
	if (ret != 0) {
		bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, DRM_MEM_DMA);
		return NULL;
	}

	return dmah;
}
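The listing does not show the matching teardown. Below is a minimal sketch of the reverse-order cleanup, assuming the drm_dma_handle_t fields used above (tag, map, vaddr) and the stock FreeBSD busdma calls; the name and exact shape of the real free routine may differ:

static void
drm_pci_free_sketch(struct drm_device *dev, drm_dma_handle_t *dmah)
{
	if (dmah == NULL)
		return;
	/* Undo bus_dmamap_load(), bus_dmamem_alloc() and bus_dma_tag_create()
	 * in reverse order, then release the handle itself. */
	bus_dmamap_unload(dmah->tag, dmah->map);
	bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
	bus_dma_tag_destroy(dmah->tag);
	free(dmah, DRM_MEM_DMA);
}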
Example #2
/*
 * General sleep call.  Suspends the current thread until a wakeup is
 * performed on the specified identifier.  The thread will then be made
 * runnable with the specified priority.  Sleeps at most sbt units of time
 * (0 means no timeout).  If pri includes the PCATCH flag, let signals
 * interrupt the sleep, otherwise ignore them while sleeping.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal becomes pending, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the
 * system call should be interrupted by the signal.
 *
 * The lock argument is unlocked before the caller is suspended, and
 * re-locked before _sleep() returns.  If priority includes the PDROP
 * flag the lock is not re-locked before returning.
 */
int
_sleep(void *ident, struct lock_object *lock, int priority,
    const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
	struct thread *td;
	struct proc *p;
	struct lock_class *class;
	uintptr_t lock_state;
	int catch, pri, rval, sleepq_flags;
	WITNESS_SAVE_DECL(lock_witness);

	td = curthread;
	p = td->td_proc;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0, wmesg);
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
	    "Sleeping on \"%s\"", wmesg);
	KASSERT(sbt != 0 || mtx_owned(&Giant) || lock != NULL,
	    ("sleeping without a lock"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
	if (priority & PDROP)
		KASSERT(lock != NULL && lock != &Giant.lock_object,
		    ("PDROP requires a non-Giant lock"));
	if (lock != NULL)
		class = LOCK_CLASS(lock);
	else
		class = NULL;
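The listing truncates _sleep() at this point. As a usage-level complement, here is a hedged sketch of a driver wait loop built on the msleep(9) wrapper, which expands to _sleep(); the softc, flag, and wmesg names are invented:

	mtx_lock(&sc->sc_mtx);
	while (!sc->sc_done) {
		/* Sleep up to one second; PCATCH lets signals interrupt us. */
		error = msleep(&sc->sc_done, &sc->sc_mtx, PCATCH, "excwt", hz);
		if (error != 0)		/* EWOULDBLOCK, EINTR or ERESTART */
			break;
	}
	mtx_unlock(&sc->sc_mtx);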
Example #3
void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;

	if (SCHEDULER_STOPPED())
		return;

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	__mtx_lock_spin(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
Example #4
void
AcpiOsReleaseLock(ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
{
	struct acpi_spinlock	*al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release null spinlock\n"));
		return_VOID;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", al->al_name));

	if (mtx_owned(&al->al_lock)) {
		if (al->al_nested > 0) {
			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			    "release nested %s, depth %d\n",
			    al->al_name, al->al_nested));
			al->al_nested--;
		} else
			mtx_unlock_spin(&al->al_lock);
	} else
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot release unowned %s\n", al->al_name));
}
Example #5
/*
 * We need to process our own vnode unlock and then clear the
 * interlock flag as it applies only to our vnode, not the
 * vnodes below us on the stack.
 */
static int
null_unlock(struct vop_unlock_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int flags = ap->a_flags;
	int mtxlkflag = 0;
	struct null_node *nn;
	struct vnode *lvp;
	int error;

	if ((flags & LK_INTERLOCK) != 0)
		mtxlkflag = 1;
	else if (mtx_owned(VI_MTX(vp)) == 0) {
		VI_LOCK(vp);
		mtxlkflag = 2;
	}
	nn = VTONULL(vp);
	if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
		VI_LOCK_FLAGS(lvp, MTX_DUPOK);
		flags |= LK_INTERLOCK;
		vholdl(lvp);
		VI_UNLOCK(vp);
		error = VOP_UNLOCK(lvp, flags);
		vdrop(lvp);
		if (mtxlkflag == 0)
			VI_LOCK(vp);
	} else {
		if (mtxlkflag == 2)
			VI_UNLOCK(vp);
		error = vop_stdunlock(ap);
	}

	return (error);
}
Example #6
/*
 * NFS server pseudo system call for the nfsd daemons
 */
int
nfssvc(struct thread *td, struct nfssvc_args *uap)
{
	int error;

	KASSERT(!mtx_owned(&Giant), ("nfssvc(): called with Giant"));

	AUDIT_ARG_CMD(uap->flag);

	error = priv_check(td, PRIV_NFS_DAEMON);
	if (error)
		return (error);
	error = EINVAL;
	if ((uap->flag & (NFSSVC_ADDSOCK | NFSSVC_OLDNFSD | NFSSVC_NFSD)) &&
	    nfsd_call_nfsserver != NULL)
		error = (*nfsd_call_nfsserver)(td, uap);
	else if ((uap->flag & (NFSSVC_CBADDSOCK | NFSSVC_NFSCBD)) &&
	    nfsd_call_nfscl != NULL)
		error = (*nfsd_call_nfscl)(td, uap);
	else if ((uap->flag & (NFSSVC_IDNAME | NFSSVC_GETSTATS |
	    NFSSVC_GSSDADDPORT | NFSSVC_GSSDADDFIRST | NFSSVC_GSSDDELETEALL |
	    NFSSVC_NFSUSERDPORT | NFSSVC_NFSUSERDDELPORT)) &&
	    nfsd_call_nfscommon != NULL)
		error = (*nfsd_call_nfscommon)(td, uap);
	else if ((uap->flag & (NFSSVC_NFSDNFSD | NFSSVC_NFSDADDSOCK |
	    NFSSVC_PUBLICFH | NFSSVC_V4ROOTEXPORT | NFSSVC_NOPUBLICFH |
	    NFSSVC_STABLERESTART | NFSSVC_ADMINREVOKE |
	    NFSSVC_DUMPCLIENTS | NFSSVC_DUMPLOCKS)) &&
	    nfsd_call_nfsd != NULL)
		error = (*nfsd_call_nfsd)(td, uap);
	if (error == EINTR || error == ERESTART)
		error = 0;
	return (error);
}
Example #7
/*------------------------------------------------------------------------*
 *	usb_pc_common_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error, uint8_t isload)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_page_cache *pc;
	struct usb_page *pg;
	usb_size_t rem;
	uint8_t owned;

	pc = arg;
	uptag = pc->tag_parent;

	/*
	 * XXX There is sometimes recursive locking here.
	 * XXX We should try to find a better solution.
	 * XXX Until further the "owned" variable does
	 * XXX the trick.
	 */

	if (error) {
		goto done;
	}
	pg = pc->page_start;
	pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
	pc->page_offset_buf = rem;
	pc->page_offset_end += rem;
	nseg--;
#ifdef USB_DEBUG
	if (rem != (USB_P2U(pc->buffer) & (USB_PAGE_SIZE - 1))) {
		/*
		 * This check verifies that the physical address is correct:
		 */
		DPRINTFN(0, "Page offset was not preserved\n");
		error = 1;
		goto done;
	}
#endif
	while (nseg > 0) {
		nseg--;
		segs++;
		pg++;
		pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	}

done:
	owned = mtx_owned(uptag->mtx);
	if (!owned)
		mtx_lock(uptag->mtx);

	uptag->dma_error = (error ? 1 : 0);
	if (isload) {
		(uptag->func) (uptag);
	} else {
		cv_broadcast(uptag->cv);
	}
	if (!owned)
		mtx_unlock(uptag->mtx);
}
Example #8
/*
 * Unfortunately, SCIL doesn't cleanly handle retry conditions.
 *  CAM_REQUEUE_REQ works only when no one is using the pass(4) interface.  So
 *  when SCIL indicates an I/O needs to be retried (typically because of mixing
 *  tagged/non-tagged ATA commands, or running out of NCQ slots), we queue
 *  these I/Os internally.  Once SCIL completes an I/O to this device, or we get
 *  a ready notification, we will retry the first I/O on the queue.
 *  Unfortunately, SCIL also doesn't cleanly handle starting the new I/O within
 *  the context of the completion handler, so we need to retry these I/Os after
 *  the completion handler is done executing.
 */
void
isci_controller_release_queued_ccbs(struct ISCI_CONTROLLER *controller)
{
	struct ISCI_REMOTE_DEVICE *dev;
	struct ccb_hdr *ccb_h;
	int dev_idx;

	KASSERT(mtx_owned(&controller->lock), ("controller lock not owned"));

	controller->release_queued_ccbs = FALSE;
	for (dev_idx = 0;
	     dev_idx < SCI_MAX_REMOTE_DEVICES;
	     dev_idx++) {

		dev = controller->remote_device[dev_idx];
		if (dev != NULL &&
		    dev->release_queued_ccb == TRUE &&
		    dev->queued_ccb_in_progress == NULL) {
			dev->release_queued_ccb = FALSE;
			ccb_h = TAILQ_FIRST(&dev->queued_ccbs);

			if (ccb_h == NULL)
				continue;

			isci_log_message(1, "ISCI", "release %p %x\n", ccb_h,
			    ((union ccb *)ccb_h)->csio.cdb_io.cdb_bytes[0]);

			dev->queued_ccb_in_progress = (union ccb *)ccb_h;
			isci_io_request_execute_scsi_io(
			    (union ccb *)ccb_h, controller);
		}
	}
}
Example #9
static int
kmi_ioctl(keyboard_t *kbd, u_long cmd, caddr_t arg)
{
	int result;

	/*
	 * XXX KDGKBSTATE, KDSKBSTATE and KDSETLED can be called from any
	 * context where printf(9) can be called, which among other things
	 * includes interrupt filters and threads with any kinds of locks
	 * already held.  For this reason it would be dangerous to acquire
	 * the Giant here unconditionally.  On the other hand we have to
	 * have it to handle the ioctl.
	 * So we make our best effort to auto-detect whether we can grab
	 * the Giant or not.  Blame syscons(4) for this.
	 */
	switch (cmd) {
	case KDGKBSTATE:
	case KDSKBSTATE:
	case KDSETLED:
		if (!mtx_owned(&Giant) && !SCHEDULER_STOPPED())
			return (EDEADLK);	/* best I could come up with */
		/* FALLTHROUGH */
	default:
		KMI_LOCK();
		result = kmi_ioctl_locked(kbd, cmd, arg);
		KMI_UNLOCK();
		return (result);
	}
}
Example #10
ACPI_CPU_FLAGS
AcpiOsAcquireLock(ACPI_SPINLOCK Handle)
{
	struct acpi_spinlock	*al = (struct acpi_spinlock *)Handle;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (al == NULL) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "cannot acquire null spinlock\n"));
		return (0);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire %s\n", al->al_name));

	if (mtx_owned(&al->al_lock)) {
		al->al_nested++;
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
		    "acquire nested %s, depth %d\n",
		    al->al_name, al->al_nested));
	} else
		mtx_lock_spin(&al->al_lock);

	return (0);
}
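Together with AcpiOsReleaseLock() from Example #4, this implements counted recursion on a single spin mutex. A hedged usage sketch, assuming a handle previously obtained from AcpiOsCreateLock():

	ACPI_CPU_FLAGS f1, f2;

	f1 = AcpiOsAcquireLock(handle);	/* takes al_lock via mtx_lock_spin() */
	f2 = AcpiOsAcquireLock(handle);	/* already owned: al_nested becomes 1 */
	AcpiOsReleaseLock(handle, f2);	/* al_nested drops to 0, lock still held */
	AcpiOsReleaseLock(handle, f1);	/* mtx_unlock_spin() releases al_lock */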
Example #11
int
rm_wowned(const struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}
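A hedged sketch of the typical call site: rm_wowned() exists chiefly so write-side code can assert ownership. The softc and field names below are invented for illustration:

static void
foo_update(struct foo_softc *sc)
{

	KASSERT(rm_wowned(&sc->foo_rmlock), ("foo_rmlock not write-locked"));
	sc->foo_generation++;
}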
Example #12
void
vfs_clearmntopt(vfs_t *vfsp, const char *name)
{
	int locked;

	if (!(locked = mtx_owned(MNT_MTX(vfsp))))
		MNT_ILOCK(vfsp);
	vfs_deleteopt(vfsp->mnt_opt, name);
	if (!locked)
		MNT_IUNLOCK(vfsp);
}
Example #13
static void
_completion_claim(struct completion *c)
{

	KASSERT(mtx_owned(&c->lock),
	    ("_completion_claim should be called with acquired lock"));
	KASSERT(c->done != 0, ("_completion_claim on non-waited completion"));
	if (c->done > 0)
		c->done--;
	else
		KASSERT(c->done == -1, ("Invalid value of c->done: %d", c->done));
}
Example #14
void
cam_sim_hold(struct cam_sim *sim)
{
	int lock;

	lock = (mtx_owned(sim->mtx) == 0);
	if (lock)
		CAM_SIM_LOCK(sim);
	KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
	sim->refcount++;
	if (lock)
		CAM_SIM_UNLOCK(sim);
}
Example #15
static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{

	KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));

	/*
	 * Update 'rendezvous_func' and execute a write memory barrier to
	 * ensure that it is visible across all host cpus. This is not needed
	 * for correctness but it does ensure that all the vcpus will notice
	 * that the rendezvous is requested immediately.
	 */
	vm->rendezvous_func = func;
	wmb();
}
Example #16
void
cam_sim_release(struct cam_sim *sim)
{
	int lock;

	lock = (mtx_owned(sim->mtx) == 0);
	if (lock)
		CAM_SIM_LOCK(sim);
	KASSERT(sim->refcount >= 1, ("sim->refcount >= 1"));
	sim->refcount--;
	if (sim->refcount == 0)
		wakeup(sim);
	if (lock)
		CAM_SIM_UNLOCK(sim);
}
Example #17
static int
tumbler_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right)
{
    struct tumbler_softc *sc;
    struct mtx *mixer_lock;
    int locked;
    u_int l, r;
    u_char reg[6];

    sc = device_get_softc(mix_getdevinfo(m));
    mixer_lock = mixer_get_lock(m);
    locked = mtx_owned(mixer_lock);

    switch (dev) {
    case SOUND_MIXER_VOLUME:
        if (left > 100 || right > 100)
            return (0);

        l = (left == 0 ? 0 : tumbler_volume_table[left - 1]);
        r = (right == 0 ? 0 : tumbler_volume_table[right - 1]);

        reg[0] = (l & 0xff0000) >> 16;
        reg[1] = (l & 0x00ff00) >> 8;
        reg[2] = l & 0x0000ff;
        reg[3] = (r & 0xff0000) >> 16;
        reg[4] = (r & 0x00ff00) >> 8;
        reg[5] = r & 0x0000ff;

        /*
         * We need to unlock the mixer lock because iicbus_transfer()
         * may sleep. The mixer lock itself is unnecessary here
         * because it is meant to serialize hardware access, which
         * is taken care of by the I2C layer, so this is safe.
         */

        if (locked)
            mtx_unlock(mixer_lock);

        tumbler_write(sc, TUMBLER_VOLUME, reg);

        if (locked)
            mtx_lock(mixer_lock);

        return (left | (right << 8));
    }

    return (0);
}
Example #18
void mtx_lock(mtx_t *mtx) {
  /* TODO: Implement recursive mutexes */
  assert(mtx_owner(mtx) != thread_self());

  while (!mtx_try_lock(mtx)) {
    cs_enter();
    /* Check if the mutex got unlocked since a call to mtx_try_lock */
    if (mtx->mtx_state == MTX_UNOWNED) {
      cs_leave();
      continue;
    }
    assert(mtx_owned(mtx));
    turnstile_wait(&mtx->turnstile);
    cs_leave();
  }
}
Example #19
static void
thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
{

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("thread_lock() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
		    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object,
	    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
}
Example #20
static void
update_tx_sched(void *context, int pending)
{
	int i, j, mode, rateunit, ratemode, maxrate, pktsize, rc;
	struct port_info *pi;
	struct tx_cl_rl_params *tc;
	struct adapter *sc = context;
	const int n = sc->chip_params->nsched_cls;

	mtx_lock(&sc->tc_lock);
	for_each_port(sc, i) {
		pi = sc->port[i];
		tc = &pi->sched_params->cl_rl[0];
		for (j = 0; j < n; j++, tc++) {
			MPASS(mtx_owned(&sc->tc_lock));
			if ((tc->flags & TX_CLRL_REFRESH) == 0)
				continue;

			mode = tc->mode;
			rateunit = tc->rateunit;
			ratemode = tc->ratemode;
			maxrate = tc->maxrate;
			pktsize = tc->pktsize;
			mtx_unlock(&sc->tc_lock);

			if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
			    "t4utxs") != 0) {
				mtx_lock(&sc->tc_lock);
				continue;
			}
			rc = t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED,
			    FW_SCHED_PARAMS_LEVEL_CL_RL, mode, rateunit,
			    ratemode, pi->tx_chan, j, 0, maxrate, 0, pktsize,
			    1);
			end_synchronized_op(sc, 0);

			mtx_lock(&sc->tc_lock);
			if (rc != 0) {
				tc->flags |= TX_CLRL_ERROR;
			} else if (tc->mode == mode &&
			    tc->rateunit == rateunit &&
			    tc->maxrate == maxrate &&
			    tc->pktsize == pktsize) {
				tc->flags &= ~(TX_CLRL_REFRESH | TX_CLRL_ERROR);
			}
		}
	}
	mtx_unlock(&sc->tc_lock);
}
Example #21
void
vfs_setmntopt(vfs_t *vfsp, const char *name, const char *arg,
    int flags __unused)
{
	struct vfsopt *opt;
	size_t namesize;
	int locked;

	if (!(locked = mtx_owned(MNT_MTX(vfsp))))
		MNT_ILOCK(vfsp);

	if (vfsp->mnt_opt == NULL) {
		void *opts;

		MNT_IUNLOCK(vfsp);
		opts = malloc(sizeof(*vfsp->mnt_opt), M_MOUNT, M_WAITOK);
		MNT_ILOCK(vfsp);
		if (vfsp->mnt_opt == NULL) {
			vfsp->mnt_opt = opts;
			TAILQ_INIT(vfsp->mnt_opt);
		} else {
			free(opts, M_MOUNT);
		}
	}

	MNT_IUNLOCK(vfsp);

	opt = malloc(sizeof(*opt), M_MOUNT, M_WAITOK);
	namesize = strlen(name) + 1;
	opt->name = malloc(namesize, M_MOUNT, M_WAITOK);
	strlcpy(opt->name, name, namesize);
	opt->pos = -1;
	opt->seen = 1;
	if (arg == NULL) {
		opt->value = NULL;
		opt->len = 0;
	} else {
		opt->len = strlen(arg) + 1;
		opt->value = malloc(opt->len, M_MOUNT, M_WAITOK);
		bcopy(arg, opt->value, opt->len);
	}

	MNT_ILOCK(vfsp);
	TAILQ_INSERT_TAIL(vfsp->mnt_opt, opt, link);
	if (!locked)
		MNT_IUNLOCK(vfsp);
}
Example #22
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
{
	struct mtx *m;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	m = mtxlock2mtx(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
	    curthread, m->lock_object.lo_name, file, line));
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
	    (opts & MTX_RECURSE) != 0)) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _mtx_obtain_lock(m, (uintptr_t)curthread);
	opts &= ~MTX_RECURSE;

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		TD_LOCKS_INC(curthread);
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
			    m, contested, waittime, file, line);

	}

	return (rval);
}
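A hedged sketch of the usual call site this function backs: try the lock, and fall back to deferred work instead of blocking when it is contended. The softc, helper, and task names are illustrative:

	if (mtx_trylock(&sc->sc_mtx)) {
		/* Uncontended: do the optional work inline. */
		foo_flush(sc);
		mtx_unlock(&sc->sc_mtx);
	} else {
		/* Contended: hand the work to a taskqueue instead of sleeping. */
		taskqueue_enqueue(taskqueue_thread, &sc->sc_flush_task);
	}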
Example #23
static int
ua_mixer_set(struct snd_mixer *m, unsigned type, unsigned left, unsigned right)
{
	struct mtx *mtx = mixer_get_lock(m);
	uint8_t do_unlock;

	if (mtx_owned(mtx)) {
		do_unlock = 0;
	} else {
		do_unlock = 1;
		mtx_lock(mtx);
	}
	uaudio_mixer_set(mix_getdevinfo(m), type, left, right);
	if (do_unlock) {
		mtx_unlock(mtx);
	}
	return (left | (right << 8));
}
Example #24
static uint32_t
ua_mixer_setrecsrc(struct snd_mixer *m, uint32_t src)
{
	struct mtx *mtx = mixer_get_lock(m);
	int retval;
	uint8_t do_unlock;

	if (mtx_owned(mtx)) {
		do_unlock = 0;
	} else {
		do_unlock = 1;
		mtx_lock(mtx);
	}
	retval = uaudio_mixer_setrecsrc(mix_getdevinfo(m), src);
	if (do_unlock) {
		mtx_unlock(mtx);
	}
	return (retval);
}
Example #25
static int
onyx_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right)
{
	struct onyx_softc *sc;
	struct mtx *mixer_lock;
	int locked;
	uint8_t l, r;

	sc = device_get_softc(mix_getdevinfo(m));
	mixer_lock = mixer_get_lock(m);
	locked = mtx_owned(mixer_lock);

	switch (dev) {
	case SOUND_MIXER_VOLUME:

		/*
		 * We need to unlock the mixer lock because iicbus_transfer()
		 * may sleep. The mixer lock itself is unnecessary here
		 * because it is meant to serialize hardware access, which
		 * is taken care of by the I2C layer, so this is safe.
		 */
		if (left > 100 || right > 100)
			return (0);

		l = left + 128;
		r = right + 128;

		if (locked)
			mtx_unlock(mixer_lock);

		onyx_write(sc, PCM3052_REG_LEFT_ATTN, l);
		onyx_write(sc, PCM3052_REG_RIGHT_ATTN, r);

		if (locked)
			mtx_lock(mixer_lock);

		return (left | (right << 8));
	}

	return (0);
}
Example #26
void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
	_get_spin_lock(m, curthread, opts, file, line);
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
Example #27
void
__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
    int line)
{
	struct mtx *m;
#ifdef SMP
	uintptr_t tid, v;
#endif

	m = mtxlock2mtx(c);

	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
	    ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
	    m->lock_object.lo_name, file, line));
	if (mtx_owned(m))
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
		    (opts & MTX_RECURSE) != 0,
	    ("mtx_lock_spin: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
	opts &= ~MTX_RECURSE;
	WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);
#ifdef SMP
	spinlock_enter();
	tid = (uintptr_t)curthread;
	v = MTX_UNOWNED;
	if (!_mtx_obtain_lock_fetch(m, &v, tid))
		_mtx_lock_spin(m, v, opts, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,
		    m, 0, 0, file, line);
#else
	__mtx_lock_spin(m, curthread, opts, file, line);
#endif
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
}
Example #28
/*
 * Remove a reference from node private data.
 */
static void
ng_socket_free_priv(struct ngsock *priv)
{
	KKASSERT(mtx_owned(&priv->mtx));

	priv->refs--;

	if (priv->refs == 0) {
		mtx_uninit(&priv->mtx);
		kfree(priv, M_NETGRAPH_SOCK);
		return;
	}

	if ((priv->refs == 1) && (priv->node != NULL)) {
		node_p node = priv->node;

		priv->node = NULL;
		mtx_unlock(&priv->mtx);
		NG_NODE_UNREF(node);
		ng_rmnode_self(node);
	} else
		mtx_unlock(&priv->mtx);
}
Example #29
/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int rval;

	MPASS(curthread != NULL);
	KASSERT(m->mtx_lock != MTX_DESTROYED,
	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
	    ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
	    file, line));

	if (mtx_owned(m) && (m->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		rval = 1;
	} else
		rval = _obtain_lock(m, (uintptr_t)curthread);

	LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		curthread->td_locks++;
		if (m->mtx_recurse == 0)
			LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE,
			    m, contested, waittime, file, line);

	}

	return (rval);
}
Example #30
void
_thread_lock_flags(struct thread *td, int opts, const char *file, int line)
{
	struct mtx *m;
	uintptr_t tid;
	int i;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
#endif

	i = 0;
	tid = (uintptr_t)curthread;
	for (;;) {
retry:
		spinlock_enter();
		m = td->td_lock;
		KASSERT(m->mtx_lock != MTX_DESTROYED,
		    ("thread_lock() of destroyed mutex @ %s:%d", file, line));
		KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
		    ("thread_lock() of sleep mutex %s @ %s:%d",
		    m->lock_object.lo_name, file, line));
		if (mtx_owned(m))
			KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("thread_lock: recursed on non-recursive mutex %s @ %s:%d\n",
			    m->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&m->lock_object,
		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
		while (!_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
			spin_cnt++;
#endif
			if (m->mtx_lock == tid) {
				m->mtx_recurse++;
				break;
			}
			lock_profile_obtain_lock_failed(&m->lock_object,
			    &contested, &waittime);
			/* Give interrupts a chance while we spin. */
			spinlock_exit();
			while (m->mtx_lock != MTX_UNOWNED) {
				if (i++ < 10000000)
					cpu_spinwait();
				else if (i < 60000000 ||
				    kdb_active || panicstr != NULL)
					DELAY(1);
				else
					_mtx_lock_spin_failed(m);
				cpu_spinwait();
				if (m != td->td_lock)
					goto retry;
			}
			spinlock_enter();
		}
		if (m == td->td_lock)
			break;
		_rel_spin_lock(m);	/* does spinlock_exit() */
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
	}
	if (m->mtx_recurse == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE,
		    m, contested, waittime, (file), (line));
	LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
	    line);
	WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
	LOCKSTAT_RECORD1(LS_THREAD_LOCK_SPIN, m, spin_cnt);
}