Example #1
/*
 * Process pending interrupts
 */
void
splz(void)
{
	struct mdglobaldata *gd = mdcpu;
	thread_t td = gd->mi.gd_curthread;
	int irq;

	while (gd->mi.gd_reqflags & (RQF_IPIQ|RQF_INTPEND)) {
		crit_enter_quick(td);
		if (gd->mi.gd_reqflags & RQF_IPIQ) {
			atomic_clear_int(&gd->mi.gd_reqflags, RQF_IPIQ);
			lwkt_process_ipiq();
		}
		if (gd->mi.gd_reqflags & RQF_INTPEND) {
			atomic_clear_int(&gd->mi.gd_reqflags, RQF_INTPEND);
			while ((irq = ffs(gd->gd_spending)) != 0) {
				--irq;
				atomic_clear_int(&gd->gd_spending, 1 << irq);
				irq += FIRST_SOFTINT;
				sched_ithd_soft(irq);
			}
			while ((irq = ffs(gd->gd_fpending)) != 0) {
				--irq;
				atomic_clear_int(&gd->gd_fpending, 1 << irq);
				sched_ithd_hard_virtual(irq);
			}
		}
		crit_exit_noyield(td);
	}
}
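
splz() drains the per-cpu pending words one bit at a time: ffs() returns the 1-based index of the lowest set bit, atomic_clear_int() knocks that bit out, and the matching handler is scheduled. Below is a minimal user-space sketch of the same drain pattern, assuming nothing from the kernel: the pending word and handle() are illustrative stand-ins for gd_spending/gd_fpending and sched_ithd_*(), and a GCC/Clang __atomic builtin replaces atomic_clear_int().

#include <stdio.h>
#include <strings.h>			/* ffs() */

static volatile unsigned int pending;	/* stand-in for gd_spending/gd_fpending */

static void
handle(int irq)				/* stand-in for sched_ithd_soft/hard */
{
	printf("servicing irq %d\n", irq);
}

static void
drain_pending(void)
{
	int irq;

	while ((irq = ffs((int)pending)) != 0) {
		--irq;				/* ffs() is 1-based */
		__atomic_fetch_and(&pending, ~(1U << irq),
		    __ATOMIC_ACQ_REL);		/* like atomic_clear_int() */
		handle(irq);
	}
}

int
main(void)
{
	pending = (1U << 3) | (1U << 7);
	drain_pending();
	return 0;
}

Clearing the bit before calling the handler mirrors splz(): a bit raised again while its handler runs is picked up on the next loop pass instead of being lost.
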
Example #2
/*
 * Destroy node
 */
static int
ng_ksocket_shutdown(node_p node)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	priv_p embryo;

	/* Close our socket (if any) */
	if (priv->so != NULL) {
		atomic_clear_int(&priv->so->so_rcv.ssb_flags, SSB_UPCALL);
		atomic_clear_int(&priv->so->so_snd.ssb_flags, SSB_UPCALL);
		priv->so->so_upcall = NULL;
		soclose(priv->so, FNONBLOCK);
		priv->so = NULL;
	}

	/* If we are an embryo, take ourselves out of the parent's list */
	if (priv->flags & KSF_EMBRYONIC) {
		LIST_REMOVE(priv, siblings);
		priv->flags &= ~KSF_EMBRYONIC;
	}

	/* Remove any embryonic children we have */
	while (!LIST_EMPTY(&priv->embryos)) {
		embryo = LIST_FIRST(&priv->embryos);
		ng_rmnode_self(embryo->node);
	}

	/* Take down netgraph node */
	bzero(priv, sizeof(*priv));
	kfree(priv, M_NETGRAPH);
	NG_NODE_SET_PRIVATE(node, NULL);
	NG_NODE_UNREF(node);		/* let the node escape */
	return (0);
}
Example #3
/*
 * Kill all lwps associated with the current process except the
 * current lwp.   Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 *
 * Caller must hold curproc->p_token
 */
int
killalllwps(int forexec)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;
	int fakestop;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	if (p->p_flags & P_WEXIT)
		return (EALREADY);
	p->p_flags |= P_WEXIT;

	/*
	 * Set temporary stopped state in case we are racing a coredump.
	 * Otherwise the coredump may hang forever.
	 */
	if (lp->lwp_mpflags & LWP_MP_WSTOP) {
		fakestop = 0;
	} else {
		atomic_set_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
		++p->p_nstopped;
		fakestop = 1;
		wakeup(&p->p_nstopped);
	}

	/*
	 * Interlock with LWP_MP_WEXIT and kill any remaining LWPs
	 */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	if (p->p_nthreads > 1)
		killlwps(lp);

	/*
	 * Undo temporary stopped state
	 */
	if (fakestop) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WSTOP);
		--p->p_nstopped;
	}

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	if (forexec) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
		p->p_flags &= ~P_WEXIT;
	}
	return(0);
}
Example #4
/*
 * Handle an IPI sent to this processor.
 */
intrmask_t
smp_handle_ipi(struct trapframe *frame)
{
	cpumask_t cpumask;		/* This cpu mask */
	u_int	ipi, ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
	cpumask = PCPU_GET(cpumask);

	CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);
	while (ipi_bitmap) {
		/*
		 * Find the lowest set bit.
		 */
		ipi = ipi_bitmap & ~(ipi_bitmap - 1);
		ipi_bitmap &= ~ipi;
		switch (ipi) {
		case IPI_INVLTLB:
			CTR0(KTR_SMP, "IPI_INVLTLB");
			break;

		case IPI_RENDEZVOUS:
			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
			smp_rendezvous_action();
			break;

		case IPI_AST:
			CTR0(KTR_SMP, "IPI_AST");
			break;

		case IPI_STOP:

			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add it in the switch.
			 */
			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");
			atomic_set_int(&stopped_cpus, cpumask);

			while ((started_cpus & cpumask) == 0)
			    ;
			atomic_clear_int(&started_cpus, cpumask);
			atomic_clear_int(&stopped_cpus, cpumask);
			break;
		}
	}
	return CR_INT_IPI;
}
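
The dispatch loop isolates the lowest set bit with ipi_bitmap & ~(ipi_bitmap - 1), which yields the bit itself (a power of two) rather than its index, so the switch can compare directly against the IPI_* mask values. A small stand-alone check of that identity, not taken from the source above:

#include <assert.h>

int
main(void)
{
	unsigned int bitmap = 0x28;		/* bits 3 and 5 set */
	unsigned int lowest = bitmap & ~(bitmap - 1);

	assert(lowest == 0x08);			/* bit 3, isolated as a mask */
	assert(lowest == (bitmap & -bitmap));	/* equivalent two's-complement form */

	bitmap &= ~lowest;			/* consume it, as the loop does */
	assert(bitmap == 0x20);
	return 0;
}
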
Example #5
/*
 * Kill all lwps associated with the current process except the
 * current lwp.   Return an error if we race another thread trying to
 * do the same thing and lose the race.
 *
 * If forexec is non-zero the current thread and process flags are
 * cleaned up so they can be reused.
 *
 * Caller must hold curproc->p_token
 */
int
killalllwps(int forexec)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p = lp->lwp_proc;

	/*
	 * Interlock against P_WEXIT.  Only one of the process's threads
	 * is allowed to do the master exit.
	 */
	if (p->p_flags & P_WEXIT)
		return (EALREADY);
	p->p_flags |= P_WEXIT;

	/*
	 * Interlock with LWP_MP_WEXIT and kill any remaining LWPs
	 */
	atomic_set_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
	if (p->p_nthreads > 1)
		killlwps(lp);

	/*
	 * If doing this for an exec, clean up the remaining thread
	 * (us) for continuing operation after all the other threads
	 * have been killed.
	 */
	if (forexec) {
		atomic_clear_int(&lp->lwp_mpflags, LWP_MP_WEXIT);
		p->p_flags &= ~P_WEXIT;
	}
	return(0);
}
Example #6
/*
 * When an inode is flagged INODE_CREATING its chains have not actually
 * been inserted into the on-media tree yet.
 */
int
hammer2_inode_chain_ins(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_CREATING) {
		hammer2_xop_create_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_CREATING);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->lhc = ip->meta.inum;
		xop->flags = 0;
		hammer2_xop_start(&xop->head, &hammer2_inode_create_ins_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"insert inode %p %ld\n", ip, ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}
Example #7
/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static
void
mtx_delete_link(mtx_t *mtx, mtx_link_t *link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_LINKSPIN.
	 *
	 * Do not use cmpxchg to wait for LINKSPIN to clear as this might
	 * result in too much cpu cache traffic.
	 */
	crit_enter_raw(td);
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_LINKSPIN) {
			cpu_pause();
			continue;
		}
		nlock = lock | MTX_LINKSPIN;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
	}

	/*
	 * Delete the link and release LINKSPIN.
	 */
	nlock = MTX_LINKSPIN;	/* to clear */

	switch(link->state) {
	case MTX_LINK_LINKED_EX:
		if (link->next == link) {
			mtx->mtx_exlink = NULL;
			nlock |= MTX_EXWANTED;	/* to clear */
		} else {
			mtx->mtx_exlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		break;
	case MTX_LINK_LINKED_SH:
		if (link->next == link) {
			mtx->mtx_shlink = NULL;
			nlock |= MTX_SHWANTED;	/* to clear */
		} else {
			mtx->mtx_shlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		break;
	default:
		/* no change */
		break;
	}
	atomic_clear_int(&mtx->mtx_lock, nlock);
	crit_exit_raw(td);
}
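
The acquisition loop above is the usual test-then-test-and-set style: while LINKSPIN is observed set, the thread only reads mtx_lock (plus cpu_pause()), and atomic_cmpset_int() is attempted only once the bit looks clear, so contended waiters spin in their own cache instead of bouncing the line with failed read-modify-write cycles. A minimal generic sketch of that spin style in C11 atomics; the lock word and bit are illustrative, not DragonFly's mtx fields.

#include <stdatomic.h>

#define LOCK_BIT	0x1u			/* illustrative spin bit */

static _Atomic unsigned int lockword;

static void
spin_acquire(void)
{
	unsigned int old;

	for (;;) {
		old = atomic_load_explicit(&lockword, memory_order_relaxed);
		if (old & LOCK_BIT)
			continue;	/* read-only wait, no cache-line ping-pong */
		if (atomic_compare_exchange_weak_explicit(&lockword,
		    &old, old | LOCK_BIT,
		    memory_order_acquire, memory_order_relaxed))
			break;		/* CAS attempted only when the bit looked clear */
	}
}

static void
spin_release(void)
{
	atomic_fetch_and_explicit(&lockword, ~LOCK_BIT, memory_order_release);
}

int
main(void)
{
	spin_acquire();
	spin_release();
	return 0;
}
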
Example #8
/*
 * Remove the inactive table from a device.  Routines which work with inactive
 * tables don't need to synchronise with dmstrategy; they can synchronise
 * themselves with a mutex.
 *
 */
int
dm_table_clear_ioctl(prop_dictionary_t dm_dict)
{
	dm_dev_t *dmv;
	const char *name, *uuid;
	uint32_t flags, minor;

	dmv = NULL;
	name = NULL;
	uuid = NULL;
	flags = 0;
	minor = 0;

	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_NAME, &name);
	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_UUID, &uuid);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_FLAGS, &flags);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_MINOR, &minor);

	aprint_debug("Clearing inactive table from device: %s--%s\n",
	    name, uuid);

	if ((dmv = dm_dev_lookup(name, uuid, minor)) == NULL) {
		DM_REMOVE_FLAG(flags, DM_EXISTS_FLAG);
		return ENOENT;
	}
	/* Select unused table */
	dm_table_destroy(&dmv->table_head, DM_TABLE_INACTIVE);

	atomic_clear_int(&dmv->flags, DM_INACTIVE_PRESENT_FLAG);

	dm_dev_unbusy(dmv);

	return 0;
}
Example #9
int
_pthread_setcanceltype(int type, int *oldtype)
{
	struct pthread	*curthread = tls_get_curthread();
	int oldval;

	oldval = curthread->cancelflags;
	if (oldtype != NULL)
		*oldtype = ((oldval & THR_CANCEL_AT_POINT) ?
				 PTHREAD_CANCEL_ASYNCHRONOUS :
				 PTHREAD_CANCEL_DEFERRED);
	switch (type) {
	case PTHREAD_CANCEL_ASYNCHRONOUS:
		atomic_set_int(&curthread->cancelflags, THR_CANCEL_AT_POINT);
		testcancel(curthread);
		break;
	case PTHREAD_CANCEL_DEFERRED:
		atomic_clear_int(&curthread->cancelflags, THR_CANCEL_AT_POINT);
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
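
This is the thread library's backend for the standard pthread_setcanceltype(3) interface: asynchronous mode sets THR_CANCEL_AT_POINT (and immediately tests for a pending cancel), deferred mode clears it. A short application-level usage sketch of the public API, independent of this particular implementation:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
worker(void *arg)
{
	volatile unsigned long n = 0;
	int oldtype;

	(void)arg;
	/* Let cancellation take effect inside a loop with no cancellation points. */
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
	for (;;)
		++n;
	return NULL;			/* not reached */
}

int
main(void)
{
	pthread_t td;

	pthread_create(&td, NULL, worker, NULL);
	sleep(1);		/* crude: give the worker time to enable async mode */
	pthread_cancel(td);	/* effective even without a cancellation point */
	pthread_join(td, NULL);
	puts("worker cancelled");
	return 0;
}
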
Example #10
/*
 * lwkt_thread_putport() - Backend to lwkt_beginmsg()
 *
 * Called with the target port as an argument but in the context of the
 * reply port.  This function always implements an asynchronous put to
 * the target message port, and thus returns EASYNC.
 *
 * The message must already have cleared MSGF_DONE and MSGF_REPLY.
 */
static
void
lwkt_thread_putport_remote(lwkt_msg_t msg)
{
    lwkt_port_t port = msg->ms_target_port;

    /*
     * Chase any thread migration that occurs
     */
    if (port->mpu_td->td_gd != mycpu) {
        lwkt_send_ipiq(port->mpu_td->td_gd,
                       (ipifunc1_t)lwkt_thread_putport_remote, msg);
        return;
    }

    /*
     * An atomic op is needed on ms_flags vs originator.  Also
     * note that the originator might be using a different type
     * of msgport.
     */
#ifdef INVARIANTS
    KKASSERT(msg->ms_flags & MSGF_INTRANSIT);
    atomic_clear_int(&msg->ms_flags, MSGF_INTRANSIT);
#endif
    _lwkt_pushmsg(port, msg);
    if (port->mp_flags & MSGPORTF_WAITING)
        _lwkt_schedule_msg(port->mpu_td, msg->ms_flags);
}
Example #11
/*
 * When an inode is flagged INODE_DELETING it has been deleted (no directory
 * entry or open refs are left, though as an optimization H2 might leave
 * nlinks == 1 to avoid unnecessary block updates).  The backend flush then
 * needs to actually remove it from the topology.
 *
 * NOTE: backend flush must still sync and flush the deleted inode to clean
 *	 out related chains.
 *
 * NOTE: We must clear not only INODE_DELETING, but also INODE_ISUNLINKED
 *	 to prevent the vnode reclaim code from trying to delete it twice.
 */
int
hammer2_inode_chain_des(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & HAMMER2_INODE_DELETING) {
		hammer2_xop_destroy_t *xop;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_DELETING |
					     HAMMER2_INODE_ISUNLINKED);
		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		hammer2_xop_start(&xop->head, &hammer2_inode_destroy_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);

		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: backend unable to "
				"delete inode %p %ld\n", ip, ip->meta.inum);
			/* XXX return error somehow? */
		}
	}
	return error;
}
Example #12
/*
 * Per-port thread helper.  This helper thread is responsible for
 * atomically retrieving and clearing the signal mask and calling
 * the machine-independent driver core.
 *
 * MPSAFE
 */
static
void
ahci_port_thread(void *arg)
{
	struct ahci_port *ap = arg;
	int mask;

	/*
	 * Sets us up as an interrupt support thread, meaning we are
	 * given a higher priority and we can preempt normal threads.
	 */
	lwkt_set_interrupt_support_thread();

	/*
	 * The helper thread is responsible for the initial port init,
	 * so all the ports can be inited in parallel.
	 *
	 * We also run the state machine which should do all probes.
	 * Since CAM is not attached yet we will not get out-of-order
	 * SCSI attachments.
	 */
	ahci_os_lock_port(ap);
	ahci_port_init(ap);
	atomic_clear_int(&ap->ap_signal, AP_SIGF_THREAD_SYNC);
	wakeup(&ap->ap_signal);
	ahci_port_state_machine(ap, 1);
	ahci_os_unlock_port(ap);
	atomic_clear_int(&ap->ap_signal, AP_SIGF_INIT);
	wakeup(&ap->ap_signal);

	/*
	 * Then loop on the helper core.
	 */
	mask = ap->ap_signal;
	while ((mask & AP_SIGF_STOP) == 0) {
		ahci_port_thread_core(ap, mask);
		lockmgr(&ap->ap_sig_lock, LK_EXCLUSIVE);
		if (ap->ap_signal == 0) {
			lksleep(&ap->ap_thread, &ap->ap_sig_lock, 0,
				"ahport", 0);
		}
		mask = ap->ap_signal;
		atomic_clear_int(&ap->ap_signal, mask);
		lockmgr(&ap->ap_sig_lock, LK_RELEASE);
	}
	ap->ap_thread = NULL;
}
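
The loop above is the consumer half of a simple signal-mask handshake: it snapshots ap_signal, processes the snapshot, atomically clears exactly the bits it consumed, and sleeps under ap_sig_lock only if no new bits arrived in the meantime. A hedged sketch of what the producer half of such a handshake might look like; port_signal() is a hypothetical helper, not code from the AHCI driver.

/*
 * Hypothetical producer-side sketch: raise signal bits for the port
 * helper thread and wake it in case it is sleeping.  The field and
 * flag names mirror the consumer loop above; the helper itself is
 * illustrative only.
 */
static void
port_signal(struct ahci_port *ap, int sigbits)
{
	lockmgr(&ap->ap_sig_lock, LK_EXCLUSIVE);
	atomic_set_int(&ap->ap_signal, sigbits);
	wakeup(&ap->ap_thread);		/* matches the lksleep() channel above */
	lockmgr(&ap->ap_sig_lock, LK_RELEASE);
}
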
Example #13
/*
 * Drop an inode reference, freeing the inode when the last reference goes
 * away.
 */
void
hammer2_inode_drop(hammer2_inode_t *ip)
{
	hammer2_pfs_t *pmp;
	u_int refs;

	while (ip) {
		if (hammer2_debug & 0x80000) {
			kprintf("INODE-1 %p (%d->%d)\n",
				ip, ip->refs, ip->refs - 1);
			print_backtrace(8);
		}
		refs = ip->refs;
		cpu_ccfence();
		if (refs == 1) {
			/*
			 * Transition to zero, must interlock with
			 * the inode inumber lookup tree (if applicable).
			 * It should not be possible for anyone to race
			 * the transition to 0.
			 */
			pmp = ip->pmp;
			KKASSERT(pmp);
			hammer2_spin_ex(&pmp->inum_spin);

			if (atomic_cmpset_int(&ip->refs, 1, 0)) {
				KKASSERT(hammer2_mtx_refs(&ip->lock) == 0);
				if (ip->flags & HAMMER2_INODE_ONRBTREE) {
					atomic_clear_int(&ip->flags,
						     HAMMER2_INODE_ONRBTREE);
					RB_REMOVE(hammer2_inode_tree,
						  &pmp->inum_tree, ip);
					--pmp->inum_count;
				}
				hammer2_spin_unex(&pmp->inum_spin);

				ip->pmp = NULL;

				/*
				 * Cleaning out ip->cluster isn't entirely
				 * trivial.
				 */
				hammer2_inode_repoint(ip, NULL, NULL);

				kfree(ip, pmp->minode);
				atomic_add_long(&pmp->inmem_inodes, -1);
				ip = NULL;	/* will terminate loop */
			} else {
				hammer2_spin_unex(&ip->pmp->inum_spin);
			}
		} else {
			/*
			 * Non zero transition
			 */
			if (atomic_cmpset_int(&ip->refs, refs, refs - 1))
				break;
		}
	}
}
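
The drop loop follows the common pattern for reference counts whose 1->0 transition needs extra work under a lock: ordinary decrements go through a plain cmpset loop, while the final decrement is attempted only after the interlock (here pmp->inum_spin) is held, so a racing inumber lookup cannot resurrect the inode half-way through teardown. A stripped-down sketch of the same structure in C11 atomics, with hypothetical hooks standing in for the HAMMER2 specifics.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	_Atomic unsigned int refs;
	/* ... payload ... */
};

/* Hypothetical stand-ins for the spinlock, tree removal, and kfree(). */
static void obj_interlock(struct obj *o)      { (void)o; }
static void obj_interlock_done(struct obj *o) { (void)o; }
static void obj_teardown(struct obj *o)       { obj_interlock_done(o); free(o); }

static void
obj_drop(struct obj *o)
{
	unsigned int refs;

	for (;;) {
		refs = atomic_load(&o->refs);
		if (refs == 1) {
			/* Final ref: take the interlock before the 1->0 CAS. */
			obj_interlock(o);
			if (atomic_compare_exchange_strong(&o->refs, &refs, 0)) {
				obj_teardown(o);
				return;
			}
			obj_interlock_done(o);	/* lost a race, retry */
		} else {
			/* Ordinary decrement, no interlock required. */
			if (atomic_compare_exchange_strong(&o->refs, &refs,
			    refs - 1))
				return;
		}
	}
}

int
main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refs, 2);
	obj_drop(o);		/* 2 -> 1 */
	obj_drop(o);		/* 1 -> 0, teardown frees the object */
	puts("dropped");
	return 0;
}
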
Example #14
void
kproc_suspend_loop(void)
{
	struct thread *td = curthread;

	if (td->td_mpflags & TDF_MP_STOPREQ) {
		lwkt_gettoken(&kpsus_token);
		atomic_clear_int(&td->td_mpflags, TDF_MP_STOPREQ);
		while ((td->td_mpflags & TDF_MP_WAKEREQ) == 0) {
			wakeup(td);
			tsleep(td, 0, "kpsusp", 0);
		}
		atomic_clear_int(&td->td_mpflags, TDF_MP_WAKEREQ);
		wakeup(td);
		lwkt_reltoken(&kpsus_token);
	}
}
Example #15
void
pmap_inval_deinterlock(pmap_inval_info_t info, pmap_t pmap)
{
    KKASSERT(info->pir_flags & PIRF_CPUSYNC);
    atomic_clear_int(&pmap->pm_active_lock, CPULOCK_EXCL);
    lwkt_cpusync_deinterlock(&info->pir_cpusync);
    info->pir_flags = 0;
}
Example #16
static void
pmap_inval_done(pmap_t pmap)
{
    if (pmap != &kernel_pmap) {
        atomic_add_acq_long(&pmap->pm_invgen, 1);
        atomic_clear_int(&pmap->pm_active_lock, CPULOCK_EXCL);
    }
    crit_exit_id("inval");
}
Example #17
void
hammer2_thr_unfreeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	lockmgr(&thr->lk, LK_EXCLUSIVE);
	atomic_clear_int(&thr->flags, HAMMER2_THREAD_FROZEN);
	wakeup(&thr->flags);
	lockmgr(&thr->lk, LK_RELEASE);
}
Example #18
/*
 * Bounce through this wrapper function since ACPI-CA doesn't understand
 * the pending argument for its callbacks.
 */
static void
acpi_task_execute(void *context, int pending)
{
    struct acpi_task_ctx *at;

    at = (struct acpi_task_ctx *)context;
    at->at_function(at->at_context);
    atomic_clear_int(&at->at_flag, ACPI_TASK_USED | ACPI_TASK_ENQUEUED);
    acpi_task_count--;
}
Example #19
int
copyout_nofault(const void *kaddr, void *udaddr, size_t len)
{
	thread_t td = curthread;
	int error;

	atomic_set_int(&td->td_flags, TDF_NOFAULT);
	error = copyout(kaddr, udaddr, len);
	atomic_clear_int(&td->td_flags, TDF_NOFAULT);
	return error;
}
Example #20
/*
 * uiomove() but fail for non-trivial VM faults, even if the VM fault is
 * valid.  Returns EFAULT if a VM fault occurred via the copyin/copyout
 * onfault code.
 *
 * This allows callers to hold e.g. a busy VM page, or a busy VM object,
 * or a locked vnode through the call and then fall-back to safer code
 * if we fail.
 */
int
uiomove_nofault(caddr_t cp, size_t n, struct uio *uio)
{
	thread_t td = curthread;
	int error;

	atomic_set_int(&td->td_flags, TDF_NOFAULT);
	error = uiomove(cp, n, uio);
	atomic_clear_int(&td->td_flags, TDF_NOFAULT);
	return error;
}
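
The comment describes the intended calling shape: do the copy under TDF_NOFAULT while still holding a resource that must not be held across a hard VM fault, and if EFAULT comes back, release the resource and redo the operation through the normal, fault-capable path. A hedged sketch of that fall-back pattern; object_lock()/object_unlock() are hypothetical stand-ins for whatever busy page, VM object, or locked vnode the caller is protecting.

/*
 * Hypothetical caller-side sketch of the nofault fall-back pattern
 * (the same shape applies to uiomove_nofault()).
 */
static int
export_with_fallback(const void *kaddr, void *udaddr, size_t len)
{
	int error;

	object_lock();
	error = copyout_nofault(kaddr, udaddr, len);
	object_unlock();

	if (error == EFAULT) {
		/*
		 * The user buffer needs a non-trivial fault.  Retry with
		 * the normal copyout() now that the lock has been dropped.
		 */
		error = copyout(kaddr, udaddr, len);
	}
	return (error);
}
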
Example #21
/*
 * Release an inode lock.  If another thread is blocked on SYNCQ_WAKEUP
 * we wake them up.
 */
void
hammer2_inode_unlock(hammer2_inode_t *ip)
{
	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP) {
		atomic_clear_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
		hammer2_mtx_unlock(&ip->lock);
		wakeup(&ip->flags);
	} else {
		hammer2_mtx_unlock(&ip->lock);
	}
	hammer2_inode_drop(ip);
}
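
The unlock path only calls wakeup() on &ip->flags when SYNCQ_WAKEUP is set, so a blocked thread has to set that flag before it sleeps. A hypothetical waiter-side fragment in DragonFly's tsleep-interlock style, not copied from the HAMMER2 sources, just to show how the two halves pair up.

	/*
	 * Hypothetical waiter: request a wakeup from the current holder and
	 * sleep on &ip->flags.  tsleep_interlock() closes the race between
	 * setting the flag and actually going to sleep.
	 */
	atomic_set_int(&ip->flags, HAMMER2_INODE_SYNCQ_WAKEUP);
	tsleep_interlock(&ip->flags, 0);
	if (ip->flags & HAMMER2_INODE_SYNCQ_WAKEUP)
		tsleep(&ip->flags, PINTERLOCKED, "h2sync", 0);
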
Example #22
static int
fbsdrun_deletecpu(struct vmctx *ctx, int vcpu)
{

	if ((cpumask & (1 << vcpu)) == 0) {
		fprintf(stderr, "addcpu: attempting to delete unknown cpu %d\n",
		    vcpu);
		exit(1);
	}

	atomic_clear_int(&cpumask, 1 << vcpu);
	return (cpumask == 0);
}
Example #23
/*
 * Per-port thread helper.  This helper thread is responsible for
 * atomically retrieving and clearing the signal mask and calling
 * the machine-independent driver core.
 *
 * MPSAFE
 */
static
void
sili_port_thread(void *arg)
{
	struct sili_port *ap = arg;
	int mask;

	/*
	 * The helper thread is responsible for the initial port init,
	 * so all the ports can be inited in parallel.
	 *
	 * We also run the state machine which should do all probes.
	 * Since CAM is not attached yet we will not get out-of-order
	 * SCSI attachments.
	 */
	sili_os_lock_port(ap);
	sili_port_init(ap);
	sili_port_state_machine(ap, 1);
	sili_os_unlock_port(ap);
	atomic_clear_int(&ap->ap_signal, AP_SIGF_INIT);
	wakeup(&ap->ap_signal);

	/*
	 * Then loop on the helper core.
	 */
	mask = ap->ap_signal;
	while ((mask & AP_SIGF_STOP) == 0) {
		atomic_clear_int(&ap->ap_signal, mask);
		sili_port_thread_core(ap, mask);
		lockmgr(&ap->ap_sig_lock, LK_EXCLUSIVE);
		if (ap->ap_signal == 0) {
			lksleep(&ap->ap_thread, &ap->ap_sig_lock, 0,
				"siport", 0);
		}
		lockmgr(&ap->ap_sig_lock, LK_RELEASE);
		mask = ap->ap_signal;
	}
	ap->ap_thread = NULL;
}
Example #24
/*
 * Chain pending links.  Called on the last release of an exclusive or
 * shared lock when the appropriate WANTED bit is set.  mtx_lock old state
 * is passed in with the count left at 1, which we can inherit, and other
 * bits which we must adjust in a single atomic operation.
 *
 * Return non-zero on success, 0 if caller needs to retry.
 *
 * NOTE: It's ok if MTX_EXWANTED is in an indeterminate state while we are
 *	 acquiring LINKSPIN as all other cases will also need to acquire
 *	 LINKSPIN when handling the EXWANTED case.
 */
static int
mtx_chain_link_ex(mtx_t *mtx, u_int olock)
{
	thread_t td = curthread;
	mtx_link_t *link;
	u_int	nlock;

	olock &= ~MTX_LINKSPIN;
	nlock = olock | MTX_LINKSPIN | MTX_EXCLUSIVE;	/* upgrade if necc */
	crit_enter_raw(td);
	if (atomic_cmpset_int(&mtx->mtx_lock, olock, nlock)) {
		link = mtx->mtx_exlink;
		KKASSERT(link != NULL);
		if (link->next == link) {
			mtx->mtx_exlink = NULL;
			nlock = MTX_LINKSPIN | MTX_EXWANTED;	/* to clear */
		} else {
			mtx->mtx_exlink = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
			nlock = MTX_LINKSPIN;			/* to clear */
		}
		KKASSERT(link->state == MTX_LINK_LINKED_EX);
		mtx->mtx_owner = link->owner;
		cpu_sfence();

		/*
		 * WARNING! The callback can only be safely
		 *	    made with LINKSPIN still held
		 *	    and in a critical section.
		 *
		 * WARNING! The link can go away after the
		 *	    state is set, or after the
		 *	    callback.
		 */
		if (link->callback) {
			link->state = MTX_LINK_CALLEDBACK;
			link->callback(link, link->arg, 0);
		} else {
			link->state = MTX_LINK_ACQUIRED;
			wakeup(link);
		}
		atomic_clear_int(&mtx->mtx_lock, nlock);
		crit_exit_raw(td);
		return 1;
	}
	/* retry */
	crit_exit_raw(td);

	return 0;
}
Example #25
/*
 * Allows an unprotected signal handler or mailbox to signal an interrupt
 *
 * For sched_ithd_hard_virtual() to properly preempt via lwkt_schedule() we
 * cannot enter a critical section here.  We use td_nest_count instead.
 */
void
signalintr(int intr)
{
	struct mdglobaldata *gd = mdcpu;
	thread_t td = gd->mi.gd_curthread;

	if (td->td_critcount || td->td_nest_count) {
		atomic_set_int_nonlocked(&gd->gd_fpending, 1 << intr);
		atomic_set_int(&gd->mi.gd_reqflags, RQF_INTPEND);
	} else {
		++td->td_nest_count;
		atomic_clear_int(&gd->gd_fpending, 1 << intr);
		sched_ithd_hard_virtual(intr);
		--td->td_nest_count;
	}
}
Example #26
/*
 * Flushes the inode's chain and its sub-topology to media.  Interlocks
 * HAMMER2_INODE_DIRTYDATA by clearing it prior to the flush.  Any strategy
 * function creating or modifying a chain under this inode will re-set the
 * flag.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_flush(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_fsync_t *xop;
	int error;

	atomic_clear_int(&ip->flags, HAMMER2_INODE_DIRTYDATA);
	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING | flags);
	hammer2_xop_start(&xop->head, &hammer2_inode_flush_desc);
	error = hammer2_xop_collect(&xop->head, HAMMER2_XOP_COLLECT_WAITALL);
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
	if (error == HAMMER2_ERROR_ENOENT)
		error = 0;

	return error;
}
Example #27
/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static
void
mtx_delete_link(mtx_t mtx, mtx_link_t link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_EXLINK.
	 *
	 * Do not use cmpxchg to wait for EXLINK to clear as this might
	 * result in too much cpu cache traffic.
	 */
	++td->td_critcount;
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_EXLINK) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		/* lock &= ~MTX_EXLINK; */
		nlock = lock | MTX_EXLINK;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
		++mtx_collision_count;
	}

	/*
	 * Delete the link and release EXLINK.
	 */
	if (link->state == MTX_LINK_LINKED) {
		if (link->next == link) {
			mtx->mtx_link = NULL;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_IDLE;
	}
	atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
	--td->td_critcount;
}
Example #28
/*
 * Simulate Linux behaviour more closely and switch tables here rather
 * than in dm_table_load_ioctl.
 */
int
dm_dev_resume_ioctl(prop_dictionary_t dm_dict)
{
	dm_dev_t *dmv;
	const char *name, *uuid;
	uint32_t flags, minor;

	name = NULL;
	uuid = NULL;
	flags = 0;

	/*
	 * char *xml; xml = prop_dictionary_externalize(dm_dict);
	 * printf("%s\n",xml);
	 */

	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_NAME, &name);
	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_UUID, &uuid);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_FLAGS, &flags);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_MINOR, &minor);

	/* Remove device from global device list */
	if ((dmv = dm_dev_lookup(name, uuid, minor)) == NULL) {
		DM_REMOVE_FLAG(flags, DM_EXISTS_FLAG);
		return ENOENT;
	}
	atomic_clear_int(&dmv->flags, (DM_SUSPEND_FLAG | DM_INACTIVE_PRESENT_FLAG));
	atomic_set_int(&dmv->flags, DM_ACTIVE_PRESENT_FLAG);

	dm_table_switch_tables(&dmv->table_head);

	DM_ADD_FLAG(flags, DM_EXISTS_FLAG);

	dmsetdiskinfo(dmv->diskp, &dmv->table_head);

	prop_dictionary_set_uint32(dm_dict, DM_IOCTL_OPEN, dmv->table_head.io_cnt);
	prop_dictionary_set_uint32(dm_dict, DM_IOCTL_FLAGS, flags);
	prop_dictionary_set_uint32(dm_dict, DM_IOCTL_MINOR, dmv->minor);

	dm_dev_unbusy(dmv);

	/* Destroy inactive table after resume. */
	dm_table_destroy(&dmv->table_head, DM_TABLE_INACTIVE);

	return 0;
}
Example #29
/*
 * Synchronize the inode's frontend state with the chain state prior
 * to any explicit flush of the inode or any strategy write call.  This
 * does not flush the inode's chain or its sub-topology to media (higher
 * level layers are responsible for doing that).
 *
 * Called with a locked inode inside a normal transaction.
 *
 * inode must be locked.
 */
int
hammer2_inode_chain_sync(hammer2_inode_t *ip)
{
	int error;

	error = 0;
	if (ip->flags & (HAMMER2_INODE_RESIZED | HAMMER2_INODE_MODIFIED)) {
		hammer2_xop_fsync_t *xop;

		xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
		xop->clear_directdata = 0;
		if (ip->flags & HAMMER2_INODE_RESIZED) {
			if ((ip->meta.op_flags & HAMMER2_OPFLAG_DIRECTDATA) &&
			    ip->meta.size > HAMMER2_EMBEDDED_BYTES) {
				ip->meta.op_flags &= ~HAMMER2_OPFLAG_DIRECTDATA;
				xop->clear_directdata = 1;
			}
			xop->osize = ip->osize;
		} else {
			xop->osize = ip->meta.size;	/* safety */
		}
		xop->ipflags = ip->flags;
		xop->meta = ip->meta;

		atomic_clear_int(&ip->flags, HAMMER2_INODE_RESIZED |
					     HAMMER2_INODE_MODIFIED);
		hammer2_xop_start(&xop->head, &hammer2_inode_chain_sync_desc);
		error = hammer2_xop_collect(&xop->head, 0);
		hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
		if (error == HAMMER2_ERROR_ENOENT)
			error = 0;
		if (error) {
			kprintf("hammer2: unable to fsync inode %p\n", ip);
			/*
			atomic_set_int(&ip->flags,
				       xop->ipflags & (HAMMER2_INODE_RESIZED |
						       HAMMER2_INODE_MODIFIED));
			*/
			/* XXX return error somehow? */
		}
	}
	return error;
}
Example #30
static __inline
void
_lwkt_pullmsg(lwkt_port_t port, lwkt_msg_t msg)
{
    lwkt_msg_queue *queue;

    /*
     * normal case, remove and return the message.
     */
    if (__predict_false(msg->ms_flags & MSGF_PRIORITY))
        queue = &port->mp_msgq_prio;
    else
        queue = &port->mp_msgq;
    TAILQ_REMOVE(queue, msg, ms_node);

    /*
     * atomic op needed for spin ports
     */
    atomic_clear_int(&msg->ms_flags, MSGF_QUEUED);
}