Example #1
void
vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
    vm_offset_t end)
{
	struct vnode *vp;
	vm_ooffset_t old_wm;

	VM_OBJECT_WLOCK(object);
	if (object->type != OBJT_VNODE) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	old_wm = object->un_pager.vnp.writemappings;
	object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start;
	vp = object->handle;
	if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) {
		ASSERT_VOP_ELOCKED(vp, "v_writecount inc");
		VOP_ADD_WRITECOUNT(vp, 1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
		    __func__, vp, vp->v_writecount);
	} else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) {
		ASSERT_VOP_ELOCKED(vp, "v_writecount dec");
		VOP_ADD_WRITECOUNT(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	VM_OBJECT_WUNLOCK(object);
}
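The update above is edge-triggered: the per-object byte count of writable mappings may rise and fall freely, but the vnode's v_writecount moves only on the zero-to-nonzero and nonzero-to-zero transitions. A minimal user-space sketch of that bookkeeping, with a hypothetical struct obj and update_writecount() standing in for the kernel types:

/* Not the kernel code: a stand-alone model of transition-edge refcounting. */
#include <stdint.h>
#include <stdio.h>

struct obj {
	uint64_t writemappings;	/* bytes currently mapped writable */
	int	 writecount;	/* held while any writable mapping exists */
};

static void
update_writecount(struct obj *o, int64_t delta)
{
	uint64_t old_wm = o->writemappings;

	o->writemappings += delta;
	if (old_wm == 0 && o->writemappings != 0)
		o->writecount++;	/* first writable mapping appeared */
	else if (old_wm != 0 && o->writemappings == 0)
		o->writecount--;	/* last writable mapping went away */
}

int
main(void)
{
	struct obj o = { 0, 0 };

	update_writecount(&o, 4096);	/* writecount becomes 1 */
	update_writecount(&o, 8192);	/* still 1 */
	update_writecount(&o, -12288);	/* all mappings gone: back to 0 */
	printf("writecount=%d\n", o.writecount);
	return (0);
}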
Example #2
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct c4iw_rdev *rdev;
	int ret = 0;
	struct c4iw_mm_entry *mm;
	struct c4iw_ucontext *ucontext;
	u64 addr, paddr;

	u64 va_regs_res = 0, va_udbs_res = 0;
	u64 len_regs_res = 0, len_udbs_res = 0;

	CTR3(KTR_IW_CXGBE, "%s:1 ctx %p vma %p", __func__, context, vma);

	CTR4(KTR_IW_CXGBE, "%s:1a pgoff 0x%lx key 0x%x len %d", __func__,
	    vma->vm_pgoff, key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		CTR3(KTR_IW_CXGBE, "%s:2 unaligned vm_start %u vma %p",
		    __func__, vma->vm_start, vma);
		return -EINVAL;
	}

	rdev = &(to_c4iw_dev(context->device)->rdev);
	ucontext = to_c4iw_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm) {
		CTR4(KTR_IW_CXGBE, "%s:3 ucontext %p key %u len %u", __func__,
		    ucontext, key, len);
		return -EINVAL;
	}
	addr = mm->addr;
	kfree(mm);

	va_regs_res = (u64)rman_get_virtual(rdev->adap->regs_res);
	len_regs_res = (u64)rman_get_size(rdev->adap->regs_res);
	va_udbs_res = (u64)rman_get_virtual(rdev->adap->udbs_res);
	len_udbs_res = (u64)rman_get_size(rdev->adap->udbs_res);

	CTR6(KTR_IW_CXGBE,
	    "%s:4 addr %p, masync region %p:%p, udb region %p:%p", __func__,
	    addr, va_regs_res, va_regs_res+len_regs_res, va_udbs_res,
	    va_udbs_res+len_udbs_res);

	if (addr >= va_regs_res && addr < va_regs_res + len_regs_res) {
		CTR4(KTR_IW_CXGBE, "%s:5 MA_SYNC addr %p region %p, reglen %u",
		    __func__, addr, va_regs_res, len_regs_res);
		/*
		 * MA_SYNC register...
		 */
		paddr = vtophys(addr);
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = io_remap_pfn_range(vma, vma->vm_start,
				paddr >> PAGE_SHIFT,
				len, vma->vm_page_prot);
	} else {
Example #3
static unsigned int
cxgbei_task_reserve_itt(struct icl_conn *ic, void **prv,
			struct ccb_scsiio *scmd, unsigned int *itt)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	int xferlen = scmd->dxfer_len;
	struct cxgbei_task_data *tdata = NULL;
	struct cxgbei_sgl *sge = NULL;
	struct toepcb *toep = icc->toep;
	struct adapter *sc = td_adapter(toep->td);
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	int err = -1;

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);

	tdata = (struct cxgbei_task_data *)*prv;
	if (xferlen == 0 || tdata == NULL)
		goto out;
	if (xferlen < DDP_THRESHOLD)
		goto out;

	if ((scmd->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		tdata->nsge = cxgbei_map_sg(tdata->sgl, scmd);
		if (tdata->nsge == 0) {
			CTR1(KTR_CXGBE, "%s: map_sg failed", __func__);
			return 0;
		}
		sge = tdata->sgl;

		tdata->sc_ddp_tag = *itt;

		CTR3(KTR_CXGBE, "%s: *itt:0x%x sc_ddp_tag:0x%x",
				__func__, *itt, tdata->sc_ddp_tag);
		if (cxgbei_ulp2_sw_tag_usable(&ci->tag_format,
							tdata->sc_ddp_tag)) {
			err = t4_sk_ddp_tag_reserve(ci, icc, scmd->dxfer_len,
			    sge, tdata->nsge, &tdata->sc_ddp_tag);
		} else {
			CTR3(KTR_CXGBE,
				"%s: itt:0x%x sc_ddp_tag:0x%x not usable",
				__func__, *itt, tdata->sc_ddp_tag);
		}
	}
out:
	if (tdata == NULL) {
		/* Avoid dereferencing a NULL tdata; hand back a non-DDP tag. */
		return (cxgbei_ulp2_set_non_ddp_tag(&ci->tag_format, *itt));
	}
	if (err < 0)
		tdata->sc_ddp_tag =
			cxgbei_ulp2_set_non_ddp_tag(&ci->tag_format, *itt);

	return tdata->sc_ddp_tag;
}
Example #4
void cxio_dump_tpt(struct cxio_rdev *rdev, uint32_t stag)
{
	struct ch_mem_range *m;
	u64 *data;
	int rc;
	int size = 32;

	m = kmalloc(sizeof(*m) + size, M_NOWAIT);
	if (!m) {
		CTR1(KTR_IW_CXGB, "%s couldn't allocate memory.", __FUNCTION__);
		return;
	}
	m->mem_id = MEM_PMRX;
	m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base;
	m->len = size;
	CTR3(KTR_IW_CXGB, "%s TPT addr 0x%x len %d", __FUNCTION__, m->addr, m->len);
	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
	if (rc) {
		CTR2(KTR_IW_CXGB, "%s toectl returned error %d", __FUNCTION__, rc);
		free(m, M_DEVBUF);
		return;
	}

	data = (u64 *)m->buf;
	while (size > 0) {
		CTR2(KTR_IW_CXGB, "TPT %08x: %016llx", m->addr, (unsigned long long) *data);
		size -= 8;
		data++;
		m->addr += 8;
	}
	free(m, M_DEVBUF);
}
Example #5
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, size);
	MMU_UNMAPDEV(mmu_obj, va, size);
}
Example #6
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
	return (MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma));
}
Example #7
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_MAPDEV(mmu_obj, pa, size));
}
Example #8
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
	return (MMU_MINCORE(mmu_obj, pmap, addr, locked_pa));
}
Example #9
void
pmap_qremove(vm_offset_t start, int count)
{

	CTR3(KTR_PMAP, "%s(%#x, %d)", __func__, start, count);
	MMU_QREMOVE(mmu_obj, start, count);
}
Example #10
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
	return (MMU_PAGE_EXISTS_QUICK(mmu_obj, pmap, m));
}
Example #11
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_IS_PREFAULTABLE(mmu_obj, pmap, va));
}
Example #12
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{

	CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, va);
	return (MMU_EXTRACT(mmu_obj, pmap, va));
}
Example #13
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	CTR3(KTR_PMAP, "%s(%p, %p)", __func__, src, dst);
	MMU_COPY_PAGE(mmu_obj, src, dst);
}
Example #14
int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
					struct iwch_mr *mhp,
					int shift,
					__be64 *page_list)
{
	u32 stag;
	u32 mmid;


	if (cxio_register_phys_mem(&rhp->rdev,
				   &stag, mhp->attr.pdid,
				   mhp->attr.perms,
				   mhp->attr.zbva,
				   mhp->attr.va_fbo,
				   mhp->attr.len,
				   shift-12,
				   page_list,
				   &mhp->attr.pbl_size, &mhp->attr.pbl_addr))
		return (-ENOMEM);
	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
	CTR3(KTR_IW_CXGB, "%s mmid 0x%x mhp %p", __FUNCTION__, mmid, mhp);
	return 0;
}
Example #15
boolean_t
pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
	return (MMU_DEV_DIRECT_MAPPED(mmu_obj, pa, size));
}
Example #16
void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
	MMU_KENTER(mmu_obj, va, pa);
}
Example #17
/*
 * Active open succeeded.
 */
static int
do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_establish *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	unsigned int atid = G_TID_TID(ntohl(cpl->tos_atid));
	struct toepcb *toep = lookup_atid(sc, atid);
	struct inpcb *inp = toep->inp;

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));

	CTR3(KTR_CXGBE, "%s: atid %u, tid %u", __func__, atid, tid);
	free_atid(sc, atid);

	INP_WLOCK(inp);
	toep->tid = tid;
	insert_tid(sc, tid, toep);
	if (inp->inp_flags & INP_DROPPED) {

		/* socket closed by the kernel before hw told us it connected */

		send_flowc_wr(toep, NULL);
		send_reset(sc, toep, be32toh(cpl->snd_isn));
		goto done;
	}

	make_established(toep, cpl->snd_isn, cpl->rcv_isn, cpl->tcp_opt);
done:
	INP_WUNLOCK(inp);
	return (0);
}
Example #18
static int
do_rx_iscsi_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	struct cpl_iscsi_data *cpl =  mtod(m, struct cpl_iscsi_data *);
	u_int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct icl_cxgbei_pdu *icp = toep->ulpcb2;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_pkthdr.len == be16toh(cpl->len) + sizeof(*cpl));

	/* Must already have received the header (but not the data). */
	MPASS(icp != NULL);
	MPASS(icp->icp_flags == ICPF_RX_HDR);
	MPASS(icp->ip.ip_data_mbuf == NULL);


	m_adj(m, sizeof(*cpl));
	MPASS(icp->ip.ip_data_len == m->m_pkthdr.len);

	icp->icp_flags |= ICPF_RX_FLBUF;
	icp->ip.ip_data_mbuf = m;
	counter_u64_add(ci->fl_pdus, 1);
	counter_u64_add(ci->fl_bytes, m->m_pkthdr.len);

#if 0
	CTR3(KTR_CXGBE, "%s: tid %u, cpl->len %u", __func__, tid,
	    be16toh(cpl->len));
#endif

	return (0);
}
Example #19
void cxio_dump_tcb(struct cxio_rdev *rdev, uint32_t hwtid)
{
	struct ch_mem_range *m;
	int size = TCB_SIZE;
	uint32_t *data;
	int rc;

	m = kmalloc(sizeof(*m) + size, M_NOWAIT);
	if (!m) {
		CTR1(KTR_IW_CXGB, "%s couldn't allocate memory.", __FUNCTION__);
		return;
	}
	m->mem_id = MEM_CM;
	m->addr = hwtid * size;
	m->len = size;
	CTR3(KTR_IW_CXGB, "%s TCB %d len %d", __FUNCTION__, m->addr, m->len);
	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
	if (rc) {
		CTR2(KTR_IW_CXGB, "%s toectl returned error %d", __FUNCTION__, rc);
		free(m, M_DEVBUF);
		return;
	}

	data = (uint32_t *)m->buf;
	while (size > 0) {
		printf("%2u: %08x %08x %08x %08x %08x %08x %08x %08x\n",
			m->addr,
			*(data+2), *(data+3), *(data),*(data+1),
			*(data+6), *(data+7), *(data+4), *(data+5));
		size -= 32;
		data += 8;
		m->addr += 32;
	}
	free(m, M_DEVBUF);
}
Example #20
int
freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap)
{
	ucontext32_t uc;
	int error;

	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
		return (EFAULT);
	}

	error = set_mcontext32(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

#if 0
	CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
	     td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);
#endif

	return (EJUSTRETURN);
}
Example #21
/*
 * Active open failed.
 */
static int
do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct toepcb *toep = lookup_atid(sc, atid);
	int rc;

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));

	CTR3(KTR_CXGBE, "%s: atid %u, status %u ", __func__, atid, status);

	/* Ignore negative advice */
	if (negative_advice(status))
		return (0);

	if (status && act_open_has_tid(status))
		release_tid(sc, GET_TID(cpl), toep->ctrlq);

	rc = act_open_rpl_status_to_errno(status);
	act_open_failure_cleanup(sc, atid, rc);

	return (0);
}
Example #22
/*
 *	The object must be locked.
 */
static void
vnode_pager_dealloc(vm_object_t object)
{
	struct vnode *vp;
	int refs;

	vp = object->handle;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_object_pip_wait(object, "vnpdea");
	refs = object->ref_count;

	object->handle = NULL;
	object->type = OBJT_DEAD;
	if (object->flags & OBJ_DISCONNECTWNT) {
		vm_object_clear_flag(object, OBJ_DISCONNECTWNT);
		wakeup(object);
	}
	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
	if (object->un_pager.vnp.writemappings > 0) {
		object->un_pager.vnp.writemappings = 0;
		VOP_ADD_WRITECOUNT(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	vp->v_object = NULL;
	VOP_UNSET_TEXT(vp);
	VM_OBJECT_WUNLOCK(object);
	while (refs-- > 0)
		vunref(vp);
	VM_OBJECT_WLOCK(object);
}
Example #23
int
arm_get_next_irq(int last)
{
	u_int filt, irq;
	int next;

	/* Mask off IRQs 0..last so the scan resumes after the last one serviced. */
	filt = ~((last >= 0) ? (2 << last) - 1 : 0);
	irq = mv_ic_get_cause() & mv_ic_get_mask();
	if (irq & filt) {
		next = ffs(irq & filt) - 1;
		goto out;
	}
	if (mv_ic_sc->ic_high_regs) {
		filt = ~((last >= 32) ? (2 << (last - 32)) - 1 : 0);
		irq = mv_ic_get_cause_hi() & mv_ic_get_mask_hi();
		if (irq & filt) {
			next = ffs(irq & filt) + 31;
			goto out;
		}
	}
	if (mv_ic_sc->ic_error_regs) {
		filt = ~((last >= 64) ? (2 << (last - 64)) - 1 : 0);
		irq = mv_ic_get_cause_error() & mv_ic_get_mask_error();
		if (irq & filt) {
			next = ffs(irq & filt) + 63;
			goto out;
		}
	}
	next = -1;

 out:
	CTR3(KTR_INTR, "%s: last=%d, next=%d", __func__, last, next);
	return (next);
}
Example #24
/*
 * Enable an EA using the passed filesystem, backing vnode, attribute name,
 * namespace, and proc.  Will perform a VOP_OPEN() on the vp, so expects vp
 * to be locked when passed in.  The vnode will be returned unlocked,
 * regardless of success/failure of the function.  As a result, the caller
 * will always need to vrele(), but not vput().
 */
static int
ufs_extattr_enable_with_open(struct ufsmount *ump, struct vnode *vp,
    int attrnamespace, const char *attrname, struct thread *td)
{
	int error;

	error = VOP_OPEN(vp, FREAD|FWRITE, td->td_ucred, td, NULL);
	if (error) {
		printf("ufs_extattr_enable_with_open.VOP_OPEN(): failed "
		    "with %d\n", error);
		VOP_UNLOCK(vp, 0);
		return (error);
	}

	error = VOP_ADD_WRITECOUNT(vp, 1);
	if (error != 0) {
		VOP_CLOSE(vp, FREAD | FWRITE, td->td_ucred, td);
		VOP_UNLOCK(vp, 0);
		return (error);
	}
	CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d", __func__, vp,
	    vp->v_writecount);

	vref(vp);

	VOP_UNLOCK(vp, 0);

	error = ufs_extattr_enable(ump, attrnamespace, attrname, vp, td);
	if (error != 0)
		vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
	return (error);
}
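A hypothetical caller sketch (not taken from the source) illustrating the contract described in the comment above: the vnode is handed over locked, comes back unlocked whether the call succeeds or fails, and is therefore released with vrele(), never vput(). The function name example_enable_user_ea and the attribute name argument are placeholders.

static int
example_enable_user_ea(struct ufsmount *ump, struct vnode *vp,
    const char *attrname, struct thread *td)
{
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);		/* pass vp in locked */
	error = ufs_extattr_enable_with_open(ump, vp, EXTATTR_NAMESPACE_USER,
	    attrname, td);				/* vp comes back unlocked */
	vrele(vp);					/* so vrele(), not vput() */
	return (error);
}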
Example #25
/*
 * Send an IPI to a set of CPUs.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
	struct pcpu *pcpu;
	u_int cpuid, new_pending, old_pending;

	CTR3(KTR_SMP, "%s: cpus: %x, ipi: %x\n", __func__, cpus, ipi);

	while ((cpuid = ffs(cpus)) != 0) {
		cpuid--;
		cpus &= ~(1 << cpuid);
		pcpu = pcpu_find(cpuid);

		if (pcpu) {
			do {
				old_pending = pcpu->pc_pending_ipis;
				new_pending = old_pending | ipi;
			} while (!atomic_cmpset_int(&pcpu->pc_pending_ipis,
			    old_pending, new_pending));	

			if (old_pending)
				continue;

			mips_ipi_send (cpuid);
		}
	}
}
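The loop above ORs the new IPI bits into the target CPU's pending word with a compare-and-swap and sends the hardware interrupt only when the word was previously empty, so concurrent senders do not generate redundant interrupts. A user-space restatement of that pattern using C11 atomics (the names below are placeholders, not kernel symbols):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned int pending_ipis;	/* stand-in for pcpu->pc_pending_ipis */

/* OR the IPI bits in; report whether the target was previously idle. */
static bool
post_ipi(unsigned int ipi)
{
	unsigned int oldv = atomic_load(&pending_ipis);
	unsigned int newv;

	do {
		newv = oldv | ipi;
	} while (!atomic_compare_exchange_weak(&pending_ipis, &oldv, newv));

	/* Only the idle -> pending transition needs a hardware interrupt. */
	return (oldv == 0);
}

int
main(void)
{
	bool kick1 = post_ipi(0x2);	/* word was empty: caller would send the IPI */
	bool kick2 = post_ipi(0x4);	/* already pending: just accumulate the bit */

	return (kick1 && !kick2 ? 0 : 1);
}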
Example #26
int
rwindow_load(struct thread *td, struct trapframe *tf, int n)
{
	struct rwindow rw;
	u_long usp;
	int error;
	int i;

	CTR3(KTR_TRAP, "rwindow_load: td=%p (%s) n=%d",
	    td, td->td_proc->p_comm, n);

	/*
	 * In case current window is still only on-chip, push it out;
	 * if it cannot get all the way out, we cannot continue either.
	 */
	if ((error = rwindow_save(td)) != 0)
		return (error);
	usp = tf->tf_out[6];
	for (i = 0; i < n; i++) {
		CTR1(KTR_TRAP, "rwindow_load: usp=%#lx", usp);
		usp += SPOFF;
		if ((error = (usp & 0x7)) != 0)
			break;
		error = copyin((void *)usp, &rw, sizeof rw);
		usp = rw.rw_in[6];
	}
	CTR1(KTR_TRAP, "rwindow_load: error=%d", error);
	return (error == 0 ? 0 : SIGILL);
}
Example #27
/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (SCHEDULER_STOPPED())
		return;

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		rw->rw_recurse--;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters if we have any over writers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wakeup the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}
Example #28
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child
 * is ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
    struct proc *p1;
    struct trapframe *tf;
    struct callframe *cf;
    struct pcb *pcb;

    KASSERT(td1 == curthread || td1 == &thread0,
            ("cpu_fork: p1 not curproc and not proc0"));
    CTR3(KTR_PROC, "cpu_fork: called td1=%08x p2=%08x flags=%x", (u_int)td1,
         (u_int)p2, flags);

    if ((flags & RFPROC) == 0)
        return;

    p1 = td1->td_proc;

    pcb = (struct pcb *)((td2->td_kstack +
                          td2->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb)) & ~0x3fU);
    td2->td_pcb = pcb;

    /* Copy the pcb */
    bcopy(td1->td_pcb, pcb, sizeof(struct pcb));

    /*
     * Create a fresh stack for the new process.
     * Copy the trap frame for the return to user mode as if from a
     * syscall.  This copies most of the user mode register values.
     */
    tf = (struct trapframe *)pcb - 1;
    bcopy(td1->td_frame, tf, sizeof(*tf));

    /* Set up trap frame. */
    tf->fixreg[FIRSTARG] = 0;
    tf->fixreg[FIRSTARG + 1] = 0;
    tf->cr &= ~0x10000000;

    td2->td_frame = tf;

    cf = (struct callframe *)tf - 1;
    memset(cf, 0, sizeof(struct callframe));
    cf->cf_func = (register_t)fork_return;
    cf->cf_arg0 = (register_t)td2;
    cf->cf_arg1 = (register_t)tf;

    pcb->pcb_sp = (register_t)cf;
    pcb->pcb_lr = (register_t)fork_trampoline;

    /* Setup to release sched_lock in fork_exit(). */
    td2->td_md.md_spinlock_count = 1;
    td2->td_md.md_saved_msr = PSL_KERNSET;

    /*
     * Now cpu_switch() can schedule the new process.
     */
}
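The pointer arithmetic above packs three structures at the top of the child's kernel stack: the pcb at the aligned top, the copied trap frame immediately below it, and a call frame below that whose cf_func/cf_arg fields let fork_trampoline call fork_return(td2, tf). A tiny user-space sketch of where those frames land, using made-up sizes rather than the real structures:

#include <stdint.h>
#include <stdio.h>

#define KSTACK_TOP	0x100000u	/* pretend top of td2's kernel stack */
#define PCB_SIZE	0x100u		/* placeholder for sizeof(struct pcb) */
#define TRAPFRAME_SIZE	0x200u		/* placeholder for sizeof(struct trapframe) */
#define CALLFRAME_SIZE	0x40u		/* placeholder for sizeof(struct callframe) */

int
main(void)
{
	uintptr_t pcb = (KSTACK_TOP - PCB_SIZE) & ~(uintptr_t)0x3f;
	uintptr_t tf = pcb - TRAPFRAME_SIZE;	/* td2->td_frame */
	uintptr_t cf = tf - CALLFRAME_SIZE;	/* becomes pcb->pcb_sp */

	printf("pcb %#lx  trapframe %#lx  callframe %#lx\n",
	    (unsigned long)pcb, (unsigned long)tf, (unsigned long)cf);
	return (0);
}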
Example #29
/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
	uintptr_t x;
	int queue, wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));

	/* If the lock is recursed, then unrecurse one level. */
	if (sx_xlocked(sx) && sx_recursed(sx)) {
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}
	MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
	    SX_LOCK_EXCLUSIVE_WAITERS));
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_LOCK_UNLOCKED;

	/*
	 * The wake up algorithm here is quite simple and probably not
	 * ideal.  It gives precedence to shared waiters if they are
	 * present.  For this condition, we have to preserve the
	 * state of the exclusive waiters flag.
	 * If interruptible sleeps left the shared queue empty avoid a
	 * starvation for the threads sleeping on the exclusive queue by giving
	 * them precedence and cleaning up the shared waiters bit anyway.
	 */
	if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
		queue = SQ_SHARED_QUEUE;
		x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
	} else
		queue = SQ_EXCLUSIVE_QUEUE;

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
	atomic_store_rel_ptr(&sx->sx_lock, x);
	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}
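The selection above wakes the shared queue only when readers are actually asleep on it, and in that case writes back a lock word that still carries SX_LOCK_EXCLUSIVE_WAITERS so the queued writers are not forgotten; otherwise the writers get precedence and the stale shared-waiters bit is cleaned up, as the comment notes. A small user-space model of just that decision, with placeholder constants standing in for the sx lock word bits:

#include <stdint.h>
#include <stdio.h>

#define LOCK_UNLOCKED		0x0u
#define LOCK_SHARED_WAITERS	0x1u
#define LOCK_EXCLUSIVE_WAITERS	0x2u

enum queue { QUEUE_SHARED, QUEUE_EXCLUSIVE };

static enum queue
pick_queue(uint32_t lockword, int shared_sleepers, uint32_t *newword)
{
	*newword = LOCK_UNLOCKED;
	if ((lockword & LOCK_SHARED_WAITERS) != 0 && shared_sleepers != 0) {
		/* Wake the readers; remember that writers are still queued. */
		*newword |= lockword & LOCK_EXCLUSIVE_WAITERS;
		return (QUEUE_SHARED);
	}
	/* No sleeping readers: wake the writers and clear both waiter bits. */
	return (QUEUE_EXCLUSIVE);
}

int
main(void)
{
	uint32_t v;
	enum queue q;

	q = pick_queue(LOCK_SHARED_WAITERS | LOCK_EXCLUSIVE_WAITERS, 2, &v);
	printf("queue=%d newword=%#x\n", (int)q, v);	/* shared queue, 0x2 kept */
	return (0);
}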
Example #30
static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	CTR3(KTR_IW_CXGB, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
}