Code example #1
File: provider.c Project: Digital-Chaos/freebsd
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct c4iw_rdev *rdev;
	int ret = 0;
	struct c4iw_mm_entry *mm;
	struct c4iw_ucontext *ucontext;
	u64 addr, paddr;

	u64 va_regs_res = 0, va_udbs_res = 0;
	u64 len_regs_res = 0, len_udbs_res = 0;

	CTR3(KTR_IW_CXGBE, "%s:1 ctx %p vma %p", __func__, context, vma);

	CTR4(KTR_IW_CXGBE, "%s:1a pgoff 0x%lx key 0x%x len %d", __func__,
	    vma->vm_pgoff, key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		CTR3(KTR_IW_CXGBE, "%s:2 unaligned vm_start %u vma %p",
		    __func__, vma->vm_start, vma);
		return -EINVAL;
	}

	rdev = &(to_c4iw_dev(context->device)->rdev);
	ucontext = to_c4iw_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm) {
		CTR4(KTR_IW_CXGBE, "%s:3 ucontext %p key %u len %u", __func__,
		    ucontext, key, len);
		return -EINVAL;
	}
	addr = mm->addr;
	kfree(mm);

	va_regs_res = (u64)rman_get_virtual(rdev->adap->regs_res);
	len_regs_res = (u64)rman_get_size(rdev->adap->regs_res);
	va_udbs_res = (u64)rman_get_virtual(rdev->adap->udbs_res);
	len_udbs_res = (u64)rman_get_size(rdev->adap->udbs_res);

	CTR6(KTR_IW_CXGBE,
	    "%s:4 addr %p, masync region %p:%p, udb region %p:%p", __func__,
	    addr, va_regs_res, va_regs_res+len_regs_res, va_udbs_res,
	    va_udbs_res+len_udbs_res);

	if (addr >= va_regs_res && addr < va_regs_res + len_regs_res) {
		CTR4(KTR_IW_CXGBE, "%s:5 MA_SYNC addr %p region %p, reglen %u",
		    __func__, addr, va_regs_res, len_regs_res);
		/*
		 * MA_SYNC register...
		 */
		paddr = vtophys(addr);
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = io_remap_pfn_range(vma, vma->vm_start,
				paddr >> PAGE_SHIFT,
				len, vma->vm_page_prot);
	} else {
Code example #2
File: pmap_dispatch.c Project: 2asoft/freebsd
void
pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{

	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, va, pa, ma);
	MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
}
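
The pmap_dispatch.c wrappers collected here all follow the same pattern: record the call with CTR4() from ktr(9), then forward to the machine-dependent MMU method. The fragment below is a minimal sketch of just the tracing idiom, assuming only the standard <sys/ktr.h> interface; my_trace_example() and its arguments are hypothetical.

#include <sys/param.h>
#include <sys/ktr.h>

/*
 * Sketch of the CTR4() idiom: log the function name plus up to four
 * arguments into the kernel trace buffer under a KTR class.
 */
static void
my_trace_example(void *obj, vm_offset_t va, int flags)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %d)", __func__, obj, va, flags);
	/* ... the real work would follow the trace record ... */
}
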
Code example #3
File: pmap_dispatch.c Project: rchander/freebsd
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %u)", __func__, pmap, va, wired);
	MMU_CHANGE_WIRING(mmu_obj, pmap, va, wired);
}
Code example #4
File: pmap_dispatch.c Project: JabirTech/Source
vm_offset_t
pmap_dumpsys_map(struct pmap_md *md, vm_size_t ofs, vm_size_t *sz)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, md, ofs, *sz);
	return (MMU_DUMPSYS_MAP(mmu_obj, md, ofs, sz));
}
Code example #5
File: pmap_dispatch.c Project: JabirTech/Source
void
pmap_dumpsys_unmap(struct pmap_md *md, vm_size_t ofs, vm_offset_t va)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, md, ofs, va);
	return (MMU_DUMPSYS_UNMAP(mmu_obj, md, ofs, va));
}
Code example #6
static void
release_offload_resources(struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct adapter *sc = td_adapter(td);
	int tid = toep->tid;

	KASSERT(toepcb_flag(toep, TPF_CPL_PENDING) == 0,
	    ("%s: %p has CPL pending.", __func__, toep));
	KASSERT(toepcb_flag(toep, TPF_ATTACHED) == 0,
	    ("%s: %p is still attached.", __func__, toep));

	CTR4(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p)",
	    __func__, toep, tid, toep->l2te);

	if (toep->l2te)
		t4_l2t_release(toep->l2te);

	if (tid >= 0) {
		remove_tid(sc, tid);
		release_tid(sc, tid, toep->ctrlq);
	}

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);

	free_toepcb(toep);
}
Code example #7
File: pmap_dispatch.c Project: 2asoft/freebsd
void
pmap_unwire(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
	MMU_UNWIRE(mmu_obj, pmap, start, end);
}
Code example #8
File: pmap_dispatch.c Project: 2asoft/freebsd
void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
 
	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pm, va, sz);
	return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
}
Code example #9
File: cxgbei.c Project: Digital-Chaos/freebsd
static int
do_rx_iscsi_hdr(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	struct cpl_iscsi_hdr *cpl = mtod(m, struct cpl_iscsi_hdr *);
	u_int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct icl_pdu *ip;
	struct icl_cxgbei_pdu *icp;

	M_ASSERTPKTHDR(m);

	ip = icl_cxgbei_new_pdu(M_NOWAIT);
	if (ip == NULL)
		CXGBE_UNIMPLEMENTED("PDU allocation failure");
	icp = ip_to_icp(ip);
	bcopy(mtod(m, caddr_t) + sizeof(*cpl), icp->ip.ip_bhs, sizeof(struct
	    iscsi_bhs));
	icp->pdu_seq = ntohl(cpl->seq);
	icp->pdu_flags = SBUF_ULP_FLAG_HDR_RCVD;

	/* This is the start of a new PDU.  There should be no old state. */
	MPASS(toep->ulpcb2 == NULL);
	toep->ulpcb2 = icp;

#if 0
	CTR4(KTR_CXGBE, "%s: tid %u, cpl->len hlen %u, m->m_len hlen %u",
	    __func__, tid, ntohs(cpl->len), m->m_len);
#endif

	m_freem(m);
	return (0);
}
Code example #10
File: cxgbei.c Project: Digital-Chaos/freebsd
static int
do_rx_iscsi_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	struct cpl_iscsi_data *cpl =  mtod(m, struct cpl_iscsi_data *);
	u_int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct icl_cxgbei_pdu *icp = toep->ulpcb2;

	M_ASSERTPKTHDR(m);

	/* Must already have received the header (but not the data). */
	MPASS(icp != NULL);
	MPASS(icp->pdu_flags == SBUF_ULP_FLAG_HDR_RCVD);
	MPASS(icp->ip.ip_data_mbuf == NULL);
	MPASS(icp->ip.ip_data_len == 0);

	m_adj(m, sizeof(*cpl));

	icp->pdu_flags |= SBUF_ULP_FLAG_DATA_RCVD;
	icp->ip.ip_data_mbuf = m;
	icp->ip.ip_data_len = m->m_pkthdr.len;

#if 0
	CTR4(KTR_CXGBE, "%s: tid %u, cpl->len dlen %u, m->m_len dlen %u",
	    __func__, tid, ntohs(cpl->len), m->m_len);
#endif

	return (0);
}
Code example #11
File: pmap_dispatch.c Project: 2asoft/freebsd
void
dumpsys_unmap_chunk(vm_paddr_t pa, size_t sz, void *va)
{

	CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
	return (MMU_DUMPSYS_UNMAP(mmu_obj, pa, sz, va));
}
Code example #12
File: pmap_dispatch.c Project: 2asoft/freebsd
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	CTR4(KTR_PMAP, "%s(%p, %d, %d)", __func__, m, off, size);
	MMU_ZERO_PAGE_AREA(mmu_obj, m, off, size);
}
Code example #13
File: pmap_dispatch.c Project: 2asoft/freebsd
void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t attr)
{

	CTR4(KTR_PMAP, "%s(%#x, %#x, %#x)", __func__, pa, size, attr);
	return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
}
Code example #14
File: pmap_dispatch.c Project: 2asoft/freebsd
void
pmap_remove(pmap_t pmap, vm_offset_t start, vm_offset_t end)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, start, end);
	MMU_REMOVE(mmu_obj, pmap, start, end);
}
Code example #15
File: pmap_dispatch.c Project: 2asoft/freebsd
void
pmap_qenter(vm_offset_t start, vm_page_t *m, int count)
{

	CTR4(KTR_PMAP, "%s(%#x, %p, %d)", __func__, start, m, count);
	MMU_QENTER(mmu_obj, start, m, count);
}
Code example #16
File: pmap_dispatch.c Project: 2asoft/freebsd
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{

	CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, pmap, va, prot);
	return (MMU_EXTRACT_AND_HOLD(mmu_obj, pmap, va, prot));
}
Code example #17
File: vm_machdep.c Project: Alkzndr/freebsd
/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct callframe *cf;

	CTR4(KTR_PROC, "%s called with td=%p func=%p arg=%p",
	    __func__, td, func, arg);

	cf = (struct callframe *)td->td_pcb->pcb_sp;

	cf->cf_func = (register_t)func;
	cf->cf_arg0 = (register_t)arg;
}
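
A handler like this is typically installed when a kernel process is created. The fragment below is a hedged sketch of that consumer side using the standard kproc(9) interface; the worker function and its name are hypothetical.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kthread.h>
#include <sys/proc.h>

/* Hypothetical kernel-process main loop; it must not return. */
static void
my_worker_main(void *arg)
{

	for (;;) {
		/* ... do periodic work ... */
		pause("mywork", hz);
	}
	/* NOTREACHED; kthread_exit() would be used to terminate. */
}

static void
my_worker_start(void)
{
	struct proc *p;

	/*
	 * kproc_create() arranges for the new thread to start in
	 * my_worker_main() via the fork-handler machinery shown above.
	 */
	if (kproc_create(my_worker_main, NULL, &p, 0, 0, "myworker") != 0)
		printf("could not create myworker\n");
}
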
Code example #18
File: kern_fork.c Project: mulichao/freebsd
/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
    struct trapframe *frame)
{
	struct proc *p;
	struct thread *td;
	struct thread *dtd;

	td = curthread;
	p = td->td_proc;
	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));

	CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
	    td, td_get_sched(td), p->p_pid, td->td_name);

	sched_fork_exit(td);
	/*
	 * Processes normally resume in mi_switch() after being
	 * cpu_switch()'ed to, but when children start up they arrive here
	 * instead, so we must do much the same things as mi_switch() would.
	 */
	if ((dtd = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(dtd);
	}
	thread_unlock(td);

	/*
	 * cpu_fork_kthread_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 * initproc has its own fork handler, but it does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	if (p->p_flag & P_KPROC) {
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    td->td_name, p->p_pid);
		kthread_exit();
	}
	mtx_assert(&Giant, MA_NOTOWNED);

	if (p->p_sysent->sv_schedtail != NULL)
		(p->p_sysent->sv_schedtail)(td);
	td->td_pflags &= ~TDP_FORKING;
}
Code example #19
File: kern_sema.c Project: MarginC/kame
void
sema_init(struct sema *sema, int value, const char *description)
{

	KASSERT((value >= 0), ("%s(): negative value\n", __func__));

	bzero(sema, sizeof(*sema));
	mtx_init(&sema->sema_mtx, description, "sema backing lock",
	    MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
	cv_init(&sema->sema_cv, description);
	sema->sema_value = value;

	CTR4(KTR_LOCK, "%s(%p, %d, \"%s\")", __func__, sema, value, description);
}
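
For context, here is a hedged sketch of how a driver might consume the sema(9) interface that sema_init() sets up above; the names and the initial count are hypothetical.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/sema.h>

static struct sema example_sema;

static void
example_attach(void)
{

	/* Start with four units of a hypothetical resource. */
	sema_init(&example_sema, 4, "example resource");
}

static void
example_use(void)
{

	sema_wait(&example_sema);	/* block until a unit is free */
	/* ... use the resource ... */
	sema_post(&example_sema);	/* return the unit */
}

static void
example_detach(void)
{

	sema_destroy(&example_sema);
}
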
Code example #20
File: qp.c Project: Lxg1582/freebsd
static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = contigmalloc(sq->memsize, M_DEVBUF, M_NOWAIT, 0ul, ~0ul,
	    4096, 0);

	if (sq->queue)
		sq->dma_addr = vtophys(sq->queue);
	else
		return -ENOMEM;
	sq->phys_addr = vtophys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
	CTR4(KTR_IW_CXGBE, "%s sq %p dma_addr %p phys_addr %p", __func__,
	    sq->queue, sq->dma_addr, sq->phys_addr);
	return 0;
}
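
A hedged sketch of the matching teardown for alloc_host_sq() above, assuming the queue memory was obtained with contigmalloc(9) as shown; the function name is hypothetical.

static void
free_host_sq_example(struct t4_sq *sq)
{

	if (sq->queue != NULL) {
		/* Release the physically contiguous queue memory. */
		contigfree(sq->queue, sq->memsize, M_DEVBUF);
		sq->queue = NULL;
	}
}
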
Code example #21
File: trap-v6.c Project: nbsoftwarecsjava/embedded
static __inline void
call_trapsignal(struct thread *td, int sig, int code, vm_offset_t addr)
{
	ksiginfo_t ksi;

	CTR4(KTR_TRAP, "%s: addr: %#x, sig: %d, code: %d",
	   __func__, addr, sig, code);

	/*
	 * TODO: some info would be nice to know
	 * if we are serving data or prefetch abort.
	 */

	ksiginfo_init_trap(&ksi);
	ksi.ksi_signo = sig;
	ksi.ksi_code = code;
	ksi.ksi_addr = (void *)addr;
	trapsignal(td, &ksi);
}
Code example #22
File: ev.c Project: ele7enxxh/dtrace-pf
static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
			  struct c4iw_qp *qhp,
			  struct t4_cqe *err_cqe,
			  enum ib_event_type ib_event)
{
	struct ib_event event;
	struct c4iw_qp_attributes attrs;
	unsigned long flag;

	if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
	    (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
		CTR4(KTR_IW_CXGBE, "%s AE received after RTS - "
		     "qp state %d qpid 0x%x status 0x%x", __func__,
		     qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
		return;
	}

	printf("AE qpid 0x%x opcode %d status 0x%x "
	       "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
	       CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
	       CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
	       CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));

	if (qhp->attr.state == C4IW_QP_STATE_RTS) {
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
			       &attrs, 0);
	}

	event.event = ib_event;
	event.device = chp->ibcq.device;
	if (ib_event == IB_EVENT_CQ_ERR)
		event.element.cq = &chp->ibcq;
	else
		event.element.qp = &qhp->ibqp;
	if (qhp->ibqp.event_handler)
		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);

	spin_lock_irqsave(&chp->comp_handler_lock, flag);
	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
}
Code example #23
File: t4_cpl_io.c Project: outbackdingo/uBSD
/*
 * Called when a connection is established to translate the TCP options
 * reported by HW to FreeBSD's native format.
 */
static void
assign_rxopt(struct tcpcb *tp, unsigned int opt)
{
	struct toepcb *toep = tp->t_toe;
	struct inpcb *inp = tp->t_inpcb;
	struct adapter *sc = td_adapter(toep->td);
	int n;

	INP_LOCK_ASSERT(inp);

	if (inp->inp_inc.inc_flags & INC_ISIPV6)
		n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		n = sizeof(struct ip) + sizeof(struct tcphdr);
	tp->t_maxseg = sc->params.mtus[G_TCPOPT_MSS(opt)] - n;

	CTR4(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u)", __func__, toep->tid,
	    G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)]);

	if (G_TCPOPT_TSTAMP(opt)) {
		tp->t_flags |= TF_RCVD_TSTMP;	/* timestamps ok */
		tp->ts_recent = 0;		/* hmmm */
		tp->ts_recent_age = tcp_ts_getsbintime();
		tp->t_maxseg -= TCPOLEN_TSTAMP_APPA;
	}

	if (G_TCPOPT_SACK(opt))
		tp->t_flags |= TF_SACK_PERMIT;	/* should already be set */
	else
		tp->t_flags &= ~TF_SACK_PERMIT;	/* sack disallowed by peer */

	if (G_TCPOPT_WSCALE_OK(opt))
		tp->t_flags |= TF_RCVD_SCALE;

	/* Doing window scaling? */
	if ((tp->t_flags & (TF_RCVD_SCALE | TF_REQ_SCALE)) ==
	    (TF_RCVD_SCALE | TF_REQ_SCALE)) {
		tp->rcv_scale = tp->request_r_scale;
		tp->snd_scale = G_TCPOPT_SND_WSCALE(opt);
	}
}
Code example #24
int
t3_ddp_copy(const struct mbuf *m, int offset, struct uio *uio, int len)
{
	int resid_init, err;
	struct ddp_gather_list *gl = (struct ddp_gather_list *)m->m_ddp_gl;
	
	resid_init = uio->uio_resid;
	
	if (!gl->dgl_pages)
		panic("pages not set\n");

	CTR4(KTR_TOM, "t3_ddp_copy: offset=%d dgl_offset=%d cur_offset=%d len=%d",
	    offset, gl->dgl_offset, m->m_cur_offset, len);
	offset += gl->dgl_offset + m->m_cur_offset;
	KASSERT(len <= gl->dgl_length,
	    ("len=%d > dgl_length=%d in ddp_copy\n", len, gl->dgl_length));


	err = uiomove_fromphys(gl->dgl_pages, offset, len, uio);
	return (err);
}
Code example #25
File: iw_cxgb_dbg.c Project: AhmadTux/freebsd
void cxio_dump_pbl(struct cxio_rdev *rdev, uint32_t pbl_addr, uint32_t len, u8 shift)
{
	struct ch_mem_range *m;
	u64 *data;
	int rc;
	int size, npages;

	shift += 12;
	npages = (len + (1ULL << shift) - 1) >> shift;
	size = npages * sizeof(u64);

	m = kmalloc(sizeof(*m) + size, M_NOWAIT);
	if (!m) {
		CTR1(KTR_IW_CXGB, "%s couldn't allocate memory.", __FUNCTION__);
		return;
	}
	m->mem_id = MEM_PMRX;
	m->addr = pbl_addr;
	m->len = size;
	CTR4(KTR_IW_CXGB, "%s PBL addr 0x%x len %d depth %d",
		__FUNCTION__, m->addr, m->len, npages);
	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
	if (rc) {
		CTR2(KTR_IW_CXGB, "%s toectl returned error %d", __FUNCTION__, rc);
		free(m, M_DEVBUF);
		return;
	}

	data = (u64 *)m->buf;
	while (size > 0) {
		CTR2(KTR_IW_CXGB, "PBL %08x: %016llx", m->addr, (unsigned long long) *data);
		size -= 8;
		data++;
		m->addr += 8;
	}
	free(m, M_DEVBUF);
}
Code example #26
/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
 *
 * We call this if the lock is either contested (i.e. we need to go to
 * sleep waiting for it), or if we need to recurse on it.
 */
void
_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	struct turnstile *ts;
	uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
	volatile struct thread *owner;
#endif
#ifdef KTR
	int cont_logged = 0;
#endif
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	if (mtx_owned(m)) {
		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
		    m->lock_object.lo_name, file, line));
		m->mtx_recurse++;
		atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
		if (LOCK_LOG_TEST(&m->lock_object, opts))
			CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
		return;
	}

	lock_profile_obtain_lock_failed(&m->lock_object,
		    &contested, &waittime);
	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR4(KTR_LOCK,
		    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);

	while (!_obtain_lock(m, tid)) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef ADAPTIVE_MUTEXES
		/*
		 * If the owner is running on another CPU, spin until the
		 * owner stops running or the state of the lock changes.
		 */
		v = m->mtx_lock;
		if (v != MTX_UNOWNED) {
			owner = (struct thread *)(v & ~MTX_FLAGMASK);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&m->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, m, owner);
				while (mtx_owner(m) == owner &&
				    TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				continue;
			}
		}
#endif

		ts = turnstile_trywait(&m->lock_object);
		v = m->mtx_lock;

		/*
		 * Check if the lock has been released while spinning for
		 * the turnstile chain lock.
		 */
		if (v == MTX_UNOWNED) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_MUTEXES
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		owner = (struct thread *)(v & ~MTX_FLAGMASK);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			continue;
		}
#endif

		/*
		 * If the mutex isn't already contested and a failure occurs
		 * setting the contested bit, the mutex was either released
		 * or the state of the MTX_RECURSED bit changed.
		 */
		if ((v & MTX_CONTESTED) == 0 &&
		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
			turnstile_cancel(ts);
			continue;
		}

		/*
		 * We definitely must sleep for this lock.
		 */
		mtx_assert(m, MA_NOTOWNED);

#ifdef KTR
		if (!cont_logged) {
			CTR6(KTR_CONTENTION,
			    "contention: %p at %s:%d wants %s, taken by %s:%d",
			    (void *)tid, file, line, m->lock_object.lo_name,
			    WITNESS_FILE(&m->lock_object),
			    WITNESS_LINE(&m->lock_object));
			cont_logged = 1;
		}
#endif

		/*
		 * Block on the turnstile.
		 */
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
	}
#ifdef KTR
	if (cont_logged) {
		CTR4(KTR_CONTENTION,
		    "contention end: %s acquired by %p at %s:%d",
		    m->lock_object.lo_name, (void *)tid, file, line);
	}
#endif
	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, m, contested,
	    waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_BLOCK, m, sleep_time);

	/*
	 * Only record the loops spinning and not sleeping. 
	 */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_MTX_LOCK_SPIN, m, (spin_cnt - sleep_cnt));
#endif
}
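
_mtx_lock_sleep() is the contested slow path behind mtx_lock(); callers never invoke it directly. Below is a minimal hedged sketch of the ordinary mutex(9) usage that can end up here under contention; the lock and counter are hypothetical.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_mtx;
static int example_count;

static void
example_init(void)
{

	mtx_init(&example_mtx, "example", NULL, MTX_DEF);
}

static void
example_bump(void)
{

	/* A contested mtx_lock() falls through to the sleep path. */
	mtx_lock(&example_mtx);
	example_count++;
	mtx_unlock(&example_mtx);
}
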
Code example #27
File: kern_rwlock.c Project: jmgurney/freebsd
void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t x, v, queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in a unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wakeup actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wakeup all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
	curthread->td_locks--;
	curthread->td_rw_rlocks--;
}
Code example #28
File: kern_rwlock.c Project: jmgurney/freebsd
void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t v;
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("rw_rlock: wlock already held for %s @ %s:%d",
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = rw->rw_lock;
#endif
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			continue;
		}
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", rw->lock_object.lo_name);
				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
				    owner && TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else if (spintries < rowner_retries) {
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				v = rw->rw_lock;
				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
					break;
				cpu_spinwait();
			}
#ifdef KDTRACE_HOOKS
			spin_cnt += rowner_loops - i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i != rowner_loops)
				continue;
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present,
		 * acquire the turnstile lock so we can begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!RW_CAN_READ(v));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_READER);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
	curthread->td_rw_rlocks++;
}
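
The two kern_rwlock.c functions above are read-side slow paths; a hedged sketch of the ordinary rwlock(9) usage that drives them follows, with a hypothetical lock and datum.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

static struct rwlock example_rw;
static int example_value;

static void
example_rw_init(void)
{

	rw_init(&example_rw, "example rw");
}

static int
example_read(void)
{
	int v;

	rw_rlock(&example_rw);		/* may take the __rw_rlock() slow path */
	v = example_value;
	rw_runlock(&example_rw);
	return (v);
}
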
Code example #29
File: cxgbei.c Project: 2trill2spill/freebsd
static int
do_rx_iscsi_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1);
	u_int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct socket *so;
	struct sockbuf *sb;
	struct tcpcb *tp;
	struct icl_cxgbei_conn *icc;
	struct icl_conn *ic;
	struct icl_cxgbei_pdu *icp = toep->ulpcb2;
	struct icl_pdu *ip;
	u_int pdu_len, val;

	MPASS(m == NULL);

	/* Must already be assembling a PDU. */
	MPASS(icp != NULL);
	MPASS(icp->icp_flags & ICPF_RX_HDR);	/* Data is optional. */
	MPASS((icp->icp_flags & ICPF_RX_STATUS) == 0);

	pdu_len = be16toh(cpl->len);	/* includes everything. */
	val = be32toh(cpl->ddpvld);

#if 0
	CTR5(KTR_CXGBE,
	    "%s: tid %u, cpl->len %u, ddpvld 0x%08x, icp_flags 0x%08x",
	    __func__, tid, pdu_len, val, icp->icp_flags);
#endif

	icp->icp_flags |= ICPF_RX_STATUS;
	ip = &icp->ip;
	if (val & F_DDP_PADDING_ERR)
		icp->icp_flags |= ICPF_PAD_ERR;
	if (val & F_DDP_HDRCRC_ERR)
		icp->icp_flags |= ICPF_HCRC_ERR;
	if (val & F_DDP_DATACRC_ERR)
		icp->icp_flags |= ICPF_DCRC_ERR;
	if (val & F_DDP_PDU && ip->ip_data_mbuf == NULL) {
		MPASS((icp->icp_flags & ICPF_RX_FLBUF) == 0);
		MPASS(ip->ip_data_len > 0);
		icp->icp_flags |= ICPF_RX_DDP;
		counter_u64_add(ci->ddp_pdus, 1);
		counter_u64_add(ci->ddp_bytes, ip->ip_data_len);
	}

	INP_WLOCK(inp);
	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, pdu_len, inp->inp_flags);
		INP_WUNLOCK(inp);
		icl_cxgbei_conn_pdu_free(NULL, ip);
#ifdef INVARIANTS
		toep->ulpcb2 = NULL;
#endif
		return (0);
	}

	tp = intotcpcb(inp);
	MPASS(icp->icp_seq == tp->rcv_nxt);
	MPASS(tp->rcv_wnd >= pdu_len);
	tp->rcv_nxt += pdu_len;
	tp->rcv_wnd -= pdu_len;
	tp->t_rcvtime = ticks;

	/* update rx credits */
	toep->rx_credits += pdu_len;
	t4_rcvd(&toep->td->tod, tp);	/* XXX: sc->tom_softc.tod */

	so = inp->inp_socket;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	icc = toep->ulpcb;
	if (__predict_false(icc == NULL || sb->sb_state & SBS_CANTRCVMORE)) {
		CTR5(KTR_CXGBE,
		    "%s: tid %u, excess rx (%d bytes), icc %p, sb_state 0x%x",
		    __func__, tid, pdu_len, icc, sb->sb_state);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);

		INP_INFO_RLOCK(&V_tcbinfo);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp)
			INP_WUNLOCK(inp);
		INP_INFO_RUNLOCK(&V_tcbinfo);

		icl_cxgbei_conn_pdu_free(NULL, ip);
#ifdef INVARIANTS
		toep->ulpcb2 = NULL;
#endif
		return (0);
	}
	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
	ic = &icc->ic;
	icl_cxgbei_new_pdu_set_conn(ip, ic);

	MPASS(m == NULL); /* was unused, we'll use it now. */
	m = sbcut_locked(sb, sbused(sb)); /* XXXNP: toep->sb_cc accounting? */
	if (__predict_false(m != NULL)) {
		int len = m_length(m, NULL);

		/*
		 * PDUs were received before the tid transitioned to ULP mode.
		 * Convert them to icl_cxgbei_pdus and send them to ICL before
		 * the PDU in icp/ip.
		 */
		CTR3(KTR_CXGBE, "%s: tid %u, %u bytes in so_rcv", __func__, tid,
		    len);

		/* XXXNP: needs to be rewritten. */
		if (len == sizeof(struct iscsi_bhs) || len == 4 + sizeof(struct
		    iscsi_bhs)) {
			struct icl_cxgbei_pdu *icp0;
			struct icl_pdu *ip0;

			ip0 = icl_cxgbei_new_pdu(M_NOWAIT);
			if (ip0 == NULL)
				CXGBE_UNIMPLEMENTED("PDU allocation failure");
			icl_cxgbei_new_pdu_set_conn(ip0, ic);
			icp0 = ip_to_icp(ip0);
			icp0->icp_seq = 0; /* XXX */
			icp0->icp_flags = ICPF_RX_HDR | ICPF_RX_STATUS;
			m_copydata(m, 0, sizeof(struct iscsi_bhs), (void *)ip0->ip_bhs);
			STAILQ_INSERT_TAIL(&icc->rcvd_pdus, ip0, ip_next);
		}
		m_freem(m);
	}

	STAILQ_INSERT_TAIL(&icc->rcvd_pdus, ip, ip_next);
	if ((icc->rx_flags & RXF_ACTIVE) == 0) {
		struct cxgbei_worker_thread_softc *cwt = &cwt_softc[icc->cwt];

		mtx_lock(&cwt->cwt_lock);
		icc->rx_flags |= RXF_ACTIVE;
		TAILQ_INSERT_TAIL(&cwt->rx_head, icc, rx_link);
		if (cwt->cwt_state == CWT_SLEEPING) {
			cwt->cwt_state = CWT_RUNNING;
			cv_signal(&cwt->cwt_cv);
		}
		mtx_unlock(&cwt->cwt_lock);
	}
	SOCKBUF_UNLOCK(sb);
	INP_WUNLOCK(inp);

#ifdef INVARIANTS
	toep->ulpcb2 = NULL;
#endif

	return (0);
}
Code example #30
File: machdep.c Project: ralphost/NextBSD
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct thread *td;
	struct proc *p;
	struct trapframe *tf;
	struct sigframe *fp, frame;
	struct sigacts *psp;
	int code, onstack, sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	code = ksi->ksi_code;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else {
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Fill in the frame to copy out */
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	get_fpcontext(td, &frame.sf_uc.uc_mcontext);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
	    ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	frame.sf_uc.uc_stack = td->td_sigstk;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	tf->tf_x[0] = sig;
	tf->tf_x[1] = (register_t)&fp->sf_si;
	tf->tf_x[2] = (register_t)&fp->sf_uc;

	tf->tf_elr = (register_t)catcher;
	tf->tf_sp = (register_t)fp;
	tf->tf_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}