Example #1
File: xattr.c Project: pcd1193182/openzfs
/* ARGSUSED */
static int
xattr_file_close(vnode_t *vp, int flags, int count, offset_t off,
    cred_t *cr, caller_context_t *ct)
{
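	/*
	 * Release any file locks and share reservations that the calling
	 * process (identified by ddi_get_pid()) still holds on this vnode.
	 */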
	cleanlocks(vp, ddi_get_pid(), 0);
	cleanshares(vp, ddi_get_pid());
	return (0);
}
Example #2
static int
audio_stropen(queue_t *rq, dev_t *devp, int oflag, int sflag, cred_t *credp)
{
	int			rv;
	audio_client_t		*c;

	if (sflag != 0) {
		/* no direct clone or module opens */
		return (ENXIO);
	}

	/*
	 * Make sure it's a STREAMS personality - only the legacy Sun API uses
	 * STREAMS.
	 */
	switch (AUDIO_MN_TYPE_MASK & getminor(*devp)) {
	case AUDIO_MINOR_DEVAUDIO:
	case AUDIO_MINOR_DEVAUDIOCTL:
		break;
	default:
		return (ENOSTR);
	}

	if ((c = auimpl_client_create(*devp)) == NULL) {
		audio_dev_warn(NULL, "client create failed");
		return (ENXIO);
	}

	rq->q_ptr = WR(rq)->q_ptr = c;
	c->c_omode = oflag;
	c->c_pid = ddi_get_pid();
	c->c_cred = credp;
	c->c_rq = rq;
	c->c_wq = WR(rq);

	/*
	 * Call client/personality specific open handler.  Note that
	 * we "insist" that there is an open.  The personality layer
	 * will initialize/allocate any engines required.
	 *
	 * Hmm... do we need to pass in the cred?
	 */
	if ((rv = c->c_open(c, oflag)) != 0) {
		audio_dev_warn(c->c_dev, "open failed (rv %d)", rv);
		auimpl_client_destroy(c);
		return (rv);
	}

	/* we do device cloning! */
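	/*
	 * Rewriting *devp below hands every open() a distinct cloned
	 * minor, so each client gets its own private state.
	 */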
	*devp = makedevice(c->c_major, c->c_minor);

	qprocson(rq);

	/* now we can receive upcalls */
	auimpl_client_activate(c);

	atomic_inc_uint(&c->c_dev->d_serial);

	return (0);
}
Example #3
int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *co, uchar_t cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof (*rqp));
	mutex_init(&rqp->sr_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);

	error = smb_rq_getenv(co, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return (error);

	/*
	 * We copied a VC pointer (vcp) into rqp->sr_vc,
	 * but we do NOT do a smb_vc_hold here.  Instead,
	 * the caller is responsible for the hold on the
	 * share or the VC as needed.  For smbfs callers,
	 * the hold is on the share, via the smbfs mount.
	 * For nsmb ioctl callers, the hold is done when
	 * the driver handle gets VC or share references.
	 * This design avoids frequent hold/rele activity
	 * when creating and completing requests.
	 */

	rqp->sr_rexmit = SMBMAXRESTARTS;
	rqp->sr_cred = scred;	/* Note: ref hold done by caller. */
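	/* The on-the-wire SMB1 PID field is 16 bits, hence the cast. */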
	rqp->sr_pid = (uint16_t)ddi_get_pid();
	error = smb_rq_new(rqp, cmd);

	return (error);
}
Example #4
static int
audio_strclose(queue_t *rq, int flag, cred_t *credp)
{
	audio_client_t	*c;
	audio_dev_t	*d;
	int		rv = 0;	/* in case the drain below is skipped */

	_NOTE(ARGUNUSED(flag));
	_NOTE(ARGUNUSED(credp));

	if ((c = rq->q_ptr) == NULL) {
		return (ENXIO);
	}
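	/*
	 * Drain pending output only when the closing thread can take a
	 * signal (keeping the wait interruptible) or is a kernel thread
	 * (ddi_get_pid() == 0).
	 */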
	if (ddi_can_receive_sig() || (ddi_get_pid() == 0)) {
		rv = auclnt_drain(c);
	}

	/* make sure we won't get any upcalls */
	auimpl_client_deactivate(c);

	/*
	 * Pick up any data sitting around in input buffers.  This
	 * avoids leaving record data stuck in queues.
	 */
	if (c->c_istream.s_engine != NULL)
		audio_engine_produce(c->c_istream.s_engine);

	/* get a local hold on the device */
	d = c->c_dev;
	auimpl_dev_hold(c->c_dev);

	/* Turn off queue processing... */
	qprocsoff(rq);

	/* Call personality specific close handler */
	c->c_close(c);

	auimpl_client_destroy(c);

	/* notify peers that a change has occurred */
	atomic_inc_uint(&d->d_serial);

	/* now we can drop the release we had on the device */
	auimpl_dev_release(d);

	return (rv);
}
Example #5
void log_msg(int level, const char *fmt, ...)
{
    va_list ap;
    char buf[256];
    struct psinfo psinfo;

    va_start(ap, fmt);
    vsnprintf(buf, 255, fmt, ap);
    va_end(ap);

    mutex_enter(&curproc->p_lock);
    prgetpsinfo(curproc, &psinfo);
    mutex_exit(&curproc->p_lock);

    cmn_err(level, "%s (cmd: %s, pid: %d, uid: %d, gid: %d).%s", buf,
            psinfo.pr_psargs, ddi_get_pid(),
            ddi_get_cred()->cr_ruid, ddi_get_cred()->cr_rgid,
            (level == CE_CONT) ? "\n" : "");
}
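A minimal usage sketch (hypothetical call site; "cmd" is an assumed local variable, and the helper itself gathers the psinfo and credential details):

    log_msg(CE_WARN, "ioctl 0x%x rejected", cmd);
    /* logs: "ioctl 0x... rejected (cmd: <psargs>, pid: ..., uid: ..., gid: ...)." */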
Example #6
static int
audio_open(dev_t *devp, int oflag, int otyp, cred_t *credp)
{
	int			rv;
	audio_client_t		*c;

	if (otyp == OTYP_BLK) {
		return (ENXIO);
	}

	if ((c = auimpl_client_create(*devp)) == NULL) {
		audio_dev_warn(NULL, "client create failed");
		return (ENXIO);
	}

	c->c_omode = oflag;
	c->c_pid = ddi_get_pid();
	c->c_cred = credp;

	/*
	 * Call client/personality specific open handler.  Note that
	 * we "insist" that there is an open.  The personality layer
	 * will initialize/allocate any engines required.
	 *
	 * Hmm... do we need to pass in the cred?
	 */
	if ((rv = c->c_open(c, oflag)) != 0) {
		audio_dev_warn(c->c_dev, "open failed (rv %d)", rv);
		auimpl_client_destroy(c);
		return (rv);
	}

	/* we do device cloning! */
	*devp = makedevice(c->c_major, c->c_minor);

	/* now we can receive upcalls */
	auimpl_client_activate(c);

	atomic_inc_uint(&c->c_dev->d_serial);

	return (0);
}
Example #7
static void
evtchn_bind_to_user(struct evtsoftdata *u, int port)
{
	ulong_t flags;

	/*
	 * save away the PID of the last process to bind to this event channel.
	 * Useful for debugging.
	 */
	u->pid = ddi_get_pid();

	mutex_enter(&port_user_lock);
	ASSERT(port_user[port] == NULL);
	port_user[port] = u;
	ec_irq_add_evtchn(ec_dev_irq, port);
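	/*
	 * intr_clear() masks interrupts on this CPU (returning the prior
	 * state) so the unmask below cannot race an incoming event; the
	 * saved state is restored immediately afterward.
	 */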
	flags = intr_clear();
	ec_unmask_evtchn(port);
	intr_restore(flags);
	mutex_exit(&port_user_lock);
}
Example #8
RTDECL(RTPROCESS) RTProcSelf(void)
{
    return ddi_get_pid();
}
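On Solaris kernels IPRT simply forwards its process-ID query to the DDI. A minimal usage sketch (any kernel-mode IPRT caller):

    RTPROCESS pid = RTProcSelf();   /* same value as ddi_get_pid() */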
Example #9
/*
 * tavor_srq_alloc()
 *    Context: Can be called only from user or kernel context.
 */
int
tavor_srq_alloc(tavor_state_t *state, tavor_srq_info_t *srqinfo,
    uint_t sleepflag, tavor_srq_options_t *op)
{
	ibt_srq_hdl_t		ibt_srqhdl;
	tavor_pdhdl_t		pd;
	ibt_srq_sizes_t		*sizes;
	ibt_srq_sizes_t		*real_sizes;
	tavor_srqhdl_t		*srqhdl;
	ibt_srq_flags_t		flags;
	tavor_rsrc_t		*srqc, *rsrc;
	tavor_hw_srqc_t		srqc_entry;
	uint32_t		*buf;
	tavor_srqhdl_t		srq;
	tavor_umap_db_entry_t	*umapdb;
	ibt_mr_attr_t		mr_attr;
	tavor_mr_options_t	mr_op;
	tavor_mrhdl_t		mr;
	uint64_t		addr;
	uint64_t		value, srq_desc_off;
	uint32_t		lkey;
	uint32_t		log_srq_size;
	uint32_t		uarpg;
	uint_t			wq_location, dma_xfer_mode, srq_is_umap;
	int			flag, status;
	char			*errormsg;
	uint_t			max_sgl;
	uint_t			wqesz;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sizes))

	TAVOR_TNF_ENTER(tavor_srq_alloc);

	/*
	 * Check the "options" flag.  Currently this flag tells the driver
	 * whether the SRQ's work queues should come from normal system
	 * memory or be allocated from DDR memory.
	 */
	if (op == NULL) {
		wq_location = TAVOR_QUEUE_LOCATION_NORMAL;
	} else {
		wq_location = op->srqo_wq_loc;
	}

	/*
	 * Extract the necessary info from the tavor_srq_info_t structure
	 */
	real_sizes = srqinfo->srqi_real_sizes;
	sizes	   = srqinfo->srqi_sizes;
	pd	   = srqinfo->srqi_pd;
	ibt_srqhdl = srqinfo->srqi_ibt_srqhdl;
	flags	   = srqinfo->srqi_flags;
	srqhdl	   = srqinfo->srqi_srqhdl;

	/*
	 * Determine whether SRQ is being allocated for userland access or
	 * whether it is being allocated for kernel access.  If the SRQ is
	 * being allocated for userland access, then lookup the UAR doorbell
	 * page number for the current process.  Note:  If this is not found
	 * (e.g. if the process has not previously open()'d the Tavor driver),
	 * then an error is returned.
	 */
	srq_is_umap = (flags & IBT_SRQ_USER_MAP) ? 1 : 0;
	if (srq_is_umap) {
		status = tavor_umap_db_find(state->ts_instance, ddi_get_pid(),
		    MLNX_UMAP_UARPG_RSRC, &value, 0, NULL);
		if (status != DDI_SUCCESS) {
			/* Set "status" and "errormsg" and goto failure */
			TAVOR_TNF_FAIL(IBT_INVALID_PARAM, "failed UAR page");
			goto srqalloc_fail;	/* nothing allocated yet */
		}
		uarpg = ((tavor_rsrc_t *)(uintptr_t)value)->tr_indx;
	}

	/* Increase PD refcnt */
	tavor_pd_refcnt_inc(pd);

	/* Allocate an SRQ context entry */
	status = tavor_rsrc_alloc(state, TAVOR_SRQC, 1, sleepflag, &srqc);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed SRQ context");
		goto srqalloc_fail1;
	}

	/* Allocate the SRQ Handle entry */
	status = tavor_rsrc_alloc(state, TAVOR_SRQHDL, 1, sleepflag, &rsrc);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed SRQ handle");
		goto srqalloc_fail2;
	}

	srq = (tavor_srqhdl_t)rsrc->tr_addr;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*srq))

	/* Calculate the SRQ number */
	tavor_srq_numcalc(state, srqc->tr_indx, &srq->srq_srqnum);

	/*
	 * If this will be a user-mappable SRQ, then allocate an entry for
	 * the "userland resources database".  This will later be added to
	 * the database (after all further SRQ operations are successful).
	 * If we fail here, we must undo the reference counts and the
	 * previous resource allocation.
	 */
	if (srq_is_umap) {
		umapdb = tavor_umap_db_alloc(state->ts_instance,
		    srq->srq_srqnum, MLNX_UMAP_SRQMEM_RSRC,
		    (uint64_t)(uintptr_t)rsrc);
		if (umapdb == NULL) {
			/* Set "status" and "errormsg" and goto failure */
			TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed umap add");
			goto srqalloc_fail3;
		}
	}

	/*
	 * Calculate the appropriate size for the SRQ.
	 * Note:  All Tavor SRQs must be a power-of-2 in size, and they
	 * may not be any smaller than TAVOR_SRQ_MIN_SIZE.  This step
	 * rounds the requested size up to the next power-of-2.
	 */
	sizes->srq_wr_sz = max(sizes->srq_wr_sz, TAVOR_SRQ_MIN_SIZE);
	log_srq_size = highbit(sizes->srq_wr_sz);
	if ((sizes->srq_wr_sz & (sizes->srq_wr_sz - 1)) == 0) {
		log_srq_size = log_srq_size - 1;
	}
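	/*
	 * highbit(n) is the 1-based index of the most significant set bit:
	 * a request of 16 yields highbit(16) == 5, which the power-of-2
	 * test above trims to log2(16) == 4, while a request of 17 stays
	 * at 5 and therefore rounds up to 32 entries.
	 */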

	/*
	 * Next we verify that the rounded-up size is valid (i.e. consistent
	 * with the device limits and/or software-configured limits).  If not,
	 * then obviously we have a lot of cleanup to do before returning.
	 */
	if (log_srq_size > state->ts_cfg_profile->cp_log_max_srq_sz) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_HCA_WR_EXCEEDED, "max SRQ size");
		goto srqalloc_fail4;
	}

	/*
	 * Next we verify that the requested number of SGL is valid (i.e.
	 * consistent with the device limits and/or software-configured
	 * limits).  If not, then obviously the same cleanup needs to be done.
	 */
	max_sgl = state->ts_cfg_profile->cp_srq_max_sgl;
	if (sizes->srq_sgl_sz > max_sgl) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_HCA_SGL_EXCEEDED, "max SRQ SGL");
		goto srqalloc_fail4;
	}

	/*
	 * Determine the SRQ's WQE sizes.  This depends on the requested
	 * number of SGLs.  Note: This also has the side-effect of
	 * calculating the real number of SGLs (for the calculated WQE size)
	 */
	tavor_srq_sgl_to_logwqesz(state, sizes->srq_sgl_sz,
	    TAVOR_QP_WQ_TYPE_RECVQ, &srq->srq_wq_log_wqesz,
	    &srq->srq_wq_sgl);

	/*
	 * Allocate the memory for SRQ work queues.  Note:  The location from
	 * which we will allocate these work queues has been passed in through
	 * the tavor_qp_options_t structure.  Since Tavor work queues are not
	 * allowed to cross a 32-bit (4GB) boundary, the alignment of the work
	 * queue memory is very important.  We used to allocate work queues
	 * (the combined receive and send queues) so that they would be aligned
	 * on their combined size.  That alignment guaranteed that they would
	 * never cross the 4GB boundary (Tavor work queues are on the order of
	 * MBs at maximum).  Now we are able to relax this alignment constraint
	 * by ensuring that the IB address assigned to the queue memory (as a
	 * result of the tavor_mr_register() call) is offset from zero.
	 * Previously, we had wanted to use the ddi_dma_mem_alloc() routine to
	 * guarantee the alignment, but when attempting to use IOMMU bypass
	 * mode we found that we were not allowed to specify any alignment that
	 * was more restrictive than the system page size.  So we avoided this
	 * constraint by passing two alignment values, one for the memory
	 * allocation itself and the other for the DMA handle (for later bind).
	 * This used to cause more memory than necessary to be allocated (in
	 * order to guarantee the more restrictive alignment constraint).  But
	 * by guaranteeing the zero-based IB virtual address for the queue, we
	 * are able to conserve this memory.
	 *
	 * Note: If SRQ is not user-mappable, then it may come from either
	 * kernel system memory or from HCA-attached local DDR memory.
	 *
	 * Note2: We align this queue on a pagesize boundary.  This is required
	 * to make sure that all the resulting IB addresses will start at 0, for
	 * a zero-based queue.  By making sure we are aligned on at least a
	 * page, any offset we use into our queue will be the same as when we
	 * perform tavor_srq_modify() operations later.
	 */
	wqesz = (1 << srq->srq_wq_log_wqesz);
	srq->srq_wqinfo.qa_size = (1 << log_srq_size) * wqesz;
	srq->srq_wqinfo.qa_alloc_align = PAGESIZE;
	srq->srq_wqinfo.qa_bind_align = PAGESIZE;
	if (srq_is_umap) {
		srq->srq_wqinfo.qa_location = TAVOR_QUEUE_LOCATION_USERLAND;
	} else {
		srq->srq_wqinfo.qa_location = wq_location;
	}
	status = tavor_queue_alloc(state, &srq->srq_wqinfo, sleepflag);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed srq");
		goto srqalloc_fail4;
	}
	buf = (uint32_t *)srq->srq_wqinfo.qa_buf_aligned;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*buf))

	/*
	 * Register the memory for the SRQ work queues.  The memory for the SRQ
	 * must be registered in the Tavor TPT tables.  This gives us the LKey
	 * to specify in the SRQ context later.  Note: If the work queue is to
	 * be allocated from DDR memory, then only a "bypass" mapping is
	 * appropriate.  And if the SRQ memory is user-mappable, then we force
	 * DDI_DMA_CONSISTENT mapping.  Also, in order to meet the alignment
	 * restriction, we pass the "mro_bind_override_addr" flag in the call
	 * to tavor_mr_register().  This guarantees that the resulting IB vaddr
	 * will be zero-based (modulo the offset into the first page).  If we
	 * fail here, we still have the bunch of resource and reference count
	 * cleanup to do.
	 */
	flag = (sleepflag == TAVOR_SLEEP) ? IBT_MR_SLEEP :
	    IBT_MR_NOSLEEP;
	mr_attr.mr_vaddr = (uint64_t)(uintptr_t)buf;
	mr_attr.mr_len   = srq->srq_wqinfo.qa_size;
	mr_attr.mr_as    = NULL;
	mr_attr.mr_flags = flag | IBT_MR_ENABLE_LOCAL_WRITE;
	if (srq_is_umap) {
		mr_op.mro_bind_type   = state->ts_cfg_profile->cp_iommu_bypass;
	} else {
		if (wq_location == TAVOR_QUEUE_LOCATION_NORMAL) {
			mr_op.mro_bind_type =
			    state->ts_cfg_profile->cp_iommu_bypass;
			dma_xfer_mode =
			    state->ts_cfg_profile->cp_streaming_consistent;
			if (dma_xfer_mode == DDI_DMA_STREAMING) {
				mr_attr.mr_flags |= IBT_MR_NONCOHERENT;
			}
		} else {
			mr_op.mro_bind_type = TAVOR_BINDMEM_BYPASS;
		}
	}
	mr_op.mro_bind_dmahdl = srq->srq_wqinfo.qa_dmahdl;
	mr_op.mro_bind_override_addr = 1;
	status = tavor_mr_register(state, pd, &mr_attr, &mr, &mr_op);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed register mr");
		goto srqalloc_fail5;
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
	addr = mr->mr_bindinfo.bi_addr;
	lkey = mr->mr_lkey;

	/*
	 * Calculate the offset between the kernel virtual address space
	 * and the IB virtual address space.  This will be used when
	 * posting work requests to properly initialize each WQE.
	 */
	srq_desc_off = (uint64_t)(uintptr_t)srq->srq_wqinfo.qa_buf_aligned -
	    (uint64_t)mr->mr_bindinfo.bi_addr;

	/*
	 * Create WQL and Wridlist for use by this SRQ
	 */
	srq->srq_wrid_wql = tavor_wrid_wql_create(state);
	if (srq->srq_wrid_wql == NULL) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed wql create");
		goto srqalloc_fail6;
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*(srq->srq_wrid_wql)))

	srq->srq_wridlist = tavor_wrid_get_list(1 << log_srq_size);
	if (srq->srq_wridlist == NULL) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed wridlist create");
		goto srqalloc_fail7;
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*(srq->srq_wridlist)))

	srq->srq_wridlist->wl_srq_en = 1;
	srq->srq_wridlist->wl_free_list_indx = -1;

	/*
	 * Fill in all the return arguments (if necessary).  This includes
	 * real queue size and real SGLs.
	 */
	if (real_sizes != NULL) {
		real_sizes->srq_wr_sz = (1 << log_srq_size);
		real_sizes->srq_sgl_sz = srq->srq_wq_sgl;
	}

	/*
	 * Fill in the SRQC entry.  This is the final step before passing
	 * ownership of the SRQC entry to the Tavor hardware.  We use all of
	 * the information collected/calculated above to fill in the
	 * requisite portions of the SRQC.  Note: If this SRQ is going to be
	 * used for userland access, then we need to set the UAR page number
	 * appropriately (otherwise it's a "don't care")
	 */
	bzero(&srqc_entry, sizeof (tavor_hw_srqc_t));
	srqc_entry.wqe_addr_h	   = (addr >> 32);
	srqc_entry.next_wqe_addr_l = 0;
	srqc_entry.ds		   = (wqesz >> 4);
	srqc_entry.state	   = TAVOR_SRQ_STATE_HW_OWNER;
	srqc_entry.pd		   = pd->pd_pdnum;
	srqc_entry.lkey		   = lkey;
	srqc_entry.wqe_cnt	   = 0;
	if (srq_is_umap) {
		srqc_entry.uar	   = uarpg;
	} else {
		srqc_entry.uar	   = 0;
	}

	/*
	 * Write the SRQC entry to hardware.  Lastly, we pass ownership of
	 * the entry to the hardware (using the Tavor SW2HW_SRQ firmware
	 * command).  Note: In general, this operation shouldn't fail.  But
	 * if it does, we have to undo everything we've done above before
	 * returning error.
	 */
	status = tavor_cmn_ownership_cmd_post(state, SW2HW_SRQ, &srqc_entry,
	    sizeof (tavor_hw_srqc_t), srq->srq_srqnum,
	    sleepflag);
	if (status != TAVOR_CMD_SUCCESS) {
		cmn_err(CE_CONT, "Tavor: SW2HW_SRQ command failed: %08x\n",
		    status);
		TNF_PROBE_1(tavor_srq_alloc_sw2hw_srq_cmd_fail,
		    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_FAILURE, "tavor SW2HW_SRQ command");
		goto srqalloc_fail8;
	}

	/*
	 * Fill in the rest of the Tavor SRQ handle.  We can update
	 * the following fields for use in further operations on the SRQ.
	 */
	srq->srq_srqcrsrcp = srqc;
	srq->srq_rsrcp	   = rsrc;
	srq->srq_mrhdl	   = mr;
	srq->srq_refcnt	   = 0;
	srq->srq_is_umap   = srq_is_umap;
	srq->srq_uarpg	   = (srq->srq_is_umap) ? uarpg : 0;
	srq->srq_umap_dhp  = (devmap_cookie_t)NULL;
	srq->srq_pdhdl	   = pd;
	srq->srq_wq_lastwqeindx = -1;
	srq->srq_wq_bufsz  = (1 << log_srq_size);
	srq->srq_wq_buf	   = buf;
	srq->srq_desc_off  = srq_desc_off;
	srq->srq_hdlrarg   = (void *)ibt_srqhdl;
	srq->srq_state	   = 0;
	srq->srq_real_sizes.srq_wr_sz = (1 << log_srq_size);
	srq->srq_real_sizes.srq_sgl_sz = srq->srq_wq_sgl;

	/* Determine if later ddi_dma_sync will be necessary */
	srq->srq_sync = TAVOR_SRQ_IS_SYNC_REQ(state, srq->srq_wqinfo);

	/*
	 * Put SRQ handle in Tavor SRQNum-to-SRQhdl list.  Then fill in the
	 * "srqhdl" and return success
	 */
	ASSERT(state->ts_srqhdl[srqc->tr_indx] == NULL);
	state->ts_srqhdl[srqc->tr_indx] = srq;

	/*
	 * If this is a user-mappable SRQ, then we need to insert the
	 * previously allocated entry into the "userland resources database".
	 * This will allow for later lookup during devmap() (i.e. mmap())
	 * calls.
	 */
	if (srq->srq_is_umap) {
		tavor_umap_db_add(umapdb);
	} else {
		mutex_enter(&srq->srq_wrid_wql->wql_lock);
		tavor_wrid_list_srq_init(srq->srq_wridlist, srq, 0);
		mutex_exit(&srq->srq_wrid_wql->wql_lock);
	}

	*srqhdl = srq;

	TAVOR_TNF_EXIT(tavor_srq_alloc);
	return (status);

/*
 * The following is cleanup for all possible failure cases in this routine
 */
srqalloc_fail8:
	kmem_free(srq->srq_wridlist->wl_wre, srq->srq_wridlist->wl_size *
	    sizeof (tavor_wrid_entry_t));
	kmem_free(srq->srq_wridlist, sizeof (tavor_wrid_list_hdr_t));
srqalloc_fail7:
	tavor_wql_refcnt_dec(srq->srq_wrid_wql);
srqalloc_fail6:
	if (tavor_mr_deregister(state, &mr, TAVOR_MR_DEREG_ALL,
	    TAVOR_SLEEPFLAG_FOR_CONTEXT()) != DDI_SUCCESS) {
		TAVOR_WARNING(state, "failed to deregister SRQ memory");
	}
srqalloc_fail5:
	tavor_queue_free(state, &srq->srq_wqinfo);
srqalloc_fail4:
	if (srq_is_umap) {
		tavor_umap_db_free(umapdb);
	}
srqalloc_fail3:
	tavor_rsrc_free(state, &rsrc);
srqalloc_fail2:
	tavor_rsrc_free(state, &srqc);
srqalloc_fail1:
	tavor_pd_refcnt_dec(pd);
srqalloc_fail:
	TNF_PROBE_1(tavor_srq_alloc_fail, TAVOR_TNF_ERROR, "",
	    tnf_string, msg, errormsg);
	TAVOR_TNF_EXIT(tavor_srq_alloc);
	return (status);
}
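The srqalloc_failN labels above undo the allocations in strict reverse order, each label falling through into the next. A condensed sketch of the idiom (hypothetical res_t type and alloc_a()/alloc_b()/free_a() helpers, not part of the driver):

static int
example_alloc(void)
{
	res_t *a, *b;

	if (alloc_a(&a) != DDI_SUCCESS)
		goto fail0;		/* nothing to undo yet */
	if (alloc_b(&b) != DDI_SUCCESS)
		goto fail1;		/* undo "a" only */
	/* ... use a and b ... */
	return (DDI_SUCCESS);

fail1:
	free_a(a);
fail0:
	return (DDI_FAILURE);
}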