Example #1
/*
 * tavor_agent_unregister_all()
 *    Context: Only called from detach() path context
 */
static int
tavor_agent_unregister_all(tavor_state_t *state, int num_reg)
{
	tavor_agent_list_t	*curr;
	int			i, status;

	TAVOR_TNF_ENTER(tavor_agent_unregister_all);

	/*
	 * For each registered agent in the agent list, tear down the
	 * callbacks from the IBMF and unregister.
	 */
	for (i = 0; i < num_reg; i++) {
		curr = &state->ts_agents[i];

		/* Teardown the IBMF callback */
		status = ibmf_tear_down_async_cb(curr->agl_ibmfhdl,
		    IBMF_QP_HANDLE_DEFAULT, 0);
		if (status != IBMF_SUCCESS) {
			TNF_PROBE_0(tavor_agents_unreg_teardown_cb_fail,
			    TAVOR_TNF_ERROR, "");
			TAVOR_TNF_EXIT(tavor_agent_unregister_all);
			return (DDI_FAILURE);
		}

		/* Unregister the agent from the IBMF */
		status = ibmf_unregister(&curr->agl_ibmfhdl, 0);
		if (status != IBMF_SUCCESS) {
			TNF_PROBE_0(tavor_agents_unreg_ibmf_fail,
			    TAVOR_TNF_ERROR, "");
			TAVOR_TNF_EXIT(tavor_agent_unregister_all);
			return (DDI_FAILURE);
		}
	}

	TAVOR_TNF_EXIT(tavor_agent_unregister_all);
	return (DDI_SUCCESS);
}
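Because tavor_agent_unregister_all() takes an explicit "num_reg" count, a registration loop can unwind exactly the agents it managed to register before a mid-loop failure. A minimal caller sketch, assuming a hypothetical tavor_agent_register_one() helper that is not in the source:

/*
 * Hypothetical sketch only: unwind the "i" agents registered so far.
 * tavor_agent_register_one() is an assumed helper, not the driver's API.
 */
static int
tavor_agent_register_all_sketch(tavor_state_t *state)
{
	int	i;

	for (i = 0; i < state->ts_num_agents; i++) {
		if (tavor_agent_register_one(state, i) != DDI_SUCCESS) {
			/* Unwind only what was actually registered */
			(void) tavor_agent_unregister_all(state, i);
			return (DDI_FAILURE);
		}
	}
	return (DDI_SUCCESS);
}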
Example #2
/*
 * av1394_async_create_minor_node()
 *    Create async minor node
 */
static int
av1394_async_create_minor_node(av1394_inst_t *avp)
{
	int	ret;

	ret = ddi_create_minor_node(avp->av_dip, "async",
	    S_IFCHR, AV1394_ASYNC_INST2MINOR(avp->av_instance),
	    DDI_NT_AV_ASYNC, NULL);
	if (ret != DDI_SUCCESS) {
		TNF_PROBE_0(av1394_async_create_minor_node_error,
		    AV1394_TNF_ASYNC_ERROR, "");
	}
	return (ret);
}
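For symmetry, the detach path would remove this node with ddi_remove_minor_node(), keyed by the same "async" name. A minimal sketch (the function name is an assumption):

/* Hedged sketch of the matching teardown (assumed name) */
static void
av1394_async_remove_minor_node(av1394_inst_t *avp)
{
	ddi_remove_minor_node(avp->av_dip, "async");
}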
Example #3
static int
hci1394_ioctl_wrvreg(hci1394_state_t *soft_state, void *arg, int mode)
{
	hci1394_ioctl_wrvreg_t wrvreg;
	int status;


	ASSERT(soft_state != NULL);
	ASSERT(arg != NULL);
	TNF_PROBE_0_DEBUG(hci1394_ioctl_wrvreg_enter, HCI1394_TNF_HAL_STACK,
	    "");

	status = ddi_copyin(arg, &wrvreg, sizeof (hci1394_ioctl_wrvreg_t),
	    mode);
	if (status != 0) {
		TNF_PROBE_0(hci1394_ioctl_wrvreg_ci_fail, HCI1394_TNF_HAL_ERROR,
		    "");
		TNF_PROBE_0_DEBUG(hci1394_ioctl_wrvreg_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (EFAULT);
	}

	status = hci1394_vendor_reg_write(soft_state->vendor,
	    wrvreg.regset, wrvreg.addr, wrvreg.data);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_0(hci1394_ioctl_wrvreg_vrw_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_ioctl_wrvreg_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (EINVAL);
	}

	TNF_PROBE_0_DEBUG(hci1394_ioctl_wrvreg_exit, HCI1394_TNF_HAL_STACK, "");

	return (0);
}
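The read-side hci1394_ioctl_rdvreg(), dispatched from the ioctl switch in Example #10, presumably mirrors this copyin/operate/copyout shape. A hedged sketch, assuming an analogous hci1394_ioctl_rdvreg_t struct and a hci1394_vendor_reg_read() routine (both assumptions):

/* Hedged sketch of the read-side counterpart (struct and reader assumed) */
static int
hci1394_ioctl_rdvreg_sketch(hci1394_state_t *soft_state, void *arg, int mode)
{
	hci1394_ioctl_rdvreg_t rdvreg;
	int status;

	/* copy the request (regset/addr) in from the user */
	status = ddi_copyin(arg, &rdvreg, sizeof (rdvreg), mode);
	if (status != 0) {
		return (EFAULT);
	}

	/* read the vendor register into rdvreg.data (assumed routine) */
	status = hci1394_vendor_reg_read(soft_state->vendor,
	    rdvreg.regset, rdvreg.addr, &rdvreg.data);
	if (status != DDI_SUCCESS) {
		return (EINVAL);
	}

	/* copy the result back out to the user */
	status = ddi_copyout(&rdvreg, arg, sizeof (rdvreg), mode);
	return ((status != 0) ? EFAULT : 0);
}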
Example #4
/*
 * hci1394_isr_handler_init()
 *    register our interrupt service routine.
 */
int
hci1394_isr_handler_init(hci1394_state_t *soft_state)
{
	int status;

	ASSERT(soft_state != NULL);

	/* Initialize interrupt handler */
	status = ddi_add_intr(soft_state->drvinfo.di_dip, 0, NULL, NULL,
	    hci1394_isr, (caddr_t)soft_state);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_0(hci1394_isr_handler_init_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
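The matching teardown calls ddi_remove_intr() with the same dip and inumber. A minimal sketch (the _fini name is an assumption; the unused iblock cookie is passed as NULL here, matching the NULL cookies used at add time):

/* Hedged sketch of the matching teardown (assumed name) */
void
hci1394_isr_handler_fini(hci1394_state_t *soft_state)
{
	ASSERT(soft_state != NULL);

	/* Remove the handler added in hci1394_isr_handler_init() */
	ddi_remove_intr(soft_state->drvinfo.di_dip, 0, NULL);
}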
Example #5
static int
av1394_add_events(av1394_inst_t *avp)
{
	ddi_eventcookie_t	br_evc, rem_evc, ins_evc;

	if (ddi_get_eventcookie(avp->av_dip, DDI_DEVI_BUS_RESET_EVENT,
	    &br_evc) != DDI_SUCCESS) {
		TNF_PROBE_0(av1394_add_events_error_bus_reset_cookie,
		    AV1394_TNF_INST_ERROR, "");
		return (DDI_FAILURE);
	}
	if (ddi_add_event_handler(avp->av_dip, br_evc, av1394_bus_reset,
	    avp, &avp->av_reset_cb) != DDI_SUCCESS) {
		TNF_PROBE_0(av1394_add_events_error_bus_reset_event,
		    AV1394_TNF_INST_ERROR, "");
		return (DDI_FAILURE);
	}

	if (ddi_get_eventcookie(avp->av_dip, DDI_DEVI_REMOVE_EVENT,
	    &rem_evc) != DDI_SUCCESS) {
		(void) ddi_remove_event_handler(avp->av_reset_cb);
		TNF_PROBE_0(av1394_add_events_error_remove_cookie,
		    AV1394_TNF_INST_ERROR, "");
		return (DDI_FAILURE);
	}
	if (ddi_add_event_handler(avp->av_dip, rem_evc, av1394_disconnect,
	    avp, &avp->av_remove_cb) != DDI_SUCCESS) {
		(void) ddi_remove_event_handler(avp->av_reset_cb);
		TNF_PROBE_0(av1394_add_events_error_remove_event,
		    AV1394_TNF_INST_ERROR, "");
		return (DDI_FAILURE);
	}

	if (ddi_get_eventcookie(avp->av_dip, DDI_DEVI_INSERT_EVENT,
	    &ins_evc) != DDI_SUCCESS) {
		(void) ddi_remove_event_handler(avp->av_remove_cb);
		(void) ddi_remove_event_handler(avp->av_reset_cb);
		TNF_PROBE_0(av1394_add_events_error_insert_cookie,
		    AV1394_TNF_INST_ERROR, "");
		return (DDI_FAILURE);
	}
	if (ddi_add_event_handler(avp->av_dip, ins_evc, av1394_reconnect,
	    avp, &avp->av_insert_cb) != DDI_SUCCESS) {
		(void) ddi_remove_event_handler(avp->av_remove_cb);
		(void) ddi_remove_event_handler(avp->av_reset_cb);
		TNF_PROBE_0(av1394_add_events_error_insert_event,
		    AV1394_TNF_INST_ERROR, "");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
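On detach, all three handlers registered above must be removed. A minimal sketch of the counterpart (the av1394_remove_events() name is an assumption; the callback IDs are the ones stored above):

/* Hedged sketch of the matching teardown (assumed name) */
static void
av1394_remove_events(av1394_inst_t *avp)
{
	(void) ddi_remove_event_handler(avp->av_insert_cb);
	(void) ddi_remove_event_handler(avp->av_remove_cb);
	(void) ddi_remove_event_handler(avp->av_reset_cb);
}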
Example #6
/*
 * put a message on the read queue, take care of polling
 */
void
av1394_async_putq_rq(av1394_inst_t *avp, mblk_t *mp)
{
	av1394_async_t	*ap = &avp->av_a;

	if (!av1394_putq(&ap->a_rq, mp)) {
		freemsg(mp);
		TNF_PROBE_0(av1394_async_putq_rq_error_putq,
		    AV1394_TNF_ASYNC_ERROR, "");
	} else {
		mutex_enter(&ap->a_mutex);
		if (ap->a_pollevents & POLLIN) {
			ap->a_pollevents &= ~POLLIN;
			mutex_exit(&ap->a_mutex);
			pollwakeup(&ap->a_pollhead, POLLIN);
		} else {
			mutex_exit(&ap->a_mutex);
		}
	}
}
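The a_pollevents bit cleared here is the one a chpoll(9E) entry point would arm when no data is queued yet. A hedged sketch of that poll side (av1394_peekq() is a hypothetical "queue non-empty" test, not the driver's API):

/* Hedged sketch of the poll side that arms the POLLIN wakeup above */
static int
av1394_async_poll_sketch(av1394_inst_t *avp, short events, int anyyet,
    short *reventsp, struct pollhead **phpp)
{
	av1394_async_t	*ap = &avp->av_a;
	short		revents = 0;

	mutex_enter(&ap->a_mutex);
	if ((events & POLLIN) && av1394_peekq(&ap->a_rq)) {
		revents |= POLLIN;		/* data is ready now */
	} else if (!anyyet) {
		ap->a_pollevents |= POLLIN;	/* arm wakeup in putq_rq */
		*phpp = &ap->a_pollhead;
	}
	mutex_exit(&ap->a_mutex);

	*reventsp = revents;
	return (0);
}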
Example #7
/*
 * tavor_srq_sgl_to_logwqesz()
 *    Context: Can be called from interrupt or base context.
 */
static void
tavor_srq_sgl_to_logwqesz(tavor_state_t *state, uint_t num_sgl,
    tavor_qp_wq_type_t wq_type, uint_t *logwqesz, uint_t *max_sgl)
{
	uint_t	max_size, log2, actual_sgl;

	TAVOR_TNF_ENTER(tavor_srq_sgl_to_logwqesz);

	switch (wq_type) {
	case TAVOR_QP_WQ_TYPE_RECVQ:
		/*
		 * Use requested maximum SGL to calculate max descriptor size
		 * (while guaranteeing that the descriptor size is a
		 * power-of-2 cachelines).
		 */
		max_size = (TAVOR_QP_WQE_MLX_RCV_HDRS + (num_sgl << 4));
		log2 = highbit(max_size);
		if ((max_size & (max_size - 1)) == 0) {
			log2 = log2 - 1;
		}

		/* Make sure descriptor is at least the minimum size */
		log2 = max(log2, TAVOR_QP_WQE_LOG_MINIMUM);

		/* Calculate actual number of SGL (given WQE size) */
		actual_sgl = ((1 << log2) - TAVOR_QP_WQE_MLX_RCV_HDRS) >> 4;
		break;

	default:
		TAVOR_WARNING(state, "unexpected work queue type");
		TNF_PROBE_0(tavor_srq_sgl_to_logwqesz_inv_wqtype_fail,
		    TAVOR_TNF_ERROR, "");
		break;
	}

	/* Fill in the return values */
	*logwqesz = log2;
	*max_sgl  = min(state->ts_cfg_profile->cp_srq_max_sgl, actual_sgl);

	TAVOR_TNF_EXIT(tavor_srq_sgl_to_logwqesz);
}
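The highbit()/power-of-2 test above is a round-up-to-power-of-2 in disguise: highbit() returns the 1-based index of the most significant set bit, so subtracting one only when the size is already a power of two yields ceil(log2(size)). An equivalent standalone helper, for illustration only:

/* Illustrative only: ceil(log2(size)) using the same idiom as above */
static uint_t
tavor_ceil_log2(uint_t size)
{
	uint_t	log2 = highbit(size);	/* 1-based index of the MSB */

	/* e.g. highbit(0x30) = 6 -> 2^6 = 64; highbit(0x40) = 7 -> 2^6 */
	if ((size & (size - 1)) == 0) {
		log2 = log2 - 1;	/* already a power of two */
	}
	return (log2);
}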
Example #8
/*
 * tavor_agent_handlers_fini()
 *    Context: Only called from detach() path context
 */
int
tavor_agent_handlers_fini(tavor_state_t *state)
{
	int		status;

	TAVOR_TNF_ENTER(tavor_agent_handlers_fini);

	/* Determine if we need to unregister any agents from the IBMF */
	if ((state->ts_cfg_profile->cp_qp0_agents_in_fw) &&
	    (state->ts_cfg_profile->cp_qp1_agents_in_fw)) {
		TAVOR_TNF_EXIT(tavor_agent_handlers_fini);
		return (DDI_SUCCESS);
	}

	/* Now attempt to unregister all of the agents from the IBMF */
	status = tavor_agent_unregister_all(state, state->ts_num_agents);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_0(tavor_agent_handlers_fini_unreg_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_agent_handlers_fini);
		return (DDI_FAILURE);
	}

	/*
	 * Destroy the task queue.  The task queue destroy is guaranteed to
	 * wait until any scheduled tasks have completed.  We are able to
	 * guarantee that no _new_ tasks will be added to the task queue while
	 * we are in the ddi_taskq_destroy() call because we have
	 * (at this point) successfully unregistered from IBMF (in
	 * tavor_agent_unregister_all() above).
	 */
	ddi_taskq_destroy(state->ts_taskq_agents);

	/* Teardown the Tavor IB management agent list */
	tavor_agent_list_fini(state);

	TAVOR_TNF_EXIT(tavor_agent_handlers_fini);
	return (DDI_SUCCESS);
}
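The task queue destroyed here is created on the attach path. A minimal sketch of that counterpart, assuming a single-threaded queue and an assumed wrapper name (state->ts_dip is the field used elsewhere in this driver):

/* Hedged sketch of the attach-side counterpart (names assumed) */
static int
tavor_agent_taskq_create(tavor_state_t *state)
{
	state->ts_taskq_agents = ddi_taskq_create(state->ts_dip,
	    "tavor_agent_taskq", 1, TASKQ_DEFAULTPRI, 0);
	return ((state->ts_taskq_agents == NULL) ? DDI_FAILURE : DDI_SUCCESS);
}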
Example #9
/*
 * hci1394_isr_atresp_complete()
 *    Process all completed responses that we have sent out (i.e. HW gave us
 *    an ack).  We get a request in off the bus (arreq) and send it up to the
 *    services layer; some time later, it sends down a response to that
 *    request.  This interrupt signifies that the HW is done with the
 *    response (i.e. it sent it out or failed it).
 */
static void
hci1394_isr_atresp_complete(hci1394_state_t *soft_state)
{
	boolean_t response_available;
	int status;


	ASSERT(soft_state != NULL);
	TNF_PROBE_0_DEBUG(hci1394_isr_atresp_complete_enter,
	    HCI1394_TNF_HAL_STACK, "");

	hci1394_ohci_intr_clear(soft_state->ohci, OHCI_INTR_RESP_TX_CMPLT);

	/*
	 * Process all ack'd AT responses.  It is possible that we will call
	 * hci1394_async_atresp_process() even though there are no more
	 * responses to process.  This would be because we have processed
	 * them earlier on (i.e. we cleared the interrupt, then got another
	 * response and processed it; the interrupt would still be pending).
	 */
	do {
		/*
		 * Process a single response. Do not flush Q. That is only
		 * done during bus reset processing.
		 */
		status = hci1394_async_atresp_process(soft_state->async,
		    B_FALSE, &response_available);
		if (status != DDI_SUCCESS) {
			TNF_PROBE_0(hci1394_isr_atresp_complete_pr_fail,
			    HCI1394_TNF_HAL_ERROR, "");
		}
	} while (response_available == B_TRUE);

	TNF_PROBE_0_DEBUG(hci1394_isr_atresp_complete_exit,
	    HCI1394_TNF_HAL_STACK, "");
}
Example #10
/* ARGSUSED */
int
hci1394_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	hci1394_state_t *soft_state;
	int instance;
	int status;


	TNF_PROBE_0_DEBUG(hci1394_ioctl_enter, HCI1394_TNF_HAL_STACK, "");

	instance = getminor(dev);
	if (instance == -1) {
		TNF_PROBE_0(hci1394_ioctl_gm_fail, HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_ioctl_exit, HCI1394_TNF_HAL_STACK,
		    "");
		return (EBADF);
	}

	soft_state = ddi_get_soft_state(hci1394_statep, instance);
	if (soft_state == NULL) {
		TNF_PROBE_0(hci1394_ioctl_gss_fail, HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_ioctl_exit, HCI1394_TNF_HAL_STACK,
		    "");
		return (EBADF);
	}

	status = 0;

	switch (cmd) {
	case HCI1394_IOCTL_WRITE_REG:
		status = hci1394_ioctl_wrreg(soft_state, (void *)arg, mode);
		break;
	case HCI1394_IOCTL_READ_REG:
		status = hci1394_ioctl_rdreg(soft_state, (void *)arg, mode);
		break;
	case HCI1394_IOCTL_READ_VREG:
		status = hci1394_ioctl_rdvreg(soft_state, (void *)arg, mode);
		break;
	case HCI1394_IOCTL_WRITE_VREG:
		status = hci1394_ioctl_wrvreg(soft_state, (void *)arg, mode);
		break;
	case HCI1394_IOCTL_RESET_BUS:
		status = hci1394_ohci_bus_reset(soft_state->ohci);
		break;
	case HCI1394_IOCTL_SELFID_CNT:
		status = hci1394_ioctl_selfid_cnt(soft_state, (void *)arg,
		    mode);
		break;
	case HCI1394_IOCTL_BUSGEN_CNT:
		status = hci1394_ioctl_busgen_cnt(soft_state, (void *)arg,
		    mode);
		break;
	case HCI1394_IOCTL_READ_SELFID:
		status = hci1394_ioctl_read_selfid(soft_state, (void *)arg,
		    mode);
		break;
	case HCI1394_IOCTL_READ_PHY:
		status = hci1394_ioctl_rdphy(soft_state, (void *)arg, mode);
		break;
	case HCI1394_IOCTL_WRITE_PHY:
		status = hci1394_ioctl_wrphy(soft_state, (void *)arg, mode);
		break;
	case HCI1394_IOCTL_HBA_INFO:
		status = hci1394_ioctl_hbainfo(soft_state, (void *)arg, mode);
		break;
	default:
		/*
		 * if we don't know what the ioctl is, forward it on to the
		 * services layer.  The services layer will handle the devctl
		 * ioctl's along with any services layer private ioctls that
		 * it has defined.
		 */
		status = h1394_ioctl(soft_state->drvinfo.di_sl_private, cmd,
		    arg, mode, credp, rvalp);
		break;
	}

	TNF_PROBE_0_DEBUG(hci1394_ioctl_exit, HCI1394_TNF_HAL_STACK, "");

	return (status);
}
Example #11
static int
hci1394_ioctl_read_selfid(hci1394_state_t *soft_state, void *arg, int mode)
{
	hci1394_ioctl_read_selfid_t read_selfid;
	int status;
	uint_t offset;
	uint32_t data;
#ifdef	_MULTI_DATAMODEL
	hci1394_ioctl_readselfid32_t read_selfid32;
#endif


	ASSERT(soft_state != NULL);
	ASSERT(arg != NULL);
	TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid_enter,
	    HCI1394_TNF_HAL_STACK, "");

#ifdef	_MULTI_DATAMODEL
	switch (ddi_model_convert_from(mode & FMODELS)) {

		/* 32-bit app in 64-bit kernel */
	case DDI_MODEL_ILP32:
		/* copy in the 32-bit version of the args */
		status = ddi_copyin(arg, &read_selfid32,
		    sizeof (hci1394_ioctl_readselfid32_t), mode);
		if (status != 0) {
			TNF_PROBE_0(hci1394_ioctl_read_selfid_ci_fail,
			    HCI1394_TNF_HAL_ERROR, "");
			TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid_exit,
			    HCI1394_TNF_HAL_STACK, "");
			return (EFAULT);
		}

		/*
		 * Use a special function to process the 32-bit user address
		 * pointer embedded in the structure we pass in arg.
		 */
		status = hci1394_ioctl_read_selfid32(soft_state,
		    &read_selfid32, mode);
		return (status);
	default:
		break;
	}
#endif

	/*
	 * if we got here, we either are a 64-bit app in a 64-bit kernel or a
	 * 32-bit app in a 32-bit kernel
	 */

	/* copy in the args. We don't need to do any special conversions */
	status = ddi_copyin(arg, &read_selfid,
	    sizeof (hci1394_ioctl_read_selfid_t), mode);
	if (status != 0) {
		TNF_PROBE_0(hci1394_ioctl_read_selfid_ci_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (EFAULT);
	}

	/*
	 * make sure we are not trying to copy more data than the selfid buffer
	 * can hold.  count is in quadlets and max_selfid_size is in bytes.
	 */
	if ((read_selfid.count * 4) > OHCI_MAX_SELFID_SIZE) {
		TNF_PROBE_0(hci1394_ioctl_read_selfid_cnt_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (EINVAL);
	}

	/*
	 * copy the selfid buffer one word at a time into the user buffer. The
	 * combination between having to do ddi_get32's (for endian reasons)
	 * and a ddi_copyout() make it easier to do it one word at a time.
	 */
	for (offset = 0; offset < read_selfid.count; offset++) {
		/* read word from selfid buffer */
		hci1394_ohci_selfid_read(soft_state->ohci, offset, &data);

		/* copy the selfid word into the user buffer */
		status = ddi_copyout(&data, &read_selfid.buf[offset], 4, mode);
		if (status != 0) {
			TNF_PROBE_0(hci1394_ioctl_read_selfid_co_fail,
			    HCI1394_TNF_HAL_ERROR, "");
			TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid_exit,
			    HCI1394_TNF_HAL_STACK, "");
			return (EFAULT);
		}
	}

	TNF_PROBE_0_DEBUG(hci1394_ioctl_read_selfid_exit,
	    HCI1394_TNF_HAL_STACK, "");

	return (0);
}
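The ILP32 branch exists because a 32-bit application lays the struct out with a 32-bit buffer pointer, which a 64-bit kernel must widen explicitly before it can reuse the common copyout loop. A minimal sketch of the likely 32-bit layout and the widening step (both are assumptions, not the driver's actual definitions; caddr32_t comes from <sys/types32.h>):

/* Hedged sketch: assumed layout of the ILP32 ioctl argument */
typedef struct hci1394_ioctl_readselfid32_sketch_s {
	caddr32_t	buf;	/* 32-bit user buffer address */
	uint32_t	count;	/* quadlets to copy out */
} hci1394_ioctl_readselfid32_sketch_t;

/* Hedged sketch: widen the 32-bit user pointer into the native form */
static void
hci1394_readselfid32_widen(hci1394_ioctl_readselfid32_sketch_t *r32,
    hci1394_ioctl_read_selfid_t *r)
{
	r->buf = (uint32_t *)(uintptr_t)r32->buf;
	r->count = r32->count;
}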
Example #12
/*
 * tavor_agent_list_init()
 *    Context: Only called from attach() path context
 */
static int
tavor_agent_list_init(tavor_state_t *state)
{
	tavor_agent_list_t	*curr;
	uint_t			num_ports, num_agents, num_agents_per_port;
	uint_t			num_sma_agents = 0;
	uint_t			num_pma_agents = 0;
	uint_t			num_bma_agents = 0;
	uint_t			do_qp0, do_qp1;
	int			i, j, indx;

	TAVOR_TNF_ENTER(tavor_agent_list_init);

	/*
	 * Calculate the number of registered agents for each port
	 * (SMA, PMA, and BMA) and determine whether or not to register
	 * a given agent with the IBMF (or whether to let the Tavor firmware
	 * handle it)
	 */
	num_ports	    = state->ts_cfg_profile->cp_num_ports;
	num_agents	    = 0;
	num_agents_per_port = 0;
	do_qp0		    = state->ts_cfg_profile->cp_qp0_agents_in_fw;
	do_qp1		    = state->ts_cfg_profile->cp_qp1_agents_in_fw;
	if (do_qp0 == 0) {
		num_agents += (num_ports * TAVOR_NUM_QP0_AGENTS_PER_PORT);
		num_agents_per_port += TAVOR_NUM_QP0_AGENTS_PER_PORT;
		num_sma_agents = num_ports;
	}
	if (do_qp1 == 0) {
		num_agents += (num_ports * TAVOR_NUM_QP1_AGENTS_PER_PORT);
		num_agents_per_port += TAVOR_NUM_QP1_AGENTS_PER_PORT;
		num_pma_agents = num_ports;
		/*
		 * The following line is commented out because the Tavor
		 * firmware does not currently support a BMA.  If it did,
		 * then we would want to register the agent with the IBMF.
		 * (We would also need to have TAVOR_NUM_QP1_AGENTS_PER_PORT
		 * set to 2, instead of 1.)
		 *
		 * num_bma_agents = num_ports;
		 */
	}

	state->ts_num_agents = num_agents;

	/*
	 * Allocate the memory for all of the agent list entries
	 */
	state->ts_agents = (tavor_agent_list_t *)kmem_zalloc(num_agents *
	    sizeof (tavor_agent_list_t), KM_SLEEP);
	if (state->ts_agents == NULL) {
		TNF_PROBE_0(tavor_agent_list_init_kma_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_agent_list_init);
		return (DDI_FAILURE);
	}

	/*
	 * Fill in each of the agent list entries with the agent's
	 * MgmtClass, port number, and Tavor softstate pointer
	 */
	indx = 0;
	for (i = 0; i < num_agents_per_port; i++) {
		for (j = 0; j < num_ports; j++) {
			curr = &state->ts_agents[indx];
			curr->agl_state = state;
			curr->agl_port  = j + 1;

			if ((do_qp0 == 0) && num_sma_agents) {
				curr->agl_mgmtclass = SUBN_AGENT;
				num_sma_agents--;
				indx++;
			} else if ((do_qp1 == 0) && (num_pma_agents)) {
				curr->agl_mgmtclass = PERF_AGENT;
				num_pma_agents--;
				indx++;
			} else if ((do_qp1 == 0) && (num_bma_agents)) {
				curr->agl_mgmtclass = BM_AGENT;
				num_bma_agents--;
				indx++;
			}
		}
	}

	TAVOR_TNF_EXIT(tavor_agent_list_init);
	return (DDI_SUCCESS);
}
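tavor_agent_list_fini(), called from the teardown in Example #8, presumably just returns this num_agents-sized allocation. A minimal sketch of that body (the body is an assumption):

/* Hedged sketch of the matching teardown (body assumed) */
static void
tavor_agent_list_fini(tavor_state_t *state)
{
	kmem_free(state->ts_agents,
	    state->ts_num_agents * sizeof (tavor_agent_list_t));
	state->ts_agents = NULL;
}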
Example #13
File: mkdstore.c  Project: andreiw/polaris
/*
 * mkdstore <table> <nrecords> <cid> <flags> <cip> <sip> <lease> <macro>
 * <comment>
 */
int
main(int c, char **v)
{
	long long	cid;
	uchar_t	flags;
	struct in_addr	cip;
	struct in_addr	sip;
	int		i, j;
	char		**entries;
	uint_t		lease;
	char		*network = v[1];
	int		ct = strtol(v[2], 0L, 0L);
	char		*server;
	char		*macro;
	int		err;
	uint32_t	query;
	dn_rec_t	dn;
	dn_rec_list_t	*dncp = NULL;
	dhcp_confopt_t	*dsp = NULL;

#ifdef	DEBUG
	mallocctl(MTDEBUGPATTERN, 1);
	mallocctl(MTINITBUFFER, 1);
#endif				/* DEBUG */

	if (c == 1) {
		(void) fprintf(stderr, "/*\n * mkdstore <table> <nrecords> "
		    "<cid> <flags> <cip> <sip> <lease> <macro> <comment>\n*/");
		return (0);
	}

	cid = (c > 3) ? strtoul(v[3], 0L, 0L) : 0;
	flags = (c > 4) ? (char)strtol(v[4], 0L, 0L) : 0;
	cip.s_addr = (c > 5) ? strtoul(v[5], 0L, 0L) : 0;
	sip.s_addr = (c > 6) ? strtoul(v[6], 0L, 0L) : 0;
	lease = (c > 7) ? strtoul(v[7], 0L, 0L) : 0;
	macro = (c > 8) ? v[8] : 0;
	server = (c > 9) ? v[9] : "unknown";

	entries = (char **) malloc(ct * (sizeof (char *) * 8 + 4));

	/* Load current datastore. */
	(void) read_dsvc_conf(&dsp);
	if ((i = confopt_to_datastore(dsp, &datastore)) != DSVC_SUCCESS) {
		(void) fprintf(stderr, "Invalid datastore: %s\n",
		    dhcpsvc_errmsg(i));
		return (EINVAL);
	}
	err = open_dd(&dh, &datastore, DSVC_DHCPNETWORK, network,
	    DSVC_READ | DSVC_WRITE);

	if (err != DSVC_SUCCESS) {
		(void) fprintf(stderr, "Invalid network: %s trying create...\n",
		    dhcpsvc_errmsg(err));

		err = open_dd(&dh, &datastore, DSVC_DHCPNETWORK, network,
		    DSVC_READ | DSVC_WRITE | DSVC_CREATE);
		if (err != DSVC_SUCCESS) {
			(void) fprintf(stderr, "Can't create network: %s\n",
			    dhcpsvc_errmsg(err));
			return (err);
		}
	}
	/* XXXX: bug: currently can't get the count as advertised */
	(void) memset(&dn, '\0', sizeof (dn));
	DSVC_QINIT(query);
	err = lookup_dd(dh, B_FALSE, query, -1,
	    (const void *) &dn, (void **) &dncp, &nrecords);
	if (dncp)
		free_dd_list(dh, dncp);

	if (err != DSVC_SUCCESS) {
		(void) fprintf(stderr, "Bad nrecords: %s [%d]\n",
		    dhcpsvc_errmsg(err), nrecords);
		return (err);
	}

	for (i = 0, j = 0; i < ct; i++) {
		TNF_PROBE_1(main, "main",
			    "main%debug 'in function main'",
			    tnf_ulong, record, i);
		if (cid) {
			(void) memcpy(dn.dn_cid, &cid, sizeof (long long));
			dn.dn_cid_len = 7;
		} else {
			(void) memset(dn.dn_cid, '\0', sizeof (long long));
			dn.dn_cid_len = 1;
		}
		dn.dn_sig = 0;
		dn.dn_flags = flags;
		dn.dn_cip.s_addr = cip.s_addr;
		dn.dn_sip.s_addr = sip.s_addr;
		dn.dn_lease = lease;
		(void) strcpy(dn.dn_macro, macro);
		(void) strcpy(dn.dn_comment, server);
		(void) add_dd_entry(dh, &dn);
		if (cid)
			cid += 0x100;
		cip.s_addr++;

		TNF_PROBE_0(main_end, "main", "");
	}
	(void) close_dd(&dh);

	return (0);
}
Example #14
/*
 * hci1394_isr_self_id()
 *    Process the selfid complete interrupt.  The bus reset has completed
 *    and the 1394 HW has finished its bus enumeration.  The SW needs to
 *    see what's changed and handle any hotplug conditions.
 */
static void
hci1394_isr_self_id(hci1394_state_t *soft_state)
{
	int status;
	uint_t node_id;
	uint_t selfid_size;
	uint_t quadlet_count;
	uint_t index;
	uint32_t *selfid_buf_p;
	boolean_t selfid_error;
	boolean_t nodeid_error;
	boolean_t saw_error = B_FALSE;
	uint_t phy_status;


	ASSERT(soft_state != NULL);
	TNF_PROBE_0_DEBUG(hci1394_isr_self_id_enter, HCI1394_TNF_HAL_STACK, "");

	soft_state->drvinfo.di_stats.st_selfid_count++;

	/*
	 * Check for the bizarre case where a bus reset and a selfid complete
	 * both arrived after the ISR's bus reset check.  If we are not
	 * already in bus reset state, run the bus reset handling now.
	 */
	if (hci1394_state(&soft_state->drvinfo) != HCI1394_BUS_RESET) {
		hci1394_isr_bus_reset(soft_state);
	}

	/*
	 * Clear any PHY error status bits that are set.  The PHY status bits
	 * may always be set (e.g. if we removed cable power), so we do not
	 * want to clear them when we handle the PHY event interrupt.  We
	 * clear them on every selfid complete interrupt instead, so worst
	 * case we will get one PHY event interrupt per bus reset.
	 */
	status = hci1394_ohci_phy_read(soft_state->ohci, 5, &phy_status);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_0(hci1394_isr_self_id_pr_fail,
		    HCI1394_TNF_HAL_ERROR, "");
	} else {
		phy_status |= OHCI_PHY_LOOP_ERR | OHCI_PHY_PWRFAIL_ERR |
		    OHCI_PHY_TIMEOUT_ERR | OHCI_PHY_PORTEVT_ERR;
		status = hci1394_ohci_phy_write(soft_state->ohci, 5,
		    phy_status);
		if (status != DDI_SUCCESS) {
			TNF_PROBE_0(hci1394_isr_self_id_pw_fail,
			    HCI1394_TNF_HAL_ERROR, "");
		} else {
			/*
			 * Re-enable the PHY interrupt.  We disable the PHY
			 * interrupt when we get one so that we do not get
			 * stuck in the ISR.
			 */
			hci1394_ohci_intr_enable(soft_state->ohci,
			    OHCI_INTR_PHY);
		}
	}

	/* See if either AT active bit is set */
	if (hci1394_ohci_at_active(soft_state->ohci) == B_TRUE) {
		TNF_PROBE_1(hci1394_isr_self_id_as_fail, HCI1394_TNF_HAL_ERROR,
		    "", tnf_string, errmsg, "AT ACTIVE still set");
		saw_error = B_TRUE;
	}

	/* Clear busReset and selfIdComplete interrupts */
	hci1394_ohci_intr_clear(soft_state->ohci, (OHCI_INTR_BUS_RESET |
	    OHCI_INTR_SELFID_CMPLT));

	/* Read node info and test for Invalid Node ID */
	hci1394_ohci_nodeid_info(soft_state->ohci, &node_id, &nodeid_error);
	if (nodeid_error == B_TRUE) {
		TNF_PROBE_1(hci1394_isr_self_id_ni_fail, HCI1394_TNF_HAL_ERROR,
		    "", tnf_string, errmsg, "saw invalid NodeID");
		saw_error = B_TRUE;
	}

	/* Sync Selfid Buffer */
	hci1394_ohci_selfid_sync(soft_state->ohci);

	/* store away selfid info */
	hci1394_ohci_selfid_info(soft_state->ohci,
	    &soft_state->drvinfo.di_gencnt, &selfid_size, &selfid_error);

	/* Test for selfid error */
	if (selfid_error == B_TRUE) {
		TNF_PROBE_1(hci1394_isr_self_id_si_fail, HCI1394_TNF_HAL_ERROR,
		    "", tnf_string, errmsg, "saw invalid SelfID");
		saw_error = B_TRUE;
	}

	/*
	 * selfid size could be 0 if a bus reset has occurred. If this occurs,
	 * we should have another selfid int coming later.
	 */
	if ((saw_error == B_FALSE) && (selfid_size == 0)) {
		TNF_PROBE_0_DEBUG(hci1394_isr_self_id_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return;
	}

	/*
	 * make sure generation count in buffer matches generation
	 * count in register.
	 */
	if (hci1394_ohci_selfid_buf_current(soft_state->ohci) == B_FALSE) {
		TNF_PROBE_0_DEBUG(hci1394_isr_self_id_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return;
	}

	/*
	 * Skip over first quadlet in selfid buffer, this is OpenHCI specific
	 * data.
	 */
	selfid_size = selfid_size - IEEE1394_QUADLET;
	quadlet_count = selfid_size >> 2;

	/* Copy selfid buffer to Services Layer buffer */
	for (index = 0; index < quadlet_count; index++) {
		hci1394_ohci_selfid_read(soft_state->ohci, index + 1,
		    &soft_state->sl_selfid_buf[index]);
	}

	/*
	 * Put our selfID info into the Services Layer's selfid buffer if we
	 * have a 1394-1995 PHY.
	 */
	if (soft_state->halinfo.phy == H1394_PHY_1995) {
		selfid_buf_p = (uint32_t *)(
		    (uintptr_t)soft_state->sl_selfid_buf +
		    (uintptr_t)selfid_size);
		status = hci1394_ohci_phy_info(soft_state->ohci,
		    &selfid_buf_p[0]);
		if (status != DDI_SUCCESS) {
			/*
			 * If we fail reading from PHY, put invalid data into
			 * the selfid buffer so the SL will reset the bus again.
			 */
			TNF_PROBE_0(hci1394_isr_self_id_pi_fail,
			    HCI1394_TNF_HAL_ERROR, "");
			selfid_buf_p[0] = 0xFFFFFFFF;
			selfid_buf_p[1] = 0xFFFFFFFF;
		} else {
			selfid_buf_p[1] = ~selfid_buf_p[0];
		}
		selfid_size = selfid_size + 8;
	}

	/* Flush out async DMA Q's */
	hci1394_async_flush(soft_state->async);

	/*
	 * Make sure generation count is still valid.  i.e. we have not gotten
	 * another bus reset since the last time we checked.  If we have gotten
	 * another bus reset, we should have another selfid interrupt coming.
	 */
	if (soft_state->drvinfo.di_gencnt !=
	    hci1394_ohci_current_busgen(soft_state->ohci)) {
		TNF_PROBE_0_DEBUG(hci1394_isr_self_id_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return;
	}

	/*
	 * do whatever CSR register processing that needs to be done.
	 */
	hci1394_csr_bus_reset(soft_state->csr);

	/*
	 * do whatever management may be necessary for the CYCLE_LOST and
	 * CYCLE_INCONSISTENT interrupts.
	 */
	hci1394_isoch_error_ints_enable(soft_state);

	/*
	 * See if we saw an error.  If we did, tell the services layer that we
	 * finished selfid processing and give them an illegal selfid buffer
	 * size of 0.  The Services Layer will try to reset the bus again to
	 * see if we can recover from this problem.  It will threshold after
	 * a finite number of errors.
	 */
	if (saw_error == B_TRUE) {
		h1394_self_ids(soft_state->drvinfo.di_sl_private,
		    soft_state->sl_selfid_buf, 0, node_id,
		    soft_state->drvinfo.di_gencnt);

		/*
		 * Take ourself out of Bus Reset processing mode
		 *
		 * Set the driver state to normal. If we cannot, we have been
		 * shutdown. The only way we can get in this code is if we have
		 * a multi-processor machine and the HAL is shutdown by one
		 * processor running in base context while this interrupt
		 * handler runs in another processor. We will disable all
		 * interrupts and just return.  We shouldn't have to disable
		 * the interrupts, but we will just in case.
		 */
		status = hci1394_state_set(&soft_state->drvinfo,
		    HCI1394_NORMAL);
		if (status != DDI_SUCCESS) {
			hci1394_ohci_intr_master_disable(soft_state->ohci);
			return;
		}
	} else if (IEEE1394_NODE_NUM(node_id) != 63) {
		/*
		 * Notify services layer about self-id-complete. Don't notify
		 * the services layer if there are too many devices on the bus.
		 */
		h1394_self_ids(soft_state->drvinfo.di_sl_private,
		    soft_state->sl_selfid_buf, selfid_size,
		    node_id, soft_state->drvinfo.di_gencnt);

		/*
		 * Take ourself out of Bus Reset processing mode
		 *
		 * Set the driver state to normal. If we cannot, we have been
		 * shutdown. The only way we can get in this code is if we have
		 * a multi-processor machine and the HAL is shutdown by one
		 * processor running in base context while this interrupt
		 * handler runs in another processor. We will disable all
		 * interrupts and just return.  We shouldn't have to disable
		 * the interrupts, but we will just in case.
		 */
		status = hci1394_state_set(&soft_state->drvinfo,
		    HCI1394_NORMAL);
		if (status != DDI_SUCCESS) {
			hci1394_ohci_intr_master_disable(soft_state->ohci);
			return;
		}
	} else {
		cmn_err(CE_NOTE, "hci1394(%d): Too many devices on the 1394 "
		    "bus", soft_state->drvinfo.di_instance);
	}

	/* enable bus reset interrupt */
	hci1394_ohci_intr_enable(soft_state->ohci, OHCI_INTR_BUS_RESET);

	TNF_PROBE_0_DEBUG(hci1394_isr_self_id_exit, HCI1394_TNF_HAL_STACK, "");
}
Example #15
/*
 * hci1394_isr()
 *    Core interrupt handler.  Every interrupt enabled in
 *    hci1394_isr_mask_setup() should be covered here.  There may be other
 *    interrupts supported in here even if they are not initially enabled
 *    (like OHCI_INTR_CYC_64_SECS), since they may be enabled later (e.g.
 *    due to a CSR register write).
 */
static uint_t
hci1394_isr(caddr_t parm)
{
	hci1394_state_t *soft_state;
	h1394_posted_wr_err_t posted_wr_err;
	uint32_t interrupt_event;
	uint_t status;


	status = DDI_INTR_UNCLAIMED;
	soft_state = (hci1394_state_t *)parm;

	ASSERT(soft_state != NULL);
	TNF_PROBE_0_DEBUG(hci1394_isr_enter, HCI1394_TNF_HAL_STACK, "");

	/*
	 * Get all of the enabled 1394 interrupts which are currently
	 * asserted.
	 */
	interrupt_event = hci1394_ohci_intr_asserted(soft_state->ohci);
	do {
		/* handle the asserted interrupts */
		if (interrupt_event & OHCI_INTR_BUS_RESET) {
			hci1394_isr_bus_reset(soft_state);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_SELFID_CMPLT) {
			hci1394_isr_self_id(soft_state);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_ISOCH_TX) {
			hci1394_isr_isoch_it(soft_state);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_ISOCH_RX) {
			hci1394_isr_isoch_ir(soft_state);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_REQ_TX_CMPLT) {
			hci1394_isr_atreq_complete(soft_state);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_RSPKT) {
			hci1394_isr_arresp(soft_state);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_RQPKT) {
			hci1394_isr_arreq(soft_state);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_RESP_TX_CMPLT) {
			hci1394_isr_atresp_complete(soft_state);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_CYC_64_SECS) {
			hci1394_ohci_isr_cycle64seconds(soft_state->ohci);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_UNRECOVERABLE_ERR) {
			h1394_error_detected(soft_state->drvinfo.di_sl_private,
			    H1394_SELF_INITIATED_SHUTDOWN, NULL);
			cmn_err(CE_WARN, "hci1394(%d): driver shutdown: "
			    "unrecoverable error interrupt detected",
			    soft_state->drvinfo.di_instance);
			hci1394_shutdown(soft_state->drvinfo.di_dip);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_CYC_LOST) {
			hci1394_isoch_cycle_lost(soft_state);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_CYC_INCONSISTENT) {
			hci1394_isoch_cycle_inconsistent(soft_state);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_CYC_TOO_LONG) {
			hci1394_ohci_intr_clear(soft_state->ohci,
			    OHCI_INTR_CYC_TOO_LONG);
			/* clear cycle master bit in csr state register */
			hci1394_csr_state_bclr(soft_state->csr,
			    IEEE1394_CSR_STATE_CMSTR);
			h1394_error_detected(soft_state->drvinfo.di_sl_private,
			    H1394_CYCLE_TOO_LONG, NULL);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_POST_WR_ERR) {
			hci1394_ohci_postwr_addr(soft_state->ohci,
			    &posted_wr_err.addr);
			h1394_error_detected(soft_state->drvinfo.di_sl_private,
			    H1394_POSTED_WR_ERR, &posted_wr_err);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_PHY) {
			hci1394_ohci_isr_phy(soft_state->ohci);
			status = DDI_INTR_CLAIMED;
		}
		if (interrupt_event & OHCI_INTR_LOCK_RESP_ERR) {
			hci1394_ohci_intr_clear(soft_state->ohci,
			    OHCI_INTR_LOCK_RESP_ERR);
			h1394_error_detected(soft_state->drvinfo.di_sl_private,
			    H1394_LOCK_RESP_ERR, NULL);
			status = DDI_INTR_CLAIMED;
		}

		/*
		 * Check for self-id-complete interrupt disappearing.  There is
		 * a chance in OpenHCI where it will assert the selfid
		 * interrupt and then take it away.  We will look for this case
		 * and claim it just in case.  We could possibly claim an
		 * interrupt that's not ours.  We would have to be in the
		 * middle of a bus reset and a bunch of other weird stuff
		 * would have to align.  It should not hurt anything if we do.
		 *
		 * This will very very rarely happen, if ever.  We still have
		 * to handle the case, just in case. OpenHCI 1.1 should fix
		 * this problem.
		 */
		if ((status == DDI_INTR_UNCLAIMED) &&
		    (hci1394_state(&soft_state->drvinfo) ==
		    HCI1394_BUS_RESET)) {
			if (soft_state->drvinfo.di_gencnt !=
			    hci1394_ohci_current_busgen(soft_state->ohci)) {
				TNF_PROBE_0(hci1394_isr_busgen_claim,
				    HCI1394_TNF_HAL, "");
				status = DDI_INTR_CLAIMED;
			}
		}

		/*
		 * See if any of the enabled 1394 interrupts have been asserted
		 * since we first checked.
		 */
		interrupt_event = hci1394_ohci_intr_asserted(
		    soft_state->ohci);
	} while (interrupt_event != 0);

	TNF_PROBE_0_DEBUG(hci1394_isr_exit, HCI1394_TNF_HAL_STACK, "");

	return (status);
}
Example #16
/*
 * tavor_agent_request_cb()
 *    Context: Called from the IBMF context
 */
static void
tavor_agent_request_cb(ibmf_handle_t ibmf_handle, ibmf_msg_t *msgp,
    void *args)
{
	tavor_agent_handler_arg_t	*cb_args;
	tavor_agent_list_t		*curr;
	tavor_state_t			*state;
	int				status;
	int				ibmf_status;

	TAVOR_TNF_ENTER(tavor_agent_request_cb);

	curr  = (tavor_agent_list_t *)args;
	state = curr->agl_state;

	/*
	 * Allocate space to hold the callback args (for passing to the
	 * task queue).  Note: If we are unable to allocate space for the
	 * callback args here, then we just return.  But we must ensure that
	 * we call ibmf_free_msg() to free up the message.
	 */
	cb_args = (tavor_agent_handler_arg_t *)kmem_zalloc(
	    sizeof (tavor_agent_handler_arg_t), KM_NOSLEEP);
	if (cb_args == NULL) {
		ibmf_status = ibmf_free_msg(ibmf_handle, &msgp);
		if (ibmf_status != IBMF_SUCCESS) {
			TNF_PROBE_1(tavor_agent_request_cb_ibmf_free_msg_fail,
			    TAVOR_TNF_ERROR, "", tnf_uint, ibmf_status,
			    ibmf_status);
		}
		TNF_PROBE_0(tavor_agent_request_cb_kma_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_agent_request_cb);
		return;
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cb_args))

	/* Fill in the callback args */
	cb_args->ahd_ibmfhdl	= ibmf_handle;
	cb_args->ahd_ibmfmsg	= msgp;
	cb_args->ahd_agentlist	= args;

	/*
	 * Dispatch the message to the task queue.  Note: Just like above,
	 * if this request fails for any reason then make sure to free up
	 * the IBMF message and then return
	 */
	status = ddi_taskq_dispatch(state->ts_taskq_agents,
	    tavor_agent_handle_req, cb_args, DDI_NOSLEEP);
	if (status == DDI_FAILURE) {
		kmem_free(cb_args, sizeof (tavor_agent_handler_arg_t));
		ibmf_status = ibmf_free_msg(ibmf_handle, &msgp);
		if (ibmf_status != IBMF_SUCCESS) {
			TNF_PROBE_1(tavor_agent_request_cb_ibmf_free_msg_fail,
			    TAVOR_TNF_ERROR, "", tnf_uint, ibmf_status,
			    ibmf_status);
		}
		TNF_PROBE_0(tavor_agent_request_cb_taskq_fail,
		    TAVOR_TNF_ERROR, "");
	}
	TAVOR_TNF_EXIT(tavor_agent_request_cb);
}
Example #17
/*
 * attach
 */
static int
av1394_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		instance = ddi_get_instance(dip);
	av1394_inst_t	*avp;

	AV1394_TNF_ENTER(av1394_attach);

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		if ((avp = AV1394_INST2STATE(instance)) == NULL) {
			return (DDI_FAILURE);
		}
		return (av1394_cpr_resume(avp));
	default:
		AV1394_TNF_EXIT(av1394_attach);
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(av1394_statep, instance) != 0) {
		TNF_PROBE_0(av1394_attach_error_soft_state_zalloc,
		    AV1394_TNF_INST_ERROR, "");
		AV1394_TNF_EXIT(av1394_attach);
		return (DDI_FAILURE);
	}
	avp = AV1394_INST2STATE(instance);

	if (av1394_t1394_attach(avp, dip) != DDI_SUCCESS) {
		av1394_cleanup(avp, 1);
		AV1394_TNF_EXIT(av1394_attach);
		return (DDI_FAILURE);
	}

	mutex_init(&avp->av_mutex, NULL, MUTEX_DRIVER,
	    avp->av_attachinfo.iblock_cookie);

#ifndef __lock_lint
	avp->av_dip = dip;
	avp->av_instance = instance;
#endif

	if (av1394_add_events(avp) != DDI_SUCCESS) {
		av1394_cleanup(avp, 2);
		AV1394_TNF_EXIT(av1394_attach);
		return (DDI_FAILURE);
	}

	if (av1394_isoch_attach(avp) != DDI_SUCCESS) {
		av1394_cleanup(avp, 3);
		AV1394_TNF_EXIT(av1394_attach);
		return (DDI_FAILURE);
	}

	if (av1394_async_attach(avp) != DDI_SUCCESS) {
		av1394_cleanup(avp, 4);
		AV1394_TNF_EXIT(av1394_attach);
		return (DDI_FAILURE);
	}

#ifndef __lock_lint
	avp->av_dev_state = AV1394_DEV_ONLINE;
#endif

	ddi_report_dev(dip);

	AV1394_TNF_EXIT(av1394_attach);
	return (DDI_SUCCESS);
}
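The numeric argument to av1394_cleanup() records how far attach got, so cleanup can fall through from the highest completed step downward. A hedged sketch of that structure (the *_detach names and the exact per-level work are assumptions):

/* Hedged sketch of level-based cleanup (names assumed) */
static void
av1394_cleanup_sketch(av1394_inst_t *avp, int level)
{
	switch (level) {
	case 4:		/* isoch_attach succeeded; async_attach failed */
		av1394_isoch_detach(avp);
		/* FALLTHROUGH */
	case 3:		/* add_events succeeded; isoch_attach failed */
		av1394_remove_events(avp);
		/* FALLTHROUGH */
	case 2:		/* t1394_attach succeeded; add_events failed */
		av1394_t1394_detach(avp);
		mutex_destroy(&avp->av_mutex);
		/* FALLTHROUGH */
	case 1:		/* only the soft state allocation succeeded */
		ddi_soft_state_free(av1394_statep, avp->av_instance);
		break;
	}
}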
Example #18
/*
 * tavor_srq_modify()
 *    Context: Can be called only from user or kernel context.
 */
int
tavor_srq_modify(tavor_state_t *state, tavor_srqhdl_t srq, uint_t size,
    uint_t *real_size, uint_t sleepflag)
{
	tavor_qalloc_info_t	new_srqinfo, old_srqinfo;
	tavor_rsrc_t		*mtt, *mpt, *old_mtt;
	tavor_bind_info_t	bind;
	tavor_bind_info_t	old_bind;
	tavor_rsrc_pool_info_t	*rsrc_pool;
	tavor_mrhdl_t		mr;
	tavor_hw_mpt_t		mpt_entry;
	tavor_wrid_entry_t	*wre_new, *wre_old;
	uint64_t		mtt_ddrbaseaddr, mtt_addr;
	uint64_t		srq_desc_off;
	uint32_t		*buf, srq_old_bufsz;
	uint32_t		wqesz;
	uint_t			max_srq_size;
	uint_t			dma_xfer_mode, mtt_pgsize_bits;
	uint_t			srq_sync, log_srq_size, maxprot;
	uint_t			wq_location;
	int			status;
	char			*errormsg;

	TAVOR_TNF_ENTER(tavor_srq_modify);

	/*
	 * Check the "inddr" flag.  This flag tells the driver whether the
	 * SRQ's work queues should come from normal system memory or be
	 * allocated from HCA-attached local DDR memory.
	 */
	wq_location = state->ts_cfg_profile->cp_srq_wq_inddr;

	/*
	 * If size requested is larger than device capability, return
	 * Insufficient Resources
	 */
	max_srq_size = (1 << state->ts_cfg_profile->cp_log_max_srq_sz);
	if (size > max_srq_size) {
		TNF_PROBE_0(tavor_srq_modify_size_larger_than_maxsize,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_srq_modify);
		return (IBT_HCA_WR_EXCEEDED);
	}

	/*
	 * Calculate the appropriate size for the SRQ.
	 * Note:  All Tavor SRQs must be a power-of-2 in size.  Also
	 * they may not be any smaller than TAVOR_SRQ_MIN_SIZE.  This step
	 * is to round the requested size up to the next highest power-of-2
	 */
	size = max(size, TAVOR_SRQ_MIN_SIZE);
	log_srq_size = highbit(size);
	if ((size & (size - 1)) == 0) {
		log_srq_size = log_srq_size - 1;
	}

	/*
	 * Next we verify that the rounded-up size is valid (i.e. consistent
	 * with the device limits and/or software-configured limits).
	 */
	if (log_srq_size > state->ts_cfg_profile->cp_log_max_srq_sz) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_HCA_WR_EXCEEDED, "max SRQ size");
		goto srqmodify_fail;
	}

	/*
	 * Allocate the memory for newly resized Shared Receive Queue.
	 *
	 * Note: If SRQ is not user-mappable, then it may come from either
	 * kernel system memory or from HCA-attached local DDR memory.
	 *
	 * Note2: We align this queue on a pagesize boundary.  This is required
	 * to make sure that all the resulting IB addresses will start at 0,
	 * for a zero-based queue.  By making sure we are aligned on at least a
	 * page, any offset we use into our queue will be the same as it was
	 * when we allocated it at tavor_srq_alloc() time.
	 */
	wqesz = (1 << srq->srq_wq_log_wqesz);
	new_srqinfo.qa_size = (1 << log_srq_size) * wqesz;
	new_srqinfo.qa_alloc_align = PAGESIZE;
	new_srqinfo.qa_bind_align  = PAGESIZE;
	if (srq->srq_is_umap) {
		new_srqinfo.qa_location = TAVOR_QUEUE_LOCATION_USERLAND;
	} else {
		new_srqinfo.qa_location = wq_location;
	}
	status = tavor_queue_alloc(state, &new_srqinfo, sleepflag);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed srq");
		goto srqmodify_fail;
	}
	buf = (uint32_t *)new_srqinfo.qa_buf_aligned;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*buf))

	/*
	 * Allocate the memory for the new WRE list.  This will be used later
	 * when we resize the wridlist based on the new SRQ size.
	 */
	wre_new = (tavor_wrid_entry_t *)kmem_zalloc((1 << log_srq_size) *
	    sizeof (tavor_wrid_entry_t), sleepflag);
	if (wre_new == NULL) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE,
		    "failed wre_new alloc");
		goto srqmodify_fail;
	}

	/*
	 * Fill in the "bind" struct.  This struct provides the majority
	 * of the information that will be used to distinguish between an
	 * "addr" binding (as is the case here) and a "buf" binding (see
	 * below).  The "bind" struct is later passed to tavor_mr_mem_bind()
	 * which does most of the "heavy lifting" for the Tavor memory
	 * registration routines.
	 */
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(bind))
	bzero(&bind, sizeof (tavor_bind_info_t));
	bind.bi_type  = TAVOR_BINDHDL_VADDR;
	bind.bi_addr  = (uint64_t)(uintptr_t)buf;
	bind.bi_len   = new_srqinfo.qa_size;
	bind.bi_as    = NULL;
	/*
	 * Set the sleep flag first, then OR in local write access; keeping
	 * these separate avoids the ?: precedence trap that would otherwise
	 * drop IBT_MR_ENABLE_LOCAL_WRITE in the sleeping case.
	 */
	bind.bi_flags = (sleepflag == TAVOR_SLEEP) ? IBT_MR_SLEEP :
	    IBT_MR_NOSLEEP;
	bind.bi_flags |= IBT_MR_ENABLE_LOCAL_WRITE;
	if (srq->srq_is_umap) {
		bind.bi_bypass = state->ts_cfg_profile->cp_iommu_bypass;
	} else {
		if (wq_location == TAVOR_QUEUE_LOCATION_NORMAL) {
			bind.bi_bypass =
			    state->ts_cfg_profile->cp_iommu_bypass;
			dma_xfer_mode =
			    state->ts_cfg_profile->cp_streaming_consistent;
			if (dma_xfer_mode == DDI_DMA_STREAMING) {
				bind.bi_flags |= IBT_MR_NONCOHERENT;
			}
		} else {
			bind.bi_bypass = TAVOR_BINDMEM_BYPASS;
		}
	}
	status = tavor_mr_mtt_bind(state, &bind, new_srqinfo.qa_dmahdl, &mtt,
	    &mtt_pgsize_bits);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(status, "failed mtt bind");
		/* Free the new WRE list using its allocated (new) size */
		kmem_free(wre_new, (1 << log_srq_size) *
		    sizeof (tavor_wrid_entry_t));
		tavor_queue_free(state, &new_srqinfo);
		goto srqmodify_fail;
	}

	/*
	 * Calculate the offset between the kernel virtual address space
	 * and the IB virtual address space.  This will be used when
	 * posting work requests to properly initialize each WQE.
	 *
	 * Note: bind addr is zero-based (from alloc) so we calculate the
	 * correct new offset here.
	 */
	bind.bi_addr = bind.bi_addr & ((1 << mtt_pgsize_bits) - 1);
	srq_desc_off = (uint64_t)(uintptr_t)new_srqinfo.qa_buf_aligned -
	    (uint64_t)bind.bi_addr;

	/*
	 * Get the base address for the MTT table.  This will be necessary
	 * below when we are modifying the MPT entry.
	 */
	rsrc_pool = &state->ts_rsrc_hdl[TAVOR_MTT];
	mtt_ddrbaseaddr = (uint64_t)(uintptr_t)rsrc_pool->rsrc_ddr_offset;

	/*
	 * Fill in the MPT entry.  This is the final step before passing
	 * ownership of the MPT entry to the Tavor hardware.  We use all of
	 * the information collected/calculated above to fill in the
	 * requisite portions of the MPT.
	 */
	bzero(&mpt_entry, sizeof (tavor_hw_mpt_t));
	mpt_entry.reg_win_len	= bind.bi_len;
	mtt_addr = mtt_ddrbaseaddr + (mtt->tr_indx << TAVOR_MTT_SIZE_SHIFT);
	mpt_entry.mttseg_addr_h = mtt_addr >> 32;
	mpt_entry.mttseg_addr_l = mtt_addr >> 6;

	/*
	 * Now we grab the SRQ lock.  Since we will be updating the actual
	 * SRQ location and the producer/consumer indexes, we should hold
	 * the lock.
	 *
	 * We do a TAVOR_NOSLEEP here (and below), though, because we are
	 * holding the "srq_lock" and if we got raised to interrupt level
	 * by priority inversion, we would not want to block in this routine
	 * waiting for success.
	 */
	mutex_enter(&srq->srq_lock);

	/*
	 * Copy old entries to new buffer
	 */
	srq_old_bufsz = srq->srq_wq_bufsz;
	bcopy(srq->srq_wq_buf, buf, srq_old_bufsz * wqesz);

	/* Determine if later ddi_dma_sync will be necessary */
	srq_sync = TAVOR_SRQ_IS_SYNC_REQ(state, srq->srq_wqinfo);

	/* Sync entire "new" SRQ for use by hardware (if necessary) */
	if (srq_sync) {
		(void) ddi_dma_sync(bind.bi_dmahdl, 0,
		    new_srqinfo.qa_size, DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * Setup MPT information for use in the MODIFY_MPT command
	 */
	mr = srq->srq_mrhdl;
	mutex_enter(&mr->mr_lock);
	mpt = srq->srq_mrhdl->mr_mptrsrcp;

	/*
	 * MODIFY_MPT
	 *
	 * If this fails for any reason, then it is an indication that
	 * something (either in HW or SW) has gone seriously wrong.  So we
	 * print a warning message and return.
	 */
	status = tavor_modify_mpt_cmd_post(state, &mpt_entry, mpt->tr_indx,
	    TAVOR_CMD_MODIFY_MPT_RESIZESRQ, sleepflag);
	if (status != TAVOR_CMD_SUCCESS) {
		cmn_err(CE_CONT, "Tavor: MODIFY_MPT command failed: %08x\n",
		    status);
		TNF_PROBE_1(tavor_mr_common_reg_sw2hw_mpt_cmd_fail,
		    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
		TAVOR_TNF_FAIL(status, "MODIFY_MPT command failed");
		(void) tavor_mr_mtt_unbind(state, &srq->srq_mrhdl->mr_bindinfo,
		    srq->srq_mrhdl->mr_mttrsrcp);
		/* Free the new WRE list using its allocated (new) size */
		kmem_free(wre_new, (1 << log_srq_size) *
		    sizeof (tavor_wrid_entry_t));
		tavor_queue_free(state, &new_srqinfo);
		mutex_exit(&mr->mr_lock);
		mutex_exit(&srq->srq_lock);
		return (ibc_get_ci_failure(0));
	}

	/*
	 * Update the Tavor Shared Receive Queue handle with all the new
	 * information.  At the same time, save away all the necessary
	 * information for freeing up the old resources
	 */
	old_srqinfo	   = srq->srq_wqinfo;
	old_mtt		   = srq->srq_mrhdl->mr_mttrsrcp;
	bcopy(&srq->srq_mrhdl->mr_bindinfo, &old_bind,
	    sizeof (tavor_bind_info_t));

	/* Now set the new info */
	srq->srq_wqinfo	   = new_srqinfo;
	srq->srq_wq_buf	   = buf;
	srq->srq_wq_bufsz  = (1 << log_srq_size);
	bcopy(&bind, &srq->srq_mrhdl->mr_bindinfo, sizeof (tavor_bind_info_t));
	srq->srq_mrhdl->mr_mttrsrcp = mtt;
	srq->srq_desc_off  = srq_desc_off;
	srq->srq_real_sizes.srq_wr_sz = (1 << log_srq_size);

	/* Update MR mtt pagesize */
	mr->mr_logmttpgsz = mtt_pgsize_bits;
	mutex_exit(&mr->mr_lock);

#ifdef __lock_lint
	mutex_enter(&srq->srq_wrid_wql->wql_lock);
#else
	if (srq->srq_wrid_wql != NULL) {
		mutex_enter(&srq->srq_wrid_wql->wql_lock);
	}
#endif

	/*
	 * Initialize new wridlist, if needed.
	 *
	 * If a wridlist already is setup on an SRQ (the QP associated with an
	 * SRQ has moved "from_reset") then we must update this wridlist based
	 * on the new SRQ size.  We allocate the new size of Work Request ID
	 * Entries, copy over the old entries to the new list, and
	 * re-initialize the srq wridlist in non-umap case
	 */
	wre_old = NULL;
	if (srq->srq_wridlist != NULL) {
		wre_old = srq->srq_wridlist->wl_wre;

		bcopy(wre_old, wre_new, srq_old_bufsz *
		    sizeof (tavor_wrid_entry_t));

		/* Setup new sizes in wre */
		srq->srq_wridlist->wl_wre = wre_new;
		srq->srq_wridlist->wl_size = srq->srq_wq_bufsz;

		if (!srq->srq_is_umap) {
			tavor_wrid_list_srq_init(srq->srq_wridlist, srq,
			    srq_old_bufsz);
		}
	}

#ifdef __lock_lint
	mutex_exit(&srq->srq_wrid_wql->wql_lock);
#else
	if (srq->srq_wrid_wql != NULL) {
		mutex_exit(&srq->srq_wrid_wql->wql_lock);
	}
#endif

	/*
	 * If "old" SRQ was a user-mappable SRQ that is currently mmap()'d out
	 * to a user process, then we need to call devmap_devmem_remap() to
	 * invalidate the mapping to the SRQ memory.  We also need to
	 * invalidate the SRQ tracking information for the user mapping.
	 *
	 * Note: On failure, the remap really shouldn't ever happen.  So, if it
	 * does, it is an indication that something has gone seriously wrong.
	 * So we print a warning message and return error (knowing, of course,
	 * that the "old" SRQ memory will be leaked)
	 */
	if ((srq->srq_is_umap) && (srq->srq_umap_dhp != NULL)) {
		maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
		status = devmap_devmem_remap(srq->srq_umap_dhp,
		    state->ts_dip, 0, 0, srq->srq_wqinfo.qa_size, maxprot,
		    DEVMAP_MAPPING_INVALID, NULL);
		if (status != DDI_SUCCESS) {
			mutex_exit(&srq->srq_lock);
			TAVOR_WARNING(state, "failed in SRQ memory "
			    "devmap_devmem_remap()");
			/* We can, however, free the memory for old wre */
			if (wre_old != NULL) {
				kmem_free(wre_old, srq_old_bufsz *
				    sizeof (tavor_wrid_entry_t));
			}
			TAVOR_TNF_EXIT(tavor_srq_modify);
			return (ibc_get_ci_failure(0));
		}
		srq->srq_umap_dhp = (devmap_cookie_t)NULL;
	}

	/*
	 * Drop the SRQ lock now.  The only thing left to do is to free up
	 * the old resources.
	 */
	mutex_exit(&srq->srq_lock);

	/*
	 * Unbind the MTT entries.
	 */
	status = tavor_mr_mtt_unbind(state, &old_bind, old_mtt);
	if (status != DDI_SUCCESS) {
		TAVOR_WARNING(state, "failed to unbind old SRQ memory");
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(ibc_get_ci_failure(0),
		    "failed to unbind (old)");
		goto srqmodify_fail;
	}

	/* Free the memory for old wre */
	if (wre_old != NULL) {
		kmem_free(wre_old, srq_old_bufsz *
		    sizeof (tavor_wrid_entry_t));
	}

	/* Free the memory for the old SRQ */
	tavor_queue_free(state, &old_srqinfo);

	/*
	 * Fill in the return arguments (if necessary).  This includes the
	 * real new SRQ size.
	 */
	if (real_size != NULL) {
		*real_size = (1 << log_srq_size);
	}

	TAVOR_TNF_EXIT(tavor_srq_modify);
	return (DDI_SUCCESS);

srqmodify_fail:
	TNF_PROBE_1(tavor_srq_modify_fail, TAVOR_TNF_ERROR, "",
	    tnf_string, msg, errormsg);
	TAVOR_TNF_EXIT(tavor_srq_modify);
	return (status);
}
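The recurring "Set "status" and "errormsg" and goto failure" comments above rely on the TAVOR_TNF_FAIL bookkeeping macro. A hedged sketch of the shape it presumably has (the exact definition is an assumption):

/* Hedged sketch: assumed shape of the failure-bookkeeping macro */
#define	TAVOR_TNF_FAIL(stat, string)		\
	{					\
		status	 = (stat);		\
		errormsg = (string);		\
	}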
Example #19
/* ARGSUSED */
int
tavor_srq_free(tavor_state_t *state, tavor_srqhdl_t *srqhdl, uint_t sleepflag)
{
	tavor_rsrc_t		*srqc, *rsrc;
	tavor_umap_db_entry_t	*umapdb;
	uint64_t		value;
	tavor_srqhdl_t		srq;
	tavor_mrhdl_t		mr;
	tavor_pdhdl_t		pd;
	tavor_hw_srqc_t		srqc_entry;
	uint32_t		srqnum;
	uint32_t		size;
	uint_t			maxprot;
	int			status;

	TAVOR_TNF_ENTER(tavor_srq_free);

	/*
	 * Pull all the necessary information from the Tavor Shared Receive
	 * Queue handle.  This is necessary here because the resource for the
	 * SRQ handle is going to be freed up as part of this operation.
	 */
	srq	= *srqhdl;
	mutex_enter(&srq->srq_lock);
	srqc	= srq->srq_srqcrsrcp;
	rsrc	= srq->srq_rsrcp;
	pd	= srq->srq_pdhdl;
	mr	= srq->srq_mrhdl;
	srqnum	= srq->srq_srqnum;

	/*
	 * If there are work queues still associated with the SRQ, then return
	 * an error.  Otherwise, we will be holding the SRQ lock.
	 */
	if (srq->srq_refcnt != 0) {
		mutex_exit(&srq->srq_lock);
		TNF_PROBE_1(tavor_srq_free_refcnt_fail, TAVOR_TNF_ERROR, "",
		    tnf_int, refcnt, srq->srq_refcnt);
		TAVOR_TNF_EXIT(tavor_srq_free);
		return (IBT_SRQ_IN_USE);
	}

	/*
	 * If this was a user-mappable SRQ, then we need to remove its entry
	 * from the "userland resources database".  If it is also currently
	 * mmap()'d out to a user process, then we need to call
	 * devmap_devmem_remap() to remap the SRQ memory to an invalid mapping.
	 * We also need to invalidate the SRQ tracking information for the
	 * user mapping.
	 */
	if (srq->srq_is_umap) {
		status = tavor_umap_db_find(state->ts_instance, srq->srq_srqnum,
		    MLNX_UMAP_SRQMEM_RSRC, &value, TAVOR_UMAP_DB_REMOVE,
		    &umapdb);
		if (status != DDI_SUCCESS) {
			mutex_exit(&srq->srq_lock);
			TAVOR_WARNING(state, "failed to find in database");
			TAVOR_TNF_EXIT(tavor_srq_free);
			return (ibc_get_ci_failure(0));
		}
		tavor_umap_db_free(umapdb);
		if (srq->srq_umap_dhp != NULL) {
			maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
			status = devmap_devmem_remap(srq->srq_umap_dhp,
			    state->ts_dip, 0, 0, srq->srq_wqinfo.qa_size,
			    maxprot, DEVMAP_MAPPING_INVALID, NULL);
			if (status != DDI_SUCCESS) {
				mutex_exit(&srq->srq_lock);
				TAVOR_WARNING(state, "failed in SRQ memory "
				    "devmap_devmem_remap()");
				TAVOR_TNF_EXIT(tavor_srq_free);
				return (ibc_get_ci_failure(0));
			}
			srq->srq_umap_dhp = (devmap_cookie_t)NULL;
		}
	}

	/*
	 * Put NULL into the Tavor SRQNum-to-SRQHdl list.  This will allow any
	 * in-progress events to detect that the SRQ corresponding to this
	 * number has been freed.
	 */
	state->ts_srqhdl[srqc->tr_indx] = NULL;

	mutex_exit(&srq->srq_lock);
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*srq));
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*srq->srq_wridlist));

	/*
	 * Reclaim SRQC entry from hardware (using the Tavor HW2SW_SRQ
	 * firmware command).  If the ownership transfer fails for any reason,
	 * then it is an indication that something (either in HW or SW) has
	 * gone seriously wrong.
	 */
	status = tavor_cmn_ownership_cmd_post(state, HW2SW_SRQ, &srqc_entry,
	    sizeof (tavor_hw_srqc_t), srqnum, sleepflag);
	if (status != TAVOR_CMD_SUCCESS) {
		TAVOR_WARNING(state, "failed to reclaim SRQC ownership");
		cmn_err(CE_CONT, "Tavor: HW2SW_SRQ command failed: %08x\n",
		    status);
		TNF_PROBE_1(tavor_srq_free_hw2sw_srq_cmd_fail,
		    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_srq_free);
		return (IBT_FAILURE);
	}

	/*
	 * Deregister the memory for the Shared Receive Queue.  If this fails
	 * for any reason, then it is an indication that something (either
	 * in HW or SW) has gone seriously wrong.  So we print a warning
	 * message and return.
	 */
	status = tavor_mr_deregister(state, &mr, TAVOR_MR_DEREG_ALL,
	    sleepflag);
	if (status != DDI_SUCCESS) {
		TAVOR_WARNING(state, "failed to deregister SRQ memory");
		TNF_PROBE_0(tavor_srq_free_dereg_mr_fail, TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_srq_free);
		return (IBT_FAILURE);
	}

	/* Calculate the size and free the wridlist container */
	if (srq->srq_wridlist != NULL) {
		size = (srq->srq_wridlist->wl_size *
		    sizeof (tavor_wrid_entry_t));
		kmem_free(srq->srq_wridlist->wl_wre, size);
		kmem_free(srq->srq_wridlist, sizeof (tavor_wrid_list_hdr_t));

		/*
		 * Release reference to WQL; If this is the last reference,
		 * this call also has the side effect of freeing up the
		 * 'srq_wrid_wql' memory.
		 */
		tavor_wql_refcnt_dec(srq->srq_wrid_wql);
	}

	/* Free the memory for the SRQ */
	tavor_queue_free(state, &srq->srq_wqinfo);

	/* Free the Tavor SRQ Handle */
	tavor_rsrc_free(state, &rsrc);

	/* Free the SRQC entry resource */
	tavor_rsrc_free(state, &srqc);

	/* Decrement the reference count on the protection domain (PD) */
	tavor_pd_refcnt_dec(pd);

	/* Set the srqhdl pointer to NULL and return success */
	*srqhdl = NULL;

	TAVOR_TNF_EXIT(tavor_srq_free);
	return (DDI_SUCCESS);
}