Code example #1
File: nx1394.c Project: andreiw/polaris
/*
 * nx1394_define_events()
 *    Allocates event handle for the hal dip and binds event set to it.
 */
int
nx1394_define_events(s1394_hal_t *hal)
{
	int ret;

	TNF_PROBE_0_DEBUG(nx1394_define_events_enter, S1394_TNF_SL_NEXUS_STACK,
	    "");

	/* get event handle */
	ret = ndi_event_alloc_hdl(hal->halinfo.dip, hal->halinfo.hw_interrupt,
	    &hal->hal_ndi_event_hdl, NDI_SLEEP);
	if (ret != NDI_SUCCESS) {
		TNF_PROBE_1(nx1394_define_events_alloc_fail,
		    S1394_TNF_SL_NEXUS_ERROR, "", tnf_int, ret, ret);
	} else {
		/* and bind to it */
		ret = ndi_event_bind_set(hal->hal_ndi_event_hdl, &nx1394_events,
		    NDI_SLEEP);
		if (ret != NDI_SUCCESS) {
			TNF_PROBE_1(nx1394_define_events_bind_fail,
			    S1394_TNF_SL_NEXUS_ERROR, "", tnf_int, ret, ret);
			(void) ndi_event_free_hdl(hal->hal_ndi_event_hdl);
			TNF_PROBE_0_DEBUG(nx1394_define_events_exit,
			    S1394_TNF_SL_NEXUS_STACK, "");
			return (DDI_FAILURE);
		}
	}

	TNF_PROBE_0_DEBUG(nx1394_define_events_exit, S1394_TNF_SL_NEXUS_STACK,
	    "");

	return (DDI_SUCCESS);
}
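For orientation, a minimal sketch (not an excerpt from nx1394.c) of what the bound handle is typically used for afterwards: the nexus bus_ops resolve a child's event name to a cookie and later post events against it. The helper names are illustrative, and the exact NDI flag used here is an assumption.

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>

/* Illustrative helpers only -- not part of nx1394.c */
static int
example_get_eventcookie(ndi_event_hdl_t hdl, dev_info_t *child,
    char *eventname, ddi_eventcookie_t *cookiep)
{
	/* resolve the child's event name against the bound event set */
	return (ndi_event_retrieve_cookie(hdl, child, eventname, cookiep,
	    NDI_EVENT_NOPASS));
}

static int
example_post_event(ndi_event_hdl_t hdl, dev_info_t *child,
    ddi_eventcookie_t cookie, void *bus_impldata)
{
	/* run the callbacks registered for this cookie */
	return (ndi_event_run_callbacks(hdl, child, cookie, bus_impldata));
}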
Code example #2
File: s1394.c Project: andreiw/polaris
int
_init()
{
	int status;

#ifndef	NPROBE
	(void) tnf_mod_load();
#endif
	status = s1394_init();
	if (status != 0) {
		TNF_PROBE_1(_init_error, S1394_TNF_SL_ERROR, "",
		    tnf_string, msg, "s1394: failed in s1394_init");
#ifndef NPROBE
		(void) tnf_mod_unload(&s1394_modlinkage);
#endif
		return (status);
	}

	status = mod_install(&s1394_modlinkage);
	if (status != 0) {
		TNF_PROBE_1(_init_error, S1394_TNF_SL_ERROR, "",
		    tnf_string, msg, "s1394: failed in mod_install");
#ifndef NPROBE
		(void) tnf_mod_unload(&s1394_modlinkage);
#endif
	}
	return (status);
}
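The _init() above installs s1394_modlinkage, which these excerpts never show. As a reference point, here is a generic sketch of the standard Solaris module-linkage boilerplate such a module declares; the names, description string, and the choice of modlmisc (rather than modldrv) are illustrative assumptions, not the actual s1394 definitions.

#include <sys/modctl.h>

/* Illustrative only -- not the actual s1394 declarations */
static struct modlmisc example_modlmisc = {
	&mod_miscops,			/* a misc. kernel module */
	"example 1394 services layer"	/* short module description */
};

static struct modlinkage example_modlinkage = {
	MODREV_1, (void *)&example_modlmisc, NULL
};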
Code example #3
int
_init()
{
	int status;

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

#ifndef	NPROBE
	(void) tnf_mod_load();
#endif
	ibmf_statep = &ibmf_state;

	/*
	 * call ibmf_saa_init first so it can set up subnet list before being
	 * contacted with ibt_async events
	 */
	status = ibmf_saa_impl_init();
	if (status != IBMF_SUCCESS) {
		TNF_PROBE_1(_init_error, IBMF_TNF_ERROR, "", tnf_string, msg,
		    "ibmf_saa_impl_init failed");

#ifndef	NPROBE
		(void) tnf_mod_unload(&ibmf_modlinkage);
#endif
		return (EACCES);
	}



	status = ibmf_init();
	if (status != 0) {
		TNF_PROBE_1(_init_error, IBMF_TNF_ERROR, "", tnf_string, msg,
		    "ibmf_init failed");

		(void) ibmf_saa_impl_fini();

#ifndef	NPROBE
		(void) tnf_mod_unload(&ibmf_modlinkage);
#endif
		return (EACCES);
	}

	status = mod_install(&ibmf_modlinkage);
	if (status != 0) {
		TNF_PROBE_2(_init_error, IBMF_TNF_ERROR, "", tnf_string, msg,
		    "mod_install failed", tnf_uint, status, status);
#ifndef NPROBE
		(void) tnf_mod_unload(&ibmf_modlinkage);
#endif
		(void) ibmf_fini();
		ibmf_statep = (ibmf_state_t *)NULL;
	}

	return (status);
}
Code example #4
File: hci1394_isr.c Project: andreiw/polaris
/*
 * hci1394_isr_init()
 *    Get the iblock_cookie, make sure we are not using a high level interrupt,
 *    register our interrupt service routine.
 */
int
hci1394_isr_init(hci1394_state_t *soft_state)
{
	int status;


	ASSERT(soft_state != NULL);
	TNF_PROBE_0_DEBUG(hci1394_isr_init_enter, HCI1394_TNF_HAL_STACK, "");

	/* This driver does not support running at a high level interrupt */
	status = ddi_intr_hilevel(soft_state->drvinfo.di_dip, 0);
	if (status != 0) {
		TNF_PROBE_1(hci1394_isr_init_hli_fail,
		    HCI1394_TNF_HAL_ERROR, "", tnf_string, errmsg,
		    "High Level interrupts not supported");
		TNF_PROBE_0_DEBUG(hci1394_isr_init_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_FAILURE);
	}

	/* There should only be 1 1394 interrupt for an OpenHCI adapter */
	status = ddi_get_iblock_cookie(soft_state->drvinfo.di_dip, 0,
	    &soft_state->drvinfo.di_iblock_cookie);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_0(hci1394_isr_init_gic_fail,
		    HCI1394_TNF_HAL_ERROR, "");
		TNF_PROBE_0_DEBUG(hci1394_isr_init_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return (DDI_FAILURE);
	}

	TNF_PROBE_0_DEBUG(hci1394_isr_init_exit, HCI1394_TNF_HAL_STACK, "");

	return (DDI_SUCCESS);
}
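The block comment above also mentions registering the interrupt service routine, but the excerpt ends after fetching the iblock cookie. Below is a hedged sketch of how the legacy DDI registration typically follows; the function name and handler parameter are placeholders, not hci1394 code.

/*
 * Illustrative sketch only -- not an excerpt from hci1394_isr.c.  With
 * the iblock cookie gathered above, a legacy DDI driver normally
 * attaches its ISR with ddi_add_intr(9F).
 */
static int
example_isr_register(hci1394_state_t *soft_state, uint_t (*handler)(caddr_t))
{
	int status;

	status = ddi_add_intr(soft_state->drvinfo.di_dip, 0,
	    &soft_state->drvinfo.di_iblock_cookie, NULL,
	    handler, (caddr_t)soft_state);

	return ((status == DDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
}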
Code example #5
File: nx1394.c Project: andreiw/polaris
/*
 * nx1394_undefine_events()
 *    Unbinds event set bound to the hal and frees the event handle.
 */
void
nx1394_undefine_events(s1394_hal_t *hal)
{
	int ret;

	TNF_PROBE_0_DEBUG(nx1394_undefine_events_enter,
	    S1394_TNF_SL_NEXUS_STACK, "");

	ret = ndi_event_unbind_set(hal->hal_ndi_event_hdl, &nx1394_events,
	    NDI_SLEEP);
	if (ret != NDI_SUCCESS) {
		TNF_PROBE_1(nx1394_undefine_events_unbind_fail,
		    S1394_TNF_SL_NEXUS_ERROR, "", tnf_int, ret, ret);
	} else {
		ret = ndi_event_free_hdl(hal->hal_ndi_event_hdl);
		if (ret != NDI_SUCCESS) {
			TNF_PROBE_1(nx1394_undefine_events_free_hdl_fail,
			    S1394_TNF_SL_NEXUS_ERROR, "", tnf_int, ret, ret);
		}
	}

	TNF_PROBE_0_DEBUG(nx1394_undefine_events_exit,
	    S1394_TNF_SL_NEXUS_STACK, "");
}
Code example #6
File: s1394.c Project: andreiw/polaris
int
_fini()
{
	int status;

	status = mod_remove(&s1394_modlinkage);
	if (status != 0) {
		TNF_PROBE_1(_fini_error, S1394_TNF_SL_ERROR, "",
		    tnf_string, msg, "s1394: failed in mod_remove");
		return (status);
	}

	s1394_fini();
#ifndef NPROBE
	(void) tnf_mod_unload(&s1394_modlinkage);
#endif
	return (status);
}
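Alongside _init() and _fini(), a loadable module conventionally also exports an _info() entry point. The s1394 version is not part of these excerpts, but the usual boilerplate is a one-liner around mod_info(); a minimal sketch:

#include <sys/modctl.h>

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&s1394_modlinkage, modinfop));
}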
Code example #7
static int
av1394_t1394_attach(av1394_inst_t *avp, dev_info_t *dip)
{
	int	ret;

	AV1394_TNF_ENTER(av1394_t1394_attach);

	ret = t1394_attach(dip, T1394_VERSION_V1, 0, &avp->av_attachinfo,
	    &avp->av_t1394_hdl);

	if (ret != DDI_SUCCESS) {
		TNF_PROBE_1(av1394_t1394_attach_error, AV1394_TNF_INST_ERROR,
		    "", tnf_int, ret, ret);
	}

	AV1394_TNF_EXIT(av1394_t1394_attach);
	return (ret);
}
Code example #8
File: s1394_fa.c Project: andreiw/polaris
/*
 * s1394_fa_free_addr_blk()
 *    Free fixed address block.
 */
void
s1394_fa_free_addr(s1394_hal_t *hal, s1394_fa_type_t type)
{
	s1394_fa_hal_t		*falp = &hal->hal_fa[type];
	int			ret;

	TNF_PROBE_0_DEBUG(s1394_fa_free_addr_enter, S1394_TNF_SL_FA_STACK, "");

	/* Might have been freed already */
	if (falp->fal_addr_blk != NULL) {
		ret = s1394_free_addr_blk(hal, falp->fal_addr_blk);
		if (ret != DDI_SUCCESS) {
			TNF_PROBE_1(s1394_fa_free_addr_error,
			    S1394_TNF_SL_FA_STACK, "", tnf_int, ret, ret);
		}
		falp->fal_addr_blk = NULL;
	}

	TNF_PROBE_0_DEBUG(s1394_fa_free_addr_exit, S1394_TNF_SL_FA_STACK, "");
}
Code example #9
/* ARGSUSED */
static void
tavor_agent_response_cb(ibmf_handle_t ibmf_handle, ibmf_msg_t *msgp,
    void *args)
{
	int		status;

	TAVOR_TNF_ENTER(tavor_agent_response_cb);

	/*
	 * It is the responsibility of each IBMF callback recipient to free
	 * the packets that it has been given.  Now that we are in the
	 * response callback, we can be assured that it is safe to do so.
	 */
	status = ibmf_free_msg(ibmf_handle, &msgp);
	if (status != IBMF_SUCCESS) {
		TNF_PROBE_1(tavor_agent_response_cb_ibmf_free_msg_fail,
		    TAVOR_TNF_ERROR, "", tnf_uint, ibmf_status, status);
	}

	TAVOR_TNF_EXIT(tavor_agent_response_cb);
}
Code example #10
File: mkdstore.c Project: andreiw/polaris
/*
 * mkdstore <table> <nrecords> <cid> <flags> <cip> <sip> <lease> <macro>
 * <comment>
 */
main(int c, char **v)
{
	long long	cid;
	uchar_t	flags;
	struct in_addr	cip;
	struct in_addr	sip;
	int		i, j;
	char		**entries;
	uint_t		lease;
	char		*network = v[1];
	int		ct = strtol(v[2], 0L, 0L);
	char		*server;
	char		*macro;
	int		err;
	uint32_t	query;
	dn_rec_t	dn;
	dn_rec_list_t	*dncp = NULL;
	dhcp_confopt_t	*dsp = NULL;

#ifdef	DEBUG
	mallocctl(MTDEBUGPATTERN, 1);
	mallocctl(MTINITBUFFER, 1);
#endif				/* DEBUG */

	if (c == 1) {
		(void) fprintf(stderr, "/*\n * mkdstore <table> <nrecords> "
		    "<cid> <flags> <cip> <sip> <lease> <comment>\n*/");
		return (0);
	}

	cid = (c > 3) ? strtoul(v[3], 0L, 0L) : 0;
	flags = (c > 4) ? (char)strtol(v[4], 0L, 0L) : 0;
	cip.s_addr = (c > 5) ? strtoul(v[5], 0L, 0L) : 0;
	sip.s_addr = (c > 6) ? strtoul(v[6], 0L, 0L) : 0;
	lease = (c > 7) ? strtoul(v[7], 0L, 0L) : 0;
	macro = (c > 8) ? v[8] : 0;
	server = (c > 9) ? v[9] : "unknown";

	entries = (char **) malloc(ct * (sizeof (char *) * 8 + 4));

	/* Load current datastore. */
	(void) read_dsvc_conf(&dsp);
	if ((i = confopt_to_datastore(dsp, &datastore)) != DSVC_SUCCESS) {
		(void) fprintf(stderr, "Invalid datastore: %s\n",
		    dhcpsvc_errmsg(i));
		return (EINVAL);
	}
	err = open_dd(&dh, &datastore, DSVC_DHCPNETWORK, network,
	    DSVC_READ | DSVC_WRITE);

	if (err != DSVC_SUCCESS) {
		(void) fprintf(stderr, "Invalid network: %s trying create...\n",
		    dhcpsvc_errmsg(err));

		err = open_dd(&dh, &datastore, DSVC_DHCPNETWORK, network,
		    DSVC_READ | DSVC_WRITE | DSVC_CREATE);
		if (err != DSVC_SUCCESS) {
			(void) fprintf(stderr, "Can't create network: %s\n",
			    dhcpsvc_errmsg(err));
			return (err);
		}
	}
	/* XXXX: bug: currently can't get the count as advertised */
	(void) memset(&dn, '\0', sizeof (dn));
	DSVC_QINIT(query);
	err = lookup_dd(dh, B_FALSE, query, -1,
		    (const void *) &dn, (void **) &dncp, &nrecords);
	if (dncp)
		free_dd_list(dh, dncp);

	if (err != DSVC_SUCCESS) {
		(void) fprintf(stderr, "Bad nrecords: %s [%d]\n",
		    dhcpsvc_errmsg(err), nrecords);
		return (err);
	}

	for (i = 0, j = 0; i < ct; i++) {
		TNF_PROBE_1(main, "main",
			    "main%debug 'in function main'",
			    tnf_ulong, record, i);
		if (cid) {
			(void) memcpy(dn.dn_cid, &cid, sizeof (long long));
			dn.dn_cid_len = 7;
		} else {
			(void) memset(dn.dn_cid, '\0', sizeof (long long));
			dn.dn_cid_len = 1;
		}
		dn.dn_sig = 0;
		dn.dn_flags = flags;
		dn.dn_cip.s_addr = cip.s_addr;
		dn.dn_sip.s_addr = sip.s_addr;
		dn.dn_lease = lease;
		strcpy(dn.dn_macro, macro);
		strcpy(dn.dn_comment, server);
		(void) add_dd_entry(dh, &dn);
		if (cid)
			cid += 0x100;
		cip.s_addr++;

		TNF_PROBE_0(main_end, "main", "");
	}
	(void) close_dd(&dh);

	return (0);
}
Code example #11
/*
 * tavor_agent_handlers_init()
 *    Context: Only called from attach() and/or detach() path contexts
 */
int
tavor_agent_handlers_init(tavor_state_t *state)
{
	int		status;
	char		*errormsg, *rsrc_name;

	TAVOR_TNF_ENTER(tavor_agent_handlers_init);

	/* Determine if we need to register any agents with the IBMF */
	if ((state->ts_cfg_profile->cp_qp0_agents_in_fw) &&
	    (state->ts_cfg_profile->cp_qp1_agents_in_fw)) {
		TAVOR_TNF_EXIT(tavor_agent_handlers_init);
		return (DDI_SUCCESS);
	}

	/*
	 * Build a unique name for the Tavor task queue from the Tavor driver
	 * instance number and TAVOR_TASKQ_NAME
	 */
	rsrc_name = (char *)kmem_zalloc(TAVOR_RSRC_NAME_MAXLEN, KM_SLEEP);
	TAVOR_RSRC_NAME(rsrc_name, TAVOR_TASKQ_NAME);

	/* Initialize the Tavor IB management agent list */
	status = tavor_agent_list_init(state);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(DDI_FAILURE, "failed agent list init");
		goto agentsinit_fail;
	}

	/*
	 * Initialize the agent handling task queue.  Note: We set the task
	 * queue priority to the minimum system priority.  At this point this
	 * is considered acceptable because MADs are unreliable datagrams
	 * and could get lost (in general) anyway.
	 */
	state->ts_taskq_agents = ddi_taskq_create(state->ts_dip,
	    rsrc_name, TAVOR_TASKQ_NTHREADS, TASKQ_DEFAULTPRI, 0);
	if (state->ts_taskq_agents == NULL) {
		tavor_agent_list_fini(state);
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(DDI_FAILURE, "failed task queue");
		goto agentsinit_fail;
	}

	/* Now attempt to register all of the agents with the IBMF */
	status = tavor_agent_register_all(state);
	if (status != DDI_SUCCESS) {
		ddi_taskq_destroy(state->ts_taskq_agents);
		tavor_agent_list_fini(state);
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(DDI_FAILURE, "failed IBMF register");
		goto agentsinit_fail;
	}

	kmem_free(rsrc_name, TAVOR_RSRC_NAME_MAXLEN);
	TAVOR_TNF_EXIT(tavor_agent_handlers_init);
	return (DDI_SUCCESS);

agentsinit_fail:
	kmem_free(rsrc_name, TAVOR_RSRC_NAME_MAXLEN);
	TNF_PROBE_1(tavor_agent_handlers_init_fail, TAVOR_TNF_ERROR, "",
	    tnf_string, msg, errormsg);
	TAVOR_TNF_EXIT(tavor_agent_handlers_init);
	return (status);
}
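Several Tavor excerpts below pair the comment "Set 'status' and 'errormsg' and goto failure" with a TAVOR_TNF_FAIL() call. The macro's real definition lives in the driver's private headers; judging only from how it is used (the caller still performs the goto, and errormsg feeds the TNF_PROBE_1() at the common failure label), a plausible sketch is:

/*
 * Hedged sketch only -- the actual TAVOR_TNF_FAIL() definition is in the
 * Tavor driver headers and may record additional probe information.
 */
#define	TAVOR_TNF_FAIL_SKETCH(stat, string)	\
	{					\
		status	 = (stat);		\
		errormsg = (string);		\
	}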
Code example #12
File: hci1394_isr.c Project: andreiw/polaris
/*
 * hci1394_isr_self_id()
 *    Process the selfid complete interrupt.  The bus reset has completed
 *    and the 1394 HW has finished its bus enumeration.  The SW needs to
 *    see what's changed and handle any hotplug conditions.
 */
static void
hci1394_isr_self_id(hci1394_state_t *soft_state)
{
	int status;
	uint_t node_id;
	uint_t selfid_size;
	uint_t quadlet_count;
	uint_t index;
	uint32_t *selfid_buf_p;
	boolean_t selfid_error;
	boolean_t nodeid_error;
	boolean_t saw_error = B_FALSE;
	uint_t phy_status;


	ASSERT(soft_state != NULL);
	TNF_PROBE_0_DEBUG(hci1394_isr_self_id_enter, HCI1394_TNF_HAL_STACK, "");

	soft_state->drvinfo.di_stats.st_selfid_count++;

	/*
	 * check for the bizarre case that we got both a bus reset and self id
	 * complete after checking for a bus reset
	 */
	if (hci1394_state(&soft_state->drvinfo) != HCI1394_BUS_RESET) {
		hci1394_isr_bus_reset(soft_state);
	}

	/*
	 * Clear any PHY error status bits that are set.  The PHY status bits
	 * may always be set (i.e. we removed cable power) so we do not want
	 * to clear them when we handle the interrupt. We will clear them
	 * every selfid complete interrupt so worst case we will get 1 PHY event
	 * interrupt every bus reset.
	 */
	status = hci1394_ohci_phy_read(soft_state->ohci, 5, &phy_status);
	if (status != DDI_SUCCESS) {
		TNF_PROBE_0(hci1394_isr_self_id_pr_fail,
		    HCI1394_TNF_HAL_ERROR, "");
	} else {
		phy_status |= OHCI_PHY_LOOP_ERR | OHCI_PHY_PWRFAIL_ERR |
		    OHCI_PHY_TIMEOUT_ERR | OHCI_PHY_PORTEVT_ERR;
		status = hci1394_ohci_phy_write(soft_state->ohci, 5,
		    phy_status);
		if (status != DDI_SUCCESS) {
			TNF_PROBE_0(hci1394_isr_self_id_pw_fail,
			    HCI1394_TNF_HAL_ERROR, "");
		} else {
			/*
			 * Re-enable PHY interrupt. We disable the PHY interrupt
			 *  when we get one so that we do not get stuck in the
			 * ISR.
			 */
			hci1394_ohci_intr_enable(soft_state->ohci,
			    OHCI_INTR_PHY);
		}
	}

	/* See if either AT active bit is set */
	if (hci1394_ohci_at_active(soft_state->ohci) == B_TRUE) {
		TNF_PROBE_1(hci1394_isr_self_id_as_fail, HCI1394_TNF_HAL_ERROR,
		    "", tnf_string, errmsg, "AT ACTIVE still set");
		saw_error = B_TRUE;
	}

	/* Clear busReset and selfIdComplete interrupts */
	hci1394_ohci_intr_clear(soft_state->ohci, (OHCI_INTR_BUS_RESET |
	    OHCI_INTR_SELFID_CMPLT));

	/* Read node info and test for Invalid Node ID */
	hci1394_ohci_nodeid_info(soft_state->ohci, &node_id, &nodeid_error);
	if (nodeid_error == B_TRUE) {
		TNF_PROBE_1(hci1394_isr_self_id_ni_fail, HCI1394_TNF_HAL_ERROR,
		    "", tnf_string, errmsg, "saw invalid NodeID");
		saw_error = B_TRUE;
	}

	/* Sync Selfid Buffer */
	hci1394_ohci_selfid_sync(soft_state->ohci);

	/* store away selfid info */
	hci1394_ohci_selfid_info(soft_state->ohci,
	    &soft_state->drvinfo.di_gencnt, &selfid_size, &selfid_error);

	/* Test for selfid error */
	if (selfid_error == B_TRUE) {
		TNF_PROBE_1(hci1394_isr_self_id_si_fail, HCI1394_TNF_HAL_ERROR,
		    "", tnf_string, errmsg, "saw invalid SelfID");
		saw_error = B_TRUE;
	}

	/*
	 * selfid size could be 0 if a bus reset has occurred. If this occurs,
	 * we should have another selfid int coming later.
	 */
	if ((saw_error == B_FALSE) && (selfid_size == 0)) {
		TNF_PROBE_0_DEBUG(hci1394_isr_self_id_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return;
	}

	/*
	 * make sure generation count in buffer matches generation
	 * count in register.
	 */
	if (hci1394_ohci_selfid_buf_current(soft_state->ohci) == B_FALSE) {
		TNF_PROBE_0_DEBUG(hci1394_isr_self_id_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return;
	}

	/*
	 * Skip over first quadlet in selfid buffer, this is OpenHCI specific
	 * data.
	 */
	selfid_size = selfid_size - IEEE1394_QUADLET;
	quadlet_count = selfid_size >> 2;

	/* Copy selfid buffer to Services Layer buffer */
	for (index = 0; index < quadlet_count; index++) {
		hci1394_ohci_selfid_read(soft_state->ohci, index + 1,
		    &soft_state->sl_selfid_buf[index]);
	}

	/*
	 * Put our selfID info into the Services Layer's selfid buffer if we
	 * have a 1394-1995 PHY.
	 */
	if (soft_state->halinfo.phy == H1394_PHY_1995) {
		selfid_buf_p = (uint32_t *)(
		    (uintptr_t)soft_state->sl_selfid_buf +
		    (uintptr_t)selfid_size);
		status = hci1394_ohci_phy_info(soft_state->ohci,
		    &selfid_buf_p[0]);
		if (status != DDI_SUCCESS) {
			/*
			 * If we fail reading from PHY, put invalid data into
			 * the selfid buffer so the SL will reset the bus again.
			 */
			TNF_PROBE_0(hci1394_isr_self_id_pi_fail,
			    HCI1394_TNF_HAL_ERROR, "");
			selfid_buf_p[0] = 0xFFFFFFFF;
			selfid_buf_p[1] = 0xFFFFFFFF;
		} else {
			selfid_buf_p[1] = ~selfid_buf_p[0];
		}
		selfid_size = selfid_size + 8;
	}

	/* Flush out async DMA Q's */
	hci1394_async_flush(soft_state->async);

	/*
	 * Make sure generation count is still valid.  i.e. we have not gotten
	 * another bus reset since the last time we checked.  If we have gotten
	 * another bus reset, we should have another selfid interrupt coming.
	 */
	if (soft_state->drvinfo.di_gencnt !=
	    hci1394_ohci_current_busgen(soft_state->ohci)) {
		TNF_PROBE_0_DEBUG(hci1394_isr_self_id_exit,
		    HCI1394_TNF_HAL_STACK, "");
		return;
	}

	/*
	 * do whatever CSR register processing that needs to be done.
	 */
	hci1394_csr_bus_reset(soft_state->csr);

	/*
	 * do whatever management may be necessary for the CYCLE_LOST and
	 * CYCLE_INCONSISTENT interrupts.
	 */
	hci1394_isoch_error_ints_enable(soft_state);

	/*
	 * See if we saw an error.  If we did, tell the services layer that we
	 * finished selfid processing and give them an illegal selfid buffer
	 * size of 0.  The Services Layer will try to reset the bus again to
	 * see if we can recover from this problem.  It will threshold after
	 * a finite number of errors.
	 */
	if (saw_error == B_TRUE) {
		h1394_self_ids(soft_state->drvinfo.di_sl_private,
		    soft_state->sl_selfid_buf, 0, node_id,
		    soft_state->drvinfo.di_gencnt);

		/*
		 * Take ourself out of Bus Reset processing mode
		 *
		 * Set the driver state to normal. If we cannot, we have been
		 * shutdown. The only way we can get in this code is if we have
		 * a multi-processor machine and the HAL is shutdown by one
		 * processor running in base context while this interrupt
		 * handler runs in another processor. We will disable all
		 * interrupts and just return.  We shouldn't have to disable
		 * the interrupts, but we will just in case.
		 */
		status = hci1394_state_set(&soft_state->drvinfo,
		    HCI1394_NORMAL);
		if (status != DDI_SUCCESS) {
			hci1394_ohci_intr_master_disable(soft_state->ohci);
			return;
		}
	} else if (IEEE1394_NODE_NUM(node_id) != 63) {
		/*
		 * Notify services layer about self-id-complete. Don't notify
		 * the services layer if there are too many devices on the bus.
		 */
		h1394_self_ids(soft_state->drvinfo.di_sl_private,
		    soft_state->sl_selfid_buf, selfid_size,
		    node_id, soft_state->drvinfo.di_gencnt);

		/*
		 * Take ourself out of Bus Reset processing mode
		 *
		 * Set the driver state to normal. If we cannot, we have been
		 * shutdown. The only way we can get in this code is if we have
		 * a multi-processor machine and the HAL is shutdown by one
		 * processor running in base context while this interrupt
		 * handler runs in another processor. We will disable all
		 * interrupts and just return.  We shouldn't have to disable
		 * the interrupts, but we will just in case.
		 */
		status = hci1394_state_set(&soft_state->drvinfo,
		    HCI1394_NORMAL);
		if (status != DDI_SUCCESS) {
			hci1394_ohci_intr_master_disable(soft_state->ohci);
			return;
		}
	} else {
		cmn_err(CE_NOTE, "hci1394(%d): Too many devices on the 1394 "
		    "bus", soft_state->drvinfo.di_instance);
	}

	/* enable bus reset interrupt */
	hci1394_ohci_intr_enable(soft_state->ohci, OHCI_INTR_BUS_RESET);

	TNF_PROBE_0_DEBUG(hci1394_isr_self_id_exit, HCI1394_TNF_HAL_STACK, "");
}
Code example #13
/*
 * tavor_srq_modify()
 *    Context: Can be called only from user or kernel context.
 */
int
tavor_srq_modify(tavor_state_t *state, tavor_srqhdl_t srq, uint_t size,
    uint_t *real_size, uint_t sleepflag)
{
	tavor_qalloc_info_t	new_srqinfo, old_srqinfo;
	tavor_rsrc_t		*mtt, *mpt, *old_mtt;
	tavor_bind_info_t	bind;
	tavor_bind_info_t	old_bind;
	tavor_rsrc_pool_info_t	*rsrc_pool;
	tavor_mrhdl_t		mr;
	tavor_hw_mpt_t		mpt_entry;
	tavor_wrid_entry_t	*wre_new, *wre_old;
	uint64_t		mtt_ddrbaseaddr, mtt_addr;
	uint64_t		srq_desc_off;
	uint32_t		*buf, srq_old_bufsz;
	uint32_t		wqesz;
	uint_t			max_srq_size;
	uint_t			dma_xfer_mode, mtt_pgsize_bits;
	uint_t			srq_sync, log_srq_size, maxprot;
	uint_t			wq_location;
	int			status;
	char			*errormsg;

	TAVOR_TNF_ENTER(tavor_srq_modify);

	/*
	 * Check the "inddr" flag.  This flag tells the driver whether or not
	 * the SRQ's work queues should come from normal system memory or
	 * whether they should be allocated from DDR memory.
	 */
	wq_location = state->ts_cfg_profile->cp_srq_wq_inddr;

	/*
	 * If size requested is larger than device capability, return
	 * Insufficient Resources
	 */
	max_srq_size = (1 << state->ts_cfg_profile->cp_log_max_srq_sz);
	if (size > max_srq_size) {
		TNF_PROBE_0(tavor_srq_modify_size_larger_than_maxsize,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_srq_modify);
		return (IBT_HCA_WR_EXCEEDED);
	}

	/*
	 * Calculate the appropriate size for the SRQ.
	 * Note:  All Tavor SRQs must be a power-of-2 in size.  Also
	 * they may not be any smaller than TAVOR_SRQ_MIN_SIZE.  This step
	 * is to round the requested size up to the next highest power-of-2
	 */
	size = max(size, TAVOR_SRQ_MIN_SIZE);
	log_srq_size = highbit(size);
	if ((size & (size - 1)) == 0) {
		log_srq_size = log_srq_size - 1;
	}

	/*
	 * Next we verify that the rounded-up size is valid (i.e. consistent
	 * with the device limits and/or software-configured limits).
	 */
	if (log_srq_size > state->ts_cfg_profile->cp_log_max_srq_sz) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_HCA_WR_EXCEEDED, "max SRQ size");
		goto srqmodify_fail;
	}

	/*
	 * Allocate the memory for newly resized Shared Receive Queue.
	 *
	 * Note: If SRQ is not user-mappable, then it may come from either
	 * kernel system memory or from HCA-attached local DDR memory.
	 *
	 * Note2: We align this queue on a pagesize boundary.  This is required
	 * to make sure that all the resulting IB addresses will start at 0,
	 * for a zero-based queue.  By making sure we are aligned on at least a
	 * page, any offset we use into our queue will be the same as it was
	 * when we allocated it at tavor_srq_alloc() time.
	 */
	wqesz = (1 << srq->srq_wq_log_wqesz);
	new_srqinfo.qa_size = (1 << log_srq_size) * wqesz;
	new_srqinfo.qa_alloc_align = PAGESIZE;
	new_srqinfo.qa_bind_align  = PAGESIZE;
	if (srq->srq_is_umap) {
		new_srqinfo.qa_location = TAVOR_QUEUE_LOCATION_USERLAND;
	} else {
		new_srqinfo.qa_location = wq_location;
	}
	status = tavor_queue_alloc(state, &new_srqinfo, sleepflag);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed srq");
		goto srqmodify_fail;
	}
	buf = (uint32_t *)new_srqinfo.qa_buf_aligned;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*buf))

	/*
	 * Allocate the memory for the new WRE list.  This will be used later
	 * when we resize the wridlist based on the new SRQ size.
	 */
	wre_new = (tavor_wrid_entry_t *)kmem_zalloc((1 << log_srq_size) *
	    sizeof (tavor_wrid_entry_t), sleepflag);
	if (wre_new == NULL) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE,
		    "failed wre_new alloc");
		goto srqmodify_fail;
	}

	/*
	 * Fill in the "bind" struct.  This struct provides the majority
	 * of the information that will be used to distinguish between an
	 * "addr" binding (as is the case here) and a "buf" binding (see
	 * below).  The "bind" struct is later passed to tavor_mr_mem_bind()
	 * which does most of the "heavy lifting" for the Tavor memory
	 * registration routines.
	 */
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(bind))
	bzero(&bind, sizeof (tavor_bind_info_t));
	bind.bi_type  = TAVOR_BINDHDL_VADDR;
	bind.bi_addr  = (uint64_t)(uintptr_t)buf;
	bind.bi_len   = new_srqinfo.qa_size;
	bind.bi_as    = NULL;
	bind.bi_flags = sleepflag == TAVOR_SLEEP ? IBT_MR_SLEEP :
	    IBT_MR_NOSLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
	if (srq->srq_is_umap) {
		bind.bi_bypass = state->ts_cfg_profile->cp_iommu_bypass;
	} else {
		if (wq_location == TAVOR_QUEUE_LOCATION_NORMAL) {
			bind.bi_bypass =
			    state->ts_cfg_profile->cp_iommu_bypass;
			dma_xfer_mode =
			    state->ts_cfg_profile->cp_streaming_consistent;
			if (dma_xfer_mode == DDI_DMA_STREAMING) {
				bind.bi_flags |= IBT_MR_NONCOHERENT;
			}
		} else {
			bind.bi_bypass = TAVOR_BINDMEM_BYPASS;
		}
	}
	status = tavor_mr_mtt_bind(state, &bind, new_srqinfo.qa_dmahdl, &mtt,
	    &mtt_pgsize_bits);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(status, "failed mtt bind");
		kmem_free(wre_new, srq->srq_wq_bufsz *
		    sizeof (tavor_wrid_entry_t));
		tavor_queue_free(state, &new_srqinfo);
		goto srqmodify_fail;
	}

	/*
	 * Calculate the offset between the kernel virtual address space
	 * and the IB virtual address space.  This will be used when
	 * posting work requests to properly initialize each WQE.
	 *
	 * Note: bind addr is zero-based (from alloc) so we calculate the
	 * correct new offset here.
	 */
	bind.bi_addr = bind.bi_addr & ((1 << mtt_pgsize_bits) - 1);
	srq_desc_off = (uint64_t)(uintptr_t)new_srqinfo.qa_buf_aligned -
	    (uint64_t)bind.bi_addr;

	/*
	 * Get the base address for the MTT table.  This will be necessary
	 * below when we are modifying the MPT entry.
	 */
	rsrc_pool = &state->ts_rsrc_hdl[TAVOR_MTT];
	mtt_ddrbaseaddr = (uint64_t)(uintptr_t)rsrc_pool->rsrc_ddr_offset;

	/*
	 * Fill in the MPT entry.  This is the final step before passing
	 * ownership of the MPT entry to the Tavor hardware.  We use all of
	 * the information collected/calculated above to fill in the
	 * requisite portions of the MPT.
	 */
	bzero(&mpt_entry, sizeof (tavor_hw_mpt_t));
	mpt_entry.reg_win_len	= bind.bi_len;
	mtt_addr = mtt_ddrbaseaddr + (mtt->tr_indx << TAVOR_MTT_SIZE_SHIFT);
	mpt_entry.mttseg_addr_h = mtt_addr >> 32;
	mpt_entry.mttseg_addr_l = mtt_addr >> 6;

	/*
	 * Now we grab the SRQ lock.  Since we will be updating the actual
	 * SRQ location and the producer/consumer indexes, we should hold
	 * the lock.
	 *
	 * We do a TAVOR_NOSLEEP here (and below), though, because we are
	 * holding the "srq_lock" and if we got raised to interrupt level
	 * by priority inversion, we would not want to block in this routine
	 * waiting for success.
	 */
	mutex_enter(&srq->srq_lock);

	/*
	 * Copy old entries to new buffer
	 */
	srq_old_bufsz = srq->srq_wq_bufsz;
	bcopy(srq->srq_wq_buf, buf, srq_old_bufsz * wqesz);

	/* Determine if later ddi_dma_sync will be necessary */
	srq_sync = TAVOR_SRQ_IS_SYNC_REQ(state, srq->srq_wqinfo);

	/* Sync entire "new" SRQ for use by hardware (if necessary) */
	if (srq_sync) {
		(void) ddi_dma_sync(bind.bi_dmahdl, 0,
		    new_srqinfo.qa_size, DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * Setup MPT information for use in the MODIFY_MPT command
	 */
	mr = srq->srq_mrhdl;
	mutex_enter(&mr->mr_lock);
	mpt = srq->srq_mrhdl->mr_mptrsrcp;

	/*
	 * MODIFY_MPT
	 *
	 * If this fails for any reason, then it is an indication that
	 * something (either in HW or SW) has gone seriously wrong.  So we
	 * print a warning message and return.
	 */
	status = tavor_modify_mpt_cmd_post(state, &mpt_entry, mpt->tr_indx,
	    TAVOR_CMD_MODIFY_MPT_RESIZESRQ, sleepflag);
	if (status != TAVOR_CMD_SUCCESS) {
		cmn_err(CE_CONT, "Tavor: MODIFY_MPT command failed: %08x\n",
		    status);
		TNF_PROBE_1(tavor_mr_common_reg_sw2hw_mpt_cmd_fail,
		    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
		TAVOR_TNF_FAIL(status, "MODIFY_MPT command failed");
		(void) tavor_mr_mtt_unbind(state, &srq->srq_mrhdl->mr_bindinfo,
		    srq->srq_mrhdl->mr_mttrsrcp);
		kmem_free(wre_new, srq->srq_wq_bufsz *
		    sizeof (tavor_wrid_entry_t));
		tavor_queue_free(state, &new_srqinfo);
		mutex_exit(&mr->mr_lock);
		mutex_exit(&srq->srq_lock);
		return (ibc_get_ci_failure(0));
	}

	/*
	 * Update the Tavor Shared Receive Queue handle with all the new
	 * information.  At the same time, save away all the necessary
	 * information for freeing up the old resources
	 */
	old_srqinfo	   = srq->srq_wqinfo;
	old_mtt		   = srq->srq_mrhdl->mr_mttrsrcp;
	bcopy(&srq->srq_mrhdl->mr_bindinfo, &old_bind,
	    sizeof (tavor_bind_info_t));

	/* Now set the new info */
	srq->srq_wqinfo	   = new_srqinfo;
	srq->srq_wq_buf	   = buf;
	srq->srq_wq_bufsz  = (1 << log_srq_size);
	bcopy(&bind, &srq->srq_mrhdl->mr_bindinfo, sizeof (tavor_bind_info_t));
	srq->srq_mrhdl->mr_mttrsrcp = mtt;
	srq->srq_desc_off  = srq_desc_off;
	srq->srq_real_sizes.srq_wr_sz = (1 << log_srq_size);

	/* Update MR mtt pagesize */
	mr->mr_logmttpgsz = mtt_pgsize_bits;
	mutex_exit(&mr->mr_lock);

#ifdef __lock_lint
	mutex_enter(&srq->srq_wrid_wql->wql_lock);
#else
	if (srq->srq_wrid_wql != NULL) {
		mutex_enter(&srq->srq_wrid_wql->wql_lock);
	}
#endif

	/*
	 * Initialize new wridlist, if needed.
	 *
	 * If a wridlist already is setup on an SRQ (the QP associated with an
	 * SRQ has moved "from_reset") then we must update this wridlist based
	 * on the new SRQ size.  We allocate the new size of Work Request ID
	 * Entries, copy over the old entries to the new list, and
	 * re-initialize the srq wridlist in the non-umap case
	 */
	wre_old = NULL;
	if (srq->srq_wridlist != NULL) {
		wre_old = srq->srq_wridlist->wl_wre;

		bcopy(wre_old, wre_new, srq_old_bufsz *
		    sizeof (tavor_wrid_entry_t));

		/* Setup new sizes in wre */
		srq->srq_wridlist->wl_wre = wre_new;
		srq->srq_wridlist->wl_size = srq->srq_wq_bufsz;

		if (!srq->srq_is_umap) {
			tavor_wrid_list_srq_init(srq->srq_wridlist, srq,
			    srq_old_bufsz);
		}
	}

#ifdef __lock_lint
	mutex_exit(&srq->srq_wrid_wql->wql_lock);
#else
	if (srq->srq_wrid_wql != NULL) {
		mutex_exit(&srq->srq_wrid_wql->wql_lock);
	}
#endif

	/*
	 * If "old" SRQ was a user-mappable SRQ that is currently mmap()'d out
	 * to a user process, then we need to call devmap_devmem_remap() to
	 * invalidate the mapping to the SRQ memory.  We also need to
	 * invalidate the SRQ tracking information for the user mapping.
	 *
	 * Note: On failure, the remap really shouldn't ever happen.  So, if it
	 * does, it is an indication that something has gone seriously wrong.
	 * So we print a warning message and return error (knowing, of course,
	 * that the "old" SRQ memory will be leaked)
	 */
	if ((srq->srq_is_umap) && (srq->srq_umap_dhp != NULL)) {
		maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
		status = devmap_devmem_remap(srq->srq_umap_dhp,
		    state->ts_dip, 0, 0, srq->srq_wqinfo.qa_size, maxprot,
		    DEVMAP_MAPPING_INVALID, NULL);
		if (status != DDI_SUCCESS) {
			mutex_exit(&srq->srq_lock);
			TAVOR_WARNING(state, "failed in SRQ memory "
			    "devmap_devmem_remap()");
			/* We can, however, free the memory for old wre */
			if (wre_old != NULL) {
				kmem_free(wre_old, srq_old_bufsz *
				    sizeof (tavor_wrid_entry_t));
			}
			TAVOR_TNF_EXIT(tavor_srq_modify);
			return (ibc_get_ci_failure(0));
		}
		srq->srq_umap_dhp = (devmap_cookie_t)NULL;
	}

	/*
	 * Drop the SRQ lock now.  The only thing left to do is to free up
	 * the old resources.
	 */
	mutex_exit(&srq->srq_lock);

	/*
	 * Unbind the MTT entries.
	 */
	status = tavor_mr_mtt_unbind(state, &old_bind, old_mtt);
	if (status != DDI_SUCCESS) {
		TAVOR_WARNING(state, "failed to unbind old SRQ memory");
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(ibc_get_ci_failure(0),
		    "failed to unbind (old)");
		goto srqmodify_fail;
	}

	/* Free the memory for old wre */
	if (wre_old != NULL) {
		kmem_free(wre_old, srq_old_bufsz *
		    sizeof (tavor_wrid_entry_t));
	}

	/* Free the memory for the old SRQ */
	tavor_queue_free(state, &old_srqinfo);

	/*
	 * Fill in the return arguments (if necessary).  This includes the
	 * real new completion queue size.
	 */
	if (real_size != NULL) {
		*real_size = (1 << log_srq_size);
	}

	TAVOR_TNF_EXIT(tavor_srq_modify);
	return (DDI_SUCCESS);

srqmodify_fail:
	TNF_PROBE_1(tavor_srq_modify_fail, TAVOR_TNF_ERROR, "",
	    tnf_string, msg, errormsg);
	TAVOR_TNF_EXIT(tavor_srq_modify);
	return (status);
}
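Both tavor_srq_modify() above and tavor_srq_alloc() below round the requested queue size up to a power of two using highbit(). A small user-land sketch of that arithmetic, with highbit() re-implemented as a stand-in for the kernel routine (1-based index of the highest set bit):

/* Stand-in for the kernel's highbit(): 1-based index of the highest set bit */
static int
highbit_sketch(unsigned long v)
{
	int h = 0;

	while (v != 0) {
		v >>= 1;
		h++;
	}
	return (h);
}

/*
 * Mirrors the rounding logic used by the SRQ code: exact powers of two
 * are kept, everything else rounds up; e.g. 16 -> 16 and 17 -> 32.
 */
static unsigned int
round_up_srq_size(unsigned int size, unsigned int min_size)
{
	unsigned int log_size;

	size = (size > min_size) ? size : min_size;
	log_size = highbit_sketch(size);
	if ((size & (size - 1)) == 0)
		log_size--;		/* already a power of two */
	return (1U << log_size);
}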
Code example #14
/*
 * tavor_srq_alloc()
 *    Context: Can be called only from user or kernel context.
 */
int
tavor_srq_alloc(tavor_state_t *state, tavor_srq_info_t *srqinfo,
    uint_t sleepflag, tavor_srq_options_t *op)
{
	ibt_srq_hdl_t		ibt_srqhdl;
	tavor_pdhdl_t		pd;
	ibt_srq_sizes_t		*sizes;
	ibt_srq_sizes_t		*real_sizes;
	tavor_srqhdl_t		*srqhdl;
	ibt_srq_flags_t		flags;
	tavor_rsrc_t		*srqc, *rsrc;
	tavor_hw_srqc_t		srqc_entry;
	uint32_t		*buf;
	tavor_srqhdl_t		srq;
	tavor_umap_db_entry_t	*umapdb;
	ibt_mr_attr_t		mr_attr;
	tavor_mr_options_t	mr_op;
	tavor_mrhdl_t		mr;
	uint64_t		addr;
	uint64_t		value, srq_desc_off;
	uint32_t		lkey;
	uint32_t		log_srq_size;
	uint32_t		uarpg;
	uint_t			wq_location, dma_xfer_mode, srq_is_umap;
	int			flag, status;
	char			*errormsg;
	uint_t			max_sgl;
	uint_t			wqesz;

	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sizes))

	TAVOR_TNF_ENTER(tavor_srq_alloc);

	/*
	 * Check the "options" flag.  Currently this flag tells the driver
	 * whether or not the SRQ's work queues should come from normal
	 * system memory or whether they should be allocated from DDR memory.
	 */
	if (op == NULL) {
		wq_location = TAVOR_QUEUE_LOCATION_NORMAL;
	} else {
		wq_location = op->srqo_wq_loc;
	}

	/*
	 * Extract the necessary info from the tavor_srq_info_t structure
	 */
	real_sizes = srqinfo->srqi_real_sizes;
	sizes	   = srqinfo->srqi_sizes;
	pd	   = srqinfo->srqi_pd;
	ibt_srqhdl = srqinfo->srqi_ibt_srqhdl;
	flags	   = srqinfo->srqi_flags;
	srqhdl	   = srqinfo->srqi_srqhdl;

	/*
	 * Determine whether SRQ is being allocated for userland access or
	 * whether it is being allocated for kernel access.  If the SRQ is
	 * being allocated for userland access, then lookup the UAR doorbell
	 * page number for the current process.  Note:  If this is not found
	 * (e.g. if the process has not previously open()'d the Tavor driver),
	 * then an error is returned.
	 */
	srq_is_umap = (flags & IBT_SRQ_USER_MAP) ? 1 : 0;
	if (srq_is_umap) {
		status = tavor_umap_db_find(state->ts_instance, ddi_get_pid(),
		    MLNX_UMAP_UARPG_RSRC, &value, 0, NULL);
		if (status != DDI_SUCCESS) {
			/* Set "status" and "errormsg" and goto failure */
			TAVOR_TNF_FAIL(IBT_INVALID_PARAM, "failed UAR page");
			goto srqalloc_fail3;
		}
		uarpg = ((tavor_rsrc_t *)(uintptr_t)value)->tr_indx;
	}

	/* Increase PD refcnt */
	tavor_pd_refcnt_inc(pd);

	/* Allocate an SRQ context entry */
	status = tavor_rsrc_alloc(state, TAVOR_SRQC, 1, sleepflag, &srqc);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed SRQ context");
		goto srqalloc_fail1;
	}

	/* Allocate the SRQ Handle entry */
	status = tavor_rsrc_alloc(state, TAVOR_SRQHDL, 1, sleepflag, &rsrc);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed SRQ handle");
		goto srqalloc_fail2;
	}

	srq = (tavor_srqhdl_t)rsrc->tr_addr;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*srq))

	/* Calculate the SRQ number */
	tavor_srq_numcalc(state, srqc->tr_indx, &srq->srq_srqnum);

	/*
	 * If this will be a user-mappable SRQ, then allocate an entry for
	 * the "userland resources database".  This will later be added to
	 * the database (after all further SRQ operations are successful).
	 * If we fail here, we must undo the reference counts and the
	 * previous resource allocation.
	 */
	if (srq_is_umap) {
		umapdb = tavor_umap_db_alloc(state->ts_instance,
		    srq->srq_srqnum, MLNX_UMAP_SRQMEM_RSRC,
		    (uint64_t)(uintptr_t)rsrc);
		if (umapdb == NULL) {
			/* Set "status" and "errormsg" and goto failure */
			TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed umap add");
			goto srqalloc_fail3;
		}
	}

	/*
	 * Calculate the appropriate size for the SRQ.
	 * Note:  All Tavor SRQs must be a power-of-2 in size.  Also
	 * they may not be any smaller than TAVOR_SRQ_MIN_SIZE.  This step
	 * is to round the requested size up to the next highest power-of-2
	 */
	sizes->srq_wr_sz = max(sizes->srq_wr_sz, TAVOR_SRQ_MIN_SIZE);
	log_srq_size = highbit(sizes->srq_wr_sz);
	if ((sizes->srq_wr_sz & (sizes->srq_wr_sz - 1)) == 0) {
		log_srq_size = log_srq_size - 1;
	}

	/*
	 * Next we verify that the rounded-up size is valid (i.e. consistent
	 * with the device limits and/or software-configured limits).  If not,
	 * then obviously we have a lot of cleanup to do before returning.
	 */
	if (log_srq_size > state->ts_cfg_profile->cp_log_max_srq_sz) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_HCA_WR_EXCEEDED, "max SRQ size");
		goto srqalloc_fail4;
	}

	/*
	 * Next we verify that the requested number of SGL is valid (i.e.
	 * consistent with the device limits and/or software-configured
	 * limits).  If not, then obviously the same cleanup needs to be done.
	 */
	max_sgl = state->ts_cfg_profile->cp_srq_max_sgl;
	if (sizes->srq_sgl_sz > max_sgl) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_HCA_SGL_EXCEEDED, "max SRQ SGL");
		goto srqalloc_fail4;
	}

	/*
	 * Determine the SRQ's WQE sizes.  This depends on the requested
	 * number of SGLs.  Note: This also has the side-effect of
	 * calculating the real number of SGLs (for the calculated WQE size)
	 */
	tavor_srq_sgl_to_logwqesz(state, sizes->srq_sgl_sz,
	    TAVOR_QP_WQ_TYPE_RECVQ, &srq->srq_wq_log_wqesz,
	    &srq->srq_wq_sgl);

	/*
	 * Allocate the memory for SRQ work queues.  Note:  The location from
	 * which we will allocate these work queues has been passed in through
	 * the tavor_qp_options_t structure.  Since Tavor work queues are not
	 * allowed to cross a 32-bit (4GB) boundary, the alignment of the work
	 * queue memory is very important.  We used to allocate work queues
	 * (the combined receive and send queues) so that they would be aligned
	 * on their combined size.  That alignment guaranteed that they would
	 * never cross the 4GB boundary (Tavor work queues are on the order of
	 * MBs at maximum).  Now we are able to relax this alignment constraint
	 * by ensuring that the IB address assigned to the queue memory (as a
	 * result of the tavor_mr_register() call) is offset from zero.
	 * Previously, we had wanted to use the ddi_dma_mem_alloc() routine to
	 * guarantee the alignment, but when attempting to use IOMMU bypass
	 * mode we found that we were not allowed to specify any alignment that
	 * was more restrictive than the system page size.  So we avoided this
	 * constraint by passing two alignment values, one for the memory
	 * allocation itself and the other for the DMA handle (for later bind).
	 * This used to cause more memory than necessary to be allocated (in
	 * order to guarantee the more restrictive alignment constraint).  But
	 * by guaranteeing the zero-based IB virtual address for the queue, we
	 * are able to conserve this memory.
	 *
	 * Note: If SRQ is not user-mappable, then it may come from either
	 * kernel system memory or from HCA-attached local DDR memory.
	 *
	 * Note2: We align this queue on a pagesize boundary.  This is required
	 * to make sure that all the resulting IB addresses will start at 0, for
	 * a zero-based queue.  By making sure we are aligned on at least a
	 * page, any offset we use into our queue will be the same as when we
	 * perform tavor_srq_modify() operations later.
	 */
	wqesz = (1 << srq->srq_wq_log_wqesz);
	srq->srq_wqinfo.qa_size = (1 << log_srq_size) * wqesz;
	srq->srq_wqinfo.qa_alloc_align = PAGESIZE;
	srq->srq_wqinfo.qa_bind_align = PAGESIZE;
	if (srq_is_umap) {
		srq->srq_wqinfo.qa_location = TAVOR_QUEUE_LOCATION_USERLAND;
	} else {
		srq->srq_wqinfo.qa_location = wq_location;
	}
	status = tavor_queue_alloc(state, &srq->srq_wqinfo, sleepflag);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed srq");
		goto srqalloc_fail4;
	}
	buf = (uint32_t *)srq->srq_wqinfo.qa_buf_aligned;
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*buf))

	/*
	 * Register the memory for the SRQ work queues.  The memory for the SRQ
	 * must be registered in the Tavor TPT tables.  This gives us the LKey
	 * to specify in the SRQ context later.  Note: If the work queue is to
	 * be allocated from DDR memory, then only a "bypass" mapping is
	 * appropriate.  And if the SRQ memory is user-mappable, then we force
	 * DDI_DMA_CONSISTENT mapping.  Also, in order to meet the alignment
	 * restriction, we pass the "mro_bind_override_addr" flag in the call
	 * to tavor_mr_register().  This guarantees that the resulting IB vaddr
	 * will be zero-based (modulo the offset into the first page).  If we
	 * fail here, we still have the bunch of resource and reference count
	 * cleanup to do.
	 */
	flag = (sleepflag == TAVOR_SLEEP) ? IBT_MR_SLEEP :
	    IBT_MR_NOSLEEP;
	mr_attr.mr_vaddr = (uint64_t)(uintptr_t)buf;
	mr_attr.mr_len   = srq->srq_wqinfo.qa_size;
	mr_attr.mr_as    = NULL;
	mr_attr.mr_flags = flag | IBT_MR_ENABLE_LOCAL_WRITE;
	if (srq_is_umap) {
		mr_op.mro_bind_type   = state->ts_cfg_profile->cp_iommu_bypass;
	} else {
		if (wq_location == TAVOR_QUEUE_LOCATION_NORMAL) {
			mr_op.mro_bind_type =
			    state->ts_cfg_profile->cp_iommu_bypass;
			dma_xfer_mode =
			    state->ts_cfg_profile->cp_streaming_consistent;
			if (dma_xfer_mode == DDI_DMA_STREAMING) {
				mr_attr.mr_flags |= IBT_MR_NONCOHERENT;
			}
		} else {
			mr_op.mro_bind_type = TAVOR_BINDMEM_BYPASS;
		}
	}
	mr_op.mro_bind_dmahdl = srq->srq_wqinfo.qa_dmahdl;
	mr_op.mro_bind_override_addr = 1;
	status = tavor_mr_register(state, pd, &mr_attr, &mr, &mr_op);
	if (status != DDI_SUCCESS) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed register mr");
		goto srqalloc_fail5;
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
	addr = mr->mr_bindinfo.bi_addr;
	lkey = mr->mr_lkey;

	/*
	 * Calculate the offset between the kernel virtual address space
	 * and the IB virtual address space.  This will be used when
	 * posting work requests to properly initialize each WQE.
	 */
	srq_desc_off = (uint64_t)(uintptr_t)srq->srq_wqinfo.qa_buf_aligned -
	    (uint64_t)mr->mr_bindinfo.bi_addr;

	/*
	 * Create WQL and Wridlist for use by this SRQ
	 */
	srq->srq_wrid_wql = tavor_wrid_wql_create(state);
	if (srq->srq_wrid_wql == NULL) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed wql create");
		goto srqalloc_fail6;
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*(srq->srq_wrid_wql)))

	srq->srq_wridlist = tavor_wrid_get_list(1 << log_srq_size);
	if (srq->srq_wridlist == NULL) {
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_INSUFF_RESOURCE, "failed wridlist create");
		goto srqalloc_fail7;
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*(srq->srq_wridlist)))

	srq->srq_wridlist->wl_srq_en = 1;
	srq->srq_wridlist->wl_free_list_indx = -1;

	/*
	 * Fill in all the return arguments (if necessary).  This includes
	 * real queue size and real SGLs.
	 */
	if (real_sizes != NULL) {
		real_sizes->srq_wr_sz = (1 << log_srq_size);
		real_sizes->srq_sgl_sz = srq->srq_wq_sgl;
	}

	/*
	 * Fill in the SRQC entry.  This is the final step before passing
	 * ownership of the SRQC entry to the Tavor hardware.  We use all of
	 * the information collected/calculated above to fill in the
	 * requisite portions of the SRQC.  Note: If this SRQ is going to be
	 * used for userland access, then we need to set the UAR page number
	 * appropriately (otherwise it's a "don't care")
	 */
	bzero(&srqc_entry, sizeof (tavor_hw_srqc_t));
	srqc_entry.wqe_addr_h	   = (addr >> 32);
	srqc_entry.next_wqe_addr_l = 0;
	srqc_entry.ds		   = (wqesz >> 4);
	srqc_entry.state	   = TAVOR_SRQ_STATE_HW_OWNER;
	srqc_entry.pd		   = pd->pd_pdnum;
	srqc_entry.lkey		   = lkey;
	srqc_entry.wqe_cnt	   = 0;
	if (srq_is_umap) {
		srqc_entry.uar	   = uarpg;
	} else {
		srqc_entry.uar	   = 0;
	}

	/*
	 * Write the SRQC entry to hardware.  Lastly, we pass ownership of
	 * the entry to the hardware (using the Tavor SW2HW_SRQ firmware
	 * command).  Note: In general, this operation shouldn't fail.  But
	 * if it does, we have to undo everything we've done above before
	 * returning error.
	 */
	status = tavor_cmn_ownership_cmd_post(state, SW2HW_SRQ, &srqc_entry,
	    sizeof (tavor_hw_srqc_t), srq->srq_srqnum,
	    sleepflag);
	if (status != TAVOR_CMD_SUCCESS) {
		cmn_err(CE_CONT, "Tavor: SW2HW_SRQ command failed: %08x\n",
		    status);
		TNF_PROBE_1(tavor_srq_alloc_sw2hw_srq_cmd_fail,
		    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
		/* Set "status" and "errormsg" and goto failure */
		TAVOR_TNF_FAIL(IBT_FAILURE, "tavor SW2HW_SRQ command");
		goto srqalloc_fail8;
	}

	/*
	 * Fill in the rest of the Tavor SRQ handle.  We can update
	 * the following fields for use in further operations on the SRQ.
	 */
	srq->srq_srqcrsrcp = srqc;
	srq->srq_rsrcp	   = rsrc;
	srq->srq_mrhdl	   = mr;
	srq->srq_refcnt	   = 0;
	srq->srq_is_umap   = srq_is_umap;
	srq->srq_uarpg	   = (srq->srq_is_umap) ? uarpg : 0;
	srq->srq_umap_dhp  = (devmap_cookie_t)NULL;
	srq->srq_pdhdl	   = pd;
	srq->srq_wq_lastwqeindx = -1;
	srq->srq_wq_bufsz  = (1 << log_srq_size);
	srq->srq_wq_buf	   = buf;
	srq->srq_desc_off  = srq_desc_off;
	srq->srq_hdlrarg   = (void *)ibt_srqhdl;
	srq->srq_state	   = 0;
	srq->srq_real_sizes.srq_wr_sz = (1 << log_srq_size);
	srq->srq_real_sizes.srq_sgl_sz = srq->srq_wq_sgl;

	/* Determine if later ddi_dma_sync will be necessary */
	srq->srq_sync = TAVOR_SRQ_IS_SYNC_REQ(state, srq->srq_wqinfo);

	/*
	 * Put SRQ handle in Tavor SRQNum-to-SRQhdl list.  Then fill in the
	 * "srqhdl" and return success
	 */
	ASSERT(state->ts_srqhdl[srqc->tr_indx] == NULL);
	state->ts_srqhdl[srqc->tr_indx] = srq;

	/*
	 * If this is a user-mappable SRQ, then we need to insert the
	 * previously allocated entry into the "userland resources database".
	 * This will allow for later lookup during devmap() (i.e. mmap())
	 * calls.
	 */
	if (srq->srq_is_umap) {
		tavor_umap_db_add(umapdb);
	} else {
		mutex_enter(&srq->srq_wrid_wql->wql_lock);
		tavor_wrid_list_srq_init(srq->srq_wridlist, srq, 0);
		mutex_exit(&srq->srq_wrid_wql->wql_lock);
	}

	*srqhdl = srq;

	TAVOR_TNF_EXIT(tavor_srq_alloc);
	return (status);

/*
 * The following is cleanup for all possible failure cases in this routine
 */
srqalloc_fail8:
	kmem_free(srq->srq_wridlist->wl_wre, srq->srq_wridlist->wl_size *
	    sizeof (tavor_wrid_entry_t));
	kmem_free(srq->srq_wridlist, sizeof (tavor_wrid_list_hdr_t));
srqalloc_fail7:
	tavor_wql_refcnt_dec(srq->srq_wrid_wql);
srqalloc_fail6:
	if (tavor_mr_deregister(state, &mr, TAVOR_MR_DEREG_ALL,
	    TAVOR_SLEEPFLAG_FOR_CONTEXT()) != DDI_SUCCESS) {
		TAVOR_WARNING(state, "failed to deregister SRQ memory");
	}
srqalloc_fail5:
	tavor_queue_free(state, &srq->srq_wqinfo);
srqalloc_fail4:
	if (srq_is_umap) {
		tavor_umap_db_free(umapdb);
	}
srqalloc_fail3:
	tavor_rsrc_free(state, &rsrc);
srqalloc_fail2:
	tavor_rsrc_free(state, &srqc);
srqalloc_fail1:
	tavor_pd_refcnt_dec(pd);
srqalloc_fail:
	TNF_PROBE_1(tavor_srq_alloc_fail, TAVOR_TNF_ERROR, "",
	    tnf_string, msg, errormsg);
	TAVOR_TNF_EXIT(tavor_srq_alloc);
	return (status);
}
Code example #15
/* ARGSUSED */
int
tavor_srq_free(tavor_state_t *state, tavor_srqhdl_t *srqhdl, uint_t sleepflag)
{
	tavor_rsrc_t		*srqc, *rsrc;
	tavor_umap_db_entry_t	*umapdb;
	uint64_t		value;
	tavor_srqhdl_t		srq;
	tavor_mrhdl_t		mr;
	tavor_pdhdl_t		pd;
	tavor_hw_srqc_t		srqc_entry;
	uint32_t		srqnum;
	uint32_t		size;
	uint_t			maxprot;
	int			status;

	TAVOR_TNF_ENTER(tavor_srq_free);

	/*
	 * Pull all the necessary information from the Tavor Shared Receive
	 * Queue handle.  This is necessary here because the resource for the
	 * SRQ handle is going to be freed up as part of this operation.
	 */
	srq	= *srqhdl;
	mutex_enter(&srq->srq_lock);
	srqc	= srq->srq_srqcrsrcp;
	rsrc	= srq->srq_rsrcp;
	pd	= srq->srq_pdhdl;
	mr	= srq->srq_mrhdl;
	srqnum	= srq->srq_srqnum;

	/*
	 * If there are work queues still associated with the SRQ, then return
	 * an error.  Otherwise, we will be holding the SRQ lock.
	 */
	if (srq->srq_refcnt != 0) {
		mutex_exit(&srq->srq_lock);
		TNF_PROBE_1(tavor_srq_free_refcnt_fail, TAVOR_TNF_ERROR, "",
		    tnf_int, refcnt, srq->srq_refcnt);
		TAVOR_TNF_EXIT(tavor_srq_free);
		return (IBT_SRQ_IN_USE);
	}

	/*
	 * If this was a user-mappable SRQ, then we need to remove its entry
	 * from the "userland resources database".  If it is also currently
	 * mmap()'d out to a user process, then we need to call
	 * devmap_devmem_remap() to remap the SRQ memory to an invalid mapping.
	 * We also need to invalidate the SRQ tracking information for the
	 * user mapping.
	 */
	if (srq->srq_is_umap) {
		status = tavor_umap_db_find(state->ts_instance, srq->srq_srqnum,
		    MLNX_UMAP_SRQMEM_RSRC, &value, TAVOR_UMAP_DB_REMOVE,
		    &umapdb);
		if (status != DDI_SUCCESS) {
			mutex_exit(&srq->srq_lock);
			TAVOR_WARNING(state, "failed to find in database");
			TAVOR_TNF_EXIT(tavor_srq_free);
			return (ibc_get_ci_failure(0));
		}
		tavor_umap_db_free(umapdb);
		if (srq->srq_umap_dhp != NULL) {
			maxprot = (PROT_READ | PROT_WRITE | PROT_USER);
			status = devmap_devmem_remap(srq->srq_umap_dhp,
			    state->ts_dip, 0, 0, srq->srq_wqinfo.qa_size,
			    maxprot, DEVMAP_MAPPING_INVALID, NULL);
			if (status != DDI_SUCCESS) {
				mutex_exit(&srq->srq_lock);
				TAVOR_WARNING(state, "failed in SRQ memory "
				    "devmap_devmem_remap()");
				TAVOR_TNF_EXIT(tavor_srq_free);
				return (ibc_get_ci_failure(0));
			}
			srq->srq_umap_dhp = (devmap_cookie_t)NULL;
		}
	}

	/*
	 * Put NULL into the Tavor SRQNum-to-SRQHdl list.  This will allow any
	 * in-progress events to detect that the SRQ corresponding to this
	 * number has been freed.
	 */
	state->ts_srqhdl[srqc->tr_indx] = NULL;

	mutex_exit(&srq->srq_lock);
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*srq));
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*srq->srq_wridlist));

	/*
	 * Reclaim SRQC entry from hardware (using the Tavor HW2SW_SRQ
	 * firmware command).  If the ownership transfer fails for any reason,
	 * then it is an indication that something (either in HW or SW) has
	 * gone seriously wrong.
	 */
	status = tavor_cmn_ownership_cmd_post(state, HW2SW_SRQ, &srqc_entry,
	    sizeof (tavor_hw_srqc_t), srqnum, sleepflag);
	if (status != TAVOR_CMD_SUCCESS) {
		TAVOR_WARNING(state, "failed to reclaim SRQC ownership");
		cmn_err(CE_CONT, "Tavor: HW2SW_SRQ command failed: %08x\n",
		    status);
		TNF_PROBE_1(tavor_srq_free_hw2sw_srq_cmd_fail,
		    TAVOR_TNF_ERROR, "", tnf_uint, status, status);
		TAVOR_TNF_EXIT(tavor_srq_free);
		return (IBT_FAILURE);
	}

	/*
	 * Deregister the memory for the Shared Receive Queue.  If this fails
	 * for any reason, then it is an indication that something (either
	 * in HW or SW) has gone seriously wrong.  So we print a warning
	 * message and return.
	 */
	status = tavor_mr_deregister(state, &mr, TAVOR_MR_DEREG_ALL,
	    sleepflag);
	if (status != DDI_SUCCESS) {
		TAVOR_WARNING(state, "failed to deregister SRQ memory");
		TNF_PROBE_0(tavor_srq_free_dereg_mr_fail, TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_srq_free);
		return (IBT_FAILURE);
	}

	/* Calculate the size and free the wridlist container */
	if (srq->srq_wridlist != NULL) {
		size = (srq->srq_wridlist->wl_size *
		    sizeof (tavor_wrid_entry_t));
		kmem_free(srq->srq_wridlist->wl_wre, size);
		kmem_free(srq->srq_wridlist, sizeof (tavor_wrid_list_hdr_t));

		/*
		 * Release reference to WQL; If this is the last reference,
		 * this call also has the side effect of freeing up the
		 * 'srq_wrid_wql' memory.
		 */
		tavor_wql_refcnt_dec(srq->srq_wrid_wql);
	}

	/* Free the memory for the SRQ */
	tavor_queue_free(state, &srq->srq_wqinfo);

	/* Free the Tavor SRQ Handle */
	tavor_rsrc_free(state, &rsrc);

	/* Free the SRQC entry resource */
	tavor_rsrc_free(state, &srqc);

	/* Decrement the reference count on the protection domain (PD) */
	tavor_pd_refcnt_dec(pd);

	/* Set the srqhdl pointer to NULL and return success */
	*srqhdl = NULL;

	TAVOR_TNF_EXIT(tavor_srq_free);
	return (DDI_SUCCESS);
}
Code example #16
/*
 * tavor_agent_handle_req()
 *    Context: Called with priority of taskQ thread
 */
static void
tavor_agent_handle_req(void *cb_args)
{
	tavor_agent_handler_arg_t	*agent_args;
	tavor_agent_list_t		*curr;
	tavor_state_t			*state;
	ibmf_handle_t			ibmf_handle;
	ibmf_msg_t			*msgp;
	ibmf_msg_bufs_t			*recv_msgbufp;
	ibmf_msg_bufs_t			*send_msgbufp;
	ibmf_retrans_t			retrans;
	uint_t				port;
	int				status;

	TAVOR_TNF_ENTER(tavor_agent_handle_req);

	/* Extract the necessary info from the callback args parameter */
	agent_args  = (tavor_agent_handler_arg_t *)cb_args;
	ibmf_handle = agent_args->ahd_ibmfhdl;
	msgp	    = agent_args->ahd_ibmfmsg;
	curr	    = agent_args->ahd_agentlist;
	state	    = curr->agl_state;
	port	    = curr->agl_port;

	/*
	 * Set the message send buffer pointers to the message receive buffer
	 * pointers to reuse the IBMF provided buffers for the sender
	 * information.
	 */
	recv_msgbufp = &msgp->im_msgbufs_recv;
	send_msgbufp = &msgp->im_msgbufs_send;
	bcopy(recv_msgbufp, send_msgbufp, sizeof (ibmf_msg_bufs_t));

	/*
	 * Check if the incoming packet is a special "Tavor Trap" MAD.  If it
	 * is, then do the special handling.  If it isn't, then simply pass it
	 * on to the firmware and forward the response back to the IBMF.
	 *
	 * Note: Tavor has a unique method for handling internally generated
	 * Traps.  All internally detected/generated Trap messages are
	 * automatically received by the IBMF (as receive completions on QP0),
	 * which (because all Tavor Trap MADs have SLID == 0) detects it as a
	 * special "Tavor Trap" and forwards it here to the driver's SMA.
	 * It is then our responsibility here to fill in the Trap MAD's DLID
	 * for forwarding to the real Master SM (as programmed in the port's
	 * PortInfo.MasterSMLID field.)
	 */
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(msgp->im_local_addr))
	if (TAVOR_IS_SPECIAL_TRAP_MAD(msgp)) {
		msgp->im_local_addr.ia_remote_lid =
		    TAVOR_PORT_MASTERSMLID_GET(state, port - 1);
	} else {
		/*
		 * Post the command to the firmware (using the MAD_IFC
		 * command).  Note: We also reuse the command that was passed
		 * in.  We pass the pointer to the original MAD payload as if
		 * it were both the source of the incoming MAD as well as the
		 * destination for the response.  This is acceptable and saves
		 * us the step of one additional copy.  Note:  If this command
		 * fails for any reason other than TAVOR_CMD_BAD_PKT, it
		 * probably indicates a serious problem.
		 */
		status = tavor_mad_ifc_cmd_post(state, port,
		    TAVOR_CMD_SLEEP_NOSPIN,
		    (uint32_t *)recv_msgbufp->im_bufs_mad_hdr,
		    (uint32_t *)send_msgbufp->im_bufs_mad_hdr);
		if (status != TAVOR_CMD_SUCCESS) {
			if ((status != TAVOR_CMD_BAD_PKT) &&
			    (status != TAVOR_CMD_INSUFF_RSRC)) {
				cmn_err(CE_CONT, "Tavor: MAD_IFC (port %02d) "
				    "command failed: %08x\n", port, status);
				TNF_PROBE_1(tavor_agent_handle_req_madifc_fail,
				    TAVOR_TNF_ERROR, "", tnf_uint, cmd_status,
				    status);
			}

			/* finish cleanup */
			goto tavor_agent_handle_req_skip_response;
		}
	}

	/*
	 * If incoming MAD was "TrapRepress", then no response is necessary.
	 * Free the IBMF message and return.
	 */
	if (TAVOR_IS_TRAP_REPRESS_MAD(msgp)) {
		goto tavor_agent_handle_req_skip_response;
	}

	/*
	 * Modify the response MAD as necessary (for any special cases).
	 * Specifically, if this MAD was a directed route MAD, then some
	 * additional packet manipulation may be necessary because the Tavor
	 * firmware does not do all the required steps to respond to the
	 * MAD.
	 */
	tavor_agent_mad_resp_handling(state, msgp, port);

	/*
	 * Send response (or forwarded "Trap" MAD) back to IBMF.  We use the
	 * "response callback" to indicate when it is appropriate (later) to
	 * free the IBMF msg.
	 */
	status = ibmf_msg_transport(ibmf_handle, IBMF_QP_HANDLE_DEFAULT,
	    msgp, &retrans, tavor_agent_response_cb, state, 0);
	if (status != IBMF_SUCCESS) {
		TNF_PROBE_1(tavor_ibmf_send_msg_fail, TAVOR_TNF_ERROR, "",
		    tnf_uint, ibmf_status, status);
		goto tavor_agent_handle_req_skip_response;
	}

	/* Free up the callback args parameter */
	kmem_free(agent_args, sizeof (tavor_agent_handler_arg_t));
	TAVOR_TNF_EXIT(tavor_agent_handle_req);
	return;

tavor_agent_handle_req_skip_response:
	/* Free up the ibmf message */
	status = ibmf_free_msg(ibmf_handle, &msgp);
	if (status != IBMF_SUCCESS) {
		TNF_PROBE_1(tavor_agent_handle_req_ibmf_free_msg_fail,
		    TAVOR_TNF_ERROR, "", tnf_uint, ibmf_status,
		    status);
	}
	/* Free up the callback args parameter */
	kmem_free(agent_args, sizeof (tavor_agent_handler_arg_t));
	TAVOR_TNF_EXIT(tavor_agent_handle_req);
}
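
The special-trap handling above hinges on the fact that internally generated Tavor traps arrive with a source LID of zero. The fragment below is a hypothetical sketch of such a check only; it is not the actual TAVOR_IS_SPECIAL_TRAP_MAD definition, which may test additional fields.

/*
 * Hypothetical sketch -- assumes the sender's LID is carried in
 * im_local_addr.ia_remote_lid of the received IBMF message.
 */
static boolean_t
example_is_special_trap_mad(ibmf_msg_t *msgp)
{
	/* internally generated Tavor trap MADs arrive with SLID == 0 */
	return (msgp->im_local_addr.ia_remote_lid == 0 ? B_TRUE : B_FALSE);
}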
コード例 #17
0
/*
 * hci1394_ixl_intr_check_done()
 *    checks whether the context has stopped, or whether the current hardware
 *    location can be matched to an expected IXL program location.
 */
static int
hci1394_ixl_intr_check_done(hci1394_state_t *soft_statep,
    hci1394_iso_ctxt_t *ctxtp)
{
	ixl1394_command_t   *ixlp;
	hci1394_xfer_ctl_t  *xferctlp;
	uint_t		    ixldepth;
	hci1394_xfer_ctl_dma_t *dma;
	ddi_acc_handle_t    acc_hdl;
	ddi_dma_handle_t    dma_hdl;
	uint32_t	    desc_status;
	hci1394_desc_t	    *hcidescp;
	off_t		    hcidesc_off;
	int		    err;
	uint32_t	    dma_cmd_cur_loc;
	uint32_t	    dma_cmd_last_loc;
	uint32_t	    dma_loc_check_enabled;
	uint32_t	    dmastartp;
	uint32_t	    dmaendp;

	uint_t		    rem_dma_skips;
	uint16_t	    skipmode;
	uint16_t	    skipdepth;
	ixl1394_command_t   *skipdestp;
	ixl1394_command_t   *skipxferp;

	TNF_PROBE_0_DEBUG(hci1394_ixl_intr_check_done_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	/*
	 * start looking through the IXL list from the xfer start command where
	 * we last left off (for composite opcodes, need to start from the
	 * appropriate depth).
	 */

	ixlp = ctxtp->ixl_execp;
	ixldepth = ctxtp->ixl_exec_depth;

	/* control struct for xfer start IXL command */
	xferctlp = (hci1394_xfer_ctl_t *)ixlp->compiler_privatep;
	dma = &xferctlp->dma[ixldepth];

	/* determine if dma location checking is enabled */
	if ((dma_loc_check_enabled =
	    (ctxtp->ctxt_flags & HCI1394_ISO_CTXT_CMDREG)) != 0) {

		/* if so, get current dma command location */
		dma_cmd_last_loc = 0xFFFFFFFF;

		while ((dma_cmd_cur_loc = HCI1394_ISOCH_CTXT_CMD_PTR(
		    soft_statep, ctxtp)) != dma_cmd_last_loc) {

			/* retry get until location register stabilizes */
			dma_cmd_last_loc = dma_cmd_cur_loc;
		}
	}

	/*
	 * compare the (bound) address of the DMA descriptor corresponding to
	 * the current xfer IXL command against the current value in the
	 * DMA location register.  If one exists and it matches, then
	 *    if context stopped, return stopped, else return done.
	 *
	 * The dma start address is the first address of the descriptor block.
	 * Since "Z" is a count of 16-byte descriptors in the block, calculate
	 * the end address by adding Z*16 to the start addr.
	 */
	dmastartp = dma->dma_bound & ~DESC_Z_MASK;
	dmaendp = dmastartp + ((dma->dma_bound & DESC_Z_MASK) << 4);

	if (dma_loc_check_enabled &&
	    ((dma_cmd_cur_loc >= dmastartp) && (dma_cmd_cur_loc < dmaendp))) {

		if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
			    "CHECK_STOP");
			return (IXL_CHECK_STOP);
		}

		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
		    "CHECK_DONE");
		return (IXL_CHECK_DONE);
	}

	/*
	 * if receive mode:
	 */
	if ((ixlp->ixl_opcode & IXL1394_OPF_ONXMIT) == 0)  {
		/*
		 * if context stopped, return stopped, else,
		 * if there is no current dma location reg, return done
		 * else return location indeterminate
		 */
		if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
			    "CHECK_STOP");
			return (IXL_CHECK_STOP);
		}
		if (!dma_loc_check_enabled) {
			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
			    "CHECK_DONE");
			return (IXL_CHECK_DONE);
		}

		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
		    "CHECK_LOST");
		return (IXL_CHECK_LOST);
	}

	/*
	 * else is xmit mode:
	 * check status of current xfer IXL command's dma descriptor
	 */
	acc_hdl  = dma->dma_buf->bi_handle;
	dma_hdl  = dma->dma_buf->bi_dma_handle;
	hcidescp = (hci1394_desc_t *)dma->dma_descp;
	hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;

	/* Sync the descriptor before we get the status */
	err = ddi_dma_sync(dma_hdl, hcidesc_off, sizeof (hci1394_desc_t),
	    DDI_DMA_SYNC_FORCPU);
	if (err != DDI_SUCCESS) {
		TNF_PROBE_1(hci1394_ixl_intr_check_done_error,
		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
		    "dma_sync() failed");
	}
	desc_status = ddi_get32(acc_hdl, &hcidescp->status);

	if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {

		/*
		 * if status is now set here, return skipped, to cause calling
		 * function to continue, even though location hasn't changed
		 */
		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
		    "CHECK_SKIP");
		return (IXL_CHECK_SKIP);
	}

	/*
	 * At this point, we have gotten to a DMA descriptor with an empty
	 * status.  This is not enough information however to determine that
	 * we've found all processed DMA descriptors because during cycle-lost
	 * conditions, the HW will skip over some descriptors without writing
	 * status.  So we have to look ahead until we're convinced that the HW
	 * hasn't jumped ahead.
	 *
	 * Follow the IXL skip-to links until find one whose status is set
	 * or until dma location register (if any) matches an xfer IXL
	 * command's dma location or until have examined max_dma_skips
	 * IXL commands.
	 */
	rem_dma_skips = ctxtp->max_dma_skips;

	while (rem_dma_skips-- > 0) {

		/*
		 * get either IXL command specific or
		 * system default skipmode info
		 */
		skipdepth = 0;
		if (xferctlp->skipmodep != NULL) {
			skipmode  = xferctlp->skipmodep->skipmode;
			skipdestp = xferctlp->skipmodep->label;
			skipxferp = (ixl1394_command_t *)
			    xferctlp->skipmodep->compiler_privatep;
		} else {
			skipmode  = ctxtp->default_skipmode;
			skipdestp = ctxtp->default_skiplabelp;
			skipxferp = ctxtp->default_skipxferp;
		}

		switch (skipmode) {

		case IXL1394_SKIP_TO_SELF:
			/*
			 * mode is skip to self:
			 *   if context is stopped, return stopped, else
			 *   if dma location reg not enabled, return done
			 *   else, return location indeterminate
			 */
			if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
			    0) {
				TNF_PROBE_1_DEBUG(
					hci1394_ixl_intr_check_done_exit,
					HCI1394_TNF_HAL_STACK_ISOCH, "",
					tnf_string, msg, "CHECK_STOP");
				return (IXL_CHECK_STOP);
			}

			if (!dma_loc_check_enabled) {
				TNF_PROBE_1_DEBUG(
					hci1394_ixl_intr_check_done_exit,
					HCI1394_TNF_HAL_STACK_ISOCH, "",
					tnf_string, msg, "CHECK_DONE");
				return (IXL_CHECK_DONE);
			}

			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
			    "CHECK_LOST");
			return (IXL_CHECK_LOST);

		case IXL1394_SKIP_TO_NEXT:
			/*
			 * mode is skip to next:
			 *    set potential skip target to current command at
			 *    next depth
			 */
			skipdestp = ixlp;
			skipxferp = ixlp;
			skipdepth = ixldepth + 1;

			/*
			 * if we are already at the max depth for the current
			 * cmd, adjust to the next IXL command.
			 *
			 * (NOTE: next means next IXL command along execution
			 * path,  whatever IXL command it might be.  e.g. store
			 * timestamp or callback or label or jump or send... )
			 */
			if (skipdepth >= xferctlp->cnt) {
				skipdepth = 0;
				skipdestp = ixlp->next_ixlp;
				skipxferp = xferctlp->execp;
			}

			/* evaluate skip to status further, below */
			break;


		case IXL1394_SKIP_TO_LABEL:
			/*
			 * mode is skip to label:
			 *    set skip destination depth to 0 (should be
			 *    redundant)
			 */
			skipdepth = 0;

			/* evaluate skip to status further, below */
			break;

		case IXL1394_SKIP_TO_STOP:
			/*
			 * mode is skip to stop:
			 *    set all xfer and destination skip to locations to
			 *    null
			 */
			skipxferp = NULL;
			skipdestp = NULL;
			skipdepth = 0;

			/* evaluate skip to status further, below */
			break;

		} /* end switch */

		/*
		 * if no xfer IXL command follows at or after current skip-to
		 * location
		 */
		if (skipxferp == NULL) {
			/*
			 *   if context is stopped, return stopped, else
			 *   if dma location reg not enabled, return done
			 *   else, return location indeterminate
			 */
			if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
			    0) {
				TNF_PROBE_1_DEBUG(
					hci1394_ixl_intr_check_done_exit,
					HCI1394_TNF_HAL_STACK_ISOCH, "",
					tnf_string, msg, "CHECK_STOP");
				return (IXL_CHECK_STOP);
			}

			if (!dma_loc_check_enabled) {
				TNF_PROBE_1_DEBUG(
					hci1394_ixl_intr_check_done_exit,
					HCI1394_TNF_HAL_STACK_ISOCH, "",
					tnf_string, msg, "CHECK_DONE");
				return (IXL_CHECK_DONE);
			}
			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
			    "CHECK_LOST");
			return (IXL_CHECK_LOST);
		}

		/*
		 * if the skip to xfer IXL dma descriptor's status is set,
		 * then execution did skip
		 */
		xferctlp = (hci1394_xfer_ctl_t *)skipxferp->compiler_privatep;
		dma	 = &xferctlp->dma[skipdepth];
		acc_hdl  = dma->dma_buf->bi_handle;
		dma_hdl  = dma->dma_buf->bi_dma_handle;
		hcidescp = (hci1394_desc_t *)dma->dma_descp;
		hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;

		/* Sync the descriptor before we get the status */
		err = ddi_dma_sync(dma_hdl, hcidesc_off,
		    sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORCPU);
		if (err != DDI_SUCCESS) {
			TNF_PROBE_1(hci1394_ixl_intr_check_done_error,
			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
			    "dma_sync() failed");
		}
		desc_status = ddi_get32(acc_hdl, &hcidescp->status);

		if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {

			/*
			 * adjust to continue from skip to IXL command and
			 * return skipped, to have calling func continue.
			 * (Note: next IXL command may be any allowed IXL
			 * command)
			 */
			ctxtp->ixl_execp = skipdestp;
			ctxtp->ixl_exec_depth = skipdepth;

			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
			    "CHECK_SKIP");
			return (IXL_CHECK_SKIP);
		}

		/*
		 * if dma location command register checking is enabled,
		 * and the skip to xfer IXL dma location matches current
		 * dma location register value, execution did skip
		 */
		dmastartp = dma->dma_bound & ~DESC_Z_MASK;
		dmaendp = dmastartp + ((dma->dma_bound & DESC_Z_MASK) << 4);

		if (dma_loc_check_enabled && ((dma_cmd_cur_loc >= dmastartp) &&
		    (dma_cmd_cur_loc < dmaendp))) {

			/* if the context is stopped, return stopped */
			if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
			    0) {
				TNF_PROBE_1_DEBUG(
					hci1394_ixl_intr_check_done_exit,
					HCI1394_TNF_HAL_STACK_ISOCH, "",
					tnf_string, msg, "CHECK_STOP");
				return (IXL_CHECK_STOP);
			}
			/*
			 * adjust to continue from skip to IXL command and
			 * return skipped, to have calling func continue
			 * (Note: next IXL command may be any allowed IXL cmd)
			 */
			ctxtp->ixl_execp = skipdestp;
			ctxtp->ixl_exec_depth = skipdepth;

			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
			    "CHECK_SKIP");
			return (IXL_CHECK_SKIP);
		}

		/*
		 * else, advance the current working location to skipxferp and
		 * skipdepth and continue skip evaluation loop processing
		 */
		ixlp = skipxferp;
		ixldepth = skipdepth;

	} /* end while */

	/*
	 * didn't find dma status set, nor location reg match, along skip path
	 *
	 * if context is stopped, return stopped,
	 *
	 * else if no current location reg active don't change context values,
	 * just return done (no skip)
	 *
	 * else, return location indeterminate
	 */

	if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
		    "CHECK_STOP");
		return (IXL_CHECK_STOP);
	}
	if (!dma_loc_check_enabled) {
		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
		    "CHECK_DONE");
		return (IXL_CHECK_DONE);
	}

	TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg, "CHECK_LOST");
	return (IXL_CHECK_LOST);
}
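
For context, here is a minimal sketch of how a caller might act on the four return values of hci1394_ixl_intr_check_done(); the function name and control flow are assumptions, not the driver's actual interrupt loop.

static void
example_handle_check_done(hci1394_state_t *soft_statep,
    hci1394_iso_ctxt_t *ctxtp)
{
	switch (hci1394_ixl_intr_check_done(soft_statep, ctxtp)) {
	case IXL_CHECK_DONE:
		/* hardware location matches; no further IXL commands ran */
		break;
	case IXL_CHECK_SKIP:
		/* ixl_execp/ixl_exec_depth were advanced; reprocess there */
		break;
	case IXL_CHECK_STOP:
		/* the context has stopped; run stop/cleanup handling */
		break;
	case IXL_CHECK_LOST:
		/* location is indeterminate; treat the context as lost */
		break;
	}
}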
コード例 #18
0
/*
 * hci1394_ixl_interrupt
 *    main entry point (front-end) into interrupt processing.
 *    acquires mutex, checks if update in progress, sets flags accordingly,
 *    and then calls the routine that does the real interrupt processing.
 */
void
hci1394_ixl_interrupt(hci1394_state_t *soft_statep,
    hci1394_iso_ctxt_t *ctxtp, boolean_t in_stop)
{
	uint_t	status;
	int	retcode;

	TNF_PROBE_0_DEBUG(hci1394_ixl_interrupt_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	status = 1;	/* assume the context is available for processing */

	/* acquire the interrupt processing context mutex */
	mutex_enter(&ctxtp->intrprocmutex);

	/* set flag to indicate that interrupt processing is required */
	ctxtp->intr_flags |= HCI1394_ISO_CTXT_INTRSET;

	/* if update proc already in progress, let it handle intr processing */
	if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
		retcode = HCI1394_IXL_INTR_INUPDATE;
		status = 0;
		TNF_PROBE_1_DEBUG(hci1394_ixl_interrupt_error,
		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
		    "HCI1394_IXL_INTR_INUPDATE");

	} else if (ctxtp->intr_flags & HCI1394_ISO_CTXT_ININTR) {
		/* else fatal error if interrupt processing already in progress */
		retcode = HCI1394_IXL_INTR_ININTR;
		status = 0;
		TNF_PROBE_1(hci1394_ixl_interrupt_error,
		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
		    "HCI1394_IXL_INTR_ININTR");

	} else if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INCALL) {
		/* else fatal error if callback in progress flag is set */
		retcode = HCI1394_IXL_INTR_INCALL;
		status = 0;
		TNF_PROBE_1_DEBUG(hci1394_ixl_interrupt_error,
		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
		    "HCI1394_IXL_INTR_INCALL");
	} else if (!in_stop && (ctxtp->intr_flags & HCI1394_ISO_CTXT_STOP)) {
		/* context is being stopped */
		retcode = HCI1394_IXL_INTR_STOP;
		status = 0;
		TNF_PROBE_1_DEBUG(hci1394_ixl_interrupt_error,
		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
		    "HCI1394_IXL_INTR_STOP");
	}

	/*
	 * if context is available, reserve it, do interrupt processing
	 * and free it
	 */
	if (status) {
		ctxtp->intr_flags |= HCI1394_ISO_CTXT_ININTR;
		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
		mutex_exit(&ctxtp->intrprocmutex);

		retcode = hci1394_ixl_dma_sync(soft_statep, ctxtp);

		mutex_enter(&ctxtp->intrprocmutex);
		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_ININTR;

		/* notify stop thread that the interrupt is finished */
		if ((ctxtp->intr_flags & HCI1394_ISO_CTXT_STOP) && !in_stop) {
			cv_signal(&ctxtp->intr_cv);
		}
	}

	/* free the intr processing context mutex before error checks */
	mutex_exit(&ctxtp->intrprocmutex);

	/* if context stopped, invoke callback */
	if (retcode == HCI1394_IXL_INTR_DMASTOP) {
		hci1394_do_stop(soft_statep, ctxtp, B_TRUE, ID1394_DONE);
	}
	/* if error, stop and invoke callback */
	if (retcode == HCI1394_IXL_INTR_DMALOST) {
		hci1394_do_stop(soft_statep, ctxtp, B_TRUE, ID1394_FAIL);
	}

	TNF_PROBE_0_DEBUG(hci1394_ixl_interrupt_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
}
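
The cv_signal() near the end of hci1394_ixl_interrupt() implies a waiter on the stop side. The sketch below shows, under assumed naming, what that other half of the handshake could look like; it is not the driver's actual stop routine.

static void
example_wait_for_intr_to_finish(hci1394_iso_ctxt_t *ctxtp)
{
	mutex_enter(&ctxtp->intrprocmutex);

	/* tell the interrupt path that a stop is in progress */
	ctxtp->intr_flags |= HCI1394_ISO_CTXT_STOP;

	/* wait until any in-flight interrupt processing has drained */
	while (ctxtp->intr_flags & HCI1394_ISO_CTXT_ININTR) {
		/* hci1394_ixl_interrupt() signals intr_cv when it is done */
		cv_wait(&ctxtp->intr_cv, &ctxtp->intrprocmutex);
	}

	mutex_exit(&ctxtp->intrprocmutex);
}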
コード例 #19
0
ファイル: nx1394.c プロジェクト: andreiw/polaris
/*
 * nx1394_bus_ctl()
 *    This routine implements nexus bus ctl operations. Of importance are
 *    DDI_CTLOPS_REPORTDEV, DDI_CTLOPS_INITCHILD, DDI_CTLOPS_UNINITCHILD
 *    and DDI_CTLOPS_POWER. For DDI_CTLOPS_INITCHILD, it tries to look up
 *    the reg property on the child node and builds and sets the name
 *    (name is of the form GGGGGGGGGGGGGGGG[,AAAAAAAAAAAA], where
 *    GGGGGGGGGGGGGGGG is the GUID and AAAAAAAAAAAA is the optional unit
 *    address).
 */
static int
nx1394_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, void *arg,
    void *result)
{
	int status;

	TNF_PROBE_0_DEBUG(nx1394_bus_ctl_enter, S1394_TNF_SL_NEXUS_STACK, "");

	switch (op) {
	case DDI_CTLOPS_REPORTDEV: {
		dev_info_t *pdip = ddi_get_parent(rdip);
		cmn_err(CE_CONT, "?%s%d at %s%d",
		    ddi_node_name(rdip), ddi_get_instance(rdip),
		    ddi_node_name(pdip), ddi_get_instance(pdip));
		TNF_PROBE_0_DEBUG(nx1394_bus_ctl_exit, S1394_TNF_SL_NEXUS_STACK,
		    "");
		return (DDI_SUCCESS);
	}

	case DDI_CTLOPS_INITCHILD: {
		dev_info_t *ocdip, *cdip = (dev_info_t *)arg;
		dev_info_t *pdip = ddi_get_parent(cdip);
		int reglen, i;
		uint32_t *regptr;
		char addr[MAXNAMELEN];

		TNF_PROBE_1(nx1394_bus_ctl_init_child,
		    S1394_TNF_SL_HOTPLUG_STACK, "", tnf_opaque, dip, cdip);

		i = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "reg", (int **)&regptr,
		    (uint_t *)&reglen);

		if (i != DDI_PROP_SUCCESS) {
			cmn_err(CE_NOTE, "!%s(%d): \"reg\" property not found",
			    ddi_node_name(cdip), ddi_get_instance(cdip));
			TNF_PROBE_2(nx1394_bus_ctl,
			    S1394_TNF_SL_NEXUS_ERROR, "", tnf_string, msg,
			    "Reg property not found", tnf_int, reason, i);
			TNF_PROBE_1_DEBUG(nx1394_bus_ctl_exit,
			    S1394_TNF_SL_NEXUS_STACK, "", tnf_string, op,
			    "initchild");
			return (DDI_NOT_WELL_FORMED);
		}

		ASSERT(reglen != 0);

		/*
		 * addr is of the format GGGGGGGGGGGGGGGG[,AAAAAAAAAAAA]
		 */
		if (regptr[2] || regptr[3]) {
			(void) sprintf(addr, "%08x%08x,%04x%08x", regptr[0],
			    regptr[1], regptr[2], regptr[3]);
		} else {
			(void) sprintf(addr, "%08x%08x", regptr[0], regptr[1]);
		}
		ddi_prop_free(regptr);
		ddi_set_name_addr(cdip, addr);

		/*
		 * Check for a node with the same name & addr as the current
		 * node. If such a node exists, return failure.
		 */
		if ((ocdip = ndi_devi_find(pdip, ddi_node_name(cdip), addr)) !=
		    NULL && ocdip != cdip) {
			cmn_err(CE_NOTE,
			    "!%s(%d): Duplicate dev_info node found %s@%s",
			    ddi_node_name(cdip), ddi_get_instance(cdip),
			    ddi_node_name(ocdip), addr);
			TNF_PROBE_1(nx1394_bus_ctl,
			    S1394_TNF_SL_NEXUS_ERROR, "", tnf_string, msg,
			    "Duplicate nodes");
			TNF_PROBE_1_DEBUG(nx1394_bus_ctl_exit,
			    S1394_TNF_SL_NEXUS_STACK, "", tnf_string, op,
			    "initchild");
			ddi_set_name_addr(cdip, NULL);
			return (DDI_NOT_WELL_FORMED);
		}

		/*
		 * If HAL (parent dip) has "active-dma-flush" property, then
		 * add property to child as well.  Workaround for active
		 * context flushing bug in Schizo rev 2.1 and 2.2.
		 */
		if (ddi_prop_exists(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
		    "active-dma-flush") != 0) {
			status = ndi_prop_update_int(DDI_DEV_T_NONE, cdip,
			    "active-dma-flush", 1);
			if (status != NDI_SUCCESS) {
				cmn_err(CE_NOTE, "!%s(%d): Unable to add "
				    "\"active-dma-flush\" property",
				    ddi_node_name(cdip),
				    ddi_get_instance(cdip));
				TNF_PROBE_1(nx1394_bus_ctl,
				    S1394_TNF_SL_NEXUS_ERROR, "", tnf_string,
				    msg, "Unable to add \"active-dma-flush\" "
				    "property");
				TNF_PROBE_1_DEBUG(nx1394_bus_ctl_exit,
				    S1394_TNF_SL_NEXUS_STACK, "", tnf_string,
				    op, "initchild");
				ddi_set_name_addr(cdip, NULL);
				return (DDI_NOT_WELL_FORMED);
			}
		}

		TNF_PROBE_1_DEBUG(nx1394_bus_ctl_exit,
		    S1394_TNF_SL_NEXUS_STACK, "", tnf_string, op, "initchild");
		return (DDI_SUCCESS);
	}

	case DDI_CTLOPS_UNINITCHILD: {
		ddi_prop_remove_all((dev_info_t *)arg);
		ddi_set_name_addr((dev_info_t *)arg, NULL);
		TNF_PROBE_1_DEBUG(nx1394_bus_ctl_exit, S1394_TNF_SL_NEXUS_STACK,
		    "", tnf_string, op, "uninitchild");
		return (DDI_SUCCESS);
	}

	case DDI_CTLOPS_IOMIN: {
		status = ddi_ctlops(dip, rdip, op, arg, result);
		TNF_PROBE_1_DEBUG(nx1394_bus_ctl_exit, S1394_TNF_SL_NEXUS_STACK,
		    "", tnf_string, op, "iomin");
		return (status);
	}

	case DDI_CTLOPS_POWER: {
		return (DDI_SUCCESS);
	}

	/*
	 * These ops correspond to functions that "shouldn't" be called
	 * by a 1394 client driver.
	 */
	case DDI_CTLOPS_DMAPMAPC:
	case DDI_CTLOPS_REPORTINT:
	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
	case DDI_CTLOPS_SIDDEV:
	case DDI_CTLOPS_SLAVEONLY:
	case DDI_CTLOPS_AFFINITY:
	case DDI_CTLOPS_POKE:
	case DDI_CTLOPS_PEEK: {
		cmn_err(CE_CONT, "!%s(%d): invalid op (%d) from %s(%d)",
		    ddi_node_name(dip), ddi_get_instance(dip),
		    op, ddi_node_name(rdip), ddi_get_instance(rdip));
		TNF_PROBE_2(nx1394_bus_ctl, S1394_TNF_SL_NEXUS_ERROR, "",
		    tnf_string, msg, "invalid op", tnf_int, op, op);
		TNF_PROBE_0_DEBUG(nx1394_bus_ctl_exit, S1394_TNF_SL_NEXUS_STACK,
		    "");
		return (DDI_FAILURE);
	}

	/*
	 * Everything else (e.g. PTOB/BTOP/BTOPR requests) we pass up
	 */
	default: {
		status = ddi_ctlops(dip, rdip, op, arg, result);
		TNF_PROBE_0_DEBUG(nx1394_bus_ctl_exit, S1394_TNF_SL_NEXUS_STACK,
		    "");
		return (status);
	}
	}
}
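
As a concrete illustration of the address format described above, the sketch below (with hypothetical GUID and unit-offset values) shows the strings the DDI_CTLOPS_INITCHILD code would produce.

static void
example_nx1394_unit_addr(void)
{
	char addr[MAXNAMELEN];
	uint32_t reg[4] = { 0x0800461f, 0x00123456, 0x1, 0x2000 };

	/* GUID only (reg[2] == reg[3] == 0) yields "0800461f00123456" */
	/* with a unit offset it yields "0800461f00123456,000100002000" */
	if (reg[2] || reg[3]) {
		(void) sprintf(addr, "%08x%08x,%04x%08x", reg[0], reg[1],
		    reg[2], reg[3]);
	} else {
		(void) sprintf(addr, "%08x%08x", reg[0], reg[1]);
	}
}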
コード例 #20
0
/*
 * hci1394_isoch_cycle_inconsistent()
 *    Called during interrupt notification to indicate that the cycle time
 *    has changed unexpectedly.  We need to take this opportunity to
 *    update our tracking of each running transmit context's execution.
 *    cycle_inconsistent only affects transmit, so recv contexts are left alone.
 */
void
hci1394_isoch_cycle_inconsistent(hci1394_state_t *soft_statep)
{
	int i, cnt_thresh;
	boolean_t note;
	hrtime_t current_time, last_time, delta, delta_thresh;
	hci1394_iso_ctxt_t *ctxtp; 	/* current context */

	ASSERT(soft_statep);
	TNF_PROBE_0_DEBUG(hci1394_isoch_cycle_inconsistent_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	hci1394_ohci_intr_clear(soft_statep->ohci, OHCI_INTR_CYC_INCONSISTENT);

	/* grab the mutex before checking each context's INUSE and RUNNING */
	mutex_enter(&soft_statep->isoch->ctxt_list_mutex);

	/* check for transmit contexts which are inuse and running */
	for (i = 0; i < soft_statep->isoch->ctxt_xmit_count; i++) {
		ctxtp = &soft_statep->isoch->ctxt_xmit[i];

		if ((ctxtp->ctxt_flags &
		    (HCI1394_ISO_CTXT_INUSE | HCI1394_ISO_CTXT_RUNNING)) != 0) {

			mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
			hci1394_ixl_interrupt(soft_statep, ctxtp, B_FALSE);
			mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
		}
	}

	/*
	 * get the current time and calculate the delta between now and
	 * when the last interrupt was processed.  (NOTE: if the time
	 * returned by gethrtime() rolls over while we are counting these
	 * interrupts, we will incorrectly restart the counting process.
	 * However, because the probability of this happening is small and
	 * not catching the roll-over will AT MOST double the time it takes
	 * us to discover and correct from this condition, we can safely
	 * ignore it.)
	 */
	current_time = gethrtime();
	last_time = soft_statep->isoch->cycle_incon_thresh.last_intr_time;
	delta = current_time - last_time;

	/*
	 * compare the calculated delta to the delta T threshold.  If it
	 * is less than the threshold, then increment the counter.  If it
	 * is not then reset the counter.
	 */
	delta_thresh = soft_statep->isoch->cycle_incon_thresh.delta_t_thresh;
	if (delta < delta_thresh)
		soft_statep->isoch->cycle_incon_thresh.delta_t_counter++;
	else
		soft_statep->isoch->cycle_incon_thresh.delta_t_counter = 0;

	/*
	 * compare the counter to the counter threshold.  If it is greater,
	 * then disable the cycle inconsistent interrupt.
	 */
	cnt_thresh = soft_statep->isoch->cycle_incon_thresh.counter_thresh;
	note = B_FALSE;
	if (soft_statep->isoch->cycle_incon_thresh.delta_t_counter >
	    cnt_thresh) {
		hci1394_ohci_intr_disable(soft_statep->ohci,
		    OHCI_INTR_CYC_INCONSISTENT);
		note = B_TRUE;
	}

	/* save away the current time into the last_intr_time field */
	soft_statep->isoch->cycle_incon_thresh.last_intr_time = current_time;

	mutex_exit(&soft_statep->isoch->ctxt_list_mutex);

	if (note == B_TRUE) {
		cmn_err(CE_NOTE, "!hci1394(%d): cycle_inconsistent interrupt "
		    "disabled until next bus reset",
		    soft_statep->drvinfo.di_instance);
		TNF_PROBE_1(hci1394_isoch_cycle_inconsistent_error,
		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, msg,
		    "CYCLE_INCONSISTENT intr disabled until next bus reset");
	}

	TNF_PROBE_0_DEBUG(hci1394_isoch_cycle_inconsistent_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
}
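
The damping logic above is a general pattern for quieting an interrupt storm. The standalone sketch below restates it with assumed names, independent of the hci1394 soft state.

typedef struct example_storm_guard {
	hrtime_t	last_intr_time;		/* time of previous interrupt */
	hrtime_t	delta_t_thresh;		/* "too close together" bound */
	int		delta_t_counter;	/* consecutive close interrupts */
	int		counter_thresh;		/* how many before giving up */
} example_storm_guard_t;

static boolean_t
example_storm_detected(example_storm_guard_t *sp)
{
	hrtime_t now = gethrtime();

	/* count interrupts arriving closer together than the threshold */
	if ((now - sp->last_intr_time) < sp->delta_t_thresh)
		sp->delta_t_counter++;
	else
		sp->delta_t_counter = 0;

	sp->last_intr_time = now;

	/* B_TRUE tells the caller to mask the interrupt source */
	return (sp->delta_t_counter > sp->counter_thresh ? B_TRUE : B_FALSE);
}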
コード例 #21
0
/*
 * hci1394_tlabel_alloc()
 *    alloc a tlabel based on the node id. If alloc fails, we are out of
 *    tlabels for that node. See comments before set_reclaim_time() on when
 *    bad tlabels are free to be used again.
 */
int
hci1394_tlabel_alloc(hci1394_tlabel_handle_t tlabel_handle, uint_t destination,
    hci1394_tlabel_info_t *tlabel_info)
{
	uint_t node_number;
	uint_t index;
	uint64_t bad;
	uint64_t free;
	hrtime_t time;
	uint8_t last;


	ASSERT(tlabel_handle != NULL);
	ASSERT(tlabel_info != NULL);
	TNF_PROBE_0_DEBUG(hci1394_tlabel_alloc_enter,
	    HCI1394_TNF_HAL_STACK, "");

	/* copy destination into tlabel_info */
	tlabel_info->tbi_destination = destination;

	/* figure out what node we are going to */
	node_number = IEEE1394_NODE_NUM(destination);

	mutex_enter(&tlabel_handle->tb_mutex);

	/*
	 * Keep track of whether we have sent out a broadcast request and of
	 * the maximum node number we have sent to, to optimize bus reset
	 * processing.
	 */
	if (node_number == IEEE1394_BROADCAST_NODEID) {
		tlabel_handle->tb_bcast_sent = B_TRUE;
	} else if (node_number > tlabel_handle->tb_max_node) {
		tlabel_handle->tb_max_node = node_number;
	}

	/* set up local copies so we don't take up so much space :-) */
	bad = tlabel_handle->tb_bad[node_number];
	free = tlabel_handle->tb_free[node_number];
	time = tlabel_handle->tb_bad_timestamp[node_number];
	last = tlabel_handle->tb_last[node_number];

	/*
	 * If there are any bad tlabels, see if the last bad tlabel recorded for
	 * this nodeid is now good to use. If so, add all bad tlabels for that
	 * node id back into the free list
	 *
	 * NOTE: This assumes that bad tlabels are infrequent.
	 */
	if (bad != 0) {
		if (gethrtime() > time) {

			/* add the bad tlabels back into the free list */
			free |= bad;

			/* clear the bad list */
			bad = 0;

			TNF_PROBE_1(hci1394_tlabel_free_bad,
			    HCI1394_TNF_HAL_ERROR, "", tnf_uint, nodeid,
			    node_number);
		}
	}

	/*
	 * Find a free tlabel.  This will break out of the loop once it finds a
	 * tlabel.  There are a total of TLABEL_RANGE tlabels.  The alloc
	 * rotates the check so that we don't always use the same tlabel. It
	 * stores the last tlabel used in last.
	 */
	for (index = 0; index < TLABEL_RANGE; index++) {

		/* if the next tlabel to check is free */
		if ((free & ((uint64_t)1 << last)) != 0) {
			/* we are using this tlabel */
			tlabel_info->tbi_tlabel = last;

			TNF_PROBE_2_DEBUG(hci1394_tlabel_alloc,
			    HCI1394_TNF_HAL_TLABEL, "", tnf_uint, nodeid,
			    node_number, tnf_uint, alloced_tlabel,
			    tlabel_info->tbi_tlabel);

			/* take it out of the free list */
			free = free & ~((uint64_t)1 << last);

			/*
			 * increment the last count so we start checking on the
			 * next tlabel on the next alloc().  Note the rollover at
			 * TLABEL_RANGE since we only have TLABEL_RANGE tlabels.
			 */
			(last)++;
			if (last >= TLABEL_RANGE) {
				last = 0;
			}

			/* Copy the copies back */
			tlabel_handle->tb_bad[node_number] = bad;
			tlabel_handle->tb_free[node_number] = free;
			tlabel_handle->tb_bad_timestamp[node_number] = time;
			tlabel_handle->tb_last[node_number] = last;

			/* unlock the tlabel structure */
			mutex_exit(&tlabel_handle->tb_mutex);

			TNF_PROBE_0_DEBUG(hci1394_tlabel_alloc_exit,
			    HCI1394_TNF_HAL_STACK, "");
			return (DDI_SUCCESS);
		}

		/*
		 * This tlabel is not free, lets go to the next one. Note the
		 * rollover at TLABEL_RANGE since we only have TLABEL_RANGE
		 * tlabels.
		 */
		(last)++;
		if (last >= TLABEL_RANGE) {
			last = 0;
		}
	}

	/* Copy the copies back */
	tlabel_handle->tb_bad[node_number] = bad;
	tlabel_handle->tb_free[node_number] = free;
	tlabel_handle->tb_bad_timestamp[node_number] = time;
	tlabel_handle->tb_last[node_number] = last;

	mutex_exit(&tlabel_handle->tb_mutex);

	TNF_PROBE_1(hci1394_tlabel_alloc_empty, HCI1394_TNF_HAL_ERROR, "",
	    tnf_string, errmsg, "No more tlabels left to alloc");
	TNF_PROBE_0_DEBUG(hci1394_tlabel_alloc_exit, HCI1394_TNF_HAL_STACK, "");

	return (DDI_FAILURE);
}
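
Stripped of the bad-tlabel bookkeeping, the allocation scheme above is a rotating search of a 64-bit free mask. A simplified sketch, with assumed names:

static int
example_bitmask_alloc(uint64_t *freep, uint8_t *lastp, uint_t range,
    uint_t *allocedp)
{
	uint_t i;
	uint8_t candidate = *lastp;

	for (i = 0; i < range; i++) {
		if (*freep & ((uint64_t)1 << candidate)) {
			*freep &= ~((uint64_t)1 << candidate);	/* claim it */
			*allocedp = candidate;
			*lastp = (candidate + 1) % range;	/* rotate */
			return (0);
		}
		candidate = (candidate + 1) % range;
	}
	return (-1);	/* every entry is in use */
}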
コード例 #22
0
/*
 * tavor_agent_request_cb()
 *    Context: Called from the IBMF context
 */
static void
tavor_agent_request_cb(ibmf_handle_t ibmf_handle, ibmf_msg_t *msgp,
    void *args)
{
	tavor_agent_handler_arg_t	*cb_args;
	tavor_agent_list_t		*curr;
	tavor_state_t			*state;
	int				status;
	int				ibmf_status;

	TAVOR_TNF_ENTER(tavor_agent_request_cb);

	curr  = (tavor_agent_list_t *)args;
	state = curr->agl_state;

	/*
	 * Allocate space to hold the callback args (for passing to the
	 * task queue).  Note: If we are unable to allocate space for the
	 * callback args here, then we just return.  But we must ensure
	 * that we call ibmf_free_msg() to free up the message.
	 */
	cb_args = (tavor_agent_handler_arg_t *)kmem_zalloc(
	    sizeof (tavor_agent_handler_arg_t), KM_NOSLEEP);
	if (cb_args == NULL) {
		ibmf_status = ibmf_free_msg(ibmf_handle, &msgp);
		if (ibmf_status != IBMF_SUCCESS) {
			TNF_PROBE_1(tavor_agent_request_cb_ibmf_free_msg_fail,
			    TAVOR_TNF_ERROR, "", tnf_uint, ibmf_status,
			    ibmf_status);
		}
		TNF_PROBE_0(tavor_agent_request_cb_kma_fail,
		    TAVOR_TNF_ERROR, "");
		TAVOR_TNF_EXIT(tavor_agent_request_cb);
		return;
	}
	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cb_args))

	/* Fill in the callback args */
	cb_args->ahd_ibmfhdl	= ibmf_handle;
	cb_args->ahd_ibmfmsg	= msgp;
	cb_args->ahd_agentlist	= args;

	/*
	 * Dispatch the message to the task queue.  Note: Just like above,
	 * if this request fails for any reason then make sure to free up
	 * the IBMF message and then return
	 */
	status = ddi_taskq_dispatch(state->ts_taskq_agents,
	    tavor_agent_handle_req, cb_args, DDI_NOSLEEP);
	if (status == DDI_FAILURE) {
		kmem_free(cb_args, sizeof (tavor_agent_handler_arg_t));
		ibmf_status = ibmf_free_msg(ibmf_handle, &msgp);
		if (ibmf_status != IBMF_SUCCESS) {
			TNF_PROBE_1(tavor_agent_request_cb_ibmf_free_msg_fail,
			    TAVOR_TNF_ERROR, "", tnf_uint, ibmf_status,
			    ibmf_status);
		}
		TNF_PROBE_0(tavor_agent_request_cb_taskq_fail,
		    TAVOR_TNF_ERROR, "");
	}
	TAVOR_TNF_EXIT(tavor_agent_request_cb);
}
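
The taskq hand-off above pairs with a handler of type void (*)(void *). Below is a hypothetical sketch of how such a handler might unpack the callback args; the real tavor_agent_handle_req() (whose tail appears earlier in this listing) is responsible for the equivalent work plus freeing the message and the args.

static void
example_agent_handle_req(void *args)
{
	tavor_agent_handler_arg_t	*agent_args;
	tavor_agent_list_t		*curr;
	ibmf_handle_t			ibmf_handle;
	ibmf_msg_t			*msgp;

	agent_args  = (tavor_agent_handler_arg_t *)args;
	ibmf_handle = agent_args->ahd_ibmfhdl;
	msgp	    = agent_args->ahd_ibmfmsg;
	curr	    = (tavor_agent_list_t *)agent_args->ahd_agentlist;

	/*
	 * curr->agl_state and curr->agl_port identify the HCA and port;
	 * process the MAD, send any response, then free both msgp (via
	 * ibmf_free_msg()) and agent_args (via kmem_free()).
	 */
}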