Example #1
static void
openpic_send_ipi(cpuid_t target, uint32_t mesg)
{
	struct cpu_info * const ci = curcpu();
	uint32_t cpumask = 0;

	switch (target) {
		case IPI_DST_ALL:
		case IPI_DST_NOTME:
			for (u_int i = 0; i < ncpu; i++) {
				struct cpu_info * const dst_ci = cpu_lookup(i);
				if (target == IPI_DST_ALL || dst_ci != ci) {
					cpumask |= 1 << cpu_index(dst_ci);
					atomic_or_32(&dst_ci->ci_pending_ipis,
					    mesg);
				}
			}
			break;
		default: {
			struct cpu_info * const dst_ci = cpu_lookup(target);
			cpumask = 1 << cpu_index(dst_ci);
			atomic_or_32(&dst_ci->ci_pending_ipis, mesg);
			break;
		}
	}
	openpic_write(OPENPIC_IPI(cpu_index(ci), 1), cpumask);
}
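
openpic_send_ipi() above only ORs the message bits into each target CPU's ci_pending_ipis and then writes the OpenPIC IPI register. For context, the fragment below is a minimal sketch of the receive side; the function name and the per-message dispatch step are assumptions, not part of the original code.

/*
 * Hypothetical receive-side sketch: atomically fetch and clear the
 * pending-IPI word that openpic_send_ipi() ORs into, then act on each
 * message bit.  atomic_swap_32() returns the previous value.
 */
static void
openpic_ipi_intr_sketch(struct cpu_info *ci)
{
	uint32_t ipis = atomic_swap_32(&ci->ci_pending_ipis, 0);

	while (ipis != 0) {
		const int bit = ffs(ipis) - 1;

		ipis &= ~(1U << bit);
		/* dispatch the message for `bit' here (assumed helper) */
	}
}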
Example #2
uint32_t
pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
	uint32_t pending)
{
	struct intrsource ** const isbase = &pic->pic_sources[irq_base];
	struct intrsource *is;
	volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
	uint32_t ipl_mask = 0;

	if (pending == 0)
		return ipl_mask;

	KASSERT((irq_base & 31) == 0);
	
	(*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);

	atomic_or_32(ipending, pending);
	while (pending != 0) {
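		/* ffs() is 1-based; it returns 0 when no bit is set, hence the n-- check */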
		int n = ffs(pending);
		if (n-- == 0)
			break;
		is = isbase[n];
		KASSERT(is != NULL);
		KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
		pending &= ~__BIT(n);
		ipl_mask |= __BIT(is->is_ipl);
	}

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	atomic_or_32(&pic_pending_ipls, ipl_mask);
	atomic_or_32(&pic_pending_pics, __BIT(pic->pic_id));

	return ipl_mask;
}
Example #3
/*
 * Solicited frames callback area
 */
static int
fcoet_send_unsol_els_rsp_done(fcoe_frame_t *frm)
{
	fcoet_exchange_t	*xch = FRM2TFM(frm)->tfm_xch;
	fct_status_t		 fc_st;
	uint32_t		 iof;

	FCOET_EXT_LOG("fcoet_send_unsol_els_rsp_done",
	    "frm/oxid/els: %p/%x/%x",
	    frm, FRM_OXID(frm), XCH2ELS(xch)->els_req_payload[0]);
	if (xch->xch_flags & XCH_FLAG_FCT_CALLED_ABORT) {
		FCOET_RELE_XCHG(xch);
		return (FCOE_SUCCESS);
	}

	if (fcoet_clear_unsol_exchange(xch) == FCOE_FAILURE) {
		FCOET_RELE_XCHG(xch);
		return (FCOE_SUCCESS);
	}

	FCOET_RELE_XCHG(xch);
	if (XCH2ELS(xch)->els_req_payload[0] != ELS_OP_FLOGI) {
		fc_st = FCT_SUCCESS;
		iof = FCT_IOF_FCA_DONE;
		fct_send_response_done(xch->xch_cmd, fc_st, iof);
	} else {
		/*
		 * We need to update ss_link_info and flags.
		 */
		mutex_enter(&xch->xch_ss->ss_watch_mutex);
		xch->xch_ss->ss_link_info.portid =
		    xch->xch_cmd->cmd_lportid;
		xch->xch_ss->ss_link_info.port_topology =
		    PORT_TOPOLOGY_PT_TO_PT;
		if (frm->frm_eport->eport_link_speed == FCOE_PORT_SPEED_1G) {
			xch->xch_ss->ss_link_info.port_speed = PORT_SPEED_1G;
		} else if (frm->frm_eport->eport_link_speed ==
		    FCOE_PORT_SPEED_10G) {
			xch->xch_ss->ss_link_info.port_speed = PORT_SPEED_10G;
		}
		xch->xch_ss->ss_link_info.port_no_fct_flogi = 1;
		xch->xch_ss->ss_link_info.port_fca_flogi_done = 1;
		xch->xch_ss->ss_link_info.port_fct_flogi_done = 0;
		bcopy(XCH2ELS(xch)->els_req_payload + 20,
		    xch->xch_ss->ss_link_info.port_rpwwn, 8);
		bcopy(XCH2ELS(xch)->els_req_payload + 28,
		    xch->xch_ss->ss_link_info.port_rnwwn, 8);
		atomic_or_32(&xch->xch_ss->ss_flags,
		    SS_FLAG_UNSOL_FLOGI_DONE);
		atomic_or_32(&xch->xch_ss->ss_flags,
		    SS_FLAG_REPORT_TO_FCT);

		xch->xch_ss->ss_sol_flogi_state = SFS_FLOGI_ACC;
		mutex_exit(&xch->xch_ss->ss_watch_mutex);

		fct_free(xch->xch_cmd);
	}
	return (FCOE_SUCCESS);
}
Example #4
void
pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
{
	const uint32_t ipl_mask = __BIT(is->is_ipl);

	atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
	    __BIT(is->is_irq & 0x1f));

	atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
	atomic_or_32(&pic_pending_ipls, ipl_mask);
	atomic_or_32(&pic_pending_pics, __BIT(pic->pic_id));
}
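
pic_mark_pending_source() records the interrupt in three bitmaps: the per-pic IRQ word, the per-pic IPL mask, and the global pic_pending_ipls/pic_pending_pics words. The helper below is a minimal sketch of how a dispatcher can turn such an IPL mask back into the highest pending level; the function name is made up, and the real delivery loop appears in Example #14.

/*
 * Illustrative only: return the highest IPL set in a pending-IPL mask
 * like the ones ORed into above, or -1 if nothing is pending.
 */
static int
pic_highest_pending_ipl_sketch(uint32_t pending_ipls)
{
	int ipl = -1;

	while (pending_ipls != 0) {
		ipl = ffs(pending_ipls) - 1;	/* lowest remaining bit */
		pending_ipls &= ~__BIT(ipl);	/* keep looking for a higher one */
	}
	return ipl;
}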
Example #5
/*
 * Only set a flag to suggest that the device is suspended. This call
 * is not supported in NetBSD.
 */
int
dm_dev_suspend_ioctl(prop_dictionary_t dm_dict)
{
	dm_dev_t *dmv;
	const char *name, *uuid;
	uint32_t flags, minor;

	name = NULL;
	uuid = NULL;
	flags = 0;

	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_NAME, &name);
	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_UUID, &uuid);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_FLAGS, &flags);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_MINOR, &minor);

	if ((dmv = dm_dev_lookup(name, uuid, minor)) == NULL) {
		DM_REMOVE_FLAG(flags, DM_EXISTS_FLAG);
		return ENOENT;
	}
	atomic_or_32(&dmv->flags, DM_SUSPEND_FLAG);

	dm_dbg_print_flags(dmv->flags);

	prop_dictionary_set_uint32(dm_dict, DM_IOCTL_OPEN, dmv->table_head.io_cnt);
	prop_dictionary_set_uint32(dm_dict, DM_IOCTL_FLAGS, dmv->flags);
	prop_dictionary_set_uint32(dm_dict, DM_IOCTL_MINOR, dmv->minor);

	dm_dev_unbusy(dmv);

	/* Add the flag to the dictionary flags after the dmv -> dict copy */
	DM_ADD_FLAG(flags, DM_EXISTS_FLAG);

	return 0;
}
Example #6
/* register a pending event - always called with interrupts disabled */
void
xenevt_setipending(int l1, int l2)
{
	atomic_or_ulong(&xenevt_ev1, 1UL << l1);
	atomic_or_ulong(&xenevt_ev2[l1], 1UL << l2);
	atomic_or_32(&cpu_info_primary.ci_ipending, 1 << IPL_HIGH);
}
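
xenevt_setipending() records an event in a two-level bitmap: a bit in xenevt_ev1 selects a word of xenevt_ev2, and a bit in that word identifies the event. The fragment below is a hedged sketch of how such a structure can be drained; the function name and the per-event delivery step are assumptions, and the real Xen event handler is not shown here.

/*
 * Hypothetical drain-side sketch for the two-level pending bitmap set
 * up by xenevt_setipending().  Each word is grabbed and cleared with an
 * atomic swap so bits set concurrently by another CPU are not lost.
 */
static void
xenevt_process_pending_sketch(void)
{
	unsigned long l1bits = atomic_swap_ulong(&xenevt_ev1, 0);

	for (u_int l1 = 0; l1bits != 0; l1++) {
		unsigned long l2bits;

		if ((l1bits & (1UL << l1)) == 0)
			continue;
		l1bits &= ~(1UL << l1);

		l2bits = atomic_swap_ulong(&xenevt_ev2[l1], 0);
		for (u_int l2 = 0; l2bits != 0; l2++) {
			if ((l2bits & (1UL << l2)) == 0)
				continue;
			l2bits &= ~(1UL << l2);
			/* deliver event (l1, l2) here (assumed helper) */
		}
	}
}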
Example #7
/*
 * Bring the device out of the reset/quiesced state that it
 * was in when the interface was registered.
 */
int
igb_m_start(void *arg)
{
	igb_t *igb = (igb_t *)arg;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb_start(igb, B_TRUE) != IGB_SUCCESS) {
		mutex_exit(&igb->gen_lock);
		return (EIO);
	}

	atomic_or_32(&igb->igb_state, IGB_STARTED);

	mutex_exit(&igb->gen_lock);

	/*
	 * Enable and start the watchdog timer
	 */
	igb_enable_watchdog_timer(igb);

	return (0);
}
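
igb_m_start() sets the IGB_STARTED bit with atomic_or_32() under gen_lock; a stop path would clear the same bit with atomic_and_32(). The fragment below sketches that complementary pattern; it is not the driver's actual igb_m_stop(), and the teardown step is left as a comment.

/*
 * Sketch of the complementary stop path: clear a state bit that was
 * set with atomic_or_32().  Illustration only, not the driver code.
 */
static void
igb_m_stop_sketch(void *arg)
{
	igb_t *igb = (igb_t *)arg;

	mutex_enter(&igb->gen_lock);

	if (!(igb->igb_state & IGB_STARTED)) {
		mutex_exit(&igb->gen_lock);
		return;
	}

	atomic_and_32(&igb->igb_state, ~IGB_STARTED);

	/* device teardown (stopping rings, timers, etc.) would go here */

	mutex_exit(&igb->gen_lock);
}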
Example #8
/*
 * Bring the device out of the reset/quiesced state that it
 * was in when the interface was registered.
 */
int
ixgbe_m_start(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (ECANCELED);
	}

	if (ixgbe_start(ixgbe, B_TRUE) != IXGBE_SUCCESS) {
		mutex_exit(&ixgbe->gen_lock);
		return (EIO);
	}

	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Enable and start the watchdog timer
	 */
	ixgbe_enable_watchdog_timer(ixgbe);

	return (0);
}
Example #9
fct_status_t
fcoet_disable_port(fcoet_soft_state_t *ss)
{
	fct_status_t	status;

	FCOET_EXT_LOG(ss->ss_alias, "port is being disabled-%p", ss);
	/* Call fcoe function to offline the port */
	status = fcoet_logo_fabric(ss);
	ss->ss_eport->eport_ctl(ss->ss_eport, FCOE_CMD_PORT_OFFLINE, 0);
	atomic_or_32(&ss->ss_flags, SS_FLAG_PORT_DISABLED);
	return (status);
}
Example #10
/*
 * To simulate Linux behaviour more closely, switch the tables here and
 * not in dm_table_load_ioctl.
 */
int
dm_dev_resume_ioctl(prop_dictionary_t dm_dict)
{
	dm_dev_t *dmv;
	const char *name, *uuid;
	uint32_t flags, minor;

	name = NULL;
	uuid = NULL;
	flags = 0;

	/*
	 * char *xml; xml = prop_dictionary_externalize(dm_dict);
	 * printf("%s\n",xml);
	 */

	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_NAME, &name);
	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_UUID, &uuid);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_FLAGS, &flags);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_MINOR, &minor);

	/* Look up the device in the global device list */
	if ((dmv = dm_dev_lookup(name, uuid, minor)) == NULL) {
		DM_REMOVE_FLAG(flags, DM_EXISTS_FLAG);
		return ENOENT;
	}
	atomic_and_32(&dmv->flags, ~(DM_SUSPEND_FLAG | DM_INACTIVE_PRESENT_FLAG));
	atomic_or_32(&dmv->flags, DM_ACTIVE_PRESENT_FLAG);

	dm_table_switch_tables(&dmv->table_head);

	DM_ADD_FLAG(flags, DM_EXISTS_FLAG);

	dmgetproperties(dmv->diskp, &dmv->table_head);

	prop_dictionary_set_uint32(dm_dict, DM_IOCTL_OPEN, dmv->table_head.io_cnt);
	prop_dictionary_set_uint32(dm_dict, DM_IOCTL_FLAGS, flags);
	prop_dictionary_set_uint32(dm_dict, DM_IOCTL_MINOR, dmv->minor);

	dm_dev_unbusy(dmv);

	/* Destroy inactive table after resume. */
	dm_table_destroy(&dmv->table_head, DM_TABLE_INACTIVE);

	return 0;
}
Example #11
static int
i40e_m_start(void *arg)
{
	i40e_t *i40e = arg;
	int rc = 0;

	mutex_enter(&i40e->i40e_general_lock);
	if (i40e->i40e_state & I40E_SUSPENDED) {
		rc = ECANCELED;
		goto done;
	}

	if (!i40e_start(i40e, B_TRUE)) {
		rc = EIO;
		goto done;
	}

	atomic_or_32(&i40e->i40e_state, I40E_STARTED);
done:
	mutex_exit(&i40e->i40e_general_lock);

	return (rc);
}
Example #12
void
fcoet_send_sol_flogi(fcoet_soft_state_t *ss)
{
	fcoet_exchange_t	*xch;
	fct_cmd_t		*cmd;
	fct_els_t		*els;
	fcoe_frame_t		*frm;

	/*
	 * FCT will initialize fct_cmd_t
	 * Initialize fcoet_exchange
	 */
	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_SOL_ELS,
	    sizeof (fcoet_exchange_t), 0);
	xch = CMD2XCH(cmd);
	els = CMD2ELS(cmd);

	xch->xch_oxid = atomic_add_16_nv(&ss->ss_next_sol_oxid, 1);
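	/* 0xffff is the unassigned OXID value in FC, so skip it */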
	if (xch->xch_oxid == 0xFFFF) {
		xch->xch_oxid =
		    atomic_add_16_nv(&ss->ss_next_sol_oxid, 1);
	}
	xch->xch_rxid = 0xFFFF;
	xch->xch_flags = 0;
	xch->xch_ss = ss;
	xch->xch_cmd = cmd;
	xch->xch_current_seq = NULL;
	xch->xch_start_time = ddi_get_lbolt();

	/*
	 * Keep it so we can compare it with the response.
	 */
	ss->ss_sol_flogi = xch;
	els->els_resp_alloc_size = 116;
	els->els_resp_size = 116;
	els->els_resp_payload = (uint8_t *)
	    kmem_zalloc(els->els_resp_size, KM_SLEEP);
	(void) mod_hash_insert(xch->xch_ss->ss_sol_oxid_hash,
	    (mod_hash_key_t)(uintptr_t)xch->xch_oxid, (mod_hash_val_t)xch);
	xch->xch_flags |= XCH_FLAG_IN_HASH_TABLE;
	atomic_or_32(&ss->ss_flags, SS_FLAG_DELAY_PLOGI);

	/*
	 * FCoE will initialize fcoe_frame_t
	 */
	frm = ss->ss_eport->eport_alloc_frame(ss->ss_eport,
	    FLOGI_REQ_PAYLOAD_SIZE + FCFH_SIZE, NULL);
	if (frm == NULL) {
		ASSERT(0);
		return;
	} else {
		fcoet_init_tfm(frm, xch);
		bzero(frm->frm_payload, frm->frm_payload_size);
	}

	FFM_R_CTL(0x22, frm);
	FRM2TFM(frm)->tfm_rctl = 0x22;
	FFM_TYPE(0x01, frm);
	FFM_F_CTL(0x290000, frm);
	FFM_OXID(xch->xch_oxid, frm);
	FFM_RXID(xch->xch_rxid, frm);
	FFM_D_ID(0xfffffe, frm);
	frm->frm_payload[0] = ELS_OP_FLOGI;
	/* Common Service Parameters */
	frm->frm_payload[4] = 0x20;
	frm->frm_payload[5] = 0x08;
	frm->frm_payload[6] = 0x0;
	frm->frm_payload[7] = 0x03;
	/* N_PORT */
	frm->frm_payload[8] = 0x88;
	frm->frm_payload[9] = 0x00;
	frm->frm_payload[10] = 0x08;
	frm->frm_payload[11] = 0x0;
	frm->frm_payload[12] = 0x0;
	frm->frm_payload[13] = 0xff;
	frm->frm_payload[14] = 0x0;
	frm->frm_payload[15] = 0x03;
	frm->frm_payload[16] = 0x0;
	frm->frm_payload[17] = 0x0;
	frm->frm_payload[18] = 0x07;
	frm->frm_payload[19] = 0xd0;
	/* PWWN and NWWN */
	frm->frm_payload[20] = 0x0;
	bcopy(ss->ss_eport->eport_portwwn, frm->frm_payload+20, 8);
	bcopy(ss->ss_eport->eport_nodewwn, frm->frm_payload+28, 8);
	/* Class 3 Service Parameters */
	frm->frm_payload[68] = 0x88;
	frm->frm_payload[74] = 0x08;
	frm->frm_payload[77] = 0xff;

	ss->ss_eport->eport_tx_frame(frm);
	xch->xch_flags |= XCH_FLAG_NONFCP_REQ_SENT;
}
Example #13
/*
 * Load new table/tables to the device.
 * Call the appropriate target init routine, open all physical pdevs and
 * link them to the device. For other targets (mirror, stripe, snapshot,
 * etc.) also add dependency devices to the upcalls list.
 *
 * The table is loaded into the inactive slot; tables are switched in
 * dm_dev_resume_ioctl. This simulates Linux behaviour more closely;
 * there should not be any difference.
 */
int
dm_table_load_ioctl(prop_dictionary_t dm_dict)
{
	dm_dev_t *dmv;
	dm_table_entry_t *table_en, *last_table;
	dm_table_t *tbl;
	dm_target_t *target;

	prop_object_iterator_t iter;
	prop_array_t cmd_array;
	prop_dictionary_t target_dict;

	const char *name, *uuid, *type;

	uint32_t flags, ret, minor;

	char *str;

	ret = 0;
	flags = 0;
	name = NULL;
	uuid = NULL;
	dmv = NULL;
	last_table = NULL;
	str = NULL;

	/*
	 * char *xml; xml = prop_dictionary_externalize(dm_dict);
	 * printf("%s\n",xml);
	 */

	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_NAME, &name);
	prop_dictionary_get_cstring_nocopy(dm_dict, DM_IOCTL_UUID, &uuid);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_FLAGS, &flags);
	prop_dictionary_get_uint32(dm_dict, DM_IOCTL_MINOR, &minor);

	cmd_array = prop_dictionary_get(dm_dict, DM_IOCTL_CMD_DATA);
	iter = prop_array_iterator(cmd_array);
	dm_dbg_print_flags(flags);

	if ((dmv = dm_dev_lookup(name, uuid, minor)) == NULL) {
		DM_REMOVE_FLAG(flags, DM_EXISTS_FLAG);
		prop_object_iterator_release(iter);
		return ENOENT;
	}
	aprint_debug("Loading table to device: %s--%d\n", name,
	    dmv->table_head.cur_active_table);

	/*
	 * Check whether this table slot is already used by another table
	 * list; if it is, free the old tables first.
	 */
	if (dmv->flags & DM_INACTIVE_PRESENT_FLAG)
		dm_table_destroy(&dmv->table_head, DM_TABLE_INACTIVE);

	dm_dbg_print_flags(dmv->flags);
	tbl = dm_table_get_entry(&dmv->table_head, DM_TABLE_INACTIVE);

	aprint_debug("dmv->name = %s\n", dmv->name);

	prop_dictionary_set_uint32(dm_dict, DM_IOCTL_MINOR, dmv->minor);

	while ((target_dict = prop_object_iterator_next(iter)) != NULL) {

		prop_dictionary_get_cstring_nocopy(target_dict,
		    DM_TABLE_TYPE, &type);
		/*
		 * If we wanted to reject tables with two or more different
		 * targets, this would be the place to do it.
		 */
		if (((target = dm_target_lookup(type)) == NULL) &&
		    ((target = dm_target_autoload(type)) == NULL)) {
			dm_table_release(&dmv->table_head, DM_TABLE_INACTIVE);
			dm_dev_unbusy(dmv);
			prop_object_iterator_release(iter);
			return ENOENT;
		}
		if ((table_en = kmem_alloc(sizeof(dm_table_entry_t),
			    KM_SLEEP)) == NULL) {
			dm_table_release(&dmv->table_head, DM_TABLE_INACTIVE);
			dm_dev_unbusy(dmv);
			prop_object_iterator_release(iter);
			return ENOMEM;
		}
		prop_dictionary_get_uint64(target_dict, DM_TABLE_START,
		    &table_en->start);
		prop_dictionary_get_uint64(target_dict, DM_TABLE_LENGTH,
		    &table_en->length);

		table_en->target = target;
		table_en->dm_dev = dmv;
		table_en->target_config = NULL;

		/*
		 * There is a parameter string after the dm_target_spec
		 * structure which points to the "/dev/wd0a 284" part of
		 * the table. str points to this text. It can be NULL,
		 * so it should be checked before we try to use it.
		 */
		prop_dictionary_get_cstring(target_dict,
		    DM_TABLE_PARAMS, (char **) &str);

		if (SLIST_EMPTY(tbl))
			/* insert this table entry at the head */
			SLIST_INSERT_HEAD(tbl, table_en, next);
		else
			SLIST_INSERT_AFTER(last_table, table_en, next);

		/*
		 * The params string is different for every target,
		 * therefore it is passed to the target init routine
		 * and parsed there.
		 */

		if ((ret = target->init(dmv, &table_en->target_config,
			    str)) != 0) {

			dm_table_release(&dmv->table_head, DM_TABLE_INACTIVE);
			dm_table_destroy(&dmv->table_head, DM_TABLE_INACTIVE);
			free(str, M_TEMP);

			dm_dev_unbusy(dmv);
			dm_target_unbusy(target);
			prop_object_iterator_release(iter);
			return ret;
		}
		last_table = table_en;
		free(str, M_TEMP);
	}
	prop_object_iterator_release(iter);

	DM_ADD_FLAG(flags, DM_INACTIVE_PRESENT_FLAG);
	atomic_or_32(&dmv->flags, DM_INACTIVE_PRESENT_FLAG);

	dm_table_release(&dmv->table_head, DM_TABLE_INACTIVE);
	dm_dev_unbusy(dmv);
	return 0;
}
Example #14
void
pic_deliver_irqs(struct pic_softc *pic, int ipl, void *frame)
{
	const uint32_t ipl_mask = __BIT(ipl);
	struct intrsource *is;
	volatile uint32_t *ipending = pic->pic_pending_irqs;
	volatile uint32_t *iblocked = pic->pic_blocked_irqs;
	size_t irq_base;
#if PIC_MAXSOURCES > 32
	size_t irq_count;
	int poi = 0;		/* Possibility of interrupting */
#endif
	uint32_t pending_irqs;
	uint32_t blocked_irqs;
	int irq;
	bool progress = false;
	
	KASSERT(pic->pic_pending_ipls & ipl_mask);

	irq_base = 0;
#if PIC_MAXSOURCES > 32
	irq_count = 0;
#endif

	for (;;) {
		pending_irqs = pic_find_pending_irqs_by_ipl(pic, irq_base,
		    *ipending, ipl);
		KASSERT((pending_irqs & *ipending) == pending_irqs);
		KASSERT((pending_irqs & ~(*ipending)) == 0);
		if (pending_irqs == 0) {
#if PIC_MAXSOURCES > 32
			irq_count += 32;
			if (__predict_true(irq_count >= pic->pic_maxsources)) {
				if (!poi)
					/*Interrupt at this level was handled.*/
					break;
				irq_base = 0;
				irq_count = 0;
				poi = 0;
				ipending = pic->pic_pending_irqs;
				iblocked = pic->pic_blocked_irqs;
			} else {
				irq_base += 32;
				ipending++;
				iblocked++;
				KASSERT(irq_base <= pic->pic_maxsources);
			}
			continue;
#else
			break;
#endif
		}
		progress = true;
		blocked_irqs = 0;
		do {
			irq = ffs(pending_irqs) - 1;
			KASSERT(irq >= 0);

			atomic_and_32(ipending, ~__BIT(irq));
			is = pic->pic_sources[irq_base + irq];
			if (is != NULL) {
				cpsie(I32_bit);
				pic_dispatch(is, frame);
				cpsid(I32_bit);
#if PIC_MAXSOURCES > 32
				/*
				 * An interrupt may have occurred between
				 * the cpsie() and cpsid() above.
				 */
				poi = 1;
#endif
				blocked_irqs |= __BIT(irq);
			} else {
				KASSERT(0);
			}
			pending_irqs = pic_find_pending_irqs_by_ipl(pic,
			    irq_base, *ipending, ipl);
		} while (pending_irqs);
		if (blocked_irqs) {
			atomic_or_32(iblocked, blocked_irqs);
			atomic_or_32(&pic_blocked_pics, __BIT(pic->pic_id));
		}
	}

	KASSERT(progress);
	/*
	 * Since interrupts are disabled, we don't have to be too careful
	 * about these.
	 */
	if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
		atomic_and_32(&pic_pending_pics, ~__BIT(pic->pic_id));
}
Example #15
/*
 * FCoET can only interpret solicited and unsolicited FLOGI; all other
 * ELS/CT/FCP frames should be passed up to FCT.
 */
static int
fcoet_process_unsol_flogi_req(fcoet_exchange_t *xch)
{
	fcoe_frame_t *frm;

	atomic_or_32(&xch->xch_ss->ss_flags, SS_FLAG_DELAY_PLOGI);

	/*
	 * In the spec, the common service parameters should indicate whether
	 * the frame is from an N_Port or an F_Port, but the initial Intel
	 * implementation is not spec-compliant, so we use eport_flags to
	 * work around the problem.
	 */
	if (!(xch->xch_ss->ss_eport->eport_flags & EPORT_FLAG_IS_DIRECT_P2P)) {
		/*
		 * The topology is switch P2P, so there's no need to respond
		 * to this FLOGI
		 */
		FCOET_LOG("fcoet_process_unsol_flogi_req",
		    "skip FLOGI, since we are in switch topology");
		return (FCOE_SUCCESS);
	}

	/*
	 * Send ACC according to the spec.
	 */
	frm = xch->xch_ss->ss_eport->eport_alloc_frame(xch->xch_ss->ss_eport,
	    FLOGI_ACC_PAYLOAD_SIZE + FCFH_SIZE, 0);
	if (frm == NULL) {
		ASSERT(0);
		return (FCOE_FAILURE);
	} else {
		fcoet_init_tfm(frm, xch);
		bzero(frm->frm_payload, frm->frm_payload_size);
	}

	FFM_R_CTL(0x23, frm);
	FRM2TFM(frm)->tfm_rctl = 0x23;
	FFM_TYPE(0x01, frm);
	FFM_F_CTL(0x980000, frm);
	FFM_OXID(xch->xch_oxid, frm);
	FFM_RXID(xch->xch_rxid, frm);
	FFM_S_ID(0xFFFFFE, frm);

	/*
	 * ACC
	 */
	frm->frm_payload[0] = 0x02;

	/*
	 * Common Svc Parameters
	 */
	frm->frm_payload[4] = 0x20;
	frm->frm_payload[5] = 0x20;
	frm->frm_payload[7] = 0x0A;
	frm->frm_payload[10] = 0x05;
	frm->frm_payload[11] = 0xAC;
	bcopy(xch->xch_ss->ss_eport->eport_portwwn, frm->frm_payload + 20, 8);
	bcopy(xch->xch_ss->ss_eport->eport_nodewwn, frm->frm_payload + 28, 8);

	/*
	 * Class3 Svc Parameters
	 */
	frm->frm_payload[68] = 0x88;

	/*
	 * Send the FLOGI ACC out.
	 * After this, we should never use the exchange, because it could
	 * have been released. Please pay attention to other similar cases.
	 */
	xch->xch_ss->ss_eport->eport_tx_frame(frm);
	return (FCOE_SUCCESS);
}
Example #16
/*
 * Common code for rdlock, timedrdlock, wrlock, timedwrlock, tryrdlock,
 * and trywrlock for process-private (USYNC_THREAD) rwlocks.
 */
int
rwlock_lock(rwlock_t *rwlp, timespec_t *tsp, int rd_wr)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	queue_head_t *qp;
	ulwp_t *ulwp;
	int try_flag;
	int ignore_waiters_flag;
	int error = 0;

	try_flag = (rd_wr & TRY_FLAG);
	rd_wr &= ~TRY_FLAG;
	ASSERT(rd_wr == READ_LOCK || rd_wr == WRITE_LOCK);

	if (!try_flag) {
		DTRACE_PROBE2(plockstat, rw__block, rwlp, rd_wr);
	}

	qp = queue_lock(rwlp, MX);
	/* initial attempt to acquire the lock fails if there are waiters */
	ignore_waiters_flag = 0;
	while (error == 0) {
		if (rd_wr == READ_LOCK) {
			if (read_lock_try(rwlp, ignore_waiters_flag))
				break;
		} else {
			if (write_lock_try(rwlp, ignore_waiters_flag))
				break;
		}
		/* subsequent attempts do not fail due to waiters */
		ignore_waiters_flag = 1;
		atomic_or_32(rwstate, URW_HAS_WAITERS);
		readers = *rwstate;
		ASSERT_CONSISTENT_STATE(readers);
		if ((readers & URW_WRITE_LOCKED) ||
		    (rd_wr == WRITE_LOCK &&
		    (readers & URW_READERS_MASK) != 0))
			/* EMPTY */;	/* somebody holds the lock */
		else if ((ulwp = queue_waiter(qp)) == NULL) {
			atomic_and_32(rwstate, ~URW_HAS_WAITERS);
			ignore_waiters_flag = 0;
			continue;	/* no queued waiters, start over */
		} else {
			/*
			 * Do a priority check on the queued waiter (the
			 * highest priority thread on the queue) to see
			 * if we should defer to him or just grab the lock.
			 */
			int our_pri = real_priority(self);
			int his_pri = real_priority(ulwp);

			if (rd_wr == WRITE_LOCK) {
				/*
				 * We defer to a queued thread that has
				 * a higher priority than ours.
				 */
				if (his_pri <= our_pri) {
					/*
					 * Don't defer, just grab the lock.
					 */
					continue;
				}
			} else {
				/*
				 * We defer to a queued thread that has
				 * a higher priority than ours or that
				 * is a writer whose priority equals ours.
				 */
				if (his_pri < our_pri ||
				    (his_pri == our_pri && !ulwp->ul_writer)) {
					/*
					 * Don't defer, just grab the lock.
					 */
					continue;
				}
			}
		}
		/*
		 * We are about to block.
		 * If we're doing a trylock, return EBUSY instead.
		 */
		if (try_flag) {
			error = EBUSY;
			break;
		}
		/*
		 * Enqueue writers ahead of readers.
		 */
		self->ul_writer = rd_wr;	/* *must* be 0 or 1 */
		enqueue(qp, self, 0);
		set_parking_flag(self, 1);
		queue_unlock(qp);
		if ((error = __lwp_park(tsp, 0)) == EINTR)
			error = 0;
		set_parking_flag(self, 0);
		qp = queue_lock(rwlp, MX);
		if (self->ul_sleepq && dequeue_self(qp) == 0) {
			atomic_and_32(rwstate, ~URW_HAS_WAITERS);
			ignore_waiters_flag = 0;
		}
		self->ul_writer = 0;
		if (rd_wr == WRITE_LOCK &&
		    (*rwstate & URW_WRITE_LOCKED) &&
		    rwlp->rwlock_owner == (uintptr_t)self) {
			/*
			 * We acquired the lock by hand-off
			 * from the previous owner.
			 */
			error = 0;	/* timedlock did not fail */
			break;
		}
	}

	/*
	 * Make one final check to see if there are any threads left
	 * on the rwlock queue.  Clear the URW_HAS_WAITERS flag if not.
	 */
	if (qp->qh_root == NULL || qp->qh_root->qr_head == NULL)
		atomic_and_32(rwstate, ~URW_HAS_WAITERS);

	queue_unlock(qp);

	if (!try_flag) {
		DTRACE_PROBE3(plockstat, rw__blocked, rwlp, rd_wr, error == 0);
	}

	return (error);
}
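
rwlock_lock() relies on read_lock_try() and write_lock_try() for its fast paths, which are not included in this listing. The sketch below shows the general shape of such a reader fast path using atomic_cas_32() and the URW_* bits seen above; it is an illustration, not the libc routine (for one thing, it omits the preemption guards the real code uses).

/*
 * Illustrative reader fast path in the style of read_lock_try(): bump
 * the reader count with a CAS as long as no writer holds the lock and
 * (unless told to ignore them) no waiters are queued.  Sketch only.
 */
static int
read_lock_try_sketch(rwlock_t *rwlp, int ignore_waiters_flag)
{
	volatile uint32_t *rwstate =
	    (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t mask = ignore_waiters_flag ?
	    URW_WRITE_LOCKED : (URW_WRITE_LOCKED | URW_HAS_WAITERS);
	uint32_t readers;

	while (((readers = *rwstate) & mask) == 0) {
		if (atomic_cas_32(rwstate, readers, readers + 1) == readers)
			return (1);	/* got a read hold */
	}
	return (0);			/* contended: take the slow path */
}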
Example #17
/*
 * Common code for rdlock, timedrdlock, wrlock, timedwrlock, tryrdlock,
 * and trywrlock for process-shared (USYNC_PROCESS) rwlocks.
 *
 * Note: if the lock appears to be contended we call __lwp_rwlock_rdlock()
 * or __lwp_rwlock_wrlock() holding the mutex. These return with the mutex
 * released, and if they need to sleep will release the mutex first. In the
 * event of a spurious wakeup, these will return EAGAIN (because it is much
 * easier for us to re-acquire the mutex here).
 */
int
shared_rwlock_lock(rwlock_t *rwlp, timespec_t *tsp, int rd_wr)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	mutex_t *mp = &rwlp->mutex;
	uint32_t readers;
	int try_flag;
	int error;

	try_flag = (rd_wr & TRY_FLAG);
	rd_wr &= ~TRY_FLAG;
	ASSERT(rd_wr == READ_LOCK || rd_wr == WRITE_LOCK);

	if (!try_flag) {
		DTRACE_PROBE2(plockstat, rw__block, rwlp, rd_wr);
	}

	do {
		if (try_flag && (*rwstate & URW_WRITE_LOCKED)) {
			error = EBUSY;
			break;
		}
		if ((error = mutex_lock(mp)) != 0)
			break;
		if (rd_wr == READ_LOCK) {
			if (read_lock_try(rwlp, 0)) {
				(void) mutex_unlock(mp);
				break;
			}
		} else {
			if (write_lock_try(rwlp, 0)) {
				(void) mutex_unlock(mp);
				break;
			}
		}
		atomic_or_32(rwstate, URW_HAS_WAITERS);
		readers = *rwstate;
		ASSERT_CONSISTENT_STATE(readers);
		/*
		 * The calls to __lwp_rwlock_*() below will release the mutex,
		 * so we need a dtrace probe here.  The owner field of the
		 * mutex is cleared in the kernel when the mutex is released,
		 * so we should not clear it here.
		 */
		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
		/*
		 * The waiters bit may be inaccurate.
		 * Only the kernel knows for sure.
		 */
		if (rd_wr == READ_LOCK) {
			if (try_flag)
				error = __lwp_rwlock_tryrdlock(rwlp);
			else
				error = __lwp_rwlock_rdlock(rwlp, tsp);
		} else {
			if (try_flag)
				error = __lwp_rwlock_trywrlock(rwlp);
			else
				error = __lwp_rwlock_wrlock(rwlp, tsp);
		}
	} while (error == EAGAIN || error == EINTR);

	if (!try_flag) {
		DTRACE_PROBE3(plockstat, rw__blocked, rwlp, rd_wr, error == 0);
	}

	return (error);
}
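
shared_rwlock_lock() uses the same write_lock_try() fast path as the private variant in Example #16. The fragment below sketches a writer fast path in that style; like the reader sketch after Example #16 it is illustrative only and omits the preemption guards and owner bookkeeping of the real implementation.

/*
 * Illustrative writer fast path in the style of write_lock_try(): the
 * lock is free only when no writer bit and no reader count are set;
 * grab it by atomically setting URW_WRITE_LOCKED.  Sketch only.
 */
static int
write_lock_try_sketch(rwlock_t *rwlp, int ignore_waiters_flag)
{
	volatile uint32_t *rwstate =
	    (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t mask = ignore_waiters_flag ?
	    (URW_WRITE_LOCKED | URW_READERS_MASK) :
	    (URW_WRITE_LOCKED | URW_READERS_MASK | URW_HAS_WAITERS);
	uint32_t readers;

	while (((readers = *rwstate) & mask) == 0) {
		if (atomic_cas_32(rwstate, readers,
		    readers | URW_WRITE_LOCKED) == readers)
			return (1);	/* acquired the write lock */
	}
	return (0);			/* contended: take the slow path */
}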
Example #18
/*
 * Release a process-private rwlock and wake up any thread(s) sleeping on it.
 * This is called when a thread releases a lock that appears to have waiters.
 */
static void
rw_queue_release(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	queue_head_t *qp;
	uint32_t readers;
	uint32_t writer;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	int nlwpid = 0;
	int more;
	int maxlwps = MAXLWPS;
	lwpid_t buffer[MAXLWPS];
	lwpid_t *lwpid = buffer;

	qp = queue_lock(rwlp, MX);

	/*
	 * Here is where we actually drop the lock,
	 * but we retain the URW_HAS_WAITERS flag, if it is already set.
	 */
	readers = *rwstate;
	ASSERT_CONSISTENT_STATE(readers);
	if (readers & URW_WRITE_LOCKED)	/* drop the writer lock */
		atomic_and_32(rwstate, ~URW_WRITE_LOCKED);
	else				/* drop the readers lock */
		atomic_dec_32(rwstate);
	if (!(readers & URW_HAS_WAITERS)) {	/* no waiters */
		queue_unlock(qp);
		return;
	}

	/*
	 * The presence of the URW_HAS_WAITERS flag causes all rwlock
	 * code to go through the slow path, acquiring queue_lock(qp).
	 * Therefore, the rest of this code is safe because we are
	 * holding the queue lock and the URW_HAS_WAITERS flag is set.
	 */

	readers = *rwstate;		/* must fetch the value again */
	ASSERT_CONSISTENT_STATE(readers);
	ASSERT(readers & URW_HAS_WAITERS);
	readers &= URW_READERS_MASK;	/* count of current readers */
	writer = 0;			/* no current writer */

	/*
	 * Examine the queue of waiters in priority order and prepare
	 * to wake up as many readers as we encounter before encountering
	 * a writer.  If the highest priority thread on the queue is a
	 * writer, stop there and wake it up.
	 *
	 * We keep track of lwpids that are to be unparked in lwpid[].
	 * __lwp_unpark_all() is called to unpark all of them after
	 * they have been removed from the sleep queue and the sleep
	 * queue lock has been dropped.  If we run out of space in our
	 * on-stack buffer, we need to allocate more but we can't call
	 * lmalloc() because we are holding a queue lock when the overflow
	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
	 * either because the application may have allocated a small
	 * stack and we don't want to overrun the stack.  So we call
	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
	 * system call directly since that path acquires no locks.
	 */
	while ((ulwpp = queue_slot(qp, &prev, &more)) != NULL) {
		ulwp = *ulwpp;
		ASSERT(ulwp->ul_wchan == rwlp);
		if (ulwp->ul_writer) {
			if (writer != 0 || readers != 0)
				break;
			/* one writer to wake */
			writer++;
		} else {
			if (writer != 0)
				break;
			/* at least one reader to wake */
			readers++;
			if (nlwpid == maxlwps)
				lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
		}
		queue_unlink(qp, ulwpp, prev);
		ulwp->ul_sleepq = NULL;
		ulwp->ul_wchan = NULL;
		if (writer) {
			/*
			 * Hand off the lock to the writer we will be waking.
			 */
			ASSERT((*rwstate & ~URW_HAS_WAITERS) == 0);
			atomic_or_32(rwstate, URW_WRITE_LOCKED);
			rwlp->rwlock_owner = (uintptr_t)ulwp;
		}
		lwpid[nlwpid++] = ulwp->ul_lwpid;
	}

	/*
	 * This modification of rwstate must be done last.
	 * The presence of the URW_HAS_WAITERS flag causes all rwlock
	 * code to go through the slow path, acquiring queue_lock(qp).
	 * Otherwise the read_lock_try() and write_lock_try() fast paths
	 * are effective.
	 */
	if (ulwpp == NULL)
		atomic_and_32(rwstate, ~URW_HAS_WAITERS);

	if (nlwpid == 0) {
		queue_unlock(qp);
	} else {
		ulwp_t *self = curthread;
		no_preempt(self);
		queue_unlock(qp);
		if (nlwpid == 1)
			(void) __lwp_unpark(lwpid[0]);
		else
			(void) __lwp_unpark_all(lwpid, nlwpid);
		preempt(self);
	}
	if (lwpid != buffer)
		(void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
}