Example #1
int
damap_sync(damap_t *damapp, int sync_usec)
{
	dam_t	*mapp = (dam_t *)damapp;
	int	rv;

	ASSERT(mapp);
	DTRACE_PROBE3(damap__map__sync__start,
	    char *, mapp->dam_name, dam_t *, mapp,
	    int, sync_usec);

	/*
	 * Block when waiting for
	 *	a) stabilization pending or a fullset update pending
	 *	b) the report set to finalize (bitset is null)
	 *	c) any scheduled timeouts to fire
	 */
	rv = 1;					/* return synced */
	mutex_enter(&mapp->dam_lock);
again:	while ((mapp->dam_flags & WAITFOR_FLAGS) ||
	    (!bitset_is_null(&mapp->dam_report_set)) ||
	    (mapp->dam_tid != 0)) {
		DTRACE_PROBE2(damap__map__sync__waiting,
		    char *, mapp->dam_name, dam_t *, mapp);

		/* Wait for condition relayed via timeout */
		if (sync_usec) {
			if (cv_reltimedwait(&mapp->dam_sync_cv, &mapp->dam_lock,
			    drv_usectohz(sync_usec), TR_MICROSEC) == -1) {
				mapp->dam_sync_to_cnt++;
				rv = 0;		/* return timeout */
				break;
			}
		} else
			cv_wait(&mapp->dam_sync_cv, &mapp->dam_lock);
	}

	if (rv) {
		/*
		 * Delay one stabilization time after the apparent sync above
		 * and verify accuracy - resync if not accurate.
		 */
		(void) cv_reltimedwait(&mapp->dam_sync_cv, &mapp->dam_lock,
		    mapp->dam_stable_ticks, TR_MICROSEC);
		if (rv && ((mapp->dam_flags & WAITFOR_FLAGS) ||
		    (!bitset_is_null(&mapp->dam_report_set)) ||
		    (mapp->dam_tid != 0)))
			goto again;
	}
	mutex_exit(&mapp->dam_lock);

	DTRACE_PROBE3(damap__map__sync__end,
	    char *, mapp->dam_name, dam_t *, mapp,
	    int, rv);
	return (rv);
}
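All of these examples share one basic shape: take the mutex that guards the condition, loop on the predicate, and bound each sleep with cv_reltimedwait(), which returns -1 on timeout. Below is a minimal sketch of that pattern; the names mydata_t, md_lock, md_cv, and md_ready are hypothetical.

/*
 * Minimal sketch of the timed-wait pattern shared by these examples.
 * All names (mydata_t, md_lock, md_cv, md_ready) are hypothetical.
 * Returns 1 when the condition was met, 0 on timeout.
 */
static int
mydata_wait_ready(mydata_t *mdp, int timeout_usec)
{
	mutex_enter(&mdp->md_lock);
	while (!mdp->md_ready) {
		if (cv_reltimedwait(&mdp->md_cv, &mdp->md_lock,
		    drv_usectohz(timeout_usec), TR_CLOCK_TICK) == -1) {
			/* Timed out before the condition became true. */
			mutex_exit(&mdp->md_lock);
			return (0);
		}
	}
	mutex_exit(&mdp->md_lock);
	return (1);
}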
int
smb_nt_request(struct smb_ntrq *ntp)
{
	int error = EINVAL, i;

	for (i = 0; ; ) {
		/*
		 * Don't send any new requests if force unmount is underway.
		 * This check was moved into smb_rq_enqueue, called by
		 * smb_nt_request_int()
		 */
		ntp->nt_flags &= ~SMBT2_RESTART;
		error = smb_nt_request_int(ntp);
		if (!error)
			break;
		if ((ntp->nt_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
		    SMBT2_RESTART)
			break;
		if (++i > SMBMAXRESTARTS)
			break;
		mutex_enter(&(ntp)->nt_lock);
		if (ntp->nt_share) {
			cv_reltimedwait(&ntp->nt_cond, &(ntp)->nt_lock,
			    (hz * SMB_RCNDELAY), TR_CLOCK_TICK);

		} else {
			/* delay() takes a relative tick count */
			delay(hz * SMB_RCNDELAY);
		}
		mutex_exit(&(ntp)->nt_lock);
	}
	return (error);
}
Example #3
/*
 * This is the main balloon thread.  Wait on the cv.  When woken, if our
 * reservation has changed, call the appropriate function to adjust the
 * reservation.
 */
static void
balloon_worker_thread(void)
{
	uint_t		bln_wait;
	callb_cpr_t	cprinfo;
	spgcnt_t	rv;

	bln_wait = bln_wait_sec;

	CALLB_CPR_INIT(&cprinfo, &bln_mutex, callb_generic_cpr, "balloon");
	for (;;) {
		rv = 0;

		mutex_enter(&bln_mutex);
		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		if (bln_stats.bln_new_target != bln_stats.bln_current_pages) {
			/*
			 * We weren't able to fully complete the request
			 * last time through, so try again.
			 */
			(void) cv_reltimedwait(&bln_cv, &bln_mutex,
			    (bln_wait * hz), TR_CLOCK_TICK);
		} else {
			cv_wait(&bln_cv, &bln_mutex);
		}
		CALLB_CPR_SAFE_END(&cprinfo, &bln_mutex);

		if (bln_stats.bln_new_target != bln_stats.bln_current_pages) {
			if (bln_stats.bln_new_target <
			    bln_stats.bln_current_pages) {
				/* reservation shrunk */
				rv = -balloon_dec_reservation(
				    bln_stats.bln_current_pages -
				    bln_stats.bln_new_target);
			} else if (bln_stats.bln_new_target >
			    bln_stats.bln_current_pages) {
				/* reservation grew */
				rv = balloon_inc_reservation(
				    bln_stats.bln_new_target -
				    bln_stats.bln_current_pages);
			}
		}
		if (rv == 0) {
			if (bln_wait == 0) {
				bln_wait = 1;
			} else {
				bln_wait <<= bln_wait_shift;
			}
		} else {
			bln_stats.bln_current_pages += rv;
			bln_wait = bln_wait_sec;
		}
		if (bln_stats.bln_current_pages < bln_stats.bln_low)
			bln_stats.bln_low = bln_stats.bln_current_pages;
		else if (bln_stats.bln_current_pages > bln_stats.bln_high)
			bln_stats.bln_high = bln_stats.bln_current_pages;
		mutex_exit(&bln_mutex);
	}
}
/*
 * iscsi_thread_wait - wait until the thread is signaled, woken up,
 * or the given timeout (in ticks) expires.
 */
int
iscsi_thread_wait(
	iscsi_thread_t		*thread,
	clock_t			timeout
)
{
	int			rtn = 1;

	ASSERT(thread != NULL);
	ASSERT(thread->signature == SIG_ISCSI_THREAD);

	/* Acquire the mutex before any checking. */
	mutex_enter(&thread->sign.mtx);

	/* Check the signals. */
	if (thread->sign.bitmap & ISCSI_THREAD_SIGNAL_KILL) {
		goto signal_kill;
	} else if (thread->sign.bitmap & ISCSI_THREAD_SIGNAL_WAKEUP) {
		goto signal_wakeup;
	} else if (timeout == 0) {
		goto iscsi_thread_sleep_exit;
	}

	if (timeout == -1) {
		cv_wait(&thread->sign.cdv, &thread->sign.mtx);
	} else {
		rtn = cv_reltimedwait(&thread->sign.cdv, &thread->sign.mtx,
		    timeout, TR_CLOCK_TICK);
	}

	/* Check the signals. */
	if (thread->sign.bitmap & ISCSI_THREAD_SIGNAL_KILL) {
		goto signal_kill;
	} else if (thread->sign.bitmap & ISCSI_THREAD_SIGNAL_WAKEUP) {
		goto signal_wakeup;
	}

iscsi_thread_sleep_exit:
	mutex_exit(&thread->sign.mtx);
	return (rtn);

signal_kill:
	mutex_exit(&thread->sign.mtx);
	return (0);

signal_wakeup:
	thread->sign.bitmap &= ~ISCSI_THREAD_SIGNAL_WAKEUP;
	mutex_exit(&thread->sign.mtx);
	return (1);
}
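The timeout argument above follows a three-way convention: 0 polls the signal bitmap without blocking, -1 blocks until a signal arrives, and any positive value bounds the wait in clock ticks; the return value is 0 only for a kill signal. A hypothetical caller (iscsi_example_loop is illustrative, not part of the driver) might look like:

/*
 * Hypothetical caller illustrating the timeout convention:
 * 0 = poll, -1 = wait forever, >0 = wait at most that many ticks.
 */
static void
iscsi_example_loop(iscsi_thread_t *thread)
{
	for (;;) {
		/* Wake at least once a second to do housekeeping. */
		if (iscsi_thread_wait(thread, hz) == 0) {
			/* Thread was signaled to terminate. */
			break;
		}
		/* ... periodic or signaled work goes here ... */
	}
}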
static void
trans_roll_wait(mt_map_t *logmap, callb_cpr_t *cprinfop)
{
	mutex_enter(&logmap->mtm_mutex);
	logmap->mtm_ref = 0;
	if (logmap->mtm_flags & MTM_FORCE_ROLL) {
		cv_broadcast(&logmap->mtm_from_roll_cv);
	}
	logmap->mtm_flags &= ~(MTM_FORCE_ROLL | MTM_ROLLING);
	CALLB_CPR_SAFE_BEGIN(cprinfop);
	(void) cv_reltimedwait(&logmap->mtm_to_roll_cv, &logmap->mtm_mutex,
	    trans_roll_tics, TR_CLOCK_TICK);
	CALLB_CPR_SAFE_END(cprinfop, &logmap->mtm_mutex);
	logmap->mtm_flags |= MTM_ROLLING;
	mutex_exit(&logmap->mtm_mutex);
}
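Both balloon_worker_thread() above and trans_roll_wait() bracket their sleeps with CALLB_CPR_SAFE_BEGIN/END, marking the thread safe to suspend for checkpoint/resume while it waits. A minimal sketch of that bracketing, assuming cprinfop was initialized with CALLB_CPR_INIT against the same lock (ex_lock and ex_cv are hypothetical names):

/*
 * Minimal sketch of a CPR-safe timed wait. Assumes cprinfop was
 * set up with CALLB_CPR_INIT using ex_lock; ex_lock and ex_cv are
 * hypothetical.
 */
static void
example_cpr_wait(kmutex_t *ex_lock, kcondvar_t *ex_cv,
    callb_cpr_t *cprinfop)
{
	mutex_enter(ex_lock);
	/* Safe to checkpoint: we sleep holding only ex_lock. */
	CALLB_CPR_SAFE_BEGIN(cprinfop);
	(void) cv_reltimedwait(ex_cv, ex_lock, hz, TR_CLOCK_TICK);
	/* Leave the CPR-safe state before doing real work again. */
	CALLB_CPR_SAFE_END(cprinfop, ex_lock);
	mutex_exit(ex_lock);
}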
/*
 * Kill all user processes in said zone.  A special argument of ALL_ZONES is
 * passed in when the system as a whole is shutting down.  The lack of per-zone
 * process lists is likely to make the following a performance bottleneck on a
 * system with many zones.
 */
void
killall(zoneid_t zoneid)
{
	proc_t *p;

	ASSERT(zoneid != GLOBAL_ZONEID);
	/*
	 * Kill all processes except kernel daemons and ourself.
	 * Make a first pass to stop all processes so they won't
	 * be trying to restart children as we kill them.
	 */
	mutex_enter(&pidlock);
	for (p = practive; p != NULL; p = p->p_next) {
		if ((zoneid == ALL_ZONES || p->p_zone->zone_id == zoneid) &&
		    p->p_exec != NULLVP &&	/* kernel daemons */
		    p->p_as != &kas &&
		    p->p_stat != SZOMB) {
			mutex_enter(&p->p_lock);
			p->p_flag |= SNOWAIT;
			sigtoproc(p, NULL, SIGSTOP);
			mutex_exit(&p->p_lock);
		}
	}
	p = practive;
	while (p != NULL) {
		if ((zoneid == ALL_ZONES || p->p_zone->zone_id == zoneid) &&
		    p->p_exec != NULLVP &&	/* kernel daemons */
		    p->p_as != &kas &&
		    p->p_stat != SIDL &&
		    p->p_stat != SZOMB) {
			mutex_enter(&p->p_lock);
			if (sigismember(&p->p_sig, SIGKILL)) {
				mutex_exit(&p->p_lock);
				p = p->p_next;
			} else {
				sigtoproc(p, NULL, SIGKILL);
				mutex_exit(&p->p_lock);
				(void) cv_reltimedwait(&p->p_srwchan_cv,
				    &pidlock, hz, TR_CLOCK_TICK);
				p = practive;
			}
		} else {
			p = p->p_next;
		}
	}
	mutex_exit(&pidlock);
}
/*
 * Simple request-reply exchange
 */
int
smb_rq_simple_timed(struct smb_rq *rqp, int timeout)
{
	int error = EINVAL;

	for (;;) {
		/*
		 * Don't send any new requests if force unmount is underway.
		 * This check was moved into smb_rq_enqueue.
		 */
		rqp->sr_flags &= ~SMBR_RESTART;
		rqp->sr_timo = timeout;	/* in seconds */
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_rq_enqueue(rqp);
		if (error) {
			break;
		}
		error = smb_rq_reply(rqp);
		if (!error)
			break;
		if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) !=
		    SMBR_RESTART)
			break;
		if (rqp->sr_rexmit <= 0)
			break;
		SMBRQ_LOCK(rqp);
		if (rqp->sr_share) {
			cv_reltimedwait(&rqp->sr_cond, &(rqp)->sr_lock,
			    (hz * SMB_RCNDELAY), TR_CLOCK_TICK);

		} else {
			/* delay() takes a relative tick count */
			delay(hz * SMB_RCNDELAY);
		}
		SMBRQ_UNLOCK(rqp);
		rqp->sr_rexmit--;
#ifdef XXX
		timeout *= 2;
#endif
	}
	return (error);
}
Example #8
/*
 * xpvtap_user_app_stop()
 */
static void
xpvtap_user_app_stop(caddr_t arg)
{
	xpvtap_state_t *state;
	clock_t rc;

	state = (xpvtap_state_t *)arg;

	/*
	 * Give the app 10 secs to exit. If it doesn't exit, it's not a serious
	 * problem, we just won't auto-detach the driver.
	 */
	mutex_enter(&state->bt_open.bo_mutex);
	if (state->bt_open.bo_opened) {
		rc = cv_reltimedwait(&state->bt_open.bo_exit_cv,
		    &state->bt_open.bo_mutex, drv_usectohz(10000000),
		    TR_CLOCK_TICK);
		if (rc <= 0) {
			cmn_err(CE_NOTE, "!user process still has driver open, "
			    "deferring detach\n");
		}
	}
	mutex_exit(&state->bt_open.bo_mutex);
}
void
sda_slot_thread(void *arg)
{
	sda_slot_t	*slot = arg;

	for (;;) {
		sda_cmd_t	*cmdp;
		boolean_t	datline;
		sda_err_t	rv;

		mutex_enter(&slot->s_evlock);

		/*
		 * Process any abort list first.
		 */
		if ((cmdp = list_head(&slot->s_abortlist)) != NULL) {
			list_remove(&slot->s_abortlist, cmdp);
			mutex_exit(&slot->s_evlock);
			/*
			 * EOK used here, to avoid clobbering previous
			 * error code.
			 */
			sda_cmd_notify(cmdp, SDA_CMDF_BUSY | SDA_CMDF_DAT,
			    SDA_EOK);
			continue;
		}

		if (slot->s_detach) {
			/* Parent is detaching the slot, bail out. */
			break;
		}

		if ((slot->s_suspend) && (slot->s_xfrp == NULL)) {
			/*
			 * Host wants to suspend, but don't do it if
			 * we have a transfer outstanding.
			 */
			break;
		}

		if (slot->s_detect) {
			slot->s_detect = B_FALSE;
			mutex_exit(&slot->s_evlock);

			sda_slot_handle_detect(slot);
			continue;
		}

		if (slot->s_xfrdone) {
			sda_err_t	errno;

			errno = slot->s_errno;
			slot->s_errno = SDA_EOK;
			slot->s_xfrdone = B_FALSE;
			mutex_exit(&slot->s_evlock);

			sda_slot_handle_transfer(slot, errno);
			continue;
		}

		if (slot->s_fault != SDA_FAULT_NONE) {
			sda_fault_t	fault;

			fault = slot->s_fault;
			slot->s_fault = SDA_FAULT_NONE;
			mutex_exit(&slot->s_evlock);

			sda_slot_handle_fault(slot, fault);
			continue;
		}

		if (slot->s_reap) {
			/*
			 * Do not sleep while holding the evlock.  If this
			 * fails, we'll just try again the next cycle.
			 */
			(void) ddi_taskq_dispatch(slot->s_hp_tq,
			    sda_nexus_reap, slot, DDI_NOSLEEP);
		}

		if ((slot->s_xfrp != NULL) && (gethrtime() > slot->s_xfrtmo)) {
			/*
			 * The device stalled processing the data request.
			 * At this point, we really have no choice but to
			 * nuke the request, and flag a fault.
			 */
			mutex_exit(&slot->s_evlock);
			sda_slot_handle_transfer(slot, SDA_ETIME);
			sda_slot_fault(slot, SDA_FAULT_TIMEOUT);
			continue;
		}

		/*
		 * If the slot has suspended, then we can't process
		 * any new commands yet.
		 */
		if ((slot->s_suspend) || (!slot->s_wake)) {

			/*
			 * We use a timed wait if we are waiting for a
			 * data transfer to complete, or if we might
			 * need to reap child nodes.  Otherwise we
			 * avoid the timed wait to avoid waking CPU
			 * (power savings.)
			 */

			if ((slot->s_xfrp != NULL) || (slot->s_reap)) {
				/* Wait 3 sec (reap attempts). */
				(void) cv_reltimedwait(&slot->s_evcv,
				    &slot->s_evlock, drv_usectohz(3000000),
				    TR_CLOCK_TICK);
			} else {
				(void) cv_wait(&slot->s_evcv, &slot->s_evlock);
			}

			mutex_exit(&slot->s_evlock);
			continue;
		}

		slot->s_wake = B_FALSE;

		/*
		 * Possibly reap child nodes.
		 */
		if (slot->s_reap) {
			slot->s_reap = B_FALSE;
			mutex_exit(&slot->s_evlock);
			sda_nexus_reap(slot);
		} else {
			mutex_exit(&slot->s_evlock);
		}

		/*
		 * We're awake now, so look for work to do.  First
		 * acquire access to the slot.
		 */
		sda_slot_enter(slot);


		/*
		 * If no more commands to process, go back to sleep.
		 */
		if ((cmdp = list_head(&slot->s_cmdlist)) == NULL) {
			sda_slot_exit(slot);
			continue;
		}

		/*
		 * If the current command is not an initialization
		 * command, but we are initializing, go back to sleep.
		 * (This happens potentially during a card reset or
		 * suspend/resume cycle, where the card has not been
		 * removed, but a reset is in progress.)
		 */
		if (slot->s_init && !(cmdp->sc_flags & SDA_CMDF_INIT)) {
			sda_slot_exit(slot);
			continue;
		}

		datline = ((cmdp->sc_flags & SDA_CMDF_DAT) != 0);

		if (datline) {
			/*
			 * If the current command has a data phase
			 * while a transfer is in progress, then go
			 * back to sleep.
			 */
			if (slot->s_xfrp != NULL) {
				sda_slot_exit(slot);
				continue;
			}

			/*
			 * Note that APP_CMD doesn't have a data phase,
			 * although the associated ACMD might.
			 */
			if (cmdp->sc_index != CMD_APP_CMD) {
				slot->s_xfrp = cmdp;
				/*
				 * All commands should complete in
				 * less than 5 seconds.  The worst
				 * case is actually somewhere around 4
				 * seconds, but that is when the clock
				 * is only 100 kHz.
				 */
				slot->s_xfrtmo = gethrtime() +
				    5000000000ULL;
				(void) sda_setprop(slot, SDA_PROP_LED, 1);
			}
		}

		/*
		 * We're committed to dispatching this command now,
		 * so remove it from the list.
		 */
		list_remove(&slot->s_cmdlist, cmdp);

		/*
		 * There could be more commands after this one, so we
		 * mark ourself so we stay awake for another cycle.
		 */
		sda_slot_wakeup(slot);

		/*
		 * Submit the command.  Note that we are holding the
		 * slot lock here, so it is critical that the caller
		 * *not* call back up into the framework.  The caller
		 * must break context.  But doing it this way prevents
		 * a critical race on card removal.
		 *
		 * Note that we don't resubmit memory to the device if
		 * it isn't flagged as ready (e.g. if the wrong device
		 * was inserted!)
		 */
		if ((!slot->s_ready) && (cmdp->sc_flags & SDA_CMDF_MEM)) {
			rv = SDA_ENODEV;
			if (!slot->s_warn) {
				sda_slot_err(slot,
				    "Device removed while in use.  "
				    "Please reinsert!");
				slot->s_warn = B_TRUE;
			}
		} else {
			rv = slot->s_ops.so_cmd(slot->s_prv, cmdp);
		}
		if (rv == SDA_EOK)
			rv = sda_slot_check_response(cmdp);

		if (rv == SDA_EOK) {
			/*
			 * If APP_CMD completed properly, then
			 * resubmit with ACMD index.  Note wake was
			 * already set above.
			 */
			if (cmdp->sc_index == CMD_APP_CMD) {
				if ((cmdp->sc_response[0] & R1_APP_CMD) == 0) {
					sda_slot_log(slot, "APP_CMD not set!");
				}
				sda_cmd_resubmit_acmd(slot, cmdp);
				sda_slot_exit(slot);

				continue;
			}

		} else if (datline) {
			/*
			 * If an error occurred and we were expecting
			 * a transfer phase, we have to clean up.
			 */
			(void) sda_setprop(slot, SDA_PROP_LED, 0);
			slot->s_xfrp = NULL;
			slot->s_xfrtmo = 0;

			/*
			 * And notify any waiter.
			 */
			sda_slot_exit(slot);
			sda_cmd_notify(cmdp, SDA_CMDF_BUSY | SDA_CMDF_DAT, rv);
			continue;
		}

		/*
		 * Wake any waiter.
		 */
		sda_slot_exit(slot);
		sda_cmd_notify(cmdp, SDA_CMDF_BUSY, rv);
	}

	mutex_exit(&slot->s_evlock);
}
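The thread above sleeps on s_evcv and relies on producers to set s_wake before signaling. A sketch of what such a producer looks like, under the assumption that sda_slot_wakeup() follows the usual flag-plus-broadcast shape (not verified against the framework source):

/*
 * Sketch of the producer side: set the wake flag under the event
 * lock, then broadcast so sda_slot_thread() re-evaluates its work
 * list. Assumed shape, not verified framework source.
 */
void
sda_slot_wakeup(sda_slot_t *slot)
{
	mutex_enter(&slot->s_evlock);
	slot->s_wake = B_TRUE;
	cv_broadcast(&slot->s_evcv);
	mutex_exit(&slot->s_evlock);
}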
/*
 * heci_hw_init - initialize the host and firmware so they can start work.
 *
 * @dev: Device object for our driver
 *
 * @return 0 on success, <0 on failure.
 */
int
heci_hw_init(struct iamt_heci_device *dev)
{
	int err = 0;

	mutex_enter(&dev->device_lock);
	dev->host_hw_state = read_heci_register(dev, H_CSR);
	dev->me_hw_state = read_heci_register(dev, ME_CSR_HA);
	DBG("host_hw_state = 0x%08x, mestate = 0x%08x.\n",
	    dev->host_hw_state, dev->me_hw_state);

	if ((dev->host_hw_state & H_IS) == H_IS) {
		/* acknowledge interrupt and stop interrupts */
		heci_set_csr_register(dev);
	}
	dev->recvd_msg = 0;
	DBG("reset in start the heci device.\n");

	heci_reset(dev, 1);

	DBG("host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
	    dev->host_hw_state, dev->me_hw_state);

	/* wait for ME to turn on ME_RDY */
	err = 0;
	while (!dev->recvd_msg && err != -1) {
		err = cv_reltimedwait(&dev->wait_recvd_msg,
		    &dev->device_lock, HECI_INTEROP_TIMEOUT, TR_CLOCK_TICK);
	}

	if (err == -1 && !dev->recvd_msg) {
		dev->heci_state = HECI_DISABLED;
		DBG("wait_event_interruptible_timeout failed"
		    "on wait for ME to turn on ME_RDY.\n");
		mutex_exit(&dev->device_lock);
		return (-ENODEV);
	} else {
		if (!(((dev->host_hw_state & H_RDY) == H_RDY) &&
		    ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA))) {
			dev->heci_state = HECI_DISABLED;
			DBG("host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
			    dev->host_hw_state,
			    dev->me_hw_state);

			if ((dev->host_hw_state & H_RDY) != H_RDY)
				DBG("host turn off H_RDY.\n");

			if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
				DBG("ME turn off ME_RDY.\n");

			cmn_err(CE_WARN,
			    "heci: link layer initialization failed.\n");
			mutex_exit(&dev->device_lock);
			return (-ENODEV);
		}
	}
	dev->recvd_msg = 0;
	DBG("host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
	    dev->host_hw_state, dev->me_hw_state);
	DBG("ME turn on ME_RDY and host turn on H_RDY.\n");
	DBG("heci: link layer has been established.\n");
	mutex_exit(&dev->device_lock);
	return (0);
}
Example #11
/*
 * irm_balance_thread()
 *
 *	One instance of this thread operates per each defined IRM pool.
 *	It does the initial activation of the pool, as well as balancing
 *	any requests that were queued up before the pool was active.
 *	Once active, it waits forever to service balance operations.
 */
static void
irm_balance_thread(ddi_irm_pool_t *pool_p)
{
	clock_t		interval;

	DDI_INTR_IRMDBG((CE_CONT, "irm_balance_thread: pool_p %p\n",
	    (void *)pool_p));

	/* Lock the pool */
	mutex_enter(&pool_p->ipool_lock);

	/* Perform initial balance if required */
	if (pool_p->ipool_reqno > pool_p->ipool_resno)
		i_ddi_irm_balance(pool_p);

	/* Activate the pool */
	pool_p->ipool_flags |= DDI_IRM_FLAG_ACTIVE;

	/*
	 * Main loop.
	 * Iterate once before waiting on the signal, in case a signal
	 * was sent before this thread was created.
	 */
	for (;;) {

		/* Compute the delay interval */
		interval = drv_usectohz(irm_balance_delay * 1000000);

		/* Wait one interval, or until there are waiters */
		if ((interval > 0) &&
		    !(pool_p->ipool_flags & DDI_IRM_FLAG_WAITERS) &&
		    !(pool_p->ipool_flags & DDI_IRM_FLAG_EXIT)) {
			(void) cv_reltimedwait(&pool_p->ipool_cv,
			    &pool_p->ipool_lock, interval, TR_CLOCK_TICK);
		}

		/* Check if awakened to exit */
		if (pool_p->ipool_flags & DDI_IRM_FLAG_EXIT) {
			DDI_INTR_IRMDBG((CE_CONT,
			    "irm_balance_thread: exiting...\n"));
			mutex_exit(&pool_p->ipool_lock);
			thread_exit();
		}

		/* Balance the pool */
		i_ddi_irm_balance(pool_p);

		/* Notify waiters */
		if (pool_p->ipool_flags & DDI_IRM_FLAG_WAITERS) {
			cv_broadcast(&pool_p->ipool_cv);
			pool_p->ipool_flags &= ~(DDI_IRM_FLAG_WAITERS);
		}

		/* Clear QUEUED condition */
		pool_p->ipool_flags &= ~(DDI_IRM_FLAG_QUEUED);

		/* Sleep until queued */
		cv_wait(&pool_p->ipool_cv, &pool_p->ipool_lock);

		DDI_INTR_IRMDBG((CE_CONT, "irm_balance_thread: signaled.\n"));
	}
}
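The loop above describes a flags-based protocol: clients mark the pool DDI_IRM_FLAG_QUEUED (and optionally DDI_IRM_FLAG_WAITERS) and signal ipool_cv. A hypothetical sketch of that producer side (example_irm_queue is illustrative, not the actual framework entry point):

/*
 * Hypothetical producer for the balance thread above: mark the
 * pool queued, signal the thread, and optionally wait for the
 * balance to complete. Illustrative only.
 */
static void
example_irm_queue(ddi_irm_pool_t *pool_p, boolean_t wait)
{
	mutex_enter(&pool_p->ipool_lock);
	pool_p->ipool_flags |= DDI_IRM_FLAG_QUEUED;
	if (wait)
		pool_p->ipool_flags |= DDI_IRM_FLAG_WAITERS;
	cv_signal(&pool_p->ipool_cv);
	/* The balance thread broadcasts and clears WAITERS when done. */
	while (wait && (pool_p->ipool_flags & DDI_IRM_FLAG_WAITERS))
		cv_wait(&pool_p->ipool_cv, &pool_p->ipool_lock);
	mutex_exit(&pool_p->ipool_lock);
}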
int
ipw2200_load_fw(struct ipw2200_softc *sc, uint8_t *buf, size_t size)
{
	struct dma_region	dr[MAX_DR_NUM]; /* maximal, 64 * 4KB = 256KB */
	uint8_t			*p, *end, *v;
	uint32_t		mlen;
	uint32_t		src, dst, ctl, len, sum, off;
	uint32_t		sentinel;
	int			ntries, err, cnt, i;
	clock_t			clk = drv_usectohz(5000000);  /* 5 seconds */

	ipw2200_imem_put32(sc, 0x3000a0, 0x27000);

	p   = buf;
	end = p + size;

	cnt = 0;
	err = ipw2200_dma_region_alloc(sc, &dr[cnt], MAX_DR_SIZE, DDI_DMA_READ,
	    DDI_DMA_STREAMING);
	if (err != DDI_SUCCESS)
		goto fail0;
	off = 0;
	src = dr[cnt].dr_pbase;

	ipw2200_csr_put32(sc, IPW2200_CSR_AUTOINC_ADDR, 0x27000);

	while (p < end) {
		dst = LE_32(*((uint32_t *)(uintptr_t)p)); p += 4;
		len = LE_32(*((uint32_t *)(uintptr_t)p)); p += 4;
		v = p;
		p += len;
		IPW2200_DBG(IPW2200_DBG_FW, (sc->sc_dip, CE_CONT,
		    "ipw2200_load_fw(): dst=0x%x,len=%u\n", dst, len));

		while (len > 0) {
			/*
			 * if no DMA region is available, allocate a new one
			 */
			if (off == dr[cnt].dr_size) {
				cnt++;
				if (cnt >= MAX_DR_NUM) {
					IPW2200_WARN((sc->sc_dip, CE_WARN,
					    "ipw2200_load_fw(): "
					    "maximum %d DRs is reached\n",
					    cnt));
					cnt--; /* only free alloced DMA */
					goto fail1;
				}
				err = ipw2200_dma_region_alloc(sc, &dr[cnt],
				    MAX_DR_SIZE, DDI_DMA_WRITE,
				    DDI_DMA_STREAMING);
				if (err != DDI_SUCCESS) {
					cnt--; /* only free alloced DMA */
					goto fail1;
				}
				off = 0;
				src = dr[cnt].dr_pbase;
			}
			mlen = min(IPW2200_CB_MAXDATALEN, len);
			mlen = min(mlen, dr[cnt].dr_size - off);

			(void) memcpy(dr[cnt].dr_base + off, v, mlen);
			(void) ddi_dma_sync(dr[cnt].dr_hnd, off, mlen,
			    DDI_DMA_SYNC_FORDEV);

			ctl = IPW2200_CB_DEFAULT_CTL | mlen;
			sum = ctl ^ src ^ dst;
			/*
			 * write a command
			 */
			ipw2200_csr_put32(sc, IPW2200_CSR_AUTOINC_DATA, ctl);
			ipw2200_csr_put32(sc, IPW2200_CSR_AUTOINC_DATA, src);
			ipw2200_csr_put32(sc, IPW2200_CSR_AUTOINC_DATA, dst);
			ipw2200_csr_put32(sc, IPW2200_CSR_AUTOINC_DATA, sum);

			off += mlen;
			src += mlen;
			dst += mlen;
			v   += mlen;
			len -= mlen;
		}
	}

	sentinel = ipw2200_csr_get32(sc, IPW2200_CSR_AUTOINC_ADDR);
	ipw2200_csr_put32(sc, IPW2200_CSR_AUTOINC_DATA, 0);

	IPW2200_DBG(IPW2200_DBG_FW, (sc->sc_dip, CE_CONT,
	    "ipw2200_load_fw(): sentinel=%x\n", sentinel));

	ipw2200_csr_put32(sc, IPW2200_CSR_RST,
	    ~(IPW2200_RST_MASTER_DISABLED | IPW2200_RST_STOP_MASTER)
	    & ipw2200_csr_get32(sc, IPW2200_CSR_RST));

	ipw2200_imem_put32(sc, 0x3000a4, 0x540100);
	for (ntries = 0; ntries < 400; ntries++) {
		uint32_t val;
		val = ipw2200_imem_get32(sc, 0x3000d0);
		if (val >= sentinel)
			break;
		drv_usecwait(100);
	}
	if (ntries == 400) {
		IPW2200_WARN((sc->sc_dip, CE_WARN,
		    "ipw2200_load_fw(): timeout processing command blocks\n"));
		goto fail1;
	}

	mutex_enter(&sc->sc_ilock);

	ipw2200_imem_put32(sc, 0x3000a4, 0x540c00);

	/*
	 * enable all interrupts
	 */
	ipw2200_csr_put32(sc, IPW2200_CSR_INTR_MASK, IPW2200_INTR_MASK_ALL);

	/*
	 * tell the adapter to initialize the firmware,
	 * just simply set it to 0
	 */
	ipw2200_csr_put32(sc, IPW2200_CSR_RST, 0);
	ipw2200_csr_put32(sc, IPW2200_CSR_CTL,
	    ipw2200_csr_get32(sc, IPW2200_CSR_CTL) |
	    IPW2200_CTL_ALLOW_STANDBY);

	/*
	 * wait for interrupt to notify fw initialization is done
	 */
	sc->sc_fw_ok = 0;
	while (!sc->sc_fw_ok) {
		/*
		 * Note: this timeout was increased from 1 s to 5 s.
		 */
		if (cv_reltimedwait(&sc->sc_fw_cond, &sc->sc_ilock, clk,
		    TR_CLOCK_TICK) < 0)
			break;
	}
	mutex_exit(&sc->sc_ilock);

	if (!sc->sc_fw_ok) {
		IPW2200_WARN((sc->sc_dip, CE_WARN,
		    "ipw2200_load_fw(): firmware(%u) load failed!", size));
		goto fail1;
	}

	for (i = 0; i <= cnt; i++)
		ipw2200_dma_region_free(&dr[i]);

	return (DDI_SUCCESS);

fail1:
	IPW2200_WARN((sc->sc_dip, CE_WARN,
	    "ipw2200_load_fw(): DMA allocation failed, cnt=%d\n", cnt));
	for (i = 0; i <= cnt; i++)
		ipw2200_dma_region_free(&dr[i]);
fail0:
	return (DDI_FAILURE);
}
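The sc_fw_ok loop above is one half of a flag-plus-cv handshake: the interrupt handler sets the flag under sc_ilock and signals sc_fw_cond. A hypothetical sketch of that other half (example_fw_intr is illustrative, not the driver's actual handler):

/*
 * Hypothetical interrupt-side half of the handshake above: record
 * that firmware initialization finished and wake the waiter.
 */
static void
example_fw_intr(struct ipw2200_softc *sc)
{
	mutex_enter(&sc->sc_ilock);
	sc->sc_fw_ok = 1;
	cv_signal(&sc->sc_fw_cond);
	mutex_exit(&sc->sc_ilock);
}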
Example #13
/* close a vldc port */
static int
i_vldc_close_port(vldc_t *vldcp, uint_t portno)
{
	vldc_port_t	*vport;
	vldc_minor_t	*vminor;
	int		rv = DDI_SUCCESS;

	vport = &(vldcp->port[portno]);

	ASSERT(MUTEX_HELD(&vport->minorp->lock));

	D1("i_vldc_close_port: vldc@%d:%d: closing port\n",
	    vport->inst, vport->minorp->portno);

	vminor = vport->minorp;

	switch (vport->status) {
	case VLDC_PORT_CLOSED:
		/* nothing to do */
		DWARN("i_vldc_close_port: port %d in an unexpected "
		    "state (%d)\n", portno, vport->status);
		return (DDI_SUCCESS);

	case VLDC_PORT_READY:
	case VLDC_PORT_RESET:
		do {
			rv = i_vldc_ldc_close(vport);
			if (rv != EAGAIN)
				break;

			/*
			 * EAGAIN indicates that ldc_close() failed because
			 * the ldc callback thread is active for the channel.
			 * cv_reltimedwait() is used to release vminor->lock
			 * and allow the ldc callback thread to complete.
			 * After waking up, check whether the port has been
			 * closed by another thread in the meantime.
			 */
			(void) cv_reltimedwait(&vminor->cv, &vminor->lock,
			    drv_usectohz(vldc_close_delay), TR_CLOCK_TICK);
			rv = 0;
		} while (vport->status != VLDC_PORT_CLOSED);

		if ((rv != 0) || (vport->status == VLDC_PORT_CLOSED))
			return (rv);

		break;

	case VLDC_PORT_OPEN:
		break;

	default:
		DWARN("i_vldc_close_port: port %d in an unexpected "
		    "state (%d)\n", portno, vport->status);
		ASSERT(0);	/* fail quickly to help diagnosis */
		return (EINVAL);
	}

	ASSERT(vport->status == VLDC_PORT_OPEN);

	/* free memory */
	kmem_free(vport->send_buf, vport->mtu);
	kmem_free(vport->recv_buf, vport->mtu);

	if (strcmp(vminor->sname, VLDC_HVCTL_SVCNAME) == 0)
		kmem_free(vport->cookie_buf, vldc_max_cookie);

	vport->status = VLDC_PORT_CLOSED;

	return (rv);
}
Example #14
/*
 * sckm_process_msg
 *
 * Process a message received from the SC. Invoked by sckm_event_task().
 */
static void
sckm_process_msg(uint32_t cmd, uint64_t transid,
    uint32_t len, sckm_mbox_req_hdr_t *req_data,
    sckm_mbox_rep_hdr_t *rep_data)
{
    int rv;

    mutex_enter(&sckm_umutex);

    switch (cmd) {
    case SCKM_MSG_SADB: {
        int sadb_msglen;

        sadb_msglen = len - sizeof (sckm_mbox_req_hdr_t);
        SCKM_DEBUG1(D_TASK, "received SCKM_MSG_SADB len=%d",
                    sadb_msglen);

        /* sanity check request */
        if (sadb_msglen <= 0) {
            SCKM_DEBUG0(D_TASK, "bad SADB message, "
                        "zero length");
            /*
             * SADB message is too short, send corresponding
             * error message to SC.
             */
            rep_data->sckm_version = SCKM_PROTOCOL_VERSION;
            rep_data->status = SCKM_ERR_SADB_MSG;

            if ((rv = mboxsc_putmsg(KEY_KDSC, MBOXSC_MSG_REPLY,
                                    cmd, &transid, sizeof (sckm_mbox_rep_hdr_t),
                                    rep_data, MBOXSC_PUTMSG_DEF_TIMEOUT)) != 0) {
                SCKM_DEBUG1(D_TASK, "sckm_mbox_task: "
                            "mboxsc_putmsg() failed (%d)\n", rv);
            }
            mutex_exit(&sckm_umutex);
            return;
        }

        /* initialize request for daemon */
        sckm_udata.transid = transid;
        sckm_udata.type = SCKM_IOCTL_REQ_SADB;
        sckm_udata.buf_len = len - sizeof (sckm_mbox_req_hdr_t);
        bcopy(req_data+1, sckm_udata.buf, sckm_udata.buf_len);

        break;
    }
    default:
        cmn_err(CE_WARN, "unknown cmd %x received from SC", cmd);
        /*
         * Received unknown command from SC. Send corresponding
         * error message to SC.
         */
        rep_data->sckm_version = SCKM_PROTOCOL_VERSION;
        rep_data->status = SCKM_ERR_BAD_CMD;

        if ((rv = mboxsc_putmsg(KEY_KDSC, MBOXSC_MSG_REPLY,
                                cmd, &transid, sizeof (sckm_mbox_rep_hdr_t),
                                rep_data, MBOXSC_PUTMSG_DEF_TIMEOUT)) != 0) {
            SCKM_DEBUG1(D_TASK, "sckm_mbox_task: "
                        "mboxsc_putmsg() failed (%d)\n", rv);
        }
        mutex_exit(&sckm_umutex);
        return;
    }

    /*
     * At this point, we know that the request is valid, so pass
     * the request to the daemon.
     */
    SCKM_DEBUG0(D_TASK, "waking up daemon");
    sckm_udata_req = B_TRUE;
    cv_signal(&sckm_udata_cv);

    /* wait for daemon to process request */
    if (cv_reltimedwait(&sckm_cons_cv, &sckm_umutex,
                        drv_usectohz(SCKM_DAEMON_TIMEOUT), TR_CLOCK_TICK) == -1) {
        /*
         * Daemon did not process the data, report this
         * error to the SC.
         */
        SCKM_DEBUG0(D_TASK, "daemon timeout!!");
        rep_data->sckm_version = SCKM_PROTOCOL_VERSION;
        rep_data->status = SCKM_ERR_DAEMON;
    } else {
        /* Daemon processed data, return status to SC */
        SCKM_DEBUG0(D_TASK, "daemon processed data");
        rep_data->sckm_version = SCKM_PROTOCOL_VERSION;
        switch (sckm_udata_status.status) {
        case SCKM_IOCTL_STAT_SUCCESS:
            SCKM_DEBUG0(D_TASK, "daemon returned success");
            rep_data->status = SCKM_SUCCESS;
            break;
        case SCKM_IOCTL_STAT_ERR_PFKEY:
            SCKM_DEBUG1(D_TASK, "daemon returned PF_KEY "
                        "error, errno=%d",
                        sckm_udata_status.sadb_msg_errno);
            rep_data->status = SCKM_ERR_SADB_PFKEY;
            rep_data->sadb_msg_errno =
                sckm_udata_status.sadb_msg_errno;
            break;
        case SCKM_IOCTL_STAT_ERR_REQ:
            SCKM_DEBUG0(D_TASK, "daemon returned "
                        "bad request");
            rep_data->status = SCKM_ERR_DAEMON;
            break;
        case SCKM_IOCTL_STAT_ERR_VERSION:
            SCKM_DEBUG0(D_TASK, "PF_KEY version not "
                        "supported");
            rep_data->status = SCKM_ERR_SADB_VERSION;
            rep_data->sadb_msg_version =
                sckm_udata_status.sadb_msg_version;
            break;
        case SCKM_IOCTL_STAT_ERR_TIMEOUT:
            SCKM_DEBUG0(D_TASK, "no response received "
                        "from key engine");
            rep_data->status = SCKM_ERR_SADB_TIMEOUT;
            break;
        case SCKM_IOCTL_STAT_ERR_OTHER:
            SCKM_DEBUG0(D_TASK, "daemon encountered "
                        "an error");
            rep_data->status = SCKM_ERR_DAEMON;
            break;
        case SCKM_IOCTL_STAT_ERR_SADB_TYPE:
            SCKM_DEBUG0(D_TASK, "daemon returned bad "
                        "SADB message type");
            rep_data->status = SCKM_ERR_SADB_BAD_TYPE;
            break;
        default:
            cmn_err(CE_WARN, "SCKM daemon returned "
                    "invalid status %d", sckm_udata_status.status);
            rep_data->status = SCKM_ERR_DAEMON;
        }
    }

    /* send reply back to SC */
    if ((rv = mboxsc_putmsg(KEY_KDSC, MBOXSC_MSG_REPLY,
                            cmd, &transid, sizeof (sckm_mbox_rep_hdr_t),
                            rep_data, MBOXSC_PUTMSG_DEF_TIMEOUT)) != 0) {
        SCKM_DEBUG1(D_TASK, "failed sending reply to SC (%d)", rv);
    } else {
        SCKM_DEBUG0(D_TASK, "reply sent to SC");
    }

    sckm_udata_req = B_FALSE;
    mutex_exit(&sckm_umutex);
}
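The daemon handshake above is a request/reply rendezvous: set the request flag, cv_signal() the daemon, then bound the wait for the reply with cv_reltimedwait(). A minimal sketch of that shape (every name here is hypothetical):

/*
 * Minimal sketch of the request/reply rendezvous used above.
 * All names (ex_mutex, ex_req_cv, ex_done_cv, ex_pending) are
 * hypothetical.
 */
static kmutex_t ex_mutex;
static kcondvar_t ex_req_cv;
static kcondvar_t ex_done_cv;
static boolean_t ex_pending;

static int
example_rendezvous(void)
{
	int rv = 0;

	mutex_enter(&ex_mutex);
	ex_pending = B_TRUE;
	cv_signal(&ex_req_cv);		/* wake the worker thread */

	/* Bound the wait for the reply; -1 means the worker timed out. */
	if (cv_reltimedwait(&ex_done_cv, &ex_mutex,
	    drv_usectohz(5 * MICROSEC), TR_CLOCK_TICK) == -1)
		rv = -1;

	ex_pending = B_FALSE;
	mutex_exit(&ex_mutex);
	return (rv);
}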
/*
 * heci_detach - Device Removal Routine
 *
 * @dip: device information struct
 * @cmd: DDI_DETACH or DDI_SUSPEND
 *
 * heci_detach is called by the DDI framework to alert the driver
 * that it should release the device.
 */
static int
heci_detach(dev_info_t *dip,  ddi_detach_cmd_t cmd)
{
	struct iamt_heci_device	*dev;
	int err;

	dev = ddi_get_soft_state(heci_soft_state_p, ddi_get_instance(dip));
	ASSERT(dev != NULL);

	switch (cmd) {
	case DDI_SUSPEND:
		err = heci_suspend(dip);
		if (err)
			return (DDI_FAILURE);
		else
			return (DDI_SUCCESS);

	case DDI_DETACH:
		break;

	default:
		return (DDI_FAILURE);
	}

	if (dev->wd_timer)
		(void) untimeout(dev->wd_timer);

	mutex_enter(&dev->device_lock);
	if (dev->wd_file_ext.state == HECI_FILE_CONNECTED &&
	    dev->wd_timeout) {
		dev->wd_timeout = 0;
		dev->wd_due_counter = 0;
		(void) memcpy(dev->wd_data, stop_wd_params,
		    HECI_WD_PARAMS_SIZE);
		dev->stop = 1;
		if (dev->host_buffer_is_empty &&
		    flow_ctrl_creds(dev, &dev->wd_file_ext)) {
			dev->host_buffer_is_empty = 0;

			if (!heci_send_wd(dev)) {
				DBG("send stop WD failed\n");
			} else
				flow_ctrl_reduce(dev, &dev->wd_file_ext);

			dev->wd_pending = 0;
		} else
			dev->wd_pending = 1;

		dev->wd_stoped = 0;

		err = 0;
		while (!dev->wd_stoped && err != -1) {
			err = cv_reltimedwait(&dev->wait_stop_wd,
			    &dev->device_lock, 10*HZ, TR_CLOCK_TICK);
		}

		if (!dev->wd_stoped) {
			DBG("stop wd failed to complete.\n");
		} else {
			DBG("stop wd complete.\n");
		}

	}

	mutex_exit(&dev->device_lock);

	if (dev->iamthif_file_ext.state == HECI_FILE_CONNECTED) {
		dev->iamthif_file_ext.state = HECI_FILE_DISCONNECTING;
		(void) heci_disconnect_host_client(dev,
		    &dev->iamthif_file_ext);
	}
	if (dev->wd_file_ext.state == HECI_FILE_CONNECTED) {
		dev->wd_file_ext.state = HECI_FILE_DISCONNECTING;
		(void) heci_disconnect_host_client(dev,
		    &dev->wd_file_ext);
	}


	/* remove entry if already in list */
	DBG("list del iamthif and wd file list.\n");
	heci_remove_client_from_file_list(dev, dev->wd_file_ext.
	    host_client_id);
	heci_remove_client_from_file_list(dev,
	    dev->iamthif_file_ext.host_client_id);

	dev->iamthif_current_cb = NULL;
	dev->iamthif_file_ext.file = NULL;

	/* disable interrupts */
	heci_csr_disable_interrupts(dev);

	ddi_remove_intr(dip, 0, dev->sc_iblk);

	if (dev->work)
		ddi_taskq_destroy(dev->work);
	if (dev->reinit_tsk)
		ddi_taskq_destroy(dev->reinit_tsk);
	if (dev->mem_addr)
		ddi_regs_map_free(&dev->io_handle);

	if (dev->me_clients && dev->num_heci_me_clients > 0) {
		kmem_free(dev->me_clients, sizeof (struct heci_me_client) *
		    dev->num_heci_me_clients);
	}

	dev->num_heci_me_clients = 0;

	heci_destroy_locks(dev);

	ddi_remove_minor_node(dip, NULL);
	ddi_soft_state_free(heci_soft_state_p, ddi_get_instance(dip));

	return (DDI_SUCCESS);
}