Example #1
/*
 * Init CPU info - get CPU type info for processor_info system call.
 */
void
init_cpu_info(struct cpu *cp)
{
	processor_info_t *pi = &cp->cpu_type_info;
	int cpuid = cp->cpu_id;
	struct cpu_node *cpunode = &cpunodes[cpuid];

	cp->cpu_fpowner = NULL;		/* not used for V9 */

	/*
	 * Get clock-frequency property from cpunodes[] for the CPU.
	 */
	pi->pi_clock = (cpunode->clock_freq + 500000) / 1000000;

	/*
	 * Current frequency in Hz.
	 */
	cp->cpu_curr_clock = cpunode->clock_freq;

	/*
	 * Supported frequencies.
	 */
	cpu_set_supp_freqs(cp, NULL);

	(void) strcpy(pi->pi_processor_type, "sparcv9");
	(void) strcpy(pi->pi_fputypes, "sparcv9");

	/*
	 * StarFire requires the signature block to be set up here
	 */
	CPU_SGN_MAPIN(cpuid);

	/*
	 * cpu0 is always initialized at boot time, but it can be initialized
	 * again if it is dynamically removed and then re-added. We check if
	 * we are booting by verifying cpu_list. During boot, cpu0 is already
	 * in cpu_list when this function is called. When a cpu is dynamically
	 * added (after boot), it is added to cpu_list after this
	 * function is called.
	 */
	if (cpuid == cpu0.cpu_id && ncpus == 1 && cpu_list[0].cpu_id == cpuid) {
		/*
		 * cpu0 starts out running.  Other cpus are
		 * still in OBP land and we will leave them
		 * alone for now.
		 */
		CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cpuid);
		/*
		 * On first cpu setup, tell hv we are booting
		 */
		mach_set_soft_state(SIS_TRANSITION,
		    &SOLARIS_SOFT_STATE_BOOT_MSG);
#ifdef	lint
		cpuid = cpuid;
#endif	/* lint */
	}
}
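
A note on the pi_clock line above: it converts the Hz value from the
clock-frequency property to whole MHz, adding half a MHz before the integer
division so the result rounds to the nearest MHz rather than truncating.
A minimal standalone sketch of that arithmetic (the sample frequencies are
made up for illustration):

#include <stdio.h>

/* Round a frequency in Hz to the nearest MHz, as init_cpu_info() does. */
static unsigned int
hz_to_mhz(unsigned long hz)
{
	return ((hz + 500000) / 1000000);
}

int
main(void)
{
	/* A part reported as 1503999744 Hz still shows up as 1504 MHz. */
	printf("%u\n", hz_to_mhz(1503999744UL));	/* prints 1504 */
	printf("%u\n", hz_to_mhz(1400000000UL));	/* prints 1400 */
	return (0);
}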
Example #2
/*
 * Init CPU info - get CPU type info for processor_info system call.
 */
void
init_cpu_info(struct cpu *cp)
{
	processor_info_t *pi = &cp->cpu_type_info;
	int cpuid = cp->cpu_id;
	struct cpu_node *cpunode = &cpunodes[cpuid];

	cp->cpu_fpowner = NULL;		/* not used for V9 */

	/*
	 * Get clock-frequency property from cpunodes[] for the CPU.
	 */
	pi->pi_clock = (cpunode->clock_freq + 500000) / 1000000;

	/*
	 * Current frequency in Hz.
	 */
	cp->cpu_curr_clock = cpunode->clock_freq;

	/*
	 * Supported frequencies.
	 */
	cpu_set_supp_freqs(cp, NULL);

	(void) strcpy(pi->pi_processor_type, "sparcv9");
	(void) strcpy(pi->pi_fputypes, "sparcv9");

	/*
	 * StarFire requires the signature block to be set up here
	 */
	CPU_SGN_MAPIN(cpuid);
	if (cpuid == cpu0.cpu_id) {
		/*
		 * cpu0 starts out running.  Other cpus are
		 * still in OBP land and we will leave them
		 * alone for now.
		 */
		CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cpuid);
#ifdef	lint
		cpuid = cpuid;
#endif	/* lint */
	}
}
Example #3
/*
 * Startup function executed on 'other' CPUs.  This is the first
 * C function after cpu_start sets up the cpu registers.
 */
static void
slave_startup(void)
{
	struct cpu	*cp = CPU;
	ushort_t	original_flags = cp->cpu_flags;

	mach_htraptrace_configure(cp->cpu_id);
	cpu_intrq_register(CPU);
	cp->cpu_m.mutex_ready = 1;

	/* acknowledge that we are done with initialization */
	CPUSET_ADD(proxy_ready_set, cp->cpu_id);

	/* synchronize STICK */
	sticksync_slave();

	if (boothowto & RB_DEBUG)
		kdi_dvec_cpu_init(cp);

	/*
	 * The slave will wait here forever, assuming that the master
	 * will get back to us.  If it doesn't, we've got bigger problems
	 * than a master not replying to this slave.
	 * The small delay improves the slave's responsiveness to the
	 * master's ack and decreases the time window between master and
	 * slave operations.
	 */
	while (!CPU_IN_SET(cpu_ready_set, cp->cpu_id))
		DELAY(1);

	/*
	 * The CPU is now in cpu_ready_set, safely able to take pokes.
	 */
	cp->cpu_m.poke_cpu_outstanding = B_FALSE;

	/* enable interrupts */
	(void) spl0();

	/*
	 * Signature block update to indicate that this CPU is in OS now.
	 * This needs to be done after the PIL is lowered since on
	 * some platforms the update code may block.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cp->cpu_id);

	/*
	 * park the slave thread in a safe/quiet state and wait for the master
	 * to finish configuring this CPU before proceeding to thread_exit().
	 */
	while (((volatile ushort_t)cp->cpu_flags) & CPU_QUIESCED)
		DELAY(1);

	/*
	 * Initialize CPC CPU state.
	 */
	kcpc_hw_startup_cpu(original_flags);

	/*
	 * Notify the PG subsystem that the CPU has started
	 */
	pg_cmt_cpu_startup(CPU);

	/*
	 * Now we are done with the startup thread, so free it up.
	 */
	thread_exit();
	cmn_err(CE_PANIC, "slave_startup: cannot return");
	/*NOTREACHED*/
}
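
The ready-set handshake above (the slave adds itself to proxy_ready_set, then
spins with a short DELAY until the master places it in cpu_ready_set) can be
mimicked in user space. A minimal sketch with POSIX threads, where atomic
flags stand in for the CPUSET_ADD/CPU_IN_SET kernel primitives:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int slave_ready;	/* slave -> master: initialization done */
static atomic_int master_ack;	/* master -> slave: you may proceed */

static void *
slave(void *arg)
{
	(void) arg;
	atomic_store(&slave_ready, 1);		/* like CPUSET_ADD() */
	while (!atomic_load(&master_ack))	/* like CPU_IN_SET() */
		usleep(1);			/* like DELAY(1) */
	printf("slave: released by master\n");
	return (NULL);
}

int
main(void)
{
	pthread_t tid;

	(void) pthread_create(&tid, NULL, slave, NULL);
	while (!atomic_load(&slave_ready))
		usleep(1);
	atomic_store(&master_ack, 1);		/* master's acknowledgement */
	(void) pthread_join(tid, NULL);
	return (0);
}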
Example #4
int
dr_suspend(dr_sr_handle_t *srh)
{
	dr_handle_t	*handle;
	int		force;
	int		dev_errs_idx;
	uint64_t	dev_errs[DR_MAX_ERR_INT];
	int		rc = DDI_SUCCESS;

	handle = srh->sr_dr_handlep;

	force = dr_cmd_flags(handle) & SBD_FLAG_FORCE;

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_QUIESCE_INPROGRESS, SIGSUBST_NULL,
	    CPU->cpu_id);

	i_ndi_block_device_tree_changes(&handle->h_ndi);

	prom_printf("\nDR: suspending user threads...\n");
	srh->sr_suspend_state = DR_SRSTATE_USER;
	if (((rc = dr_stop_user_threads(srh)) != DDI_SUCCESS) &&
	    dr_check_user_stop_result) {
		dr_resume(srh);
		return (rc);
	}

	if (!force) {
		struct dr_ref drc = {0};

		prom_printf("\nDR: checking devices...\n");
		dev_errs_idx = 0;

		drc.arr = dev_errs;
		drc.idx = &dev_errs_idx;
		drc.len = DR_MAX_ERR_INT;

		/*
		 * Since the root node can never go away, it
		 * doesn't have to be held.
		 */
		ddi_walk_devs(ddi_root_node(), dr_check_unsafe_major, &drc);
		if (dev_errs_idx) {
			handle->h_err = drerr_int(ESBD_UNSAFE, dev_errs,
				dev_errs_idx, 1);
			dr_resume(srh);
			return (DDI_FAILURE);
		}
		PR_QR("done\n");
	} else {
		prom_printf("\nDR: dr_suspend invoked with force flag\n");
	}

#ifndef	SKIP_SYNC
	/*
	 * This sync swaps out all user pages
	 */
	vfs_sync(SYNC_ALL);
#endif

	/*
	 * special treatment for lock manager
	 */
	lm_cprsuspend();

#ifndef	SKIP_SYNC
	/*
	 * sync the file system in case we never make it back
	 */
	sync();
#endif

	/*
	 * now suspend drivers
	 */
	prom_printf("DR: suspending drivers...\n");
	srh->sr_suspend_state = DR_SRSTATE_DRIVER;
	srh->sr_err_idx = 0;
	/* No parent to hold busy */
	if ((rc = dr_suspend_devices(ddi_root_node(), srh)) != DDI_SUCCESS) {
		if (srh->sr_err_idx && srh->sr_dr_handlep) {
			(srh->sr_dr_handlep)->h_err = drerr_int(ESBD_SUSPEND,
				srh->sr_err_ints, srh->sr_err_idx, 1);
		}
		dr_resume(srh);
		return (rc);
	}

	drmach_suspend_last();

	/*
	 * finally, grab all cpus
	 */
	srh->sr_suspend_state = DR_SRSTATE_FULL;

	/*
	 * if watchdog was activated, disable it
	 */
	if (watchdog_activated) {
		mutex_enter(&tod_lock);
		tod_ops.tod_clear_watchdog_timer();
		mutex_exit(&tod_lock);
		srh->sr_flags |= SR_FLAG_WATCHDOG;
	} else {
		srh->sr_flags &= ~(SR_FLAG_WATCHDOG);
	}

	/*
	 * Update the signature block.
	 * This must be done before cpus are paused, since on Starcat the
	 * cpu signature update acquires an adaptive mutex in the iosram driver.
	 * Blocking with cpus paused can lead to deadlock.
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_QUIESCED, SIGSUBST_NULL, CPU->cpu_id);

	mutex_enter(&cpu_lock);
	pause_cpus(NULL);
	dr_stop_intr();

	return (rc);
}
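
The unsafe-device scan above threads a small cursor (struct dr_ref) through
every ddi_walk_devs() callback so dr_check_unsafe_major() can append entries
to the caller's fixed-size dev_errs array without overflowing it. A
self-contained sketch of that accumulate-during-walk pattern (the walker,
item type, and "unsafe" test below are illustrative, not the DDI API):

#include <stdio.h>
#include <stdint.h>

#define	MAX_ERRS	32	/* mirrors DR_MAX_ERR_INT above */

struct walk_ref {
	uint64_t	*arr;	/* caller's error array */
	int		*idx;	/* running count, shared with the caller */
	int		len;	/* capacity, so the callback can't overflow */
};

/* Callback: record every "unsafe" item until the array fills up. */
static void
check_item(uint64_t item, struct walk_ref *ref)
{
	if ((item % 2) == 0 && *ref->idx < ref->len)	/* evens are "unsafe" */
		ref->arr[(*ref->idx)++] = item;
}

int
main(void)
{
	uint64_t errs[MAX_ERRS];
	int nerrs = 0;
	struct walk_ref ref = { errs, &nerrs, MAX_ERRS };

	for (uint64_t i = 1; i <= 10; i++)
		check_item(i, &ref);

	printf("%d unsafe items found\n", nerrs);	/* prints 5 */
	return (0);
}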
Example #5
void
dr_resume(dr_sr_handle_t *srh)
{
	dr_handle_t	*handle;

	handle = srh->sr_dr_handlep;

	if (srh->sr_suspend_state < DR_SRSTATE_FULL) {
		/*
		 * Update the signature block.
		 * If cpus are not paused, this can be done now.
		 * See comments below.
		 */
		CPU_SIGNATURE(OS_SIG, SIGST_RESUME_INPROGRESS, SIGSUBST_NULL,
		    CPU->cpu_id);
	}

	switch (srh->sr_suspend_state) {
	case DR_SRSTATE_FULL:

		ASSERT(MUTEX_HELD(&cpu_lock));

		/*
		 * Prevent false alarm in tod_validate() due to tod
		 * value change between suspend and resume
		 */
		mutex_enter(&tod_lock);
		tod_fault_reset();
		mutex_exit(&tod_lock);

		dr_enable_intr(); 	/* enable intr & clock */

		start_cpus();
		mutex_exit(&cpu_lock);

		/*
		 * Update the signature block.
		 * This must not be done while cpus are paused, since on
		 * Starcat the cpu signature update acquires an adaptive
		 * mutex in the iosram driver. Blocking with cpus paused
		 * can lead to deadlock.
		 */
		CPU_SIGNATURE(OS_SIG, SIGST_RESUME_INPROGRESS, SIGSUBST_NULL,
		    CPU->cpu_id);

		/*
		 * If we suspended hw watchdog at suspend,
		 * re-enable it now.
		 */
		if (srh->sr_flags & (SR_FLAG_WATCHDOG)) {
			mutex_enter(&tod_lock);
			tod_ops.tod_set_watchdog_timer(
				watchdog_timeout_seconds);
			mutex_exit(&tod_lock);
		}

		/*
		 * This should only be called if drmach_suspend_last()
		 * was called and state transitioned to DR_SRSTATE_FULL
		 * to prevent resume attempts on device instances that
		 * were not previously suspended.
		 */
		drmach_resume_first();

		/* FALLTHROUGH */

	case DR_SRSTATE_DRIVER:
		/*
		 * resume drivers
		 */
		srh->sr_err_idx = 0;

		/* no parent dip to hold busy */
		dr_resume_devices(ddi_root_node(), srh);

		if (srh->sr_err_idx && srh->sr_dr_handlep) {
			(srh->sr_dr_handlep)->h_err = drerr_int(ESBD_RESUME,
				srh->sr_err_ints, srh->sr_err_idx, 1);
		}

		/*
		 * resume the lock manager
		 */
		lm_cprresume();

		/* FALLTHROUGH */

	case DR_SRSTATE_USER:
		/*
		 * finally, resume user threads
		 */
		if (!dr_skip_user_threads) {
			prom_printf("DR: resuming user threads...\n");
			dr_start_user_threads();
		}
		/* FALLTHROUGH */

	case DR_SRSTATE_BEGIN:
	default:
		/*
		 * let those who care know that we've just resumed
		 */
		PR_QR("sending SIGTHAW...\n");
		dr_signal_user(SIGTHAW);
		break;
	}

	i_ndi_allow_device_tree_changes(handle->h_ndi);

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, CPU->cpu_id);

	prom_printf("DR: resume COMPLETED\n");
}
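
The switch in dr_resume() unwinds exactly as far as dr_suspend() got: it
dispatches on the recorded suspend state and deliberately falls through to
every earlier stage, so a suspend that failed midway is undone in reverse
order. A self-contained sketch of that staged-unwind idiom (the stage names
and actions are illustrative):

#include <stdio.h>

enum stage { ST_BEGIN, ST_USER, ST_DRIVER, ST_FULL };

/* Undo only the stages that were actually reached, latest first. */
static void
unwind(enum stage reached)
{
	switch (reached) {
	case ST_FULL:
		printf("restart cpus\n");
		/* FALLTHROUGH */
	case ST_DRIVER:
		printf("resume drivers\n");
		/* FALLTHROUGH */
	case ST_USER:
		printf("resume user threads\n");
		/* FALLTHROUGH */
	case ST_BEGIN:
	default:
		printf("signal SIGTHAW\n");
		break;
	}
}

int
main(void)
{
	unwind(ST_DRIVER);	/* as if suspend failed after the driver stage */
	return (0);
}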
Example #6
static int
rmc_comm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct rmc_comm_state *rcs = NULL;
	sig_state_t *current_sgn_p;
	int instance;

	/*
	 * only allow one instance
	 */
	instance = ddi_get_instance(dip);
	if (instance != 0)
		return (DDI_FAILURE);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		if ((rcs = rmc_comm_getstate(dip, instance,
		    "rmc_comm_attach")) == NULL)
			return (DDI_FAILURE);	/* this "can't happen" */

		rmc_comm_hw_reset(rcs);
		rmc_comm_set_irq(rcs, B_TRUE);
		rcs->dip = dip;

		mutex_enter(&tod_lock);
		if (watchdog_enable && tod_ops.tod_set_watchdog_timer != NULL &&
		    watchdog_was_active) {
			(void) tod_ops.tod_set_watchdog_timer(0);
		}
		mutex_exit(&tod_lock);

		mutex_enter(rcs->dp_state.dp_mutex);
		dp_reset(rcs, INITIAL_SEQID, 1, 1);
		mutex_exit(rcs->dp_state.dp_mutex);

		current_sgn_p = (sig_state_t *)modgetsymvalue(
			"current_sgn", 0);
		if ((current_sgn_p != NULL) &&
			(current_sgn_p->state_t.sig != 0)) {
			CPU_SIGNATURE(current_sgn_p->state_t.sig,
				current_sgn_p->state_t.state,
				current_sgn_p->state_t.sub_state, -1);
		}
		return (DDI_SUCCESS);

	case DDI_ATTACH:
		break;
	}

	/*
	 *  Allocate the soft-state structure
	 */
	if (ddi_soft_state_zalloc(rmc_comm_statep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_attach")) ==
	    NULL) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}
	ddi_set_driver_private(dip, rcs);

	rcs->dip = NULL;

	/*
	 *  Set various options from .conf properties
	 */
	rcs->baud = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "baud-rate", 0);
	rcs->debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "debug", 0);

	/*
	 * the baud divisor factor tells us how to scale the result of
	 * the SIO_BAUD_TO_DIVISOR macro for platforms which do not
	 * use the standard 24 MHz UART clock
	 */
	rcs->baud_divisor_factor = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "baud-divisor-factor", SIO_BAUD_DIVISOR_MIN);

	/*
	 * try to be reasonable if the scale factor contains a silly value
	 */
	if ((rcs->baud_divisor_factor < SIO_BAUD_DIVISOR_MIN) ||
	    (rcs->baud_divisor_factor > SIO_BAUD_DIVISOR_MAX))
		rcs->baud_divisor_factor = SIO_BAUD_DIVISOR_MIN;

	/*
	 * initialize serial device
	 */
	if (rmc_comm_serdev_init(rcs, dip) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}

	/*
	 * initialize data protocol
	 */
	rmc_comm_dp_init(rcs);

	/*
	 * initialize driver interface
	 */
	if (rmc_comm_drvintf_init(rcs) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 1, 1);
		return (DDI_FAILURE);
	}

	/*
	 *  Initialize devinfo-related fields
	 */
	rcs->majornum = ddi_driver_major(dip);
	rcs->instance = instance;
	rcs->dip = dip;

	/*
	 * enable interrupts now
	 */
	rmc_comm_set_irq(rcs, B_TRUE);

	/*
	 *  All done, report success
	 */
	ddi_report_dev(dip);
	mutex_enter(&rmc_comm_attach_lock);
	rcs->is_attached = B_TRUE;
	mutex_exit(&rmc_comm_attach_lock);
	return (DDI_SUCCESS);
}
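
One defensive detail worth noting in rmc_comm_attach(): the baud-divisor
factor read from the .conf file is clamped back to the minimum whenever it
falls outside the supported range. A standalone sketch of that clamp (the
MIN/MAX values here are placeholders, not the real SIO_BAUD_DIVISOR_*
constants):

#include <stdio.h>

#define	DIVISOR_MIN	1	/* placeholder for SIO_BAUD_DIVISOR_MIN */
#define	DIVISOR_MAX	16	/* placeholder for SIO_BAUD_DIVISOR_MAX */

/* Fall back to the minimum if the configured factor is silly. */
static int
sane_divisor_factor(int configured)
{
	if (configured < DIVISOR_MIN || configured > DIVISOR_MAX)
		return (DIVISOR_MIN);
	return (configured);
}

int
main(void)
{
	printf("%d\n", sane_divisor_factor(8));		/* in range: 8 */
	printf("%d\n", sane_divisor_factor(-3));	/* reset to 1 */
	return (0);
}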
Example #7
int
sbdp_suspend(sbdp_sr_handle_t *srh)
{
	int force;
	int rc = DDI_SUCCESS;

	force = (srh && (srh->sr_flags & SBDP_IOCTL_FLAG_FORCE));

	/*
	 * note whether the force flag was passed in
	 */
	if (force) {
		SBDP_DBG_QR("\nsbdp_suspend invoked with force flag");
	}

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_QUIESCE_INPROGRESS, SIGSUBST_NULL,
	    CPU->cpu_id);

	/*
	 * first, stop all user threads
	 */
	SBDP_DBG_QR("SBDP: suspending user threads...\n");
	SR_SET_STATE(srh, SBDP_SRSTATE_USER);
	if (((rc = sbdp_stop_user_threads(srh)) != DDI_SUCCESS) &&
	    sbdp_check_user_stop_result) {
		sbdp_resume(srh);
		return (rc);
	}

#ifndef	SKIP_SYNC
	/*
	 * This sync swaps out all user pages
	 */
	vfs_sync(SYNC_ALL);
#endif

	/*
	 * special treatment for lock manager
	 */
	lm_cprsuspend();

#ifndef	SKIP_SYNC
	/*
	 * sync the file system in case we never make it back
	 */
	sync();
#endif

	/*
	 * now suspend drivers
	 */
	SBDP_DBG_QR("SBDP: suspending drivers...\n");
	SR_SET_STATE(srh, SBDP_SRSTATE_DRIVER);

	/*
	 * Root node doesn't have to be held in any way.
	 */
	if ((rc = sbdp_suspend_devices(ddi_root_node(), srh)) != DDI_SUCCESS) {
		sbdp_resume(srh);
		return (rc);
	}

	/*
	 * finally, grab all cpus
	 */
	SR_SET_STATE(srh, SBDP_SRSTATE_FULL);

	/*
	 * if watchdog was activated, disable it
	 */
	if (watchdog_activated) {
		mutex_enter(&tod_lock);
		saved_watchdog_seconds = tod_ops.tod_clear_watchdog_timer();
		mutex_exit(&tod_lock);
		SR_SET_FLAG(srh, SR_FLAG_WATCHDOG);
	} else {
		SR_CLEAR_FLAG(srh, SR_FLAG_WATCHDOG);
	}

	mutex_enter(&cpu_lock);
	pause_cpus(NULL);
	sbdp_stop_intr();

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_QUIESCED, SIGSUBST_NULL, CPU->cpu_id);

	return (rc);
}
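
sbdp_suspend() records both whether the watchdog was armed (SR_FLAG_WATCHDOG)
and its remaining seconds, so that sbdp_resume() below re-arms it only if it
was armed before the suspend. A minimal sketch of that save-then-conditionally-
restore pairing (the watchdog state here is a stand-in, not the tod_ops
interface):

#include <stdio.h>

static int watchdog_armed = 1;	/* pretend the watchdog starts out armed */
static int watchdog_seconds = 60;

static int saved_seconds;	/* like saved_watchdog_seconds */
static int flag_watchdog;	/* like SR_FLAG_WATCHDOG */

static void
suspend_watchdog(void)
{
	if (watchdog_armed) {
		saved_seconds = watchdog_seconds;	/* clear and remember */
		watchdog_armed = 0;
		flag_watchdog = 1;
	} else {
		flag_watchdog = 0;
	}
}

static void
resume_watchdog(void)
{
	if (flag_watchdog) {		/* re-arm only if armed before */
		watchdog_seconds = saved_seconds;
		watchdog_armed = 1;
	}
}

int
main(void)
{
	suspend_watchdog();
	resume_watchdog();
	printf("armed=%d seconds=%d\n", watchdog_armed, watchdog_seconds);
	return (0);
}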
Example #8
void
sbdp_resume(sbdp_sr_handle_t *srh)
{
	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RESUME_INPROGRESS, SIGSUBST_NULL,
	    CPU->cpu_id);

	switch (SR_STATE(srh)) {
	case SBDP_SRSTATE_FULL:

		ASSERT(MUTEX_HELD(&cpu_lock));

		/*
		 * Prevent false alarm in tod_validate() due to tod
		 * value change between suspend and resume
		 */
		mutex_enter(&tod_lock);
		tod_fault_reset();
		mutex_exit(&tod_lock);

		sbdp_enable_intr(); 	/* enable intr & clock */

		/*
		 * release all the other cpus
		 * using start_cpus() instead of sbdp_release_cpus()
		 */
		start_cpus();
		mutex_exit(&cpu_lock);

		/*
		 * If we suspended hw watchdog at suspend,
		 * re-enable it now.
		 */
		if (SR_CHECK_FLAG(srh, SR_FLAG_WATCHDOG)) {
			mutex_enter(&tod_lock);
			tod_ops.tod_set_watchdog_timer(
				saved_watchdog_seconds);
			mutex_exit(&tod_lock);
		}

		/* FALLTHROUGH */

	case SBDP_SRSTATE_DRIVER:
		/*
		 * resume devices: root node doesn't have to
		 * be held in any way.
		 */
		sbdp_resume_devices(ddi_root_node(), srh);

		/*
		 * resume the lock manager
		 */
		lm_cprresume();

		/* FALLTHROUGH */

	case SBDP_SRSTATE_USER:
		/*
		 * finally, resume user threads
		 */
		if (!sbdp_skip_user_threads) {
			SBDP_DBG_QR("DR: resuming user threads...\n");
			sbdp_start_user_threads();
		}
		/* FALLTHROUGH */

	case SBDP_SRSTATE_BEGIN:
	default:
		/*
		 * let those who care know that we've just resumed
		 */
		SBDP_DBG_QR("sending SIGTHAW...\n");
		sbdp_signal_user(SIGTHAW);
		break;
	}

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, CPU->cpu_id);

	SBDP_DBG_QR("DR: resume COMPLETED\n");
}