Example #1
void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
{
	struct _lowcore *lc, *current_lc;
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long sp;

	if (smp_processor_id() == 0)
		func(data);
	__load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY);
	/* Disable lowcore protection */
	__ctl_clear_bit(0, 28);
	current_lc = lowcore_ptr[smp_processor_id()];
	lc = lowcore_ptr[0];
	if (!lc)
		lc = current_lc;
	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
	if (!cpu_online(0))
		smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
	while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
		cpu_relax();
	sp = lc->panic_stack;
	sp -= sizeof(struct pt_regs);
	regs = (struct pt_regs *) sp;
	memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
	regs->psw = lc->psw_save_area;
	sp -= STACK_FRAME_OVERHEAD;
	sf = (struct stack_frame *) sp;
	sf->back_chain = regs->gprs[15];
	smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
}
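
Nearly every example here toggles a control-register bit with __ctl_clear_bit()/__ctl_set_bit(), e.g. CR0 bit 28 for lowcore protection above. The kernel helpers are a store/modify/load round trip on the control register (__ctl_store(), flip the bit, __ctl_load()), and they count bits from the least-significant end, so bit 28 of a 64-bit CR0 is the architectural bit 35, the low-address-protection control. Below is a minimal user-space sketch of that read-modify-write; fake_cr0 and the *_sim names are hypothetical stand-ins, not kernel API.

#include <stdio.h>

/* Model of the kernel's __ctl_set_bit()/__ctl_clear_bit() read-modify-write.
 * The real helpers move the actual control register with stctg/lctlg;
 * here a plain variable stands in for CR0. */
static unsigned long fake_cr0;

static void ctl_set_bit_sim(unsigned long *cr, unsigned int bit)
{
	*cr |= 1UL << bit;	/* kernel numbers CR bits from the LSB */
}

static void ctl_clear_bit_sim(unsigned long *cr, unsigned int bit)
{
	*cr &= ~(1UL << bit);
}

int main(void)
{
	ctl_set_bit_sim(&fake_cr0, 28);		/* "enable lowcore protection" */
	printf("CR0 = %016lx\n", fake_cr0);	/* 0000000010000000 */
	ctl_clear_bit_sim(&fake_cr0, 28);	/* "disable lowcore protection" */
	printf("CR0 = %016lx\n", fake_cr0);	/* 0000000000000000 */
	return 0;
}
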
Example #2
/*
 *	Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Set up the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* call cpu notifiers */
	notify_cpu_starting(smp_processor_id());
	/* Mark this cpu as online */
	ipi_call_lock();
	set_cpu_online(smp_processor_id(), true);
	ipi_call_unlock();
	__ctl_clear_bit(0, 28); /* Disable lowcore protection */
	S390_lowcore.restart_psw.mask =
		PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
	S390_lowcore.restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
	__ctl_set_bit(0, 28); /* Enable lowcore protection */
	local_irq_enable();
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
Example #3
/*
 *	Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Set up the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* call cpu notifiers */
	notify_cpu_starting(smp_processor_id());
	/* Mark this cpu as online */
	ipi_call_lock();
	set_cpu_online(smp_processor_id(), true);
	ipi_call_unlock();
	__ctl_clear_bit(0, 28); /* Disable lowcore protection */
	S390_lowcore.restart_psw.mask =
		PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
	S390_lowcore.restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
	__ctl_set_bit(0, 28); /* Enable lowcore protection */
	/*
	 * Wait until the cpu which brought this one up marked it
	 * active before enabling interrupts.
	 */
	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
		cpu_relax();
	local_irq_enable();
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
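
The extra loop at the end of Example #3 exists so the secondary cpu does not enable interrupts before the boot cpu has published it in cpu_active_mask. As a rough user-space analogue of that publish-then-spin handshake, here is a C11-atomics sketch; cpu_active, secondary() and the thread scaffolding are illustrative stand-ins, assuming a C11 threads.h is available.

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static atomic_bool cpu_active;	/* stand-in for this cpu's bit in cpu_active_mask */

static int secondary(void *arg)
{
	(void)arg;
	/* Analogue of the cpumask_test_cpu()/cpu_relax() loop: spin until
	 * the bring-up side publishes the "active" state. */
	while (!atomic_load_explicit(&cpu_active, memory_order_acquire))
		thrd_yield();	/* stand-in for cpu_relax() */
	puts("secondary: active, would enable interrupts now");
	return 0;
}

int main(void)
{
	thrd_t t;

	thrd_create(&t, secondary, NULL);
	/* Boot cpu side: finish bring-up, then publish the active state. */
	atomic_store_explicit(&cpu_active, true, memory_order_release);
	thrd_join(t, NULL);
	return 0;
}
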
Example #4
/*
 * machine check handler.
 */
void
s390_do_machine_check(void)
{
    struct mci *mci;

    mci = (struct mci *) &S390_lowcore.mcck_interruption_code;

    if (mci->sd)		/* system damage */
        s390_handle_damage("received system damage machine check\n");

    if (mci->pd)		/* instruction processing damage */
        s390_handle_damage("received instruction processing "
                           "damage machine check\n");

    if (mci->se)		/* storage error uncorrected */
        s390_handle_damage("received storage error uncorrected "
                           "machine check\n");

    if (mci->sc)		/* storage error corrected */
        printk(KERN_WARNING
               "received storage error corrected machine check\n");

    if (mci->ke)		/* storage key-error uncorrected */
        s390_handle_damage("received storage key-error uncorrected "
                           "machine check\n");

    if (mci->ds && mci->fa)	/* storage degradation */
        s390_handle_damage("received storage degradation machine "
                           "check\n");

    if (mci->cp)		/* channel report word pending */
        up(&m_sem);

#ifdef CONFIG_MACHCHK_WARNING
    /*
     * The warning may remain for a prolonged period on the bare iron
     * (actually until the machine is powered off, or until the problem is
     * gone), so we just stop listening for the WARNING MCH to avoid being
     * interrupted continuously.  One caveat, however, is that we must do
     * this per processor and cannot use the smp version of ctl_clear_bit().
     * On VM we only get one interrupt per virtually presented machine check;
     * though one suffices, we may get one interrupt per (virtual) processor.
     */
    if (mci->w) {	/* WARNING pending ? */
        static int mchchk_wng_posted = 0;
        /*
         * Use single machine clear, as we cannot handle smp right now
         */
        __ctl_clear_bit(14, 24);	/* Disable WARNING MCH */
        if (xchg(&mchchk_wng_posted, 1) == 0)
            kill_proc(1, SIGPWR, 1);
    }
#endif
}
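
The xchg(&mchchk_wng_posted, 1) test in Example #4 is the usual post-at-most-once idiom: every processor may take the WARNING machine check, but only the first one to flip the flag actually signals init with SIGPWR. A self-contained illustration using C11 atomic_exchange; the names here are hypothetical.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int wng_posted;	/* 0 = not yet posted */

/* Called from every cpu's machine check path; only the caller that
 * observes the 0 -> 1 transition actually posts the signal. */
static void post_warning_once(int cpu)
{
	if (atomic_exchange(&wng_posted, 1) == 0)
		printf("cpu %d: posting SIGPWR to init\n", cpu);
	else
		printf("cpu %d: already posted, ignoring\n", cpu);
}

int main(void)
{
	post_warning_once(0);	/* posts */
	post_warning_once(1);	/* no-op */
	return 0;
}
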
Example #5
void detect_memory_layout(struct mem_chunk chunk[])
{
	unsigned long flags, cr0;

	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
	/* Disable IRQs, DAT and low address protection so tprot does the
	 * right thing and we don't get scheduled away with low address
	 * protection disabled.
	 */
	flags = __arch_local_irq_stnsm(0xf8);
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);
	find_memory_chunks(chunk);
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
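
__arch_local_irq_stnsm(0xf8) in Example #5 ANDs an immediate into the leftmost PSW byte in a single instruction. In that byte (counting from the most significant bit, as the architecture does) bit 5 is DAT, bit 6 the I/O mask and bit 7 the external mask, so the mask 0xf8 (binary 1111 1000) switches off DAT and both maskable interrupt classes at once while preserving the upper bits; low-address protection is then dropped separately via __ctl_clear_bit(0, 28). A tiny demonstration of the mask arithmetic, where psw_byte0 is a stand-in, not the real PSW:

#include <stdio.h>

int main(void)
{
	unsigned char psw_byte0 = 0xff;	/* pretend every mask bit is on */

	/* stnsm 0xf8: AND the system mask with 1111 1000 - clears bit 5
	 * (DAT), bit 6 (I/O mask) and bit 7 (external mask) while keeping
	 * bits 0-4, including PER in bit 1. */
	psw_byte0 &= 0xf8;
	printf("system mask after stnsm 0xf8: %02x\n", psw_byte0);	/* f8 */
	return 0;
}
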
Example #6
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	__load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT |
			PSW_MASK_WAIT | PSW_DEFAULT_KEY);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}
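
The barrier() before re-reading the sccb is a pure compiler barrier: the service element fills the buffer behind the compiler's back, so any values cached in registers must be discarded before the caller inspects it. In GCC-flavored C the canonical definition is an empty asm with a memory clobber; a minimal sketch follows, with an invented sccb layout for illustration.

#include <stdio.h>

/* Classic compiler barrier: emits no instructions, but the "memory"
 * clobber forces the compiler to discard cached memory values. */
#define barrier()	__asm__ __volatile__("" ::: "memory")

static unsigned char sccb[8];	/* hypothetical response buffer */

static int read_response(void)
{
	/* In the real driver, firmware fills the sccb after
	 * sclp_service_call(); the compiler cannot see that write. */
	barrier();	/* contents of the sccb might have changed */
	return sccb[0];	/* re-read from memory, not from a register */
}

int main(void)
{
	sccb[0] = 0x10;	/* stand-in for a firmware-written byte */
	printf("response: %02x\n", read_response());
	return 0;
}
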
Example #7
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
			PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}
Example #8
void save_processor_state(void)
{
	/* swsusp_arch_suspend() actually saves all cpu register contents.
	 * Machine checks must be disabled since swsusp_arch_suspend() stores
	 * register contents to their lowcore save areas. That's the same
	 * place where register contents on machine checks would be saved.
	 * To avoid register corruption disable machine checks.
	 * We must also disable machine checks in the new psw mask for
	 * program checks, since swsusp_arch_suspend() may generate program
	 * checks. Disabling machine checks for all other new psw masks is
	 * just paranoia.
	 */
	local_mcck_disable();
	/* Disable lowcore protection */
	__ctl_clear_bit(0, 28);
	S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK;
	S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK;
	S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK;
	S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK;
}
Example #9
static void css_reset(void)
{
	int i, ret;
	unsigned long long timeout;
	struct chp_id chpid;

	/* Reset subchannels. */
	for_each_subchannel(__shutdown_subchannel_easy, NULL);
	/* Reset channel paths. */
	s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
	/* Enable channel report machine checks. */
	__ctl_set_bit(14, 28);
	/* Temporarily reenable machine checks. */
	local_mcck_enable();
	chp_id_init(&chpid);
	for (i = 0; i <= __MAX_CHPID; i++) {
		chpid.id = i;
		ret = rchp(chpid);
		if ((ret == 0) || (ret == 2))
			/*
			 * rchp either succeeded, or another rchp is already
			 * in progress. In either case, we'll get a crw.
			 */
			atomic_inc(&chpid_reset_count);
	}
	/* Wait for machine check for all channel paths. */
	timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
	while (atomic_read(&chpid_reset_count) != 0) {
		if (get_tod_clock_fast() > timeout)
			break;
		cpu_relax();
	}
	/* Disable machine checks again. */
	local_mcck_disable();
	/* Disable channel report machine checks. */
	__ctl_clear_bit(14, 28);
	s390_base_mcck_handler_fn = NULL;
}
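
The wait loop in css_reset() is deadline polling against the TOD clock: TOD bit 51 ticks once per microsecond, so a microsecond budget shifted left by 12 yields TOD-clock units (which suggests RCHP_TIMEOUT is kept in microseconds), and the loop just compares against the precomputed deadline. A portable sketch of the same shape using clock_gettime; RCHP_TIMEOUT_US and now_us() are hypothetical.

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define RCHP_TIMEOUT_US	(30 * 1000 * 1000ULL)	/* hypothetical: 30s budget */

static atomic_int chpid_reset_count;	/* decremented by the "interrupt" side */

static unsigned long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
}

int main(void)
{
	/* Same shape as the css_reset() loop: precompute a deadline, then
	 * poll until the counter drains or the deadline passes. */
	unsigned long long timeout = now_us() + RCHP_TIMEOUT_US;

	atomic_store(&chpid_reset_count, 0);	/* pretend all crws arrived */
	while (atomic_load(&chpid_reset_count) != 0) {
		if (now_us() > timeout)
			break;
	}
	puts("all channel paths reported (or timed out)");
	return 0;
}
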
Example #10
/*
 * s390_do_machine_check
 *
 * Machine check pre-processor: collects the machine check info,
 * queues it, and posts the machine check handler for processing.
 */
void s390_do_machine_check(void)
{
	int crw_count;
	mcic_t mcic;
	trapid_t ltt_interruption_code;
	uint32_t ltt_old_psw;

#ifdef S390_MACHCHK_DEBUG
	printk(KERN_INFO "s390_do_machine_check : starting ...\n");
#endif

	memcpy(&mcic, &S390_lowcore.mcck_interruption_code, sizeof(__u64));
	memcpy(&ltt_interruption_code, &S390_lowcore.mcck_interruption_code,
	       sizeof(__u64));
	memcpy(&ltt_old_psw, &S390_lowcore.mcck_old_psw, sizeof(uint32_t));
	ltt_old_psw &= PSW_ADDR_MASK;
	TRACE_TRAP_ENTRY(ltt_interruption_code, ltt_old_psw);

	if (mcic.mcc.mcd.sd) /* system damage */
		s390_handle_damage("received system damage machine check\n");

	if (mcic.mcc.mcd.pd) /* instruction processing damage */
		s390_handle_damage("received instruction processing damage machine check\n");

	if (mcic.mcc.mcd.se) /* storage error uncorrected */
		s390_handle_damage("received storage error uncorrected machine check\n");

	if (mcic.mcc.mcd.sc) /* storage error corrected */
		printk(KERN_WARNING "received storage error corrected machine check\n");

	if (mcic.mcc.mcd.ke) /* storage key-error uncorrected */
		s390_handle_damage("received storage key-error uncorrected machine check\n");

	if (mcic.mcc.mcd.ds && mcic.mcc.mcd.fa) /* storage degradation */
		s390_handle_damage("received storage degradation machine check\n");

	if (mcic.mcc.mcd.cp) {	/* channel report word pending? */
		crw_count = s390_collect_crw_info();
		if (crw_count)
			up(&s_sem);
	}
#ifdef CONFIG_MACHCHK_WARNING
/*
 * The warning may remain for a prolonged period on the bare iron
 * (actually until the machine is powered off, or until the problem is
 * gone), so we just stop listening for the WARNING MCH to avoid being
 * interrupted continuously.  One caveat, however, is that we must do this
 * per processor and cannot use the smp version of ctl_clear_bit().
 * On VM we only get one interrupt per virtually presented machine check;
 * though one suffices, we may get one interrupt per (virtual) processor.
 */
	if (mcic.mcc.mcd.w) {	/* WARNING pending? */
		/* Use single machine clear, as we cannot handle smp right now. */
		__ctl_clear_bit(14, 24);	/* Disable WARNING MCH */

		if (!mchchk_wng_posted) {
			mchchk_wng_posted = s390_post_warning();
			if (mchchk_wng_posted)
				up(&s_sem);
		}
	}
#endif

#ifdef S390_MACHCHK_DEBUG
	printk(KERN_INFO "s390_do_machine_check : done\n");
#endif
}
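
Both machine check variants defer the real work to a thread: the interrupt path only up()s a semaphore (m_sem/s_sem) and the handler thread sleeps in the corresponding down(). A user-space analogue with POSIX semaphores; the names are illustrative.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t s_sem;	/* analogue of the kernel's s_sem / m_sem */

static void *mcck_thread(void *arg)
{
	(void)arg;
	sem_wait(&s_sem);	/* sleep until the "interrupt" posts us */
	puts("handler thread: processing queued machine check work");
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&s_sem, 0, 0);
	pthread_create(&t, NULL, mcck_thread, NULL);
	/* Interrupt-context side: queue the work, then wake the handler. */
	sem_post(&s_sem);	/* analogue of up(&s_sem) */
	pthread_join(t, NULL);
	sem_destroy(&s_sem);
	return 0;
}
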