Example No. 1
0
void smp_local_timer_interrupt(struct pt_regs * regs)
{
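	/*
	 * Each CPU keeps a profiling countdown in its PACA: when it hits
	 * zero, account a timer tick to the current context and reload
	 * the counter from prof_multiplier.
	 */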
	if (!--(get_paca()->prof_counter)) {
		update_process_times(user_mode(regs));
		(get_paca()->prof_counter)=get_paca()->prof_multiplier;
	}
}
Example No. 2
0
asmlinkage int ppc64_rtas(struct rtas_args *uargs)
{
	struct rtas_args args;
	unsigned long flags;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
		return -EFAULT;

	if (args.nargs > ARRAY_SIZE(args.args)
	    || args.nret > ARRAY_SIZE(args.args)
	    || args.nargs + args.nret > ARRAY_SIZE(args.args))
		return -EINVAL;

	/* Copy in args. */
	if (copy_from_user(args.args, uargs->args,
			   args.nargs * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	spin_lock_irqsave(&rtas.lock, flags);
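	/*
	 * RTAS is entered in real mode, so the argument block must sit at
	 * an easily computed physical address: stage it in this CPU's
	 * PACA and serialize the call through the global rtas lock.
	 */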
	get_paca()->xRtas = args;
	enter_rtas((void *)__pa((unsigned long)&get_paca()->xRtas));
	args = get_paca()->xRtas;
	spin_unlock_irqrestore(&rtas.lock, flags);

	/* Copy out args. */
	if (copy_to_user(uargs->args + args.nargs,
			 args.args + args.nargs,
			 args.nret * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	return 0;
}
Example No. 3
0
/** Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we setup percpu data */
	get_paca()->data_offset = 0;
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);
}
Example No. 4
0
void __init early_setup(unsigned long dt_ptr)
{
	/* Identify CPU type */
	identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	setup_paca(0);

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initializations using the flattened device
	 * tree, like retrieving the physical memory map or
	 * calculating/retrieving the hash table size
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	setup_paca(boot_cpuid);

	/* Fix up paca fields required for the boot cpu */
	get_paca()->cpu_start = 1;
	get_paca()->stab_real = __pa((u64)&initial_stab);
	get_paca()->stab_addr = (u64)&initial_stab;
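	/*
	 * stab_real/stab_addr record the physical and virtual addresses
	 * of the initial segment table, consumed by stab_initialize()
	 * below on CPUs without SLB support.
	 */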

	/* Probe the machine type */
	probe_machine();

	setup_kdump_trampoline();

	DBG("Found, Initializing memory management...\n");

	/*
	 * Initialize the MMU Hash table and create the linear mapping
	 * of memory. Has to be done before stab/slb initialization as
	 * this is currently where the page size encoding is obtained
	 */
	htab_initialize();

	/*
	 * Initialize stab / SLB management except on iSeries
	 */
	if (cpu_has_feature(CPU_FTR_SLB))
		slb_initialize();
	else if (!firmware_has_feature(FW_FEATURE_ISERIES))
		stab_initialize(get_paca()->stab_real);

	DBG(" <- early_setup()\n");
}
Example No. 5
0
int proc_pmc_set_mmcr0( struct file *file, const char *buffer, unsigned long count, void *data )
{
	unsigned long v;
	v = proc_pmc_conv_int( buffer, count );
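	/* 0x04000000 is the performance monitor exception enable (PMXE) bit. */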
	v = v & ~0x04000000;	/* Don't allow interrupts for now */
	if ( v & ~0x80000000 ) 	/* Inform hypervisor we are using PMCs */
		get_paca()->xLpPacaPtr->xPMCRegsInUse = 1;
	else
		get_paca()->xLpPacaPtr->xPMCRegsInUse = 0;
	mtspr( MMCR0, v );
	
	return count;	
}
Example No. 6
0
static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
	xics_kexec_teardown_cpu(secondary);

	/* On OPAL v3, we return all CPUs to firmware */

	if (!firmware_has_feature(FW_FEATURE_OPALv3))
		return;

	if (secondary) {
		/* Return secondary CPUs to firmware on OPAL v3 */
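		/*
		 * Publish our state with barriers so the CPU running
		 * kexec_prepare_cpus_wait() sees KEXEC_STATE_REAL_MODE
		 * before we vanish into firmware.
		 */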
		mb();
		get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
		mb();

		/* Return the CPU to OPAL */
		opal_return_cpu();
	} else if (crash_shutdown) {
		/*
		 * On crash, we don't wait for secondaries to go
		 * down as they might be unreachable or hung, so
		 * instead we just wait a bit and move on.
		 */
		mdelay(1);
	} else {
		/* Primary waits for the secondaries to have reached OPAL */
		pnv_kexec_wait_secondaries_down();
	}
}
Example No. 7
0
static void yield_shared_processor(void)
{
	unsigned long tb;
	unsigned long yieldTime;

	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
				    HvCall_MaskLpEvent |
				    HvCall_MaskLpProd |
				    HvCall_MaskTimeout);

	tb = get_tb();
	/* Compute future tb value when yield should expire */
	HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);

	yieldTime = get_tb() - tb;
	if (yieldTime > maxYieldTime)
		maxYieldTime = yieldTime;

	if (yieldTime < minYieldTime)
		minYieldTime = yieldTime;
	
	/*
	 * The decrementer stops during the yield.  Force a fake decrementer
	 * here and let the timer_interrupt code sort out the actual time.
	 */
	get_paca()->lppaca.xIntDword.xFields.xDecrInt = 1;
	process_iSeries_events();
}
Example No. 8
0
static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
	if (xive_enabled())
		xive_kexec_teardown_cpu(secondary);
	else
		xics_kexec_teardown_cpu(secondary);

	/* On OPAL, we return all CPUs to firmware */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	if (secondary) {
		/* Return secondary CPUs to firmware on OPAL v3 */
		mb();
		get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
		mb();

		/* Return the CPU to OPAL */
		opal_return_cpu();
	} else {
		/* Primary waits for the secondaries to have reached OPAL */
		pnv_kexec_wait_secondaries_down();

		/* Switch XIVE back to emulation mode */
		if (xive_enabled())
			xive_shutdown();

		/*
		 * We might be running as little-endian - now that interrupts
		 * are disabled, reset the HILE bit to big-endian so we don't
		 * take interrupts in the wrong endian later
		 */
		opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
	}
}
Example No. 9
0
/*
 * Set up the variables that describe the cache line sizes
 * for this machine.
 */
static void __init setup_iSeries_cache_sizes(void)
{
	unsigned int i, n;
	unsigned int procIx = get_paca()->lppaca.xDynHvPhysicalProcIndex;

	systemcfg->iCacheL1Size =
		xIoHriProcessorVpd[procIx].xInstCacheSize * 1024;
	systemcfg->iCacheL1LineSize =
		xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
	systemcfg->dCacheL1Size =
		xIoHriProcessorVpd[procIx].xDataL1CacheSizeKB * 1024;
	systemcfg->dCacheL1LineSize =
		xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
	naca->iCacheL1LinesPerPage = PAGE_SIZE / systemcfg->iCacheL1LineSize;
	naca->dCacheL1LinesPerPage = PAGE_SIZE / systemcfg->dCacheL1LineSize;

	i = systemcfg->iCacheL1LineSize;
	n = 0;
	while ((i = (i / 2)))
		++n;
	naca->iCacheL1LogLineSize = n;

	i = systemcfg->dCacheL1LineSize;
	n = 0;
	while ((i = (i / 2)))
		++n;
	naca->dCacheL1LogLineSize = n;
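	/*
	 * The two loops above compute floor(log2(line size)); on later
	 * kernels the same value could be had from the ilog2() helper in
	 * <linux/log2.h>, e.g. ilog2(systemcfg->dCacheL1LineSize).
	 */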

	printk("D-cache line size = %d\n",
			(unsigned int)systemcfg->dCacheL1LineSize);
	printk("I-cache line size = %d\n",
			(unsigned int)systemcfg->iCacheL1LineSize);
}
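Example No. 10
0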
static void kexec_prepare_cpus(void)
{
	wake_offline_cpus();
	smp_call_function(kexec_smp_down, NULL, /* wait */0);
	local_irq_disable();
	hard_irq_disable();

	mb(); /* make sure IRQs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;

	kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
	/* we are sure every CPU has IRQs off at this point */
	kexec_all_irq_disabled = 1;

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	/*
	 * Before removing MMU mappings make sure all CPUs have entered real
	 * mode:
	 */
	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);

	put_cpu();
}
Example No. 11
0
static void __init setup_iSeries_cache_sizes(void)
{
	unsigned i,n;
	unsigned procIx = get_paca()->xLpPaca.xDynHvPhysicalProcIndex;

	naca->iCacheL1LineSize = xIoHriProcessorVpd[procIx].xInstCacheOperandSize;
	naca->dCacheL1LineSize = xIoHriProcessorVpd[procIx].xDataCacheOperandSize;
	naca->iCacheL1LinesPerPage = PAGE_SIZE / naca->iCacheL1LineSize;
	naca->dCacheL1LinesPerPage = PAGE_SIZE / naca->dCacheL1LineSize;
	i = naca->iCacheL1LineSize;
	n = 0;
	while ((i=(i/2))) ++n;
	naca->iCacheL1LogLineSize = n;
	i = naca->dCacheL1LineSize;
	n = 0;
	while ((i=(i/2))) ++n;
	naca->dCacheL1LogLineSize = n;

	printk( "D-cache line size = %d  (log = %d)\n",
			(unsigned)naca->dCacheL1LineSize,
			(unsigned)naca->dCacheL1LogLineSize );
	printk( "I-cache line size = %d  (log = %d)\n",
			(unsigned)naca->iCacheL1LineSize,
			(unsigned)naca->iCacheL1LogLineSize );
	
}
Example No. 12
0
static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/*
	 * Fixup HFSCR:TM based on CPU features. The bit is set by our
	 * early asm init because at that point we haven't updated our
	 * CPU features from firmware and device-tree. Here we have,
	 * so let's do it.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
		mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}
Example No. 13
0
void __init early_setup(unsigned long dt_ptr)
{
	struct paca_struct *lpaca = get_paca();
	static struct machdep_calls **mach;

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup()\n");

	/*
	 * Do early initializations using the flattened device
	 * tree, like retrieving the physical memory map or
	 * calculating/retrieving the hash table size
	 */
	early_init_devtree(__va(dt_ptr));

	/*
	 * Iterate all ppc_md structures until we find the proper
	 * one for the current machine type
	 */
	DBG("Probing machine type for platform %x...\n", _machine);

	for (mach = machines; *mach; mach++) {
		if ((*mach)->probe(_machine))
			break;
	}
	/* What can we do if we didn't find one? */
	if (*mach == NULL) {
		DBG("No suitable machine found!\n");
		for (;;);
	}
	ppc_md = **mach;

#ifdef CONFIG_CRASH_DUMP
	kdump_setup();
#endif

	DBG("Found, Initializing memory management...\n");

	/*
	 * Initialize the MMU Hash table and create the linear mapping
	 * of memory. Has to be done before stab/slb initialization as
	 * this is currently where the page size encoding is obtained
	 */
	htab_initialize();

	/*
	 * Initialize stab / SLB management except on iSeries
	 */
	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
		if (cpu_has_feature(CPU_FTR_SLB))
			slb_initialize();
		else
			stab_initialize(lpaca->stab_real);
	}

	DBG(" <- early_setup()\n");
}
Example No. 14
0
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	get_paca()->soft_enabled = 0;

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();
}
Example No. 15
0
/*
 * Document me.
 */
void __init
iSeries_setup_arch(void)
{
	void *	eventStack;
	unsigned procIx = get_paca()->xLpPaca.xDynHvPhysicalProcIndex;

	/* Setup the Lp Event Queue */

	/* Allocate a page for the Event Stack
	 * The hypervisor wants the absolute real address, so
	 * we subtract out the KERNELBASE and add in the
	 * absolute real address of the kernel load area
	 */
	
	eventStack = alloc_bootmem_pages( LpEventStackSize );
	
	memset( eventStack, 0, LpEventStackSize );
	
	/* Invoke the hypervisor to initialize the event stack */
	
	HvCallEvent_setLpEventStack( 0, eventStack, LpEventStackSize );
	
	/* Initialize fields in our Lp Event Queue */
	
	xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
	xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
	xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack + 
					(LpEventStackSize - LpEventMaxSize);
	xItLpQueue.xIndex = 0;
	
	/* Compute processor frequency */
	procFreqHz = (((1UL<<34) * 1000000) / xIoHriProcessorVpd[procIx].xProcFreq );
	procFreqMhz = procFreqHz / 1000000;
	procFreqMhzHundreths = (procFreqHz/10000) - (procFreqMhz*100);
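	/* The *Hundreths values carry the fractional MHz as two decimal
	 * digits, so e.g. 1656250000 Hz prints as "1656.25".
	 */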

	ppc_proc_freq = procFreqHz;

	/* Compute time base frequency */
	tbFreqHz = (((1UL<<32) * 1000000) / xIoHriProcessorVpd[procIx].xTimeBaseFreq );
	tbFreqMhz = tbFreqHz / 1000000;
	tbFreqMhzHundreths = (tbFreqHz/10000) - (tbFreqMhz*100);

	ppc_tb_freq = tbFreqHz;

	printk("Max  logical processors = %d\n", 
			itVpdAreas.xSlicMaxLogicalProcs );
	printk("Max physical processors = %d\n",
			itVpdAreas.xSlicMaxPhysicalProcs );
	printk("Processor frequency = %lu.%02lu\n",
			procFreqMhz, 
			procFreqMhzHundreths );
	printk("Time base frequency = %lu.%02lu\n",
			tbFreqMhz,
			tbFreqMhzHundreths );
	printk("Processor version = %x\n",
			xIoHriProcessorVpd[procIx].xPVR );

}
Example No. 16
0
__openfirmware
long
rtas_call(int token, int nargs, int nret,
	  unsigned long *outputs, ...)
{
	va_list list;
	int i;
	unsigned long s;
	struct rtas_args *rtas_args = &(get_paca()->xRtas);

	PPCDBG(PPCDBG_RTAS, "Entering rtas_call\n");
	PPCDBG(PPCDBG_RTAS, "\ttoken    = 0x%x\n", token);
	PPCDBG(PPCDBG_RTAS, "\tnargs    = %d\n", nargs);
	PPCDBG(PPCDBG_RTAS, "\tnret     = %d\n", nret);
	PPCDBG(PPCDBG_RTAS, "\t&outputs = 0x%lx\n", outputs);
	if (token == RTAS_UNKNOWN_SERVICE)
		return -1;

	rtas_args->token = token;
	rtas_args->nargs = nargs;
	rtas_args->nret  = nret;
	rtas_args->rets  = (rtas_arg_t *)&(rtas_args->args[nargs]);
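	/* rets points into the tail of args[]: RTAS writes its return
	 * values immediately after the input arguments.
	 */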
	va_start(list, outputs);
	for (i = 0; i < nargs; ++i) {
		rtas_args->args[i] = (rtas_arg_t)LONG_LSW(va_arg(list, ulong));
		PPCDBG(PPCDBG_RTAS, "\tnarg[%d] = 0x%lx\n", i, rtas_args->args[i]);
	}
	va_end(list);

	for (i = 0; i < nret; ++i)
		rtas_args->rets[i] = 0;

#if 0   /* Gotta do something different here, use global lock for now... */
	spin_lock_irqsave(&rtas_args->lock, s);
#else
	spin_lock_irqsave(&rtas.lock, s);
#endif
	PPCDBG(PPCDBG_RTAS, "\tentering rtas with 0x%lx\n",
		(void *)__pa((unsigned long)rtas_args));
	enter_rtas((void *)__pa((unsigned long)rtas_args));
	PPCDBG(PPCDBG_RTAS, "\treturned from rtas ...\n");
#if 0   /* Gotta do something different here, use global lock for now... */
	spin_unlock_irqrestore(&rtas_args->lock, s);
#else
	spin_unlock_irqrestore(&rtas.lock, s);
#endif
	ifppcdebug(PPCDBG_RTAS) {
		for (i = 0; i < nret; i++)
			udbg_printf("\tnret[%d] = 0x%lx\n", i, (ulong)rtas_args->rets[i]);
	}

	if (nret > 1 && outputs != NULL)
		for (i = 0; i < nret-1; ++i)
			outputs[i] = rtas_args->rets[i+1];
	return (ulong)((nret > 0) ? rtas_args->rets[0] : 0);
}
Example No. 17
0
static int pseries_dedicated_idle(void)
{
	long oldval;
	struct paca_struct *lpaca = get_paca();
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);

	while (1) {
		/*
		 * Indicate to the HV that we are idle. Now would be
		 * a good time to find other work to dispatch.
		 */
		lpaca->lppaca.idle = 1;

		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
		if (!oldval) {
			set_thread_flag(TIF_POLLING_NRFLAG);

			start_snooze = __get_tb() +
				*smt_snooze_delay * tb_ticks_per_usec;

			while (!need_resched() && !cpu_is_offline(cpu)) {
				ppc64_runlatch_off();

				/*
				 * Go into low thread priority and possibly
				 * low power mode.
				 */
				HMT_low();
				HMT_very_low();

				if (*smt_snooze_delay != 0 &&
				    __get_tb() > start_snooze) {
					HMT_medium();
					dedicated_idle_sleep(cpu);
				}

			}

			HMT_medium();
			clear_thread_flag(TIF_POLLING_NRFLAG);
		} else {
			set_need_resched();
		}

		lpaca->lppaca.idle = 0;
		ppc64_runlatch_on();

		schedule();

		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
			cpu_die();
	}
}
Example No. 18
0
/*
 * Document me.
 */
static void __init iSeries_setup_arch(void)
{
	void *eventStack;
	unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;

	/* Add an eye catcher and the systemcfg layout version number */
	strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
	systemcfg->version.major = SYSTEMCFG_MAJOR;
	systemcfg->version.minor = SYSTEMCFG_MINOR;

	/* Setup the Lp Event Queue */

	/* Allocate a page for the Event Stack
	 * The hypervisor wants the absolute real address, so
	 * we subtract out the KERNELBASE and add in the
	 * absolute real address of the kernel load area
	 */
	eventStack = alloc_bootmem_pages(LpEventStackSize);
	memset(eventStack, 0, LpEventStackSize);

	/* Invoke the hypervisor to initialize the event stack */
	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);

	/* Initialize fields in our Lp Event Queue */
	xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
	xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
	xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
					(LpEventStackSize - LpEventMaxSize);
	xItLpQueue.xIndex = 0;

	/* Compute processor frequency */
	procFreqHz = ((1UL << 34) * 1000000) /
			xIoHriProcessorVpd[procIx].xProcFreq;
	procFreqMhz = procFreqHz / 1000000;
	procFreqMhzHundreths = (procFreqHz / 10000) - (procFreqMhz * 100);
	ppc_proc_freq = procFreqHz;

	/* Compute time base frequency */
	tbFreqHz = ((1UL << 32) * 1000000) /
		xIoHriProcessorVpd[procIx].xTimeBaseFreq;
	tbFreqMhz = tbFreqHz / 1000000;
	tbFreqMhzHundreths = (tbFreqHz / 10000) - (tbFreqMhz * 100);
	ppc_tb_freq = tbFreqHz;

	printk("Max  logical processors = %d\n",
			itVpdAreas.xSlicMaxLogicalProcs);
	printk("Max physical processors = %d\n",
			itVpdAreas.xSlicMaxPhysicalProcs);
	printk("Processor frequency = %lu.%02lu\n", procFreqMhz,
			procFreqMhzHundreths);
	printk("Time base frequency = %lu.%02lu\n", tbFreqMhz,
			tbFreqMhzHundreths);
	systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
	printk("Processor version = %x\n", systemcfg->processor);
}
Example No. 19
0
void hash_preload(struct mm_struct *mm, unsigned long ea,
		  unsigned long access, unsigned long trap)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	cpumask_t mask;
	unsigned long flags;
	int local = 0;

	/* We don't want huge pages prefaulted for now
	 */
	if (unlikely(in_hugepage_area(mm->context, ea)))
		return;

	DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
		" trap=%lx\n", mm, mm->pgd, ea, access, trap);

	/* Get PTE, VSID, access mask */
	pgdir = mm->pgd;
	if (pgdir == NULL)
		return;
	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;
	vsid = get_vsid(mm->context.id, ea);

	/* Hash it in */
	local_irq_save(flags);
	mask = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(mm->cpu_vm_mask, mask))
		local = 1;
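	/* If this mm has only run on the current CPU we may use the
	 * cheaper local (tlbiel) form of hash PTE invalidation.
	 */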
#ifndef CONFIG_PPC_64K_PAGES
	__hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
	if (mmu_ci_restrictions) {
		/* If this PTE is non-cacheable, switch to 4k */
		if (mm->context.user_psize == MMU_PAGE_64K &&
		    (pte_val(*ptep) & _PAGE_NO_CACHE)) {
			mm->context.user_psize = MMU_PAGE_4K;
			mm->context.sllp = SLB_VSID_USER |
				mmu_psize_defs[MMU_PAGE_4K].sllp;
			get_paca()->context = mm->context;
			slb_flush_and_rebolt();
		}
	}
	if (mm->context.user_psize == MMU_PAGE_64K)
		__hash_page_64K(ea, access, vsid, ptep, trap, local);
	else
		__hash_page_4K(ea, access, vsid, ptep, trap, local);
#endif /* CONFIG_PPC_64K_PAGES */
	local_irq_restore(flags);
}
Example No. 20
0
static void check_and_cede_processor(void)
{
	/*
	 * Interrupts are soft-disabled at this point,
	 * but not hard disabled. So an interrupt might have
	 * occurred before entering NAP, and would be potentially
	 * lost (edge events, decrementer events, etc...) unless
	 * we first hard disable then check.
	 */
	hard_irq_disable();
	if (get_paca()->irq_happened == 0)
		cede_processor();
}
Example No. 21
0
void
call_rtas_display_status(char c)
{
	struct rtas_args *rtas = &(get_paca()->xRtas);

	rtas->token = 10;
	rtas->nargs = 1;
	rtas->nret  = 1;
	rtas->rets  = (rtas_arg_t *)&(rtas->args[1]);
	rtas->args[0] = (int)c;

	enter_rtas((void *)__pa((unsigned long)rtas));	
}
Example No. 22
0
static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
	xics_kexec_teardown_cpu(secondary);

	/* Return secondary CPUs to firmware on OPAL v3 */
	if (firmware_has_feature(FW_FEATURE_OPALv3) && secondary) {
		mb();
		get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
		mb();

		/* Return the CPU to OPAL */
		opal_return_cpu();
	}
}
Example No. 23
0
void
phys_call_rtas_display_status(char c)
{
	unsigned long offset = reloc_offset();
	struct rtas_args *rtas = PTRRELOC(&(get_paca()->xRtas));

	rtas->token = 10;
	rtas->nargs = 1;
	rtas->nret  = 1;
	rtas->rets  = (rtas_arg_t *)PTRRELOC(&(rtas->args[1]));
	rtas->args[0] = (int)c;

	enter_rtas(rtas);	
}
Example No. 24
0
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
int timer_interrupt(struct pt_regs * regs)
{
	int next_dec;
	unsigned long cur_tb;
	struct paca_struct *lpaca = get_paca();
	unsigned long cpu = lpaca->xPacaIndex;

	irq_enter();

#ifndef CONFIG_PPC_ISERIES
	ppc64_do_profile(regs);
#endif

	lpaca->xLpPaca.xIntDword.xFields.xDecrInt = 0;

	while (lpaca->next_jiffy_update_tb <= (cur_tb = get_tb())) {

#ifdef CONFIG_SMP
		smp_local_timer_interrupt(regs);
#endif
		if (cpu == boot_cpuid) {
			write_seqlock(&xtime_lock);
			tb_last_stamp = lpaca->next_jiffy_update_tb;
			do_timer(regs);
			timer_sync_xtime( cur_tb );
			timer_check_rtc();
			write_sequnlock(&xtime_lock);
			if ( adjusting_time && (time_adjust == 0) )
				ppc_adjtimex();
		}
		lpaca->next_jiffy_update_tb += tb_ticks_per_jiffy;
	}
	
	next_dec = lpaca->next_jiffy_update_tb - cur_tb;
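	/* Never program the decrementer further out than the default interval. */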
	if (next_dec > lpaca->default_decr)
        	next_dec = lpaca->default_decr;
	set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
	{
		struct ItLpQueue *lpq = lpaca->lpQueuePtr;
		if (lpq && ItLpQueue_isLpIntPending(lpq))
			lpEvent_count += ItLpQueue_process(lpq, regs);
	}
#endif

	irq_exit();

	return 1;
}
Example No. 25
0
void __init early_setup(unsigned long dt_ptr)
{
	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Fill in any uninitialised pacas */
	initialise_pacas();

	/* Identify CPU type */
	identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	setup_paca(0);

	/* Initialize lockdep early or else spinlocks will blow */
	lockdep_init();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	setup_paca(boot_cpuid);

	/* Fix up paca fields required for the boot cpu */
	get_paca()->cpu_start = 1;

	/* Probe the machine type */
	probe_machine();

	setup_kdump_trampoline();

	DBG("Found, Initializing memory management...\n");

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	DBG(" <- early_setup()\n");
}
Example No. 26
0
static inline void proc_pmc_cpi(void)
{
	/* Configure the PMC registers to count cycles and instructions */
	/* so we can compute cpi */
	/*
	 * MMCRA[30]    = 1     Don't count in wait state (CTRL[31]=0)
	 * MMCR0[6]     = 1     Freeze counters when any overflow
	 * MMCR0[19:25] = 0x01  PMC1 counts Thread Active Run Cycles
	 * MMCR0[26:31] = 0x05	PMC2 counts Thread Active Cycles
	 * MMCR1[0:4]   = 0x07	PMC3 counts Instructions Dispatched
	 * MMCR1[5:9]   = 0x03	PMC4 counts Instructions Completed
	 * MMCR1[10:14] = 0x06	PMC5 counts Machine Cycles
	 *
	 */

	proc_pmc_control_mode = PMC_CONTROL_CPI;
	
	/* Indicate to hypervisor that we are using the PMCs */
	get_paca()->xLpPacaPtr->xPMCRegsInUse = 1;

	/* Freeze all counters */
	mtspr( MMCR0, 0x80000000 );
	mtspr( MMCR1, 0x00000000 );
	
	/* Clear all the PMCs */
	mtspr( PMC1, 0 );
	mtspr( PMC2, 0 );
	mtspr( PMC3, 0 );
	mtspr( PMC4, 0 );
	mtspr( PMC5, 0 );
	mtspr( PMC6, 0 );
	mtspr( PMC7, 0 );
	mtspr( PMC8, 0 );

	/* Freeze counters in Wait State (CTRL[31]=0) */
	mtspr( MMCRA, 0x00000002 );

	/* PMC3<-0x07, PMC4<-0x03, PMC5<-0x06 */
	mtspr( MMCR1, 0x38cc0000 );

	mb();
	
	/* PMC1<-0x01, PMC2<-0x05
	 * Start all counters
	 */
	mtspr( MMCR0, 0x02000045 );
	
}
Example No. 27
0
void
call_rtas_display_status(char c)
{
	struct rtas_args *rargs = &(get_paca()->xRtas);
	unsigned long flags;

	spin_lock_irqsave(&rtas.lock, flags);

	rargs->token = 10;
	rargs->nargs = 1;
	rargs->nret  = 1;
	rargs->rets  = (rtas_arg_t *)(&(rargs->args[1]));
	rargs->args[0] = (int)c;

	enter_rtas((void *)__pa((unsigned long)rargs));
	spin_unlock_irqrestore(&rtas.lock, flags);
}
Example No. 28
0
static inline void proc_pmc_tlb(void)
{
	/* Configure the PMC registers to count tlb misses  */
	/*
	 * MMCR0[6]     = 1     Freeze counters when any overflow
	 * MMCR0[19:25] = 0x55  Group count
	 *   PMC1 counts  I misses
	 *   PMC2 counts  I miss duration (latency)
	 *   PMC3 counts  D misses
	 *   PMC4 counts  D miss duration (latency)
	 *   PMC5 counts  IERAT misses
	 *   PMC6 counts  D references (including PMC7)
	 *   PMC7 counts  miss PTEs searched
	 *   PMC8 counts  miss >8 PTEs searched
	 *   
	 */

	proc_pmc_control_mode = PMC_CONTROL_TLB;
	
	/* Indicate to hypervisor that we are using the PMCs */
	get_paca()->xLpPacaPtr->xPMCRegsInUse = 1;

	/* Freeze all counters */
	mtspr( MMCR0, 0x80000000 );
	mtspr( MMCR1, 0x00000000 );
	
	/* Clear all the PMCs */
	mtspr( PMC1, 0 );
	mtspr( PMC2, 0 );
	mtspr( PMC3, 0 );
	mtspr( PMC4, 0 );
	mtspr( PMC5, 0 );
	mtspr( PMC6, 0 );
	mtspr( PMC7, 0 );
	mtspr( PMC8, 0 );

	mtspr( MMCRA, 0x00000000 );

	mb();
	
	/* PMC1<-0x55
	 * Start all counters
	 */
	mtspr( MMCR0, 0x02001540 );
	
}
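Example No. 29
0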
/* too late to fail here */
void default_machine_kexec(struct kimage *image)
{
	/* prepare control code if any */

	/*
	 * If this is a normal kexec boot, we need to shut the other
	 * CPUs down into our wait loop and quiesce interrupts.
	 * Otherwise, in crash mode (crashing_cpu >= 0), the other CPUs
	 * have already been stopped and their pt_regs collected via the
	 * debugger IPI.
	 */

	if (crashing_cpu == -1)
		kexec_prepare_cpus();

	pr_debug("kexec: Starting switchover sequence.\n");

	/* switch to a statically allocated stack.  Based on irq stack code.
	 * XXX: the task struct will likely be invalid once we do the copy!
	 */
	kexec_stack.thread_info.task = current_thread_info()->task;
	kexec_stack.thread_info.flags = 0;

	/* We need a static PACA, too; copy this CPU's PACA over and switch to
	 * it.  Also poison per_cpu_offset to catch anyone using non-static
	 * data.
	 */
	memcpy(&kexec_paca, get_paca(), sizeof(struct paca_struct));
	kexec_paca.data_offset = 0xedeaddeadeeeeeeeUL;
	paca = (struct paca_struct *)RELOC_HIDE(&kexec_paca, 0) -
		kexec_paca.paca_index;
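	/*
	 * Bias the global paca pointer so that paca[paca_index] resolves
	 * to the static kexec_paca copy we just made.
	 */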
	setup_paca(&kexec_paca);

	/* XXX: If anyone does 'dynamic lppacas' this will also need to be
	 * switched to a static version!
	 */

	/* Some things are best done in assembly.  Finding globals with
	 * a toc is easier in C, so pass in what we can.
	 */
	kexec_sequence(&kexec_stack, image->start, image,
			page_address(image->control_code_page),
			ppc_md.hpte_clear_all);
	/* NOTREACHED */
}
Example No. 30
0
static int iSeries_idle(void)
{
	struct paca_struct *lpaca;
	long oldval;
	unsigned long CTRL;

	/* ensure iSeries run light will be out when idle */
	clear_thread_flag(TIF_RUN_LIGHT);
	CTRL = mfspr(CTRLF);
	CTRL &= ~RUNLATCH;
	mtspr(CTRLT, CTRL);

	lpaca = get_paca();

	while (1) {
		if (lpaca->lppaca.xSharedProc) {
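			/* On a shared (virtual) processor, hand unused cycles
			 * back to the hypervisor rather than spinning.
			 */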
			if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
				process_iSeries_events();
			if (!need_resched())
				yield_shared_processor();
		} else {
			oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

			if (!oldval) {
				set_thread_flag(TIF_POLLING_NRFLAG);

				while (!need_resched()) {
					HMT_medium();
					if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
						process_iSeries_events();
					HMT_low();
				}

				HMT_medium();
				clear_thread_flag(TIF_POLLING_NRFLAG);
			} else {
				set_need_resched();
			}
		}

		schedule();
	}

	return 0;
}