Example #1
int read_current_timer(unsigned long *timer_value)
{
	*timer_value = mfspr(SPR_TTCR);
	return 0;
}
Example #2
void
trap(struct trapframe *frame)
{
	struct thread	*td;
	struct proc	*p;
#ifdef KDTRACE_HOOKS
	uint32_t inst;
#endif
	int		sig, type, user;
	u_int		ucode;
	ksiginfo_t	ksi;

	PCPU_INC(cnt.v_trap);

	td = curthread;
	p = td->td_proc;

	type = ucode = frame->exc;
	sig = 0;
	user = frame->srr1 & PSL_PR;

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
	    trapname(type), user ? "user" : "kernel");

#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * If the DTrace kernel module has registered a trap handler,
	 * call it and if it returns non-zero, assume that it has
	 * handled the trap and modified the trap frame so that this
	 * function can return normally.
	 */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type) != 0)
		return;
#endif

	if (user) {
		td->td_pticks = 0;
		td->td_frame = frame;
		if (td->td_cowgen != p->p_cowgen)
			thread_cow_update(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_RUNMODETRC:
		case EXC_TRC:
			frame->srr1 &= ~PSL_SE;
			sig = SIGTRAP;
			ucode = TRAP_TRACE;
			break;

#ifdef __powerpc64__
		case EXC_ISE:
		case EXC_DSE:
			if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
			    (type == EXC_ISE) ? frame->srr0 : frame->dar) != 0){
				sig = SIGSEGV;
				ucode = SEGV_MAPERR;
			}
			break;
#endif
		case EXC_DSI:
		case EXC_ISI:
			sig = trap_pfault(frame, 1);
			if (sig == SIGSEGV)
				ucode = SEGV_MAPERR;
			break;

		case EXC_SC:
			syscall(frame);
			break;

		case EXC_FPU:
			KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
			    ("FPU already enabled for thread"));
			enable_fpu(td);
			break;

		case EXC_VEC:
			KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
			    ("Altivec already enabled for thread"));
			enable_vec(td);
			break;

		case EXC_VSX:
			KASSERT((td->td_pcb->pcb_flags & PCB_VSX) != PCB_VSX,
			    ("VSX already enabled for thread"));
			if (!(td->td_pcb->pcb_flags & PCB_VEC))
				enable_vec(td);
			if (!(td->td_pcb->pcb_flags & PCB_FPU))
				save_fpu(td);
			td->td_pcb->pcb_flags |= PCB_VSX;
			enable_fpu(td);
			break;

		case EXC_VECAST_E:
		case EXC_VECAST_G4:
		case EXC_VECAST_G5:
			/*
			 * We get a VPU assist exception for IEEE mode
			 * vector operations on denormalized floats.
			 * Emulating this is a giant pain, so for now,
			 * just switch off IEEE mode and treat them as
			 * zero.
			 */

			save_vec(td);
			td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
			enable_vec(td);
			break;

		case EXC_ALI:
			if (fix_unaligned(td, frame) != 0) {
				sig = SIGBUS;
				ucode = BUS_ADRALN;
			}
			else
				frame->srr0 += 4;
			break;

		case EXC_DEBUG:	/* Single stepping */
			mtspr(SPR_DBSR, mfspr(SPR_DBSR));
			frame->srr1 &= ~PSL_DE;
			frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
			sig = SIGTRAP;
			ucode = TRAP_TRACE;
			break;

		case EXC_PGM:
			/* Identify the trap reason */
#ifdef AIM
			if (frame->srr1 & EXC_PGM_TRAP) {
#else
			if (frame->cpu.booke.esr & ESR_PTR) {
#endif
#ifdef KDTRACE_HOOKS
				inst = fuword32((const void *)frame->srr0);
				if (inst == 0x0FFFDDDD &&
				    dtrace_pid_probe_ptr != NULL) {
					struct reg regs;
					fill_regs(td, &regs);
					(*dtrace_pid_probe_ptr)(&regs);
					break;
				}
#endif
 				sig = SIGTRAP;
				ucode = TRAP_BRKPT;
			} else {
				sig = ppc_instr_emulate(frame, td->td_pcb);
				if (sig == SIGILL) {
					if (frame->srr1 & EXC_PGM_PRIV)
						ucode = ILL_PRVOPC;
					else if (frame->srr1 & EXC_PGM_ILLEGAL)
						ucode = ILL_ILLOPC;
				} else if (sig == SIGFPE)
					ucode = FPE_FLTINV;	/* Punt for now, invalid operation. */
			}
			break;

		case EXC_MCHK:
			/*
			 * Note that this may not be recoverable for the user
			 * process, depending on the type of machine check,
			 * but it at least prevents the kernel from dying.
			 */
			sig = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
#ifdef KDTRACE_HOOKS
		case EXC_PGM:
			if (frame->srr1 & EXC_PGM_TRAP) {
				if (*(uint32_t *)frame->srr0 == EXC_DTRACE) {
					if (dtrace_invop_jump_addr != NULL) {
						dtrace_invop_jump_addr(frame);
						return;
					}
				}
			}
			break;
#endif
#ifdef __powerpc64__
		case EXC_DSE:
			if ((frame->dar & SEGMENT_MASK) == USER_ADDR) {
				__asm __volatile ("slbmte %0, %1" ::
					"r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
					"r"(USER_SLB_SLBE));
				return;
			}
			break;
#endif
		case EXC_DSI:
			if (trap_pfault(frame, 0) == 0)
 				return;
			break;
		case EXC_MCHK:
			if (handle_onfault(frame))
 				return;
			break;
		default:
			break;
		}
		trap_fatal(frame);
	}

	/* Signal delivery and userret() follow here in the full function;
	 * the snippet is truncated. */
}
Example #3
/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */
		kvmppc_account_exit(vcpu, DEC_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.msr & MSR_PR) {
			/* Program traps generated by user-level software must be handled
			 * by the guest kernel. */
			vcpu->arch.esr = vcpu->arch.fault_esr;
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* don't overwrite subtypes, just account kvm_stats */
			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
			run->exit_reason = KVM_EXIT_DCR;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
		default:
			BUG();
		}
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DATA_STORAGE:
		vcpu->arch.dear = vcpu->arch.fault_dear;
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SYSCALL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
			vcpu->arch.dear = vcpu->arch.fault_dear;
			vcpu->arch.esr = vcpu->arch.fault_esr;
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	local_irq_disable();

	kvmppc_core_deliver_interrupts(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}
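The return-value convention documented above packs an error code and two flag bits into a single int. Here is a minimal sketch of how such a value could be composed and decoded, assuming flag encodings (RESUME_FLAG_NV in bit 0, RESUME_FLAG_HOST in bit 1) inferred from the way r is built and tested in this function:

/* Sketch only: the flag values are assumptions inferred from the
 * expressions (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV) above. */
#define RESUME_FLAG_NV   (1 << 0)  /* reload non-volatile registers */
#define RESUME_FLAG_HOST (1 << 1)  /* return control to the host */

static int pack_resume(int errcode, int flags)
{
	return (errcode << 2) | flags;
}

static int resume_errcode(int r)
{
	return r >> 2;  /* e.g. -EINTR after a signal exit */
}

static int resume_wants_host(int r)
{
	return r & RESUME_FLAG_HOST;
}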
Example #4
static int bcom_engine_init(void)
{
	int task;
	phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa;
	unsigned int tdt_size, ctx_size, var_size, fdt_size;

	/* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */
	tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt);
	ctx_size = BCOM_MAX_TASKS * BCOM_CTX_SIZE;
	var_size = BCOM_MAX_TASKS * (BCOM_VAR_SIZE + BCOM_INC_SIZE);
	fdt_size = BCOM_FDT_SIZE;

	bcom_eng->tdt = bcom_sram_alloc(tdt_size, sizeof(u32), &tdt_pa);
	bcom_eng->ctx = bcom_sram_alloc(ctx_size, BCOM_CTX_ALIGN, &ctx_pa);
	bcom_eng->var = bcom_sram_alloc(var_size, BCOM_VAR_ALIGN, &var_pa);
	bcom_eng->fdt = bcom_sram_alloc(fdt_size, BCOM_FDT_ALIGN, &fdt_pa);

	if (!bcom_eng->tdt || !bcom_eng->ctx || !bcom_eng->var || !bcom_eng->fdt) {
		printk(KERN_ERR "DMA: SRAM alloc failed in engine init !\n");

		bcom_sram_free(bcom_eng->tdt);
		bcom_sram_free(bcom_eng->ctx);
		bcom_sram_free(bcom_eng->var);
		bcom_sram_free(bcom_eng->fdt);

		return -ENOMEM;
	}

	memset(bcom_eng->tdt, 0x00, tdt_size);
	memset(bcom_eng->ctx, 0x00, ctx_size);
	memset(bcom_eng->var, 0x00, var_size);
	memset(bcom_eng->fdt, 0x00, fdt_size);

	/* Copy the FDT for the EU#3 */
	memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));

	/* Initialize Task base structure */
	for (task=0; task<BCOM_MAX_TASKS; task++)
	{
		out_be16(&bcom_eng->regs->tcr[task], 0);
		out_8(&bcom_eng->regs->ipr[task], 0);

		bcom_eng->tdt[task].context	= ctx_pa;
		bcom_eng->tdt[task].var	= var_pa;
		bcom_eng->tdt[task].fdt	= fdt_pa;

		var_pa += BCOM_VAR_SIZE + BCOM_INC_SIZE;
		ctx_pa += BCOM_CTX_SIZE;
	}

	out_be32(&bcom_eng->regs->taskBar, tdt_pa);

	/* Init 'always' initiator */
	out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS);

	/* Disable COMM Bus Prefetch on the original 5200; it's broken */
	if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
		bcom_disable_prefetch();

	/* Init lock */
	spin_lock_init(&bcom_eng->lock);

	return 0;
}
Example #5
void __init early_setup(unsigned long dt_ptr)
{
	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Fill in any uninitialised pacas */
	initialise_pacas();

	/* Identify CPU type */
	identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	setup_paca(0);

	/* Initialize lockdep early or else spinlocks will blow */
	lockdep_init();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

 	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	setup_paca(boot_cpuid);

	/* Fix up paca fields required for the boot cpu */
	get_paca()->cpu_start = 1;
	get_paca()->stab_real = __pa((u64)&initial_stab);
	get_paca()->stab_addr = (u64)&initial_stab;

	/* Probe the machine type */
	probe_machine();

	setup_kdump_trampoline();

	DBG("Found, Initializing memory management...\n");

	/*
	 * Initialize the MMU Hash table and create the linear mapping
	 * of memory. Has to be done before stab/slb initialization as
	 * this is currently where the page size encoding is obtained
	 */
	htab_initialize();

	/*
	 * Initialize stab / SLB management except on iSeries
	 */
	if (cpu_has_feature(CPU_FTR_SLB))
		slb_initialize();
	else if (!firmware_has_feature(FW_FEATURE_ISERIES))
		stab_initialize(get_paca()->stab_real);

	DBG(" <- early_setup()\n");
}
Example #6
void fwext_DefaultInterruptHandler(FwExt_Regs_t* context, uint64_t code) {

    static char* INTERRUPT_CODES[] = {
        "Machine Check",
        "Critical Interrupt",
        "Debug",
        "Data Storage Exception",
        "Instruction Storage Exception",
        "External Interrupt",
        "Alignment Interrupt",
        "Program Interrupt",
        "Floating Point Unavailable",
        "System Call",
        "APU",
        "Decrementer",
        "Fixed Interval Timer",
        "Watchdog Timer",
        "Data TLB Miss",
        "Instruction TLB Miss",
        "Vector Unavailable",
        "?",
        "?",
        "?",
        "Processor Doorbell",
        "Processor Doorbell Critical",
        "Guest Doorbell",
        "Guest Doorbell Critical",
        "Hypervisor System Call",
        "Hypervisor Privilege",
        "LRAT Error"
    };

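    /* Assumption: the IVO_* interrupt codes are vector offsets spaced
     * 0x20 bytes apart, so code>>5 converts the code into an index into
     * INTERRUPT_CODES. */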
    unsigned n = code>>5;

    printf(
        "Interrupt.  Code=%lX (%s)  IP=%lx  LR=%lx  MSR=%lx DEAR=%lx ESR=%lx MCSR=%lx Timestamp=%lx\n",
        (unsigned long)code,
        (n < sizeof(INTERRUPT_CODES)/sizeof(char*)) ? INTERRUPT_CODES[n] : "?",
        context->ip,
        context->lr,
        context->msr,
        mfspr(SPRN_DEAR),
        mfspr(SPRN_ESR),
        mfspr(SPRN_MCSR),
        GetTimeBase());


    switch (code) {

    case IVO_MCHK :
    case IVO_CI   :
    case IVO_EI   :
    {
        fwext_PUEA_Handler(context,code);
        break;
    }

    // NOTE: all other interrupts are fatal at this time:

    default :
    {
        Kernel_Crash(code);
        break;
    }
    }
}
Example #7
static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
		 int signr, sigset_t *set, unsigned long handler,
		 int ctx_has_vsx_region)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
#endif
	unsigned long msr = regs->msr;
	long err = 0;

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);

	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &current->thread.vr_state,
				      33 * sizeof(vector128));
		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
	flush_fp_to_thread(current);
	/* copy fpr regs and fpscr */
	err |= copy_fpr_to_user(&sc->fp_regs, current);
#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace.  Update v_regs to point after the
	 * VMX data.
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		__giveup_vsx(current);
		v_regs += ELF_NVRREG;
		err |= copy_vsx_to_user(v_regs, current);
		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_regs contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
	err |= __put_user(&sc->gp_regs, &sc->regs);
	WARN_ON(!FULL_REGS(regs));
	err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |=  __put_user(set->sig[0], &sc->oldmask);

	return err;
}
Example #8
int mpc52xx_pm_enter(suspend_state_t state)
{
	u32 clk_enables;
	u32 msr, hid0;
	u32 intr_main_mask;
	void __iomem *irq_0x500 = (void __iomem *)CONFIG_KERNEL_START + 0x500;
	unsigned long irq_0x500_stop = (unsigned long)irq_0x500 + mpc52xx_ds_cached_size;
	char saved_0x500[mpc52xx_ds_cached_size];

	/* disable all interrupts in PIC */
	intr_main_mask = in_be32(&intr->main_mask);
	out_be32(&intr->main_mask, intr_main_mask | 0x1ffff);

	/* don't let DEC expire any time soon */
	mtspr(SPRN_DEC, 0x7fffffff);

	/* save SRAM */
	memcpy(saved_sram, sram, sram_size);

	/* copy low level suspend code to sram */
	memcpy(sram, mpc52xx_ds_sram, mpc52xx_ds_sram_size);

	out_8(&cdm->ccs_sleep_enable, 1);
	out_8(&cdm->osc_sleep_enable, 1);
	out_8(&cdm->ccs_qreq_test, 1);

	/* disable all but SDRAM and bestcomm (SRAM) clocks */
	clk_enables = in_be32(&cdm->clk_enables);
	out_be32(&cdm->clk_enables, clk_enables & 0x00088000);

	/* disable power management */
	msr = mfmsr();
	mtmsr(msr & ~MSR_POW);

	/* enable sleep mode, disable others */
	hid0 = mfspr(SPRN_HID0);
	mtspr(SPRN_HID0, (hid0 & ~(HID0_DOZE | HID0_NAP | HID0_DPM)) | HID0_SLEEP);

	/* save original, copy our irq handler, flush from dcache and invalidate icache */
	memcpy(saved_0x500, irq_0x500, mpc52xx_ds_cached_size);
	memcpy(irq_0x500, mpc52xx_ds_cached, mpc52xx_ds_cached_size);
	flush_icache_range((unsigned long)irq_0x500, irq_0x500_stop);

	/* call low-level sleep code */
	mpc52xx_deep_sleep(sram, sdram, cdm, intr);

	/* restore original irq handler */
	memcpy(irq_0x500, saved_0x500, mpc52xx_ds_cached_size);
	flush_icache_range((unsigned long)irq_0x500, irq_0x500_stop);

	/* restore old power mode */
	mtmsr(msr & ~MSR_POW);
	mtspr(SPRN_HID0, hid0);
	mtmsr(msr);

	out_be32(&cdm->clk_enables, clk_enables);
	out_8(&cdm->ccs_sleep_enable, 0);
	out_8(&cdm->osc_sleep_enable, 0);

	/* restore SRAM */
	memcpy(sram, saved_sram, sram_size);

	/* restart jiffies */
	wakeup_decrementer();

	/* reenable interrupts in PIC */
	out_be32(&intr->main_mask, intr_main_mask);

	return 0;
}
Example #9
int proc_pmc_get_control(char *page, char **start, off_t off, int count,
			 int *eof, void *data)
{
	int len = 0;

	if ( proc_pmc_control_mode == PMC_CONTROL_CPI ) {
		unsigned long mach_cycles   = mfspr( PMC5 );
		unsigned long inst_complete = mfspr( PMC4 );
		unsigned long inst_dispatch = mfspr( PMC3 );
		unsigned long thread_active_run = mfspr( PMC1 );
		unsigned long thread_active  = mfspr( PMC2 );
		unsigned long cpi = 0;
		unsigned long cpithou = 0;
		unsigned long remain;
	
		if ( inst_complete ) {
			cpi = thread_active_run / inst_complete;
			remain = thread_active_run % inst_complete;
			if ( inst_complete > 1000000 ) 
				cpithou = remain / ( inst_complete / 1000 );
			else 
				cpithou = ( remain * 1000 ) / inst_complete;
		}
		len += sprintf( page+len, "PMC CPI Mode\nRaw Counts\n" );
		len += sprintf( page+len, "machine cycles           : %12lu\n", mach_cycles );
		len += sprintf( page+len, "thread active cycles     : %12lu\n\n", thread_active );

		len += sprintf( page+len, "instructions completed   : %12lu\n", inst_complete );
		len += sprintf( page+len, "instructions dispatched  : %12lu\n", inst_dispatch );
		len += sprintf( page+len, "thread active run cycles : %12lu\n", thread_active_run );

		len += sprintf( page+len, "thread active run cycles/instructions completed\n" );
		len += sprintf( page+len, "CPI = %lu.%03lu\n", cpi, cpithou );
		
	}
	else if ( proc_pmc_control_mode == PMC_CONTROL_TLB ) {
		len += sprintf( page+len, "PMC TLB Mode\n" );
		len += sprintf( page+len, "I-miss count             : %12lu\n", mfspr( PMC1 ) );
		len += sprintf( page+len, "I-miss latency           : %12lu\n", mfspr( PMC2 ) );
		len += sprintf( page+len, "D-miss count             : %12lu\n", mfspr( PMC3 ) );
		len += sprintf( page+len, "D-miss latency           : %12lu\n", mfspr( PMC4 ) );
		len += sprintf( page+len, "IERAT miss count         : %12lu\n", mfspr( PMC5 ) );
		len += sprintf( page+len, "D-reference count        : %12lu\n", mfspr( PMC6 ) );
		len += sprintf( page+len, "miss PTEs searched       : %12lu\n", mfspr( PMC7 ) );
		len += sprintf( page+len, "miss >8 PTEs searched    : %12lu\n", mfspr( PMC8 ) );
	}
	/* IMPLEMENT ME */
	return pmc_calc_metrics( page, start, off, count, eof, len );
}
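The CPI computation above prints a three-decimal-place quotient using integer arithmetic only: the division gives the integer part, the remainder gives the thousandths, and the divisor is pre-scaled when it is large enough that remainder * 1000 could overflow. A standalone sketch of the same trick (the function name is illustrative):

/* Print num/den as "q.ttt" without floating point, mirroring the
 * scheme used in proc_pmc_get_control() above. */
static void print_ratio(unsigned long num, unsigned long den)
{
	unsigned long q = 0, thou = 0, remain;

	if (den) {
		q = num / den;
		remain = num % den;
		if (den > 1000000)
			thou = remain / (den / 1000); /* avoid remain*1000 overflow */
		else
			thou = (remain * 1000) / den;
	}
	printk("ratio = %lu.%03lu\n", q, thou);
}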
Example #10
static void do_show_pwrmgtcr0(void *val)
{
	u32 *value = val;

	*value = mfspr(SPRN_PWRMGTCR0);
}
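SPRs are per-core, so a helper with this shape is normally run on the target CPU through a cross-call rather than invoked directly. A sketch of a plausible call site (the wrapper is an assumption, not part of the snippet):

/* Read PWRMGTCR0 on a given CPU: smp_call_function_single() executes
 * the helper on that CPU and, with wait=1, blocks until it completes. */
static u32 read_pwrmgtcr0_on(int cpu)
{
	u32 value = 0;

	smp_call_function_single(cpu, do_show_pwrmgtcr0, &value, 1);
	return value;
}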
Example #11
void __init
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
	      unsigned long r6, unsigned long r7)
{
	bd_t *binfo = (bd_t *) __res;

	/* parse_bootinfo must always be called first */
	parse_bootinfo(find_bootinfo());

	/*
	 * If we were passed in a board information, copy it into the
	 * residual data area.
	 */
	if (r3) {
		memcpy((void *) __res, (void *) (r3 + KERNELBASE),
		       sizeof (bd_t));
	}

#if defined(CONFIG_BLK_DEV_INITRD)
	/*
	 * If the init RAM disk has been configured in, and there's a valid
	 * starting address for it, set it up.
	 */
	if (r4) {
		initrd_start = r4 + KERNELBASE;
		initrd_end = r5 + KERNELBASE;
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	/* Copy the kernel command line arguments to a safe place. */
	if (r6) {
		*(char *) (r7 + KERNELBASE) = 0;
		strcpy(cmd_line, (char *) (r6 + KERNELBASE));
	}

	immrbar = binfo->bi_immr_base;

	mpc834x_sys_set_bat();

#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG)
	{
		struct uart_port p;

		memset(&p, 0, sizeof (p));
		p.iotype = SERIAL_IO_MEM;
		p.membase = (unsigned char __iomem *)(VIRT_IMMRBAR + 0x4500);
		p.uartclk = binfo->bi_busfreq;

		gen550_init(0, &p);

		memset(&p, 0, sizeof (p));
		p.iotype = SERIAL_IO_MEM;
		p.membase = (unsigned char __iomem *)(VIRT_IMMRBAR + 0x4600);
		p.uartclk = binfo->bi_busfreq;

		gen550_init(1, &p);
	}
#endif

	identify_ppc_sys_by_id(mfspr(SPRN_SVR));

	/* setup the PowerPC module struct */
	ppc_md.setup_arch = mpc834x_sys_setup_arch;
	ppc_md.show_cpuinfo = mpc834x_sys_show_cpuinfo;

	ppc_md.init_IRQ = mpc834x_sys_init_IRQ;
	ppc_md.get_irq = ipic_get_irq;

	ppc_md.restart = mpc83xx_restart;
	ppc_md.power_off = mpc83xx_power_off;
	ppc_md.halt = mpc83xx_halt;

	ppc_md.find_end_of_memory = mpc83xx_find_end_of_memory;
	ppc_md.setup_io_mappings  = mpc834x_sys_map_io;

	ppc_md.time_init = mpc83xx_time_init;
	ppc_md.set_rtc_time = NULL;
	ppc_md.get_rtc_time = NULL;
	ppc_md.calibrate_decr = mpc83xx_calibrate_decr;

	ppc_md.early_serial_map = mpc83xx_early_serial_map;
#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG)
	ppc_md.progress = gen550_progress;
#endif	/* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */

	if (ppc_md.progress)
		ppc_md.progress("mpc834x_sys_init(): exit", 0);

	return;
}
Example #12
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;
	unsigned long val;

	switch (sprn) {
	case SPRN_PID:
		kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
	case SPRN_PID1:
		kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
	case SPRN_PID2:
		kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
	case SPRN_MAS0:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break;
	case SPRN_MAS1:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break;
	case SPRN_MAS2:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break;
	case SPRN_MAS3:
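		/* mas7_3 stores MAS7 in the upper 32 bits and MAS3 in the
		 * lower 32 bits; the cast keeps only MAS3 (see SPRN_MAS7
		 * below for the other half). */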
		val = (u32)vcpu->arch.shared->mas7_3;
		kvmppc_set_gpr(vcpu, rt, val);
		break;
	case SPRN_MAS4:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break;
	case SPRN_MAS6:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break;
	case SPRN_MAS7:
		val = vcpu->arch.shared->mas7_3 >> 32;
		kvmppc_set_gpr(vcpu, rt, val);
		break;
	case SPRN_TLB0CFG:
		kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
	case SPRN_TLB1CFG:
		kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break;
	case SPRN_L1CSR0:
		kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break;
	case SPRN_L1CSR1:
		kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
	case SPRN_HID0:
		kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
	case SPRN_HID1:
		kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
	case SPRN_SVR:
		kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break;

	case SPRN_MMUCSR0:
		kvmppc_set_gpr(vcpu, rt, 0); break;

	case SPRN_MMUCFG:
		kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break;

	/* extra exceptions */
	case SPRN_IVOR32:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
		break;
	case SPRN_IVOR33:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
		break;
	case SPRN_IVOR34:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
		break;
	case SPRN_IVOR35:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
		break;
	default:
		emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
	}

	return emulated;
}
Example #13
static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(pvr, id) = mfspr(SPRN_PVR);
}
Example #14
/* ************************************************************************
 *
 * Setup the architecture
 *
 */
static void __init mpc83xx_km_setup_arch(void)
{
	struct device_node *np;

	if (ppc_md.progress)
		ppc_md.progress("kmpbec83xx_setup_arch()", 0);

#ifdef CONFIG_PCI
	for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
		mpc83xx_add_bridge(np);
#endif

#ifdef CONFIG_QUICC_ENGINE
	qe_reset();

	np = of_find_node_by_name(NULL, "par_io");
	if (np != NULL) {
		par_io_init(np);
		of_node_put(np);

		for_each_node_by_name(np, "spi")
			par_io_of_config(np);

		for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;)
			par_io_of_config(np);
	}

	np = of_find_compatible_node(NULL, "network", "ucc_geth");
	if (np != NULL) {
		uint svid;

		/* handle mpc8360ea rev.2.1 erratum 2: RGMII Timing */
		svid = mfspr(SPRN_SVR);
		if (SVR_REV(svid) == 0x0021) {
			struct	device_node *np_par;
			struct	resource res;
			void	__iomem *base;
			int	ret;

			np_par = of_find_node_by_name(NULL, "par_io");
			if (np_par == NULL) {
				printk(KERN_WARNING "%s couldn;t find par_io node\n",
					__func__);
				return;
			}
			/* Map Parallel I/O ports registers */
			ret = of_address_to_resource(np_par, 0, &res);
			if (ret) {
				printk(KERN_WARNING "%s couldn;t map par_io registers\n",
					__func__);
				return;
			}
			base = ioremap(res.start, resource_size(&res));

			/*
			 * IMMR + 0x14A8[4:5] = 11 (clk delay for UCC 2)
			 * IMMR + 0x14A8[18:19] = 11 (clk delay for UCC 1)
			 */
			setbits32((base + 0xa8), 0x0c003000);

			/*
			 * IMMR + 0x14AC[20:27] = 10101010
			 * (data delay for both UCC's)
			 */
			clrsetbits_be32((base + 0xac), 0xff0, 0xaa0);
			iounmap(base);
			of_node_put(np_par);
		}
		of_node_put(np);
	}
#endif				/* CONFIG_QUICC_ENGINE */
}
Example #15
File: irq.c Project: 08opt/linux
static void or1k_pic_unmask(struct irq_data *data)
{
	mtspr(SPR_PICMR, mfspr(SPR_PICMR) | (1UL << data->irq));
}
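Unmasking sets the line's bit in the PIC mask register; masking would clear it. A sketch of the symmetric handler, assuming the same PICMR layout:

static void or1k_pic_mask(struct irq_data *data)
{
	/* Clear the line's enable bit in PICMR to mask the interrupt. */
	mtspr(SPR_PICMR, mfspr(SPR_PICMR) & ~(1UL << data->irq));
}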
Example #16
static inline void proc_pmc_start(void)
{
	/* Unfreeze all counters, leave everything else alone */
	mtspr( MMCR0, mfspr( MMCR0 ) & ~0x80000000 );

}
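The 0x80000000 bit of MMCR0 is the freeze-counters control, so the matching stop routine would set it instead of clearing it. A sketch under that assumption, in the same style:

static inline void proc_pmc_stop(void)
{
	/* Freeze all counters, leave everything else alone */
	mtspr( MMCR0, mfspr( MMCR0 ) | 0x80000000 );
}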
Example #17
void __init
adjust_total_lowmem(void)
{
	unsigned long max_low_mem = MAX_LOW_MEM;
	
#ifdef HAVE_BATS
	unsigned long bat_max = 0x10000000;
	unsigned long align;
	unsigned long ram;
	int is601 = 0;
	
	/* 601s have smaller BATs */
	if (PVR_VER(mfspr(PVR)) == 1) {
		bat_max = 0x00800000;
		is601 = 1;
	}

	/* adjust BAT block size to max_low_mem */
	if (max_low_mem < bat_max)
		bat_max = max_low_mem;

	/* adjust lowmem size to max_low_mem */
	if (max_low_mem < total_lowmem)
		ram = max_low_mem;
	else
		ram = total_lowmem;

	/* Make sure we don't map a block larger than the
	   smallest alignment of the physical address. */
	/* alignment of PPC_MEMSTART */
	align = ~(PPC_MEMSTART-1) & PPC_MEMSTART;
	/* set BAT block size to MIN(max_size, align) */
	if (align && align < bat_max)
		bat_max = align;

	/* Calculate BAT values */	
	__bat2 = 1UL << __ilog2(ram);
	if (__bat2 > bat_max)
		__bat2 = bat_max;
	ram -= __bat2;
	if (ram) {
		__bat3 = 1UL << __ilog2(ram);
		if (__bat3 > bat_max)
			__bat3 = bat_max;
		ram -= __bat3;
	}

	printk(KERN_INFO "Memory BAT mapping: BAT2=%ldMb, BAT3=%ldMb,"
			" residual: %ldMb\n", __bat2 >> 20, __bat3 >> 20,
			(total_lowmem - (__bat2 - __bat3)) >> 20);

	/* On SMP, we limit the lowmem to the area mapped with BATs.
	 * We also assume nobody will do SMP with 601s
	 */
#ifdef CONFIG_SMP
	if (!is601)
		max_low_mem = __bat2 + __bat3;
#endif /* CONFIG_SMP */

#endif /* HAVE_BATS */
	if (total_lowmem > max_low_mem) {
		total_lowmem = max_low_mem;
#ifndef CONFIG_HIGHMEM
		printk(KERN_INFO "Warning, memory limited to %ld Mb, use "
				"CONFIG_HIGHMEM to reach %ld Mb\n",
				max_low_mem >> 20, total_memory >> 20);
		total_memory = total_lowmem;
#endif /* CONFIG_HIGHMEM */
	}
}
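A worked example of the BAT sizing: with ram = 192 MB and bat_max = 256 MB, __ilog2 of 192 MB yields a 128 MB __bat2; the 64 MB left over is itself a power of two, so __bat3 = 64 MB and the printed residual is 0 MB.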
Example #18
/*
 * As above, but Transactional Memory is in use, so deliver sigcontexts
 * containing checkpointed and transactional register states.
 *
 * To do this, we treclaim (done before entering here) to gather both sets of
 * registers and set up the 'normal' sigcontext registers with rolled-back
 * register values such that a simple signal handler sees a correct
 * checkpointed register state.  If interested, a TM-aware sighandler can
 * examine the transactional registers in the 2nd sigcontext to determine the
 * real origin of the signal.
 */
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
				 struct sigcontext __user *tm_sc,
				 struct pt_regs *regs,
				 int signr, sigset_t *set, unsigned long handler)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)
		(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
	elf_vrreg_t __user *tm_v_regs = (elf_vrreg_t __user *)
		(((unsigned long)tm_sc->vmx_reserve + 15) & ~0xful);
#endif
	unsigned long msr = regs->msr;
	long err = 0;

	BUG_ON(!MSR_TM_ACTIVE(regs->msr));

	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state.  This also ensures
	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
	 */
	regs->msr &= ~MSR_TS_MASK;

	flush_fp_to_thread(current);

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);
	err |= __put_user(tm_v_regs, &tm_sc->v_regs);

	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &current->thread.vr_state,
				      33 * sizeof(vector128));
		/* If VEC was enabled there are transactional VRs valid too,
		 * else they're a copy of the checkpointed VRs.
		 */
		if (msr & MSR_VEC)
			err |= __copy_to_user(tm_v_regs,
					      &current->thread.transact_vr,
					      33 * sizeof(vector128));
		else
			err |= __copy_to_user(tm_v_regs,
					      &current->thread.vr_state,
					      33 * sizeof(vector128));

		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_reg contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
	if (msr & MSR_VEC)
		err |= __put_user(current->thread.transact_vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	else
		err |= __put_user(current->thread.vrsave,
				  (u32 __user *)&tm_v_regs[33]);

#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
	err |= __put_user(0, &tm_sc->v_regs);
#endif /* CONFIG_ALTIVEC */

	/* copy fpr regs and fpscr (the source snippet is truncated here;
	 * the transactional FP, VSX and GP copies follow in the full
	 * function) */
	err |= copy_fpr_to_user(&sc->fp_regs, current);

	return err;
}
Example #19
int
checkcpu(void)
{
	sys_info_t sysinfo;
	uint pvr, svr;
	uint major, minor;
	char buf1[32], buf2[32];
	volatile immap_t *immap = (immap_t *) CONFIG_SYS_IMMR;
	volatile ccsr_gur_t *gur = &immap->im_gur;
	struct cpu_type *cpu;
	uint msscr0 = mfspr(MSSCR0);

	svr = get_svr();
	major = SVR_MAJ(svr);
	minor = SVR_MIN(svr);

	if (cpu_numcores() > 1) {
#ifndef CONFIG_MP
		puts("Unicore software on multiprocessor system!!\n"
		     "To enable mutlticore build define CONFIG_MP\n");
#endif
	}
	puts("CPU:   ");

	cpu = gd->arch.cpu;

	puts(cpu->name);

	printf(", Version: %d.%d, (0x%08x)\n", major, minor, svr);
	puts("Core:  ");

	pvr = get_pvr();
	major = PVR_E600_MAJ(pvr);
	minor = PVR_E600_MIN(pvr);

	printf("e600 Core %d", (msscr0 & 0x20) ? 1 : 0);
	if (gur->pordevsr & MPC86xx_PORDEVSR_CORE1TE)
		puts("\n    Core1Translation Enabled");
	debug(" (MSSCR0=%x, PORDEVSR=%x)", msscr0, gur->pordevsr);

	printf(", Version: %d.%d, (0x%08x)\n", major, minor, pvr);

	get_sys_info(&sysinfo);

	puts("Clock Configuration:\n");
	printf("       CPU:%-4s MHz, ", strmhz(buf1, sysinfo.freq_processor));
	printf("MPX:%-4s MHz\n", strmhz(buf1, sysinfo.freq_systembus));
	printf("       DDR:%-4s MHz (%s MT/s data rate), ",
		strmhz(buf1, sysinfo.freq_systembus / 2),
		strmhz(buf2, sysinfo.freq_systembus));

	if (sysinfo.freq_localbus > LCRR_CLKDIV) {
		printf("LBC:%-4s MHz\n", strmhz(buf1, sysinfo.freq_localbus));
	} else {
		printf("LBC: unknown (LCRR[CLKDIV] = 0x%02lx)\n",
		       sysinfo.freq_localbus);
	}

	puts("L1:    D-cache 32 KiB enabled\n");
	puts("       I-cache 32 KiB enabled\n");

	puts("L2:    ");
	if (get_l2cr() & 0x80000000) {
#if defined(CONFIG_ARCH_MPC8610)
		puts("256");
#elif defined(CONFIG_ARCH_MPC8641)
		puts("512");
#endif
		puts(" KiB enabled\n");
	} else {
		puts("Disabled\n");
	}

	return 0;
}
Example #20
void
trap(struct trapframe *frame)
{
	struct thread	*td;
	struct proc	*p;
	int		sig, type, user;
	ksiginfo_t	ksi;

	PCPU_INC(cnt.v_trap);

	td = curthread;
	p = td->td_proc;

	type = frame->exc;
	sig = 0;
	user = (frame->srr1 & PSL_PR) ? 1 : 0;

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", p->p_comm,
	    trapname(type), user ? "user" : "kernel");

	if (user) {
		td->td_frame = frame;
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_DSI:
		case EXC_ISI:
			sig = trap_pfault(frame, 1);
			break;

		case EXC_SC:
			syscall(frame);
			break;

		case EXC_ALI:
			if (fix_unaligned(td, frame) != 0)
				sig = SIGBUS;
			else
				frame->srr0 += 4;
			break;

		case EXC_DEBUG:	/* Single stepping */
			mtspr(SPR_DBSR, mfspr(SPR_DBSR));
			frame->srr1 &= ~PSL_DE;
			frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
			sig = SIGTRAP;
			break;

		case EXC_PGM:	/* Program exception */
#ifdef FPU_EMU
			sig = fpu_emulate(frame,
			    (struct fpreg *)&td->td_pcb->pcb_fpu);
#else
			/* XXX SIGILL for non-trap instructions. */
			sig = SIGTRAP;
#endif
			break;

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */
		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));

		switch (type) {
		case EXC_DEBUG:
			mtspr(SPR_DBSR, mfspr(SPR_DBSR));
			kdb_trap(frame->exc, 0, frame);
			return;

		case EXC_DSI:
			if (trap_pfault(frame, 0) == 0)
 				return;
			break;

		case EXC_MCHK:
			if (handle_onfault(frame))
 				return;
			break;
#ifdef KDB
		case EXC_PGM:
			if (frame->cpu.booke.esr & ESR_PTR)
				kdb_trap(EXC_PGM, 0, frame);
			return;
#endif
		default:
			break;
		}
		trap_fatal(frame);
	}

	if (sig != 0) {
		if (p->p_sysent->sv_transtrap != NULL)
			sig = (p->p_sysent->sv_transtrap)(sig, type);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = sig;
		ksi.ksi_code = type; /* XXX, not POSIX */
		/* ksi.ksi_addr = ? */
		ksi.ksi_trapno = type;
		trapsignal(td, &ksi);
	}

	userret(td, frame);
	mtx_assert(&Giant, MA_NOTOWNED);
}
Example #21
void
init_85xx_watchdog(void)
{
	mtspr(SPRN_TCR, (mfspr(SPRN_TCR) & ~WATCHDOG_MASK) |
	      TCR_WP(CONFIG_WATCHDOG_PRESC) | TCR_WRC(CONFIG_WATCHDOG_RC));
}
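Arming the watchdog is only half the job; it must also be serviced before the period expires. A sketch of the matching service routine, assuming the usual Book-E convention that the watchdog status bit is cleared by writing it back as 1 (the TSR_WIS name is an assumption):

void reset_85xx_watchdog(void)
{
	/* Writing 1 to TSR[WIS] clears the watchdog-interrupt status
	 * and restarts the countdown. */
	mtspr(SPRN_TSR, TSR_WIS);
}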
Example #22
int ClockIsOn(const rtems_irq_connect_data* unused)
{
    return ((mfspr(TCR) & PIE) != 0);
}
Example #23
void JitArm::mftb(UGeckoInstruction inst)
{
	INSTRUCTION_START
	JITDISABLE(SystemRegisters)
	mfspr(inst);
}
Example #24
static void power9_config_bhrb(u64 pmu_bhrb_filter)
{
	/* Enable BHRB filter in PMU */
	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
Example #25
void __init
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
              unsigned long r6, unsigned long r7)
{
    /* Generic MPC52xx platform initialization */
    /* TODO Create one and move a max of stuff in it.
       Put this init in the syslib */

    struct bi_record *bootinfo = find_bootinfo();

    if (bootinfo)
        parse_bootinfo(bootinfo);
    else {
        /* Load the bd_t board info structure */
        if (r3)
            memcpy((void*)&__res,(void*)(r3+KERNELBASE),
                   sizeof(bd_t));

#ifdef CONFIG_BLK_DEV_INITRD
        /* Load the initrd */
        if (r4) {
            initrd_start = r4 + KERNELBASE;
            initrd_end = r5 + KERNELBASE;
        }
#endif

        /* Load the command line */
        if (r6) {
            *(char *)(r7+KERNELBASE) = 0;
            strcpy(cmd_line, (char *)(r6+KERNELBASE));
        }
    }

    /* PPC Sys identification */
    identify_ppc_sys_by_id(mfspr(SPRN_SVR));

    /* BAT setup */
    mpc52xx_set_bat();

    /* No ISA bus by default */
    isa_io_base		= 0;
    isa_mem_base		= 0;

    /* Powersave */
    /* This is provided as an example of how to do it. But be aware
       that NAP disables bus snooping, and snooping may be required
       for some devices to work properly, like USB ... */
    /* powersave_nap = 1; */


    /* Setup the ppc_md struct */
    ppc_md.setup_arch	= lite5200_setup_arch;
    ppc_md.show_cpuinfo	= lite5200_show_cpuinfo;
    ppc_md.show_percpuinfo	= NULL;
    ppc_md.init_IRQ		= mpc52xx_init_irq;
    ppc_md.get_irq		= mpc52xx_get_irq;

#ifdef CONFIG_PCI
    ppc_md.pci_map_irq	= lite5200_map_irq;
#endif

    ppc_md.find_end_of_memory = mpc52xx_find_end_of_memory;
    ppc_md.setup_io_mappings  = mpc52xx_map_io;

    ppc_md.restart		= mpc52xx_restart;
    ppc_md.power_off	= mpc52xx_power_off;
    ppc_md.halt		= mpc52xx_halt;

    /* No time keeper on the LITE5200 */
    ppc_md.time_init	= NULL;
    ppc_md.get_rtc_time	= NULL;
    ppc_md.set_rtc_time	= NULL;

    ppc_md.calibrate_decr	= mpc52xx_calibrate_decr;
#ifdef CONFIG_SERIAL_TEXT_DEBUG
    ppc_md.progress		= mpc52xx_progress;
#endif
}
Example #26
File: irq.c Project: 08opt/linux
/* read interrupt enabled status */
unsigned long arch_local_save_flags(void)
{
	return mfspr(SPR_SR) & (SPR_SR_IEE|SPR_SR_TEE);
}
Example #27
int show_cpuinfo(struct seq_file *m, void *v)
{
	int i = (int) v - 1;
	int err = 0;
	unsigned int pvr;
	unsigned short maj, min;
	unsigned long lpj;

	if (i >= NR_CPUS) {
		/* Show summary information */
#ifdef CONFIG_SMP
		unsigned long bogosum = 0;
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_online(i))
				bogosum += cpu_data[i].loops_per_jiffy;
		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
			   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
#endif /* CONFIG_SMP */

		if (ppc_md.show_cpuinfo != NULL)
			err = ppc_md.show_cpuinfo(m);
		return err;
	}

#ifdef CONFIG_SMP
	if (!cpu_online(i))
		return 0;
	pvr = cpu_data[i].pvr;
	lpj = cpu_data[i].loops_per_jiffy;
#else
	pvr = mfspr(SPRN_PVR);
	lpj = loops_per_jiffy;
#endif

	seq_printf(m, "processor\t: %d\n", i);
	seq_printf(m, "cpu\t\t: ");

	if (cur_cpu_spec->pvr_mask)
		seq_printf(m, "%s", cur_cpu_spec->cpu_name);
	else
		seq_printf(m, "unknown (%08x)", pvr);
#ifdef CONFIG_ALTIVEC
	if (cur_cpu_spec->cpu_features & CPU_FTR_ALTIVEC)
		seq_printf(m, ", altivec supported");
#endif
	seq_printf(m, "\n");

#ifdef CONFIG_TAU
	if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
#ifdef CONFIG_TAU_AVERAGE
		/* more straightforward, but potentially misleading */
		seq_printf(m,  "temperature \t: %u C (uncalibrated)\n",
			   cpu_temp(i));
#else
		/* show the actual temp sensor range */
		u32 temp;
		temp = cpu_temp_both(i);
		seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
			   temp & 0xff, temp >> 16);
#endif
	}
#endif /* CONFIG_TAU */

	/* The remainder of the per-CPU report is truncated in the
	 * source snippet. */
	return 0;
}
Example #28
File: irq.c Project: 08opt/linux
/* set interrupt enabled status */
void arch_local_irq_restore(unsigned long flags)
{
	mtspr(SPR_SR, ((mfspr(SPR_SR) & ~(SPR_SR_IEE|SPR_SR_TEE)) | flags));
}
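These two helpers form a save/restore pair around code that must leave the interrupt-enable state exactly as it found it. A minimal usage sketch:

static void guarded_section(void)
{
	unsigned long flags = arch_local_save_flags(); /* capture IEE/TEE */

	/* ... work that must not leak a changed interrupt state ... */

	arch_local_irq_restore(flags); /* put the saved bits back */
}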
Example #29
int __init kvmppc_booke_init(void)
{
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR must
	 * be 16-bit aligned, so we need a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
	                                         VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
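	/* mfspr() encodes the SPR number in the instruction, so it needs a
	 * compile-time constant; that is why the sixteen reads below are
	 * unrolled rather than looped. */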
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
	                   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

	return 0;
}
Example #30
/*
 * Initialize some remaining members of the naca and systemcfg structures
 * (at least until we get rid of them completely). This is mostly some
 * cache information about the CPU that will be used by cache flush
 * routines and/or provided to userland.
 */
static void __init initialize_naca(void)
{
    struct device_node *np;
    unsigned long num_cpus = 0;

    DBG(" -> initialize_naca()\n");

    for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
        num_cpus += 1;

        /* We're assuming *all* of the CPUs have the same
         * d-cache and i-cache sizes... -Peter
         */

        if ( num_cpus == 1 ) {
            u32 *sizep, *lsizep;
            u32 size, lsize;
            const char *dc, *ic;

            /* Then read the cache information */
            if (systemcfg->platform == PLATFORM_POWERMAC) {
                dc = "d-cache-block-size";
                ic = "i-cache-block-size";
            } else {
                dc = "d-cache-line-size";
                ic = "i-cache-line-size";
            }

            size = 0;
            lsize = cur_cpu_spec->dcache_bsize;
            sizep = (u32 *)get_property(np, "d-cache-size", NULL);
            if (sizep != NULL)
                size = *sizep;
            lsizep = (u32 *) get_property(np, dc, NULL);
            if (lsizep != NULL)
                lsize = *lsizep;

            if (sizep == NULL || lsizep == NULL)
                DBG("Argh, can't find dcache properties ! "
                    "sizep: %p, lsizep: %p\n", sizep, lsizep);

            systemcfg->dCacheL1Size = size;
            systemcfg->dCacheL1LineSize = lsize;
            naca->dCacheL1LogLineSize = __ilog2(lsize);
            naca->dCacheL1LinesPerPage = PAGE_SIZE/(lsize);

            size = 0;
            lsize = cur_cpu_spec->icache_bsize;
            sizep = (u32 *)get_property(np, "i-cache-size", NULL);
            if (sizep != NULL)
                size = *sizep;
            lsizep = (u32 *)get_property(np, ic, NULL);
            if (lsizep != NULL)
                lsize = *lsizep;
            if (sizep == NULL || lsizep == NULL)
                DBG("Argh, can't find icache properties ! "
                    "sizep: %p, lsizep: %p\n", sizep, lsizep);

            systemcfg->iCacheL1Size = size;
            systemcfg->iCacheL1LineSize = lsize;
            naca->iCacheL1LogLineSize = __ilog2(lsize);
            naca->iCacheL1LinesPerPage = PAGE_SIZE/(lsize);

        }
    }

    /* Add an eye catcher and the systemcfg layout version number */
    strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
    systemcfg->version.major = SYSTEMCFG_MAJOR;
    systemcfg->version.minor = SYSTEMCFG_MINOR;
    systemcfg->processor = mfspr(SPRN_PVR);

    DBG(" <- initialize_naca()\n");
}