Example #1
void
cpu_initclocks(void)
{
	static struct timecounter tc = {
		.tc_get_timecount = get_itimer_count,
		.tc_name = "itimer",
		.tc_counter_mask = ~0,
		.tc_quality = 100,
	};

	extern u_int cpu_hzticks;
	u_int time_inval;

	tc.tc_frequency = cpu_hzticks * hz;

	/* Start the interval timer. */
	mfctl(CR_ITMR, time_inval);
	mtctl(time_inval + cpu_hzticks, CR_ITMR);

	tc_init(&tc);
}

unsigned
get_itimer_count(struct timecounter *tc)
{
	uint32_t val;

	mfctl(CR_ITMR, val);

	return val;
}
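
The examples on this page mix two calling conventions for mfctl(). The BSD ports use a statement macro that writes into a variable passed as the second argument (mfctl(CR_ITMR, time_inval) above), while the Linux examples use an expression form (now = mfctl(16)). A minimal sketch of both, assuming GCC extended inline asm; the real definitions live in the respective machine headers, and mfctl_expr is a hypothetical name used here only to show the distinction:

/* Sketch only: BSD-style statement macros.  The control-register
 * number must be a compile-time constant ("i" constraint). */
#define mfctl(reg, r)	__asm volatile("mfctl %1, %0" : "=r" (r) : "i" (reg))
#define mtctl(v, reg)	__asm volatile("mtctl %0, %1" :: "r" (v), "i" (reg))

/* Sketch only: Linux-style expression form, pasting the register
 * number into the mnemonic so callers can write mfctl(16). */
#define mfctl_expr(reg) ({					\
	unsigned long __cr;					\
	__asm__ __volatile__("mfctl " #reg ",%0" : "=r" (__cr));\
	__cr;							\
})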
Example #2
/*
 * We keep time on PA-RISC Linux by using the Interval Timer which is
 * a pair of registers; one is read-only and one is write-only; both
 * accessed through CR16.  The read-only register is 32 or 64 bits wide,
 * and increments by 1 every CPU clock tick.  The architecture only
 * guarantees us a rate between 0.5 and 2, but all implementations use a
 * rate of 1.  The write-only register is 32-bits wide.  When the lowest
 * 32 bits of the read-only register compare equal to the write-only
 * register, it raises a maskable external interrupt.  Each processor has
 * an Interval Timer of its own and they are not synchronised.  
 *
 * We want to generate an interrupt every 1/HZ seconds.  So we program
 * CR16 to interrupt every @clocktick cycles.  The it_value in cpu_data
 * is programmed with the intended time of the next tick.  We can be
 * held off for an arbitrarily long period of time by interrupts being
 * disabled, so we may miss one or more ticks.
 */
irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
{
	unsigned long now;
	unsigned long next_tick;
	unsigned long ticks_elapsed = 0;
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

	/* gcc can optimize for "read-only" case with a local clocktick */
	unsigned long cpt = clocktick;

	profile_tick(CPU_PROFILING);

	/* Initialize next_tick to the old expected tick time. */
	next_tick = cpuinfo->it_value;

	/* Calculate how many ticks have elapsed. */
	do {
		++ticks_elapsed;
		next_tick += cpt;
		now = mfctl(16);
	} while (next_tick - now > cpt);

	/* Store (in CR16 cycles) up to when we are accounting right now. */
	cpuinfo->it_value = next_tick;

	/* Go do system house keeping. */
	if (cpu == 0)
		xtime_update(ticks_elapsed);

	update_process_times(user_mode(get_irq_regs()));

	/* Skip clockticks on purpose if we know we would miss those.
	 * The new CR16 must be "later" than current CR16 otherwise
	 * itimer would not fire until CR16 wrapped - e.g. 4 seconds
	 * later on a 1GHz processor. We'll account for the missed
	 * ticks on the next timer interrupt.
	 * We want IT to fire modulo clocktick even if we miss/skip some.
	 * But those interrupts don't in fact get delivered that regularly.
	 *
	 * "next_tick - now" will always give the difference regardless
	 * if one or the other wrapped. If "now" is "bigger" we'll end up
	 * with a very large unsigned number.
	 */
	while (next_tick - mfctl(16) > cpt)
		next_tick += cpt;

	/* Program the IT when to deliver the next interrupt.
	 * Only bottom 32-bits of next_tick are writable in CR16!
	 * Timer interrupt will be delivered at least a few hundred cycles
	 * after the IT fires, so if we are too close (<= 500 cycles) to the
	 * next cycle, simply skip it.
	 */
	if (next_tick - mfctl(16) <= 500)
		next_tick += cpt;
	mtctl(next_tick, 16);

	return IRQ_HANDLED;
}
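
The wrap-safety argument in the comments above is plain unsigned modular arithmetic: "next_tick - now" is the true distance to the next tick even if CR16 wrapped between the two reads. A self-contained demonstration with hypothetical values (a 32-bit counter is assumed):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t now = 0xfffffff0u;	/* counter just about to wrap */
	uint32_t cpt = 0x100u;		/* hypothetical cycles per tick */
	uint32_t next_tick = now + cpt;	/* wraps to 0x000000f0 */

	/* The unsigned difference is still exactly cpt, so the
	 * "next_tick - now > cpt" test above correctly reports that
	 * no tick has been missed. */
	printf("next_tick=0x%08x diff=%u missed=%d\n",
	    (unsigned)next_tick, (unsigned)(next_tick - now),
	    (next_tick - now) > cpt);
	return 0;
}

The older handlers in Examples #9 and #12 solve the same problem with signed differences and a half-tick slack instead of the unsigned comparison used here.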
Example #3
void show_regs(struct pt_regs *regs)
{
	int i;
	char buf[128], *p;
	char *level;
	unsigned long cr30;
	unsigned long cr31;

	level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;

	printk("%s\n", level); /* don't want to have that pretty register dump messed up */

	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4) {
		int j;
		p = buf;
		p += sprintf(p, "%sr%02d-%02d ", level, i, i + 3);
		for (j = 0; j < 4; j++) {
			p += sprintf(p, " " RFMT, (i+j) == 0 ? 0 : regs->gr[i + j]);
		}
		printk("%s\n", buf);
	}

	for (i = 0; i < 8; i += 4) {
		int j;
		p = buf;
		p += sprintf(p, "%ssr%d-%d  ", level, i, i + 3);
		for (j = 0; j < 4; j++) {
			p += sprintf(p, " " RFMT, regs->sr[i + j]);
		}
		printk("%s\n", buf);
	}

#if RIDICULOUSLY_VERBOSE
	for (i = 0; i < 32; i += 2)
		printk("%sFR%02d : %016lx  FR%2d : %016lx", level, i,
				regs->fr[i], i+1, regs->fr[i+1]);
#endif

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
	       level, ((struct task_struct *)cr30)->processor, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
}
Example #4
int
clock_intr(void *v)
{
	struct clockframe *frame = v;
	extern u_int cpu_hzticks;
	u_int time_inval;

	/* Restart the interval timer. */
	mfctl(CR_ITMR, time_inval);
	mtctl(time_inval + cpu_hzticks, CR_ITMR);

	/* printf ("clock int 0x%x @ 0x%x for %p\n", t,
	   CLKF_PC(frame), curproc); */

	if (!cold)
		hardclock(frame);

#if 0
	ddb_regs = *frame;
	db_show_regs(NULL, 0, 0, NULL);
#endif

	/* printf ("clock out 0x%x\n", t); */

	return 1;
}
Example #5
void __init time_init(void)
{
	unsigned long next_tick;
	static struct pdc_tod tod_data;

	clocktick = (100 * PAGE0->mem_10msec) / HZ;
	halftick = clocktick / 2;

	/* Setup clock interrupt timing */

	next_tick = mfctl(16);
	next_tick += clocktick;
	cpu_data[smp_processor_id()].it_value = next_tick;

	/* kick off Itimer (CR16) */
	mtctl(next_tick, 16);

	if(pdc_tod_read(&tod_data) == 0) {
		write_lock_irq(&xtime_lock);
		xtime.tv_sec = tod_data.tod_sec;
		xtime.tv_usec = tod_data.tod_usec;
		write_unlock_irq(&xtime_lock);
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		xtime.tv_sec = 0;
		xtime.tv_usec = 0;
	}
}
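
PAGE0->mem_10msec is the firmware-reported number of CR16 cycles in 10 ms, so 100 * mem_10msec is cycles per second and dividing by HZ yields cycles per clock tick. A worked example with hypothetical figures:

/* Hypothetical 250 MHz CR16, HZ = 100:
 *   mem_10msec = 2500000 cycles per 10 ms
 *   cycles/sec = 100 * 2500000 = 250000000
 *   clocktick  = 250000000 / 100 = 2500000 cycles per tick
 *   halftick   = 1250000 cycles
 */
static unsigned long clocktick_for(unsigned long mem_10msec, unsigned long hz)
{
	return (100 * mem_10msec) / hz;
}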
Example #6
/*
 * Return the number of micro-seconds that elapsed since the last
 * update to wall time (aka xtime aka wall_jiffies).  The xtime_lock
 * must be at least read-locked when calling this routine.
 */
static inline unsigned long
gettimeoffset (void)
{
#ifndef CONFIG_SMP
	/*
	 * FIXME: This won't work on smp because jiffies are updated by cpu 0.
	 *    Once parisc-linux learns the cr16 difference between processors,
	 *    this could be made to work.
	 */
	long last_tick;
	long elapsed_cycles;

	/* it_value is the intended time of the next tick */
	last_tick = cpu_data[smp_processor_id()].it_value;

	/* Subtract one tick and account for possible difference between
	 * when we expected the tick and when it actually arrived.
	 * (aka wall vs real)
	 */
	last_tick -= clocktick * (jiffies - wall_jiffies + 1);
	elapsed_cycles = mfctl(16) - last_tick;

	/* the precision of this math could be improved */
	return elapsed_cycles / (PAGE0->mem_10msec / 10000);
#else
	return 0;
#endif
}
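
The divisor mem_10msec / 10000 is CR16 cycles per microsecond (10 ms = 10,000 µs), which is why the truncating integer divide costs precision, as the comment notes. A sketch using the same hypothetical 250 MHz figures as above:

/* Sketch: 2500000 cycles per 10 ms / 10000 = 250 cycles per us,
 * so e.g. 1000000 elapsed cycles -> 4000 us. */
static unsigned long cycles_to_usec(unsigned long cycles, unsigned long mem_10msec)
{
	return cycles / (mem_10msec / 10000);
}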
Example #7
File: intr.c Project: MarginC/kame
/*
 * Service interrupts.  This doesn't necessarily dispatch them.
 * This is called with %eiem loaded with zero.  It's named
 * hppa_intr instead of hp700_intr because trap.c calls it.
 */
void
hppa_intr(struct trapframe *frame)
{
	int eirr;
	int ipending_new;
	int hp700_intr_ipending_new(struct hp700_int_reg *, int);

	/*
	 * Read the CPU interrupt register and acknowledge
	 * all interrupts.  Starting with this value, get
	 * our set of new pending interrupts.
	 */
	mfctl(CR_EIRR, eirr);
	mtctl(eirr, CR_EIRR);
	ipending_new = hp700_intr_ipending_new(&int_reg_cpu, eirr);

	/* Add these new bits to ipending. */
	ipending |= ipending_new;

	/* If we have interrupts to dispatch, do so. */
	if (ipending & ~cpl)
		hp700_intr_dispatch(cpl, frame->tf_eiem, frame);
#if 0
	else if (ipending != 0x80000000)
		printf("ipending %x cpl %x\n", ipending, cpl);
#endif
}
Example #8
void __init start_cpu_itimer(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long next_tick = mfctl(16) + clocktick;

	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */

	per_cpu(cpu_data, cpu).it_value = next_tick;
}
Example #9
void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	long now = mfctl(16);
	long next_tick;
	int nticks;
	int cpu = smp_processor_id();

	/* initialize next_tick to time at last clocktick */

	next_tick = cpu_data[cpu].it_value;

	/* since time passes between the interrupt and the mfctl()
	 * above, it is never true that last_tick + clocktick == now.  If we
	 * never miss a clocktick, we could set next_tick = last_tick + clocktick
	 * but maybe we'll miss ticks, hence the loop.
	 *
	 * Variables are *signed*.
	 */

	nticks = 0;
	while((next_tick - now) < halftick) {
		next_tick += clocktick;
		nticks++;
	}
	mtctl(next_tick, 16);
	cpu_data[cpu].it_value = next_tick;

	while (nticks--) {
#ifdef CONFIG_SMP
		smp_do_timer(regs);
#endif
		if (cpu == 0) {
			extern int pc_in_user_space;
			write_lock(&xtime_lock);
#ifndef CONFIG_SMP
			if (!user_mode(regs))
				parisc_do_profile(regs->iaoq[0]);
			else
				parisc_do_profile(&pc_in_user_space);
#endif
			do_timer(regs);
			write_unlock(&xtime_lock);
		}
	}
    
#ifdef CONFIG_CHASSIS_LCD_LED
	/* Only schedule the led tasklet on cpu 0, and only if it
	 * is enabled.
	 */
	if (cpu == 0 && !atomic_read(&led_tasklet.count))
		tasklet_schedule(&led_tasklet);
#endif

	/* check soft power switch status */
	if (cpu == 0 && !atomic_read(&power_tasklet.count))
		tasklet_schedule(&power_tasklet);
}
Example #10
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

#ifdef CONFIG_SMP
	desc = irq_to_desc(irq);
	cpumask_copy(&dest, desc->irq_data.affinity);
	if (irqd_is_per_cpu(&desc->irq_data) &&
	    !cpu_isset(smp_processor_id(), dest)) {
		int cpu = first_cpu(dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE,
			   per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	stack_overflow_check(regs);

#ifdef CONFIG_IRQSTACKS
	execute_on_irq_stack(&generic_handle_irq, irq);
#else
	generic_handle_irq(irq);
#endif /* CONFIG_IRQSTACKS */

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}
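
eirr_val is read from CR23, the External Interrupt Request Register, one bit per pending interrupt with the most significant bit carrying the highest priority (the interval timer). eirr_to_irq() maps that top bit to a Linux irq number; the following is a sketch consistent with how parisc numbers its CPU interrupts, assuming TIMER_IRQ is the irq of EIRR bit BITS_PER_LONG - 1:

/* Sketch: translate the highest-priority pending EIRR bit into an
 * irq number.  fls_long() returns the 1-based index of the MSB. */
static unsigned int eirr_to_irq(unsigned long eirr)
{
	int bit = fls_long(eirr);

	return (BITS_PER_LONG - bit) + TIMER_IRQ;
}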
Example #11
void
cpu_initclocks()
{
	extern volatile u_long cpu_itmr;
	extern u_long cpu_hzticks;
	u_long __itmr;

	itmr_timecounter.tc_frequency = PAGE0->mem_10msec * 100;
	tc_init(&itmr_timecounter);

	mfctl(CR_ITMR, __itmr);
	cpu_itmr = __itmr;
	__itmr += cpu_hzticks;
	mtctl(__itmr, CR_ITMR);
}
Example #12
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	long now;
	long next_tick;
	int nticks;
	int cpu = smp_processor_id();

	profile_tick(CPU_PROFILING, regs);

	now = mfctl(16);
	/* initialize next_tick to time at last clocktick */
	next_tick = cpu_data[cpu].it_value;

	/* since time passes between the interrupt and the mfctl()
	 * above, it is never true that last_tick + clocktick == now.  If we
	 * never miss a clocktick, we could set next_tick = last_tick + clocktick
	 * but maybe we'll miss ticks, hence the loop.
	 *
	 * Variables are *signed*.
	 */

	nticks = 0;
	while((next_tick - now) < halftick) {
		next_tick += clocktick;
		nticks++;
	}
	mtctl(next_tick, 16);
	cpu_data[cpu].it_value = next_tick;

	while (nticks--) {
#ifdef CONFIG_SMP
		smp_do_timer(regs);
#else
		update_process_times(user_mode(regs));
#endif
		if (cpu == 0) {
			write_seqlock(&xtime_lock);
			do_timer(regs);
			write_sequnlock(&xtime_lock);
		}
	}
    
	/* check soft power switch status */
	if (cpu == 0 && !atomic_read(&power_tasklet.count))
		tasklet_schedule(&power_tasklet);

	return IRQ_HANDLED;
}
Example #13
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

#ifdef CONFIG_SMP
	cpumask_copy(&dest, irq_desc[irq].affinity);
	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
	    !cpu_isset(smp_processor_id(), dest)) {
		int cpu = first_cpu(dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE,
			   per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	__do_IRQ(irq);

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}
Example #14
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcbp;
	struct trapframe *tf;
	register_t sp, osp;

#ifdef DIAGNOSTIC
	if (round_page(sizeof(struct user)) > PAGE_SIZE)
		panic("USPACE too small for user");
#endif

	/* Flush the parent process out of the FPU. */
	hppa_fpu_flush(l1);

	/* Now copy the parent PCB into the child. */
	pcbp = &l2->l_addr->u_pcb;
	bcopy(&l1->l_addr->u_pcb, pcbp, sizeof(*pcbp));

	sp = (register_t)l2->l_addr + PAGE_SIZE;
	l2->l_md.md_regs = tf = (struct trapframe *)sp;
	sp += sizeof(struct trapframe);
	bcopy(l1->l_md.md_regs, tf, sizeof(*tf));

	/*
	 * cpu_swapin() is supposed to fill out all the PAs
	 * we are going to need in locore
	 */
	cpu_swapin(l2);

	/* Activate this process' pmap. */
	pmap_activate(l2);

	/*
	 * theoretically these could be inherited from the parent,
	 * but just in case.
	 */
	tf->tf_sr7 = HPPA_SID_KERNEL;
	mfctl(CR_EIEM, tf->tf_eiem);
	tf->tf_ipsw = PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I /* | PSW_L */;
	pcbp->pcb_fpregs[HPPA_NFPREGS] = 0;

	/*
	 * Set up return value registers as libc:fork() expects
	 */
	tf->tf_ret0 = l1->l_proc->p_pid;
	tf->tf_ret1 = 1;	/* ischild */
	tf->tf_t1 = 0;		/* errno */

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_sp = (register_t)stack;

	/*
	 * Build a stack frame for the cpu_switch & co.
	 */
	osp = sp;
	sp += HPPA_FRAME_SIZE + 16*4; /* std frame + callee-saved registers */
	*HPPA_FRAME_CARG(0, sp) = tf->tf_sp;
	*HPPA_FRAME_CARG(1, sp) = KERNMODE(func);
	*HPPA_FRAME_CARG(2, sp) = (register_t)arg;
	*(register_t*)(sp + HPPA_FRAME_PSP) = osp;
	*(register_t*)(sp + HPPA_FRAME_CRP) =
		(register_t)switch_trampoline;
	tf->tf_sp = sp;
	fdcache(HPPA_SID_KERNEL, (vaddr_t)l2->l_addr, sp - (vaddr_t)l2->l_addr);
}
Example #15
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	extern register_t switch_tramp_p;

	struct pcb *pcbp;
	struct trapframe *tf;
	register_t sp, osp;

#ifdef DIAGNOSTIC
	if (round_page(sizeof(struct user) + sizeof(*tf)) > PAGE_SIZE)
		panic("USPACE too small for user");
#endif
	fpu_proc_save(p1);

	pcbp = &p2->p_addr->u_pcb;
	bcopy(&p1->p_addr->u_pcb, pcbp, sizeof(*pcbp));
	/* space is cached for the copy{in,out}'s pleasure */
	pcbp->pcb_space = p2->p_vmspace->vm_map.pmap->pm_space;
	pcbp->pcb_fpstate = pool_get(&hppa_fppl, PR_WAITOK);
	*pcbp->pcb_fpstate = *p1->p_addr->u_pcb.pcb_fpstate;
	/* reset any of the pending FPU exceptions from parent */
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[0] =
	    HPPA_FPU_FORK(pcbp->pcb_fpstate->hfp_regs.fpr_regs[0]);
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[1] = 0;
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[2] = 0;
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[3] = 0;

	sp = (register_t)p2->p_addr + PAGE_SIZE;
	p2->p_md.md_regs = tf = (struct trapframe *)sp;
	sp += sizeof(struct trapframe);
	bcopy(p1->p_md.md_regs, tf, sizeof(*tf));

	tf->tf_vtop = (paddr_t)p2->p_vmspace->vm_map.pmap->pm_pdir;
	tf->tf_cr30 = (paddr_t)pcbp->pcb_fpstate;

	tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr2 = tf->tf_sr3 =
	tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 =
	tf->tf_iisq[0] = tf->tf_iisq[1] =
		p2->p_vmspace->vm_map.pmap->pm_space;
	tf->tf_pidr1 = tf->tf_pidr2 = pmap_sid2pid(tf->tf_sr0);

	/*
	 * theoretically these could be inherited from the parent,
	 * but just in case.
	 */
	tf->tf_sr7 = HPPA_SID_KERNEL;
	tf->tf_eiem = mfctl(CR_EIEM);
	tf->tf_ipsw = PSL_C | PSL_Q | PSL_P | PSL_D | PSL_I /* | PSL_L */ |
	    PSL_O | PSL_W;

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		setstack(tf, (u_long)stack, 0);	/* XXX ignore error? */

	/*
	 * Build stack frames for the cpu_switchto & co.
	 */
	osp = sp + HPPA_FRAME_SIZE;
	*(register_t*)(osp - HPPA_FRAME_SIZE) = 0;
	*(register_t*)(osp + HPPA_FRAME_RP) = switch_tramp_p;
	*(register_t*)(osp) = (osp - HPPA_FRAME_SIZE);

	sp = osp + HPPA_FRAME_SIZE + 20*8; /* frame + callee-saved registers */
	*(register_t*)(sp - HPPA_FRAME_SIZE + 0) = (register_t)arg;
	*(register_t*)(sp - HPPA_FRAME_SIZE + 8) = KERNMODE(func);
	*(register_t*)(sp - HPPA_FRAME_SIZE + 16) = 0;	/* cpl */
	pcbp->pcb_ksp = sp;
}
Example #16
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	register_t sp, osp;
	vaddr_t uv;

	KASSERT(round_page(sizeof(struct pcb)) <= PAGE_SIZE);

	pcb1 = lwp_getpcb(l1);
	pcb2 = lwp_getpcb(l2);

	l2->l_md.md_astpending = 0;
	l2->l_md.md_flags = 0;

	/* Flush the parent LWP out of the FPU. */
	hppa_fpu_flush(l1);

	/* Now copy the parent PCB into the child. */
	memcpy(pcb2, pcb1, sizeof(struct pcb));

	pcb2->pcb_fpregs = pool_get(&hppa_fppl, PR_WAITOK);
	*pcb2->pcb_fpregs = *pcb1->pcb_fpregs;

	/* reset any of the pending FPU exceptions from parent */
	pcb2->pcb_fpregs->fpr_regs[0] =
	    HPPA_FPU_FORK(pcb2->pcb_fpregs->fpr_regs[0]);
	pcb2->pcb_fpregs->fpr_regs[1] = 0;
	pcb2->pcb_fpregs->fpr_regs[2] = 0;
	pcb2->pcb_fpregs->fpr_regs[3] = 0;

	l2->l_md.md_bpva = l1->l_md.md_bpva;
	l2->l_md.md_bpsave[0] = l1->l_md.md_bpsave[0];
	l2->l_md.md_bpsave[1] = l1->l_md.md_bpsave[1];

	uv = uvm_lwp_getuarea(l2);
	sp = (register_t)uv + PAGE_SIZE;
	l2->l_md.md_regs = tf = (struct trapframe *)sp;
	sp += sizeof(struct trapframe);

	/* copy the l1's trapframe to l2 */
	memcpy(tf, l1->l_md.md_regs, sizeof(*tf));

	/* Fill out all the PAs we are going to need in locore. */
	cpu_activate_pcb(l2);

	if (__predict_true(l2->l_proc->p_vmspace != NULL)) {
		struct proc *p = l2->l_proc;
		pmap_t pmap = p->p_vmspace->vm_map.pmap;
		pa_space_t space = pmap->pm_space;

		/* Load all of the user's space registers. */
		tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr3 = tf->tf_sr2 = 
		tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 = space;
		tf->tf_iisq_head = tf->tf_iisq_tail = space;

		/* Load the protection registers */
		tf->tf_pidr1 = tf->tf_pidr2 = pmap->pm_pid;

		/*
		 * theoretically these could be inherited from the parent,
		 * but just in case.
		 */
		tf->tf_sr7 = HPPA_SID_KERNEL;
		mfctl(CR_EIEM, tf->tf_eiem);
		tf->tf_ipsw = PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I /* | PSW_L */ |
		    (curcpu()->ci_psw & PSW_O);
	}

	/*
	 * Set up return value registers as libc:fork() expects
	 */
	tf->tf_ret0 = l1->l_proc->p_pid;
	tf->tf_ret1 = 1;	/* ischild */
	tf->tf_t1 = 0;		/* errno */

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_sp = (register_t)stack;

	/*
	 * Build stack frames for the cpu_switchto & co.
	 */
	osp = sp;

	/* lwp_trampoline's frame */
	sp += HPPA_FRAME_SIZE;

	*(register_t *)(sp) = 0;	/* previous frame pointer */
	*(register_t *)(sp + HPPA_FRAME_PSP) = osp;
	*(register_t *)(sp + HPPA_FRAME_CRP) = (register_t)lwp_trampoline;

	*HPPA_FRAME_CARG(2, sp) = KERNMODE(func);
	*HPPA_FRAME_CARG(3, sp) = (register_t)arg;

	/*
	 * cpu_switchto's frame
	 * 	stack usage is std frame + callee-save registers
	 */
	sp += HPPA_FRAME_SIZE + 16*4;
	pcb2->pcb_ksp = sp;
	fdcache(HPPA_SID_KERNEL, uv, sp - uv);
}
Example #17
void handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	struct siginfo si;

	switch(code) {

	case  1:
		/* High-priority machine check (HPMC) */
		pdc_console_restart();  /* switch back to pdc if HPMC */

		/* set up a new LED state on systems shipped with an LED state panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */
		
	case  2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case  3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;
		if (regs->iasq[0])
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case  5:
		/* Low-priority machine check */

		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_all_caches();
		cpu_lpmc(5, regs);
		return;

	case  6:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space   = regs->iasq[0];
		break;

	case  8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si.si_code = ILL_ILLOPC;
		goto give_sigill;

	case  9:
		/* Break instruction trap */
		handle_break(regs->iir,regs);
		return;
	
	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si.si_code = ILL_PRVOPC;
		goto give_sigill;
	
	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si.si_code = ILL_PRVREG;
		/* Fall thru */
	give_sigill:
		si.si_signo = SIGILL;
		si.si_errno = 0;
		si.si_addr = (void *) regs->iaoq[0];
		force_sig_info(SIGILL, &si, current);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		si.si_signo = SIGFPE;
		si.si_code = FPE_INTOVF;
		si.si_addr = (void *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;
	
	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps on condition */
		si.si_signo = SIGFPE;
		/* Set to zero, and let the userspace app figure it out from
		   the insn pointed to by si_addr */
		si.si_code = 0;
		si.si_addr = (void *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		handle_fpe(regs);
		return;

	case 15: 
		/* Data TLB miss fault/Data page fault */	
		/* Fall thru */
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to clean up */
		/* Fall thru */
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* TODO: Still need to add slow path emulation code here */
		/* TODO: Understand what is meant by the TODO listed 
		   above this one. (Carlos) */
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later CPUs split this into types 26, 27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		/* Fall Through */
	case 26: 
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space   = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		/* fall thru */
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (regs->iasq[0])
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case  7:  
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				up_read(&current->mm->mmap_sem);
				break; /* call do_page_fault() */
			}
			up_read(&current->mm->mmap_sem);
		}
		/* Fall Through */
	case 27: 
		/* Data memory protection ID trap */
		die_if_kernel("Protection id trap", regs, code);
		si.si_code = SEGV_MAPERR;
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		if (code == 7)
		    si.si_addr = (void *) regs->iaoq[0];
		else
		    si.si_addr = (void *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;

	case 28: 
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
			printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
			    current->pid, current->comm);
			show_regs(regs);
#endif
			/* SIGBUS, for lack of a better one. */
			si.si_signo = SIGBUS;
			si.si_code = BUS_OBJERR;
			si.si_errno = 0;
			si.si_addr = (void *) regs->ior;
			force_sig_info(SIGBUS, &si, current);
			return;
		}
		
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	if (user_mode(regs)) {
	    if (fault_space != regs->sr[7]) {
#ifdef PRINT_USER_FAULTS
		if (fault_space == 0)
			printk(KERN_DEBUG "User Fault on Kernel Space ");
		else
			printk(KERN_DEBUG "User Fault (long pointer) ");
		printk("pid=%d command='%s'\n", current->pid, current->comm);
		show_regs(regs);
#endif
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		si.si_code = SEGV_MAPERR;
		si.si_addr = (void *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;
	    }
	}
	else {

	    /*
	     * The kernel should never fault on its own address space.
	     */

	    if (fault_space == 0) {
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
		parisc_terminate("Kernel Fault", regs, code, fault_address);
		/** NOT REACHED **/
	    }
	}

	local_irq_enable();
	do_page_fault(regs, code, fault_address);
}
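
The case 11 emulation above decodes the trapping MFCTL directly from regs->iir. Pulled out into a hypothetical helper, the bit fields it relies on are:

/* Sketch: (iir & 0xffdfffe0) == 0x034008a0 matches "MFCTL cr26/cr27, grN".
 * Bit 0x00200000 (excluded from the mask) selects the source register:
 * clear -> cr26, set -> cr27.  The low five bits name the target GR. */
static void emulate_mfctl_cr2x(struct pt_regs *regs)
{
	unsigned int t = regs->iir & 0x1f;	/* target general register */

	regs->gr[t] = (regs->iir & 0x00200000) ? mfctl(27) : mfctl(26);

	/* Step the instruction-address queue past the emulated insn,
	 * exactly as the handler does. */
	regs->iaoq[0] = regs->iaoq[1];
	regs->iaoq[1] += 4;
	regs->iasq[0] = regs->iasq[1];
}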
Example #18
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcbp;
	struct trapframe *tf;
	register_t sp, osp;

#ifdef DIAGNOSTIC
	if (round_page(sizeof(struct user)) > NBPG)
		panic("USPACE too small for user");
#endif
	fpu_proc_save(p1);

	pcbp = &p2->p_addr->u_pcb;
	bcopy(&p1->p_addr->u_pcb, pcbp, sizeof(*pcbp));
	/* space is cached for the copy{in,out}'s pleasure */
	pcbp->pcb_space = p2->p_vmspace->vm_map.pmap->pm_space;
	pcbp->pcb_fpstate = pool_get(&hppa_fppl, PR_WAITOK);
	*pcbp->pcb_fpstate = *p1->p_addr->u_pcb.pcb_fpstate;
	/* reset any of the pending FPU exceptions from parent */
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[0] =
	    HPPA_FPU_FORK(pcbp->pcb_fpstate->hfp_regs.fpr_regs[0]);
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[1] = 0;
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[2] = 0;
	pcbp->pcb_fpstate->hfp_regs.fpr_regs[3] = 0;

	p2->p_md.md_bpva = p1->p_md.md_bpva;
	p2->p_md.md_bpsave[0] = p1->p_md.md_bpsave[0];
	p2->p_md.md_bpsave[1] = p1->p_md.md_bpsave[1];

	sp = (register_t)p2->p_addr + NBPG;
	p2->p_md.md_regs = tf = (struct trapframe *)sp;
	sp += sizeof(struct trapframe);
	bcopy(p1->p_md.md_regs, tf, sizeof(*tf));

	tf->tf_cr30 = (paddr_t)pcbp->pcb_fpstate;

	tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr2 = tf->tf_sr3 =
	tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 =
	tf->tf_iisq_head = tf->tf_iisq_tail =
		p2->p_vmspace->vm_map.pmap->pm_space;
	tf->tf_pidr1 = tf->tf_pidr2 = pmap_sid2pid(tf->tf_sr0);

	/*
	 * theoretically these could be inherited from the parent,
	 * but just in case.
	 */
	tf->tf_sr7 = HPPA_SID_KERNEL;
	mfctl(CR_EIEM, tf->tf_eiem);
	tf->tf_ipsw = PSL_C | PSL_Q | PSL_P | PSL_D | PSL_I /* | PSL_L */ |
	    (curcpu()->ci_psw & PSL_O);

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_sp = (register_t)stack;

	/*
	 * Build stack frames for the cpu_switchto & co.
	 */
	osp = sp + HPPA_FRAME_SIZE;
	*(register_t*)(osp - HPPA_FRAME_SIZE) = 0;
	*(register_t*)(osp + HPPA_FRAME_CRP) = (register_t)&switch_trampoline;
	*(register_t*)(osp) = (osp - HPPA_FRAME_SIZE);

	sp = osp + HPPA_FRAME_SIZE + 20*4; /* frame + callee-saved registers */
	*HPPA_FRAME_CARG(0, sp) = (register_t)arg;
	*HPPA_FRAME_CARG(1, sp) = KERNMODE(func);
	pcbp->pcb_ksp = sp;
}