Code example #1
int
clock_intr(void *v)
{
	struct clockframe *frame = v;
	extern u_int cpu_hzticks;
	u_int time_inval;

	/* Restart the interval timer. */
	mfctl(CR_ITMR, time_inval);
	mtctl(time_inval + cpu_hzticks, CR_ITMR);

	/* printf ("clock int 0x%x @ 0x%x for %p\n", t,
	   CLKF_PC(frame), curproc); */

	if (!cold)
		hardclock(frame);

#if 0
	ddb_regs = *frame;
	db_show_regs(NULL, 0, 0, NULL);
#endif

	/* printf ("clock out 0x%x\n", t); */

	return 1;
}
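
A note on the accessors used throughout these examples: in the BSD-derived snippets, mfctl(reg, var) is a macro wrapping inline assembly that stores the control register into the named lvalue, which is why the call above can assign to time_inval; the Linux snippets instead use an mfctl(reg) that expands to a statement expression returning the value. A minimal sketch of the BSD-style definitions, assumed here after the hppa cpufunc.h idiom rather than quoted verbatim:

/* Sketch: the control register number must be a compile-time constant. */
#define mtctl(v, r)	__asm volatile("mtctl %0, %1" :: "r" (v), "i" (r))
#define mfctl(r, v)	__asm volatile("mfctl %1, %0" : "=r" (v) : "i" (r))
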
Code example #2
File: intr.c Project: MarginC/kame
/*
 * Service interrupts.  This doesn't necessarily dispatch them.
 * This is called with %eiem loaded with zero.  It's named
 * hppa_intr instead of hp700_intr because trap.c calls it.
 */
void
hppa_intr(struct trapframe *frame)
{
	int eirr;
	int ipending_new;
	int hp700_intr_ipending_new(struct hp700_int_reg *, int);

	/*
	 * Read the CPU interrupt register and acknowledge
	 * all interrupts.  Starting with this value, get
	 * our set of new pending interrupts.
	 */
	mfctl(CR_EIRR, eirr);
	mtctl(eirr, CR_EIRR);
	ipending_new = hp700_intr_ipending_new(&int_reg_cpu, eirr);

	/* Add these new bits to ipending. */
	ipending |= ipending_new;

	/* If we have interrupts to dispatch, do so. */
	if (ipending & ~cpl)
		hp700_intr_dispatch(cpl, frame->tf_eiem, frame);
#if 0
	else if (ipending != 0x80000000)
		printf("ipending %x cpl %x\n", ipending, cpl);
#endif
}
Code example #3
void
cpu_initclocks(void)
{
	static struct timecounter tc = {
		.tc_get_timecount = get_itimer_count,
		.tc_name = "itimer",
		.tc_counter_mask = ~0,
		.tc_quality = 100,
	};

	extern u_int cpu_hzticks;
	u_int time_inval;

	tc.tc_frequency = cpu_hzticks * hz;

	/* Start the interval timer. */
	mfctl(CR_ITMR, time_inval);
	mtctl(time_inval + cpu_hzticks, CR_ITMR);

	tc_init(&tc);
}

unsigned
get_itimer_count(struct timecounter *tc)
{
	uint32_t val;

	mfctl(CR_ITMR, val);

	return val;
}
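
A worked example of the timer arithmetic, with hypothetical numbers: PAGE0->mem_10msec is the number of interval-timer cycles in 10 ms (example #18 below turns it into a frequency as mem_10msec * 100, and example #21 derives cpu_hzticks = (PAGE0->mem_10msec * 100) / hz). So a machine reporting mem_10msec = 2,500,000 runs the timer at 250 MHz, and with hz = 100:

/* Hypothetical 250 MHz machine. */
u_int hz = 100;					/* ticks per second */
u_int mem_10msec = 2500000;			/* cycles per 10 ms, from PAGE0 */
u_int cpu_hzticks = (mem_10msec * 100) / hz;	/* 2,500,000 cycles per tick */
u_int tc_frequency = cpu_hzticks * hz;		/* back to 250,000,000 cycles/s */
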
Code example #4
File: irq.c Project: nhanh0/hah
static void enable_cpu_irq(void *unused, int irq)
{
	unsigned long mask = EIEM_MASK(irq);

	mtctl(mask, 23);	/* clear the pending bit in EIRR (CR 23) */
	SET_EIEM_BIT(irq);
}
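
Some context for the register numbers here, stated as an assumption rather than taken from this project's headers: CR 23 is the EIRR (external interrupt request register), and writing a 1 bit to it clears that pending interrupt, while CR 15 holds the EIEM enable mask; EIEM_MASK() turns an IRQ number into its single bit in those registers. In current Linux parisc trees the macro is defined along these lines:

/* Assumed shape, after arch/parisc/include/asm/irq.h: */
#define EIEM_MASK(irq)	(1UL << (CPU_IRQ_MAX - irq))
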
Code example #5
File: setup.c Project: Broadcom/stblinux
void __init start_parisc(void)
{
	extern void early_trap_init(void);

	int ret, cpunum;
	struct pdc_coproc_cfg coproc_cfg;

	/* check QEMU/SeaBIOS marker in PAGE0 */
	running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);

	cpunum = smp_processor_id();

	init_cpu_topology();

	set_firmware_width_unlocked();

	ret = pdc_coproc_cfg_unlocked(&coproc_cfg);
	if (ret >= 0 && coproc_cfg.ccr_functional) {
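		/* CR 10 is the coprocessor configuration register (CCR);
		 * writing the "functional" bits enables the coprocessors
		 * that firmware reported as working. */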
		mtctl(coproc_cfg.ccr_functional, 10);

		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;

		asm volatile ("fstd	%fr0,8(%sp)");
	} else {
Code example #6
void __init time_init(void)
{
	unsigned long next_tick;
	static struct pdc_tod tod_data;

	clocktick = (100 * PAGE0->mem_10msec) / HZ;
	halftick = clocktick / 2;

	/* Setup clock interrupt timing */

	next_tick = mfctl(16);
	next_tick += clocktick;
	cpu_data[smp_processor_id()].it_value = next_tick;

	/* kick off Itimer (CR16) */
	mtctl(next_tick, 16);

	if(pdc_tod_read(&tod_data) == 0) {
		write_lock_irq(&xtime_lock);
		xtime.tv_sec = tod_data.tod_sec;
		xtime.tv_usec = tod_data.tod_usec;
		write_unlock_irq(&xtime_lock);
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		xtime.tv_sec = 0;
		xtime.tv_usec = 0;
	}
}
Code example #7
File: smp.c Project: iPodLinux/linux-2.6.7-ipod
/*
 * Slaves start using C here. Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for boot strap processor (aka monarch)
 */
void __init smp_callin(void)
{
	extern void cpu_idle(void);	/* arch/parisc/kernel/process.c */
	int slave_id = cpu_now_booting;
#if 0
	void *istack;
#endif

	smp_cpu_init(slave_id);

#if 0	/* NOT WORKING YET - see entry.S */
	istack = (void *)__get_free_pages(GFP_KERNEL,ISTACK_ORDER);
	if (istack == NULL) {
	    printk(KERN_CRIT "Failed to allocate interrupt stack for cpu %d\n",slave_id);
	    BUG();
	}
	mtctl(istack,31);
#endif

	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local();

	local_irq_enable();  /* Interrupts have been off until now */

	cpu_idle();      /* Wait for timer to schedule some work */

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}
Code example #8
File: fpu.c Project: lacombar/netbsd-alc
/*
 * Bootstraps the FPU.
 */
void
hppa_fpu_bootstrap(u_int ccr_enable)
{
	u_int32_t junk[2];
	u_int32_t vers[2];
	extern u_int hppa_fpu_nop0;
	extern u_int hppa_fpu_nop1;

	/* See if we have a present and functioning hardware FPU. */
	fpu_present = (ccr_enable & HPPA_FPUS) == HPPA_FPUS;

	/* Initialize the FPU and get its version. */
	if (fpu_present) {

		/*
		 * To somewhat optimize the emulation
		 * assist trap handling and context
		 * switching (to save them from having
		 * to always load and check fpu_present),
		 * there are two instructions in locore.S
		 * that are replaced with nops when
		 * there is a hardware FPU.
		 */
		hppa_fpu_nop0 = OPCODE_NOP;
		hppa_fpu_nop1 = OPCODE_NOP;
		fcacheall();

		/*
		 * We track what process has the FPU,
		 * and how many times we have to swap
		 * in and out.
		 */

		/*
		 * The PA-RISC 1.1 Architecture manual is 
		 * pretty clear that the copr,0,0 must be 
		 * wrapped in double word stores of fr0, 
		 * otherwise its operation is undefined.
		 */
		__asm volatile(
			"	ldo	%0, %%r22	\n"
			"	fstds	%%fr0, 0(%%r22)	\n"
			"	ldo	%1, %%r22	\n"
			"	copr,0,0		\n"
			"	fstds	%%fr0, 0(%%r22)	\n"
			: "=m" (junk), "=m" (vers) : : "r22");

		/*
		 * Now mark that no process has the FPU,
		 * and disable it, so the first time it
		 * gets used the process' state gets
		 * swapped in.
		 */
		fpu_csw = 0;
		fpu_cur_uspace = 0;
		mtctl(ccr_enable & (CCR_MASK ^ HPPA_FPUS), CR_CCR);	
	} 
#ifdef FPEMUL
	else
Code example #9
File: time.c Project: AshishNamdev/linux
/*
 * We keep time on PA-RISC Linux by using the Interval Timer which is
 * a pair of registers; one is read-only and one is write-only; both
 * accessed through CR16.  The read-only register is 32 or 64 bits wide,
 * and increments by 1 every CPU clock tick.  The architecture only
 * guarantees us a rate between 0.5 and 2, but all implementations use a
 * rate of 1.  The write-only register is 32-bits wide.  When the lowest
 * 32 bits of the read-only register compare equal to the write-only
 * register, it raises a maskable external interrupt.  Each processor has
 * an Interval Timer of its own and they are not synchronised.  
 *
 * We want to generate an interrupt every 1/HZ seconds.  So we program
 * CR16 to interrupt every @clocktick cycles.  The it_value in cpu_data
 * is programmed with the intended time of the next tick.  We can be
 * held off for an arbitrarily long period of time by interrupts being
 * disabled, so we may miss one or more ticks.
 */
irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
{
	unsigned long now;
	unsigned long next_tick;
	unsigned long ticks_elapsed = 0;
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

	/* gcc can optimize for "read-only" case with a local clocktick */
	unsigned long cpt = clocktick;

	profile_tick(CPU_PROFILING);

	/* Initialize next_tick to the old expected tick time. */
	next_tick = cpuinfo->it_value;

	/* Calculate how many ticks have elapsed. */
	do {
		++ticks_elapsed;
		next_tick += cpt;
		now = mfctl(16);
	} while (next_tick - now > cpt);

	/* Store (in CR16 cycles) up to when we are accounting right now. */
	cpuinfo->it_value = next_tick;

	/* Go do system house keeping. */
	if (cpu == 0)
		xtime_update(ticks_elapsed);

	update_process_times(user_mode(get_irq_regs()));

	/* Skip clockticks on purpose if we know we would miss those.
	 * The new CR16 must be "later" than the current CR16, otherwise
	 * the itimer would not fire until CR16 wrapped - e.g. 4 seconds
	 * later on a 1 GHz processor. We'll account for the missed
	 * ticks on the next timer interrupt.
	 * We want IT to fire modulo clocktick even if we miss/skip some.
	 * But those interrupts don't in fact get delivered that regularly.
	 *
	 * "next_tick - now" will always give the difference regardless of
	 * whether one or the other wrapped. If "now" is "bigger" we'll end
	 * up with a very large unsigned number.
	 */
	while (next_tick - mfctl(16) > cpt)
		next_tick += cpt;

	/* Program the IT when to deliver the next interrupt.
	 * Only bottom 32-bits of next_tick are writable in CR16!
	 * Timer interrupt will be delivered at least a few hundred cycles
	 * after the IT fires, so if we are too close (<= 500 cycles) to the
	 * next cycle, simply skip it.
	 */
	if (next_tick - mfctl(16) <= 500)
		next_tick += cpt;
	mtctl(next_tick, 16);

	return IRQ_HANDLED;
}
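
The "next_tick - now" remark above deserves a standalone illustration: with unsigned arithmetic the subtraction yields the forward distance modulo 2^32, so the comparison still works when CR16 wraps between the two samples. A self-contained demo (not kernel code; the values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t now = 0xfffffff0u;	/* CR16 sampled just before wrapping */
	uint32_t next_tick = 0x10u;	/* programmed tick, just after the wrap */
	uint32_t cpt = 2500000u;	/* hypothetical clocktick, in cycles */

	/* 0x10 - 0xfffffff0 wraps to 0x20: a small forward distance,
	 * so this tick is not treated as missed. */
	printf("distance=%u missed=%d\n",
	    next_tick - now, next_tick - now > cpt);
	return 0;
}
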
Code example #10
int
cpu_lwp_setprivate(lwp_t *l, void *addr)
{

	l->l_md.md_regs->tf_cr27 = (u_int)addr;
	if (l == curlwp)
		mtctl(addr, CR_TLS);
	return 0;
}
Code example #11
File: irq.c Project: muromec/linux-ezxdev
static void enable_cpu_irq(void *unused, int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	mtctl(eirr_bit, 23);	/* clear EIRR bit before unmasking */
	cpu_eiem |= eirr_bit;
        smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
	set_eiem(cpu_eiem);
}
Code example #12
void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	long now = mfctl(16);
	long next_tick;
	int nticks;
	int cpu = smp_processor_id();

	/* initialize next_tick to time at last clocktick */

	next_tick = cpu_data[cpu].it_value;

	/* since time passes between the interrupt and the mfctl()
	 * above, it is never true that last_tick + clocktick == now.  If we
	 * never miss a clocktick, we could set next_tick = last_tick + clocktick
	 * but maybe we'll miss ticks, hence the loop.
	 *
	 * Variables are *signed*.
	 */

	nticks = 0;
	while((next_tick - now) < halftick) {
		next_tick += clocktick;
		nticks++;
	}
	mtctl(next_tick, 16);
	cpu_data[cpu].it_value = next_tick;

	while (nticks--) {
#ifdef CONFIG_SMP
		smp_do_timer(regs);
#endif
		if (cpu == 0) {
			extern int pc_in_user_space;
			write_lock(&xtime_lock);
#ifndef CONFIG_SMP
			if (!user_mode(regs))
				parisc_do_profile(regs->iaoq[0]);
			else
				parisc_do_profile(&pc_in_user_space);
#endif
			do_timer(regs);
			write_unlock(&xtime_lock);
		}
	}
    
#ifdef CONFIG_CHASSIS_LCD_LED
	/* Only schedule the led tasklet on cpu 0, and only if it
	 * is enabled.
	 */
	if (cpu == 0 && !atomic_read(&led_tasklet.count))
		tasklet_schedule(&led_tasklet);
#endif

	/* check soft power switch status */
	if (cpu == 0 && !atomic_read(&power_tasklet.count))
		tasklet_schedule(&power_tasklet);
}
Code example #13
File: time.c Project: AshishNamdev/linux
void __init start_cpu_itimer(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long next_tick = mfctl(16) + clocktick;

	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */

	per_cpu(cpu_data, cpu).it_value = next_tick;
}
Code example #14
File: irq.c Project: fread-ink/fread-kernel-k4
void cpu_ack_irq(unsigned int irq)
{
	unsigned long mask = EIEM_MASK(irq);
	int cpu = smp_processor_id();

	/* Clear in EIEM so we can no longer process */
	per_cpu(local_ack_eiem, cpu) &= ~mask;

	/* disable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));

	/* and now ack it */
	mtctl(mask, 23);
}
Code example #15
File: irq.c Project: fread-ink/fread-kernel-k4
void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
	claim_cpu_irqs();
#ifdef CONFIG_SMP
	if (!cpu_eiem)
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
#else
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}
Code example #16
File: irq.c Project: Blackburn29/PsycoKernel
void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
	claim_cpu_irqs();
#ifdef CONFIG_SMP
	if (!cpu_eiem)
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
#else
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}
Code example #17
File: irq.c Project: Blackburn29/PsycoKernel
void cpu_ack_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* Clear in EIEM so we can no longer process */
	per_cpu(local_ack_eiem, cpu) &= ~mask;

	/* disable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));

	/* and now ack it */
	mtctl(mask, 23);
}
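
The ack above has a matching eoi handler in the same file that gives the bit back once the handler has run; sketched here on the assumption that it simply mirrors cpu_ack_irq (it is named cpu_eoi_irq in current trees):

void cpu_eoi_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* set it in the eiems - this IRQ can be processed again */
	per_cpu(local_ack_eiem, cpu) |= mask;

	/* re-enable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}
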
Code example #18
File: clock.c Project: repos-holder/openbsd-patches
void
cpu_initclocks()
{
	extern volatile u_long cpu_itmr;
	extern u_long cpu_hzticks;
	u_long __itmr;

	itmr_timecounter.tc_frequency = PAGE0->mem_10msec * 100;
	tc_init(&itmr_timecounter);

	mfctl(CR_ITMR, __itmr);
	cpu_itmr = __itmr;
	__itmr += cpu_hzticks;
	mtctl(__itmr, CR_ITMR);
}
Code example #19
File: time.c Project: BackupTheBerlios/arp2-svn
irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	long now;
	long next_tick;
	int nticks;
	int cpu = smp_processor_id();

	profile_tick(CPU_PROFILING, regs);

	now = mfctl(16);
	/* initialize next_tick to time at last clocktick */
	next_tick = cpu_data[cpu].it_value;

	/* since time passes between the interrupt and the mfctl()
	 * above, it is never true that last_tick + clocktick == now.  If we
	 * never miss a clocktick, we could set next_tick = last_tick + clocktick
	 * but maybe we'll miss ticks, hence the loop.
	 *
	 * Variables are *signed*.
	 */

	nticks = 0;
	while((next_tick - now) < halftick) {
		next_tick += clocktick;
		nticks++;
	}
	mtctl(next_tick, 16);
	cpu_data[cpu].it_value = next_tick;

	while (nticks--) {
#ifdef CONFIG_SMP
		smp_do_timer(regs);
#else
		update_process_times(user_mode(regs));
#endif
		if (cpu == 0) {
			write_seqlock(&xtime_lock);
			do_timer(regs);
			write_sequnlock(&xtime_lock);
		}
	}
    
	/* check soft power switch status */
	if (cpu == 0 && !atomic_read(&power_tasklet.count))
		tasklet_schedule(&power_tasklet);

	return IRQ_HANDLED;
}
Code example #20
void start_parisc(void)
{
	extern void start_kernel(void);

	int ret, cpunum;
	struct pdc_coproc_cfg coproc_cfg;

	cpunum = smp_processor_id();

	set_firmware_width_unlocked();

	ret = pdc_coproc_cfg_unlocked(&coproc_cfg);
	if (ret >= 0 && coproc_cfg.ccr_functional) {
		mtctl(coproc_cfg.ccr_functional, 10);

		per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision;
		per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model;

		asm volatile ("fstd	%fr0,8(%sp)");
	} else {
Code example #21
File: machdep.c Project: MarginC/kame
void
hppa_init()
{
	extern int kernel_text, end;
	struct pdc_hwtlb pdc_hwtlb PDC_ALIGNMENT;
	struct pdc_coproc pdc_coproc PDC_ALIGNMENT;
	vm_offset_t v, vstart, vend;
	register int pdcerr;
	int usehpt;

	/* init PDC iface, so we can call em easy */
	pdc_init();

	/* calculate cpu speed */
	cpu_hzticks = (PAGE0->mem_10msec * 100) / hz;
	delay_init();

	/*
	 * get cache parameters from the PDC
	 */
	if ((pdcerr = pdc_call((iodcio_t)pdc, 0, PDC_CACHE, PDC_CACHE_DFLT,
			       &pdc_cache)) < 0) {
#ifdef DIAGNOSTIC
                printf("Warning: PDC_CACHE call Ret'd %d\n", pdcerr);
#endif
	}

	dcache_line_mask = pdc_cache.dc_conf.cc_line * 16 - 1;
	dcache_size = pdc_cache.dc_size;
	dcache_stride = pdc_cache.dc_stride;
	icache_stride = pdc_cache.ic_stride;

	/*
	 * purge TLBs and flush caches
	 */
	if (pdc_call((iodcio_t)pdc, 0, PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL) < 0)
		printf("WARNING: BTLB purge failed\n");
	ptlball();
	fcacheall();

	/* calculate HPT size */
	hpt_hashsize = PAGE0->imm_max_mem / NBPG;
	mtctl(hpt_hashsize - 1, CR_HPTMASK);

	/*
	 * If we want to use the HW TLB support, ensure that it exists.
	 */
	if (pdc_call((iodcio_t)pdc, 0, PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) &&
	    !pdc_hwtlb.min_size && !pdc_hwtlb.max_size) {
		printf("WARNING: no HW tlb walker\n");
		usehpt = 0;
	} else {
		usehpt = 1;
#ifdef DEBUG
		printf("hwtlb: %u-%u, %u/",
		       pdc_hwtlb.min_size, pdc_hwtlb.max_size, hpt_hashsize);
#endif
		if (hpt_hashsize > pdc_hwtlb.max_size)
			hpt_hashsize = pdc_hwtlb.max_size;
		else if (hpt_hashsize < pdc_hwtlb.min_size)
			hpt_hashsize = pdc_hwtlb.min_size;
#ifdef DEBUG
		printf("%u (0x%x)\n", hpt_hashsize,
		       hpt_hashsize * sizeof(struct hpt_entry));
#endif
	}
	
	totalphysmem = PAGE0->imm_max_mem / NBPG;
	resvmem = ((vm_offset_t)&kernel_text) / NBPG;

	vstart = hppa_round_page(&end);
	vend = VM_MAX_KERNEL_ADDRESS;

	/* we hope this won't fail */
	hppa_ex = extent_create("mem", 0x0, 0xffffffff, M_DEVBUF,
				(caddr_t)mem_ex_storage,
				sizeof(mem_ex_storage),
				EX_NOCOALESCE|EX_NOWAIT);
	if (extent_alloc_region(hppa_ex, 0, (vm_offset_t)PAGE0->imm_max_mem,
				EX_NOWAIT))
		panic("cannot reserve main memory");

	/*
	 * Allocate space for system data structures.  We are given
	 * a starting virtual address and we return a final virtual
	 * address; along the way we set each data structure pointer.
	 *
	 * We call allocsys() with 0 to find out how much space we want,
	 * allocate that much and fill it with zeroes, and the call
	 * allocsys() again with the correct base virtual address.
	 */

	v = vstart;
#define	valloc(name, type, num)	\
	    (name) = (type *)v; v = (vm_offset_t)((name)+(num))

#ifdef REAL_CLISTS
	valloc(cfree, struct cblock, nclist);
#endif
	valloc(callout, struct callout, ncallout);
	nswapmap = maxproc * 2;
	valloc(swapmap, struct map, nswapmap);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
#ifdef SYSVSEM 
	valloc(sema, struct semid_ds, seminfo.semmni);
	valloc(sem, struct sem, seminfo.semmns); 
	/* This is pretty disgusting! */
	valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
#endif
#ifdef SYSVMSG
	valloc(msgpool, char, msginfo.msgmax);
	valloc(msgmaps, struct msgmap, msginfo.msgseg);
	valloc(msghdrs, struct msg, msginfo.msgtql);
	valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif

#ifndef BUFCACHEPERCENT
#define BUFCACHEPERCENT 10
#endif /* BUFCACHEPERCENT */

	if (bufpages == 0)
		bufpages = totalphysmem / BUFCACHEPERCENT / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages;
		if (nbuf < 16)
			nbuf = 16;
	}

	/* Restrict to at most 70% filled kvm */
	if (nbuf * MAXBSIZE >
	    (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) * 7 / 10)
		nbuf = (VM_MAX_KERNEL_ADDRESS-VM_MIN_KERNEL_ADDRESS) /
		    MAXBSIZE * 7 / 10;

	/* More buffer pages than fits into the buffers is senseless.  */
	if (bufpages > nbuf * MAXBSIZE / CLBYTES)
		bufpages = nbuf * MAXBSIZE / CLBYTES;

	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) & ~1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}

	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);
#undef valloc
	bzero ((void *)vstart, (v - vstart));
	vstart = v;

	pmap_bootstrap(&vstart, &vend);
	physmem = totalphysmem - btoc(vstart);

	/* alloc msgbuf */
	if (!(msgbufp = (void *)pmap_steal_memory(sizeof(struct msgbuf),
						  NULL, NULL)))
		panic("cannot allocate msgbuf");
	msgbufmapped = 1;

#ifdef DEBUG
	printf("mem: %x+%x, %x\n", physmem, resvmem, totalphysmem);
#endif
	/* Turn on the HW TLB assist */
	if (usehpt) {
		if ((pdcerr = pdc_call((iodcio_t)pdc, 0, PDC_TLB,
				       PDC_TLB_CONFIG, &pdc_hwtlb, hpt_table,
				       sizeof(struct hpt_entry) * hpt_hashsize,
				       PDC_TLB_WORD3)) < 0) {
			printf("Warning: HW TLB init failed (%d), disabled\n",
			       pdcerr);
			usehpt = 0;
		} else
			printf("HW TLB(%d entries at 0x%x) initialized (%d)\n",
			       hpt_hashsize, hpt_table, pdcerr);
	}

        /*
         * Locate any coprocessors and enable them by setting up the CCR.
         * SFUs are ignored (since we don't have any).  Also, initialize
         * the floating point registers here.
         */
        if ((pdcerr = pdc_call((iodcio_t)pdc, 0, PDC_COPROC, PDC_COPROC_DFLT,
			       &pdc_coproc)) < 0)
                printf("WARNING: PDC_COPROC call Ret'd %d\n", pdcerr);
	else {
#ifdef DEBUG
		printf("pdc_coproc: %x, %x\n", pdc_coproc.ccr_enable,
		       pdc_coproc.ccr_present);
#endif
	}
        copr_sfu_config = pdc_coproc.ccr_enable;
        mtctl(copr_sfu_config & CCR_MASK, CR_CCR);
/*
        fprinit(&fpcopr_version);
	fpcopr_version = (fpcopr_version & 0x003ff800) >> 11;
        mtctl(CR_CCR, 0);
*/
        /*
         * Clear the FAULT light (so we know when we get a real one)
         * PDC_COPROC apparently turns it on (for whatever reason).
         */
        pdcerr = PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0;
        (void) (*pdc)(PDC_CHASSIS, PDC_CHASSIS_DISP, pdcerr);

#ifdef DDB
	ddb_init();
#endif
#ifdef DEBUG
	printf("hppa_init: leaving\n");
#endif
	kernelmapped++;
}
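
To make the valloc() idiom in example #21 concrete: each invocation carves an array out of the linear region starting at v by casting the current cursor and bumping it past the array. One expansion written out (illustrative only):

/* valloc(callout, struct callout, ncallout) expands to: */
callout = (struct callout *)v;
v = (vm_offset_t)(callout + ncallout);	/* bump the cursor past the array */
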
Code example #22
File: trap.c Project: MarginC/kame
void
trap(int type, struct trapframe *frame)
{
	struct lwp *l;
	struct proc *p;
	struct pcb *pcbp;
	vaddr_t va;
	struct vm_map *map;
	struct vmspace *vm;
	vm_prot_t vftype;
	pa_space_t space;
	u_int opcode;
	int ret;
	const char *tts;
	int type_raw;
#ifdef DIAGNOSTIC
	extern int emergency_stack_start, emergency_stack_end;
#endif

	type_raw = type & ~T_USER;
	opcode = frame->tf_iir;
	if (type_raw == T_ITLBMISS || type_raw == T_ITLBMISSNA) {
		va = frame->tf_iioq_head;
		space = frame->tf_iisq_head;
		vftype = VM_PROT_READ;	/* XXX VM_PROT_EXECUTE ??? */
	} else {
		va = frame->tf_ior;
		space = frame->tf_isr;
		vftype = inst_store(opcode) ? VM_PROT_WRITE : VM_PROT_READ;
	}

	if ((l = curlwp) == NULL)
		l = &lwp0;
	p = l->l_proc;

#ifdef DIAGNOSTIC
	/*
	 * If we are on the emergency stack, then we either got
	 * a fault on the kernel stack, or we're just handling
	 * a trap for the machine check handler (which also 
	 * runs on the emergency stack).
	 *
	 * We *very crudely* differentiate between the two cases
	 * by checking the faulting instruction: if it is the
	 * function prologue instruction that stores the old
	 * frame pointer and updates the stack pointer, we assume
	 * that we faulted on the kernel stack.
	 *
	 * In this case, not completing that instruction will
	 * probably confuse backtraces in kgdb/ddb.  Completing
	 * it would be difficult, because we already faulted on
	 * that part of the stack, so instead we fix up the 
	 * frame as if the function called has just returned.
	 * This has peculiar knowledge about what values are in
	 * what registers during the "normal gcc -g" prologue.
	 */
	if (&type >= &emergency_stack_start &&
	    &type < &emergency_stack_end &&
	    type != T_IBREAK && STWM_R1_D_SR0_SP(opcode)) {
		/* Restore the caller's frame pointer. */
		frame->tf_r3 = frame->tf_r1;
		/* Restore the caller's instruction offsets. */
		frame->tf_iioq_head = frame->tf_rp;
		frame->tf_iioq_tail = frame->tf_iioq_head + 4;
		goto dead_end;
	}
#endif /* DIAGNOSTIC */
		
#ifdef DEBUG
	frame_sanity_check(frame, l);
#endif /* DEBUG */

	/* If this is a trap, not an interrupt, reenable interrupts. */
	if (type_raw != T_INTERRUPT)
		mtctl(frame->tf_eiem, CR_EIEM);

	if (frame->tf_flags & TFF_LAST)
		l->l_md.md_regs = frame;

	if (type_raw >= trap_types)
		tts = "reserved";
	else
		tts = trap_type[type_raw];

#ifdef TRAPDEBUG
	if (type_raw != T_INTERRUPT && type_raw != T_IBREAK)
		printf("trap: %d, %s for %x:%x at %x:%x, fp=%p, rp=%x\n",
		    type, tts, space, (u_int)va, frame->tf_iisq_head,
		    frame->tf_iioq_head, frame, frame->tf_rp);
	else if (type_raw == T_IBREAK)
		printf("trap: break instruction %x:%x at %x:%x, fp=%p\n",
		    break5(opcode), break13(opcode),
		    frame->tf_iisq_head, frame->tf_iioq_head, frame);

	{
		extern int etext;
		if (frame < (struct trapframe *)&etext) {
			printf("trap: bogus frame ptr %p\n", frame);
			goto dead_end;
		}
	}
#endif
	switch (type) {
	case T_NONEXIST:
	case T_NONEXIST|T_USER:
#if !defined(DDB) && !defined(KGDB)
		/* we've got screwed up by the central scrutinizer */
		panic ("trap: elvis has just left the building!");
		break;
#else
		goto dead_end;
#endif
	case T_RECOVERY|T_USER:
#ifdef USERTRACE
		for(;;) {
			if (frame->tf_iioq_head != rctr_next_iioq)
				printf("-%08x\nr %08x",
					rctr_next_iioq - 4,
					frame->tf_iioq_head);
			rctr_next_iioq = frame->tf_iioq_head + 4;
			if (frame->tf_ipsw & PSW_N) {
				/* Advance the program counter. */
				frame->tf_iioq_head = frame->tf_iioq_tail;
				frame->tf_iioq_tail = frame->tf_iioq_head + 4;
				/* Clear flags. */
				frame->tf_ipsw &= ~(PSW_N|PSW_X|PSW_Y|PSW_Z|PSW_B|PSW_T|PSW_H|PSW_L);
				/* Simulate another trap. */
				continue;
			}
			break;
		}
		frame->tf_rctr = 0;
		break;
#endif /* USERTRACE */
	case T_RECOVERY:
#if !defined(DDB) && !defined(KGDB)
		/* XXX will implement later */
		printf ("trap: handicapped");
		break;
#else
		goto dead_end;
#endif

	case T_EMULATION | T_USER:
#ifdef FPEMUL
		hppa_fpu_emulate(frame, l);
#else  /* !FPEMUL */
		/*
		 * We don't have FPU emulation, so signal the
		 * process with a SIGFPE.
		 */
		hppa_trapsignal_hack(l, SIGFPE, frame->tf_iioq_head);
#endif /* !FPEMUL */
		break;

#ifdef DIAGNOSTIC
	case T_EXCEPTION:
		panic("FPU/SFU emulation botch");

		/* these just can't happen ever */
	case T_PRIV_OP:
	case T_PRIV_REG:
		/* these just can't make it to the trap() ever */
	case T_HPMC:      case T_HPMC | T_USER:
	case T_EMULATION:
#endif
	case T_IBREAK:
	case T_DATALIGN:
	case T_DBREAK:
	dead_end:
		if (type & T_USER) {
#ifdef DEBUG
			user_backtrace(frame, l, type);
#endif
			hppa_trapsignal_hack(l, SIGILL, frame->tf_iioq_head);
			break;
		}
		if (trap_kdebug(type, va, frame))
			return;
		else if (type == T_DATALIGN)
			panic ("trap: %s at 0x%x", tts, (u_int) va);
		else
			panic ("trap: no debugger for \"%s\" (%d)", tts, type);
		break;

	case T_IBREAK | T_USER:
	case T_DBREAK | T_USER:
		/* pass to user debugger */
		break;

	case T_EXCEPTION | T_USER:	/* co-proc assist trap */
		hppa_trapsignal_hack(l, SIGFPE, va);
		break;

	case T_OVERFLOW | T_USER:
		hppa_trapsignal_hack(l, SIGFPE, va);
		break;
		
	case T_CONDITION | T_USER:
		break;

	case T_ILLEGAL | T_USER:
#ifdef DEBUG
		user_backtrace(frame, l, type);
#endif
		hppa_trapsignal_hack(l, SIGILL, va);
		break;

	case T_PRIV_OP | T_USER:
#ifdef DEBUG
		user_backtrace(frame, l, type);
#endif
		hppa_trapsignal_hack(l, SIGILL, va);
		break;

	case T_PRIV_REG | T_USER:
#ifdef DEBUG
		user_backtrace(frame, l, type);
#endif
		hppa_trapsignal_hack(l, SIGILL, va);
		break;

		/* these should never get here */
	case T_HIGHERPL | T_USER:
	case T_LOWERPL | T_USER:
		hppa_trapsignal_hack(l, SIGSEGV, va);
		break;

	case T_IPROT | T_USER:
	case T_DPROT | T_USER:
		hppa_trapsignal_hack(l, SIGSEGV, va);
		break;

	case T_DATACC:   	case T_USER | T_DATACC:
	case T_ITLBMISS:	case T_USER | T_ITLBMISS:
	case T_DTLBMISS:	case T_USER | T_DTLBMISS:
	case T_ITLBMISSNA:	case T_USER | T_ITLBMISSNA:
	case T_DTLBMISSNA:	case T_USER | T_DTLBMISSNA:
	case T_TLB_DIRTY:	case T_USER | T_TLB_DIRTY:
		vm = p->p_vmspace;

		if (!vm) {
#ifdef TRAPDEBUG
			printf("trap: no vm, p=%p\n", p);
#endif
			goto dead_end;
		}

		/*
		 * it could be a kernel map for exec_map faults
		 */
		if (!(type & T_USER) && space == HPPA_SID_KERNEL)
			map = kernel_map;
		else {
			map = &vm->vm_map;
			if (l->l_flag & L_SA) {
				l->l_savp->savp_faultaddr = va;
				l->l_flag |= L_SA_PAGEFAULT;
			}
		}

		va = hppa_trunc_page(va);

		if (map->pmap->pmap_space != space) {
#ifdef TRAPDEBUG
			printf("trap: space missmatch %d != %d\n",
			    space, map->pmap->pmap_space);
#endif
			/* actually dump the user, crap the kernel */
			goto dead_end;
		}

		/* Never call uvm_fault in interrupt context. */
		KASSERT(hppa_intr_depth == 0);

		ret = uvm_fault(map, va, 0, vftype);

#ifdef TRAPDEBUG
		printf("uvm_fault(%p, %x, %d, %d)=%d\n",
		    map, (u_int)va, 0, vftype, ret);
#endif

		if (map != kernel_map)
			l->l_flag &= ~L_SA_PAGEFAULT;
		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size.  Also, if uvm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if (va >= (vaddr_t)vm->vm_maxsaddr + vm->vm_ssize) {
			if (ret == 0) {
				vsize_t nss = btoc(va - USRSTACK + PAGE_SIZE);
				if (nss > vm->vm_ssize)
					vm->vm_ssize = nss;
			} else if (ret == EACCES)
				ret = EFAULT;
		}

		if (ret != 0) {
			if (type & T_USER) {
printf("trapsignal: uvm_fault(%p, %x, %d, %d)=%d\n",
	map, (u_int)va, 0, vftype, ret);
#ifdef DEBUG
				user_backtrace(frame, l, type);
#endif
				hppa_trapsignal_hack(l, SIGSEGV, frame->tf_ior);
			} else {
				if (l && l->l_addr->u_pcb.pcb_onfault) {
#ifdef PMAPDEBUG
					printf("trap: copyin/out %d\n",ret);
#endif
					pcbp = &l->l_addr->u_pcb;
					frame->tf_iioq_tail = 4 +
					    (frame->tf_iioq_head =
						pcbp->pcb_onfault);
					pcbp->pcb_onfault = 0;
					break;
				}
#if 1
if (trap_kdebug (type, va, frame))
	return;
#else
				panic("trap: uvm_fault(%p, %x, %d, %d): %d",
				    map, va, 0, vftype, ret);
#endif
			}
		}
		break;

	case T_DATALIGN | T_USER:
#ifdef DEBUG
		user_backtrace(frame, l, type);
#endif
		hppa_trapsignal_hack(l, SIGBUS, va);
		break;

	case T_INTERRUPT:
	case T_INTERRUPT|T_USER:
		hppa_intr(frame);
		mtctl(frame->tf_eiem, CR_EIEM);
#if 0
if (trap_kdebug (type, va, frame))
return;
#endif
		break;
	case T_LOWERPL:
	case T_DPROT:
	case T_IPROT:
	case T_OVERFLOW:
	case T_CONDITION:
	case T_ILLEGAL:
	case T_HIGHERPL:
	case T_TAKENBR:
	case T_POWERFAIL:
	case T_LPMC:
	case T_PAGEREF:
	case T_DATAPID:  	case T_DATAPID  | T_USER:
		if (0 /* T-chip */) {
			break;
		}
		/* FALLTHROUGH to unimplemented */
	default:
#if 1
if (trap_kdebug (type, va, frame))
	return;
#endif
		panic ("trap: unimplemented \'%s\' (%d)", tts, type);
	}

	if (type & T_USER)
		userret(l, l->l_md.md_regs->tf_iioq_head, 0);

#ifdef DEBUG
	frame_sanity_check(frame, l);
	if (frame->tf_flags & TFF_LAST && curlwp != NULL)
		frame_sanity_check(curlwp->l_md.md_regs, curlwp);
#endif /* DEBUG */
}
Code example #23
File: hp700_init.c Project: rohsaini/mkunity
void
hp700_init(int argc, char *argv[], char *envp[])
{
	int hpmc_br_instr;
	int *p = (int *) i_hpmach_chk;
	register struct mapping *mp;
	int i;
	vm_offset_t addr;
	int pdcerr;
	vm_offset_t first_page;

	struct pdc_coproc pdc_coproc;
	struct pdc_cache pdc_cache;
	struct pdc_model pdc_model;
	struct pdc_iodc_read pdc_iodc;
	extern int crashdump(void);
#ifdef	BTLB
	struct pdc_btlb pdc_btlb;
#endif
#ifdef	HPT
	struct pdc_hwtlb pdc_hwtlb;
	extern struct hpt_entry *hpt_table;
	extern int usehpt;
#endif	

	first_page = move_bootstrap();

	if (argc >= 1 && argc <= 4) {
		char *btstring = boot_string;
		char *src = (argc == 1 ? envp[5] : argv[2]);

		i = 0;
		while (*src != '\0' && i++ <= BOOT_LINE_LENGTH)
			*btstring++ = *src++;
		*btstring = '\0';
	}

	pdc = PAGE0->mem_pdc;

	delay_init();
	pdc_console_init();

	printf("%s", version);

	/*
	 * Determine what the boot program is using as its console
	 * so that we can use the same device.
	 */
	pdcerr = (*pdc)(PDC_IODC, PDC_IODC_READ, &pdc_iodc,
			PAGE0->mem_cons.pz_hpa, PDC_IODC_INDEX_DATA,
			&cons_iodc, sizeof(cons_iodc));
	if (pdcerr == 0)
		bcopy((char *)&PAGE0->mem_cons.pz_dp, (char *)&cons_dp,
		      sizeof(struct device_path));
	else
		printf("Warning: can't id console boot device (PDC Ret'd %d)\n",
		       pdcerr);

        /*
         * Read boot device from PROM
         */
	pdcerr = (*PAGE0->mem_pdc)(PDC_IODC, PDC_IODC_READ, &pdc_iodc,
	                           PAGE0->mem_boot.pz_hpa, PDC_IODC_INDEX_DATA,
	                           &boot_iodc, sizeof(boot_iodc));
	if (pdcerr == 0)
		bcopy((char *)&PAGE0->mem_boot.pz_dp, (char *)&boot_dp,
		      sizeof(struct device_path));
	else
		printf("Warning: can't id boot device (PDC Ret'd %d)\n",
		       pdcerr);
	
	/*
	 * Setup the transfer of control addr to point to the crash dump
	 * initialization code.
	 */
	PAGE0->ivec_toc = crashdump;

	/*
	 * get cache parameters from the PDC
	 */
	(*PAGE0->mem_pdc)(PDC_CACHE, PDC_CACHE_DFLT, &pdc_cache);

	dcache_line_size = pdc_cache.dc_conf.cc_line * 16;
	dcache_line_mask = dcache_line_size - 1;
	dcache_block_size = dcache_line_size * pdc_cache.dc_conf.cc_block;

	dcache_size = pdc_cache.dc_size;
	dcache_base = pdc_cache.dc_base;
	dcache_stride = pdc_cache.dc_stride;
	dcache_count = pdc_cache.dc_count;
	dcache_loop = pdc_cache.dc_loop;

	icache_line_size = pdc_cache.ic_conf.cc_line * 16;
	icache_line_mask = icache_line_size - 1;
	icache_block_size = icache_line_size * pdc_cache.ic_conf.cc_block;

	icache_base = pdc_cache.ic_base;
	icache_stride = pdc_cache.ic_stride;
	icache_count = pdc_cache.ic_count;
	icache_loop = pdc_cache.ic_loop;

	/*
	 * purge TLBs and flush caches
	 */
	ptlball(&pdc_cache);

#ifdef	BTLB
        /*
         * get block tlb information for clearing
         */
	pdcerr = (*pdc)(PDC_BLOCK_TLB, PDC_BTLB_DEFAULT, &pdc_btlb);
	
        if (pdcerr != 0)
                printf("Warning: PDC_BTLB call Ret'd %d\n", pdcerr);

	switch (pdc_btlb.finfo.num_c) {
	/* S-Chip specific */
	case 0: 
		cputype = CPU_PCXS;
		for (i = 0; i < pdc_btlb.finfo.num_i; i++)
			purge_block_itlb(i);
		for (i = 0; i < pdc_btlb.finfo.num_d; i++)
			purge_block_dtlb(i);
		break;
	/* L-Chip specific */
	case 8:
		cputype = CPU_PCXL;
		for (i = 0; i < pdc_btlb.finfo.num_c; i++)
			purge_L_block_ctlb(i);
		break;
	/* T-Chip specific */
	case 16:
		cputype = CPU_PCXT;
		for (i = 0; i < pdc_btlb.finfo.num_c; i++)
			purge_block_ctlb(i);
		break;
	default:
		panic("unrecognized block-TLB, cannot purge block TLB(s)");
		/* NOTREACHED */
	}
#endif

	fcacheall();

	/*
	 * get the cpu type
	 */
	(*PAGE0->mem_pdc)(PDC_MODEL, PDC_MODEL_INFO, &pdc_model);

	machtype = pdc_model.hvers >> 4;

	cpuinfo(&pdc_cache);

	if (dcache_line_size != CACHE_LINE_SIZE)
		printf("WARNING: data cache line size = %d bytes, %s\n",
		       dcache_line_size, "THIS IS *VERY* BAD!");

	/*
	 * Get the instruction to do branch to PDC_HPMC from PDC.  If
	 * successful, then insert the instruction at the beginning
	 * of the HPMC handler.
	 */
	if ((*PAGE0->mem_pdc)(PDC_INSTR, PDC_INSTR_DFLT, &hpmc_br_instr) == 0)
		p[0] = hpmc_br_instr;
	else
		p[0] = 0;

	/* 
	 * Now compute the checksum of the hpmc interrupt vector entry
	 */
	p[5] = -(p[0] + p[1] + p[2] + p[3] + p[4] + p[6] + p[7]);
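	/*
	 * Firmware considers an interrupt vector entry valid only when its
	 * eight words sum to zero (mod 2^32); p[5] is chosen above to make
	 * that sum come out to zero.
	 */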

	/*
	 * setup page size for Mach
	 */
	page_size = HP700_PGBYTES;
	vm_set_page_size();

	/*
	 * configure the devices including memory. Passes back size of 
	 * physical memory in mem_size.
	 */
	busconf();

	/* 
	 * Zero out BSS of kernel before doing anything else. The location
	 * pointed to by &edata is included in the data section.
	 */
	bzero((char*)((vm_offset_t) &edata + 4), (vm_offset_t) &end - 
	      (vm_offset_t) &edata - 4);

        /*
         * Locate any coprocessors and enable them by setting up the CCR.
         * SFUs are ignored (since we don't have any).  Also, initialize
         * the floating point registers here.
         */
        if ((pdcerr = (*pdc)(PDC_COPROC, PDC_COPROC_DFLT, &pdc_coproc)) < 0)
                printf("Warning: PDC_COPROC call Ret'd %d\n", pdcerr);
        copr_sfu_config = pdc_coproc.ccr_enable;
        mtctl(CR_CCR, copr_sfu_config & CCR_MASK);
        fprinit(&fpcopr_version);
	fpcopr_version = (fpcopr_version & 0x003ff800) >> 11;
        mtctl(CR_CCR, 0);

        /*
         * Clear the FAULT light (so we know when we get a real one)
         * PDC_COPROC apparently turns it on (for whatever reason).
         */
        pdcerr = PDC_OSTAT(PDC_OSTAT_RUN) | 0xCEC0;
        (void) (*pdc)(PDC_CHASSIS, PDC_CHASSIS_DISP, pdcerr);

#ifdef TIMEX
	/*
	 * Enable the quad-store instruction.
	 */
	pdcerr = (*pdc)(PDC_MODEL, PDC_MODEL_ENSPEC,
			&pdc_model, pdc_model.pot_key);
	if (pdcerr < 0)
		printf("Warning: PDC enable FP quad-store Ret'd %d\n", pdcerr);
#endif


	/*
	 * Initialize the Event Trace Analysis Package
	 * Static Phase: 1 of 2
	 */
	etap_init_phase1();

	/*
	 * on the hp700 the value in &etext is a pointer to the last word
	 * in the text section. Similarly &edata and &end are pointers to
	 * the last words in the section. We want to change this so that 
	 * these pointers point past the sections that they terminate.
	 */
	text_start = trunc_page((vm_offset_t) &start_text);
	text_end = round_page((vm_offset_t) &etext + 4);

	/*
	 * before we go to all the work to initialize the VM see if we really 
	 * linked the image past the end of the PDC/IODC area.
	 */
	if (text_start < 0x10800)
		panic("kernel text mapped over PDC and IODC memory");

	/*
	 * find ranges of physical memory that aren't allocated to the kernel
	 */

	avail_start = round_page(first_page);
	first_avail = avail_start;
	avail_end = trunc_page(mem_size);
	
	/*
	 * bootstrap the rest of the virtual memory system
	 */
#ifdef MAXMEMBYTES
	if ((avail_end - avail_start) > MAXMEMBYTES) {
		mem_size  = trunc_page(MAXMEMBYTES);
		avail_end = mem_size;
	}
#endif

#ifdef HPT
	/*
	 * If we want to use the HW TLB support, ensure that it exists.
	 */
	if (usehpt &&
	    !((*pdc)(PDC_TLB, PDC_TLB_INFO, &pdc_hwtlb) == 0 &&
	      (pdc_hwtlb.min_size || pdc_hwtlb.max_size)))
		usehpt = 0;
#endif

	pmap_bootstrap(&avail_start, &avail_end);

	/*
	 * set limits on virtual memory and kernel equivalenced memory
	 */
	virtual_avail = avail_end;
	virtual_end = trunc_page(VM_MAX_KERNEL_ADDRESS);

	/*
	 * pmap_bootstrap allocated memory for data structures that must
	 * be equivalently mapped.
	 */
	equiv_end = (long) round_page((vm_offset_t) &end);
	io_end = 0xF0000000;	/* XXX */

	/*
	 * Do block mapping. We are mapping from 0, up through the first
	 * power of 2 address above the end of the equiv region. This 
	 * means some memory gets block mapped that should not be, but
	 * so be it (we make the text writable also :-)). We do this to
	 * conserve block entries since we hope to use them for other
	 * purposes (someday).
	 */
	addr = avail_start;
	if (addr != 1 << log2(addr))
		addr = 1 << log2(addr);

#ifdef	BTLB
	if(pdc_btlb.finfo.num_c)
		printf("%d BTLB entries found.  Block mapping up to 0x%x (0x%x)\n",
		       pdc_btlb.finfo.num_c, addr, avail_start);

	/*
	 * XXX L-CHIP vs T-CHIP vs S-CHIP difference in Block TLB insertion.
	 */
	switch (pdc_btlb.finfo.num_c) {
	/* S-CHIP */
	case 0:
		pmap_block_map(0, addr, VM_PROT_ALL, 0, BLK_ICACHE);
		pmap_block_map(0, addr, VM_PROT_READ|VM_PROT_WRITE,
			       0, BLK_DCACHE);
		break;
	/* L-CHIP */
	case 8:
		pmap_block_map(0, addr, VM_PROT_ALL, 0, BLK_LCOMBINED);
		break;
	/* T-CHIP */
	case 16:
		pmap_block_map(0, addr, VM_PROT_ALL, 0, BLK_COMBINED);
		break;
	default:
		panic("unrecognized block-TLB, cannot map kernel");
		/* NOTREACHED */
	}
#endif

#ifdef	HPT
	/*
	 * Turn on the HW TLB assist.
	 */
	if (usehpt) {
		pdcerr = (*pdc)(PDC_TLB, PDC_TLB_CONFIG,
				&pdc_hwtlb, hpt_table,
				sizeof(struct hpt_entry) * HP700_HASHSIZE,
				PDC_TLB_WORD3);
		if (pdcerr) {
			printf("Warning: HW TLB init failed (%d), disabled\n",
			       pdcerr);
			usehpt = 0;
		} else
			printf("HW TLB initialized (%d entries at 0x%x)\n",
			       HP700_HASHSIZE, hpt_table);
	}
#endif

	/*
	 * map the PDC and IODC area for kernel read/write
	 * XXX - should this be read only?
	 */
	(void) pmap_map(0, 0, text_start, VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * map the kernel text area.
	 */
#if KGDB
	(void) pmap_map(text_start, text_start, text_end, 
			VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_WRITE);
#else
	(void) pmap_map(text_start, text_start, text_end, 
			VM_PROT_READ | VM_PROT_EXECUTE);
#endif

	/*
	 * map the data section of the kernel
	 */
	(void) pmap_map(text_end, text_end, avail_start,
			VM_PROT_READ | VM_PROT_WRITE);

#ifndef IO_HACK
	/*
	 * map the I/O pages
	 */
	(void) pmap_map(trunc_page(io_size), trunc_page(io_size),
			0, VM_PROT_READ | VM_PROT_WRITE);
#endif

#if 0
	/*
	 * map the breakpoint page
	 */
	(void) pmap_map(break_page, break_page, break_page+HP700_PAGE_SIZE,
			VM_PROT_READ | VM_PROT_EXECUTE);
#endif

	/*
	 * map the interrupt stack red zone.
	 */
	addr = trunc_page((vm_offset_t) &intstack_top);
	(void) pmap_map(addr, addr, addr + PAGE_SIZE, VM_PROT_READ);

	vm_on = 1;
}
Code example #24
File: intr.c Project: MarginC/kame
/*
 * This finally initializes interrupts.
 */
void
hp700_intr_init(void)
{
	int idx, bit_pos;
	struct hp700_int_bit *int_bit;
	int mask;
	struct hp700_int_reg *int_reg;
	int eiem;

	/* Initialize soft interrupts. */
	softintr_init();

	/*
	 * Put together the initial imask for each level.
	 */
	memset(imask, 0, sizeof(imask));
	for (bit_pos = 0; bit_pos < HP700_INT_BITS; bit_pos++) {
		int_bit = hp700_int_bits + bit_pos;
		if (int_bit->int_bit_reg == NULL)
			continue;
		imask[int_bit->int_bit_ipl] |= int_bit->int_bit_spl;
	}
	
	/* The following bits cribbed from i386/isa/isa_machdep.c: */

        /* 
         * IPL_NONE is used for hardware interrupts that are never blocked,
         * and do not block anything else.
         */
        imask[IPL_NONE] = 0;

        /*
         * Enforce a hierarchy that gives slow devices a better chance at not
         * dropping data.
         */
        imask[IPL_SOFTCLOCK] |= imask[IPL_NONE]; 
        imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK];
        imask[IPL_BIO] |= imask[IPL_SOFTNET];
        imask[IPL_NET] |= imask[IPL_BIO];
        imask[IPL_SOFTSERIAL] |= imask[IPL_NET];
        imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];

        /*
         * There are tty, network and disk drivers that use free() at interrupt
         * time, so imp > (tty | net | bio).
         */
        imask[IPL_VM] |= imask[IPL_TTY];
        
        imask[IPL_AUDIO] |= imask[IPL_VM];
   
        /*
         * Since run queues may be manipulated by both the statclock and tty,
         * network, and disk drivers, clock > imp.
         */             
        imask[IPL_CLOCK] |= imask[IPL_AUDIO];
        
        /* 
         * IPL_HIGH must block everything that can manipulate a run queue.
         */     
        imask[IPL_HIGH] |= imask[IPL_CLOCK]; 

	/* Now go back and flesh out the spl levels on each bit. */
	for(bit_pos = 0; bit_pos < HP700_INT_BITS; bit_pos++) {
		int_bit = hp700_int_bits + bit_pos;
		if (int_bit->int_bit_reg == NULL)
			continue;
		int_bit->int_bit_spl = imask[int_bit->int_bit_ipl];
	}

	/* Print out the levels. */
	printf("biomask %08x netmask %08x ttymask %08x\n",
	    imask[IPL_BIO], imask[IPL_NET], imask[IPL_TTY]);
#if 0
	for(bit_pos = 0; bit_pos < NIPL; bit_pos++)
		printf("imask[%d] == %08x\n", bit_pos, imask[bit_pos]);
#endif

	/*
	 * Load all mask registers, loading %eiem last.
	 * This will finally enable interrupts, but since
	 * cpl and ipending should be -1 and 0, respectively,
	 * no interrupts will get dispatched until the 
	 * priority level is lowered.
	 *
	 * Because we're paranoid, we force these values
	 * for cpl and ipending, even though they should
	 * be unchanged since hp700_intr_bootstrap().
	 */
	cpl = -1;
	ipending = 0;
	eiem = 0;
	for (idx = 0; idx < HP700_INT_BITS; idx++) {
		int_reg = hp700_int_regs[idx];
		if (int_reg == NULL)
			continue;
		mask = 0;
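		/*
		 * int_reg_bits_map[] is indexed in PA-RISC bit order, in
		 * which bit 0 is the most significant; 31 ^ bit_pos converts
		 * from the LSB-first numbering this loop uses.
		 */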
		for (bit_pos = 0; bit_pos < HP700_INT_BITS; bit_pos++) {
			if (int_reg->int_reg_bits_map[31 ^ bit_pos] !=
			    INT_REG_BIT_UNUSED)
				mask |= (1 << bit_pos);
		}
		if (int_reg == &int_reg_cpu)
			eiem = mask;
		else if (int_reg->int_reg_mask != NULL)
			*int_reg->int_reg_mask = mask;
	}
	mtctl(eiem, CR_EIEM);
}
Code example #25
void
hpux_setregs(struct proc *p, struct exec_package *pack, u_long stack,
             register_t *retval)
{
    extern int cpu_model_hpux;	/* machdep.c */
    extern paddr_t fpu_curpcb;	/* from locore.S */
    extern u_int fpu_version;	/* from machdep.c */
    struct ps_strings arginfo;	/* XXX copy back in from the stack */
    struct hpux_keybits {
        int	kb_cpuver;
        int	kb_fpustat;
        int	kb_nbits;
        int	kb_bits[2];
    } hpux_keybits;
    struct trapframe *tf = p->p_md.md_regs;
    struct pcb *pcb = &p->p_addr->u_pcb;
    register_t zero;

    if (copyin((char *)PS_STRINGS, &arginfo, sizeof(arginfo)))
        sigexit(p, SIGILL);

    stack = (stack + 0x1f) & ~0x1f;	/* round up to a 32-byte boundary */
    hpux_keybits.kb_cpuver = cpu_model_hpux;
    hpux_keybits.kb_fpustat = fpu_version;
    hpux_keybits.kb_nbits = 1;
    hpux_keybits.kb_bits[0] = 0;	/* TODO report half-word insns */
    hpux_keybits.kb_bits[1] = -1;
    if (copyout(&hpux_keybits, (void *)stack, sizeof(hpux_keybits)))
        sigexit(p, SIGILL);

    tf->tf_flags = TFF_SYS|TFF_LAST;
    tf->tf_iioq_tail = 4 +
                       (tf->tf_iioq_head = pack->ep_entry | HPPA_PC_PRIV_USER);
    tf->tf_rp = 0;
    tf->tf_arg0 = (register_t)arginfo.ps_nargvstr;
    tf->tf_arg1 = (register_t)arginfo.ps_argvstr;
    tf->tf_arg2 = (register_t)arginfo.ps_envstr;
    tf->tf_arg3 = stack;	/* keybits */
    stack += sizeof(hpux_keybits);

    /* setup terminal stack frame */
    stack = (stack + 0x1f) & ~0x1f;
    tf->tf_r3 = stack;
    tf->tf_sp = stack += HPPA_FRAME_SIZE;
    zero = 0;
    copyout(&zero, (caddr_t)(stack - HPPA_FRAME_SIZE), sizeof(register_t));
    copyout(&zero, (caddr_t)(stack + HPPA_FRAME_CRP), sizeof(register_t));

    /* reset any of the pending FPU exceptions */
    pcb->pcb_fpregs[0] = ((u_int64_t)HPPA_FPU_INIT) << 32;
    pcb->pcb_fpregs[1] = 0;
    pcb->pcb_fpregs[2] = 0;
    pcb->pcb_fpregs[3] = 0;
    fdcache(HPPA_SID_KERNEL, (vaddr_t)pcb->pcb_fpregs, 8 * 4);
    if (tf->tf_cr30 == fpu_curpcb) {
        fpu_curpcb = 0;
        /* force an fpu ctxsw, we won't be hugged by the cpu_switch */
        mtctl(0, CR_CCR);
    }
    retval[1] = 0;
}
Code example #26
File: intr.c Project: MarginC/kame
/*
 * Dispatch interrupts.  This dispatches at least one interrupt.
 * This is called with %eiem loaded with zero.
 */
void
hp700_intr_dispatch(int ncpl, int eiem, struct trapframe *frame)
{
	int ipending_run;
	u_int old_hppa_intr_depth;
	int bit_pos;
	struct hp700_int_bit *int_bit;
	void *arg;
	struct clockframe clkframe;
	int handled;

	/* Increment our depth, grabbing the previous value. */
	old_hppa_intr_depth = hppa_intr_depth++;

	/* Loop while we have interrupts to dispatch. */
	for (;;) {

		/* Read ipending and mask it with ncpl. */
		ipending_run = (ipending & ~ncpl);
		if (ipending_run == 0)
			break;

		/* Choose one of the resulting bits to dispatch. */
		bit_pos = ffs(ipending_run) - 1;

		/* Increment the counter for this interrupt. */
		intrcnt[bit_pos]++;

		/*
		 * If this interrupt handler takes the clockframe
		 * as an argument, conjure one up.
		 */
		int_bit = hp700_int_bits + bit_pos;
		arg = int_bit->int_bit_arg;
		if (arg == NULL) {
			clkframe.cf_flags = (old_hppa_intr_depth ? 
						TFF_INTR : 0);
			clkframe.cf_spl = ncpl;
			if (frame != NULL) {
				clkframe.cf_flags |= frame->tf_flags;
				clkframe.cf_pc = frame->tf_iioq_head;
			}
			arg = &clkframe;
		}
	
		/*
		 * Remove this bit from ipending, raise spl to 
		 * the level required to run this interrupt, 
		 * and reenable interrupts.
		 */
		ipending &= ~(1 << bit_pos);
		cpl = ncpl | int_bit->int_bit_spl;
		mtctl(eiem, CR_EIEM);

		/* Dispatch the interrupt. */
		handled = (*int_bit->int_bit_handler)(arg);
#if 0
		if (!handled)
			printf("%s: can't handle interrupt\n",
				int_bit->int_bit_evcnt.ev_name);
#endif

		/* Disable interrupts and loop. */
		mtctl(0, CR_EIEM);
	}

	/* Interrupts are disabled again, restore cpl and the depth. */
	cpl = ncpl;
	hppa_intr_depth = old_hppa_intr_depth;
}