Example #1
0
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                           unsigned long end)
{
    if (mm->context != 0) {
        unsigned long flags;
        int size;

#ifdef DEBUG_TLB
        printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & 0xff),
               start, end);
#endif
        __save_and_cli(flags);
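        /*
         * Convert the range to whole pages, then halve it: each TLB
         * entry maps an even/odd page pair via EntryLo0/EntryLo1.
         */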
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= mips_cpu.tlbsize/2) {
            int oldpid = (get_entryhi() & 0xff);
            int newpid = (mm->context & 0xff);

            start &= (PAGE_MASK << 1);
            end += ((PAGE_SIZE << 1) - 1);
            end &= (PAGE_MASK << 1);
            while (start < end) {
                int idx;

                set_entryhi(start | newpid);
                start += (PAGE_SIZE << 1);
                BARRIER;
                tlb_probe();
                BARRIER;
                idx = get_index();
                set_entrylo0(0);
                set_entrylo1(0);
                set_entryhi(KSEG0);
                if (idx < 0)
                    continue;
                BARRIER;
                /* Make sure all entries differ. */
                set_entryhi(KSEG0+idx*0x2000);
                tlb_write_indexed();
                BARRIER;
            }
            set_entryhi(oldpid);
        } else {
            get_new_mmu_context(mm, smp_processor_id());
            if (mm == current->active_mm)
                set_entryhi(mm->context & 0xff);
        }
        __restore_flags(flags);
    }
}
Example #2
0
static void r5900_flush_cache_range_d64i64(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
{
	if(mm->context != 0) {
		unsigned long flags;

#ifdef DEBUG_CACHE
		printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
		__save_and_cli(flags);
		blast_dcache64(); blast_icache64();
		__restore_flags(flags);
	}
}
Example #3
0
void local_flush_tlb_mm(struct mm_struct *mm)
{
    if (mm->context != 0) {
        unsigned long flags;

#ifdef DEBUG_TLB
        printk("[tlbmm<%d>]", mm->context);
#endif
        __save_and_cli(flags);
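        /*
         * Rather than scanning the TLB, give the mm a fresh ASID;
         * stale entries tagged with the old ASID can no longer match.
         */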
        get_new_mmu_context(mm, smp_processor_id());
        if (mm == current->active_mm)
            set_entryhi(mm->context & 0xff);
        __restore_flags(flags);
    }
}
Example #4
0
void setup_APIC_timer(void * data)
{
	unsigned int clocks = (unsigned int) data, slice, t0, t1;
	unsigned long flags;
	int delta;

	__save_flags(flags);
	__sti();
	/*
	 * OK, Intel has some smart code in their APIC that knows
	 * if a CPU was in 'hlt' low-power mode, and this increases
	 * its APIC arbitration priority. To avoid the external timer
	 * IRQ APIC event being in sync with the APIC clock we
	 * introduce an interrupt skew to spread out timer events.
	 *
	 * The number of slices within a 'big' timeslice is smp_num_cpus+1.
	 */

	slice = clocks / (smp_num_cpus+1);
	printk("cpu: %d, clocks: %d, slice: %d\n",
		smp_processor_id(), clocks, slice);

	/*
	 * Wait for IRQ0's slice:
	 */
	wait_8254_wraparound();

	__setup_APIC_LVTT(clocks);

	t0 = apic_read(APIC_TMICT)*APIC_DIVISOR;
	/* Wait till TMCCT gets reloaded from TMICT... */
	do {
		t1 = apic_read(APIC_TMCCT)*APIC_DIVISOR;
		delta = (int)(t0 - t1 - slice*(smp_processor_id()+1));
	} while (delta >= 0);
	/* Now wait for our slice for real. */
	do {
		t1 = apic_read(APIC_TMCCT)*APIC_DIVISOR;
		delta = (int)(t0 - t1 - slice*(smp_processor_id()+1));
	} while (delta < 0);

	__setup_APIC_LVTT(clocks);

	printk("CPU%d<T0:%d,T1:%d,D:%d,S:%d,C:%d>\n",
			smp_processor_id(), t0, t1, delta, slice, clocks);

	__restore_flags(flags);
}
Example #5
0
/*
 * Initialize controller registers with default values.
 */
static int __init initRegisters (void) {
	RegInitializer *p;
	byte t;
	unsigned long flags;

	__save_flags(flags);	/* local CPU only */
	__cli();		/* local CPU only */
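	/*
	 * Switch the chip into configuration mode (regOn), program the
	 * default register table, read back a status bit, then drop out
	 * of configuration mode (regOff).
	 */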
	outb_p(regOn, basePort);
	for (p = initData; p->reg != 0; ++p)
		outReg(p->data, p->reg);
	outb_p(0x01, regPort);
	t = inb(regPort) & 0x01;
	outb_p(regOff, basePort);
	__restore_flags(flags);	/* local CPU only */
	return t;
}
Example #6
0
static unsigned int
conf_read(unsigned long addr, unsigned char type1,
	  struct pci_controler *hose)
{
	unsigned long flags;
	unsigned long mid = MCPCIA_HOSE2MID(hose->index);
	unsigned int stat0, value, temp, cpu;

	cpu = smp_processor_id();

	__save_and_cli(flags);

	DBG_CFG(("conf_read(addr=0x%lx, type1=%d, hose=%d)\n",
		 addr, type1, mid));

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
	*(vuip)MCPCIA_CAP_ERR(mid) = stat0;
	mb();
	temp = *(vuip)MCPCIA_CAP_ERR(mid);
	DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0));

	mb();
	draina();
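	/*
	 * Arm the machine-check handler: a master abort on the config
	 * cycle (e.g. probing an empty slot) is expected here and must
	 * not be treated as a fatal error.
	 */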
	mcheck_expected(cpu) = 1;
	mcheck_taken(cpu) = 0;
	mcheck_extra(cpu) = mid;
	mb();

	/* Access configuration space.  */
	value = *((vuip)addr);
	mb();
	mb();  /* magic */

	if (mcheck_taken(cpu)) {
		mcheck_taken(cpu) = 0;
		value = 0xffffffffU;
		mb();
	}
	mcheck_expected(cpu) = 0;
	mb();

	DBG_CFG(("conf_read(): finished\n"));

	__restore_flags(flags);
	return value;
}
Example #7
0
static inline void send_IPI_mask_sequence(int mask, int vector)
{
	unsigned long cfg, flags;
	unsigned int query_cpu, query_mask;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send 
	 * to an arbitrary mask, so I do a unicasts to each CPU instead. This 
	 * should be modified to do 1 message per cluster ID - mbligh
	 */ 

	__save_flags(flags);
	__cli();

	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
		query_mask = 1 << query_cpu;
		if (query_mask & mask) {
		
			/*
			 * Wait for idle.
			 */
			apic_wait_icr_idle();
		
			/*
			 * prepare target chip field
			 */
			if(clustered_apic_mode == CLUSTERED_APIC_XAPIC)
				cfg = __prepare_ICR2(cpu_to_physical_apicid(query_cpu));
			else
				cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu));
			apic_write_around(APIC_ICR2, cfg);
		
			/*
			 * program the ICR 
			 */
			cfg = __prepare_ICR(0, vector);
			
			/*
			 * Send the IPI. The write to APIC_ICR fires this off.
			 */
			apic_write_around(APIC_ICR, cfg);
		}
	}
	__restore_flags(flags);
}
Example #8
0
void __init it8172_time_init(void)
{
	unsigned int est_freq;
	unsigned long flags;

	__save_and_cli(flags);
	/* Set Data mode - binary. */
	CMOS_WRITE(CMOS_READ(RTC_CONTROL) | RTC_DM_BINARY, RTC_CONTROL);

	printk("calculating r4koff... ");
	r4k_offset = cal_r4koff();
	printk("%08lx(%d)\n", r4k_offset, (int) r4k_offset);

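	/*
	 * r4k_offset is Count ticks per jiffy; the CP0 Count register
	 * increments at half the CPU clock, hence the factor of two.
	 */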
	est_freq = 2*r4k_offset*HZ;	
	est_freq += 5000;    /* round */
	est_freq -= est_freq%10000;
	printk("CPU frequency %d.%02d MHz\n", est_freq/1000000, 
	       (est_freq%1000000)*100/1000000);
	__restore_flags(flags);
}
Example #9
0
void flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	__save_and_cli(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = (get_entryhi() & 0xff);
	set_entrylo0(0);
	set_entrylo1(0);
	for (entry = 0; entry < tlb_entries; entry++) {
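		/*
		 * Give each entry a distinct unmapped KSEG0 address so
		 * that no two entries ever alias.
		 */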
		set_entryhi(KSEG0 + (PAGE_SIZE << 1) * entry);
		set_index(entry);
		tlb_write_indexed();
	}
	set_entryhi(old_ctx);
	__restore_flags(flags);	
}
Example #10
0
static void r49_flush_cache_range_d16i32(struct mm_struct *mm,
        unsigned long start,
        unsigned long end)
{
    if (mm->context != 0) {
        unsigned long flags, config;

#ifdef DEBUG_CACHE
        printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
#endif
        __save_and_cli(flags);
        blast_dcache16_wayLSB();
        /* disable icache (set ICE#) */
        config = read_32bit_cp0_register(CP0_CONFIG);
        write_32bit_cp0_register(CP0_CONFIG, config|TX49_CONF_IC);
        blast_icache32_wayLSB();
        write_32bit_cp0_register(CP0_CONFIG, config);
        __restore_flags(flags);
    }
}
Example #11
0
static void
r5900_dma_cache_inv_pc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	if (size >= dcache_size) {
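		/*
		 * The range covers the whole D-cache: a full flush is
		 * cheaper than hitting every line individually.
		 */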
		flush_cache_all();
	} else {
		unsigned long flags;
		__save_and_cli(flags);
		a = addr & ~(dc_lsize - 1);
		end = (addr + size) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a); /* Hit_Writeback_Inv_D */
			if (a == end) break;
			a += dc_lsize;
		}
		__restore_flags(flags);
	}
	bc_inv(addr, size);
}
Example #12
0
/* Drop into the prom, with the chance to continue with the 'go'
 * prom command.
 */
void
prom_cmdline(void)
{
    unsigned long flags;

    __save_and_cli(flags);

#ifdef CONFIG_SUN_CONSOLE
    if(!serial_console && prom_palette)
        prom_palette (1);
#endif

    /* We always arrive here via a serial interrupt.
     * So in order for everything to work reliably, even
     * on SMP, we need to drop the IRQ locks we hold.
     */
#ifdef CONFIG_SMP
    irq_exit(smp_processor_id(), 0);
    smp_capture();
#else
    local_irq_count(smp_processor_id())--;
#endif

    p1275_cmd ("enter", P1275_INOUT(0,0));

#ifdef CONFIG_SMP
    smp_release();
    irq_enter(smp_processor_id(), 0);
    spin_unlock_wait(&__br_write_locks[BR_GLOBALIRQ_LOCK].lock);
#else
    local_irq_count(smp_processor_id())++;
#endif

#ifdef CONFIG_SUN_CONSOLE
    if(!serial_console && prom_palette)
        prom_palette (0);
#endif

    __restore_flags(flags);
}
Example #13
0
static void apic_pm_resume(void *data)
{
	unsigned int l, h;
	unsigned long flags;

	__save_flags(flags);
	__cli();

	/*
	 * Make sure the APICBASE points to the right address
	 *
	 * FIXME! This will be wrong if we ever support suspend on
	 * SMP! We'll need to do this as part of the CPU restore!
	 */
	rdmsr(MSR_IA32_APICBASE, l, h);
	l &= ~MSR_IA32_APICBASE_BASE;
	l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
	wrmsr(MSR_IA32_APICBASE, l, h);

	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
	apic_write(APIC_ID, apic_pm_state.apic_id);
	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
	apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	__restore_flags(flags);
	if (apic_pm_state.perfctr_pmdev)
		pm_send(apic_pm_state.perfctr_pmdev, PM_RESUME, data);
}
Example #14
0
void
dump_tlb_addr(unsigned long addr)
{
	unsigned long flags;
	unsigned int oldpid;
	int index;

	__save_and_cli(flags);
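	/* Probe for the page containing addr under the current ASID. */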
	oldpid = get_entryhi() & 0xff;
	set_entryhi((addr & PAGE_MASK) | oldpid);
	tlb_probe();
	index = get_index();
	set_entryhi(oldpid);
	__restore_flags(flags);

	if (index < 0) {
		printk("No entry for address 0x%08lx in TLB\n", addr);
		return;
	}

	printk("Entry %d maps address 0x%08lx\n", index, addr);
	dump_tlb(index, index);
}
Example #15
0
static void apic_pm_suspend(void *data)
{
	unsigned long flags;

	if (apic_pm_state.perfctr_pmdev)
		pm_send(apic_pm_state.perfctr_pmdev, PM_SUSPEND, data);
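	/* Snapshot every APIC register that resume will need to restore. */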
	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
	__save_flags(flags);
	__cli();
	disable_local_APIC();
	__restore_flags(flags);
}
Example #16
0
/* 
 * Figure out the r4k offset, the amount to increment the compare
 * register for each time tick. 
 * Use the RTC to calculate offset.
 */
static unsigned long __init cal_r4koff(void)
{
	unsigned long flags;

	__save_and_cli(flags);

	/* Start counter exactly on falling edge of update flag */
	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

	/* Start r4k counter. */
	write_32bit_cp0_register(CP0_COUNT, 0);

	/* Read counter exactly on falling edge of update flag */
	while (CMOS_READ(RTC_REG_A) & RTC_UIP);
	while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));

	mips_counter_frequency = read_32bit_cp0_register(CP0_COUNT);

	/* restore interrupts */
	__restore_flags(flags);
		
	return (mips_counter_frequency / HZ);
}
Example #17
0
void local_flush_tlb_all(void)
{
    unsigned long flags;
    unsigned long old_ctx;
    int entry;

#ifdef DEBUG_TLB
    printk("[tlball]");
#endif

    __save_and_cli(flags);
    /* Save old context and create impossible VPN2 value */
    old_ctx = (get_entryhi() & 0xff);
    set_entrylo0(0);
    set_entrylo1(0);
    BARRIER;

    entry = get_wired();

    /* Blast 'em all away. */
    while (entry < mips_cpu.tlbsize) {
        /*
         * Make sure all entries differ.  If they're not different
         * MIPS32 will take revenge ...
         */
        set_entryhi(KSEG0 + entry*0x2000);
        set_index(entry);
        BARRIER;
        tlb_write_indexed();
        BARRIER;
        entry++;
    }
    BARRIER;
    set_entryhi(old_ctx);
    __restore_flags(flags);
}
Example #18
0
/*
 * Verify that we are doing an approved SETFEATURES_XFER with respect
 * to the hardware being able to support request.  Since some hardware
 * can improperly report capabilities, we check to see if the host adapter
 * in combination with the device (usually a disk) properly detect
 * and acknowledge each end of the ribbon.
 */
int ide_ata66_check (ide_drive_t *drive, byte cmd, byte nsect, byte feature)
{
    if ((cmd == WIN_SETFEATURES) &&
            (nsect > XFER_UDMA_2) &&
            (feature == SETFEATURES_XFER))
    {
        if (!HWIF(drive)->udma_four)
        {
            printk("%s: Speed warning: UDMA 3/4/5 is not functional.\n", HWIF(drive)->name);
            return 1;
        }
#ifndef CONFIG_IDEDMA_IVB
        if ((drive->id->hw_config & 0x6000) == 0)
        {
#else /* !CONFIG_IDEDMA_IVB */
        if (((drive->id->hw_config & 0x2000) == 0) ||
                ((drive->id->hw_config & 0x4000) == 0))
        {
#endif /* CONFIG_IDEDMA_IVB */
            printk("%s: Speed warning: UDMA 3/4/5 is not functional.\n", drive->name);
            return 1;
        }
    }
    return 0;
}

/*
 * Backside of the HDIO_DRIVE_CMD call to SETFEATURES_XFER.
 * 1 : Safe to update drive->id DMA registers.
 * 0 : Update not allowed.
 */
int set_transfer (ide_drive_t *drive, byte cmd, byte nsect, byte feature)
{
    if ((cmd == WIN_SETFEATURES) &&
            (nsect >= XFER_SW_DMA_0) &&
            (feature == SETFEATURES_XFER) &&
            (drive->id->dma_ultra ||
             drive->id->dma_mword ||
             drive->id->dma_1word))
        return 1;

    return 0;
}

/*
 *  All hosts that use the 80c ribbon must use this!
 */
byte eighty_ninty_three (ide_drive_t *drive)
{
    return ((byte) ((HWIF(drive)->udma_four) &&
#ifndef CONFIG_IDEDMA_IVB
                    (drive->id->hw_config & 0x4000) &&
#endif /* CONFIG_IDEDMA_IVB */
                    (drive->id->hw_config & 0x6000)) ? 1 : 0);
}

/*
 * Similar to ide_wait_stat(), except it never calls ide_error internally.
 * This is a kludge to handle the new ide_config_drive_speed() function,
 * and should not otherwise be used anywhere.  Eventually, the tuneprocs
 * should be updated to return ide_startstop_t, in which case we can get
 * rid of this abomination again.  :)   -ml
 *
 * It is gone..........
 *
 * const char *msg == consider adding for verbose errors.
 */
int ide_config_drive_speed (ide_drive_t *drive, byte speed)
{
    ide_hwif_t *hwif = HWIF(drive);
    int	i, error = 1;
    byte stat;

#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
    byte unit = (drive->select.b.unit & 0x01);
    outb(inb(hwif->dma_base+2) & ~(1<<(5+unit)), hwif->dma_base+2);
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */

    /*
     * Don't use ide_wait_cmd here - it will
     * attempt to set_geometry and recalibrate,
     * but for some reason these don't work at
     * this point (lost interrupt).
     */
    /*
     * Select the drive, and issue the SETFEATURES command
     */
    disable_irq(hwif->irq);	/* disable_irq_nosync ?? */
    udelay(1);
    SELECT_DRIVE(HWIF(drive), drive);
    SELECT_MASK(HWIF(drive), drive, 0);
    udelay(1);
    if (IDE_CONTROL_REG)
        OUT_BYTE(drive->ctl | 2, IDE_CONTROL_REG);
    OUT_BYTE(speed, IDE_NSECTOR_REG);
    OUT_BYTE(SETFEATURES_XFER, IDE_FEATURE_REG);
    OUT_BYTE(WIN_SETFEATURES, IDE_COMMAND_REG);
    if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
        OUT_BYTE(drive->ctl, IDE_CONTROL_REG);
    udelay(1);
    /*
     * Wait for drive to become non-BUSY
     */
    if ((stat = GET_STAT()) & BUSY_STAT)
    {
        unsigned long flags, timeout;
        __save_flags(flags);	/* local CPU only */
        ide__sti();		/* local CPU only -- for jiffies */
        timeout = jiffies + WAIT_CMD;
        while ((stat = GET_STAT()) & BUSY_STAT)
        {
            if (0 < (signed long)(jiffies - timeout))
                break;
        }
        __restore_flags(flags); /* local CPU only */
    }

    /*
     * Allow status to settle, then read it again.
     * A few rare drives vastly violate the 400ns spec here,
     * so we'll wait up to 10usec for a "good" status
     * rather than expensively fail things immediately.
     * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
     */
    for (i = 0; i < 10; i++)
    {
        udelay(1);
        if (OK_STAT((stat = GET_STAT()), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT))
        {
            error = 0;
            break;
        }
    }

    SELECT_MASK(HWIF(drive), drive, 0);

    enable_irq(hwif->irq);

    if (error)
    {
        (void) ide_dump_status(drive, "set_drive_speed_status", stat);
        return error;
    }

    drive->id->dma_ultra &= ~0xFF00;
    drive->id->dma_mword &= ~0x0F00;
    drive->id->dma_1word &= ~0x0F00;

#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
    if (speed > XFER_PIO_4)
    {
        outb(inb(hwif->dma_base+2)|(1<<(5+unit)), hwif->dma_base+2);
    }
    else
    {
        outb(inb(hwif->dma_base+2) & ~(1<<(5+unit)), hwif->dma_base+2);
    }
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */

    switch(speed)
    {
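    /*
     * In the identify data the low byte holds the supported-mode bits
     * and the high byte the currently selected mode, so e.g. 0x0404
     * marks UDMA2 as both supported and selected.
     */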
    case XFER_UDMA_7:
        drive->id->dma_ultra |= 0x8080;
        break;
    case XFER_UDMA_6:
        drive->id->dma_ultra |= 0x4040;
        break;
    case XFER_UDMA_5:
        drive->id->dma_ultra |= 0x2020;
        break;
    case XFER_UDMA_4:
        drive->id->dma_ultra |= 0x1010;
        break;
    case XFER_UDMA_3:
        drive->id->dma_ultra |= 0x0808;
        break;
    case XFER_UDMA_2:
        drive->id->dma_ultra |= 0x0404;
        break;
    case XFER_UDMA_1:
        drive->id->dma_ultra |= 0x0202;
        break;
    case XFER_UDMA_0:
        drive->id->dma_ultra |= 0x0101;
        break;
    case XFER_MW_DMA_2:
        drive->id->dma_mword |= 0x0404;
        break;
    case XFER_MW_DMA_1:
        drive->id->dma_mword |= 0x0202;
        break;
    case XFER_MW_DMA_0:
        drive->id->dma_mword |= 0x0101;
        break;
    case XFER_SW_DMA_2:
        drive->id->dma_1word |= 0x0404;
        break;
    case XFER_SW_DMA_1:
        drive->id->dma_1word |= 0x0202;
        break;
    case XFER_SW_DMA_0:
        drive->id->dma_1word |= 0x0101;
        break;
    default:
        break;
    }
    return error;
}
Example #19
0
/*
 * Set a new transfer mode at the drive
 */
int cs5530_set_xfer_mode (ide_drive_t *drive, byte mode)
{
	int		i, error = 1;
	byte		stat;
	ide_hwif_t	*hwif = HWIF(drive);

	printk("%s: cs5530_set_xfer_mode(%s)\n", drive->name, strmode(mode));
	/*
	 * If this is a DMA mode setting, then turn off all DMA bits.
	 * We will set one of them back on afterwards, if all goes well.
	 *
	 * Not sure why this is needed (it looks very silly),
	 * but other IDE chipset drivers also do this fiddling.  ???? -ml
 	 */
	switch (mode) {
		case XFER_UDMA_4:
		case XFER_UDMA_3:
		case XFER_UDMA_2:
		case XFER_UDMA_1:
		case XFER_UDMA_0:
		case XFER_MW_DMA_2:
		case XFER_MW_DMA_1:
		case XFER_MW_DMA_0:
		case XFER_SW_DMA_2:
		case XFER_SW_DMA_1:
		case XFER_SW_DMA_0:
			drive->id->dma_ultra &= ~0xFF00;
			drive->id->dma_mword &= ~0x0F00;
			drive->id->dma_1word &= ~0x0F00;
	}

	/*
	 * Select the drive, and issue the SETFEATURES command
	 */
	disable_irq(hwif->irq);
	udelay(1);
	SELECT_DRIVE(HWIF(drive), drive);
	udelay(1);
	if (IDE_CONTROL_REG)
		OUT_BYTE(drive->ctl | 2, IDE_CONTROL_REG);
	OUT_BYTE(mode, IDE_NSECTOR_REG);
	OUT_BYTE(SETFEATURES_XFER, IDE_FEATURE_REG);
	OUT_BYTE(WIN_SETFEATURES, IDE_COMMAND_REG);
	udelay(1);	/* spec allows drive 400ns to assert "BUSY" */

	/*
	 * Wait for drive to become non-BUSY
	 */
	if ((stat = GET_STAT()) & BUSY_STAT) {
		unsigned long flags, timeout;
		__save_flags(flags);	/* local CPU only */
		ide__sti();		/* local CPU only -- for jiffies */
		timeout = jiffies + WAIT_CMD;
		while ((stat = GET_STAT()) & BUSY_STAT) {
			if (0 < (signed long)(jiffies - timeout))
				break;
		}
		__restore_flags(flags); /* local CPU only */
	}

	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		if (OK_STAT((stat = GET_STAT()), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT)) {
			error = 0;
			break;
		}
	}
	enable_irq(hwif->irq);

	/*
	 * Turn dma bit on if all is okay
	 */
	if (error) {
		(void) ide_dump_status(drive, "cs5530_set_xfer_mode", stat);
	} else {
		switch (mode) {
			case XFER_UDMA_4:   drive->id->dma_ultra |= 0x1010; break;
			case XFER_UDMA_3:   drive->id->dma_ultra |= 0x0808; break;
			case XFER_UDMA_2:   drive->id->dma_ultra |= 0x0404; break;
			case XFER_UDMA_1:   drive->id->dma_ultra |= 0x0202; break;
			case XFER_UDMA_0:   drive->id->dma_ultra |= 0x0101; break;
			case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break;
			case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break;
			case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break;
			case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break;
			case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break;
			case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break;
		}
	}
	return error;
}
Example #20
0
static void __init sun4m_init_timers(void (*counter_fn)(int, void *, struct pt_regs *))
{
	int reg_count, irq, cpu;
	struct linux_prom_registers cnt_regs[PROMREG_MAX];
	int obio_node, cnt_node;
	struct resource r;

	cnt_node = 0;
	if((obio_node =
	    prom_searchsiblings (prom_getchild(prom_root_node), "obio")) == 0 ||
	   (obio_node = prom_getchild (obio_node)) == 0 ||
	   (cnt_node = prom_searchsiblings (obio_node, "counter")) == 0) {
		prom_printf("Cannot find /obio/counter node\n");
		prom_halt();
	}
	reg_count = prom_getproperty(cnt_node, "reg",
				     (void *) cnt_regs, sizeof(cnt_regs));
	reg_count = (reg_count/sizeof(struct linux_prom_registers));
    
	/* Apply the obio ranges to the timer registers. */
	prom_apply_obio_ranges(cnt_regs, reg_count);
    
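	/*
	 * Entry 4 becomes the system counter (the last real register
	 * block); entries 1-3 are derived from entry 0 at one-page
	 * offsets and hold the per-CPU timers.
	 */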
	cnt_regs[4].phys_addr = cnt_regs[reg_count-1].phys_addr;
	cnt_regs[4].reg_size = cnt_regs[reg_count-1].reg_size;
	cnt_regs[4].which_io = cnt_regs[reg_count-1].which_io;
	for(obio_node = 1; obio_node < 4; obio_node++) {
		cnt_regs[obio_node].phys_addr =
			cnt_regs[obio_node-1].phys_addr + PAGE_SIZE;
		cnt_regs[obio_node].reg_size = cnt_regs[obio_node-1].reg_size;
		cnt_regs[obio_node].which_io = cnt_regs[obio_node-1].which_io;
	}

	memset((char*)&r, 0, sizeof(struct resource));
	/* Map the per-cpu Counter registers. */
	r.flags = cnt_regs[0].which_io;
	r.start = cnt_regs[0].phys_addr;
	sun4m_timers = (struct sun4m_timer_regs *) sbus_ioremap(&r, 0,
	    PAGE_SIZE*SUN4M_NCPUS, "sun4m_cpu_cnt");
	/* Map the system Counter register. */
	/* XXX Here we expect consecutive calls to yield adjacent maps. */
	r.flags = cnt_regs[4].which_io;
	r.start = cnt_regs[4].phys_addr;
	sbus_ioremap(&r, 0, cnt_regs[4].reg_size, "sun4m_sys_cnt");

	sun4m_timers->l10_timer_limit =  (((1000000/HZ) + 1) << 10);
	master_l10_counter = &sun4m_timers->l10_cur_count;
	master_l10_limit = &sun4m_timers->l10_timer_limit;

	irq = request_irq(TIMER_IRQ,
			  counter_fn,
			  (SA_INTERRUPT | SA_STATIC_ALLOC),
			  "timer", NULL);
	if (irq) {
		prom_printf("time_init: unable to attach IRQ%d\n",TIMER_IRQ);
		prom_halt();
	}
    
	if(linux_num_cpus > 1) {
		for(cpu = 0; cpu < 4; cpu++)
			sun4m_timers->cpu_timers[cpu].l14_timer_limit = 0;
		sun4m_interrupts->set = SUN4M_INT_E14;
	} else {
		sun4m_timers->cpu_timers[0].l14_timer_limit = 0;
	}
#ifdef CONFIG_SMP
	{
		unsigned long flags;
		extern unsigned long lvl14_save[4];
		struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];

		/* For SMP we use the level 14 ticker; however, the bootup code
		 * has copied the firmware's level 14 vector into the boot CPU's
		 * trap table. We must fix this now or we get squashed.
		 */
		__save_and_cli(flags);
		trap_table->inst_one = lvl14_save[0];
		trap_table->inst_two = lvl14_save[1];
		trap_table->inst_three = lvl14_save[2];
		trap_table->inst_four = lvl14_save[3];
		local_flush_cache_all();
		__restore_flags(flags);
	}
#endif
}
Example #21
0
int
kdb(int reason, int error, struct pt_regs *regs)
{
	char	cmdbuf[255];
	char	*cmd;
	int	diag;
	unsigned long flags;
	struct  pt_regs func_regs;

	kdb_new_cpu = -1;

	/*
	 * Remove the breakpoints to prevent double-faults
	 * if kdb happens to use a function where a breakpoint
	 * has been enabled.
	 */

	kdb_bp_remove();

	if (reason != KDB_REASON_DEBUG) {
		kdb_printf("Entering kdb ");
#if defined(__SMP__)
		kdb_printf("on processor %d ", smp_processor_id());
#endif
	}

	switch (reason) {
	case KDB_REASON_DEBUG:
		/*
		 * If re-entering kdb after a single step
		 * command, don't print the message.
		 */
		diag = kdb_db_trap(regs);
		if (diag == 0) {
			kdb_printf("Entering kdb ");
#if defined(__SMP__)
			kdb_printf("on processor %d ", smp_processor_id());
#endif
		} else if (diag == 2) {
			/*
			 * in middle of ssb command.  Just return.
			 */
			return 0;
		}
		break;
	case KDB_REASON_FAULT:
		break;
	case KDB_REASON_INT:
		kdb_printf("due to KDB_ENTER() call\n");
		break;
	case KDB_REASON_KEYBOARD:
		kdb_printf("due to Keyboard Entry\n");
		break;
	case KDB_REASON_SWITCH:
		kdb_printf("due to cpu switch\n");
		break;
	case KDB_REASON_ENTER:
		kdb_printf("due to function call\n");
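		/*
		 * Synthesize a pt_regs from the current frame so that
		 * backtraces from a direct kdb() call work.
		 */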
		regs = &func_regs;
		regs->xcs = 0;
#if defined(CONFIG_KDB_FRAMEPTR)
		asm volatile("movl %%ebp,%0":"=m" (*(int *)&regs->ebp));
#endif
		asm volatile("movl %%esp,%0":"=m" (*(int *)&regs->esp));
		regs->eip = (long) &kdb;	/* for traceback. */
		break;
	case KDB_REASON_PANIC:
		kdb_printf("due to panic @ 0x%8.8x\n", regs->eip);
		kdbdumpregs(regs, NULL, NULL);
		break;
	case KDB_REASON_BREAK:
		kdb_printf("due to Breakpoint @ 0x%8.8x\n", regs->eip);
		break;
	default:
		break;
	}

#if defined(__SMP__)
	/*
	 * If SMP, stop other processors
	 */
	if (smp_num_cpus > 1) {
		/*
		 * Stop all other processors
		 */
		smp_kdb_stop(1);
	}
#endif	/* __SMP__ */

	/*
	 * Disable interrupts during kdb command processing 
	 */
	__save_flags(flags);
	__cli();

	while (1) {
		/*
		 * Initialize pager context.
		 */
		kdb_nextline = 1;

		/*
		 * Use kdb_setjmp/kdb_longjmp to break out of 
		 * the pager early.
		 */
		if (kdb_setjmp(&kdbjmpbuf)) {
			/*
			 * Command aborted (usually in pager)
			 */

			/*
		 	 * XXX - need to abort a SSB ?
			 */
			continue;
		}

		/*
		 * Fetch command from keyboard
		 */
		cmd = kbd_getstr(cmdbuf, sizeof(cmdbuf), kdbgetenv("PROMPT"));

		diag = kdb_parse(cmd, regs);
		if (diag == KDB_NOTFOUND) {
			kdb_printf("Unknown kdb command: '%s'\n", cmd);
			diag = 0;
		}
		if ((diag == KDB_GO)
		 || (diag == KDB_CPUSWITCH))
			break;	/* Go or cpu switch command */

		if (diag)
			kdb_cmderror(diag);
	}

	/*
	 * Set up debug registers.
	 */
	kdb_bp_install();

#if defined(__SMP__)
	if ((diag == KDB_CPUSWITCH)
	 && (kdb_new_cpu != -1)) {
		/*
		 * Leaving the other CPUs at the barrier, except the
		 * one we are switching to, we'll send ourselves a 
		 * kdb IPI before allowing interrupts so it will get
		 * caught ASAP and get this CPU back waiting at the barrier.
		 */
		
		smp_kdb_stop(0);	/* Stop ourself */

		/*
	 	 * let the new cpu go.
		 */
		clear_bit(kdb_new_cpu, &smp_kdb_wait);
	} else {
		/*
		 * Let the other processors continue.
		 */
		smp_kdb_wait = 0;
	}
#endif

	kdb_flags &= ~(KDB_FLAG_SUPRESS|KDB_FLAG_FAULT);

	__restore_flags(flags);


	return 0;
}
Example #22
0
/*
 * s390_machine_check_handler
 *
 * machine check handler, dequeueing machine check entries
 *  and processing them
 */
static int s390_machine_check_handler( void *parm)
{
	struct semaphore *sem = parm;
	unsigned long     flags;
	mache_t          *pmache;

	int               found = 0;

        /* set name to something sensible */
        strcpy (current->comm, "kmcheck");


        /* block all signals */
        sigfillset(&current->blocked);

#ifdef S390_MACHCHK_DEBUG
	printk( KERN_NOTICE "mach_handler : ready\n");
#endif	

	do {

#ifdef S390_MACHCHK_DEBUG
		printk( KERN_NOTICE "mach_handler : waiting for wakeup\n");
#endif	

		down_interruptible( sem );

#ifdef S390_MACHCHK_DEBUG
		printk( KERN_NOTICE "\nmach_handler : wakeup ... \n");
#endif	
		found = 0; /* init ... */

		__save_flags( flags );
		__cli();

		do {

		pmache = s390_dequeue_mchchk();

		if ( pmache )
		{
			found = 1;
		
			if ( pmache->mcic.mcc.mcd.cp )
			{
				crwe_t *pcrwe_n;
				crwe_t *pcrwe_h;

				s390_do_crw_pending( pmache->mc.crwe );

				pcrwe_h = pmache->mc.crwe;
				pcrwe_n = pmache->mc.crwe->crwe_next;

				pmache->mcic.mcc.mcd.cp = 0;
				pmache->mc.crwe         = NULL;

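				/* Return the processed CRW entries to the free list. */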
				spin_lock( &crw_queue_lock);

				while ( pcrwe_h )
				{
					pcrwe_h->crwe_next = crw_buffer_anchor;
					crw_buffer_anchor  = pcrwe_h;
					pcrwe_h            = pcrwe_n;

					if ( pcrwe_h != NULL )
						pcrwe_n = pcrwe_h->crwe_next;

				} /* endwhile */

				spin_unlock( &crw_queue_lock);

			} /* endif */

#ifdef CONFIG_MACHCHK_WARNING
			if ( pmache->mcic.mcc.mcd.w )
			{
				ctrl_alt_del();		// shutdown NOW!
#ifdef S390_MACHCHK_DEBUG
				printk( KERN_DEBUG "mach_handler : kill -SIGPWR init\n");
#endif
			} /* endif */
#endif

			s390_enqueue_free_mchchk( pmache );
		}
		else
		{

			// unconditional surrender ...
#ifdef S390_MACHCHK_DEBUG
			printk( KERN_DEBUG "mach_handler : nothing to do, sleeping\n");
#endif	

		} /* endif */	

		} while ( pmache );

		__restore_flags( flags );

	} while ( 1 );

	return 0;
}