Example #1
static int interrupt_test_worker(void *unused) 
{
	int id = ++irqtestcount;
	int it = 0;
	unsigned long flags, flags2;

	printk("ITW: thread %d started.\n", id);

	while(1) {
		__save_flags(flags2);
		if(jiffies % 3) {
			printk("ITW %2d %5d: irqsaving          (%lx)\n", id, it, flags2);
			spin_lock_irqsave(&int_test_spin, flags);
		} else {
			printk("ITW %2d %5d: spin_lock_irqing   (%lx)\n", id, it, flags2);
			spin_lock_irq(&int_test_spin);
		}

		__save_flags(flags2);
		printk("ITW %2d %5d: locked, sv_waiting (%lx).\n", id, it, flags2);
		sv_wait(&int_test_sv, 0, 0);

		__save_flags(flags2);
		printk("ITW %2d %5d: wait finished      (%lx), pausing\n", id, it, flags2);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(jiffies & 0xf);
		if(current->state != TASK_RUNNING)
		  printk("ITW:  current->state isn't RUNNING after schedule!\n");
		it++;
	}
}
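
Every example in this listing is built on the same 2.4-era skeleton: save the local CPU's interrupt flag, disable interrupts, touch state shared with an interrupt handler, then restore the flag exactly as it was. A minimal sketch of that idiom (hypothetical; shared_counter and touch_shared() are illustrative names, and the macros are the old <asm/system.h> ones used throughout these snippets):

#include <asm/system.h>		/* __save_flags(), __cli(), __restore_flags() on 2.4 kernels */

static int shared_counter;	/* illustrative data also touched from an interrupt handler */

static void touch_shared(void)
{
	unsigned long flags;

	__save_flags(flags);	/* remember the current IRQ state (local CPU only) */
	__cli();		/* disable interrupts on this CPU */
	shared_counter++;	/* critical section: no local interrupt can intrude */
	__restore_flags(flags);	/* restore whatever the caller had, enabled or disabled */
}

Note that this only shuts out the local CPU's interrupt handlers; on SMP the snippets below pair it with a spinlock, or use spin_lock_irqsave() as the worker above does, whenever other CPUs can reach the same data.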
Example #2
static inline void send_IPI_mask_bitmask(int mask, int vector)
{
	unsigned long cfg;
	unsigned long flags;

	__save_flags(flags);
	__cli();

		
	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();
		
	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);
		
	/*
	 * program the ICR 
	 */
	cfg = __prepare_ICR(0, vector);
			
	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);

	__restore_flags(flags);
}
Example #3
/*
 * This routine is invoked from ide.c to prepare for access to a given drive.
 */
static void ht6560b_selectproc (ide_drive_t *drive)
{
    byte t;
    unsigned long flags;
    static byte current_select = 0;
    static byte current_timing = 0;
    byte select = ht6560b_selects[HWIF(drive)->index][drive->select.b.unit];
    byte timing = ht6560b_timings[HWIF(drive)->index][drive->select.b.unit];

    if (select != current_select || timing != current_timing) {
        current_select = select;
        current_timing = timing;
        __save_flags (flags);	/* local CPU only */
        __cli();		/* local CPU only */
        (void) inb(HT_SELECT_PORT);
        (void) inb(HT_SELECT_PORT);
        (void) inb(HT_SELECT_PORT);
        /*
         * Note: input bits are reversed to output bits!!
         */
        t = inb(HT_SELECT_PORT) ^ 0x3f;
        t &= (~0x21);
        t |= (current_select & 0x21);
        outb(t, HT_SELECT_PORT);
        /*
         * Set timing for this drive:
         */
        outb (timing, IDE_SELECT_REG);
        (void) inb (IDE_STATUS_REG);
        __restore_flags (flags);	/* local CPU only */
#ifdef DEBUG
        printk("ht6560b: %s: select=%#x timing=%#x\n", drive->name, t, timing);
#endif
    }
}
Example #4
/*
 * This routine is invoked from ide.c to prepare for access to a given drive.
 */
static void ht6560b_selectproc (ide_drive_t *drive)
{
	unsigned long flags;
	static byte current_select = 0;
	static byte current_timing = 0;
	byte select, timing;
	
	__save_flags (flags);	/* local CPU only */
	__cli();		/* local CPU only */
	
	select = HT_CONFIG(drive);
	timing = HT_TIMING(drive);
	
	if (select != current_select || timing != current_timing) {
		current_select = select;
		current_timing = timing;
		if (drive->media != ide_disk || !drive->present)
			select |= HT_PREFETCH_MODE;
		(void) inb(HT_CONFIG_PORT);
		(void) inb(HT_CONFIG_PORT);
		(void) inb(HT_CONFIG_PORT);
		(void) inb(HT_CONFIG_PORT);
		outb(select, HT_CONFIG_PORT);
		/*
		 * Set timing for this drive:
		 */
		outb(timing, IDE_SELECT_REG);
		(void) inb(IDE_STATUS_REG);
#ifdef DEBUG
		printk("ht6560b: %s: select=%#x timing=%#x\n", drive->name, select, timing);
#endif
	}
	__restore_flags (flags);	/* local CPU only */
}
Example #5
/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;
	int cpu;

	__save_flags(flags);
	local_enabled = (flags & ST0_IE);
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	preempt_disable();
	cpu = smp_processor_id();
	if (!local_irq_count(cpu)) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == cpu)
			retval = 0;
	}
	preempt_enable();

	return retval;
}
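
The encoding returned above is only meaningful to its counterpart. In 2.4-era ports the restoring side is typically a four-way switch over those codes; a sketch of what such a __global_restore_flags() looks like (not taken from this listing, reconstructed from the encoding documented above):

void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:			/* global cli */
		__global_cli();
		break;
	case 1:			/* global sti */
		__global_sti();
		break;
	case 2:			/* local cli */
		__cli();
		break;
	case 3:			/* local sti */
		__sti();
		break;
	default:
		printk("global_restore_flags: %08lx\n", flags);
	}
}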
Example #6
static void apic_pm_suspend(void *data)
{
	unsigned int l, h;
	unsigned long flags;

	if (apic_pm_state.perfctr_pmdev)
		pm_send(apic_pm_state.perfctr_pmdev, PM_SUSPEND, data);
	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
	__save_flags(flags);
	__cli();
	disable_local_APIC();
	rdmsr(MSR_IA32_APICBASE, l, h);
	l &= ~MSR_IA32_APICBASE_ENABLE;
	wrmsr(MSR_IA32_APICBASE, l, h);
	__restore_flags(flags);
}
Example #7
void __init init_umc8672 (void)	/* called from ide.c */
{
	unsigned long flags;

	__save_flags(flags);	/* local CPU only */
	__cli();		/* local CPU only */
	if (check_region(0x108, 2)) {
		__restore_flags(flags);
		printk("\numc8672: PORTS 0x108-0x109 ALREADY IN USE\n");
		return;
	}
	outb_p (0x5A,0x108); /* enable umc */
	if (in_umc (0xd5) != 0xa0)
	{
		__restore_flags(flags);	/* local CPU only */
		printk ("umc8672: not found\n");
		return;  
	}
	outb_p (0xa5,0x108); /* disable umc */

	umc_set_speeds (current_speeds);
	__restore_flags(flags);	/* local CPU only */

	request_region(0x108, 2, "umc8672");
	ide_hwifs[0].chipset = ide_umc8672;
	ide_hwifs[1].chipset = ide_umc8672;
	ide_hwifs[0].tuneproc = &tune_umc;
	ide_hwifs[1].tuneproc = &tune_umc;
	ide_hwifs[0].mate = &ide_hwifs[1];
	ide_hwifs[1].mate = &ide_hwifs[0];
	ide_hwifs[1].channel = 1;
}
Example #8
/*
 * Auto-detect the IDE controller port.
 */
static int __init findPort (void)
{
	int i;
	byte t;
	unsigned long flags;

	__save_flags(flags);	/* local CPU only */
	__cli();		/* local CPU only */
	for (i = 0; i < ALI_NUM_PORTS; ++i) {
		basePort = ports[i];
		regOff = inb(basePort);
		for (regOn = 0x30; regOn <= 0x33; ++regOn) {
			outb_p(regOn, basePort);
			if (inb(basePort) == regOn) {
				regPort = basePort + 4;
				dataPort = basePort + 8;
				t = inReg(0) & 0xf0;
				outb_p(regOff, basePort);
				__restore_flags(flags);	/* local CPU only */
				if (t != 0x50)
					return 0;
				return 1;  /* success */
			}
		}
		outb_p(regOff, basePort);
	}
	__restore_flags(flags);	/* local CPU only */
	return 0;
}
Example #9
static void apic_pm_resume(void *data)
{
	unsigned int l, h;
	unsigned long flags;

	__save_flags(flags);
	__cli();
	rdmsr(MSR_IA32_APICBASE, l, h);
	l &= ~MSR_IA32_APICBASE_BASE;
	l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
	wrmsr(MSR_IA32_APICBASE, l, h);
	apic_write(APIC_ID, apic_pm_state.apic_id);
	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
	__restore_flags(flags);
	if (apic_pm_state.perfctr_pmdev)
		pm_send(apic_pm_state.perfctr_pmdev, PM_RESUME, data);
}
Example #10
static void flat_send_IPI_mask(unsigned long cpumask, int vector)
{
	unsigned long mask = cpumask;
	unsigned long cfg;
	unsigned long flags;

	__save_flags(flags);
	__cli();

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, APIC_DEST_LOGICAL);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
	__restore_flags(flags);
}
Example #11
/*==========================================================================*
 * Name:         flush_tlb_others
 *
 * Description:  This routine requests other CPU to execute flush TLB.
 *               1.Setup parameters.
 *               2.Send 'INVALIDATE_TLB_IPI' to other CPU.
 *                 Request other CPU to execute 'smp_invalidate_interrupt()'.
 *               3.Wait for other CPUs operation finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpumask - bitmap of target CPUs
 *               *mm -  a pointer to the mm struct for flush TLB
 *               *vma -  a pointer to the vma struct include va
 *               va - virtual address for flush TLB
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
	struct vm_area_struct *vma, unsigned long va)
{
	unsigned long *mask;
#ifdef DEBUG_SMP
	unsigned long flags;
	__save_flags(flags);
	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
		BUG();
#endif /* DEBUG_SMP */

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - we do not send IPIs to not-yet booted CPUs.
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpumask_empty(&cpumask));

	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpumask_and(&cpumask, &cpumask, cpu_online_mask);
	if (cpumask_empty(&cpumask))
		return;

	/*
	 * i'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * Temporarily this turns IRQs off, so that lockups are
	 * detected by the NMI watchdog.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_vma = vma;
	flush_va = va;
	mask=cpumask_bits(&cpumask);
	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);

	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);

	while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
		/* nothing. lockup detection does not belong here */
		mb();
	}

	flush_mm = NULL;
	flush_vma = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
Example #12
/*
 * Update the drive->id information by re-reading the IDENTIFY data.
 */
int ide_driveid_update (ide_drive_t *drive)
{
    /*
     * Re-read drive->id for possible DMA mode
     * change (copied from ide-probe.c)
     */
    struct hd_driveid *id;
    unsigned long timeout, flags;

    SELECT_MASK(HWIF(drive), drive, 1);
    if (IDE_CONTROL_REG)
        OUT_BYTE(drive->ctl,IDE_CONTROL_REG);
    ide_delay_50ms();
    OUT_BYTE(WIN_IDENTIFY, IDE_COMMAND_REG);
    timeout = jiffies + WAIT_WORSTCASE;
    do
    {
        if (0 < (signed long)(jiffies - timeout))
        {
            SELECT_MASK(HWIF(drive), drive, 0);
            return 0;	/* drive timed-out */
        }
        ide_delay_50ms();	/* give drive a breather */
    }
    while (IN_BYTE(IDE_ALTSTATUS_REG) & BUSY_STAT);
    ide_delay_50ms();	/* wait for IRQ and DRQ_STAT */
    if (!OK_STAT(GET_STAT(),DRQ_STAT,BAD_R_STAT))
    {
        SELECT_MASK(HWIF(drive), drive, 0);
        printk("%s: CHECK for good STATUS\n", drive->name);
        return 0;
    }
    __save_flags(flags);	/* local CPU only */
    __cli();		/* local CPU only; some systems need this */
    SELECT_MASK(HWIF(drive), drive, 0);
    id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
    if (!id)
    {
        __restore_flags(flags);	/* local CPU only */
        return 0;
    }
    ide_input_data(drive, id, SECTOR_WORDS);
    (void) GET_STAT();	/* clear drive IRQ */
    ide__sti();		/* local CPU only */
    __restore_flags(flags);	/* local CPU only */
    ide_fix_driveid(id);
    if (id)
    {
        drive->id->dma_ultra = id->dma_ultra;
        drive->id->dma_mword = id->dma_mword;
        drive->id->dma_1word = id->dma_1word;
        /* anything more ? */
        kfree(id);
    }

    return 1;
}
Example #13
void __global_cli(void)
{
	unsigned long flags;

	__save_flags(flags);
	if (flags & ST0_IE) {
		int cpu = smp_processor_id();
		__cli();
		if (!local_irq_count(cpu))
			get_irqlock(cpu);
	}
}
Example #14
static struct pci_ops * __devinit pci_check_direct(void)
{
	unsigned int tmp;
	unsigned long flags;

	__save_flags(flags); __cli();

	/*
	 * Check if configuration type 1 works.
	 */
	if (pci_probe & PCI_PROBE_CONF1) {
		outb (0x01, 0xCFB);
		tmp = inl (0xCF8);
		outl (0x80000000, 0xCF8);
		if (inl (0xCF8) == 0x80000000 &&
		    pci_sanity_check(&pci_direct_conf1)) {
			outl (tmp, 0xCF8);
			__restore_flags(flags);
			printk(KERN_INFO "PCI: Using configuration type 1\n");
			request_region(0xCF8, 8, "PCI conf1");

#ifdef CONFIG_MULTIQUAD			
			/* Multi-Quad has an extended PCI Conf1 */
			if(clustered_apic_mode == CLUSTERED_APIC_NUMAQ)
				return &pci_direct_mq_conf1;
#endif				
			return &pci_direct_conf1;
		}
		outl (tmp, 0xCF8);
	}

	/*
	 * Check if configuration type 2 works.
	 */
	if (pci_probe & PCI_PROBE_CONF2) {
		outb (0x00, 0xCFB);
		outb (0x00, 0xCF8);
		outb (0x00, 0xCFA);
		if (inb (0xCF8) == 0x00 && inb (0xCFA) == 0x00 &&
		    pci_sanity_check(&pci_direct_conf2)) {
			__restore_flags(flags);
			printk(KERN_INFO "PCI: Using configuration type 2\n");
			request_region(0xCF8, 4, "PCI conf2");
			return &pci_direct_conf2;
		}
	}

	__restore_flags(flags);
	return NULL;
}
Example #15
/*==========================================================================*
 * Name:         smp_call_function
 *
 * Description:  This routine sends a 'CALL_FUNCTION_IPI' to all other CPUs
 *               in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *func - The function to run. This must be fast and
 *                       non-blocking.
 *               *info - An arbitrary pointer to pass to the function.
 *               nonatomic - currently unused.
 *               wait - If true, wait (atomically) until function has
 *                      completed on other CPUs.
 *
 * Returns:      0 on success, else a negative status code. Does not return
 *               until remote CPUs are nearly ready to execute <<func>> or
 *               are or have executed.
 *
 * Cautions:     You must not call this function with disabled interrupts or
 *               from a hardware interrupt handler, you may call it from a
 *               bottom half handler.
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
	int wait)
{
	struct call_data_struct data;
	int cpus;

#ifdef DEBUG_SMP
	unsigned long flags;
	__save_flags(flags);
	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
		BUG();
#endif /* DEBUG_SMP */

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);
	cpus = num_online_cpus() - 1;

	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_IPI, 0);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	spin_unlock(&call_lock);

	return 0;
}
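
The doc comment above fixes the contract: the callback runs from the IPI handler on every other CPU, so it must be fast and non-blocking, and the caller has to be in process context with interrupts enabled. A hypothetical caller that respects this (the names count_cpu, cpus_seen and count_all_cpus are illustrative, not from the listing):

static atomic_t cpus_seen = ATOMIC_INIT(0);

/* Runs from the CALL_FUNCTION IPI on every other CPU: keep it short. */
static void count_cpu(void *info)
{
	atomic_inc(&cpus_seen);
}

static void count_all_cpus(void)
{
	/* process context, interrupts enabled, as the caution above requires */
	smp_call_function(count_cpu, NULL, 0, 1);	/* wait == 1: block until every CPU ran it */
	count_cpu(NULL);				/* the IPI skips the local CPU, so run it here too */
}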
Example #16
void setup_APIC_timer(void * data)
{
	unsigned int clocks = (unsigned int) data, slice, t0, t1;
	unsigned long flags;
	int delta;

	__save_flags(flags);
	__sti();
	/*
	 * ok, Intel has some smart code in their APIC that knows
	 * if a CPU was in 'hlt' lowpower mode, and this increases
	 * its APIC arbitration priority. To avoid the external timer
	 * IRQ APIC event being in synchron with the APIC clock we
	 * introduce an interrupt skew to spread out timer events.
	 *
	 * The number of slices within a 'big' timeslice is smp_num_cpus+1
	 */

	slice = clocks / (smp_num_cpus+1);
	printk("cpu: %d, clocks: %d, slice: %d\n",
		smp_processor_id(), clocks, slice);

	/*
	 * Wait for IRQ0's slice:
	 */
	wait_8254_wraparound();

	__setup_APIC_LVTT(clocks);

	t0 = apic_read(APIC_TMICT)*APIC_DIVISOR;
	/* Wait till TMCCT gets reloaded from TMICT... */
	do {
		t1 = apic_read(APIC_TMCCT)*APIC_DIVISOR;
		delta = (int)(t0 - t1 - slice*(smp_processor_id()+1));
	} while (delta >= 0);
	/* Now wait for our slice for real. */
	do {
		t1 = apic_read(APIC_TMCCT)*APIC_DIVISOR;
		delta = (int)(t0 - t1 - slice*(smp_processor_id()+1));
	} while (delta < 0);

	__setup_APIC_LVTT(clocks);

	printk("CPU%d<T0:%d,T1:%d,D:%d,S:%d,C:%d>\n",
			smp_processor_id(), t0, t1, delta, slice, clocks);

	__restore_flags(flags);
}
Example #17
/*
 * Initialize controller registers with default values.
 */
static int __init initRegisters (void) {
	RegInitializer *p;
	byte t;
	unsigned long flags;

	__save_flags(flags);	/* local CPU only */
	__cli();		/* local CPU only */
	outb_p(regOn, basePort);
	for (p = initData; p->reg != 0; ++p)
		outReg(p->data, p->reg);
	outb_p(0x01, regPort);
	t = inb(regPort) & 0x01;
	outb_p(regOff, basePort);
	__restore_flags(flags);	/* local CPU only */
	return t;
}
Example #18
void sv_broadcast(sv_t *sv) 
{
#ifdef SV_DEBUG_INTERRUPT_STATE
	if(sv->sv_flags & SV_INTS) {
		unsigned long flags;
		__save_flags(flags);
		if(SV_TEST_INTERRUPTS_ENABLED(flags))
			printk(KERN_ERR "sv_broadcast: SV_INTS and "
			       "interrupts enabled! (flags: 0x%lx)\n", flags);
	}
#endif /* SV_DEBUG_INTERRUPT_STATE */

	sv_lock(sv);
	wake_up_all(&sv->sv_waiters);
	sv_unlock(sv);
}
Example #19
static inline void send_IPI_mask_sequence(int mask, int vector)
{
	unsigned long cfg, flags;
	unsigned int query_cpu, query_mask;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send 
	 * to an arbitrary mask, so I do a unicasts to each CPU instead. This 
	 * should be modified to do 1 message per cluster ID - mbligh
	 */ 

	__save_flags(flags);
	__cli();

	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
		query_mask = 1 << query_cpu;
		if (query_mask & mask) {
		
			/*
			 * Wait for idle.
			 */
			apic_wait_icr_idle();
		
			/*
			 * prepare target chip field
			 */
			if(clustered_apic_mode == CLUSTERED_APIC_XAPIC)
				cfg = __prepare_ICR2(cpu_to_physical_apicid(query_cpu));
			else
				cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu));
			apic_write_around(APIC_ICR2, cfg);
		
			/*
			 * program the ICR 
			 */
			cfg = __prepare_ICR(0, vector);
			
			/*
			 * Send the IPI. The write to APIC_ICR fires this off.
			 */
			apic_write_around(APIC_ICR, cfg);
		}
	}
	__restore_flags(flags);
}
Example #20
static struct pci_ops * __devinit pci_check_direct(void)
{
	unsigned int tmp;
	unsigned long flags;

	__save_flags(flags); __cli();

	/*
	 * Check if configuration type 1 works.
	 */
	if (pci_probe & PCI_PROBE_CONF1) {
		outb (0x01, 0xCFB);
		tmp = inl (0xCF8);
		outl (0x80000000, 0xCF8);
		if (inl (0xCF8) == 0x80000000 &&
		    pci_sanity_check(&pci_direct_conf1)) {
			outl (tmp, 0xCF8);
			__restore_flags(flags);
			printk("PCI: Using configuration type 1\n");
			request_region(0xCF8, 8, "PCI conf1");
			return &pci_direct_conf1;
		}
		outl (tmp, 0xCF8);
	}

	/*
	 * Check if configuration type 2 works.
	 */
	if (pci_probe & PCI_PROBE_CONF2) {
		outb (0x00, 0xCFB);
		outb (0x00, 0xCF8);
		outb (0x00, 0xCFA);
		if (inb (0xCF8) == 0x00 && inb (0xCFA) == 0x00 &&
		    pci_sanity_check(&pci_direct_conf2)) {
			__restore_flags(flags);
			printk("PCI: Using configuration type 2\n");
			request_region(0xCF8, 4, "PCI conf2");
			return &pci_direct_conf2;
		}
	}

	__restore_flags(flags);
	return NULL;
}
Example #21
void sv_signal(sv_t *sv) 
{
	/* If interrupts can acquire this lock, they can also acquire the
	   sv_mon_lock, which we must already have to have called this, so
	   interrupts must be disabled already.  If interrupts cannot
	   contend for this lock, we don't have to worry about it. */

#ifdef SV_DEBUG_INTERRUPT_STATE
	if(sv->sv_flags & SV_INTS) {
		unsigned long flags;
		__save_flags(flags);
		if(SV_TEST_INTERRUPTS_ENABLED(flags))
			printk(KERN_ERR "sv_signal: SV_INTS and "
			"interrupts enabled! (flags: 0x%lx)\n", flags);
	}
#endif /* SV_DEBUG_INTERRUPT_STATE */

	sv_lock(sv);
	wake_up(&sv->sv_waiters);
	sv_unlock(sv);
}
Example #22
static void apic_pm_resume(void *data)
{
	unsigned int l, h;
	unsigned long flags;

	__save_flags(flags);
	__cli();

	/*
	 * Make sure the APICBASE points to the right address
	 *
	 * FIXME! This will be wrong if we ever support suspend on
	 * SMP! We'll need to do this as part of the CPU restore!
	 */
	rdmsr(MSR_IA32_APICBASE, l, h);
	l &= ~MSR_IA32_APICBASE_BASE;
	l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
	wrmsr(MSR_IA32_APICBASE, l, h);

	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
	apic_write(APIC_ID, apic_pm_state.apic_id);
	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
	apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	__restore_flags(flags);
	if (apic_pm_state.perfctr_pmdev)
		pm_send(apic_pm_state.perfctr_pmdev, PM_RESUME, data);
}
Example #23
int
kdb(int reason, int error, struct pt_regs *regs)
{
	char	cmdbuf[255];
	char	*cmd;
	int	diag;
	unsigned long flags;
	struct  pt_regs func_regs;

	kdb_new_cpu = -1;

	/*
	 * Remove the breakpoints to prevent double-faults
	 * if kdb happens to use a function where a breakpoint
	 * has been enabled.
	 */

	kdb_bp_remove();

	if (reason != KDB_REASON_DEBUG) {
		kdb_printf("Entering kdb ");
#if defined(__SMP__)
		kdb_printf("on processor %d ", smp_processor_id());
#endif
	}

	switch (reason) {
	case KDB_REASON_DEBUG:
		/*
		 * If re-entering kdb after a single step
		 * command, don't print the message.
		 */
		diag = kdb_db_trap(regs);
		if (diag == 0) {
			kdb_printf("Entering kdb ");
#if defined(__SMP__)
			kdb_printf("on processor %d ", smp_processor_id());
#endif
		} else if (diag == 2) {
			/*
			 * in middle of ssb command.  Just return.
			 */
			return 0;
		}
		break;
	case KDB_REASON_FAULT:
		break;
	case KDB_REASON_INT:
		kdb_printf("due to KDB_ENTER() call\n");
		break;
	case KDB_REASON_KEYBOARD:
		kdb_printf("due to Keyboard Entry\n");
		break;
	case KDB_REASON_SWITCH:
		kdb_printf("due to cpu switch\n");
		break;
	case KDB_REASON_ENTER:
		kdb_printf("due to function call\n");
		regs = &func_regs;
		regs->xcs = 0;
#if defined(CONFIG_KDB_FRAMEPTR)
		asm volatile("movl %%ebp,%0":"=m" (*(int *)&regs->ebp));
#endif
		asm volatile("movl %%esp,%0":"=m" (*(int *)&regs->esp));
		regs->eip = (long) &kdb;	/* for traceback. */
		break;
	case KDB_REASON_PANIC:
		kdb_printf("due to panic @ 0x%8.8x\n", regs->eip);
		kdbdumpregs(regs, NULL, NULL);
		break;
	case KDB_REASON_BREAK:
		kdb_printf("due to Breakpoint @ 0x%8.8x\n", regs->eip);
		break;
	default:
		break;
	}

#if defined(__SMP__)
	/*
	 * If SMP, stop other processors
	 */
	if (smp_num_cpus > 1) {
		/*
		 * Stop all other processors
		 */
		smp_kdb_stop(1);
	}
#endif	/* __SMP__ */

	/*
	 * Disable interrupts during kdb command processing 
	 */
	__save_flags(flags);
	__cli();

	while (1) {
		/*
		 * Initialize pager context.
		 */
		kdb_nextline = 1;

		/*
		 * Use kdb_setjmp/kdb_longjmp to break out of 
		 * the pager early.
		 */
		if (kdb_setjmp(&kdbjmpbuf)) {
			/*
			 * Command aborted (usually in pager)
			 */

			/*
		 	 * XXX - need to abort a SSB ?
			 */
			continue;
		}

		/*
		 * Fetch command from keyboard
		 */
		cmd = kbd_getstr(cmdbuf, sizeof(cmdbuf), kdbgetenv("PROMPT"));

		diag = kdb_parse(cmd, regs);
		if (diag == KDB_NOTFOUND) {
			kdb_printf("Unknown kdb command: '%s'\n", cmd);
			diag = 0;
		}
		if ((diag == KDB_GO)
		 || (diag == KDB_CPUSWITCH))
			break;	/* Go or cpu switch command */

		if (diag)
			kdb_cmderror(diag);
	}

	/*
	 * Set up debug registers.
	 */
	kdb_bp_install();

#if defined(__SMP__)
	if ((diag == KDB_CPUSWITCH)
	 && (kdb_new_cpu != -1)) {
		/*
		 * Leaving the other CPU's at the barrier, except the
		 * one we are switching to, we'll send ourselves a 
		 * kdb IPI before allowing interrupts so it will get
		 * caught ASAP and get this CPU back waiting at the barrier.
		 */
		
		smp_kdb_stop(0);	/* Stop ourself */

		/*
	 	 * let the new cpu go.
		 */
		clear_bit(kdb_new_cpu, &smp_kdb_wait);
	} else {
		/*
		 * Let the other processors continue.
		 */
		smp_kdb_wait = 0;
	}
#endif

	kdb_flags &= ~(KDB_FLAG_SUPRESS|KDB_FLAG_FAULT);

	__restore_flags(flags);


	return 0;
}
Example #24
/*
 * The associated lock must be locked on entry.  It is unlocked on return.
 *
 * Return values:
 *
 * n < 0 : interrupted,  -n jiffies remaining on timeout, or -1 if timeout == 0
 * n = 0 : timeout expired
 * n > 0 : sv_signal()'d, n jiffies remaining on timeout, or 1 if timeout == 0
 */
signed long sv_wait(sv_t *sv, int sv_wait_flags, unsigned long timeout) 
{
	DECLARE_WAITQUEUE( wait, current );
	unsigned long flags;
	signed long ret = 0;

#ifdef SV_DEBUG_INTERRUPT_STATE
	{
	unsigned long flags;
	__save_flags(flags);

	if(sv->sv_flags & SV_INTS) {
		if(SV_TEST_INTERRUPTS_ENABLED(flags)) {
			printk(KERN_ERR "sv_wait: SV_INTS and interrupts "
			       "enabled (flags: 0x%lx)\n", flags);
			BUG();
		}
	} else {
		if (SV_TEST_INTERRUPTS_DISABLED(flags)) {
			printk(KERN_WARNING "sv_wait: !SV_INTS and interrupts "
			       "disabled! (flags: 0x%lx)\n", flags);
		}
	}
	}
#endif  /* SV_DEBUG_INTERRUPT_STATE */

	sv_lock(sv);

	sv->sv_mon_unlock_func(sv->sv_mon_lock);

	/* Add ourselves to the wait queue and set the state before
	 * releasing the sv_lock so as to avoid racing with the
	 * wake_up() in sv_signal() and sv_broadcast(). 
	 */

	/* don't need the _irqsave part, but there is no wq_write_lock() */
	wq_write_lock_irqsave(&sv->sv_waiters.lock, flags);

#ifdef EXCLUSIVE_IN_QUEUE
	wait.flags |= WQ_FLAG_EXCLUSIVE;
#endif

	switch(sv->sv_flags & SV_ORDER_MASK) {
	case SV_ORDER_FIFO:
		__add_wait_queue_tail(&sv->sv_waiters, &wait);
		break;
	case SV_ORDER_FILO:
		__add_wait_queue(&sv->sv_waiters, &wait);
		break;
	default:
		printk(KERN_ERR "sv_wait: unknown order!  (sv: 0x%p, flags: 0x%x)\n",
					(void *)sv, sv->sv_flags);
		BUG();
	}
	wq_write_unlock_irqrestore(&sv->sv_waiters.lock, flags);

	if(sv_wait_flags & SV_WAIT_SIG)
		set_current_state(TASK_EXCLUSIVE | TASK_INTERRUPTIBLE  );
	else
		set_current_state(TASK_EXCLUSIVE | TASK_UNINTERRUPTIBLE);

	spin_unlock(&sv->sv_lock);

	if(sv->sv_flags & SV_INTS)
		local_irq_enable();
	else if(sv->sv_flags & SV_BHS)
		local_bh_enable();

	if (timeout)
		ret = schedule_timeout(timeout);
	else
		schedule();

	if(current->state != TASK_RUNNING) /* XXX Is this possible? */ {
		printk(KERN_ERR "sv_wait: state not TASK_RUNNING after "
		       "schedule().\n");
		set_current_state(TASK_RUNNING);
	}

	remove_wait_queue(&sv->sv_waiters, &wait);

	/* Return cases:
	   - woken by a sv_signal/sv_broadcast
	   - woken by a signal
	   - woken by timeout expiring
	*/

	/* XXX This isn't really accurate; we may have been woken
           before the signal anyway.... */
	if(signal_pending(current))
		return timeout ? -ret : -1;
	return timeout ? ret : 1;
}
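
The header comment spells out the protocol: the sv's associated monitor lock must be held on entry, sv_wait() drops it before sleeping, and it is not reacquired on return. A hypothetical waiter built on that contract (example_sv, example_lock and condition are illustrative; the sv is assumed to have been set up elsewhere with example_lock as its monitor lock):

static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;	/* the sv's monitor lock */
static sv_t example_sv;					/* assumed to be initialized elsewhere */
static int condition;					/* the state being waited for */

static void wait_for_condition(void)
{
	spin_lock(&example_lock);
	while (!condition) {
		/* sv_wait() releases example_lock before sleeping and returns
		 * with it dropped, so re-take it before re-checking. */
		sv_wait(&example_sv, 0, 0);
		spin_lock(&example_lock);
	}
	/* condition holds and example_lock is held again at this point */
	spin_unlock(&example_lock);
}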
Example #25
/*
 * s390_machine_check_handler
 *
 * machine check handler, dequeueing machine check entries
 *  and processing them
 */
static int s390_machine_check_handler( void *parm)
{
	struct semaphore *sem = parm;
	unsigned long     flags;
	mache_t          *pmache;

	int               found = 0;

        /* set name to something sensible */
        strcpy (current->comm, "kmcheck");


        /* block all signals */
        sigfillset(&current->blocked);

#ifdef S390_MACHCHK_DEBUG
	printk( KERN_NOTICE "mach_handler : ready\n");
#endif	

	do {

#ifdef S390_MACHCHK_DEBUG
		printk( KERN_NOTICE "mach_handler : waiting for wakeup\n");
#endif	

		down_interruptible( sem );

#ifdef S390_MACHCHK_DEBUG
		printk( KERN_NOTICE "\nmach_handler : wakeup ... \n");
#endif	
		found = 0; /* init ... */

		__save_flags( flags );
		__cli();

		do {

		pmache = s390_dequeue_mchchk();

		if ( pmache )
		{
			found = 1;
		
			if ( pmache->mcic.mcc.mcd.cp )
			{
				crwe_t *pcrwe_n;
				crwe_t *pcrwe_h;

				s390_do_crw_pending( pmache->mc.crwe );

				pcrwe_h = pmache->mc.crwe;
				pcrwe_n = pmache->mc.crwe->crwe_next;

				pmache->mcic.mcc.mcd.cp = 0;
				pmache->mc.crwe         = NULL;

				spin_lock( &crw_queue_lock);

				while ( pcrwe_h )
				{
					pcrwe_h->crwe_next = crw_buffer_anchor;
					crw_buffer_anchor  = pcrwe_h;
					pcrwe_h            = pcrwe_n;

					if ( pcrwe_h != NULL )
						pcrwe_n = pcrwe_h->crwe_next;

				} /* endwhile */

				spin_unlock( &crw_queue_lock);

			} /* endif */

#ifdef CONFIG_MACHCHK_WARNING
			if ( pmache->mcic.mcc.mcd.w )
			{
				ctrl_alt_del();		// shutdown NOW!
#ifdef S390_MACHCHK_DEBUG
			printk( KERN_DEBUG "mach_handler : kill -SIGPWR init\n");
#endif
			} /* endif */
#endif

			s390_enqueue_free_mchchk( pmache );
		}
		else
		{

			// unconditional surrender ...
#ifdef S390_MACHCHK_DEBUG
			printk( KERN_DEBUG "mach_handler : nothing to do, sleeping\n");
#endif	

		} /* endif */	

		} while ( pmache );

		__restore_flags( flags );

	} while ( 1 );

	return( 0);
}
Example #26
/*
 * Set a new transfer mode at the drive
 */
int cs5530_set_xfer_mode (ide_drive_t *drive, byte mode)
{
	int		i, error = 1;
	byte		stat;
	ide_hwif_t	*hwif = HWIF(drive);

	printk("%s: cs5530_set_xfer_mode(%s)\n", drive->name, strmode(mode));
	/*
	 * If this is a DMA mode setting, then turn off all DMA bits.
	 * We will set one of them back on afterwards, if all goes well.
	 *
	 * Not sure why this is needed (it looks very silly),
	 * but other IDE chipset drivers also do this fiddling.  ???? -ml
 	 */
	switch (mode) {
		case XFER_UDMA_4:
		case XFER_UDMA_3:
		case XFER_UDMA_2:
		case XFER_UDMA_1:
		case XFER_UDMA_0:
		case XFER_MW_DMA_2:
		case XFER_MW_DMA_1:
		case XFER_MW_DMA_0:
		case XFER_SW_DMA_2:
		case XFER_SW_DMA_1:
		case XFER_SW_DMA_0:
			drive->id->dma_ultra &= ~0xFF00;
			drive->id->dma_mword &= ~0x0F00;
			drive->id->dma_1word &= ~0x0F00;
	}

	/*
	 * Select the drive, and issue the SETFEATURES command
	 */
	disable_irq(hwif->irq);
	udelay(1);
	SELECT_DRIVE(HWIF(drive), drive);
	udelay(1);
	if (IDE_CONTROL_REG)
		OUT_BYTE(drive->ctl | 2, IDE_CONTROL_REG);
	OUT_BYTE(mode, IDE_NSECTOR_REG);
	OUT_BYTE(SETFEATURES_XFER, IDE_FEATURE_REG);
	OUT_BYTE(WIN_SETFEATURES, IDE_COMMAND_REG);
	udelay(1);	/* spec allows drive 400ns to assert "BUSY" */

	/*
	 * Wait for drive to become non-BUSY
	 */
	if ((stat = GET_STAT()) & BUSY_STAT) {
		unsigned long flags, timeout;
		__save_flags(flags);	/* local CPU only */
		ide__sti();		/* local CPU only -- for jiffies */
		timeout = jiffies + WAIT_CMD;
		while ((stat = GET_STAT()) & BUSY_STAT) {
			if (0 < (signed long)(jiffies - timeout))
				break;
		}
		__restore_flags(flags); /* local CPU only */
	}

	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		if (OK_STAT((stat = GET_STAT()), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT)) {
			error = 0;
			break;
		}
	}
	enable_irq(hwif->irq);

	/*
	 * Turn dma bit on if all is okay
	 */
	if (error) {
		(void) ide_dump_status(drive, "cs5530_set_xfer_mode", stat);
	} else {
		switch (mode) {
			case XFER_UDMA_4:   drive->id->dma_ultra |= 0x1010; break;
			case XFER_UDMA_3:   drive->id->dma_ultra |= 0x0808; break;
			case XFER_UDMA_2:   drive->id->dma_ultra |= 0x0404; break;
			case XFER_UDMA_1:   drive->id->dma_ultra |= 0x0202; break;
			case XFER_UDMA_0:   drive->id->dma_ultra |= 0x0101; break;
			case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break;
			case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break;
			case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break;
			case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break;
			case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break;
			case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break;
		}
	}
	return error;
}
Example #27
/*
 * Verify that we are doing an approved SETFEATURES_XFER with respect
 * to the hardware being able to support the request.  Since some hardware
 * can improperly report capabilities, we check whether the host adapter
 * in combination with the device (usually a disk) properly detects
 * and acknowledges each end of the ribbon.
 */
int ide_ata66_check (ide_drive_t *drive, byte cmd, byte nsect, byte feature)
{
    if ((cmd == WIN_SETFEATURES) &&
            (nsect > XFER_UDMA_2) &&
            (feature == SETFEATURES_XFER))
    {
        if (!HWIF(drive)->udma_four)
        {
            printk("%s: Speed warnings UDMA 3/4/5 is not functional.\n", HWIF(drive)->name);
            return 1;
        }
#ifndef CONFIG_IDEDMA_IVB
        if ((drive->id->hw_config & 0x6000) == 0)
        {
#else /* !CONFIG_IDEDMA_IVB */
        if (((drive->id->hw_config & 0x2000) == 0) ||
                ((drive->id->hw_config & 0x4000) == 0))
        {
#endif /* CONFIG_IDEDMA_IVB */
            printk("%s: Speed warnings UDMA 3/4/5 is not functional.\n", drive->name);
            return 1;
        }
    }
    return 0;
}

/*
 * Backside of HDIO_DRIVE_CMD call of SETFEATURES_XFER.
 * 1 : Safe to update drive->id DMA registers.
 * 0 : OOPs not allowed.
 */
int set_transfer (ide_drive_t *drive, byte cmd, byte nsect, byte feature)
{
    if ((cmd == WIN_SETFEATURES) &&
            (nsect >= XFER_SW_DMA_0) &&
            (feature == SETFEATURES_XFER) &&
            (drive->id->dma_ultra ||
             drive->id->dma_mword ||
             drive->id->dma_1word))
        return 1;

    return 0;
}

/*
 *  All hosts that use the 80c ribbon must use!
 */
byte eighty_ninty_three (ide_drive_t *drive)
{
    return ((byte) ((HWIF(drive)->udma_four) &&
#ifndef CONFIG_IDEDMA_IVB
                    (drive->id->hw_config & 0x4000) &&
#endif /* CONFIG_IDEDMA_IVB */
                    (drive->id->hw_config & 0x6000)) ? 1 : 0);
}

/*
 * Similar to ide_wait_stat(), except it never calls ide_error internally.
 * This is a kludge to handle the new ide_config_drive_speed() function,
 * and should not otherwise be used anywhere.  Eventually, the tuneproc's
 * should be updated to return ide_startstop_t, in which case we can get
 * rid of this abomination again.  :)   -ml
 *
 * It is gone..........
 *
 * const char *msg == consider adding for verbose errors.
 */
int ide_config_drive_speed (ide_drive_t *drive, byte speed)
{
    ide_hwif_t *hwif = HWIF(drive);
    int	i, error = 1;
    byte stat;

#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
    byte unit = (drive->select.b.unit & 0x01);
    outb(inb(hwif->dma_base+2) & ~(1<<(5+unit)), hwif->dma_base+2);
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */

    /*
     * Don't use ide_wait_cmd here - it will
     * attempt to set_geometry and recalibrate,
     * but for some reason these don't work at
     * this point (lost interrupt).
     */
    /*
     * Select the drive, and issue the SETFEATURES command
     */
    disable_irq(hwif->irq);	/* disable_irq_nosync ?? */
    udelay(1);
    SELECT_DRIVE(HWIF(drive), drive);
    SELECT_MASK(HWIF(drive), drive, 0);
    udelay(1);
    if (IDE_CONTROL_REG)
        OUT_BYTE(drive->ctl | 2, IDE_CONTROL_REG);
    OUT_BYTE(speed, IDE_NSECTOR_REG);
    OUT_BYTE(SETFEATURES_XFER, IDE_FEATURE_REG);
    OUT_BYTE(WIN_SETFEATURES, IDE_COMMAND_REG);
    if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
        OUT_BYTE(drive->ctl, IDE_CONTROL_REG);
    udelay(1);
    /*
     * Wait for drive to become non-BUSY
     */
    if ((stat = GET_STAT()) & BUSY_STAT)
    {
        unsigned long flags, timeout;
        __save_flags(flags);	/* local CPU only */
        ide__sti();		/* local CPU only -- for jiffies */
        timeout = jiffies + WAIT_CMD;
        while ((stat = GET_STAT()) & BUSY_STAT)
        {
            if (0 < (signed long)(jiffies - timeout))
                break;
        }
        __restore_flags(flags); /* local CPU only */
    }

    /*
     * Allow status to settle, then read it again.
     * A few rare drives vastly violate the 400ns spec here,
     * so we'll wait up to 10usec for a "good" status
     * rather than expensively fail things immediately.
     * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
     */
    for (i = 0; i < 10; i++)
    {
        udelay(1);
        if (OK_STAT((stat = GET_STAT()), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT))
        {
            error = 0;
            break;
        }
    }

    SELECT_MASK(HWIF(drive), drive, 0);

    enable_irq(hwif->irq);

    if (error)
    {
        (void) ide_dump_status(drive, "set_drive_speed_status", stat);
        return error;
    }

    drive->id->dma_ultra &= ~0xFF00;
    drive->id->dma_mword &= ~0x0F00;
    drive->id->dma_1word &= ~0x0F00;

#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(CONFIG_DMA_NONPCI)
    if (speed > XFER_PIO_4)
    {
        outb(inb(hwif->dma_base+2)|(1<<(5+unit)), hwif->dma_base+2);
    }
    else
    {
        outb(inb(hwif->dma_base+2) & ~(1<<(5+unit)), hwif->dma_base+2);
    }
#endif /* (CONFIG_BLK_DEV_IDEDMA) && !(CONFIG_DMA_NONPCI) */

    switch(speed)
    {
    case XFER_UDMA_7:
        drive->id->dma_ultra |= 0x8080;
        break;
    case XFER_UDMA_6:
        drive->id->dma_ultra |= 0x4040;
        break;
    case XFER_UDMA_5:
        drive->id->dma_ultra |= 0x2020;
        break;
    case XFER_UDMA_4:
        drive->id->dma_ultra |= 0x1010;
        break;
    case XFER_UDMA_3:
        drive->id->dma_ultra |= 0x0808;
        break;
    case XFER_UDMA_2:
        drive->id->dma_ultra |= 0x0404;
        break;
    case XFER_UDMA_1:
        drive->id->dma_ultra |= 0x0202;
        break;
    case XFER_UDMA_0:
        drive->id->dma_ultra |= 0x0101;
        break;
    case XFER_MW_DMA_2:
        drive->id->dma_mword |= 0x0404;
        break;
    case XFER_MW_DMA_1:
        drive->id->dma_mword |= 0x0202;
        break;
    case XFER_MW_DMA_0:
        drive->id->dma_mword |= 0x0101;
        break;
    case XFER_SW_DMA_2:
        drive->id->dma_1word |= 0x0404;
        break;
    case XFER_SW_DMA_1:
        drive->id->dma_1word |= 0x0202;
        break;
    case XFER_SW_DMA_0:
        drive->id->dma_1word |= 0x0101;
        break;
    default:
        break;
    }
    return error;
}