Exemplo n.º 1
0
Arquivo: irq.c Projeto: nhanh0/hah
/*
** The alloc process needs to accept a parameter to accommodate limitations
** of the HW/SW which use these bits:
** Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
** V-class (EPIC):          6 bits
** N/L-class/A500:          8 bits (iosapic)
** PCI 2.2 MSI:             16 bits (I think)
** Existing PCI devices:    32-bits (NCR c720/ATM/GigE/HyperFabric)
**
** On the service provider side:
** o PA 1.1 (and PA2.0 narrow mode)     5-bits (width of EIR register)
** o PA 2.0 wide mode                   6-bits (per processor)
** o IA64                               8-bits (0-256 total)
**
** So a Legacy PA I/O device on a PA 2.0 box can't use all
** the bits supported by the processor...and the N/L-class
** I/O subsystem supports more bits than PA2.0 has. The first
** case is the problem.
*/
unsigned int
txn_alloc_data(int virt_irq, unsigned int bits_wide)
{
	/* Transaction data written to the device is simply the IRQ offset. */
	unsigned int offset = IRQ_OFFSET(virt_irq);

	/* XXX FIXME : bits_wide indicates how wide the transaction
	** data is allowed to be...we may need a different virt_irq
	** if this one won't work. Another reason to index virtual
	** irq's into a table which can manage CPU/IRQ bits separately.
	*/
	if (offset > (1 << (bits_wide - 1)))
		panic("Sorry -- didn't allocate valid IRQ for this device\n");

	return offset;
}
Exemplo n.º 2
0
Arquivo: irq.c Projeto: nhanh0/hah
void enable_irq(int irq) 
{
	struct irq_region *rgn;

#ifdef DEBUG_IRQ
	if (irq != TIMER_IRQ)
#endif
	DBG_IRQ("enable_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq));

	rgn = irq_region[IRQ_REGION(irq)];

	/* Only regions that advertise disable/enable support may do this. */
	if (!(rgn->data.flags & IRQ_REG_DIS))
		BUG();
	else
		rgn->ops.enable_irq(rgn->data.dev, IRQ_OFFSET(irq));
}
Exemplo n.º 3
0
Arquivo: irq.c Projeto: nhanh0/hah
/*
** Unregister the irqaction whose dev_id matches on the given irq.
** The first action lives in the region's static action[] slot; any
** further shared handlers are heap-allocated and chained via ->next.
*/
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action, **p;

	action = &irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)];

	if (action->dev_id == dev_id) {
		if (action->next == NULL) {
			/* Sole handler: just clear the static slot. */
			action->handler = NULL;
		} else {
			/* Promote the second handler into the static slot
			** and free its heap node -- the original memcpy'd
			** but never kfree'd it, leaking one irqaction per
			** removal of a shared irq's first handler.
			*/
			struct irqaction *next = action->next;
			memcpy(action, next, sizeof *action);
			kfree(next);
		}

		return;
	}

	/* Walk the chained (heap-allocated) handlers. */
	for (p = &action->next; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		kfree(action);

		return;
	}

	printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
}
Exemplo n.º 4
0
Arquivo: irq.c Projeto: nhanh0/hah
static inline void mask_irq(int irq)
{
	struct irq_region *rgn;
	
#ifdef DEBUG_IRQ
	if (irq != TIMER_IRQ)
#endif
	DBG_IRQ("mask_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq));

	/* CPU-local interrupts are masked directly in the EIEM register. */
	if (IRQ_REGION(irq) == CPU_IRQ_REGION) {
		CLEAR_EIEM_BIT(irq);
		return;
	}

	/* Otherwise defer to the owning region, if it supports masking. */
	rgn = irq_region[IRQ_REGION(irq)];
	if (rgn->data.flags & IRQ_REG_MASK)
		rgn->ops.mask_irq(rgn->data.dev, IRQ_OFFSET(irq));
}
Exemplo n.º 5
0
/*
** Record the pending IPI type for the target CPU under its lock,
** then poke the CPU's hpa to raise the interrupt.
*/
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *target = &cpu_data[cpu];
	unsigned long flags;

	spin_lock_irqsave(&(target->lock), flags);
	target->pending_ipi |= 1 << op;
	__raw_writel(IRQ_OFFSET(IPI_IRQ), target->hpa);
	spin_unlock_irqrestore(&(target->lock), flags);
}
Exemplo n.º 6
0
Arquivo: irq.c Projeto: nhanh0/hah
/*
** Register an interrupt handler for irq.  The first handler goes in
** the region's static action[] slot; additional (shared) handlers are
** heap-allocated and appended to the ->next chain.
** Returns 0 on success, -EINVAL for a bad handler/irq, -ENOMEM on
** allocation failure.
*/
int request_irq(unsigned int irq, 
		void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, 
		const char * devname,
		void *dev_id)
{
	struct irqaction * action;

	if(!handler) {
		printk(KERN_ERR "request_irq(%d,...): Augh! No handler for irq!\n",
			irq);
		return -EINVAL;
	}

	if ((IRQ_REGION(irq) == 0) || irq_region[IRQ_REGION(irq)] == NULL) {
		/*
		** Bug catcher for drivers which use "char" or u8 for
		** the IRQ number. They lose the region number which
		** is in pcidev->irq (an int).
		*/
		printk(KERN_ERR "%p (%s?) called request_irq with an invalid irq %d\n",
			__builtin_return_address(0), devname, irq);
		return -EINVAL;
	}

	action = &irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)];

	if (action->handler) {
		/* Slot already claimed: chain a new irqaction.  Allocate
		** and fully initialize the node BEFORE linking it into the
		** live list -- the original linked first, so an interrupt
		** arriving between link and init could walk uninitialized
		** handler/next fields.
		*/
		struct irqaction *new_action;

		new_action = kmalloc(sizeof *new_action, GFP_ATOMIC);
		if (!new_action) {
			printk(KERN_ERR "request_irq():Augh! No action!\n") ;
			return -ENOMEM;
		}
		new_action->handler = handler;
		new_action->flags = irqflags;
		new_action->mask = 0;
		new_action->name = devname;
		new_action->next = NULL;
		new_action->dev_id = dev_id;

		while (action->next)
			action = action->next;
		action->next = new_action;	/* publish the initialized node */
	} else {
		/* First handler for this irq: fill the static slot. */
		action->handler = handler;
		action->flags = irqflags;
		action->mask = 0;
		action->name = devname;
		action->next = NULL;
		action->dev_id = dev_id;
	}

	enable_irq(irq);
	return 0;
}
Exemplo n.º 7
0
Arquivo: irq.c Projeto: nhanh0/hah
int
txn_claim_irq(int irq)
{
	if (irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)].handler ==NULL)
	{
		return irq;
	}

	/* unlikely, but be prepared */
	return -1;
}
Exemplo n.º 8
0
Arquivo: irq.c Projeto: nhanh0/hah
/* FIXME: SMP, flags, bottom halves, rest */
void do_irq(struct irqaction *action, int irq, struct pt_regs * regs)
{
	int cpu = smp_processor_id();
	struct irqaction *a;

	irq_enter(cpu, irq);

#ifdef DEBUG_IRQ
	if (irq != TIMER_IRQ)
#endif
	DBG_IRQ("do_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq));

	if (action->handler == NULL)
		printk(KERN_ERR "No handler for interrupt %d !\n", irq);

	/* Run every registered (shared) handler on this line. */
	for (a = action; a && a->handler; a = a->next)
		a->handler(irq, a->dev_id, regs);

	irq_exit(cpu, irq);

	/* don't need to care about unmasking and stuff */
	do_softirq();
}
Exemplo n.º 9
0
/*
 * Bring one cpu online: create its idle task, point the boot
 * rendezvous at it, release the slave CPU, and wait for it to
 * announce itself via cpu_online().  Returns 0 on success, -1 if the
 * slave never checks in within the timeout.
 */
int __init smp_boot_one_cpu(int cpuid, int cpunum)
{
	struct task_struct *idle;
	long timeout;

	/* 
	 * Create an idle task for this CPU.  Note the address we give 
	 * to kernel_thread is irrelevant -- it's going to start
	 * where OS_BOOT_RENDEVZ vector in SAL says to start.  But
	 * this gets all the other task-y sort of data structures set
	 * up like we wish.   We need to pull the just created idle task 
	 * off the run queue and stuff it into the init_tasks[] array.  
	 * Sheesh . . .
	 */

	idle = fork_by_hand();
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	wake_up_forked_process(idle);
	init_idle(idle, cpunum);
	unhash_process(idle);
	idle->thread_info->cpu = cpunum;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[],cpu_data[])
	*/
	cpu_now_booting = cpunum;

	/* 
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle ;
	mb();

	/*
	** This gets PDC to release the CPU from a very tight loop.
	** See MEM_RENDEZ comments in head.S.
	** (The mb() above makes sure the idle-task pointer is visible
	** before the slave is released.)
	*/
	__raw_writel(IRQ_OFFSET(TIMER_IRQ), cpu_data[cpunum].hpa);
	mb();

	/* 
	 * OK, wait a bit for that CPU to finish staggering about. 
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if(cpu_online(cpunum)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive ;
		}
		udelay(100);
		barrier();
	}

	/* Slave never came up: drop our reference to its idle task. */
	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
#if (kDEBUG>=100)
	printk(KERN_DEBUG "SMP: CPU:%d (num %d) came alive after %ld _us\n",
		cpuid,  cpunum, timeout * 100);
#endif /* kDEBUG */
#ifdef ENTRY_SYS_CPUS
	cpu_data[cpunum].state = STATE_RUNNING;
#endif
	return 0;
}
Exemplo n.º 10
0
/*
 * Bring one cpu online: create its idle task, release the slave CPU
 * from the PDC rendezvous loop, and wait for it to announce itself
 * via cpu_online().  Returns 0 on success, -1 if the slave never
 * checks in within the timeout.
 */
int __init smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	long timeout;

	/* 
	 * Create an idle task for this CPU.  Note the address we give 
	 * to kernel_thread is irrelevant -- it's going to start
	 * where OS_BOOT_RENDEVZ vector in SAL says to start.  But
	 * this gets all the other task-y sort of data structures set
	 * up like we wish.   We need to pull the just created idle task 
	 * off the run queue and stuff it into the init_tasks[] array.  
	 * Sheesh . . .
	 */

	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	idle->thread_info->cpu = cpuid;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[],cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/* 
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle ;
	mb();

	printk("Releasing cpu %d now, hpa=%lx\n", cpuid, cpu_data[cpuid].hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which 
	** is executed after receiving the rendezvous signal (an interrupt to 
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the 
	** contents of memory are valid."
	*/
	__raw_writel(IRQ_OFFSET(TIMER_IRQ), cpu_data[cpuid].hpa);
	mb();

	/* 
	 * OK, wait a bit for that CPU to finish staggering about. 
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if(cpu_online(cpuid)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive ;
		}
		udelay(100);
		barrier();
	}

	/* Slave never came up: drop our reference to its idle task. */
	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
#if (kDEBUG>=100)
	printk(KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
		cpuid, timeout * 100);
#endif /* kDEBUG */
#ifdef ENTRY_SYS_CPUS
	cpu_data[cpuid].state = STATE_RUNNING;
#endif
	return 0;
}