Example #1
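From the old ppc64 XICS interrupt code: the end-of-interrupt path writes the CPPR and then the XIRR (a 0xff priority byte in the top 8 bits, the real IRQ number in the low bits), with an iosync() after each MMIO write to make sure it has reached the presentation controller before continuing.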
void
xics_end_irq(u_int irq)
{
	int cpu = smp_processor_id();

	ops->cppr_info(cpu, 0); /* actually the value overwritten by ack */
	iosync();
	ops->xirr_info_set(cpu, ((0xff<<24) | (virt_irq_to_real(irq-XICS_IRQ_OFFSET))));
	iosync();
}
Example #2
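The EOI hook of the XICS ICP hypervisor backend: iosync() presumably orders any MMIO the handler issued to the device before the EOI goes out via icp_hv_set_xirr(); the XIRR value combines the popped CPPR with the hardware IRQ number.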
static void icp_hv_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	iosync();
	icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq);
}
Example #3
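An older LPAR variant of the same EOI pattern: iosync() before the XIRR write, with the priority restored by pop_cppr() in the top byte.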
static void xics_eoi_lpar(unsigned int virq)
{
	unsigned int irq = (unsigned int)irq_map[virq].hwirq;

	iosync();
	lpar_xirr_info_set((pop_cppr() << 24) | irq);
}
Example #4
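Per-CPU setup: writing 0xff, the least-favored priority, to the CPPR lets the CPU accept interrupts of any priority; iosync() flushes the write out.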
void xics_setup_cpu(void)
{
	int cpu = smp_processor_id();

	ops->cppr_info(cpu, 0xff);
	iosync();
}
Example #5
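Mask-and-ack: interrupt numbers below XICS_IRQ_OFFSET come from the cascaded i8259, so the 8259 is acked first and the cascade interrupt is then EOId on the XICS; other sources only have the CPPR rewritten. An iosync() separates each step.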
void
xics_mask_and_ack_irq(u_int irq)
{
	int cpu = smp_processor_id();

	if (irq < XICS_IRQ_OFFSET) {
		i8259_pic.ack(irq);
		iosync();
		ops->xirr_info_set(cpu, ((0xff<<24) | xics_irq_8259_cascade_real));
		iosync();
	} else {
		ops->cppr_info(cpu, 0xff);
		iosync();
	}
}
Example #6
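The direct (native MMIO) EOI variant: same XIRR layout as above, but with a fixed 0xff priority byte rather than a popped CPPR value.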
static void xics_eoi_direct(unsigned int virq)
{
	unsigned int irq = (unsigned int)irq_map[virq].hwirq;

	iosync();
	direct_xirr_info_set((0xff << 24) | irq);
}
Example #7
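From the eHCA InfiniBand driver: here the EOI is issued through the H_EOI hypervisor call rather than an MMIO write, but iosync() still comes first, presumably to order earlier accesses to the adapter.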
u64 hipz_h_eoi(int irq)
{
	unsigned long xirr;

	iosync();
	xirr = (0xffULL << 24) | irq;

	return plpar_hcall_norets(H_EOI, xirr);
}
Example #8
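Board restart on Holly (TSI108-based): the code sets the BOOT bit in the bridge register so accesses are routed to the HLP interface, uses iosync() to ensure the iowrite32be() has actually been posted to the bridge, then points SRR0 at the firmware reset vector and executes rfi.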
void holly_restart(char *cmd)
{
	__be32 __iomem *ocn_bar1 = NULL;
	unsigned long bar;
	struct device_node *bridge = NULL;
	const void *prop;
	int size;
	phys_addr_t addr = 0xc0000000;

	local_irq_disable();

	bridge = of_find_node_by_type(NULL, "tsi-bridge");
	if (bridge) {
		prop = of_get_property(bridge, "reg", &size);
		addr = of_translate_address(bridge, prop);
	}
	addr += (TSI108_PB_OFFSET + 0x414);

	ocn_bar1 = ioremap(addr, 0x4);

	/* Turn on the BOOT bit so the addresses are correctly
	 * routed to the HLP interface */
	bar = ioread32be(ocn_bar1);
	bar |= 2;
	iowrite32be(bar, ocn_bar1);
	iosync();

	/* Set SRR0 to the reset vector and turn on MSR_IP */
	mtspr(SPRN_SRR0, 0xfff00100);
	mtspr(SPRN_SRR1, MSR_IP);

	/* Do an rfi to jump back to firmware.  Somewhat evil,
	 * but it works
	 */
	__asm__ __volatile__("rfi" : : : "memory");

	/* Spin until reset happens.  Shouldn't really get here */
	for (;;) ;
}
Example #9
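Priority update in the ICP hypervisor backend: the base CPPR is cached via xics_set_base_cppr(), the new value is pushed out with icp_hv_set_cppr(), and an iosync() follows the update.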
static void icp_hv_set_cpu_priority(unsigned char cppr)
{
	xics_set_base_cppr(cppr);
	icp_hv_set_cppr(cppr);
	iosync();
}
Example #10
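Event-queue processing in the eHCA driver. When entered from the interrupt path it polls hipz_h_query_int_state(), with an iosync() on each iteration, until the interrupt deasserts or a retry limit is hit; it then drains the cached EQEs, dispatches completion events, and re-polls the EQ if it is not yet empty.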
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
{
	struct ehca_eq *eq = &shca->eq;
	struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
	u64 eqe_value, ret;
	unsigned long flags;
	int eqe_cnt, i;
	int eq_empty = 0;

	spin_lock_irqsave(&eq->irq_spinlock, flags);
	if (is_irq) {
		const int max_query_cnt = 100;
		int query_cnt = 0;
		int int_state = 1;
		do {
			int_state = hipz_h_query_int_state(
				shca->ipz_hca_handle, eq->ist);
			query_cnt++;
			iosync();
		} while (int_state && query_cnt < max_query_cnt);
		if (unlikely(query_cnt == max_query_cnt))
			ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
				 int_state, query_cnt);
	}

	/* read out all eqes */
	eqe_cnt = 0;
	do {
		u32 token;
		eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
		if (!eqe_cache[eqe_cnt].eqe)
			break;
		eqe_value = eqe_cache[eqe_cnt].eqe->entry;
		if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
			token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
			read_lock(&ehca_cq_idr_lock);
			eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
			if (eqe_cache[eqe_cnt].cq)
				atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
			read_unlock(&ehca_cq_idr_lock);
			if (!eqe_cache[eqe_cnt].cq) {
				ehca_err(&shca->ib_device,
					 "Invalid eqe for non-existing cq "
					 "token=%x", token);
				continue;
			}
		} else
			eqe_cache[eqe_cnt].cq = NULL;
		eqe_cnt++;
	} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
	if (!eqe_cnt) {
		if (is_irq)
			ehca_dbg(&shca->ib_device,
				 "No eqe found for irq event");
		goto unlock_irq_spinlock;
	} else if (!is_irq) {
		ret = hipz_h_eoi(eq->ist);
		if (ret != H_SUCCESS)
			ehca_err(&shca->ib_device,
				 "bad return code EOI -rc = %lld\n", ret);
		ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
	}
	if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
		ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
	/* enable irq for new packets */
	for (i = 0; i < eqe_cnt; i++) {
		if (eq->eqe_cache[i].cq)
			reset_eq_pending(eq->eqe_cache[i].cq);
	}
	/* check eq */
	spin_lock(&eq->spinlock);
	eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
	spin_unlock(&eq->spinlock);
	/* call completion handler for cached eqes */
	for (i = 0; i < eqe_cnt; i++)
		if (eq->eqe_cache[i].cq) {
			if (ehca_scaling_code)
				queue_comp_task(eq->eqe_cache[i].cq);
			else {
				struct ehca_cq *cq = eq->eqe_cache[i].cq;
				comp_event_callback(cq);
				if (atomic_dec_and_test(&cq->nr_events))
					wake_up(&cq->wait_completion);
			}
		} else {
			ehca_dbg(&shca->ib_device, "Got non completion event");
			parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
		}
	/* poll eq if not empty */
	if (eq_empty)
		goto unlock_irq_spinlock;
	do {
		struct ehca_eqe *eqe;
		eqe = ehca_poll_eq(shca, &shca->eq);
		if (!eqe)
			break;
		process_eqe(shca, eqe);
	} while (1);

unlock_irq_spinlock:
	spin_unlock_irqrestore(&eq->irq_spinlock, flags);
}
Example #11
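Initialization of the old ppc64 XICS layer: it resolves the RTAS tokens, walks the device tree for interrupt-presentation register ranges and the boot CPU's server numbers, maps the per-CPU presentation registers, hooks up the cascaded i8259 and the IPI, and finally opens the CPPR with 0xff followed by an iosync().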
void
xics_init_IRQ( void )
{
	int i;
	unsigned long intr_size = 0;
	struct device_node *np;
	uint *ireg, ilen, indx=0;

	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_off = rtas_token("ibm,int-off");

	np = find_type_devices("PowerPC-External-Interrupt-Presentation");
	if (!np) {
		printk(KERN_WARNING "Can't find Interrupt Presentation\n");
		udbg_printf("Can't find Interrupt Presentation\n");
		while (1);
	}
nextnode:
	ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", 0);
	if (ireg) {
		/*
		 * set node starting index for this node
		 */
		indx = *ireg;
	}

	ireg = (uint *)get_property(np, "reg", &ilen);
	if (!ireg) {
		printk(KERN_WARNING "Can't find Interrupt Reg Property\n");
		udbg_printf("Can't find Interrupt Reg Property\n");
		while (1);
	}
	
	while (ilen) {
		inodes[indx].addr = (unsigned long long)*ireg++ << 32;
		ilen -= sizeof(uint);
		inodes[indx].addr |= *ireg++;
		ilen -= sizeof(uint);
		inodes[indx].size = (unsigned long long)*ireg++ << 32;
		ilen -= sizeof(uint);
		inodes[indx].size |= *ireg++;
		ilen -= sizeof(uint);
		indx++;
		if (indx >= NR_CPUS) break;
	}

	np = np->next;
	if ((indx < NR_CPUS) && np) goto nextnode;

	/* Find the server numbers for the boot cpu. */
	for (np = find_type_devices("cpu"); np; np = np->next) {
		ireg = (uint *)get_property(np, "reg", &ilen);
		if (ireg && ireg[0] == hard_smp_processor_id()) {
			ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
			i = ilen / sizeof(int);
			if (ireg && i > 0) {
				default_server = ireg[0];
				default_distrib_server = ireg[i-1]; /* take last element */
			}
			break;
		}
	}

	intr_base = inodes[0].addr;
	intr_size = (ulong)inodes[0].size;

	np = find_type_devices("interrupt-controller");
	if (!np) {
		printk(KERN_WARNING "xics:  no ISA Interrupt Controller\n");
		xics_irq_8259_cascade = -1;
	} else {
		ireg = (uint *) get_property(np, "interrupts", 0);
		if (!ireg) {
			printk(KERN_WARNING "Can't find ISA Interrupts Property\n");
			udbg_printf("Can't find ISA Interrupts Property\n");
			while (1);
		}
		xics_irq_8259_cascade_real = *ireg;
		xics_irq_8259_cascade = virt_irq_create_mapping(xics_irq_8259_cascade_real);
	}

	if (naca->platform == PLATFORM_PSERIES) {
#ifdef CONFIG_SMP
		for (i = 0; i < naca->processorCount; ++i) {
			xics_info.per_cpu[i] =
			  __ioremap((ulong)inodes[get_hard_smp_processor_id(i)].addr, 
				  (ulong)inodes[get_hard_smp_processor_id(i)].size, _PAGE_NO_CACHE);
		}
#else
		xics_info.per_cpu[0] = __ioremap((ulong)intr_base, intr_size, _PAGE_NO_CACHE);
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_PSERIES
	/* actually iSeries does not use any of xics...but it has link dependencies
	 * for now, except this new one...
	 */
	} else if (naca->platform == PLATFORM_PSERIES_LPAR) {
		ops = &pSeriesLP_ops;
#endif
	}

	xics_8259_pic.enable = i8259_pic.enable;
	xics_8259_pic.disable = i8259_pic.disable;
	for (i = 0; i < 16; ++i)
		irq_desc[i].handler = &xics_8259_pic;
	for (; i < NR_IRQS; ++i)
		irq_desc[i].handler = &xics_pic;

	ops->cppr_info(0, 0xff);
	iosync();
	if (xics_irq_8259_cascade != -1) {
		if (request_irq(xics_irq_8259_cascade + XICS_IRQ_OFFSET, no_action,
				0, "8259 cascade", 0))
			printk(KERN_ERR "xics_init_IRQ: couldn't get 8259 cascade\n");
		i8259_init();
	}

#ifdef CONFIG_SMP
	real_irq_to_virt_map[XICS_IPI] = virt_irq_to_real_map[XICS_IPI] = XICS_IPI;
	request_irq(XICS_IPI + XICS_IRQ_OFFSET, xics_ipi_action, 0, "IPI", 0);
	irq_desc[XICS_IPI+XICS_IRQ_OFFSET].status |= IRQ_PER_CPU;
#endif
}