Example #1
static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
{
	int i = 0, old_cpu, cpu, int_on, k;
	u64 cur_ints;
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;
	unsigned int irq_dirty;

	i = first_cpu(mask);
	if (cpus_weight(mask) > 1) {
		printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
		return;
	}

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	spin_lock_irqsave(&desc->lock, flags);
	spin_lock(&bcm1480_imr_lock);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = bcm1480_irq_owner[irq];
	irq_dirty = irq;
	if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty < BCM1480_NR_IRQS)) {
		irq_dirty -= BCM1480_NR_IRQS_HALF;
	}

	for (k = 0; k < 2; k++) { /* Loop through the high and low interrupt mask registers */
		cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		int_on = !(cur_ints & (((u64) 1) << irq_dirty));
		if (int_on) {
			/* If it was on, mask it */
			cur_ints |= (((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
		bcm1480_irq_owner[irq] = cpu;
		if (int_on) {
			/* unmask for the new CPU */
			cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
			cur_ints &= ~(((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
	}
	spin_unlock(&bcm1480_imr_lock);
	spin_unlock_irqrestore(&desc->lock, flags);
}
Example #2
static void sb1250_set_affinity(unsigned int irq, cpumask_t mask)
{
	int i = 0, old_cpu, cpu, int_on;
	u64 cur_ints;
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;

	i = first_cpu(mask);

	if (cpus_weight(mask) > 1) {
		printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
		return;
	}

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	spin_lock_irqsave(&desc->lock, flags);
	spin_lock(&sb1250_imr_lock);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = sb1250_irq_owner[irq];
	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(old_cpu) +
					R_IMR_INTERRUPT_MASK));
	int_on = !(cur_ints & (((u64) 1) << irq));
	if (int_on) {
		/* If it was on, mask it */
		cur_ints |= (((u64) 1) << irq);
		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(old_cpu) +
					R_IMR_INTERRUPT_MASK));
	}
	sb1250_irq_owner[irq] = cpu;
	if (int_on) {
		/* unmask for the new CPU */
		cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
		cur_ints &= ~(((u64) 1) << irq);
		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	}
	spin_unlock(&sb1250_imr_lock);
	spin_unlock_irqrestore(&desc->lock, flags);
}
Example #3
static int bcm1480_set_affinity(struct irq_data *d, const struct cpumask *mask,
				bool force)
{
	unsigned int irq_dirty, irq = d->irq;
	int i = 0, old_cpu, cpu, int_on, k;
	u64 cur_ints;
	unsigned long flags;

	i = cpumask_first(mask);

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	raw_spin_lock_irqsave(&bcm1480_imr_lock, flags);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = bcm1480_irq_owner[irq];
	irq_dirty = irq;
	if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty < BCM1480_NR_IRQS)) {
		irq_dirty -= BCM1480_NR_IRQS_HALF;
	}

	for (k = 0; k < 2; k++) { /* Loop through the high and low interrupt mask registers */
		cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		int_on = !(cur_ints & (((u64) 1) << irq_dirty));
		if (int_on) {
			/* If it was on, mask it */
			cur_ints |= (((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
		bcm1480_irq_owner[irq] = cpu;
		if (int_on) {
			/* unmask for the new CPU */
			cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
			cur_ints &= ~(((u64) 1) << irq_dirty);
			____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
		}
	}
	raw_spin_unlock_irqrestore(&bcm1480_imr_lock, flags);

	return 0;
}
Example #4
static int sb1250_set_affinity(struct irq_data *d, const struct cpumask *mask,
			       bool force)
{
	int i = 0, old_cpu, cpu, int_on;
	unsigned int irq = d->irq;
	u64 cur_ints;
	unsigned long flags;

	i = cpumask_first(mask);

	/* Convert logical CPU to physical CPU */
	cpu = cpu_logical_map(i);

	/* Protect against other affinity changers and IMR manipulation */
	raw_spin_lock_irqsave(&sb1250_imr_lock, flags);

	/* Swizzle each CPU's IMR (but leave the IP selection alone) */
	old_cpu = sb1250_irq_owner[irq];
	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(old_cpu) +
					R_IMR_INTERRUPT_MASK));
	int_on = !(cur_ints & (((u64) 1) << irq));
	if (int_on) {
		/* If it was on, mask it */
		cur_ints |= (((u64) 1) << irq);
		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(old_cpu) +
					R_IMR_INTERRUPT_MASK));
	}
	sb1250_irq_owner[irq] = cpu;
	if (int_on) {
		/* unmask for the new CPU */
		cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
		cur_ints &= ~(((u64) 1) << irq);
		____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	}
	raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags);

	return 0;
}
Example #5
void sb1250_unmask_irq(int cpu, int irq)
{
	unsigned long flags;
	u64 cur_ints;

	raw_spin_lock_irqsave(&sb1250_imr_lock, flags);
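	/* clear the IRQ's bit in this CPU's interrupt mask register to unmask it */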
	cur_ints = ____raw_readq(IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	cur_ints &= ~(((u64) 1) << irq);
	____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
					R_IMR_INTERRUPT_MASK));
	raw_spin_unlock_irqrestore(&sb1250_imr_lock, flags);
}
Example #6
void bcm1480_unmask_irq(int cpu, int irq)
{
	unsigned long flags;
	u64 cur_ints, hl_spacing;

	spin_lock_irqsave(&bcm1480_imr_lock, flags);
	hl_spacing = 0;
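	/* IRQs in the upper half are controlled by the second mask register */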
	if ((irq >= BCM1480_NR_IRQS_HALF) && (irq < BCM1480_NR_IRQS)) {
		hl_spacing = BCM1480_IMR_HL_SPACING;
		irq -= BCM1480_NR_IRQS_HALF;
	}
	cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
	cur_ints &= ~(((u64) 1) << irq);
	____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + hl_spacing));
	spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
}
Example #7
static void __init rbtx4939_ebusc_setup(void)
{
	int i;
	unsigned int sp;

	/* use user-configured speed */
	sp = TX4939_EBUSC_CR(0) & 0x30;
	default_ebccr[0] |= sp;
	default_ebccr[1] |= sp;
	default_ebccr[2] |= sp;
	/* initialize the remaining settings ourselves */
	for (i = 0; i < ARRAY_SIZE(default_ebccr); i++) {
		if (default_ebccr[i])
			____raw_writeq(default_ebccr[i],
				       &tx4939_ebuscptr->cr[i]);
		else
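			/* no default value configured: just clear bit 3 of the current control register */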
			____raw_writeq(____raw_readq(&tx4939_ebuscptr->cr[i])
				       & ~8,
				       &tx4939_ebuscptr->cr[i]);
	}
}
Example #8
static void __init rbtx4939_update_ioc_pen(void)
{
	__u64 pcfg = ____raw_readq(&tx4939_ccfgptr->pcfg);
	__u64 ccfg = ____raw_readq(&tx4939_ccfgptr->ccfg);
	__u8 pe1 = readb(rbtx4939_pe1_addr);
	__u8 pe2 = readb(rbtx4939_pe2_addr);
	__u8 pe3 = readb(rbtx4939_pe3_addr);
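	/* PE1: ATA vs RMII Ethernet pin multiplexing */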
	if (pcfg & TX4939_PCFG_ATA0MODE)
		pe1 |= RBTX4939_PE1_ATA(0);
	else
		pe1 &= ~RBTX4939_PE1_ATA(0);
	if (pcfg & TX4939_PCFG_ATA1MODE) {
		pe1 |= RBTX4939_PE1_ATA(1);
		pe1 &= ~(RBTX4939_PE1_RMII(0) | RBTX4939_PE1_RMII(1));
	} else {
		pe1 &= ~RBTX4939_PE1_ATA(1);
		if (pcfg & TX4939_PCFG_ET0MODE)
			pe1 |= RBTX4939_PE1_RMII(0);
		else
			pe1 &= ~RBTX4939_PE1_RMII(0);
		if (pcfg & TX4939_PCFG_ET1MODE)
			pe1 |= RBTX4939_PE1_RMII(1);
		else
			pe1 &= ~RBTX4939_PE1_RMII(1);
	}
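	/* PE3: video port pin routing */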
	if (ccfg & TX4939_CCFG_PTSEL)
		pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_P |
			 RBTX4939_PE3_VP_S);
	else {
		__u64 vmode = pcfg &
			(TX4939_PCFG_VSSMODE | TX4939_PCFG_VPSMODE);
		if (vmode == 0)
			pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_P |
				 RBTX4939_PE3_VP_S);
		else if (vmode == TX4939_PCFG_VPSMODE) {
			pe3 |= RBTX4939_PE3_VP_P;
			pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_S);
		} else if (vmode == TX4939_PCFG_VSSMODE) {
			pe3 |= RBTX4939_PE3_VP | RBTX4939_PE3_VP_S;
			pe3 &= ~RBTX4939_PE3_VP_P;
		} else {
			pe3 |= RBTX4939_PE3_VP | RBTX4939_PE3_VP_P;
			pe3 &= ~RBTX4939_PE3_VP_S;
		}
	}
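	/* PE2: SPI vs SIO pin multiplexing */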
	if (pcfg & TX4939_PCFG_SPIMODE) {
		if (pcfg & TX4939_PCFG_SIO2MODE_GPIO)
			pe2 &= ~(RBTX4939_PE2_SIO2 | RBTX4939_PE2_SIO0);
		else {
			if (pcfg & TX4939_PCFG_SIO2MODE_SIO2) {
				pe2 |= RBTX4939_PE2_SIO2;
				pe2 &= ~RBTX4939_PE2_SIO0;
			} else {
				pe2 |= RBTX4939_PE2_SIO0;
				pe2 &= ~RBTX4939_PE2_SIO2;
			}
		}
		if (pcfg & TX4939_PCFG_SIO3MODE)
			pe2 |= RBTX4939_PE2_SIO3;
		else
			pe2 &= ~RBTX4939_PE2_SIO3;
		pe2 &= ~RBTX4939_PE2_SPI;
	} else {
		pe2 |= RBTX4939_PE2_SPI;
		pe2 &= ~(RBTX4939_PE2_SIO3 | RBTX4939_PE2_SIO2 |
			 RBTX4939_PE2_SIO0);
	}
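	/* PE2: I2S pins used as GPIO */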
	if ((pcfg & TX4939_PCFG_I2SMODE_MASK) == TX4939_PCFG_I2SMODE_GPIO)
		pe2 |= RBTX4939_PE2_GPIO;
	else
		pe2 &= ~RBTX4939_PE2_GPIO;
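	/* write the updated pin-enable values back to the IOC registers */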
	writeb(pe1, rbtx4939_pe1_addr);
	writeb(pe2, rbtx4939_pe2_addr);
	writeb(pe3, rbtx4939_pe3_addr);
}
Example #9
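/* read one 64-bit register from the RNG block at the given offset */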
static u64 read_rng(void __iomem *base, unsigned int offset)
{
	return ____raw_readq(base + offset);
}
Example #10
static void __init rbtx4938_mem_setup(void)
{
	unsigned long long pcfg;

	if (txx9_master_clock == 0)
		txx9_master_clock = 25000000; /* 25MHz */

	tx4938_setup();

#ifdef CONFIG_PCI
	txx9_alloc_pci_controller(&txx9_primary_pcic, 0, 0, 0, 0);
	txx9_board_pcibios_setup = tx4927_pcibios_setup;
#else
	set_io_port_base(RBTX4938_ETHER_BASE);
#endif

	tx4938_sio_init(7372800, 0);

#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_PIO58_61
	pr_info("PIOSEL: disabling both ATA and NAND selection\n");
	txx9_clear64(&tx4938_ccfgptr->pcfg,
		     TX4938_PCFG_NDF_SEL | TX4938_PCFG_ATA_SEL);
#endif

#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_NAND
	pr_info("PIOSEL: enabling NAND selection\n");
	txx9_set64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_NDF_SEL);
	txx9_clear64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_ATA_SEL);
#endif

#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_ATA
	pr_info("PIOSEL: enabling ATA selection\n");
	txx9_set64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_ATA_SEL);
	txx9_clear64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_NDF_SEL);
#endif

#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_KEEP
	pcfg = ____raw_readq(&tx4938_ccfgptr->pcfg);
	pr_info("PIOSEL: NAND %s, ATA %s\n",
		(pcfg & TX4938_PCFG_NDF_SEL) ? "enabled" : "disabled",
		(pcfg & TX4938_PCFG_ATA_SEL) ? "enabled" : "disabled");
#endif

	rbtx4938_spi_setup();
	pcfg = ____raw_readq(&tx4938_ccfgptr->pcfg);	/* updated */
	/* fixup piosel */
	if ((pcfg & (TX4938_PCFG_ATA_SEL | TX4938_PCFG_NDF_SEL)) ==
	    TX4938_PCFG_ATA_SEL)
		writeb((readb(rbtx4938_piosel_addr) & 0x03) | 0x04,
		       rbtx4938_piosel_addr);
	else if ((pcfg & (TX4938_PCFG_ATA_SEL | TX4938_PCFG_NDF_SEL)) ==
		 TX4938_PCFG_NDF_SEL)
		writeb((readb(rbtx4938_piosel_addr) & 0x03) | 0x08,
		       rbtx4938_piosel_addr);
	else
		writeb(readb(rbtx4938_piosel_addr) & ~(0x08 | 0x04),
		       rbtx4938_piosel_addr);

	rbtx4938_fpga_resource.name = "FPGA Registers";
	rbtx4938_fpga_resource.start = CPHYSADDR(RBTX4938_FPGA_REG_ADDR);
	rbtx4938_fpga_resource.end = CPHYSADDR(RBTX4938_FPGA_REG_ADDR) + 0xffff;
	rbtx4938_fpga_resource.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&txx9_ce_res[2], &rbtx4938_fpga_resource))
		printk(KERN_ERR "request resource for fpga failed\n");

	_machine_restart = rbtx4938_machine_restart;

	writeb(0xff, rbtx4938_led_addr);
	printk(KERN_INFO "RBTX4938 --- FPGA(Rev %02x) DIPSW:%02x,%02x\n",
	       readb(rbtx4938_fpga_rev_addr),
	       readb(rbtx4938_dipsw_addr), readb(rbtx4938_bdipsw_addr));
}