Example #1
File: nand.c Project: janfj/dd-wrt
int xway_nand_probe(struct platform_device *pdev)
{
//	ltq_gpio_request(PIN_CS1, 1, 0, 1, "NAND_CS1");
	ltq_gpio_request(PIN_CLE, 1, 0, 1, "NAND_CLE");
	ltq_gpio_request(PIN_ALE, 1, 0, 1, "NAND_ALE");
	if (ltq_is_ar9() || ltq_is_vr9()) {
		ltq_gpio_request(PIN_RDY, 1, 0, 0, "NAND_BSY");
		ltq_gpio_request(PIN_RD, 1, 0, 1, "NAND_RD");
	}

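	/* map the NAND base address window via EBU address select 1 */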
	ltq_ebu_w32((NAND_BASE_ADDRESS & 0x1fffff00)
		| ADDSEL1_MASK(3) | ADDSEL1_REGEN, LTQ_EBU_ADDSEL1);

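	/* configure EBU bus timing for the NAND chip select */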
	ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
		| BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
		| BUSCON1_CMULT4, LTQ_EBU_BUSCON1);

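	/* enable the NAND controller and route it to chip select 0 */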
	ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
		| NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
		| NAND_CON_IN_CS0 | NAND_CON_OUT_CS0, LTQ_EBU_NAND_CON);

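	/* issue a NAND reset command and wait for the write cycle to complete */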
	ltq_w32(NAND_WRITE_CMD_RESET,
		((u32 *)(NAND_BASE_ADDRESS | NAND_WRITE_CMD)));
	while ((ltq_ebu_r32(LTQ_EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
		;

	return 0;
}
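For context, a probe routine like this would normally be hooked up through a platform driver. The sketch below shows one plausible registration; the "ltq_nand" device name and the xway_nand_init() wrapper are assumptions for illustration, not part of the example above.

#include <linux/module.h>
#include <linux/platform_device.h>

/* plausible registration sketch; the "ltq_nand" name is an assumption */
static struct platform_driver xway_nand_driver = {
	.driver = {
		.name	= "ltq_nand",
		.owner	= THIS_MODULE,
	},
};

static int __init xway_nand_init(void)
{
	/* bind the probe above to a matching platform device, if any */
	return platform_driver_probe(&xway_nand_driver, xway_nand_probe);
}
module_init(xway_nand_init);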
Example #2
void __init ltq_register_spi(struct ltq_spi_platform_data *pdata,
		struct spi_board_info const *info, unsigned n)
{
	if (ltq_is_ar9())
		ltq_spi.resource = ltq_spi_resources_ar9;
	spi_register_board_info(info, n);
	ltq_spi.dev.platform_data = pdata;
	platform_device_register(&ltq_spi);
}
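A board setup file would typically call this helper once during init, passing its SPI slave table. The sketch below is illustrative only; the m25p80 modalias, bus/chip-select numbers, clock rate and the empty platform data are assumptions rather than values taken from the example.

#include <linux/spi/spi.h>

/* illustrative board hook-up; all values here are placeholders */
static struct ltq_spi_platform_data board_spi_pdata;

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "m25p80",
		.bus_num	= 0,
		.chip_select	= 0,
		.max_speed_hz	= 10 * 1000 * 1000,
	},
};

static void __init board_init_spi(void)
{
	ltq_register_spi(&board_spi_pdata, board_spi_devices,
			ARRAY_SIZE(board_spi_devices));
}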
Example #3
void __init
ltq_register_etop(struct ltq_eth_data *eth)
{
	/* only register the gphy on socs that have one */
	if (ltq_is_ar9() || ltq_is_vr9())
		ltq_etop.num_resources = 2;
	if (eth) {
		ltq_etop.dev.platform_data = eth;
		platform_device_register(&ltq_etop);
	}
}
void __init ltq_register_gpio(void)
{
	platform_device_register_simple("ltq_gpio", 0,
		&ltq_gpio_resource[0], 1);
	platform_device_register_simple("ltq_gpio", 1,
		&ltq_gpio_resource[1], 1);

	/* AR9 and VR9 have an extra gpio block */
	if (ltq_is_ar9() || ltq_is_vr9()) {
		platform_device_register_simple("ltq_gpio", 2,
			&ltq_gpio_resource[2], 1);
	}
}
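A machine init routine would call these helpers roughly as sketched below; the ltq_eth_data contents and the board_init() name are placeholders meant only to show the calling convention.

#include <linux/phy.h>

/* hypothetical machine init; the MII mode value is an assumption */
static struct ltq_eth_data board_eth_data = {
	.mii_mode = PHY_INTERFACE_MODE_MII,
};

static void __init board_init(void)
{
	ltq_register_gpio();
	ltq_register_etop(&board_eth_data);
}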
Example #5
void __init arch_init_irq(void)
{
	int i;

	if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0)
		panic("Failed to insert icu memory");

	if (!request_mem_region(ltq_icu_resource.start,
			resource_size(&ltq_icu_resource), "icu"))
		panic("Failed to request icu memory");

	ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start,
				resource_size(&ltq_icu_resource));
	if (!ltq_icu_membase)
		panic("Failed to remap icu memory");

	if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0)
		panic("Failed to insert eiu memory");

	if (!request_mem_region(ltq_eiu_resource.start,
			resource_size(&ltq_eiu_resource), "eiu"))
		panic("Failed to request eiu memory");

	ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start,
				resource_size(&ltq_eiu_resource));
	if (!ltq_eiu_membase)
		panic("Failed to remap eiu memory");

	/* turn off all irqs by default and ack anything left pending */
	for (i = 0; i < 5; i++) {
		ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET));
		ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
	}

	mips_cpu_irq_init();

	for (i = 2; i <= 6; i++)
		setup_irq(i, &cascade);

	if (cpu_has_vint) {
		pr_info("Setting up vectored interrupts\n");
		set_vi_handler(2, ltq_hw0_irqdispatch);
		set_vi_handler(3, ltq_hw1_irqdispatch);
		set_vi_handler(4, ltq_hw2_irqdispatch);
		set_vi_handler(5, ltq_hw3_irqdispatch);
		set_vi_handler(6, ltq_hw4_irqdispatch);
		set_vi_handler(7, ltq_hw5_irqdispatch);
	}

	for (i = INT_NUM_IRQ0;
		i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++)
		if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) ||
			(i == LTQ_EIU_IR2))
			irq_set_chip_and_handler(i, &ltq_eiu_type,
				handle_level_irq);
		/* EIU3-5 only exist on ar9 and vr9 */
		else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) ||
			(i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9()))
			irq_set_chip_and_handler(i, &ltq_eiu_type,
				handle_level_irq);
		else
			irq_set_chip_and_handler(i, &ltq_irq_type,
				handle_level_irq);

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
		IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#else
	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
		IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#endif
}
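The setup_irq() calls above rely on a cascade action defined elsewhere in the file; a plausible definition (not copied from this example) would be:

/* plausible cascade action for the setup_irq() calls above; no_action is
 * the kernel's stub handler, the real dispatch happens in the
 * ltq_hw*_irqdispatch() handlers */
static struct irqaction cascade = {
	.handler = no_action,
	.name = "cascade",
};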