Example No. 1
/**********************************************
 * Routine: dram_init
 * Description: sets U-Boot's idea of SDRAM size
 **********************************************/
int dram_init(void)
{
#define NOT_EARLY 0
	DECLARE_GLOBAL_DATA_PTR;
	unsigned int size0 = 0, size1 = 0;
	u32 mtype, btype;

	btype = get_board_type();
	mtype = get_mem_type();
#ifndef CONFIG_3430ZEBU
	/* FIXME: don't know why this function crashes on ZeBu */
	display_board_info(btype);
#endif
	/* If a second bank of DDR is attached to CS1 this is
	 * where it can be started.  Early init code will init
	 * memory on CS0.
	 */
	if ((mtype == DDR_COMBO) || (mtype == DDR_STACKED)) {
		do_sdrc_init(SDRC_CS1_OSET, NOT_EARLY);
	}
	size0 = get_sdr_cs_size(SDRC_CS0_OSET);
	size1 = get_sdr_cs_size(SDRC_CS1_OSET);

	gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
	gd->bd->bi_dram[0].size = size0;
	gd->bd->bi_dram[1].start = PHYS_SDRAM_1 + size0;
	gd->bd->bi_dram[1].size = size1;

	return 0;
}
Example No. 2
/******************************************************************************
 * Routine: dram_init
 * Description: sets U-Boot's idea of SDRAM size
 *****************************************************************************/
int dram_init(void)
{
	DECLARE_GLOBAL_DATA_PTR;
	unsigned int size0 = 0, size1 = 0;
	u32 mtype, btype;

	btype = get_board_type();
	mtype = get_mem_type();

	display_board_info(btype);

	/* If a second bank of DDR is attached to CS1 this is
	 * where it can be started.  Early init code will init
	 * memory on CS0.
	 */
	if ((mtype == DDR_COMBO) || (mtype == DDR_STACKED))
		do_sdrc_init(SDRC_CS1_OSET, NOT_EARLY);

	size0 = get_sdr_cs_size(SDRC_CS0_OSET);
	size1 = get_sdr_cs_size(SDRC_CS1_OSET);

	gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
	gd->bd->bi_dram[0].size = size0;
	gd->bd->bi_dram[1].start = PHYS_SDRAM_1 + size0;
	gd->bd->bi_dram[1].size = size1;

	return 0;
}
Example No. 3
/**********************************************
 * Routine: dram_init
 * Description: sets U-Boot's idea of SDRAM size
 **********************************************/
int dram_init(void)
{
	unsigned int size0 = 0, size1 = 0;
	u32 mtype, btype, rev = 0, cpu = 0;
#define NOT_EARLY 0

	btype = get_board_type();
	mtype = get_mem_type();
	rev = get_cpu_rev();
	cpu = get_cpu_type();

	display_board_info(btype);

	if ((mtype == DDR_COMBO) || (mtype == DDR_STACKED)) {
		/* init other chip select */
		do_sdrc_init(SDRC_CS1_OSET, NOT_EARLY);
	}

	size0 = get_sdr_cs_size(SDRC_CS0_OSET);
	size1 = get_sdr_cs_size(SDRC_CS1_OSET);

	gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
	gd->bd->bi_dram[0].size = size0;
#if CONFIG_NR_DRAM_BANKS > 1
	gd->bd->bi_dram[1].start = PHYS_SDRAM_1 + size0;
	gd->bd->bi_dram[1].size = size1;
#endif

	return 0;
}
Example No. 4
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
#ifndef CONFIG_SQUASHFS_DEBUGGER_AUTO_DIAGNOSE
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example No. 5
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
    BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);

    return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
                              PCI_IO_VIRT_BASE + offset + SZ_64K,
                              phys_addr,
                              __pgprot(get_mem_type(MT_DEVICE)->prot_pte));
}
Example No. 6
void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be section aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped
	 */
	if (pfn_valid(pfn)) {
		WARN(1, "BUG: Your driver calls ioremap() on\n"
			"system memory.  This leads to architecturally\n"
			"unpredictable behaviour, and ioremap() will fail in\n"
			"the next kernel release. Please fix your driver.\n");
		return NULL;
	}

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

	if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_UNICORE_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example No. 7
void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
				void *virt_base, unsigned long flags)
{
	int ret;
	unsigned int offset = buffer->priv_phys - phys_base;
	unsigned long start = ((unsigned long)virt_base) + offset;
	const struct mem_type *type = ION_IS_CACHED(flags) ?
				get_mem_type(MT_DEVICE_CACHED) :
				get_mem_type(MT_DEVICE);

	if (phys_base > buffer->priv_phys)
		return NULL;

	ret = ioremap_pages(start, buffer->priv_phys, buffer->size, type);

	if (!ret)
		return (void *)start;
	else
		return NULL;
}
Example No. 8
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'mtype' selects the memory type used for this mapping (resolved via
 * get_mem_type()).  See <asm/pgtable.h> for more information.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, type);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example No. 9
/*
 * The trick of making the zero page strongly ordered no longer
 * works. We no longer want to make a second alias to the zero
 * page that is strongly ordered. Manually changing the bits
 * in the page table for the zero page would have side effects
 * elsewhere that aren't necessary. The result is that we need
 * to get a page from elsewhere. Given when the first call
 * to write_to_strongly_ordered_memory() occurs, using bootmem
 * to get a page makes the most sense.
 */
void map_page_strongly_ordered(void)
{
#if defined(CONFIG_ARCH_MSM7X27)
	long unsigned int phys;

	if (strongly_ordered_page)
		return;

	strongly_ordered_page = alloc_bootmem(PAGE_SIZE);
	phys = __pa(strongly_ordered_page);
	ioremap_page((long unsigned int) strongly_ordered_page,
		phys,
		get_mem_type(MT_DEVICE_STRONGLY_ORDERED));
	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
#endif
}
Example No. 10
void *fmem_map_virtual_area(int cacheability)
{
    unsigned long addr;
    const struct mem_type *type;
    int ret;

    addr = (unsigned long) fmem_data.area->addr;
    type = get_mem_type(cacheability);
    if (type == NULL)   /* guard before dereferencing type->prot_pte */
        return ERR_PTR(-EINVAL);

    ret = ioremap_page_range(addr, addr + fmem_data.size,
                             fmem_data.phys, __pgprot(type->prot_pte));
    if (ret)
        return ERR_PTR(ret);

    fmem_data.virt = fmem_data.area->addr;

    return fmem_data.virt;
}
Example No. 11
/**********************************************
 * Routine: dram_init
 * Description: sets U-Boot's idea of SDRAM size
 **********************************************/
int dram_init(void)
{
	DECLARE_GLOBAL_DATA_PTR;
	unsigned int size0 = 0, size1 = 0;
	u32 mtype, btype;
#ifdef CONFIG_DRIVER_OMAP24XX_I2C
	u8 data;
#endif
#define NOT_EARLY 0

#ifdef CONFIG_DRIVER_OMAP24XX_I2C
	i2c_init(CFG_I2C_SPEED, CFG_I2C_SLAVE);
	select_bus(1, CFG_I2C_SPEED);	/* select bus with T2 on it */
#endif
	btype = get_board_type();
	mtype = get_mem_type();
	display_board_info(btype);
#ifdef CONFIG_DRIVER_OMAP24XX_I2C
	if (btype == BOARD_SDP_2430_T2) {
		/* Enable VMODE following voltage switching */
		data = 0x24;	/* set the floor voltage to 1.05 V */
		i2c_write(I2C_TRITON2, 0xBB, 1, &data, 1);
		data = 0x38;	/* set the roof voltage to 1.3 V */
		i2c_write(I2C_TRITON2, 0xBC, 1, &data, 1);
		data = 0x0;	/* set jump mode for VDD voltage transition */
		i2c_write(I2C_TRITON2, 0xBD, 1, &data, 1);
		data = 1;	/* enable voltage scaling */
		i2c_write(I2C_TRITON2, 0xBA, 1, &data, 1);
	}
#endif

	if ((mtype == DDR_COMBO) || (mtype == DDR_STACKED)) {
		/* init other chip select and map CS1 right after CS0 */
		do_sdrc_init(SDRC_CS1_OSET, NOT_EARLY);
	}
	size0 = get_sdr_cs_size(SDRC_CS0_OSET);
	size1 = get_sdr_cs_size(SDRC_CS1_OSET);

	gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
	gd->bd->bi_dram[0].size = size0;
	gd->bd->bi_dram[1].start = PHYS_SDRAM_1 + size0;
	gd->bd->bi_dram[1].size = size1;

	return 0;
}
Example No. 12
void *fmem_map_virtual_area(int cacheability)
{
	unsigned long addr;
	const struct mem_type *type;
	int ret;

	addr = (unsigned long) fmem_data.area->addr;
	type = get_mem_type(cacheability);

	if (type == NULL)
		return ERR_PTR(-EINVAL);

	ret = ioremap_pages(addr, fmem_data.phys, fmem_data.size, type);
	if (ret)
		return ERR_PTR(ret);

	fmem_data.virt = fmem_data.area->addr;

	return fmem_data.virt;
}
Example No. 13
/**********************************************
 * Routine: dram_init
 * Description: sets U-Boot's idea of SDRAM size
 **********************************************/
int dram_init(void)
{
	DECLARE_GLOBAL_DATA_PTR;
	unsigned int size0 = 0, size1 = 0;
	u32 mtype, btype, rev, cpu;
	u8 chg_on = 0x5;	/* enable charging of the backup battery */
	u8 vmode_on = 0x8C;
#define NOT_EARLY 0

	i2c_init(CFG_I2C_SPEED, CFG_I2C_SLAVE);	/* need this a bit early */

	btype = get_board_type();
	mtype = get_mem_type();
	rev = get_cpu_rev();
	cpu = get_cpu_type();

	display_board_info(btype);
	if (btype == BOARD_H4_MENELAUS) {
		update_mux(btype, mtype);	/* combo part on menelaus */
		i2c_write(I2C_MENELAUS, 0x20, 1, &chg_on, 1);	/* fix POR reset bug */
		i2c_write(I2C_MENELAUS, 0x2, 1, &vmode_on, 1);	/* VCORE change on VMODE */
	}

	if ((mtype == DDR_COMBO) || (mtype == DDR_STACKED)) {
		do_sdrc_init(SDRC_CS1_OSET, NOT_EARLY);	/* init other chip select */
	}
	size0 = get_sdr_cs_size(SDRC_CS0_OSET);
	size1 = get_sdr_cs_size(SDRC_CS1_OSET);

	gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
	gd->bd->bi_dram[0].size = size0;
	if (rev == CPU_2420_2422_ES1)	/* ES1's 128MB remap granularity isn't worth doing */
		gd->bd->bi_dram[1].start = PHYS_SDRAM_2;
	else	/* ES2 and above can remap at 32MB granularity */
		gd->bd->bi_dram[1].start = PHYS_SDRAM_1 + size0;
	gd->bd->bi_dram[1].size = size1;

	return 0;
}
Example No. 14
/*************************************************************************
 * do_sdrc_init(): initialize the SDRAM for use.
 *  -called from low level code with stack only.
 *  -code sets up SDRAM timing and muxing for 2422 or 2420.
 *  -optimal settings can be placed here, or redone after i2c
 *      inspection of board info
 *
 *  This is a bit ugly, but should handle all memory modules
 *   used with the H4. The first time through this code from s_init()
 *   we configure the first chip select.  Later on we come back and
 *   will configure the 2nd chip select if it exists.
 *
 **************************************************************************/
void do_sdrc_init(u32 offset, u32 early)
{
	u32 cpu, dllen = 0, rev, common = 0, cs0 = 0, pmask = 0, pass_type, mtype;
	sdrc_data_t *sdata;	 /* do not change type */
	u32 a, b, r;

	static const sdrc_data_t sdrc_2422 = {
		H4_2422_SDRC_SHARING, H4_2422_SDRC_MDCFG_0_DDR, 0,
		H4_2422_SDRC_ACTIM_CTRLA_0, H4_2422_SDRC_ACTIM_CTRLB_0,
		H4_2422_SDRC_RFR_CTRL, H4_2422_SDRC_MR_0_DDR,
		0, H4_2422_SDRC_DLLAB_CTRL
	};
	static const sdrc_data_t sdrc_2420 = {
		H4_2420_SDRC_SHARING, H4_2420_SDRC_MDCFG_0_DDR, H4_2420_SDRC_MDCFG_0_SDR,
		H4_2420_SDRC_ACTIM_CTRLA_0, H4_2420_SDRC_ACTIM_CTRLB_0,
		H4_2420_SDRC_RFR_CTRL, H4_2420_SDRC_MR_0_DDR, H4_2420_SDRC_MR_0_SDR,
		H4_2420_SDRC_DLLAB_CTRL
	};

	if (offset == SDRC_CS0_OSET)
		cs0 = common = 1;	/* internal regs are shared between both chip selects */

	cpu = get_cpu_type();
	rev = get_cpu_rev();

	/* A warning is generated here, though the generated code is correct.
	 * This may bite later, but is OK for now; there is only so much C
	 * you can write when operating on the stack alone.
	 */
	if (cpu == CPU_2422) {
		sdata = (sdrc_data_t *)&sdrc_2422;
		pass_type = STACKED;
	} else {
		sdata = (sdrc_data_t *)&sdrc_2420;
		pass_type = IP_DDR;
	}

	__asm__ __volatile__("" : : : "memory");	/* limit compiler scope */

	/* u-boot is compiled to run in DDR or SRAM at 8xxxxxxx or 4xxxxxxx.
	 * If we are running in flash prior to relocation and we use data
	 * here which is not pc relative we need to get the address correct.
	 * We need to find the current flash mapping to dress up the initial
	 * pointer load.  As long as this is const data we should be ok.
	 */
	if (early && running_in_flash()) {
		sdata = (sdrc_data_t *)(((u32)sdata & 0x0003FFFF) | get_gpmc0_base());
		/* NOR internal boot offset is 0x4000 from xloader signature */
		if (running_from_internal_boot())
			sdata = (sdrc_data_t *)((u32)sdata + 0x4000);
	}

	if (!early && (((mtype = get_mem_type()) == DDR_COMBO) ||
		       (mtype == DDR_STACKED))) {
		if (mtype == DDR_COMBO) {
			pmask = BIT2;	/* combo part has a shared CKE signal, can't use feature */
			pass_type = COMBO_DDR;	/* CS1 config */
			__raw_writel(__raw_readl(SDRC_POWER) & ~pmask, SDRC_POWER);
		}
		if (rev != CPU_2420_2422_ES1)	/* for ES2 and above, smooth things out */
			make_cs1_contiguous();
	}

next_mem_type:
	if (common) {	/* do an SDRC reset between types to clear regs */
		__raw_writel(SOFTRESET, SDRC_SYSCONFIG);	/* reset sdrc */
		wait_on_value(BIT0, BIT0, SDRC_STATUS, 12000000); /* wait till reset done set */
		__raw_writel(0, SDRC_SYSCONFIG);	/* clear soft reset */
		__raw_writel(sdata->sdrc_sharing, SDRC_SHARING);
#ifdef POWER_SAVE
		__raw_writel(__raw_readl(SMS_SYSCONFIG) | SMART_IDLE, SMS_SYSCONFIG);
		__raw_writel(sdata->sdrc_sharing | SMART_IDLE, SDRC_SHARING);
		__raw_writel(__raw_readl(SDRC_POWER) | BIT6, SDRC_POWER);
#endif
	}

	if ((pass_type == IP_DDR) || (pass_type == STACKED)) /* (IP ddr-CS0), (2422-CS0/CS1) */
		__raw_writel(sdata->sdrc_mdcfg_0_ddr, SDRC_MCFG_0 + offset);
	else if (pass_type == COMBO_DDR) /* (combo-CS0/CS1) */
		__raw_writel(H4_2420_COMBO_MDCFG_0_DDR, SDRC_MCFG_0 + offset);
	else if (pass_type == IP_SDR) /* (IP sdr-CS0) */
		__raw_writel(sdata->sdrc_mdcfg_0_sdr, SDRC_MCFG_0 + offset);

	a = sdata->sdrc_actim_ctrla_0;
	b = sdata->sdrc_actim_ctrlb_0;
	r = sdata->sdrc_dllab_ctrl;

	/* work around ES1 DDR issues */
	if ((pass_type != IP_SDR) && (rev == CPU_2420_2422_ES1)) {
		a = H4_242x_SDRC_ACTIM_CTRLA_0_ES1;
		b = H4_242x_SDRC_ACTIM_CTRLB_0_ES1;
		r = H4_242x_SDRC_RFR_CTRL_ES1;
	}

	if (cs0) {
		__raw_writel(a, SDRC_ACTIM_CTRLA_0);
		__raw_writel(b, SDRC_ACTIM_CTRLB_0);
	} else {
		__raw_writel(a, SDRC_ACTIM_CTRLA_1);
		__raw_writel(b, SDRC_ACTIM_CTRLB_1);
	}
	__raw_writel(r, SDRC_RFR_CTRL + offset);

	/* init sequence for mDDR/mSDR using manual commands (DDR is a bit different) */
	__raw_writel(CMD_NOP, SDRC_MANUAL_0 + offset);
	sdelay(5000);	/* supposed to be 100us per design spec for mDDR/mSDR */
	__raw_writel(CMD_PRECHARGE, SDRC_MANUAL_0 + offset);
	__raw_writel(CMD_AUTOREFRESH, SDRC_MANUAL_0 + offset);
	__raw_writel(CMD_AUTOREFRESH, SDRC_MANUAL_0 + offset);

	/*
	 * CSx SDRC Mode Register
	 * Burst length = (4 - DDR) (2-SDR)
	 * Serial mode
	 * CAS latency = x
	 */
	if (pass_type == IP_SDR)
		__raw_writel(sdata->sdrc_mr_0_sdr, SDRC_MR_0 + offset);
	else
		__raw_writel(sdata->sdrc_mr_0_ddr, SDRC_MR_0 + offset);

	/* NOTE: ES1 242x _BUG_ DLL + External Bandwidth fix */
	if (rev == CPU_2420_2422_ES1) {
		dllen = (BIT0 | BIT3);	/* ES1: clear both bit 0 and bit 3 */
		__raw_writel(__raw_readl(SMS_CLASS_ARB0) | BURSTCOMPLETE_GROUP7,
			SMS_CLASS_ARB0);	/* enable burst complete for LCD */
	} else
		dllen = BIT0 | BIT1;	/* ES2: clear bits 0 and 1 (set phase to 72) */

	/* enable & load up DLL with good value for 75MHz, and set phase to 90
	 * ES1 recommends 90 phase, ES2 recommends 72 phase.
	 */
	if (common && (pass_type != IP_SDR)) {
		__raw_writel(sdata->sdrc_dllab_ctrl, SDRC_DLLA_CTRL);
		__raw_writel(sdata->sdrc_dllab_ctrl & ~(BIT2 | dllen), SDRC_DLLA_CTRL);
		__raw_writel(sdata->sdrc_dllab_ctrl, SDRC_DLLB_CTRL);
		__raw_writel(sdata->sdrc_dllab_ctrl & ~(BIT2 | dllen), SDRC_DLLB_CTRL);
	}
	sdelay(90000);

	if (mem_ok())
		return;	/* STACKED, or another already-configured type */
	++pass_type;	/* IP_DDR -> COMBO_DDR -> IP_SDR for CS0 */
	goto next_mem_type;
}
Example No. 15
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

#if 0 /* HACK - do allow RAM to be mapped, the problems are a bit overrated */
	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;
#endif

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example No. 16
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (pfn_valid(pfn)) {
		printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory.  This leads\n"
		       "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
		       "will fail in the next kernel release.  Please fix your driver.\n");
		WARN_ON(1);
	}

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
Example No. 17
void start_armboot (void)
{
	init_fnc_t **init_fnc_ptr;
	int i;
	uchar *buf;

	for (init_fnc_ptr = init_sequence; *init_fnc_ptr; ++init_fnc_ptr) {
		if ((*init_fnc_ptr)() != 0) {
			hang();
		}
	}

	misc_init_r();
	buf = (uchar *)CFG_LOADADDR;

	/* Always first try mmc without checking boot pins */
#ifndef CONFIG_OMAP3_BEAGLE
	if ((get_mem_type() == MMC_ONENAND) || (get_mem_type() == MMC_NAND))
#endif	/* CONFIG_OMAP3_BEAGLE */
		buf += mmc_boot(buf);

	if (buf == (uchar *)CFG_LOADADDR) {
		if (get_mem_type() == GPMC_NAND) {
#ifdef CFG_PRINTF
			printf("Booting from nand . . .\n");
#endif
			for (i = NAND_UBOOT_START; i < NAND_UBOOT_END; i += NAND_BLOCK_SIZE) {
				if (!nand_read_block(buf, i))
					buf += NAND_BLOCK_SIZE;	/* advance buf ptr */
			}
		}

		if (get_mem_type() == GPMC_ONENAND) {
#ifdef CFG_PRINTF
			printf("Booting from onenand . . .\n");
#endif
			for (i = ONENAND_START_BLOCK; i < ONENAND_END_BLOCK; i++) {
				if (!onenand_read_block(buf, i))
					buf += ONENAND_BLOCK_SIZE;
			}
		}
	}

#if defined(CONFIG_AM3517EVM)
	/*
	 * FIXME: currently copying the U-Boot image;
	 * ideally we should leverage the XIP feature here
	 */
	if (get_mem_type() == GPMC_NOR) {
		int size;
		printf("Booting from NOR Flash...\n");
		size = nor_read_boot(buf);
		if (size > 0)
			buf += size;
	}
#endif

	if (buf == (uchar *)CFG_LOADADDR)
		hang();

	/* go run U-Boot and never return */
	printf("Starting OS Bootloader...\n");
	((init_fnc_t *)CFG_LOADADDR)();

	/* should never come here */
}
Example No. 18
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
                                        unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
    const struct mem_type *type;
    int err;
    unsigned long addr;
    struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
    if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
        return NULL;
#endif

    type = get_mem_type(mtype);
    if (!type)
        return NULL;

    size = PAGE_ALIGN(offset + size);

    read_lock(&vmlist_lock);
    for (area = vmlist; area; area = area->next) {
        if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
            break;
        if (!(area->flags & VM_ARM_STATIC_MAPPING))
            continue;
        if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
            continue;
        if (__phys_to_pfn(area->phys_addr) > pfn ||
                __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
            continue;

        read_unlock(&vmlist_lock);
        addr = (unsigned long)area->addr;
        addr += __pfn_to_phys(pfn) - area->phys_addr;
        return (void __iomem *) (offset + addr);
    }
    read_unlock(&vmlist_lock);

    if (WARN_ON(pfn_valid(pfn)))
        return NULL;

    area = get_vm_area_caller(size, VM_IOREMAP, caller);
    if (!area)
        return NULL;
    addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
    if (DOMAIN_IO == 0 &&
            (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
             cpu_is_xsc3()) && pfn >= 0x100000 &&
            !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
        area->flags |= VM_ARM_SECTION_MAPPING;
        err = remap_area_supersections(addr, pfn, size, type);
    } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
        area->flags |= VM_ARM_SECTION_MAPPING;
        err = remap_area_sections(addr, pfn, size, type);
    } else
#endif
        err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
                                 __pgprot(type->prot_pte));

    if (err) {
        vunmap((void *)addr);
        return NULL;
    }

    flush_cache_vmap(addr, addr + size);
    return (void __iomem *) (offset + addr);
}
Example No. 19
/**********************************************
 * Routine: dram_init
 * Description: sets U-Boot's idea of SDRAM size
 **********************************************/
int dram_init(void)
{
#define NOT_EARLY 0
/* #define DEBUG */
#if defined(CONFIG_EPXX_DDR_512MB)
#define EARLY_INIT	1
#endif
	DECLARE_GLOBAL_DATA_PTR;
	unsigned int size0 = 0, size1 = 0;
	u32 mtype, btype;

	btype = get_board_type();
	mtype = get_mem_type();
#ifndef CONFIG_3430ZEBU
	/* FIXME: don't know why this function crashes on ZeBu */
	display_board_info(btype);
#endif
	/* If a second bank of DDR is attached to CS1 this is
	 * where it can be started.  Early init code will init
	 * memory on CS0.
	 */
	if ((mtype == DDR_COMBO) || (mtype == DDR_STACKED)) {
#if defined(CONFIG_EPXX_DDR_512MB)
		/* init CS1 early and map it contiguously after CS0 */
		do_sdrc_init(SDRC_CS1_OSET, EARLY_INIT);
		make_cs1_contiguous();
#else
		do_sdrc_init(SDRC_CS1_OSET, NOT_EARLY);
#endif
	}

#ifdef DEBUG
	{
		unsigned int reg = 0;

		reg = __raw_readl(SDRC_MCFG_0);
		printf("SDRC_MCFG_0: %08x\n", reg);

		reg = __raw_readl(SDRC_MCFG_1);
		printf("SDRC_MCFG_1: %08x\n", reg);

		reg = __raw_readl(SDRC_ACTIM_CTRLA_0);
		__raw_writel(reg, SDRC_ACTIM_CTRLA_1);
		printf("SDRC_ACTIM_CTRLA_0: %08x\n", reg);

		reg = __raw_readl(SDRC_ACTIM_CTRLB_0);
		__raw_writel(reg, SDRC_ACTIM_CTRLB_1);
		printf("SDRC_ACTIM_CTRLB_0: %08x\n", reg);

		reg = __raw_readl(SDRC_ACTIM_CTRLA_1);
		printf("SDRC_ACTIM_CTRLA_1: %08x\n", reg);

		reg = __raw_readl(SDRC_ACTIM_CTRLB_1);
		printf("SDRC_ACTIM_CTRLB_1: %08x\n", reg);

		reg = __raw_readl(SDRC_MANUAL_0);
		printf("SDRC_MANUAL_0: %08x\n", reg);

		reg = __raw_readl(SDRC_MANUAL_1);
		printf("SDRC_MANUAL_1: %08x\n", reg);

		reg = __raw_readl(SDRC_MR_0);
		printf("SDRC_MR_0: %08x\n", reg);

		reg = __raw_readl(SDRC_MR_1);
		printf("SDRC_MR_1: %08x\n", reg);

		reg = __raw_readl(SDRC_RFR_CTRL_0);
		__raw_writel(reg, SDRC_RFR_CTRL_1);
		printf("SDRC_RFR_CTRL_0: %08x\n", reg);

		reg = __raw_readl(SDRC_RFR_CTRL_1);
		printf("SDRC_RFR_CTRL_1: %08x\n", reg);

		reg = __raw_readl(CONTROL_PROG_IO0);
		printf("CONTROL_PROG_IO0: %08x\n", reg);

		reg = __raw_readl(CONTROL_PROG_IO1);
		printf("CONTROL_PROG_IO1: %08x\n", reg);

		reg = __raw_readl(SDRC_DLLA_CTRL);
		printf("SDRC_DLLA_CTRL: %08x\n", reg);
	}
#endif

	size0 = get_sdr_cs_size(SDRC_CS0_OSET);
	size1 = get_sdr_cs_size(SDRC_CS1_OSET);

	gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
	gd->bd->bi_dram[0].size = size0;
	gd->bd->bi_dram[1].start = PHYS_SDRAM_1 + size0;
	gd->bd->bi_dram[1].size = size1;

	return 0;
}