Exemple #1
0
/*
 * Probe for a Zenith Z-Note "ZNET" ethernet adaptor.
 *
 * The adaptor's configuration block ("NETIDBLK") lives somewhere in the
 * BIOS area (0xf0000-0xfffff); scan for it, pull the I/O base, IRQ, DMA
 * channels and station address out of it, claim the resources and fill
 * in the net_device operations.
 *
 * Returns 0 on success, -ENODEV if no adaptor is present, or -EBUSY if
 * the IRQ/DMA resources could not be claimed.
 */
int __init znet_probe(struct net_device *dev)
{
	int i;
	struct netidblk *netinfo;
	char *p;

	/* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */
	for(p = (char *)phys_to_virt(0xf0000); p < (char *)phys_to_virt(0x100000); p++)
		if (*p == 'N'  &&  strncmp(p, "NETIDBLK", 8) == 0)
			break;

	if (p >= (char *)phys_to_virt(0x100000)) {
		if (znet_debug > 1)
			printk(KERN_INFO "No Z-Note ethernet adaptor found.\n");
		return -ENODEV;
	}
	netinfo = (struct netidblk *)p;
	dev->base_addr = netinfo->iobase1;
	dev->irq = netinfo->irq1;

	printk(KERN_INFO "%s: ZNET at %#3lx,", dev->name, dev->base_addr);

	/* The station address is in the "netidblk" at 0x0f0000. */
	for (i = 0; i < 6; i++)
		printk(" %2.2x", dev->dev_addr[i] = netinfo->netid[i]);

	printk(", using IRQ %d DMA %d and %d.\n", dev->irq, netinfo->dma1,
		netinfo->dma2);

	if (znet_debug > 1) {
		printk(KERN_INFO "%s: vendor '%16.16s' IRQ1 %d IRQ2 %d DMA1 %d DMA2 %d.\n",
			   dev->name, netinfo->vendor,
			   netinfo->irq1, netinfo->irq2,
			   netinfo->dma1, netinfo->dma2);
		printk(KERN_INFO "%s: iobase1 %#x size %d iobase2 %#x size %d net type %2.2x.\n",
			   dev->name, netinfo->iobase1, netinfo->iosize1,
			   netinfo->iobase2, netinfo->iosize2, netinfo->nettype);
	}

	if (znet_debug > 0)
		printk("%s%s", KERN_INFO, version);

	dev->priv = (void *) &zn;
	zn.rx_dma = netinfo->dma1;
	zn.tx_dma = netinfo->dma2;
	zn.lock = SPIN_LOCK_UNLOCKED;

	/*
	 * These should never fail: you can't add devices to a sealed box!
	 * Still, claim the resources one at a time so anything already
	 * acquired can be released if a later request fails (the old
	 * combined `a || b || c` check leaked the IRQ and the rx DMA
	 * channel on partial failure).
	 */
	if (request_irq(dev->irq, &znet_interrupt, 0, "ZNet", dev)) {
		printk(KERN_WARNING "%s: Not opened -- resource busy?!?\n", dev->name);
		return -EBUSY;
	}
	if (request_dma(zn.rx_dma, "ZNet rx")) {
		free_irq(dev->irq, dev);
		printk(KERN_WARNING "%s: Not opened -- resource busy?!?\n", dev->name);
		return -EBUSY;
	}
	if (request_dma(zn.tx_dma, "ZNet tx")) {
		free_dma(zn.rx_dma);
		free_irq(dev->irq, dev);
		printk(KERN_WARNING "%s: Not opened -- resource busy?!?\n", dev->name);
		return -EBUSY;
	}

	/* Allocate buffer memory.	We can cross a 128K boundary, so we
	   must be careful about the allocation.  It's easiest to waste 8K
	   and pick whichever static buffer does not straddle a DMA page. */
	if (dma_page_eq(dma_buffer1, &dma_buffer1[RX_BUF_SIZE/2-1]))
	  zn.rx_start = dma_buffer1;
	else 
	  zn.rx_start = dma_buffer2;

	if (dma_page_eq(dma_buffer3, &dma_buffer3[RX_BUF_SIZE/2-1]))
	  zn.tx_start = dma_buffer3;
	else
	  zn.tx_start = dma_buffer2;
	zn.rx_end = zn.rx_start + RX_BUF_SIZE/2;
	zn.tx_buf_len = TX_BUF_SIZE/2;
	zn.tx_end = zn.tx_start + zn.tx_buf_len;

	/* The ZNET-specific entries in the device structure. */
	dev->open = &znet_open;
	dev->hard_start_xmit = &znet_send_packet;
	dev->stop = &znet_close;
	dev->get_stats	= net_get_stats;
	dev->set_multicast_list = &set_multicast_list;
	dev->tx_timeout = znet_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	/* Fill in the 'dev' with ethernet-generic values. */
	ether_setup(dev);

	return 0;
}
Exemple #2
0
/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 *
 * It normalises the m68k memory chunk table, sets up one bootmem
 * allocator per node, maps all physical memory into the kernel virtual
 * address space and finally hands the page ranges to the page allocator
 * via free_area_init_node().
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr, size, end;
	int i;

#ifdef DEBUG
	printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	/*
	 * Determine the physical address range covered by all chunks.
	 * Chunks lying below the first chunk are dropped from the table
	 * with a warning (the rest of the code assumes chunk 0 is the
	 * lowest).
	 */
	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			/* Drop this entry by shifting the rest of the table down. */
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct mem_info));
			continue;	/* re-examine the entry now at index i */
		}
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	/* Shift used to map a virtual address to its memory node. */
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_low_pfn = max_addr >> PAGE_SHIFT;

	/* One bootmem allocator per chunk/node; the bootmem bitmaps
	   themselves are carved out of 'availmem'. */
	for (i = 0; i < m68k_num_memory; i++) {
		addr = m68k_memory[i].addr;
		end = addr + m68k_memory[i].size;
		m68k_setup_node(i);
		availmem = PAGE_ALIGN(availmem);
		availmem += init_bootmem_node(NODE_DATA(i),
					      availmem >> PAGE_SHIFT,
					      addr >> PAGE_SHIFT,
					      end >> PAGE_SHIFT);
	}

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. First initialize the bootmem allocator with
	 * the memory we already mapped, so map_node() has something
	 * to allocate.
	 */
	addr = m68k_memory[0].addr;
	size = m68k_memory[0].size;
	free_bootmem_node(NODE_DATA(0), availmem, min(INIT_MAPPED_SIZE, size) - (availmem - addr));
	map_node(0);
	/* Anything of chunk 0 beyond the initially-mapped region becomes
	   available only after map_node(0) has run. */
	if (size > INIT_MAPPED_SIZE)
		free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE, size - INIT_MAPPED_SIZE);

	for (i = 1; i < m68k_num_memory; i++)
		map_node(i);

	flush_tlb_all();

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	/* Hand each node's pages to the page allocator; all pages are
	   accounted in ZONE_DMA. */
	for (i = 0; i < m68k_num_memory; i++) {
		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
		free_area_init_node(i, pg_data_map + i, zones_size,
				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
	}
}
Exemple #3
0
/*
 * Kick secondary CPU 'nr' out of its bootloader spin loop (MPC85xx).
 *
 * The spin-table address comes from the CPU node's "cpu-release-addr"
 * device-tree property.  The table is mapped (ioremap() for the boot
 * page / highmem, direct phys_to_virt() for lowmem), the CPU's PIR and
 * release address are written, and on PPC32 we poll up to ~1s for the
 * CPU to acknowledge via __secondary_hold_acknowledge.
 */
static void __init
smp_85xx_kick_cpu(int nr)
{
	unsigned long flags;
	const u64 *cpu_rel_addr;
	__iomem u32 *bptr_vaddr;
	struct device_node *np;
	int n = 0;
	int ioremappable;

	WARN_ON (nr < 0 || nr >= NR_CPUS);

	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

	np = of_get_cpu_node(nr, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);

	if (cpu_rel_addr == NULL) {
		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
		/* NOTE(review): 'np' from of_get_cpu_node() is never
		 * of_node_put() on any path in this function -- looks
		 * like a device-node refcount leak; confirm against
		 * this kernel's OF API. */
		return;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to directly access the spinloop if its in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
	else
		bptr_vaddr = phys_to_virt(*cpu_rel_addr);

	local_irq_save(flags);

	/* Tell the secondary which processor ID it is... */
	out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
#ifdef CONFIG_PPC32
	/* ...and where to jump; writing the address releases it. */
	out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));

	/* Lowmem is cacheable: push the spin-table writes to memory so
	   the (not yet coherent) secondary can see them. */
	if (!ioremappable)
		flush_dcache_range((ulong)bptr_vaddr,
				(ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));

	/* Wait a bit for the CPU to ack. */
	while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
		mdelay(1);
#else
	/* 64-bit: release address is a 64-bit entry; then use the
	   generic kick to wake the CPU. */
	out_be64((u64 *)(bptr_vaddr + BOOT_ENTRY_ADDR_UPPER),
		__pa((u64)*((unsigned long long *) generic_secondary_smp_init)));

	smp_generic_kick_cpu(nr);
#endif

	local_irq_restore(flags);

	if (ioremappable)
		iounmap(bptr_vaddr);

	pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
}
Exemple #4
0
static struct device_t * fb_rk3288_probe(struct driver_t * drv, struct dtnode_t * n)
{
	struct fb_rk3288_pdata_t * pdat;
	struct fb_t * fb;
	struct device_t * dev;
	virtual_addr_t virt = phys_to_virt(dt_read_address(n));
	char * clk = dt_read_string(n, "clock-name", NULL);

	if(!search_clk(clk))
		return NULL;

	pdat = malloc(sizeof(struct fb_rk3288_pdata_t));
	if(!pdat)
		return NULL;

	fb = malloc(sizeof(struct fb_t));
	if(!fb)
	{
		free(pdat);
		return NULL;
	}

	pdat->virtvop = virt;
	pdat->virtgrf = phys_to_virt(RK3288_GRF_BASE);
	pdat->virtlvds = phys_to_virt(RK3288_LVDS_BASE);
	pdat->lcd_avdd_3v3 = strdup(dt_read_string(n, "regulator-lcd-avdd-3v3", NULL));
	pdat->lcd_avdd_1v8 = strdup(dt_read_string(n, "regulator-lcd-avdd-1v8", NULL));
	pdat->lcd_avdd_1v0 = strdup(dt_read_string(n, "regulator-lcd-avdd-1v0", NULL));
	pdat->clk = strdup(clk);
	pdat->width = dt_read_int(n, "width", 1024);
	pdat->height = dt_read_int(n, "height", 600);
	pdat->xdpi = dt_read_int(n, "dots-per-inch-x", 160);
	pdat->ydpi = dt_read_int(n, "dots-per-inch-y", 160);
	pdat->bits_per_pixel = dt_read_int(n, "bits-per-pixel", 32);
	pdat->bytes_per_pixel = dt_read_int(n, "bytes-per-pixel", 4);
	pdat->index = 0;
	pdat->vram[0] = dma_alloc_noncoherent(pdat->width * pdat->height * pdat->bytes_per_pixel);
	pdat->vram[1] = dma_alloc_noncoherent(pdat->width * pdat->height * pdat->bytes_per_pixel);

	pdat->interface = RK3288_VOP_INTERFACE_RGB_LVDS;
	pdat->output = RK3288_LVDS_OUTPUT_RGB;
	pdat->format = RK3288_LVDS_FORMAT_JEIDA;

	pdat->mode.mirrorx = 0;
	pdat->mode.mirrory = 0;
	pdat->mode.swaprg = 0;
	pdat->mode.swaprb = 0;
	pdat->mode.swapbg = 0;

	pdat->timing.pixel_clock_hz = dt_read_long(n, "clock-frequency", 52000000);
	pdat->timing.h_front_porch = dt_read_int(n, "hfront-porch", 1);
	pdat->timing.h_back_porch = dt_read_int(n, "hback-porch", 1);
	pdat->timing.h_sync_len = dt_read_int(n, "hsync-len", 1);
	pdat->timing.v_front_porch = dt_read_int(n, "vfront-porch", 1);
	pdat->timing.v_back_porch = dt_read_int(n, "vback-porch", 1);
	pdat->timing.v_sync_len = dt_read_int(n, "vsync-len", 1);
	pdat->timing.h_sync_active = dt_read_bool(n, "hsync-active", 0);
	pdat->timing.v_sync_active = dt_read_bool(n, "vsync-active", 0);
	pdat->timing.den_active = dt_read_bool(n, "den-active", 0);
	pdat->timing.clk_active = dt_read_bool(n, "clk-active", 0);
	pdat->backlight = search_led(dt_read_string(n, "backlight", NULL));

	fb->name = alloc_device_name(dt_read_name(n), -1);
	fb->width = pdat->width;
	fb->height = pdat->height;
	fb->xdpi = pdat->xdpi;
	fb->ydpi = pdat->ydpi;
	fb->bpp = pdat->bits_per_pixel;
	fb->setbl = fb_setbl,
	fb->getbl = fb_getbl,
	fb->create = fb_create,
	fb->destroy = fb_destroy,
	fb->present = fb_present,
	fb->priv = pdat;

	regulator_set_voltage(pdat->lcd_avdd_3v3, 3300000);
	regulator_enable(pdat->lcd_avdd_3v3);
	regulator_set_voltage(pdat->lcd_avdd_1v8, 1800000);
	regulator_enable(pdat->lcd_avdd_1v8);
	regulator_set_voltage(pdat->lcd_avdd_1v0, 1000000);
	regulator_enable(pdat->lcd_avdd_1v0);
	clk_enable(pdat->clk);
	rk3288_fb_init(pdat);

	if(!register_fb(&dev, fb))
	{
		regulator_disable(pdat->lcd_avdd_3v3);
		free(pdat->lcd_avdd_3v3);
		regulator_disable(pdat->lcd_avdd_1v8);
		free(pdat->lcd_avdd_1v8);
		regulator_disable(pdat->lcd_avdd_1v0);
		free(pdat->lcd_avdd_1v0);
		clk_disable(pdat->clk);
		free(pdat->clk);
		dma_free_noncoherent(pdat->vram[0]);
		dma_free_noncoherent(pdat->vram[1]);

		free_device_name(fb->name);
		free(fb->priv);
		free(fb);
		return NULL;
	}
	dev->driver = drv;

	return dev;
}
/*
 * Parse one boot-logo command-line token 'para' and record the matching
 * setting in 'plogo'.
 *
 * para_info_pair[] is a statically initialised doubly linked list of
 * recognised keywords, partitioned into groups: output device, video
 * mode, display mode, debug, progress, loaded.  When a keyword matches,
 * its whole group is unlinked from the list (prev of the group head is
 * linked to next of the group tail), so a later token cannot override a
 * setting from the same group.  A token matching no keyword is treated
 * as a hexadecimal frame-buffer address.
 *
 * Always returns 0.
 */
static inline int install_logo_info(logo_object_t *plogo,char *para)
{
	/* Each entry: name, info value, prev_idx, next_idx,
	 * cur_group_start, cur_group_end (indices into this table). */
	static  para_info_pair_t para_info_pair[PARA_END+2]={
/* list head sentinel */
	{"head",INVALID_INFO,		PARA_END+1,		1,	0,	PARA_END+1},

/* output device group */
	{"osd0",LOGO_DEV_OSD0,	PARA_FIRST_GROUP_START-1,	PARA_FIRST_GROUP_START+1,	PARA_FIRST_GROUP_START,	PARA_SECOND_GROUP_START-1},
	{"osd1",LOGO_DEV_OSD1,	PARA_FIRST_GROUP_START,		PARA_FIRST_GROUP_START+2,	PARA_FIRST_GROUP_START,	PARA_SECOND_GROUP_START-1},
	{"vid",LOGO_DEV_VID,		PARA_FIRST_GROUP_START+1,	PARA_FIRST_GROUP_START+3,	PARA_FIRST_GROUP_START,	PARA_SECOND_GROUP_START-1},  // 3
	{"mem",LOGO_DEV_MEM,	PARA_FIRST_GROUP_START+2,	PARA_FIRST_GROUP_START+4,	PARA_FIRST_GROUP_START,	PARA_SECOND_GROUP_START-1},
/* video mode group */
	{"480i",VMODE_480I,		PARA_SECOND_GROUP_START-1,	PARA_SECOND_GROUP_START+1,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
	{"480cvbs",VMODE_480CVBS,PARA_SECOND_GROUP_START,	PARA_SECOND_GROUP_START+2,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
	{"480p",VMODE_480P,		PARA_SECOND_GROUP_START+1,	PARA_SECOND_GROUP_START+3,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
	{"576i",VMODE_576I,		PARA_SECOND_GROUP_START+2,	PARA_SECOND_GROUP_START+4,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
	{"576cvbs",VMODE_576CVBS,PARA_SECOND_GROUP_START+3,	PARA_SECOND_GROUP_START+5,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
	{"576p",VMODE_576P,		PARA_SECOND_GROUP_START+4,	PARA_SECOND_GROUP_START+6,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
	{"720p",VMODE_720P,		PARA_SECOND_GROUP_START+5,	PARA_SECOND_GROUP_START+7,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
	{"1080i",VMODE_1080I,		PARA_SECOND_GROUP_START+6,	PARA_SECOND_GROUP_START+8,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
	{"1080p",VMODE_1080P,	PARA_SECOND_GROUP_START+7,	PARA_SECOND_GROUP_START+9,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
	{"panel",VMODE_LCD,			PARA_SECOND_GROUP_START+8,	PARA_SECOND_GROUP_START+10,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
	{"720p50hz",VMODE_720P_50HZ,			PARA_SECOND_GROUP_START+9,	PARA_SECOND_GROUP_START+11,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
	{"1080i50hz",VMODE_1080I_50HZ,			PARA_SECOND_GROUP_START+10,	PARA_SECOND_GROUP_START+12,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
	{"1080p50hz",VMODE_1080P_50HZ,			PARA_SECOND_GROUP_START+11,	PARA_SECOND_GROUP_START+13,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
	{"lvds1080p",VMODE_LVDS_1080P,			PARA_SECOND_GROUP_START+12,	PARA_SECOND_GROUP_START+14,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
	{"lvds1080p50hz",VMODE_LVDS_1080P_50HZ,			PARA_SECOND_GROUP_START+13,	PARA_SECOND_GROUP_START+15,	PARA_SECOND_GROUP_START,	PARA_THIRD_GROUP_START-1},
/* display mode group */
	{"origin",DISP_MODE_ORIGIN,	PARA_THIRD_GROUP_START-1,	PARA_THIRD_GROUP_START+1,	PARA_THIRD_GROUP_START,PARA_FOURTH_GROUP_START-1},  //15
	{"center",DISP_MODE_CENTER,	PARA_THIRD_GROUP_START,		PARA_THIRD_GROUP_START+2,	PARA_THIRD_GROUP_START,PARA_FOURTH_GROUP_START-1},
	{"full",DISP_MODE_FULL_SCREEN,	PARA_THIRD_GROUP_START+1,	PARA_THIRD_GROUP_START+3,	PARA_THIRD_GROUP_START,PARA_FOURTH_GROUP_START-1},
/* debug group */
	{"dbg",LOGO_DBG_ENABLE,	PARA_FOURTH_GROUP_START-1,	PARA_FOURTH_GROUP_START+1,	PARA_FOURTH_GROUP_START,PARA_FIFTH_GROUP_START-1},  //18
/* progress group */
	{"progress",LOGO_PROGRESS_ENABLE,PARA_FIFTH_GROUP_START-1,PARA_FIFTH_GROUP_START+1,PARA_FIFTH_GROUP_START,PARA_SIXTH_GROUP_START-1},
/* loaded group */
	{"loaded",LOGO_LOADED,PARA_SIXTH_GROUP_START-1,PARA_SIXTH_GROUP_START+1,PARA_SIXTH_GROUP_START,PARA_END},
/* list tail sentinel */
	{"tail",INVALID_INFO,PARA_END,0,0,PARA_END+1},
	};

	static u32 tail=PARA_END+1;
	u32 first=para_info_pair[0].next_idx ; 
	u32 i,addr;
	
	/* Walk the linked list of still-available keywords. */
	for(i=first;i<tail;i=para_info_pair[i].next_idx)
	{
		if(strcmp(para_info_pair[i].name,para)==0)
		{
			u32 group_start=para_info_pair[i].cur_group_start ;
			u32 group_end=para_info_pair[i].cur_group_end;
			u32	prev=para_info_pair[group_start].prev_idx;
			u32  next=para_info_pair[group_end].next_idx;
			amlog_level(LOG_LEVEL_MAX,"%s:%d\n",para_info_pair[i].name,para_info_pair[i].info);
			/* Apply the setting according to which group matched. */
			switch(para_info_pair[i].cur_group_start)
			{
				case PARA_FIRST_GROUP_START:
				plogo->para.output_dev_type=(platform_dev_t)para_info_pair[i].info;
				break;
				case PARA_SECOND_GROUP_START:
				plogo->para.vout_mode=(vmode_t)para_info_pair[i].info;
				break;
				case PARA_THIRD_GROUP_START:
				plogo->para.dis_mode=(logo_display_mode_t)para_info_pair[i].info;
				break;
				case PARA_FOURTH_GROUP_START:
				amlog_level(LOG_LEVEL_MAX,"select debug mode\n");	
				amlog_level_logo=AMLOG_DEFAULT_LEVEL;
				amlog_mask_logo=AMLOG_DEFAULT_MASK;
				break;
				case PARA_FIFTH_GROUP_START:
				plogo->para.progress=1;
				break;	
				case PARA_SIXTH_GROUP_START:
				plogo->para.loaded=1;
				amlog_level(LOG_LEVEL_MAX,"logo has been loaded\n");
				break;	
			}
			/* Unlink the whole group: later tokens from this group
			 * will no longer match (the table is static, so this
			 * persists across calls -- intended for one-shot
			 * command-line parsing). */
			para_info_pair[prev].next_idx=next;
			para_info_pair[next].prev_idx=prev;
			return 0;
		}
	}
	/* Not a keyword: interpret the token as a hexadecimal address. */
	addr=simple_strtoul(para, NULL,16);
	if(addr >=PHYS_OFFSET)
	{
		plogo->para.mem_addr=(char*)phys_to_virt(addr);
		amlog_mask_level(LOG_MASK_LOADER,LOG_LEVEL_LOW,"mem_addr:0x%p\n",plogo->para.mem_addr);
	}
	return 0;
}
Exemple #6
0
/*
 * Boot one secondary processor (x86-64, pre-hotplug SMP bring-up).
 *
 * Forks an idle task for the new CPU, points the BIOS warm-reset vector
 * at the SMP trampoline, kicks the target with an INIT/STARTUP IPI
 * sequence, then waits up to 5 seconds for the AP to check in via
 * cpu_callin_map.  On failure all per-cpu bookkeeping is rolled back.
 */
static void __init do_boot_cpu (int apicid)
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout, cpu;
	unsigned long start_rip;

	cpu = ++cpucount;
	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);
	x86_cpu_to_apicid[cpu] = apicid;

	cpu_pda[cpu].pcurrent = idle;

	/* Physical address the AP starts executing at (real mode). */
	start_rip = setup_trampoline();

	/* Stack and entry point the trampoline hands off to. */
	init_rsp = idle->thread.rsp; 
	per_cpu(init_tss,cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_ti_thread_flag(idle->thread_info, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n", cpu, apicid, 
	       start_rip, init_rsp);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	/* CMOS shutdown status byte: jump via the warm-reset vector. */
	CMOS_WRITE(0xa, 0xf);
	local_flush_tlb();
	Dprintk("1.\n");
	/* Warm-reset vector at 40:67 -- segment first, then offset. */
	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	Dprintk("2.\n");
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
	Dprintk("3.\n");

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	/*
	 * Status is now clean
	 */
	boot_error = 0;

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip); 

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			print_cpu_info(&cpu_data[cpu]);
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			/* The trampoline stores 0xA5 at its base when it
			   runs; distinguish "started then hung" from
			   "never ran". */
			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
#if APIC_DEBUG
			inquire_remote_apic(apicid);
#endif
		}
	}
	if (boot_error) {
		/* Roll back everything set up above for this CPU. */
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
		cpucount--;
		x86_cpu_to_apicid[cpu] = BAD_APICID;
		x86_cpu_to_log_apicid[cpu] = BAD_APICID;
	}
}
Exemple #7
0
/* MM here stands for multi-media.
 *
 * Reserve boot-time memory pools for the multimedia blocks (V3D,
 * BMEM or GE, hantro codec, camera).  Which pools exist and where
 * their physical bases come from is decided entirely by Kconfig:
 *  - CONFIG_MM_MEMPOOL_BASE_ADDR <= 0: bases are computed at run time
 *    via get_mmpool_base();
 *  - otherwise the configured base is used, and the BMEM/GE base is
 *    offset past the V3D pool.
 * BMEM and GE_WRAP are mutually exclusive throughout (#if/#else).
 */
void bcm21553_mm_mem_init(void)
{
	int ret, size;
	uint32_t v3d_mem_phys_base = CONFIG_MM_MEMPOOL_BASE_ADDR;

#if (CONFIG_MM_MEMPOOL_BASE_ADDR <= 0)
	/* Run-time base computation: each pool base is derived from the
	 * total size it must accommodate. */
#if defined (CONFIG_BMEM)
	bmem_phys_base = get_mmpool_base(BMEM_SIZE);
#else
#ifdef CONFIG_GE_WRAP
	ge_mem_phys_base = get_mmpool_base(gememalloc_SIZE);
#endif
#endif
#ifdef CONFIG_BRCM_V3D
	size = v3d_mempool_size;
#if defined (CONFIG_BMEM)
	size += BMEM_SIZE;
#else
#ifdef CONFIG_GE_WRAP
	size += gememalloc_SIZE;
#endif
#endif
	v3d_mem_phys_base = get_mmpool_base(size);
#endif
#else
	/* Fixed base: place the BMEM/GE pool right after the V3D pool. */
#if defined(CONFIG_BRCM_V3D)
#if defined (CONFIG_BMEM)
	bmem_phys_base += v3d_mempool_size;
#else
#if defined(CONFIG_GE_WRAP)
	ge_mem_phys_base += v3d_mempool_size;
#endif
#endif
#endif
#endif

#ifdef CONFIG_BRCM_V3D
	/* Reserve the V3D (3D graphics) pool, if one was configured. */
	if (v3d_mempool_size) {
	ret = reserve_bootmem(v3d_mem_phys_base, v3d_mempool_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_ERR "Failed to allocate memory for v3d\n");
		return;
	}

	v3d_mempool_base = phys_to_virt(v3d_mem_phys_base);
		pr_info("v3d phys[0x%08x] virt[0x%08x] size[0x%08x] \n",
			v3d_mem_phys_base, (uint32_t)v3d_mempool_base, (int)v3d_mempool_size);
	} else {
		v3d_mempool_base = NULL;
		v3d_mem_phys_base = 0;
	}
#endif

#if defined (CONFIG_BMEM)
	/* Reserve the BMEM pool. */
	ret = reserve_bootmem(bmem_phys_base, BMEM_SIZE, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_ERR "Failed to allocate memory for ge\n");
		return;
	}

	bmem_mempool_base = phys_to_virt(bmem_phys_base);
	pr_info("bmem phys[0x%08x] virt[0x%08x] size[0x%08x] \n",
		bmem_phys_base, (uint32_t)bmem_mempool_base, BMEM_SIZE);
#else
#ifdef CONFIG_GE_WRAP
	/* Reserve the GE (graphics engine) pool instead of BMEM. */
	ret = reserve_bootmem(ge_mem_phys_base, gememalloc_SIZE, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_ERR "Failed to allocate memory for ge\n");
		return;
	}

	ge_mempool_base = phys_to_virt(ge_mem_phys_base);
	pr_info("ge phys[0x%08x] virt[0x%08x] size[0x%08x] \n",
		ge_mem_phys_base, (uint32_t)ge_mempool_base, gememalloc_SIZE);
#endif
#endif

	/* Codec (hantro) and camera pools come from low bootmem pages;
	 * with BMEM they are small (2 pages) placeholders, otherwise a
	 * full-size static allocation. */
#if defined (CONFIG_BMEM)
#ifdef CONFIG_HANTRO_WRAP
	memalloc_mempool_base = alloc_bootmem_low_pages(2 * PAGE_SIZE);
	pr_info("memalloc(hantro) phys[0x%08x] virt[0x%08x] size[0x%08x] \n",
		(uint32_t)virt_to_phys(memalloc_mempool_base), (uint32_t)memalloc_mempool_base,
		(uint32_t)(2 * PAGE_SIZE));
#endif
	cam_mempool_base = alloc_bootmem_low_pages(2 * PAGE_SIZE);
	pr_info("pmem(camera) phys[0x%08x] virt[0x%08x] size[0x%08x] \n",
		(uint32_t)virt_to_phys(cam_mempool_base), (uint32_t)cam_mempool_base,
		(uint32_t)(2 * PAGE_SIZE));
#else
#ifdef CONFIG_HANTRO_WRAP
	memalloc_mempool_base = alloc_bootmem_low_pages(MEMALLOC_SIZE + SZ_2M);
#endif
	cam_mempool_base = alloc_bootmem_low_pages(1024 * 1024 * 8);
#endif
}
Exemple #8
0
static void cb_parse_mrc_cache(void *ptr, struct sysinfo_t *info)
{
	struct cb_cbmem_tab *const cbmem = (struct cb_cbmem_tab *)ptr;
	info->mrc_cache = phys_to_virt(cbmem->cbmem_tab);
}
Exemple #9
0
static int mfc_probe(struct platform_device *pdev)
{
	struct resource *res;
	size_t size;
	int ret;

	/* mfc clock enable should be here */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
	{
		dev_err(&pdev->dev, "failed to get memory region resource\n");
		ret = -ENOENT;
		goto probe_out;
	}

	// 60K is required for mfc register (0x0 ~ 0xe008)
	size = (res->end - res->start) + 1;
	mfc_mem = request_mem_region(res->start, size, pdev->name);
	if (mfc_mem == NULL)
	{
		dev_err(&pdev->dev, "failed to get memory region\n");
		ret = -ENOENT;
		goto probe_out;
	}

	mfc_sfr_base_vaddr = ioremap(mfc_mem->start, mfc_mem->end - mfc_mem->start + 1);
	if (mfc_sfr_base_vaddr == NULL)
	{
		dev_err(&pdev->dev, "failed to ioremap address region\n");
		ret = -ENOENT;
		goto probe_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL)
	{
		dev_err(&pdev->dev, "failed to get irq resource\n");
		ret = -ENOENT;
		goto probe_out;
	}

#if !defined(MFC_POLLING)
	ret = request_irq(res->start, mfc_irq, IRQF_DISABLED, pdev->name, pdev);
	if (ret != 0)
	{
		dev_err(&pdev->dev, "failed to install irq (%d)\n", ret);
		goto probe_out;
	}
#endif

	mutex_init(&mfc_mutex);

	/*
	 * buffer memory secure 
	 */
	mfc_port0_base_paddr = s3c_get_media_memory_bank(S3C_MDEV_MFC, 0);
	mfc_port0_base_paddr = ALIGN_TO_128KB(mfc_port0_base_paddr);
	mfc_port0_base_vaddr = phys_to_virt(mfc_port0_base_paddr);

	if (mfc_port0_base_vaddr == NULL)
	{
		mfc_err("fail to mapping port0 buffer\n");
		ret = -EPERM;
		goto probe_out;
	}

	mfc_port1_base_paddr = s3c_get_media_memory_bank(S3C_MDEV_MFC, 1);
	mfc_port1_base_paddr = ALIGN_TO_128KB(mfc_port1_base_paddr);
	mfc_port1_base_vaddr = phys_to_virt(mfc_port1_base_paddr);

	if (mfc_port1_base_vaddr == NULL)
	{
		mfc_err("fail to mapping port1 buffer\n");
		ret = -EPERM;
		goto probe_out;
	}

	mfc_debug("mfc_port0_base_paddr = 0x%08x, mfc_port1_base_paddr = 0x%08x <<\n",
		(unsigned int)mfc_port0_base_paddr, (unsigned int)mfc_port1_base_paddr);
	mfc_debug("mfc_port0_base_vaddr = 0x%08x, mfc_port1_base_vaddr = 0x%08x <<\n",
		(unsigned int)mfc_port0_base_vaddr, (unsigned int)mfc_port1_base_vaddr);
	
	/*
	 * MFC FW downloading
	 */
	if (mfc_load_firmware() < 0)
	{
		mfc_err("MFCINST_ERR_FW_INIT_FAIL\n");
		ret = -EPERM;
		goto probe_out;
	}

	mfc_init_mem_inst_no();
	mfc_init_buffer();

	mfc_clk = clk_get(&pdev->dev, "mfc");
	if (mfc_clk == NULL)
	{
		printk(KERN_ERR "failed to find mfc clock source\n");
		return -ENOENT;
	}

	ret = misc_register(&mfc_miscdev);

	return 0;

probe_out:
	dev_err(&pdev->dev, "not found (%d). \n", ret);
	return ret;
}
Exemple #10
0
/* Initialise the s5p4418 clocksource: resolve the virtual address of
 * the hardware block from its physical base in 'pdata', then register
 * the clocksource descriptor 'cs'. */
static __init void s5p4418_clocksource_init(void)
{
	pdata.virt = phys_to_virt(pdata.phys);
	register_clocksource(&cs);
}
Exemple #11
0
/*
 * s3c_pp_ioctl - ioctl entry point for the S3C post-processor (PP) driver.
 *
 * Every command executes under h_mutex; each early-return path below
 * releases the mutex before returning.  Per-open instance state lives in
 * file->private_data; 'arg' is a user-space s3c_pp_params_t read
 * field-by-field with get_user().
 *
 * NOTE(review): the return values of get_user()/copy_from_user()/
 * copy_to_user() are mostly ignored here, so a faulting user pointer can
 * leave fields zero/stale instead of failing with -EFAULT — confirm this
 * is acceptable for this driver.
 * NOTE(review): S3C_PP_GET_SRC_BUF_SIZE / S3C_PP_GET_DST_BUF_SIZE and the
 * reserved-mem queries return positive values directly from the ioctl;
 * callers must treat any positive return as data, not success/failure.
 */
static int s3c_pp_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	s3c_pp_instance_context_t *current_instance;
	s3c_pp_params_t *parg;

	unsigned int temp = 0;

    mutex_lock(h_mutex);

	current_instance	= (s3c_pp_instance_context_t *) file->private_data;
	parg	            = (s3c_pp_params_t *) arg;

	switch ( cmd )
    {
		case S3C_PP_SET_PARAMS:
            {
                s3c_pp_out_path_t temp_out_path; 
                unsigned int temp_src_width, temp_src_height, temp_dst_width, temp_dst_height;
                s3c_color_space_t temp_src_color_space, temp_dst_color_space;

                get_user(temp_out_path, &parg->out_path);

                /* Reject: only one FIFO-mode instance may exist, and FIFO
                 * mode cannot coexist with active DMA-mode instances. */
                if ( (-1 != s3c_pp_instance_info.fifo_mode_instance_no )
                     || ((s3c_pp_instance_info.dma_mode_instance_count) && (FIFO_FREERUN == temp_out_path)) )
                {
                    printk ( KERN_ERR "\n%s: S3C_PP_SET_PARAMS can't be executed.\n", __FUNCTION__ );
                    mutex_unlock(h_mutex);
    			    return -EINVAL; 
                }

    			get_user(temp_src_width,       &parg->src_width);
    			get_user(temp_src_height,      &parg->src_height);
    			get_user(temp_dst_width,       &parg->dst_width);
    			get_user(temp_dst_height,      &parg->dst_height);

                // S3C6410 support that the source image is up to 4096 x 4096
                //     and the destination image is up to 2048 x 2048.
    			if (    (temp_src_width > 4096) || (temp_src_height > 4096) 
                     || (temp_dst_width > 2048) || (temp_dst_height > 2048) )
    			{
    				printk(KERN_ERR "\n%s: Size is too big to be supported.\n", __FUNCTION__);
    				mutex_unlock(h_mutex);
    				return -EINVAL;
    			}
                
                get_user(temp_src_color_space, &parg->src_color_space);
                get_user(temp_dst_color_space, &parg->dst_color_space);

                /* Hardware alignment constraints: YC420 widths must be a
                 * multiple of 8, RGB16 widths a multiple of 2; destination
                 * constraints only apply in DMA_ONESHOT output mode. */
                if (    ( (temp_src_color_space == YC420) && (temp_src_width % 8) )
                     || ( (temp_src_color_space == RGB16) && (temp_src_width % 2) ) 
                     || ( (temp_out_path == DMA_ONESHOT) && (    ((temp_dst_color_space == YC420) && (temp_dst_width % 8))
                                                              || ((temp_dst_color_space == RGB16) && (temp_dst_width % 2)))) )
                {
    				printk(KERN_ERR "\n%s: YUV420 image width must be a multiple of 8.\n", __FUNCTION__);
                    printk(KERN_ERR "%s: RGB16 must be a multiple of 2.\n", __FUNCTION__);
    				mutex_unlock(h_mutex);
    				return -EINVAL;
                } 
                
                
                /* All validation passed: latch the parameters into the
                 * per-open instance context. */
    			get_user(current_instance->src_full_width,  &parg->src_full_width);
    			get_user(current_instance->src_full_height, &parg->src_full_height);
    			get_user(current_instance->src_start_x,     &parg->src_start_x);
    			get_user(current_instance->src_start_y,     &parg->src_start_y);
    			current_instance->src_width                 = temp_src_width;
    			current_instance->src_height                = temp_src_height;
                current_instance->src_color_space           = temp_src_color_space;

    			get_user(current_instance->dst_full_width,  &parg->dst_full_width);
                get_user(current_instance->dst_full_height, &parg->dst_full_height);
                get_user(current_instance->dst_start_x,     &parg->dst_start_x);
    			get_user(current_instance->dst_start_y,     &parg->dst_start_y);
    			current_instance->dst_width                 = temp_dst_width;
    			current_instance->dst_height                = temp_dst_height;
                current_instance->dst_color_space           = temp_dst_color_space;

                current_instance->out_path                  = temp_out_path;

                if ( DMA_ONESHOT == current_instance->out_path )
                {
                    s3c_pp_instance_info.instance_state[current_instance->instance_no] = PP_INSTANCE_INUSE_DMA_ONESHOT;
                    s3c_pp_instance_info.dma_mode_instance_count++;               
                }
                else
                {
                    get_user(current_instance->scan_mode, &parg->scan_mode);

                    /* FIFO path always feeds the display in RGB30. */
                    current_instance->dst_color_space = RGB30;

                    s3c_pp_instance_info.instance_state[current_instance->instance_no] = PP_INSTANCE_INUSE_FIFO_FREERUN;
                    s3c_pp_instance_info.fifo_mode_instance_no = current_instance->instance_no;
                    s3c_pp_instance_info.wincon0_value_before_fifo_mode = __raw_readl ( S3C_WINCON0 );
                
                    //.[ REDUCE_VCLK_SYOP_TIME
                    /* When downscaling, pre-fold the source vertically by the
                     * largest integer factor i (2..7) that still fits, trading
                     * width for height to shorten the VCLK sync-op time. */
                    if ( current_instance->src_height > current_instance->dst_height )
                    {
                        int i;

                        for ( i=2; (current_instance->src_height >= (i * current_instance->dst_height)) && (i<8); i++ )
                        {
                        }

                        current_instance->src_full_width  *= i;
                        current_instance->src_full_height /= i;
                        current_instance->src_height      /= i;
                    }
                    //.] REDUCE_VCLK_SYOP_TIME
                }

                current_instance->value_changed |= PP_VALUE_CHANGED_PARAMS;
            }
			break;

		case S3C_PP_START:
            dprintk ( "%s: S3C_PP_START last_instance=%d, curr_instance=%d\n", __FUNCTION__, 
                        s3c_pp_instance_info.last_running_instance_no, current_instance->instance_no );

            /* START is only legal after SET_PARAMS has moved the instance
             * out of the READY state. */
            if ( PP_INSTANCE_READY == s3c_pp_instance_info.instance_state[current_instance->instance_no] )
            {
                printk ( KERN_ERR "%s: S3C_PP_START must be executed after running S3C_PP_SET_PARAMS.\n", __FUNCTION__ );
                mutex_unlock(h_mutex);
			    return -EINVAL;
            }

            /* Context switch: a different instance ran last, so fully
             * reprogram the hardware before starting. */
            if ( current_instance->instance_no != s3c_pp_instance_info.last_running_instance_no )
            {
                __raw_writel(0x0<<31, s3c_pp_base + S3C_VPP_POSTENVID);
            
                temp = S3C_MODE2_ADDR_CHANGE_DISABLE | S3C_MODE2_CHANGE_AT_FRAME_END | S3C_MODE2_SOFTWARE_TRIGGER;
                __raw_writel(temp, s3c_pp_base + S3C_VPP_MODE_2);
                   
                set_clock_src(HCLK);
            
                // setting the src/dst color space
                set_data_format(current_instance);
            
                // setting the src/dst size 
                set_scaler(current_instance);
            
                // setting the src/dst buffer address
                set_src_addr(current_instance);
                set_dest_addr(current_instance);

                current_instance->value_changed = PP_VALUE_CHANGED_NONE;

                s3c_pp_instance_info.last_running_instance_no = current_instance->instance_no;
                s3c_pp_instance_info.running_instance_no = current_instance->instance_no;

                if ( PP_INSTANCE_INUSE_DMA_ONESHOT == s3c_pp_instance_info.instance_state[current_instance->instance_no] )
                { // DMA OneShot Mode
                    dprintk ( "%s: DMA_ONESHOT mode\n", __FUNCTION__ );

                    post_int_enable(1);
                    pp_dma_mode_set_and_start();


                    /* Blocking open: wait (up to 500 jiffies) for the
                     * completion interrupt; timeout is only logged. */
                    if ( !(file->f_flags & O_NONBLOCK) )
                    {
                        if (interruptible_sleep_on_timeout(&waitq, 500) == 0) 
                        {
                            printk(KERN_ERR "\n%s: Waiting for interrupt is timeout\n", __FUNCTION__);
                        }
                    }
                }
                else
                { // FIFO freerun Mode
                    dprintk ( "%s: FIFO_freerun mode\n", __FUNCTION__ );
                    s3c_pp_instance_info.fifo_mode_instance_no = current_instance->instance_no;

                    post_int_enable(1);
                    pp_fifo_mode_set_and_start(current_instance); 
                }
            }
            else
            {
                /* Same instance as last run: only re-program what changed
                 * since the previous start. */
                if ( current_instance->value_changed != PP_VALUE_CHANGED_NONE )
                {
                    __raw_writel(0x0<<31, s3c_pp_base + S3C_VPP_POSTENVID);

                    if ( current_instance->value_changed & PP_VALUE_CHANGED_PARAMS )
                    {
                        set_data_format(current_instance);
                        set_scaler(current_instance);
                    }

                    if ( current_instance->value_changed & PP_VALUE_CHANGED_SRC_BUF_ADDR_PHY )
                    {
                        set_src_addr(current_instance);
                    }

                    if ( current_instance->value_changed & PP_VALUE_CHANGED_DST_BUF_ADDR_PHY )
                    {
                        set_dest_addr(current_instance);
                    }

                    current_instance->value_changed = PP_VALUE_CHANGED_NONE;
                }

                s3c_pp_instance_info.running_instance_no = current_instance->instance_no;

                post_int_enable(1);
                start_processing();

                if ( !(file->f_flags & O_NONBLOCK) )
                {
                    if (interruptible_sleep_on_timeout(&waitq, 500) == 0) 
                    {
                        printk(KERN_ERR "\n%s: Waiting for interrupt is timeout\n", __FUNCTION__);
                    }
                }
            }
			break;

		case S3C_PP_GET_SRC_BUF_SIZE:

            if ( PP_INSTANCE_READY == s3c_pp_instance_info.instance_state[current_instance->instance_no] )
            {
                dprintk ( "%s: S3C_PP_GET_SRC_BUF_SIZE must be executed after running S3C_PP_SET_PARAMS.\n", __FUNCTION__ );
                mutex_unlock(h_mutex);
			    return -EINVAL;
            }

            /* Size derived from color space + full frame dimensions;
             * returned as a positive ioctl value. */
            temp = cal_data_size ( current_instance->src_color_space, current_instance->src_full_width, current_instance->src_full_height );

			mutex_unlock(h_mutex);
			return temp;


		case S3C_PP_SET_SRC_BUF_ADDR_PHY:

            get_user(current_instance->src_buf_addr_phy, &parg->src_buf_addr_phy);
            current_instance->value_changed |= PP_VALUE_CHANGED_SRC_BUF_ADDR_PHY;
			break;

        case S3C_PP_SET_SRC_BUF_NEXT_ADDR_PHY:

            /* Next-buffer update only makes sense for the instance that
             * currently owns FIFO mode. */
            if ( current_instance->instance_no != s3c_pp_instance_info.fifo_mode_instance_no )
            { // if FIFO Mode is not Active
                dprintk (KERN_DEBUG "%s: S3C_PP_SET_SRC_BUF_NEXT_ADDR_PHY can't be executed.\n", __FUNCTION__ );
                mutex_unlock(h_mutex);
                return -EINVAL;
            }            

            get_user(current_instance->src_next_buf_addr_phy, &parg->src_next_buf_addr_phy);

            /* Bracket the address update with MODE_2 bit 4 set/clear —
             * NOTE(review): presumably an address-change-enable latch;
             * confirm against the S3C_VPP_MODE_2 register spec. */
            temp = __raw_readl(s3c_pp_base + S3C_VPP_MODE_2);
            temp |= (0x1<<4);
            __raw_writel(temp, s3c_pp_base + S3C_VPP_MODE_2);
    
            set_src_next_buf_addr(current_instance);

            temp = __raw_readl(s3c_pp_base + S3C_VPP_MODE_2);
            temp &= ~(0x1<<4);
            __raw_writel(temp, s3c_pp_base + S3C_VPP_MODE_2);
            break;

		case S3C_PP_GET_DST_BUF_SIZE:
            
            if ( PP_INSTANCE_READY == s3c_pp_instance_info.instance_state[current_instance->instance_no] )
            {
                dprintk ( "%s: S3C_PP_GET_DST_BUF_SIZE must be executed after running S3C_PP_SET_PARAMS.\n", __FUNCTION__ );
                mutex_unlock(h_mutex);
			    return -EINVAL;
            }

            temp = cal_data_size ( current_instance->dst_color_space, current_instance->dst_full_width, current_instance->dst_full_height );

			mutex_unlock(h_mutex);
			return temp;

		case S3C_PP_SET_DST_BUF_ADDR_PHY:

            get_user(current_instance->dst_buf_addr_phy, &parg->dst_buf_addr_phy);
            current_instance->value_changed |= PP_VALUE_CHANGED_DST_BUF_ADDR_PHY;
			break;


        case S3C_PP_ALLOC_KMEM:
            {
                s3c_pp_mem_alloc_t param;
                
                if (copy_from_user(&param, (s3c_pp_mem_alloc_t *)arg, sizeof(s3c_pp_mem_alloc_t)))
                {
                    mutex_unlock(h_mutex);
                    return -EFAULT;
                }
                
                /* 'flag' steers the driver's mmap handler into kernel-alloc
                 * mode for the duration of this do_mmap() call. */
                flag = ALLOC_KMEM;
                
                param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
                dprintk (KERN_DEBUG "param.vir_addr = %08x\n", param.vir_addr);
                            
                flag = 0;

                if(param.vir_addr == -EINVAL) {
                    printk(KERN_ERR "%s: PP_MEM_ALLOC FAILED\n", __FUNCTION__);
                    mutex_unlock(h_mutex);
                    return -EFAULT;
                }
                /* physical_address is filled in by the mmap path above —
                 * NOTE(review): global handoff, relies on h_mutex for safety. */
                param.phy_addr = physical_address;
                
                dprintk (KERN_DEBUG "KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);
                
                if (copy_to_user((s3c_pp_mem_alloc_t *)arg, &param, sizeof(s3c_pp_mem_alloc_t)))
                {
                    mutex_unlock(h_mutex);
                    return -EFAULT;
                }
            }
            break;

        case S3C_PP_FREE_KMEM:
            {
                s3c_pp_mem_alloc_t param;
                struct mm_struct *mm = current->mm;
                void *virt_addr;

                if ( copy_from_user(&param, (s3c_pp_mem_alloc_t *)arg, sizeof(s3c_pp_mem_alloc_t)) )
                {
                    mutex_unlock(h_mutex);
                    return -EFAULT;
                }
            
                dprintk (KERN_DEBUG "KERNEL FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);
            
                /* Unmap the user mapping first, then free the backing
                 * kernel allocation via its linear-map virtual address. */
                if ( do_munmap(mm, param.vir_addr, param.size ) < 0 ) 
                {
                    dprintk("do_munmap() failed !!\n");
                    mutex_unlock(h_mutex);
                    return -EINVAL;
                }
                virt_addr = phys_to_virt(param.phy_addr);
                dprintk ( "KERNEL : virt_addr = 0x%X\n", (unsigned int) virt_addr );
            
                /* NOTE(review): phy_addr comes straight from user space and
                 * is passed to kfree() unchecked — a hostile value could
                 * free arbitrary kernel memory; confirm callers are trusted. */
                kfree(virt_addr);
                param.size = 0;

                dprintk(KERN_DEBUG "do_munmap() succeed !!\n");
            }
            break;

        case S3C_PP_GET_RESERVED_MEM_SIZE:
            mutex_unlock(h_mutex);
            return PP_RESERVED_MEM_SIZE;

        case S3C_PP_GET_RESERVED_MEM_ADDR_PHY:
            mutex_unlock(h_mutex);
            return PP_RESERVED_MEM_ADDR_PHY;

		default:
			mutex_unlock(h_mutex);
			return -EINVAL;
	}

	mutex_unlock(h_mutex);
	
	return 0;
}
/*
 * setup_machine_fdt - validate the flattened device tree blob at dt_phys
 * and pick the best-matching machine descriptor.
 *
 * Returns the machine_desc with the lowest (best) dt_compat match score.
 * On a missing/invalid DTB or no matching machine, the function never
 * returns: it prints an error and spins forever (this runs before any
 * recovery is possible).
 *
 * Side effects: sets initial_boot_params, machine_name, and populates
 * boot_command_line and early memory info via the of_scan_flat_dt() calls.
 */
static struct machine_desc * __init setup_machine_fdt(phys_addr_t dt_phys)
{
	struct boot_param_header *devtree;
	struct machine_desc *mdesc, *mdesc_best = NULL;
	unsigned int score, mdesc_score = ~1;	/* ~1 = highest comparable score */
	unsigned long dt_root;

	/* Check we have a non-NULL DT pointer */
	if (!dt_phys) {
		early_print("\n"
			"Error: NULL or invalid device tree blob\n"
			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
			"\nPlease check your bootloader.\n");

		while (true)
			cpu_relax();

	}

	/* DTB must be within the linear mapping for this to be valid. */
	devtree = phys_to_virt(dt_phys);

	/* Check device tree validity */
	if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) {
		early_print("\n"
			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
			"Expected 0x%x, found 0x%x\n"
			"\nPlease check your bootloader.\n",
			dt_phys, devtree, OF_DT_HEADER,
			be32_to_cpu(devtree->magic));

		while (true)
			cpu_relax();
	}

	initial_boot_params = devtree;
	dt_root = of_get_flat_dt_root();

	/* Lower non-zero score = better match against mdesc->dt_compat. */
	for_each_machine_desc(mdesc) {
		score = of_flat_dt_match(dt_root, mdesc->dt_compat);
		if (score > 0 && score < mdesc_score) {
			mdesc_best = mdesc;
			mdesc_score = score;
		}
	}
	if (!mdesc_best) {
		const char *prop;
		long size;

		pr_info("\nError: unrecognized/unsupported "
			    "device tree compatible list:\n[ ");

		/* Dump each NUL-separated compatible string for diagnosis. */
		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
		while (size > 0) {
			pr_info("'%s' ", prop);
			size -= strlen(prop) + 1;
			prop += strlen(prop) + 1;
		}
		pr_info("]\n\n");

		while (true)
			/* can't use cpu_relax() here as it may require MMU setup */;
	}

	/* Prefer "model" for the reported machine name; fall back to
	 * "compatible", then a placeholder. */
	machine_name = of_get_flat_dt_prop(dt_root, "model", NULL);
	if (!machine_name)
		machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
	if (!machine_name)
		machine_name = "<unknown>";
	pr_info("Machine: %s\n", machine_name);

	/* Retrieve various information from the /chosen node */
	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
	/* Initialize {size,address}-cells info */
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	/* Setup memory, calling early_init_dt_add_memory_arch */
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

	return mdesc_best;
}
Exemple #13
0
/*
 * swiotlb_bus_to_virt - default (weak) bus-address to kernel-virtual
 * translation: bus -> physical via swiotlb_bus_to_phys(), then physical
 * -> virtual via the linear map.  Architectures with a non-trivial
 * bus/phys relationship override this.
 */
void * __weak swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t address)
{
	return phys_to_virt(swiotlb_bus_to_phys(hwdev, address));
}
/*
 * s3cfb_draw_logo - paint the boot splash into the framebuffer.
 *
 * Compiled-in behavior depends on configuration:
 *  - CONFIG_FB_S5P_SPLASH_SCREEN unset: no-op, returns 0.
 *  - RGB_BOOTSCREEN defined: fills the screen with three horizontal
 *    color bands (red / green / blue thirds), assuming 4 bytes/pixel.
 *  - otherwise: copies the bootloader's logo image from the physical
 *    address handed over in 'bootloaderfb' (falling back to
 *    BOOT_FB_BASE_ADDR) into the framebuffer via the linear mapping.
 *
 * Always returns 0.
 */
int s3cfb_draw_logo(struct fb_info *fb)
{
#ifdef CONFIG_FB_S5P_SPLASH_SCREEN
#ifdef RGB_BOOTSCREEN
	struct fb_fix_screeninfo *fix = &fb->fix;
	struct fb_var_screeninfo *var = &fb->var;
#endif
#if 0
	struct s3c_platform_fb *pdata = to_fb_plat(fbdev->dev);
	memcpy(fbdev->fb[pdata->default_win]->screen_base,
	       LOGO_RGB24, fix->line_length * var->yres);
#else
#ifdef RGB_BOOTSCREEN
	u32 height = var->yres / 3;
	u32 line = fix->line_length;
	u32 i, j;

	/* Top third: per-pixel bytes {0x00, 0x00, 0xff, 0x00} (red band,
	 * assuming BGRA-style 32bpp layout — TODO confirm pixel format). */
	for (i = 0; i < height; i++) {
		for (j = 0; j < var->xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0x00, 1);
		}
	}

	/* Middle third: {0x00, 0xff, 0x00, 0x00} (green band). */
	for (i = height; i < height * 2; i++) {
		for (j = 0; j < var->xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0x00, 1);
		}
	}

	/* Bottom third: {0xff, 0x00, 0x00, 0x00} (blue band). */
	for (i = height * 2; i < height * 3; i++) {
		for (j = 0; j < var->xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0x00, 1);
		}
	}
#else /* #ifdef RGB_BOOTSCREEN */
	u8 *logo_virt_buf;

	if (bootloaderfb)
		printk(KERN_INFO "Bootloader sent 'bootloaderfb' to Kernel Successfully : %d", bootloaderfb);
	else {
		bootloaderfb = BOOT_FB_BASE_ADDR;
		printk(KERN_ERR "Fail to get 'bootloaderfb' from Bootloader. so we must set  this value as %d", bootloaderfb);
	}

	/* bootloaderfb is a physical address in the linear map — NOTE(review):
	 * phys_to_virt() is only valid if that region is kernel-mapped RAM. */
	logo_virt_buf = phys_to_virt(bootloaderfb);
	memcpy(fb->screen_base, logo_virt_buf, fb->var.yres * fb->fix.line_length);
#endif /* #ifdef RGB_BOOTSCREEN */
#endif
#endif

	return 0;
}
Exemple #15
0
/*
 * ixpdev_init - bring up the IXP2000 MSF ethernet subsystem.
 *
 * Allocates one zeroed page per RX and TX buffer descriptor, programs the
 * four hardware rings (RX pending/done, TX pending/done), loads and starts
 * the RX and TX microengines, then registers the supplied net_devices.
 *
 * @__nds_count: number of net_device pointers in @__nds.
 * @__nds: caller-owned array of net_devices to register.
 * @__set_port_admin_status: callback stored for later port up/down control.
 *
 * Returns 0 on success or a negative errno; on failure all pages and any
 * already-registered netdevs are released via the goto-unwind labels
 * (err_free_tx -> err_free_rx -> err_out).
 */
int ixpdev_init(int __nds_count, struct net_device **__nds,
		void (*__set_port_admin_status)(int port, int up))
{
	int i;
	int err;

	/* Ring capacity ceiling imposed by the 256-entry hardware rings. */
	BUILD_BUG_ON(RX_BUF_COUNT > 192 || TX_BUF_COUNT > 192);

	printk(KERN_INFO "IXP2000 MSF ethernet driver %s\n", DRV_MODULE_VERSION);

	nds_count = __nds_count;
	nds = __nds;
	set_port_admin_status = __set_port_admin_status;

	/* One zeroed page per RX descriptor; descriptors store the
	 * physical address, so unwind converts back with phys_to_virt(). */
	for (i = 0; i < RX_BUF_COUNT; i++) {
		void *buf;

		buf = (void *)get_zeroed_page(GFP_KERNEL);
		if (buf == NULL) {
			err = -ENOMEM;
			while (--i >= 0)
				free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
			goto err_out;
		}
		rx_desc[i].buf_addr = virt_to_phys(buf);
		rx_desc[i].buf_length = PAGE_SIZE;
	}

	/* @@@ Maybe we shouldn't be preallocating TX buffers.  */
	for (i = 0; i < TX_BUF_COUNT; i++) {
		void *buf;

		buf = (void *)get_zeroed_page(GFP_KERNEL);
		if (buf == NULL) {
			err = -ENOMEM;
			while (--i >= 0)
				free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
			goto err_free_rx;
		}
		tx_desc[i].buf_addr = virt_to_phys(buf);
	}

	/* 256 entries, ring status set means 'empty', base address 0x0000.  */
	ixp2000_reg_write(RING_RX_PENDING_BASE, 0x44000000);
	ixp2000_reg_write(RING_RX_PENDING_HEAD, 0x00000000);
	ixp2000_reg_write(RING_RX_PENDING_TAIL, 0x00000000);

	/* 256 entries, ring status set means 'full', base address 0x0400.  */
	ixp2000_reg_write(RING_RX_DONE_BASE, 0x40000400);
	ixp2000_reg_write(RING_RX_DONE_HEAD, 0x00000000);
	ixp2000_reg_write(RING_RX_DONE_TAIL, 0x00000000);

	/* Seed the RX pending ring with every descriptor's address. */
	for (i = 0; i < RX_BUF_COUNT; i++) {
		ixp2000_reg_write(RING_RX_PENDING,
			RX_BUF_DESC_BASE + (i * sizeof(struct ixpdev_rx_desc)));
	}

	ixp2000_uengine_load(0, &ixp2400_rx);
	ixp2000_uengine_start_contexts(0, 0xff);

	/* 256 entries, ring status set means 'empty', base address 0x0800.  */
	ixp2000_reg_write(RING_TX_PENDING_BASE, 0x44000800);
	ixp2000_reg_write(RING_TX_PENDING_HEAD, 0x00000000);
	ixp2000_reg_write(RING_TX_PENDING_TAIL, 0x00000000);

	/* 256 entries, ring status set means 'full', base address 0x0c00.  */
	ixp2000_reg_write(RING_TX_DONE_BASE, 0x40000c00);
	ixp2000_reg_write(RING_TX_DONE_HEAD, 0x00000000);
	ixp2000_reg_write(RING_TX_DONE_TAIL, 0x00000000);

	ixp2000_uengine_load(1, &ixp2400_tx);
	ixp2000_uengine_start_contexts(1, 0xff);

	/* Register netdevs last; on failure unregister those already done. */
	for (i = 0; i < nds_count; i++) {
		err = register_netdev(nds[i]);
		if (err) {
			while (--i >= 0)
				unregister_netdev(nds[i]);
			goto err_free_tx;
		}
	}

	for (i = 0; i < nds_count; i++) {
		printk(KERN_INFO "%s: IXP2000 MSF ethernet (port %d), "
			"%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", nds[i]->name, i,
			nds[i]->dev_addr[0], nds[i]->dev_addr[1],
			nds[i]->dev_addr[2], nds[i]->dev_addr[3],
			nds[i]->dev_addr[4], nds[i]->dev_addr[5]);
	}

	return 0;

err_free_tx:
	for (i = 0; i < TX_BUF_COUNT; i++)
		free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));

err_free_rx:
	for (i = 0; i < RX_BUF_COUNT; i++)
		free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));

err_out:
	return err;
} 
Exemple #16
0
/*
 * greth_rx - receive up to @limit frames from the GRETH descriptor ring
 * (non-gigabit, copy-based path).
 *
 * Each received frame is copied out of its DMA buffer into a freshly
 * allocated skb; the descriptor's buffer is then handed straight back to
 * the hardware.  Returns the number of descriptors processed, which is
 * also used by the caller for NAPI-style budget accounting.
 */
static int greth_rx(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb;
	int pkt_len;
	int bad, count;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		/* Ack RX interrupts before reading the descriptor status;
		 * mb() orders the register write against the status read. */
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);

		/* EN still set means the descriptor is still owned by HW. */
		if (unlikely(status & GRETH_BD_EN)) {
			break;
		}

		dma_addr = greth_read_bd(&bdp->addr);
		bad = 0;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {
			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			}
			if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			}
			if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}
		if (unlikely(bad)) {
			dev->stats.rx_errors++;

		} else {

			pkt_len = status & GRETH_BD_LEN;

			skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

			if (unlikely(skb == NULL)) {

				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - " "packet dropped\n");

				dev->stats.rx_dropped++;

			} else {
				skb_reserve(skb, NET_IP_ALIGN);

				/* Make the DMA buffer visible to the CPU before
				 * copying pkt_len bytes out of it. */
				dma_sync_single_for_cpu(greth->dev,
							dma_addr,
							pkt_len,
							DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);

				memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_bytes += pkt_len;
				dev->stats.rx_packets++;
				netif_receive_skb(skb);
			}
		}

		/* Re-arm the descriptor; WR marks the ring wrap point. */
		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);

		/* Hand the whole buffer back to the device (MAX_FRAME_SIZE,
		 * not pkt_len, since the next frame may be larger). */
		dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);

		spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);

		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;
}
Exemple #17
0
/*
 * dma_addr_to_virt - translate a DMA (bus) address into the kernel
 * virtual address of the same memory, assuming a 1:1 bus/physical
 * relationship so the linear mapping applies.
 */
static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long paddr = (unsigned long) dma_addr;

	return phys_to_virt(paddr);
}
Exemple #18
0
/*
 * greth_rx_gbit - receive up to @limit frames from the GRETH descriptor
 * ring (gigabit, zero-copy path).
 *
 * Unlike greth_rx(), good frames are delivered in the skb already mapped
 * to the descriptor, and a replacement skb is allocated and DMA-mapped to
 * refill the slot.  If allocation or mapping fails, the current skb is
 * reused in place and the frame is counted as dropped.  Returns the
 * number of descriptors processed.
 */
static int greth_rx_gbit(struct net_device *dev, int limit)
{
	struct greth_private *greth;
	struct greth_bd *bdp;
	struct sk_buff *skb, *newskb;
	int pkt_len;
	int bad, count = 0;
	u32 status, dma_addr;
	unsigned long flags;

	greth = netdev_priv(dev);

	for (count = 0; count < limit; ++count) {

		bdp = greth->rx_bd_base + greth->rx_cur;
		skb = greth->rx_skbuff[greth->rx_cur];
		/* Ack RX interrupts, then read status (mb() orders the two). */
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
		mb();
		status = greth_read_bd(&bdp->stat);
		bad = 0;

		/* EN set: descriptor still owned by the hardware — stop. */
		if (status & GRETH_BD_EN)
			break;

		/* Check status for errors. */
		if (unlikely(status & GRETH_RXBD_STATUS)) {

			if (status & GRETH_RXBD_ERR_FT) {
				dev->stats.rx_length_errors++;
				bad = 1;
			} else if (status &
				   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
				dev->stats.rx_frame_errors++;
				bad = 1;
			} else if (status & GRETH_RXBD_ERR_CRC) {
				dev->stats.rx_crc_errors++;
				bad = 1;
			}
		}

		/* Allocate new skb to replace current, not needed if the
		 * current skb can be reused */
		if (!bad && (newskb=netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
			skb_reserve(newskb, NET_IP_ALIGN);

			dma_addr = dma_map_single(greth->dev,
						      newskb->data,
						      MAX_FRAME_SIZE + NET_IP_ALIGN,
						      DMA_FROM_DEVICE);

			if (!dma_mapping_error(greth->dev, dma_addr)) {
				/* Process the incoming frame. */
				pkt_len = status & GRETH_BD_LEN;

				/* Unmap the received buffer before handing its
				 * skb up the stack. */
				dma_unmap_single(greth->dev,
						 greth_read_bd(&bdp->addr),
						 MAX_FRAME_SIZE + NET_IP_ALIGN,
						 DMA_FROM_DEVICE);

				if (netif_msg_pktdata(greth))
					greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);

				skb_put(skb, pkt_len);

				if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					skb_checksum_none_assert(skb);

				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				netif_receive_skb(skb);

				/* Install the replacement buffer in the slot. */
				greth->rx_skbuff[greth->rx_cur] = newskb;
				greth_write_bd(&bdp->addr, dma_addr);
			} else {
				if (net_ratelimit())
					dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
				dev_kfree_skb(newskb);
				/* reusing current skb, so it is a drop */
				dev->stats.rx_dropped++;
			}
		} else if (bad) {
			/* Bad Frame transfer, the skb is reused */
			dev->stats.rx_dropped++;
		} else {
			/* Failed Allocating a new skb. This is rather stupid
			 * but the current "filled" skb is reused, as if
			 * transfer failure. One could argue that RX descriptor
			 * table handling should be divided into cleaning and
			 * filling as the TX part of the driver
			 */
			if (net_ratelimit())
				dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
			/* reusing current skb, so it is a drop */
			dev->stats.rx_dropped++;
		}

		/* Re-arm the descriptor; WR marks the ring wrap point. */
		status = GRETH_BD_EN | GRETH_BD_IE;
		if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
			status |= GRETH_BD_WR;
		}

		wmb();
		greth_write_bd(&bdp->stat, status);
		spin_lock_irqsave(&greth->devlock, flags);
		greth_enable_rx(greth);
		spin_unlock_irqrestore(&greth->devlock, flags);
		greth->rx_cur = NEXT_RX(greth->rx_cur);
	}

	return count;

}
Exemple #19
0
/*
 * smp_boot_cpus - boot-time bringup of secondary CPUs (x86-64 era code).
 *
 * Validates the boot CPU / MP configuration / local APIC, falls back to
 * uniprocessor operation on any of several failure paths (goto smp_done),
 * kicks each present AP via do_boot_cpu(), builds cpu_sibling_map[], and
 * finally sets up the IO-APIC and the boot APIC clock.
 *
 * @max_cpus: user-requested cap on CPU count (0 = force UP).
 */
static void __init smp_boot_cpus(unsigned int max_cpus)
{
	unsigned apicid, cpu, bit, kicked;

	nmi_watchdog_default();

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk(KERN_INFO "CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();

	/* Robustness: ensure the boot CPU appears in the present map even
	 * if the BIOS forgot to list it. */
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		io_apic_irqs = 0;
		cpu_online_map = cpumask_of_cpu(0);
		cpu_set(0, cpu_sibling_map[0]);
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		goto smp_done;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
								 boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		io_apic_irqs = 0;
		cpu_online_map = cpumask_of_cpu(0);
		cpu_set(0, cpu_sibling_map[0]);
		phys_cpu_present_map = physid_mask_of_physid(0);
		disable_apic = 1;
		goto smp_done;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		io_apic_irqs = 0;
		cpu_online_map = cpumask_of_cpu(0);
		cpu_set(0, cpu_sibling_map[0]);
		phys_cpu_present_map = physid_mask_of_physid(0);
		disable_apic = 1;
		goto smp_done;
	}

	connect_bsp_APIC();
	setup_local_APIC();

	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id)
		BUG();

	x86_cpu_to_apicid[0] = boot_cpu_id;

	/*
	 * Now scan the CPU present map and fire up the other CPUs.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	/* kicked counts the boot CPU, hence starts at 1. */
	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if (apicid == boot_cpu_id || (apicid == BAD_APICID))
			continue;

		if (!physid_isset(apicid, phys_cpu_present_map))
			continue;
		/* NOTE(review): max_cpus is unsigned, so (max_cpus >= 0) is
		 * always true; only the (max_cpus <= cpucount+1) half of this
		 * condition has any effect. */
		if ((max_cpus >= 0) && (max_cpus <= cpucount+1))
			continue;

		do_boot_cpu(apicid);
		++kicked;
	}

	/*
	 * Cleanup possible dangling ends...
	 */
	{
		/*
		 * Install writable page 0 entry to set BIOS data area.
		 */
		local_flush_tlb();

		/*
		 * Paranoid:  Set warm reset code and vector here back
		 * to default values.
		 */
		CMOS_WRITE(0, 0xf);

		/* Clear the warm-reset vector in the BIOS data area (0x467). */
		*((volatile int *) phys_to_virt(0x467)) = 0;
	}

	/*
	 * Allow the user to impress friends.
	 */

	Dprintk("Before bogomips.\n");
	if (!cpucount) {
		printk(KERN_INFO "Only one processor found.\n");
	} else {
		unsigned long bogosum = 0;
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu_isset(cpu, cpu_callout_map))
				bogosum += cpu_data[cpu].loops_per_jiffy;
		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
			cpucount+1,
			bogosum/(500000/HZ),
			(bogosum/(5000/HZ))%100);
		Dprintk("Before bogocount - setting activated=1.\n");
	}

	/*
	 * Construct cpu_sibling_map[], so that we can tell the
	 * sibling CPU efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpus_clear(cpu_sibling_map[cpu]);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int siblings = 0;
		int i;
		if (!cpu_isset(cpu, cpu_callout_map))
			continue;

		/* Siblings share a physical package id (HT threads). */
		if (smp_num_siblings > 1) {
			for (i = 0; i < NR_CPUS; i++) {
				if (!cpu_isset(i, cpu_callout_map))
					continue;
				if (phys_proc_id[cpu] == phys_proc_id[i]) {
					siblings++;
					cpu_set(i, cpu_sibling_map[cpu]);
				}
			}
		} else { 
			siblings++;
			cpu_set(cpu, cpu_sibling_map[cpu]);
		}

		if (siblings != smp_num_siblings) {
			printk(KERN_WARNING 
	       "WARNING: %d siblings found for CPU%d, should be %d\n", 
			       siblings, cpu, smp_num_siblings);
			smp_num_siblings = siblings;
		}       
	}

	Dprintk("Boot done.\n");

	/*
	 * Here we can be sure that there is an IO-APIC in the system. Let's
	 * go and set it up:
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;

	setup_boot_APIC_clock();

	/*
	 * Synchronize the TSC with the AP
	 */
	if (cpu_has_tsc && cpucount)
		synchronize_tsc_bp();

 smp_done:
	time_init_smp();
}
Exemple #20
0
/*
 * sh_eth_rx - harvest completed receive descriptors and refill the ring.
 * @ndev: network device whose RX ring is scanned.
 *
 * Walks the RX ring from cur_rx, delivering good frames to the stack via
 * netif_rx() and bumping error counters for bad ones, then re-arms every
 * processed descriptor and refills ring slots whose skb was consumed.
 * Finally restarts the RX DMA engine if it has stopped.  Always returns 0.
 */
static int sh_eth_rx(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % RX_RING_SIZE;
	/* Number of slots not yet refilled: upper bound on what we consume. */
	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	/* A clear RD_RACT bit means the DMA engine is done with this slot. */
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		/* NOTE(review): frame_length is read without an edmac_to_cpu
		 * conversion, unlike status above -- confirm endianness. */
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		/* Frame did not end in this descriptor: count as too long. */
		if (!(desc_status & RDFEND))
			mdp->stats.rx_length_errors++;

		/* Any of these receive-frame-status bits marks a bad frame;
		 * attribute it to the matching error counter. */
		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			mdp->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				mdp->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				mdp->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				mdp->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				mdp->stats.rx_over_errors++;
		} else {
			/* Controller could not byte-swap the payload in
			 * hardware, so swap it in software before passing
			 * the frame up the stack. */
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			mdp->stats.rx_packets++;
			mdp->stats.rx_bytes += pkt_len;
		}
		/* Give the descriptor back to the hardware. */
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is 16 byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
					DMA_FROM_DEVICE);
			skb->dev = ndev;
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		/* The last ring entry also carries RD_RDEL so the DMA
		 * engine wraps back to the start of the ring. */
		if (entry >= RX_RING_SIZE - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
		sh_eth_write(ndev, EDRRR_R, EDRRR);

	return 0;
}
Exemple #21
0
/*
 * ioremap - map a physical MMIO/memory range for kernel access (ia64).
 * @phys_addr: physical start address of the region.
 * @size: length of the region in bytes.
 *
 * Picks the cheapest mapping that honours the EFI memory attributes:
 * the region-7 identity mapping for WB memory, the UC identity space
 * for UC memory, and an explicit vmalloc-area page-table mapping only
 * when neither identity mapping is safe for the whole range.
 * Returns NULL if a page-table mapping is needed but cannot be built.
 */
void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset;
	pgprot_t prot;
	u64 attr;
	unsigned long gran_base, gran_size;
	unsigned long page_base;

	/*
	 * For things in kern_memmap, we must use the same attribute
	 * as the rest of the kernel.  For more details, see
	 * Documentation/ia64/aliasing.txt.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	else if (attr & EFI_MEMORY_UC)
		return __ioremap_uc(phys_addr);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported for the whole granule, we prefer that.
	 */
	gran_base = GRANULEROUNDDOWN(phys_addr);
	gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * WB is not supported for the whole granule, so we can't use
	 * the region 7 identity mapping.  If we can safely cover the
	 * area with kernel page table mappings, we can use those
	 * instead.
	 */
	page_base = phys_addr & PAGE_MASK;
	size = PAGE_ALIGN(phys_addr + size) - page_base;
	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
		prot = PAGE_KERNEL;

		/*
		 * Mappings have to be page-aligned
		 */
		offset = phys_addr & ~PAGE_MASK;
		phys_addr &= PAGE_MASK;

		/*
		 * Ok, go for it..
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		area->phys_addr = phys_addr;
		addr = (void __iomem *) area->addr;
		if (ioremap_page_range((unsigned long) addr,
				(unsigned long) addr + size, phys_addr, prot)) {
			vunmap((void __force *) addr);
			return NULL;
		}

		/* Re-apply the sub-page offset stripped above. */
		return (void __iomem *) (offset + (char __iomem *)addr);
	}

	return __ioremap_uc(phys_addr);
}
/*
 * Return the kernel virtual address of the next descriptor in the
 * DDMA descriptor chain that @dp links to.
 */
void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
{
    unsigned long next_phys = DSCR_GET_NXTPTR(dp->dscr_nxtptr);

    return phys_to_virt(next_phys);
}
Exemple #23
0
/*
 * g2d_do_blit - validate buffers and start a FIMG2D bit-blit operation.
 * @g2d_dev: G2D device state.
 * @params:  blit parameters (source/destination rects, clip, flags).
 *
 * For kernel-memory surfaces the physical addresses are translated with
 * phys_to_virt() and the init_mm page table is used; for user memory the
 * caller's page tables are validated and cleaned first.  Returns true on
 * success and false on validation/initialisation failure.
 *
 * NOTE(review): the function mixes return conventions -- false (0), -1,
 * and true (1) -- callers should be audited before unifying them.
 */
int g2d_do_blit(struct g2d_global *g2d_dev, g2d_params *params)
{
	unsigned long 	pgd;
	int need_dst_clean = true;

	/* Reject blits with missing source or destination buffers. */
	if ((params->src_rect.addr == NULL) 
		|| (params->dst_rect.addr == NULL)) {
		FIMG2D_ERROR("error : addr Null\n");
		return false;
	}		

	if (params->flag.memory_type == G2D_MEMORY_KERNEL) {
#if defined(CONFIG_S5P_MEM_CMA)
		/* Kernel-memory surfaces must live inside a registered CMA
		 * region; anything else is rejected outright. */
		if (!cma_is_registered_region((unsigned int)params->src_rect.addr,
				GET_RECT_SIZE(params->src_rect))) {
			printk(KERN_ERR "[%s] SRC Surface is not included in CMA region\n", __func__);
			return -1;
		}
		if (!cma_is_registered_region((unsigned int)params->dst_rect.addr,
				GET_RECT_SIZE(params->dst_rect))) {
			printk(KERN_ERR "[%s] DST Surface is not included in CMA region\n", __func__);
			return -1;
		}
#endif
		/* Addresses arrived as physical; convert to kernel virtual
		 * and blit through the kernel page tables. */
		params->src_rect.addr = (unsigned char *)phys_to_virt((unsigned long)params->src_rect.addr);
		params->dst_rect.addr = (unsigned char *)phys_to_virt((unsigned long)params->dst_rect.addr);
		pgd = (unsigned long)init_mm.pgd;
	} else {
		/* User memory: use the calling process's page tables. */
		pgd = (unsigned long)current->mm->pgd;
	}

	if (params->flag.memory_type == G2D_MEMORY_USER)
	{
		g2d_clip clip_src;
		g2d_clip_for_src(&params->src_rect, &params->dst_rect, &params->clip, &clip_src);

		/* Overlapping src/dst in user memory cannot be blitted. */
		if (g2d_check_overlap(params->src_rect, params->dst_rect, params->clip))
			return false;

		/* Verify both surfaces are fully mapped in the caller's
		 * page tables before letting the hardware touch them. */
		g2d_dev->src_attribute =
			g2d_check_pagetable((unsigned char *)GET_START_ADDR(params->src_rect),
				(unsigned int)GET_RECT_SIZE(params->src_rect) + 8,
					(u32)virt_to_phys((void *)pgd));
		if (g2d_dev->src_attribute == G2D_PT_NOTVALID) {
			FIMG2D_DEBUG("Src is not in valid pagetable\n");
			return false;
		}

		g2d_dev->dst_attribute = 
			g2d_check_pagetable((unsigned char *)GET_START_ADDR_C(params->dst_rect, params->clip),
				(unsigned int)GET_RECT_SIZE_C(params->dst_rect, params->clip),
					(u32)virt_to_phys((void *)pgd));
		if (g2d_dev->dst_attribute == G2D_PT_NOTVALID) {
			FIMG2D_DEBUG("Dst is not in valid pagetable\n");
			return false;
		}

		/* Clean the page-table entries so the SysMMU sees them. */
		g2d_pagetable_clean((unsigned char *)GET_START_ADDR(params->src_rect),
				(u32)GET_RECT_SIZE(params->src_rect) + 8,
				(u32)virt_to_phys((void *)pgd));
		g2d_pagetable_clean((unsigned char *)GET_START_ADDR_C(params->dst_rect, params->clip),
				(u32)GET_RECT_SIZE_C(params->dst_rect, params->clip),
				(u32)virt_to_phys((void *)pgd));

		if (params->flag.render_mode & G2D_CACHE_OP) {
			/*g2d_mem_cache_oneshot((void *)GET_START_ADDR(params->src_rect), 
				(void *)GET_START_ADDR(params->dst_rect),
				(unsigned int)GET_REAL_SIZE(params->src_rect), 
				(unsigned int)GET_REAL_SIZE(params->dst_rect));*/
		//	need_dst_clean = g2d_check_need_dst_cache_clean(params);
			g2d_mem_inner_cache(params);
			g2d_mem_outer_cache(g2d_dev, params, &need_dst_clean);
		}
	}

	/* Point the SysMMU at whichever page table we validated above. */
	s5p_sysmmu_set_tablebase_pgd(g2d_dev->dev,
					(u32)virt_to_phys((void *)pgd));

	if(g2d_init_regs(g2d_dev, params) < 0) {
		return false;
	}

	/* Do bitblit */
	g2d_start_bitblt(g2d_dev, params);

	/* If the destination was not cleaned before the blit, invalidate
	 * the outer cache now so the CPU sees the hardware's writes. */
	if (!need_dst_clean)
		g2d_mem_outer_cache_inv(params);

	return true;
}
/**
 * s3c_pm_runcheck() - helper to check a resource on restore.
 * @res: The resource to check
 * @vak: Pointer to list of CRC32 values to check.
 *
 * Called from the s3c_pm_check_restore() via s3c_pm_run_sysram(), this
 * function runs the given memory resource checking it against the stored
 * CRC to ensure that memory is restored. The function tries to skip as
 * many of the areas used during the suspend process.
 */
static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
{
<<<<<<< HEAD
=======
	void *save_at = phys_to_virt(s3c_sleep_save_phys);
>>>>>>> 296c66da8a02d52243f45b80521febece5ed498a
	unsigned long addr;
	unsigned long left;
	void *stkpage;
	void *ptr;
	u32 calc;

	stkpage = (void *)((u32)&calc & ~PAGE_MASK);

	for (addr = res->start; addr < res->end;
	     addr += CHECK_CHUNKSIZE) {
		left = res->end - addr;

		if (left > CHECK_CHUNKSIZE)
			left = CHECK_CHUNKSIZE;
Exemple #25
0
/*
 * map_node - build kernel page tables for one m68k memory node.
 * @node: index into m68k_memory[] describing the physical chunk to map.
 *
 * Maps the node's physical range at its phys_to_virt() virtual address,
 * using the largest translation unit available: whole root-table entries
 * (32MB) or pointer-table "early termination" entries (256KB) on
 * 020/030, and regular page tables on 040/060.  Virtual page 0 is
 * deliberately left unmapped (zero PTE) to catch NULL dereferences.
 */
static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	/* Fold the protection/cache-mode bits into the physical address so
	 * it can be written into table entries directly. */
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			/* 020/030: a 32MB-aligned, 32MB-sized stretch can be
			 * mapped with a single root-level entry. */
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			/* Allocate a pointer table for this root slot. */
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				/* 256KB early-termination entry: maps the
				 * whole stretch without a page table. */
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				/* First 256KB of virtual space: use a real
				 * page table so page 0 can stay unmapped. */
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;
				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
			/* 040/060: normal three-level page-table walk. */
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				/* Keep virtual page 0 unmapped. */
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif
}
Exemple #26
0
/*
 * Make a DMA region coherent for CPU access after the device has
 * written it, by invalidating/unmapping the corresponding CPU cache
 * lines for the buffer at @paddr.
 */
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	__dma_unmap_area(vaddr, size, dir);
}
/**
 * mfc_probe() - probe and initialise the MFC (Multi Format Codec) device.
 * @pdev: platform device carrying the MFC memory/IRQ resources and the
 *        s3c_platform_mfc platform data.
 *
 * Maps the SFR register region, installs the interrupt handler (unless
 * built with MFC_POLLING), maps the two reserved buffer ports, grabs the
 * power-domain regulator and special clock, registers the misc device
 * and kicks off asynchronous firmware loading.
 *
 * Return: 0 on success, a negative errno on failure (-1 is preserved for
 * the missing-platform-data case from the original behaviour).
 */
static int mfc_probe(struct platform_device *pdev)
{
	struct s3c_platform_mfc *pdata;
	struct resource *res;
	size_t size;
	int ret;
	unsigned int mfc_port1_alloc_paddr;

	if (!pdev || !pdev->dev.platform_data) {
		dev_err(&pdev->dev, "Unable to probe mfc!\n");
		return -1;
	}

	pdata = pdev->dev.platform_data;

	/* mfc clock enable should be here */

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "failed to get memory region resource\n");
		ret = -ENOENT;
		goto probe_out;
	}

	/* 60K is required for mfc register (0x0 ~ 0xe008) */
	size = (res->end - res->start) + 1;
	mfc_mem = request_mem_region(res->start, size, pdev->name);
	if (mfc_mem == NULL) {
		dev_err(&pdev->dev, "failed to get memory region\n");
		ret = -ENOENT;
		goto err_mem_req;
	}

	mfc_sfr_base_vaddr = ioremap(mfc_mem->start, mfc_mem->end - mfc_mem->start + 1);
	if (mfc_sfr_base_vaddr == NULL) {
		dev_err(&pdev->dev, "failed to ioremap address region\n");
		ret = -ENOENT;
		goto err_mem_map;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		ret = -ENOENT;
		goto err_irq_res;
	}

#if !defined(MFC_POLLING)
	ret = request_irq(res->start, mfc_irq, IRQF_DISABLED, pdev->name, pdev);
	if (ret != 0) {
		dev_err(&pdev->dev, "failed to install irq (%d)\n", ret);
		goto err_irq_req;
	}
#endif

	mutex_init(&mfc_mutex);

	/*
	 * buffer memory secure
	 */
	mfc_port0_base_paddr = (unsigned int)pdata->buf_phy_base[0];
	mfc_port0_memsize = (unsigned int)pdata->buf_phy_size[0];

	mfc_debug(" mfc_port0_base_paddr= 0x%x \n", mfc_port0_base_paddr);
	mfc_debug(" mfc_port0_memsize = 0x%x \n", mfc_port0_memsize);

	/* Port buffers must start on a 128KB boundary for the codec DMA. */
	mfc_port0_base_paddr = ALIGN_TO_128KB(mfc_port0_base_paddr);
	mfc_port0_base_vaddr = phys_to_virt(mfc_port0_base_paddr);

	if (mfc_port0_base_vaddr == NULL) {
		mfc_err("fail to mapping port0 buffer\n");
		ret = -EPERM;
		goto err_vaddr_map;
	}

	mfc_port1_alloc_paddr = (unsigned int)pdata->buf_phy_base[1];
	mfc_port1_memsize = (unsigned int)pdata->buf_phy_size[1];

	mfc_port1_base_paddr = (unsigned int)s5p_get_media_membase_bank(1);
	mfc_port1_base_paddr = ALIGN_TO_128KB(mfc_port1_base_paddr);

	mfc_debug(" mfc_port1_base_paddr= 0x%x \n", mfc_port1_base_paddr);
	mfc_debug(" mfc_port1_memsize = 0x%x \n", mfc_port1_memsize);

	mfc_port1_alloc_paddr = ALIGN_TO_128KB(mfc_port1_alloc_paddr);
	mfc_port1_base_vaddr = phys_to_virt(mfc_port1_alloc_paddr);

	if (mfc_port1_base_vaddr == NULL) {
		mfc_err("fail to mapping port1 buffer\n");
		ret = -EPERM;
		goto err_vaddr_map;
	}

	mfc_set_port1_buff_paddr(mfc_port1_alloc_paddr);

	mfc_debug("mfc_port0_base_paddr = 0x%08x, mfc_port1_base_paddr = 0x%08x <<\n",
		(unsigned int)mfc_port0_base_paddr, (unsigned int)mfc_port1_base_paddr);
	mfc_debug("mfc_port0_base_vaddr = 0x%08x, mfc_port1_base_vaddr = 0x%08x <<\n",
		(unsigned int)mfc_port0_base_vaddr, (unsigned int)mfc_port1_base_vaddr);
	mfc_debug("mfc_port1_alloc_paddr = 0x%08x <<\n", (unsigned int)mfc_port1_alloc_paddr);

	/* Get mfc power domain regulator */
	mfc_pd_regulator = regulator_get(&pdev->dev, "pd");
	if (IS_ERR(mfc_pd_regulator)) {
		mfc_err("failed to find mfc power domain\n");
		ret = PTR_ERR(mfc_pd_regulator);
		goto err_regulator_get;
	}

	mfc_sclk = clk_get(&pdev->dev, "sclk_mfc");
	if (IS_ERR(mfc_sclk)) {
		mfc_err("failed to find mfc clock source\n");
		ret = PTR_ERR(mfc_sclk);
		goto err_clk_get;
	}

	mfc_init_mem_inst_no();
	mfc_init_buffer();

	ret = misc_register(&mfc_miscdev);
	if (ret) {
		mfc_err("MFC can't misc register on minor\n");
		goto err_misc_reg;
	}

	/*
	 * MFC FW downloading
	 */
	ret = request_firmware_nowait(THIS_MODULE,
				      FW_ACTION_HOTPLUG,
				      MFC_FW_NAME,
				      &pdev->dev,
				      GFP_KERNEL,
				      pdev,
				      mfc_firmware_request_complete_handler);
	if (ret) {
		mfc_err("MFCINST_ERR_FW_INIT_FAIL\n");
		ret = -EPERM;
		goto err_req_fw;
	}

	return 0;

err_req_fw:
	misc_deregister(&mfc_miscdev);
err_misc_reg:
	clk_put(mfc_sclk);
err_clk_get:
	regulator_put(mfc_pd_regulator);
err_regulator_get:
err_vaddr_map:
#if !defined(MFC_POLLING)
	/* Only free the IRQ if it was actually requested above. */
	free_irq(res->start, pdev);
#endif
	mutex_destroy(&mfc_mutex);
err_irq_req:
err_irq_res:
	iounmap(mfc_sfr_base_vaddr);
err_mem_map:
	/* release_mem_region() takes the region start address, not the
	 * struct resource pointer returned by request_mem_region(). */
	release_mem_region(mfc_mem->start, size);
err_mem_req:
probe_out:
	dev_err(&pdev->dev, "not found (%d).\n", ret);
	return ret;
}
Exemple #28
0
/*
 * Return the address of the level-2 PTE slot covering @iova inside the
 * level-2 table that section entry @sent points to.
 */
static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	sysmmu_pte_t *lv2_table;

	lv2_table = (sysmmu_pte_t *)phys_to_virt(lv2table_base(sent));
	return lv2_table + lv2ent_offset(iova);
}
Exemple #29
0
/*
 * Translate a bus/DMA address back into a kernel virtual address by
 * first converting it to a CPU physical address.
 */
static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	return (unsigned long)phys_to_virt(plat_dma_addr_to_phys(dma_addr));
}
Exemple #30
0
/*
 * init() - register the S3C LCD with the PVR display-class services.
 *
 * Reads the framebuffer geometry via s3cfb_direct_ioctl(), maps the
 * controller registers, allocates and fills the S3C_LCD_DEVINFO
 * jump-table/device-info structure, registers the display-class device
 * and its flip command processor with PVR services.
 *
 * Return: 0 on success, non-zero on failure.
 */
int init()
{
    IMG_UINT32 screen_w, screen_h;
    IMG_UINT32 pa_fb, va_fb;
    IMG_UINT32 byteSize;
    int	i;

    int rgb_format, bytes_per_pixel;

    struct fb_fix_screeninfo fix;
    struct fb_var_screeninfo var;


    s3cfb_direct_ioctl(FB_NUM, FBIOGET_FSCREENINFO, (unsigned long)&fix);
    s3cfb_direct_ioctl(FB_NUM, FBIOGET_VSCREENINFO, (unsigned long)&var);

    screen_w = var.xres;
    screen_h = var.yres;
    pa_fb = fix.smem_start;
    printk("PA FB = 0x%X, bits per pixel = %d\n", (unsigned int)fix.smem_start, (unsigned int)var.bits_per_pixel);
    va_fb = (unsigned long)phys_to_virt(pa_fb);

    printk("screen width=%d height=%d va=0x%x pa=0x%x\n", (int)screen_w, (int)screen_h, (unsigned int)va_fb, (unsigned int)pa_fb);

#if 1
    /* NOTE(review): casting ioremap()'s pointer result to an integer
     * truncates on 64-bit builds -- confirm the declared type of regs. */
    regs = (volatile unsigned int)ioremap(0xF8000000, 0x00100000);
#endif
    //spin_lock_init(g_psLCDInfo->psSwapChainLock);

    if (g_psLCDInfo == NULL)
    {
        PFN_CMD_PROC	pfnCmdProcList[DC_S3C_LCD_COMMAND_COUNT];
        IMG_UINT32	aui32SyncCountList[DC_S3C_LCD_COMMAND_COUNT][2];

        g_psLCDInfo = (S3C_LCD_DEVINFO*)kmalloc(sizeof(S3C_LCD_DEVINFO),GFP_KERNEL);
        /* Bail out on allocation failure instead of dereferencing NULL. */
        if (g_psLCDInfo == NULL)
            return 1;

        g_psLCDInfo->ui32NumFormats = S3C_DISPLAY_FORMAT_NUM;
        /* Pick the pixel format matching the framebuffer depth. */
        switch (var.bits_per_pixel)
        {
        case 16:
            rgb_format = PVRSRV_PIXEL_FORMAT_RGB565;
            bytes_per_pixel = 2;
            break;
        case 32:
            rgb_format = PVRSRV_PIXEL_FORMAT_ARGB8888;
            bytes_per_pixel = 4;
            break;
        default:
            rgb_format = PVRSRV_PIXEL_FORMAT_ARGB8888;
            bytes_per_pixel = 4;
            break;
        }

        g_psLCDInfo->asDisplayForamtList[0].pixelformat = rgb_format;
        g_psLCDInfo->ui32NumDims = S3C_DISPLAY_DIM_NUM;
        g_psLCDInfo->asDisplayDimList[0].ui32ByteStride = (bytes_per_pixel) * screen_w;
        g_psLCDInfo->asDisplayDimList[0].ui32Height = screen_h;
        g_psLCDInfo->asDisplayDimList[0].ui32Width = screen_w;

        g_psLCDInfo->sSysBuffer.bufferPAddr.uiAddr = pa_fb;
        g_psLCDInfo->sSysBuffer.bufferVAddr = (IMG_CPU_VIRTADDR)va_fb;
        byteSize = screen_w * screen_h * bytes_per_pixel;
        g_psLCDInfo->sSysBuffer.byteSize = (IMG_UINT32)byteSize;

        for (i=0; i<S3C_MAX_BACKBUFFERRS; i++)
        {
            g_psLCDInfo->asBackBuffers[i].byteSize = g_psLCDInfo->sSysBuffer.byteSize;
#if 1
            // modified by jamie (2010.04.09)
            // to use the frame buffer already allocated by LCD driver.
            g_psLCDInfo->asBackBuffers[i].bufferPAddr.uiAddr = pa_fb + byteSize * (i+1);
            g_psLCDInfo->asBackBuffers[i].bufferVAddr = (IMG_CPU_VIRTADDR)phys_to_virt(g_psLCDInfo->asBackBuffers[i].bufferPAddr.uiAddr);

#else
            if(AllocLinearMemory(
                        g_psLCDInfo->asBackBuffers[i].byteSize,
                        (IMG_UINT32*)&(g_psLCDInfo->asBackBuffers[i].bufferVAddr),
                        &(g_psLCDInfo->asBackBuffers[i].bufferPAddr.uiAddr)))
                return 1;
#endif

            printk("Back frameBuffer[%d].VAddr=%p PAddr=%p size=%d\n",
                   i,
                   (void*)g_psLCDInfo->asBackBuffers[i].bufferVAddr,
                   (void*)g_psLCDInfo->asBackBuffers[i].bufferPAddr.uiAddr,
                   (int)g_psLCDInfo->asBackBuffers[i].byteSize);
        }

        g_psLCDInfo->bFlushCommands = S3C_FALSE;
        g_psLCDInfo->psSwapChain = NULL;

        PVRGetDisplayClassJTable(&(g_psLCDInfo->sPVRJTable));

        /* Fill the services-to-display jump table with our callbacks. */
        g_psLCDInfo->sDCJTable.ui32TableSize = sizeof(PVRSRV_DC_SRV2DISP_KMJTABLE);
        g_psLCDInfo->sDCJTable.pfnOpenDCDevice = OpenDCDevice;
        g_psLCDInfo->sDCJTable.pfnCloseDCDevice = CloseDCDevice;
        g_psLCDInfo->sDCJTable.pfnEnumDCFormats = EnumDCFormats;
        g_psLCDInfo->sDCJTable.pfnEnumDCDims = EnumDCDims;
        g_psLCDInfo->sDCJTable.pfnGetDCSystemBuffer = GetDCSystemBuffer;
        g_psLCDInfo->sDCJTable.pfnGetDCInfo = GetDCInfo;
        g_psLCDInfo->sDCJTable.pfnGetBufferAddr = GetDCBufferAddr;
        g_psLCDInfo->sDCJTable.pfnCreateDCSwapChain = CreateDCSwapChain;
        g_psLCDInfo->sDCJTable.pfnDestroyDCSwapChain = DestroyDCSwapChain;
        g_psLCDInfo->sDCJTable.pfnSetDCDstRect = SetDCDstRect;
        g_psLCDInfo->sDCJTable.pfnSetDCSrcRect = SetDCSrcRect;
        g_psLCDInfo->sDCJTable.pfnSetDCDstColourKey = SetDCDstColourKey;
        g_psLCDInfo->sDCJTable.pfnSetDCSrcColourKey = SetDCSrcColourKey;
        g_psLCDInfo->sDCJTable.pfnGetDCBuffers = GetDCBuffers;
        g_psLCDInfo->sDCJTable.pfnSwapToDCBuffer = SwapToDCBuffer;
        g_psLCDInfo->sDCJTable.pfnSwapToDCSystem = SwapToDCSystem;
        g_psLCDInfo->sDCJTable.pfnSetDCState = S3CSetState;

        g_psLCDInfo->sDisplayInfo.ui32MinSwapInterval=0;
        g_psLCDInfo->sDisplayInfo.ui32MaxSwapInterval=0;
        g_psLCDInfo->sDisplayInfo.ui32MaxSwapChains=1;
        g_psLCDInfo->sDisplayInfo.ui32MaxSwapChainBuffers=S3C_NUM_TOTAL_BUFFER;
        g_psLCDInfo->sDisplayInfo.ui32PhysicalWidthmm=var.width;	// width of lcd in mm
        g_psLCDInfo->sDisplayInfo.ui32PhysicalHeightmm=var.height;	// height of lcd in mm

        strncpy(g_psLCDInfo->sDisplayInfo.szDisplayName, "s3c_lcd", MAX_DISPLAY_NAME_SIZE);

        if(g_psLCDInfo->sPVRJTable.pfnPVRSRVRegisterDCDevice	(&(g_psLCDInfo->sDCJTable),
                (IMG_UINT32 *)(&(g_psLCDInfo->ui32DisplayID))) != PVRSRV_OK)
        {
            return 1;
        }

        //printk("deviceID:%d\n",(int)g_psLCDInfo->ui32DisplayID);

        // register flip command
        pfnCmdProcList[DC_FLIP_COMMAND] = ProcessFlip;
        aui32SyncCountList[DC_FLIP_COMMAND][0] = 0;
        aui32SyncCountList[DC_FLIP_COMMAND][1] = 2;

        if (g_psLCDInfo->sPVRJTable.pfnPVRSRVRegisterCmdProcList(g_psLCDInfo->ui32DisplayID,
                &pfnCmdProcList[0], aui32SyncCountList, DC_S3C_LCD_COMMAND_COUNT)
                != PVRSRV_OK)
        {
            printk("failing register commmand proc list   deviceID:%d\n",(int)g_psLCDInfo->ui32DisplayID);
            return PVRSRV_ERROR_CANT_REGISTER_CALLBACK;
        }

        LCDControllerBase = (volatile unsigned int *)ioremap(0xf8000000,1024);
    }

    return 0;

}