Ejemplo n.º 1
0
/* allocate user space mmapable block of memory in the kernel space */
void * rvmalloc(unsigned long size)
{
	void * mem;
	unsigned long adr, page;

	/*
	 * Round the request up to a whole number of pages.  Without this,
	 * a size that is not a PAGE_SIZE multiple makes the loop below
	 * underflow: "size" is unsigned, so "size -= PAGE_SIZE" wraps to a
	 * huge value instead of going negative, and the loop keeps running,
	 * reserving pages far beyond the allocation.
	 */
	size = PAGE_ALIGN(size);

#if LINUX_VERSION_CODE < 0x020300
	mem=vmalloc(size);
#else
	/* vmalloc_32() guarantees 32-bit addressable memory (DMA capable) */
	mem=vmalloc_32(size);
#endif
	if (mem) 
	{
		memset(mem, 0, size); /* Clear the ram out, no junk to the user */
	        adr=(unsigned long) mem;
		/* Mark every page reserved so remapping to user space works. */
		while (size > 0) 
                {
#if LINUX_VERSION_CODE < 0x020300
	                page = kvirt_to_phys(adr);
			mem_map_reserve(MAP_NR(phys_to_virt(page)));
#else
	                page = kvirt_to_pa(adr);
			mem_map_reserve(virt_to_page(__va(page)));
#endif
			adr+=PAGE_SIZE;
			size-=PAGE_SIZE;
		}
	}
	return mem;
}
Ejemplo n.º 2
0
/*
 * rvmalloc - allocate a zeroed, 32-bit addressable buffer whose pages are
 * all marked reserved so the region can later be mmap()ed to user space.
 *
 * @size: requested size in bytes; rounded up to a whole number of pages.
 *
 * Returns the kernel virtual address of the buffer, or NULL on failure.
 */
static void *rvmalloc(unsigned long size)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
	struct page *page;
#endif
	void *buf;
	unsigned long vaddr;

	size = PAGE_ALIGN(size);

	buf = vmalloc_32(size);
	if (!buf)
		return NULL;

	/* Clear the ram out, no junk to the user */
	memset(buf, 0, size);

	/* Walk the buffer a page at a time and reserve each page. */
	for (vaddr = (unsigned long) buf; size > 0;
	     vaddr += PAGE_SIZE, size -= PAGE_SIZE) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
		page = vmalloc_to_page((void *)vaddr);
		mem_map_reserve(page);
#else
		SetPageReserved(vmalloc_to_page((void *)vaddr));
#endif
	}

	return buf;
}
Ejemplo n.º 3
0
/*
 * alloc_buffer - allocate the capture buffer and its DMA descriptor list
 * for a vino device.
 *
 * @v:    device to set up (fills v->desc, v->dma_desc and v->page_count)
 * @size: requested buffer size in bytes
 *
 * Returns 0 on success, -ENOMEM if the bookkeeping allocations fail, or
 * -ENOBUFS if the page allocation loop cannot complete (everything
 * allocated up to that point is unwound).
 */
static int alloc_buffer(struct vino_device *v, int size)
{
	int count, i, j, err;

	err = i = 0;
	/* Number of pages: size rounded up, padded and forced to a
	   multiple of 4. */
	count = (size / PAGE_SIZE + 4) & ~3;
	v->desc = (unsigned long *) kmalloc(count * sizeof(unsigned long),
					    GFP_KERNEL);
	if (!v->desc)
		return -ENOMEM;

	/* Descriptor list: PAGE_RATIO hardware entries per kernel page,
	   plus 4 pages of slack; the stop marker written below lands at
	   index PAGE_RATIO * count, safely inside this allocation. */
	v->dma_desc.cpu = pci_alloc_consistent(NULL, PAGE_RATIO * (count+4) *
					       sizeof(dma_addr_t),
					       &v->dma_desc.dma);
	if (!v->dma_desc.cpu) {
		err = -ENOMEM;
		goto out_free_desc;
	}
	while (i < count) {
		dma_addr_t dma;

		v->desc[i] = get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!v->desc[i])
			break;	/* partial allocation; unwound below */
		dma = pci_map_single(NULL, (void *)v->desc[i], PAGE_SIZE,
				     PCI_DMA_FROMDEVICE);
		/* Each kernel page provides PAGE_RATIO chunks of
		   VINO_PAGE_SIZE bytes to the hardware. */
		for (j = 0; j < PAGE_RATIO; j++)
			v->dma_desc.cpu[PAGE_RATIO * i + j ] = 
				dma + VINO_PAGE_SIZE * j;
		/* Reserve the page so it can be remapped to user space. */
		mem_map_reserve(virt_to_page(v->desc[i]));
		i++;
	}
	/* Terminate the descriptor chain. */
	v->dma_desc.cpu[PAGE_RATIO * count] = VINO_DESC_STOP;
	/* i < count means the loop above broke early.  The post-decrement
	   leaves i pointing at the last successfully allocated page, and
	   the while below unwinds indices i..0 in reverse order. */
	if (i-- < count) {
		while (i >= 0) {
			mem_map_unreserve(virt_to_page(v->desc[i]));
			pci_unmap_single(NULL, v->dma_desc.cpu[PAGE_RATIO * i],
					 PAGE_SIZE, PCI_DMA_FROMDEVICE);
			free_page(v->desc[i]);
			i--;
		}
		pci_free_consistent(NULL,
				    PAGE_RATIO * (count+4) * sizeof(dma_addr_t),
				    (void *)v->dma_desc.cpu, v->dma_desc.dma);
		err = -ENOBUFS;
		goto out_free_desc;
	}
	v->page_count = count;
	return 0;

out_free_desc:
	kfree(v->desc);
	return err;
}
Ejemplo n.º 4
0
/*
 * reserve_pages - adjust the reference count and reserved state of every
 * page in the array.
 *
 * @array: pages to operate on
 * @nr:    number of entries in @array
 * @flag:  non-zero => take a reference and reserve each page;
 *         zero     => drop a reference and unreserve each page.
 */
static void reserve_pages(struct contiguous_page **array, int nr, int flag)
{
	int idx;

	for (idx = 0; idx < nr; ++idx) {
		struct page *pg = array[idx]->page;

		if (flag) {
			atomic_inc(&pg->count);
			mem_map_reserve(pg);
		} else {
			atomic_dec(&pg->count);
			mem_map_unreserve(pg);
		}
	}
}
Ejemplo n.º 5
0
/*
 * dmaalloc - allocate a DMA-capable buffer and reserve its pages so the
 * region can be remapped to user space.
 *
 * @size: requested size in bytes (0 returns NULL).
 *
 * Returns the kernel virtual address of the buffer, or NULL on failure.
 */
static inline void *dmaalloc(size_t size)
{
	unsigned long addr;

	if (size == 0) {
		return NULL;
	}
	addr = __get_dma_pages(GFP_KERNEL, get_order(size));
	if (addr) {
		struct page *page;

		/*
		 * Reserve every page the buffer touches, up to and including
		 * the page holding the LAST byte (addr + size - 1).  The old
		 * "page < virt_to_page(addr + size)" bound skipped the final
		 * page whenever size was not a PAGE_SIZE multiple.
		 */
		for (page = virt_to_page(addr); page <= virt_to_page(addr + size - 1); page++)
			mem_map_reserve(page);
	}
	return (void *)addr;
}
Ejemplo n.º 6
0
/*
 * sscape_alloc_dma - allocate the raw DMA buffer for a soundscape device.
 *
 * Tries progressively smaller power-of-two buffers (never below one page)
 * until an allocation succeeds, then checks that the buffer does not cross
 * a DMA page boundary and lies below MAX_DMA_ADDRESS, and finally reserves
 * each of its pages so they can be remapped.
 *
 * Returns 1 on success, 0 on failure (no buffer is left allocated).
 */
static int sscape_alloc_dma(sscape_info *devc)
{
	char *start_addr, *end_addr;
	int dma_pagesize;
	int sz, size;
	struct page *page;

	if (devc->raw_buf != NULL) return 0;	/* Already done */
	/* 8-bit DMA channels (<4) address 64k pages, 16-bit channels 128k. */
	dma_pagesize = (devc->dma < 4) ? (64 * 1024) : (128 * 1024);
	devc->raw_buf = NULL;
	devc->buffsize = 8192*4;
	if (devc->buffsize > dma_pagesize) devc->buffsize = dma_pagesize;
	start_addr = NULL;
	/*
	 * Now loop until we get a free buffer. Try to get smaller buffer if
	 * it fails. Don't accept smaller than 8k buffer for performance
	 * reasons.
	 */
	while (start_addr == NULL && devc->buffsize > PAGE_SIZE) {
		/* sz = page order such that PAGE_SIZE << sz >= buffsize */
		for (sz = 0, size = PAGE_SIZE; size < devc->buffsize; sz++, size <<= 1);
		devc->buffsize = PAGE_SIZE * (1 << sz);
		start_addr = (char *) __get_free_pages(GFP_ATOMIC|GFP_DMA, sz);
		if (start_addr == NULL) devc->buffsize /= 2;
	}

	if (start_addr == NULL) {
		printk(KERN_ERR "sscape pnp init error: Couldn't allocate DMA buffer\n");
		return 0;
	} else {
		/* make some checks */
		end_addr = start_addr + devc->buffsize - 1;		
		/* now check if it fits into the same dma-pagesize */

		if (((long) start_addr & ~(dma_pagesize - 1)) != ((long) end_addr & ~(dma_pagesize - 1))
		    || end_addr >= (char *) (MAX_DMA_ADDRESS)) {
			printk(KERN_ERR "sscape pnp: Got invalid address 0x%lx for %db DMA-buffer\n", (long) start_addr, devc->buffsize);
			/*
			 * BUGFIX: the original returned here without
			 * releasing the pages obtained above, leaking the
			 * whole buffer.  sz still holds the order of the
			 * successful allocation.
			 */
			free_pages((unsigned long) start_addr, sz);
			return 0;
		}
	}
	devc->raw_buf = start_addr;
	devc->raw_buf_phys = virt_to_bus(start_addr);

	/* Reserve every page (inclusive of the last byte's page) so the
	   buffer can be remapped to user space. */
	for (page = virt_to_page(start_addr); page <= virt_to_page(end_addr); page++)
		mem_map_reserve(page);
	return 1;
}
Ejemplo n.º 7
0
/*
 * dmaalloc - allocate a DMA-capable buffer and reserve its pages so the
 * region can be remapped to user space (legacy MAP_NR interface).
 *
 * @size: requested size in bytes (0 returns NULL).
 *
 * Returns the kernel virtual address of the buffer, or NULL on failure.
 */
static inline void *dmaalloc(size_t size)
{
	unsigned long addr;

	if (size == 0) {
		return NULL;
	}
	addr = __get_dma_pages(GFP_KERNEL, __get_order(size));
	if (addr) {
		int i;

		/*
		 * Reserve up to and including the map entry of the LAST byte
		 * (addr + size - 1).  The old "i < MAP_NR(addr + size)" bound
		 * skipped the final page whenever size was not a PAGE_SIZE
		 * multiple.
		 */
		for (i = MAP_NR(addr); i <= MAP_NR(addr + size - 1); i++) {
			mem_map_reserve(i);
		}
	}
	return (void *)addr;
}
Ejemplo n.º 8
0
/*
 * rvmalloc - allocate a zeroed buffer via VMALLOC_32 and mark every page
 * it spans reserved so the region can be remapped to user space.
 *
 * @size: requested size in bytes (signed, so the reservation loop also
 *        covers a trailing partial page before going negative).
 *
 * Returns the kernel virtual address of the buffer, or NULL on failure.
 */
void * rvmalloc(signed long size)
{
	void *mem;
	unsigned long vaddr, phys;

	mem = VMALLOC_32(size);
	if (!mem)
		return NULL;

	/* Zero the buffer so no stale kernel data reaches user space. */
	memset(mem, 0, size);

	for (vaddr = (unsigned long) mem; size > 0;
	     vaddr += PAGE_SIZE, size -= PAGE_SIZE) {
		/* Translate the vmalloc address to physical, then reserve
		   the corresponding struct page. */
		phys = kvirt_to_pa(vaddr);
		mem_map_reserve(virt_to_page((unsigned long)__va(phys)));
	}
	return mem;
}
Ejemplo n.º 9
0
/*
 * bmalloc - allocate a large physically contiguous buffer and reserve its
 * pages so they can be remapped to user space.
 *
 * @size: requested size in bytes.
 *
 * Returns the kernel virtual address of the buffer, or NULL on failure.
 */
void* bmalloc(unsigned long size)
{
	void* mem;

	/*
	 * Round up to whole pages first.  This keeps the unsigned
	 * "size -= PAGE_SIZE" loop below from wrapping past zero, and
	 * prevents "size/PAGE_SIZE" from truncating the page count in the
	 * bigphysarea path.
	 */
	size = PAGE_ALIGN(size);
#ifdef CONFIG_BIGPHYS_AREA
	mem = bigphysarea_alloc_pages(size/PAGE_SIZE, 1, GFP_KERNEL);
#else
	/*
	 * The following function got a lot of memory at boottime,
	 * so we know its always there...
	 */
	mem = (void*)__get_free_pages(GFP_USER|GFP_DMA,get_order(size));
#endif
	if (mem) {
		/*
		 * "adr" is already a kernel VIRTUAL address, so hand it to
		 * virt_to_page() directly.  The old code passed it through
		 * phys_to_virt() first, treating a virtual address as
		 * physical and reserving the wrong pages.
		 */
		unsigned long adr = (unsigned long)mem;
		while (size > 0) {
			mem_map_reserve(virt_to_page(adr));
			adr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	return mem;
}
/*
 * alloc_framebuffer - allocate the frame buffer memory.
 *
 * Over-allocates by two pages with kmalloc so a page-aligned region of
 * RUBY_PHYSICAL_MEM_SIZE bytes can be carved out, then reserves each page
 * so the buffer can be remapped to user space.
 *
 * Returns the page-aligned virtual address, or NULL on allocation failure.
 *
 * NOTE(review): the raw kmalloc pointer is not stored anywhere, so this
 * allocation can never be kfree()d — presumably the buffer lives for the
 * driver's lifetime; confirm against the unload path.
 */
static unsigned char *
alloc_framebuffer(void)
{
	int *kmalloc_ptr = NULL;
	int *kmalloc_area = NULL;
	unsigned long virt_addr;

	kmalloc_ptr = kmalloc(RUBY_PHYSICAL_MEM_SIZE + 2 * PAGE_SIZE, GFP_KERNEL);
	if (!kmalloc_ptr)	/* BUGFIX: result was previously used unchecked */
		return NULL;

	/* Round up to the next page boundary inside the over-allocation. */
	kmalloc_area = (int *)(((unsigned long)kmalloc_ptr + PAGE_SIZE -1) & PAGE_MASK);

	/* Reserve all pages so they can be remapped to user space. */
	for (virt_addr = (unsigned long)kmalloc_area;
	     virt_addr < (unsigned long)kmalloc_area + RUBY_PHYSICAL_MEM_SIZE;
	     virt_addr += PAGE_SIZE)
		mem_map_reserve(virt_to_page(virt_addr));

	return (unsigned char *)kmalloc_area;
}
Ejemplo n.º 11
0
/*
 * dev_nvram_init - module init for the NVRAM character device.
 *
 * Reserves the pages backing nvram_buf so user space can mmap() them,
 * locates the backing MTD partition (when CONFIG_MTD), initializes the
 * locks, registers the char device, initializes the hash table, and
 * seeds the "sdram_ncdl" variable on first boot.
 *
 * Returns 0 on success or a negative errno (cleanup via dev_nvram_exit).
 */
static int __init
dev_nvram_init(void)
{
	int order = 0, ret = 0;
	struct page *page, *end;
	unsigned int i;

	/* Allocate and reserve memory to mmap() */
	/* Find the smallest page order covering NVRAM_SPACE, then mark
	   every page of nvram_buf reserved.  (nvram_buf is defined
	   elsewhere — presumably a static buffer; verify.) */
	while ((PAGE_SIZE << order) < NVRAM_SPACE)
		order++;
	end = virt_to_page(nvram_buf + (PAGE_SIZE << order) - 1);
	for (page = virt_to_page(nvram_buf); page <= end; page++)
		mem_map_reserve(page);

#ifdef CONFIG_MTD
	/* Find associated MTD device */
	/* Keep the first device named "nvram" that is large enough;
	   drop the reference on every non-matching device. */
	for (i = 0; i < MAX_MTD_DEVICES; i++) {
		nvram_mtd = get_mtd_device(NULL, i);
		if (nvram_mtd) {
			if (!strcmp(nvram_mtd->name, "nvram") &&
			    nvram_mtd->size >= NVRAM_SPACE)
				break;
			put_mtd_device(nvram_mtd);
		}
	}
	if (i >= MAX_MTD_DEVICES)
		nvram_mtd = NULL;
#endif

	/* Initialize hash table lock */
	spin_lock_init(&nvram_lock);

	/* Initialize commit semaphore */
	init_MUTEX(&nvram_sem);

	/* Register char device */
	/* Major number 0 requests dynamic allocation. */
	if ((nvram_major = devfs_register_chrdev(0, "nvram", &dev_nvram_fops)) < 0) {
		ret = nvram_major;
		goto err;
	}

	/* Initialize hash table */
	_nvram_init(sbh);

	/* Create /dev/nvram handle */
	nvram_handle = devfs_register(NULL, "nvram", DEVFS_FL_NONE, nvram_major, 0,
				      S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, &dev_nvram_fops, NULL);

	/* Set the SDRAM NCDL value into NVRAM if not already done */
	if (getintvar(NULL, "sdram_ncdl") == 0) {
		unsigned int ncdl;
		char buf[] = "0x00000000";

		if ((ncdl = sb_memc_get_ncdl(sbh))) {
			sprintf(buf, "0x%08x", ncdl);
			nvram_set("sdram_ncdl", buf);
			nvram_commit();
		}
	}

	return 0;

 err:
	/* dev_nvram_exit() undoes the work above — presumably shared with
	   the module unload path; confirm it tolerates partial init. */
	dev_nvram_exit();
	return ret;
}
Ejemplo n.º 12
0
/*============================================================================
 * Do window creation here
 *
 * NOTE(review): this copy of __create_slave_window appears to be TRUNCATED
 * (it ends mid-function), and the #ifdef ARCH blocks below close braces on
 * different sides of the conditional, so the brace structure only balances
 * per-configuration.  A complete version of this function exists later in
 * this file — compare against it before changing anything here.
 */
static int __create_slave_window(vme_slave_handle_t handle,
				 struct __vme_slave_window *window, int num,
				 uint32_t ctl, uint32_t vme_addr, size_t size,
				 void *phys_addr)
{
	uint32_t base, bound, to, off;
#ifndef ARCH
	struct page *page;
#endif
	int resolution;

	/* Windows 0 and 4 have a 4kb resolution, others have
	   64kb resolution
	 */
	resolution = (num % 4) ? 0x10000 : 0x1000;
	/* Align vme_addr down to the resolution and grow size to cover the
	   alignment slack plus a whole resolution multiple. */
	off = vme_addr % resolution;
	vme_addr -= off;
	size += off;
	size += (size % resolution) ? resolution - (size % resolution) : 0;

	/* If we're given the physical address, then use it,
	   otherwise, let the kernel allocate the memory
	   wherever it wants to.
	 */
	if (phys_addr) {
		phys_addr -= off;
		if ((uint32_t) phys_addr % resolution) {
			write_unlock(&slave_rwlock);
			printk(KERN_ERR "VME: Invalid physical address for "
			       "slave window %d\n", num);
			return -EINVAL;
		}
	} else {
		window->vptr = pci_alloc_consistent(universe_pci_dev, size,
						    &window->resource);
		if (NULL == window->vptr) {
			window->resource = 0;
			window->vptr = NULL;
			write_unlock(&slave_rwlock);
			printk(KERN_ERR "VME: Failed to allocate memory for "
			       "slave window %d\n", num);
			return -ENOMEM;
		}
#ifdef ARCH
	    memset(window->vptr, 0, size);
	}
#else
		/* The memory manager wants to remove the
		   allocated pages from main memory.  We don't
		   want that because the user ends up seeing
		   all zero's so we set the PG_RESERVED bit
		   on each page.
		 */
		for (page = virt_to_page(window->vptr);
		     page < virt_to_page(window->vptr + size); ++page)
		{		     
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,68)
			mem_map_reserve(page);
#else			
			SetPageReserved(page);
#endif			
		}

		phys_addr = (void *) virt_to_phys(window->vptr);
#endif
		base = vme_addr;
		bound = base + size;
#ifdef ARCH
	to = (uint32_t) window->resource - base;
	window->phys_base = (uint32_t) window->vptr;
#else
		to = (uint32_t) phys_addr - base;
	}
Ejemplo n.º 13
0
/*
 * sound_alloc_dmap - allocate the raw DMA buffer for an audio device.
 *
 * Tries progressively smaller power-of-two buffers until an allocation
 * succeeds, then checks that the buffer does not cross a DMA page boundary
 * and lies below MAX_DMA_ADDRESS, and finally reserves each of its pages.
 *
 * Returns 0 on success, -ENOMEM if no buffer could be allocated, or
 * -EFAULT if the allocated buffer fails the DMA constraints (the buffer
 * is freed in that case).
 */
int
sound_alloc_dmap (int dev, struct dma_buffparms *dmap, int chan)
{
  char           *start_addr, *end_addr;
  int             i, dma_pagesize;
  int             sz = 0;	/* page order of the buffer actually allocated */

  dmap->mapping_flags &= ~DMA_MAP_MAPPED;

  if (dmap->raw_buf != NULL)
    return 0;			/* Already done */

  if (dma_buffsize < 4096)
    dma_buffsize = 4096;

  /* 8-bit DMA channels (<4) address 64k pages, 16-bit channels 128k. */
  if (chan < 4)
    dma_pagesize = 64 * 1024;
  else
    dma_pagesize = 128 * 1024;

  dmap->raw_buf = NULL;

  if (debugmem)
    printk ("sound: buffsize[%d] = %lu\n", dev, audio_devs[dev]->buffsize);

  audio_devs[dev]->buffsize = dma_buffsize;

  if (audio_devs[dev]->buffsize > dma_pagesize)
    audio_devs[dev]->buffsize = dma_pagesize;

  start_addr = NULL;

/*
 * Now loop until we get a free buffer. Try to get smaller buffer if
 * it fails.
 */

  while (start_addr == NULL && audio_devs[dev]->buffsize > PAGE_SIZE)
    {
      int             size;

      /* sz = page order such that PAGE_SIZE << sz >= buffsize */
      for (sz = 0, size = PAGE_SIZE;
	   size < audio_devs[dev]->buffsize;
	   sz++, size <<= 1);

      audio_devs[dev]->buffsize = PAGE_SIZE * (1 << sz);

      if ((start_addr = (char *) __get_dma_pages (GFP_ATOMIC, sz)) == NULL)
	audio_devs[dev]->buffsize /= 2;
    }

  if (start_addr == NULL)
    {
      printk ("Sound error: Couldn't allocate DMA buffer\n");
      return -(ENOMEM);
    }
  else
    {
      /* make some checks */
      end_addr = start_addr + audio_devs[dev]->buffsize - 1;

      if (debugmem)
	printk ("sound: start 0x%lx, end 0x%lx\n",
		(long) start_addr, (long) end_addr);

      /* now check if it fits into the same dma-pagesize */

      if (((long) start_addr & ~(dma_pagesize - 1))
	  != ((long) end_addr & ~(dma_pagesize - 1))
	  || end_addr >= (char *) (MAX_DMA_ADDRESS))
	{
	  printk (
		   "sound: Got invalid address 0x%lx for %ldb DMA-buffer\n",
		   (long) start_addr,
		   audio_devs[dev]->buffsize);
	  /*
	   * BUGFIX: the original returned here without releasing the
	   * pages obtained above, leaking the whole DMA buffer.  sz was
	   * hoisted to function scope so the order is still available.
	   */
	  free_pages ((unsigned long) start_addr, sz);
	  return -(EFAULT);
	}
    }
  dmap->raw_buf = start_addr;
  dmap->raw_buf_phys = virt_to_bus (start_addr);

  /* Reserve every page (inclusive) so the buffer can be remapped. */
  for (i = MAP_NR (start_addr); i <= MAP_NR (end_addr); i++)
    {
      mem_map_reserve (i);
    }

  return 0;
}
Ejemplo n.º 14
0
/*
 * AndorLockDMABuffers - set up the two DMA image buffers for card iCardNo.
 *
 * ulArg is a user-space pointer to three unsigned longs: [0] carries the
 * requested image size in, [1] and [2] receive the physical addresses of
 * DMA buffers 0 and 1.  Returns 0 on success, -EFAULT on any failure.
 *
 * NOTE(review): if the second __get_free_pages() call fails, the pages
 * allocated for buffer 0 are never released — leak.  Allocation failures
 * also return -EFAULT where -ENOMEM would be conventional.  The two
 * buffer-setup sections are near-identical copies; candidates for a
 * shared helper.  TODO: confirm and fix in a follow-up.
 */
int AndorLockDMABuffers(int iCardNo, unsigned long ulArg, unsigned long* pulData)
{
  unsigned long dma_addr;
  unsigned long addr;
  unsigned int sz;
  unsigned long* virt;
  unsigned long offset;

  if(copy_from_user(pulData, (unsigned long*)ulArg, 3*sizeof(unsigned long)))
    return -EFAULT;
    
/* --- Buffer 0 --- */
/* Mode 0: take the buffer from the kernel page allocator. */
if(DMA_MODE==0){
//  Fixing the page size to 2 pages [PAGE_SIZE=4K on intel]
  dma_addr = __get_free_pages(GFP_KERNEL | GFP_DMA32, (ulong) DMA_PAGE_ORD);
  if(!dma_addr) return -EFAULT;

  gpAndorDev[iCardNo].AndorDMABuffer[0].VirtualAdd = (unsigned long*)dma_addr;
  gpAndorDev[iCardNo].AndorDMABuffer[0].Physical = __pa((void*)dma_addr);
  gpAndorDev[iCardNo].AndorDMABuffer[0].Size = PAGE_SIZE<<DMA_PAGE_ORD;

  /* Reserve every page so the buffer can be remapped to user space;
     the API differs per kernel version. */
  for (addr = dma_addr, sz = gpAndorDev[iCardNo].AndorDMABuffer[0].Size;
       sz > 0;
       addr += PAGE_SIZE, sz -= PAGE_SIZE) {
#    if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
                // reserve all pages to make them remapable. /
                mem_map_reserve(MAP_NR(addr));
#    elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
                mem_map_reserve(virt_to_page(addr));
#    else
    SetPageReserved(virt_to_page(addr));
#    endif
  }
}
/* Mode 1: ioremap a region outside kernel-managed RAM. */
else{
  //Work out Size to be allowed for each image
  if(DMA_SIZE==0){
    if(dma_first_size==0 || (dma_first_size < pulData[0])){
      dma_first_size = pulData[0];
    }
    sz = dma_first_size;
  }
  else{
    sz = (DMA_SIZE*1024*1024)/(giBoardCount*2);
  }

  offset = DMA_OFFSET*1024*1024;

  //Work out the physical address for this boards DMA
  /* Each card gets two consecutive regions of sz bytes; buffer 0 is the
     first of the pair. */
  if(DMA_ADDRESS==0){
    dma_addr = NUM_PHYSPAGES*PAGE_SIZE + offset + sz*2*iCardNo;
  }
  else{
    dma_addr = (DMA_ADDRESS*1024*1024) + offset + sz*2*iCardNo;
  }

  virt = ioremap(dma_addr, sz);

    if (virt == NULL) {
      printk("<7>andordrvlx: Failed to allocate DMA region 0\n");
      printk("<7>andordrvlx: DMA Addr [%lX] Size [%u bytes]\n", dma_addr, sz);    
      printk("<7>andordrvlx: See INSTALL file, 'Supported Kernels'\n");
      return -EFAULT;
    }
    
  gpAndorDev[iCardNo].AndorDMABuffer[0].VirtualAdd = virt;
  gpAndorDev[iCardNo].AndorDMABuffer[0].Physical = dma_addr;
  gpAndorDev[iCardNo].AndorDMABuffer[0].Size = sz;
}


/* --- Buffer 1: same procedure as buffer 0. --- */
if(DMA_MODE==0){
  dma_addr = __get_free_pages(GFP_KERNEL | GFP_DMA32, (ulong) DMA_PAGE_ORD);
  if(!dma_addr) return -EFAULT;

  gpAndorDev[iCardNo].AndorDMABuffer[1].VirtualAdd = (unsigned long*)dma_addr;
  gpAndorDev[iCardNo].AndorDMABuffer[1].Physical = __pa((void*)dma_addr);
  gpAndorDev[iCardNo].AndorDMABuffer[1].Size = PAGE_SIZE<<DMA_PAGE_ORD;

  for (addr = dma_addr, sz = gpAndorDev[iCardNo].AndorDMABuffer[1].Size;
       sz > 0;
       addr += PAGE_SIZE, sz -= PAGE_SIZE) {
#    if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
                // reserve all pages to make them remapable. /
                mem_map_reserve(MAP_NR(addr));
#    elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
                mem_map_reserve(virt_to_page(addr));
#    else
    SetPageReserved(virt_to_page(addr));
#    endif
  }
}
else{
  //Work out Size to be allowed for each image
  if(DMA_SIZE==0){
    if(dma_first_size==0 || (dma_first_size < pulData[0])){
      dma_first_size = pulData[0];
    }
    sz = dma_first_size;
  }
  else{
    sz = (DMA_SIZE*1024*1024)/(giBoardCount*2);
  }

  offset = DMA_OFFSET*1024*1024;

  //Work out the physical address for this boards DMA
  /* Buffer 1 is the second region of this card's pair. */
  if(DMA_ADDRESS==0){
    dma_addr = NUM_PHYSPAGES*PAGE_SIZE + offset + sz*(2*iCardNo + 1);
  }
  else{
    dma_addr = (DMA_ADDRESS*1024*1024) + offset + sz*(2*iCardNo + 1);
  }

  virt = ioremap(dma_addr, sz);

    if (virt == NULL) {
      printk("<7>andordrvlx: Failed to allocate DMA region 1\n");
      printk("<7>andordrvlx: DMA Addr [%lX]  Size [%u bytes]\n", dma_addr, sz);    
      printk("<7>andordrvlx: See Readme section 'Supported Kernels'\n");
      return -EFAULT;
  }
  
  gpAndorDev[iCardNo].AndorDMABuffer[1].VirtualAdd = virt;
  gpAndorDev[iCardNo].AndorDMABuffer[1].Physical = dma_addr;
  gpAndorDev[iCardNo].AndorDMABuffer[1].Size = sz;
}

  /* Report both physical addresses back to user space. */
  pulData[1] = (unsigned long)gpAndorDev[iCardNo].AndorDMABuffer[0].Physical;
  pulData[2] = (unsigned long)gpAndorDev[iCardNo].AndorDMABuffer[1].Physical;

  if(copy_to_user((unsigned long*)ulArg, pulData, 3*sizeof(unsigned long)))
    return -EFAULT;

  return 0;
}
Ejemplo n.º 15
0
/*
 * probe - initialize the Xilinx TFT frame buffer instance given by @index.
 *
 * Resolves the compile-time register base for the instance, allocates and
 * zeroes the bookkeeping struct, DMA-allocates the frame buffer, reserves
 * its pages so mmap() works, maps the control registers and registers the
 * device with the fbdev core.
 *
 * Returns 0 on success, -ENODEV for an unconfigured index, -ENOMEM on
 * allocation/mapping failure, -EINVAL if fbdev registration fails.
 */
static int __init
probe(int index)
{
	u32 *phys_reg_addr;
	struct xilinxfb_info *i;
	struct page *page, *end_page;

	/* The #if nesting intentionally requires instance N-1 to be
	   configured before instance N.
	   NOTE(review): the _1_/_2_ guard symbols are spelled "CNTRLR"
	   while the others use "CNTLR" — verify against the real Kconfig
	   symbols before relying on instances 1 and 2. */
	switch (index) {
#if defined(CONFIG_XILINX_TFT_CNTLR_REF_0_INSTANCE)
	case 0:
		phys_reg_addr = 
			(u32 *) CONFIG_XILINX_TFT_CNTLR_REF_0_DCR_BASEADDR;
		break;
#if defined(CONFIG_XILINX_TFT_CNTRLR_REF_1_INSTANCE)
	case 1:
		phys_reg_addr = 
			(u32 *) CONFIG_XILINX_TFT_CNTLR_REF_1_DCR_BASEADDR;
		break;
#if defined(CONFIG_XILINX_TFT_CNTRLR_REF_2_INSTANCE)
	case 2:
		phys_reg_addr = 
			(u32 *) CONFIG_XILINX_TFT_CNTLR_REF_2_DCR_BASEADDR;
		break;
#if defined(CONFIG_XILINX_TFT_CNTLR_REF_3_INSTANCE)
#error Edit this file to add more devices.
#endif				/* 3 */
#endif				/* 2 */
#endif				/* 1 */
#endif				/* 0 */
	default:
		return -ENODEV;
	}

	/* Convert DCR register address to OPB address */
	phys_reg_addr = (unsigned *)(((unsigned)phys_reg_addr*4)+0xd0000000);

	/* Allocate the info and zero it out. */
	i = kmalloc(sizeof (struct xilinxfb_info), GFP_KERNEL);
	if (!i) {
		printk(KERN_ERR "Could not allocate Xilinx "
		       "frame buffer #%d information.\n", index);
		return -ENOMEM;
	}
	memset(i, 0, sizeof (struct xilinxfb_info));

	/* Make it the head of info_list. */
	spin_lock(&info_lock);
	i->next = info_list;
	info_list = i;
	spin_unlock(&info_lock);

	/*
	 * At this point, things are ok for us to call remove_head_info() to
	 * clean up if we run into any problems; i is on info_list and
	 * all the pointers are zeroed because of the memset above.
	 */

	i->fb_virt_start = (unsigned long) consistent_alloc(GFP_KERNEL|GFP_DMA,
							    FB_SIZE,
							    &i->fb_phys);
	if (!i->fb_virt_start) {
		printk(KERN_ERR "Could not allocate frame buffer memory "
		       "for Xilinx device #%d.\n", index);
		remove_head_info();
		return -ENOMEM;
	}

	/*
	 * The 2.4 PPC version of consistent_alloc does not set the
	 * pages reserved.  The pages need to be reserved so that mmap
	 * will work.  This means that we need the following code.  When
	 * consistent_alloc gets fixed, this will no longer be needed.
	 * Note that in 2.4, consistent_alloc doesn't free up the extra
	 * pages either.  This is already fixed in 2.5.
	 */
	page = virt_to_page(__va(i->fb_phys));
	end_page = page + ((FB_SIZE+PAGE_SIZE-1)/PAGE_SIZE);
	while (page < end_page)
		mem_map_reserve(page++);

	/* Clear the frame buffer. */
	memset((void *) i->fb_virt_start, 0, FB_SIZE);

	/* Map the control registers in. */
	i->regs = (u32 *) ioremap((unsigned long) phys_reg_addr, NUM_REGS);
	if (!i->regs) {
		/* BUGFIX: the mapping was previously used unchecked; a
		   failed ioremap() would oops in out_be32() below. */
		printk(KERN_ERR "Could not map control registers "
		       "for Xilinx device #%d.\n", index);
		remove_head_info();
		return -ENOMEM;
	}

	/* Tell the hardware where the frame buffer is. */
	out_be32(i->regs + REG_FB_ADDR, i->fb_phys);

	/* Turn on the display. */
	out_be32(i->regs + REG_CTRL, REG_CTRL_DEFAULT);

	current_par.var.xres = XRES;
	current_par.var.xres_virtual = XRES_VIRTUAL;
	current_par.var.yres = YRES;
	current_par.var.yres_virtual = YRES_VIRTUAL;
	current_par.var.bits_per_pixel = BITS_PER_PIXEL;

	i->gen.parsize = sizeof (struct xilinxfb_par);
	i->gen.fbhw = &xilinx_switch;

	strcpy(i->gen.info.modename, "Xilinx LCD");
	i->gen.info.changevar = NULL;
	i->gen.info.node = -1;

	i->gen.info.fbops = &xilinxfb_ops;
	i->gen.info.disp = &i->disp;
	i->gen.info.switch_con = &fbgen_switch;
	i->gen.info.updatevar = &fbgen_update_var;
	i->gen.info.blank = &fbgen_blank;
	i->gen.info.flags = FBINFO_FLAG_DEFAULT;

	/* This should give a reasonable default video mode */
	fbgen_get_var(&i->disp.var, -1, &i->gen.info);
	fbgen_do_set_var(&i->disp.var, 1, &i->gen);
	fbgen_set_disp(-1, &i->gen);
	fbgen_install_cmap(0, &i->gen);
	if (register_framebuffer(&i->gen.info) < 0) {
		printk(KERN_ERR "Could not register frame buffer "
		       "for Xilinx device #%d.\n", index);
		remove_head_info();
		return -EINVAL;
	}
	printk(KERN_INFO "fb%d: %s frame buffer at 0x%08X mapped to 0x%08lX\n",
	       GET_FB_IDX(i->gen.info.node), i->gen.info.modename,
	       i->fb_phys, i->fb_virt_start);

	return 0;
}
Ejemplo n.º 16
0
/*============================================================================
 * Do window creation here
 *
 * Programs VMEbus slave window @num: aligns @vme_addr/@size to the window
 * resolution, uses the supplied @phys_addr or allocates coherent memory,
 * reserves the pages of any allocated buffer so user space can see the
 * data, writes the window registers and reads them back as a sanity check.
 *
 * Returns 0 on success or a negative errno (the slave_rwlock write lock,
 * taken by the caller, is released on every error path).
 */
static int __create_slave_window(vme_slave_handle_t handle,
				 struct __vme_slave_window *window, int num,
				 uint32_t ctl, uint32_t vme_addr, size_t size,
				 void *phys_addr)
{
	uint32_t base, bound, to, off;
	struct page *page;
	int resolution;

	/* Windows 0 and 4 have a 4kb resolution, others have
	   64kb resolution
	 */
	resolution = (num % 4) ? 0x10000 : 0x1000;

	/* Align the VME address down to the resolution and round the size
	   up so the window still covers the originally requested range. */
	off = vme_addr % resolution;
	vme_addr -= off;
	size += off;
	size += (size % resolution) ? resolution - (size % resolution) : 0;

	/* If we're given the physical address, then use it,
	   otherwise, let the kernel allocate the memory
	   wherever it wants to.
	 */
	if (phys_addr) {
		phys_addr -= off;
		if ((uint32_t) phys_addr % resolution) {
			write_unlock(&slave_rwlock);
			printk(KERN_ERR "VME: Invalid physical address for "
			       "slave window %d\n", num);
			return -EINVAL;
		}
	} else {
		window->vptr = pci_alloc_consistent(universe_pci_dev, size,
						    &window->resource);
		if (NULL == window->vptr) {
			window->resource = 0;
			window->vptr = NULL;
			write_unlock(&slave_rwlock);
			printk(KERN_ERR "VME: Failed to allocate memory for "
			       "slave window %d\n", num);
			return -ENOMEM;
		}

		/* The memory manager wants to remove the
		   allocated pages from main memory.  We don't
		   want that because the user ends up seeing
		   all zero's so we set the PG_RESERVED bit
		   on each page.
		 */
		for (page = virt_to_page(window->vptr);
		     page < virt_to_page(window->vptr + size); ++page) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,68)
			mem_map_reserve(page);
#else
			SetPageReserved(page);
#endif
		}

		/* phys_addr now refers to the freshly allocated buffer;
		   base/bound/to are computed once, below, for both paths. */
		phys_addr = (void *) virt_to_phys(window->vptr);
	}

	base = vme_addr;
	bound = base + size;
	to = (uint32_t) phys_addr - base;

	window->phys_base = (uint32_t) phys_addr;
	window->vme_base = base;
	window->size = size;

	/* Program control, base, bound and translation offset. */
	writel(ctl, universe_base + UNIV_VSI_CTL(num));
	writel(base, universe_base + UNIV_VSI_BS(num));
	writel(bound, universe_base + UNIV_VSI_BD(num));
	writel(to, universe_base + UNIV_VSI_TO(num));

	/* Double check that the window setup is consistant
	   with what we expect. If this check fails, then I've
	   probably screwed up something in the driver.
	 */
	base = readl(universe_base + UNIV_VSI_BS(num));
	bound = readl(universe_base + UNIV_VSI_BD(num));
	to = readl(universe_base + UNIV_VSI_TO(num));

	if ((base != window->vme_base) || ((base + to) != window->phys_base) ||
	    ((bound - base) != window->size)) {
		write_unlock(&slave_rwlock);
		printk(KERN_ERR "VME: Slave window %d fails consistancy "
		       "check\n", num);
		DPRINTF("Expected base=%#x bound=%#x to=%#x\n",
			window->vme_base,
			window->vme_base + window->size,
			window->phys_base - window->vme_base);
		DPRINTF("Found base=%#x bound=%#x to=%#x\n", base, bound, to);
		return -EIO;
	}

	if (handle) {
		insert_slave_handle(window, handle);
		handle->off = vme_addr - base;
	}

	return 0;
}