Example #1
static uint32_t
sys_munmap(uint32_t arg[]) {
    uintptr_t addr = (uintptr_t)arg[0];
    size_t len = (size_t)arg[1];
    return do_munmap(addr, len);
}
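Example #1 is a bare syscall wrapper. In the Linux-derived examples further down, do_munmap() is called with the process's mmap semaphore held for writing; the following is only a minimal sketch of that convention (the helper name is an assumption, and the three-argument do_munmap() signature matches most, but not all, of the kernel versions quoted here).

#include <linux/mm.h>
#include <linux/sched.h>

/* Minimal sketch, not taken from the example above: wrap do_munmap() in the
 * write side of the mmap semaphore, as the later ioctl examples do. */
static int unmap_user_range(struct mm_struct *mm, unsigned long addr, size_t len)
{
	int ret;

	down_write(&mm->mmap_sem);	/* do_munmap() changes the process memory layout */
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}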
Example #2
/*
* This is the ioctl implementation.
*/
static long kern_unlocked_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	/* int i; */
	char str[256];
	void *ptr;
	unsigned int order;

	unsigned long private;
	unsigned long adjusted;
	unsigned int diff;
	int ret;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	void *kernel_addr;
	unsigned long flags;
	PR_DEBUG("start");
	switch (cmd) {
	/*
	*	Exploring VMA issues
	*/
	case IOCTL_MMAP_PRINT:
		ptr = (void *)arg;
		PR_DEBUG("ptr is %p", ptr);
		vma = find_vma(current->mm, arg);
		PR_DEBUG("vma is %p", vma);
		diff = arg - vma->vm_start;
		PR_DEBUG("diff is %d", diff);
		private = (unsigned long)vma->vm_private_data;
		PR_DEBUG("private (ul) is %lu", private);
		PR_DEBUG("private (p) is %p", (void *)private);
		adjusted = private + diff;
		PR_DEBUG("adjusted (ul) is %lu", adjusted);
		PR_DEBUG("adjusted (p) is %p", (void *)adjusted);
		return 0;

	/*
	*	This is asking the kernel to read the memory
	*/
	case IOCTL_MMAP_READ:
		PR_DEBUG("starting to read");
		memcpy(str, vaddr, 256);
		str[255] = '\0';
		PR_DEBUG("data is %s", str);
		return 0;

	/*
	*	This is asking the kernel to write the memory
	*/
	case IOCTL_MMAP_WRITE:
		PR_DEBUG("starting to write");
		memset(vaddr, arg, size);
		return 0;

	/*
	*	This demos how to take the user space pointer and turn it
	*	into a kernel space pointer
	*/
	case IOCTL_MMAP_WRITE_USER:
		PR_DEBUG("starting to write using us pointer");
		ptr = (void *)arg;
		PR_DEBUG("ptr is %p", ptr);
		return 0;

	/*
	*	mmap a region from an ioctl
	*/
	case IOCTL_MMAP_MMAP:
		PR_DEBUG("trying to mmap");

		/*
		* if(do_kmalloc) {
		*	kaddr=kmalloc(ioctl_size,GFP_KERNEL);
		* } else {
		*	order=get_order(ioctl_size);
		*	kaddr=(void*)__get_free_pages(GFP_KERNEL,order);
		* }
		*/
		mm = current->mm;
		flags = MAP_POPULATE | MAP_SHARED;
		flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
		/* must hold process memory map semaphore because next
		function will change memory layout for the process.
		This also means that this code must be in a path that can
		sleep.
		*/
		/*
		 * vm_mmap does not need the semaphore to be held
		 * down_write(&mm->mmap_sem);
		 */
		addr = vm_mmap(
			filp,/* file pointer */
			0,/* recommended user space address */
			ioctl_size,/* size */
			PROT_READ | PROT_WRITE,/* protection */
			flags,/* flags */
			0/* pg offset */
		);
		/* remember to release the semaphore! */
		/*
		 * vm_mmap does not need the semaphore to be held
		 * up_write(&mm->mmap_sem);
		 */
		/*
		PR_DEBUG("kaddr is (p) %p",kaddr);
		PR_DEBUG("real size is (d) %d",ioctl_size);
		*/
		PR_DEBUG(
			"addr for user space is (lu) %lu / (p) %p",
			addr, (void *)addr);
		return addr;

	/*
	*	unmap a region
	*/
	case IOCTL_MMAP_UNMAP:
		PR_DEBUG("trying to unmap");
		vma = find_vma(current->mm, addr);
		kernel_addr = vma->vm_private_data;
		size = vma->vm_end - vma->vm_start;
		PR_DEBUG("deduced kernel_addr is %p", kernel_addr);
		PR_DEBUG("deduced size is (d) %d", size);
		PR_DEBUG("real size is (d) %d", ioctl_size);
		PR_DEBUG("real kaddr is (p) %p", kaddr);
		ret = do_munmap(current->mm, addr, ioctl_size);
		if (ret) {
			PR_ERROR("error from do_munmap");
			return ret;
		}
		if (do_kmalloc)
			kfree(kernel_addr);
		else {
			order = get_order(size);
			free_pages((unsigned long)kernel_addr, order);
		}
		return ret;

	/*
	*	Set the size of the region
	*/
	case IOCTL_MMAP_SETSIZE:
		PR_DEBUG("setting the size");
		ioctl_size = arg;
		PR_DEBUG("size is %d", ioctl_size);
		return 0;
	}
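For context, a user-space caller of the ioctl above receives the mapped address as the ioctl return value. A hedged sketch of such a caller follows; the device node path and the header providing the IOCTL_MMAP_* numbers are assumptions, and the error handling is simplified for a 32-bit target.

#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "mmap_ioctl.h"		/* assumed header defining the IOCTL_MMAP_* numbers */

int main(void)
{
	long addr;
	int fd = open("/dev/mmap_demo", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, IOCTL_MMAP_SETSIZE, 4096) < 0)	/* sets ioctl_size in the driver */
		goto err;
	addr = ioctl(fd, IOCTL_MMAP_MMAP, 0);		/* driver returns the user address */
	if (addr <= 0)
		goto err;
	memset((void *)addr, 0xab, 4096);		/* touch the shared region */
	ioctl(fd, IOCTL_MMAP_UNMAP, 0);			/* driver remembers the mapping itself */
	close(fd);
	return 0;
err:
	close(fd);
	return 1;
}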
Example #3
static int sdcardfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	int err = 0;
	bool willwrite;
	struct file *lower_file;
	const struct vm_operations_struct *saved_vm_ops = NULL;
	/* this might be deferred to mmap's writepage */
	willwrite = ((vma->vm_flags | VM_SHARED | VM_WRITE) == vma->vm_flags);

	/*
	 * File systems which do not implement ->writepage may use
	 * generic_file_readonly_mmap as their ->mmap op.  If you call
	 * generic_file_readonly_mmap with VM_WRITE, you'd get an -EINVAL.
	 * But we cannot call the lower ->mmap op, so we can't tell that
	 * writeable mappings won't work.  Therefore, our only choice is to
	 * check if the lower file system supports the ->writepage, and if
	 * not, return EINVAL (the same error that
	 * generic_file_readonly_mmap returns in that case).
	 */
	lower_file = sdcardfs_lower_file(file);
	if (willwrite && !lower_file->f_mapping->a_ops->writepage) {
		err = -EINVAL;
		printk(KERN_ERR "sdcardfs: lower file system does not "
		       "support writeable mmap\n");
		goto out;
	}

	/*
	 * find and save lower vm_ops.
	 *
	 * XXX: the VFS should have a cleaner way of finding the lower vm_ops
	 */
	if (!SDCARDFS_F(file)->lower_vm_ops) {
		err = lower_file->f_op->mmap(lower_file, vma);
		if (err) {
			printk(KERN_ERR "sdcardfs: lower mmap failed %d\n", err);
			goto out;
		}
		saved_vm_ops = vma->vm_ops; /* save: came from lower ->mmap */
		err = do_munmap(current->mm, vma->vm_start,
				vma->vm_end - vma->vm_start);
		if (err) {
			printk(KERN_ERR "sdcardfs: do_munmap failed %d\n", err);
			goto out;
		}
	}

	/*
	 * Next 3 lines are all I need from generic_file_mmap.  I definitely
	 * don't want its test for ->readpage which returns -ENOEXEC.
	 */
	file_accessed(file);
	vma->vm_ops = &sdcardfs_vm_ops;

	file->f_mapping->a_ops = &sdcardfs_aops; /* set our aops */
	if (!SDCARDFS_F(file)->lower_vm_ops) /* save for our ->fault */
		SDCARDFS_F(file)->lower_vm_ops = saved_vm_ops;

out:
	return err;
}
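The lower_vm_ops saved above is what the stacked file system's own ->fault callback later delegates to. sdcardfs_fault() is not part of this excerpt, so the following is only a hedged sketch of that delegation, using the 2.6-era fault signature and the SDCARDFS_F()/sdcardfs_lower_file() helpers seen above.

static int sdcardfs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct vm_area_struct lower_vma;
	struct file *file = vma->vm_file;
	const struct vm_operations_struct *lower_vm_ops =
		SDCARDFS_F(file)->lower_vm_ops;		/* saved by ->mmap above */

	if (!lower_vm_ops || !lower_vm_ops->fault)
		return VM_FAULT_SIGBUS;
	/* hand the lower layer a copy of the vma that points at the lower
	 * file, then let the lower ->fault bring the page in */
	memcpy(&lower_vma, vma, sizeof(lower_vma));
	lower_vma.vm_file = sdcardfs_lower_file(file);
	return lower_vm_ops->fault(&lower_vma, vmf);
}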
Example #4
static int wrapfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	int err = 0;
	bool willwrite;
	struct file *lower_file;
	const struct vm_operations_struct *saved_vm_ops = NULL;
	
	printk("wrapfs_mmap: '%s'\n", file->f_dentry->d_iname);

#ifdef EXTRA_CREDIT
	if(wrapfs_get_debug(file->f_dentry->d_sb) & DEBUG_FILE)
		DEBUG_MESG("Enter");
#endif

	/* this might be deferred to mmap's writepage */
	willwrite = ((vma->vm_flags | VM_SHARED | VM_WRITE) == vma->vm_flags);

	/*
	 * File systems which do not implement ->writepage may use
	 * generic_file_readonly_mmap as their ->mmap op.  If you call
	 * generic_file_readonly_mmap with VM_WRITE, you'd get an -EINVAL.
	 * But we cannot call the lower ->mmap op, so we can't tell that
	 * writeable mappings won't work.  Therefore, our only choice is to
	 * check if the lower file system supports the ->writepage, and if
	 * not, return EINVAL (the same error that
	 * generic_file_readonly_mmap returns in that case).
	 */
	lower_file = wrapfs_lower_file(file);
	if (willwrite && !lower_file->f_mapping->a_ops->writepage) {
		err = -EINVAL;
		printk(KERN_ERR "wrapfs: lower file system does not "
		       "support writeable mmap\n");
		goto out;
	}

	/*
	 * find and save lower vm_ops.
	 *
	 * XXX: the VFS should have a cleaner way of finding the lower vm_ops
	 */
	if (!WRAPFS_F(file)->lower_vm_ops) {
		err = lower_file->f_op->mmap(lower_file, vma);
		if (err) {
			printk(KERN_ERR "wrapfs: lower mmap failed %d\n", err);
			goto out;
		}
		saved_vm_ops = vma->vm_ops; /* save: came from lower ->mmap */
		err = do_munmap(current->mm, vma->vm_start, vma->vm_end - vma->vm_start);
		if (err) {
			printk(KERN_ERR "wrapfs: do_munmap failed %d\n", err);
			goto out;
		}
	}

	/*
	 * Next 3 lines are all I need from generic_file_mmap.  I definitely
	 * don't want its test for ->readpage which returns -ENOEXEC.
	 */
	file_accessed(file);
	vma->vm_ops = &wrapfs_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	if(WRAPFS_SB(file->f_dentry->d_sb)->mount_options.mmap == TRUE)
		file->f_mapping->a_ops = &wrapfs_mmap_aops; /* set mmap address_ops */
	else
		file->f_mapping->a_ops = &wrapfs_dummy_aops; /* set dummy address_aops */


	if (!WRAPFS_F(file)->lower_vm_ops) /* save for our ->fault */
		WRAPFS_F(file)->lower_vm_ops = saved_vm_ops;

out:

#ifdef EXTRA_CREDIT
	if(wrapfs_get_debug(file->f_dentry->d_sb) & DEBUG_FILE)
		DEBUG_RETURN("Exit", err);
#endif

	return err;
}
Example #5
/*
* This is the ioctl implementation.
*/
static long kern_unlocked_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	/* for results from functions */
	int res;

	PR_DEBUG("start with cmd %d", cmd);
	switch (cmd) {
	/*
	*	Asking the kernel to mmap into user space.
	*	Only argument is size.
	*/
	case IOCTL_DEMO_MAP:
		PR_DEBUG("trying to mmap");
		size = arg;
		kptr = alloc_mem(size);
		if (kptr == NULL) {
			PR_ERROR("ERROR: could not allocate memory");
			return -EFAULT;
		}
		PR_DEBUG("After alloc_mem with kptr=%p", kptr);
		uptr = map_to_user(filp, kptr, size);
		if (IS_ERR_VALUE(uptr)) {
			PR_ERROR("ERROR: quiting on process of mmaping");
			return -EFAULT;
		}
		PR_DEBUG("After map_to_user");
		PR_DEBUG("Successful exit");
		return uptr;

	/*
	*	Asking the kernel to munmap user space.
	*	No arguments are required.
	*/
	case IOCTL_DEMO_UNMAP:
		PR_DEBUG("trying to munmap");
		res = do_munmap(current->mm, uptr, size);
		if (res)
			return res;
		PR_DEBUG("After unmap");
		free_mem(kptr, size);
		PR_DEBUG("Successful exit");
		/* so we won't accidentally use these pointers */
		kptr = NULL;
		size = -1;
		uptr = -1;
		return 0;

	/*
	*	Asking the kernel to write to the buffer.
	*	One argument which is the value to write.
	*/
	case IOCTL_DEMO_WRITE:
		if (kptr == NULL) {
			PR_ERROR("ERROR: kptr is NULL?!?");
			return -EFAULT;
		}
		memset(kptr, arg, size);
		return 0;

	/*
	*	Asking the kernel to check that the buffer is a certain value.
	*	One argument which is the value to check.
	*/
	case IOCTL_DEMO_READ:
		if (kptr == NULL) {
			PR_ERROR("ERROR: kptr is NULL?!?");
			return -EFAULT;
		}
		return memcheck(kptr, arg, size);

	/*
	*	Asking the kernel to copy the in kernel buffer to user space.
	*	One argument which is the pointer to the user space buffer.
	*/
	case IOCTL_DEMO_COPY:
		if (kptr == NULL) {
			PR_ERROR("ERROR: kptr is NULL?!?");
			return -EFAULT;
		}
		return copy_to_user((void *)arg, kptr, size);
	}
	return -EINVAL;
}
Example #6
int sys_munmap(uintptr_t addr, size_t length)
{
  return do_munmap(addr, length);
}
Example #7
/* Map a non page aligned fbchain into user space.  This
 * requires creating an iovec and populating it correctly */
static int map_fb_to_user_sg(struct file *filep, struct pme_fbchain *buffers,
				unsigned long *user_addr, size_t *size)
{
	void *data;
	size_t data_size;
	struct iovec *vect;
	int vector_size, ret, list_count, index = 0;
	unsigned long paddr;
	struct vm_area_struct *vma;
	struct pme_fb_vma *mem_node, *iovec_mem_node;
	list_count = pme_fbchain_num(buffers);

	vector_size = sizeof(struct iovec) * list_count;
	iovec_mem_node = fb_vma_create(NULL, fb_phys_mapped, 1, list_count,
			vector_size, 0);
	if (!iovec_mem_node)
		return -ENOMEM;

	/* The space for the iovec is allocated as whole pages and
	 * a kernel mapping needs to be created in case they were
	 * allocated from high mem */
	vect = kmap(iovec_mem_node->iovec_pages);
	/* Create a mem node to keep track of the fbchain
	 * Otherwise, we won't know when to release the freebuff list */
	mem_node = fb_vma_create(buffers, fb_phys_mapped, 0, 0, 0, 0);
	if (!mem_node) {
		kunmap(iovec_mem_node->iovec_pages);
		fb_vma_free(iovec_mem_node);
		return -ENOMEM;
	}
	/* For each freebuff, map it to user space, storing the
	 * userspace data in the iovec */
	data = pme_fbchain_current(buffers);

	down_write(&current->mm->mmap_sem);

	while (data) {
		data_size = pme_fbchain_current_bufflen(buffers);
		vect[index].iov_base = (void *) do_mmap(filep, 0,
							data_size +
							offset_in_page(data),
							PROT_READ | PROT_WRITE,
							MAP_PRIVATE,
							virt_to_phys(data) &
							PAGE_MASK);
		ret = check_mmap_result(vect[index].iov_base);
		if (ret)
			/*  Need to unmap any previous successes */
			goto err;

		vma = find_vma(current->mm,
				(unsigned long) vect[index].iov_base);

		vma->vm_private_data = mem_node;
		atomic_inc(&mem_node->ref_count);

		vect[index].iov_base += offset_in_page(data);
		vect[index].iov_len = data_size;
		++index;
		data = pme_fbchain_next(buffers);
	}

	/* Now map the iovec into user space */
	paddr = page_to_pfn(iovec_mem_node->iovec_pages) << PAGE_SHIFT;
	*user_addr = (unsigned long) do_mmap(filep, 0,
					     vector_size +
					     offset_in_page(paddr),
					     PROT_READ |
					     PROT_WRITE, MAP_PRIVATE,
					     paddr & PAGE_MASK);

	ret = check_mmap_result((void *) *user_addr);
	if (ret)
		goto err;

	vma = find_vma(current->mm, (unsigned long) *user_addr);

	vma->vm_private_data = iovec_mem_node;

	up_write(&current->mm->mmap_sem);
	*user_addr += offset_in_page(paddr);
	*size = list_count;
	kunmap(iovec_mem_node->iovec_pages);
	return PME_MEM_SG;
err:
	while (index--)
		do_munmap(current->mm,
			((unsigned long)vect[index].iov_base) & PAGE_MASK,
			 vect[index].iov_len +
			 offset_in_page(vect[index].iov_base));

	up_write(&current->mm->mmap_sem);
	kunmap(iovec_mem_node->iovec_pages);
	return -EINVAL;
}
Example #8
static int s3c_g3d_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	u32 val;
	DMA_BLOCK_STRUCT dma_block;
	s3c_3d_dma_info dma_info;
	DECLARE_COMPLETION_ONSTACK(complete);

	struct mm_struct *mm = current->mm;
	struct s3c_3d_mem_alloc param;
	struct s3c_3d_pm_status param_pm;

	unsigned int timer;
	
	switch (cmd) {
	case WAIT_FOR_FLUSH:
		//if fifo has already been flushed, return;
		val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE);
		//printk("read pipestate = 0x%x\n",val);
		if((val & arg) ==0) break;

		// enable interrupt
		interrupt_already_recevied = 0;
		__raw_writel(0x0001171f,s3c_g3d_base+FGGB_PIPEMASK);
		__raw_writel(1,s3c_g3d_base+FGGB_INTMASK);

		//printk("wait for flush (arg=0x%lx)\n",arg);

		timer = 1000000;

		while(timer) {
			wait_event_interruptible_timeout(waitq, (interrupt_already_recevied>0), 1*HZ);

			__raw_writel(0,s3c_g3d_base+FGGB_INTMASK);
			interrupt_already_recevied = 0;
			//if(interrupt_already_recevied==0)interruptible_sleep_on(&waitq);
			val = __raw_readl(s3c_g3d_base+FGGB_PIPESTATE);
			//printk("in while read pipestate = 0x%x\n",val);
			if(val & arg){
			} else{
				break;
			}
			__raw_writel(1,s3c_g3d_base+FGGB_INTMASK);
			timer --;
		}
		break;

	case GET_CONFIG:
		if (copy_to_user((void *)arg,&g3d_config,sizeof(G3D_CONFIG_STRUCT))) {
			printk("G3D: copy_to_user failed to get g3d_config\n");
			return -EFAULT;		
		}
		break;

	case START_DMA_BLOCK:
		if (copy_from_user(&dma_block,(void *)arg,sizeof(DMA_BLOCK_STRUCT))) {
			printk("G3D: copy_to_user failed to get dma_block\n");
			return -EFAULT;		
		}

		if (dma_block.offset%4!=0) {
			printk("G3D: dma offset is not aligned by word\n");
			return -EINVAL;
		}
		if (dma_block.size%4!=0) {
			printk("G3D: dma size is not aligned by word\n");
			return -EINVAL;
		}
		if (dma_block.offset+dma_block.size >g3d_config.dma_buffer_size) {
			printk("G3D: offset+size exceeds dam buffer\n");
			return -EINVAL;
		}

		dma_info.src = g3d_config.dma_buffer_addr+dma_block.offset;
		dma_info.len = dma_block.size;
		dma_info.dst = s3c_g3d_base_physical+FGGB_HOSTINTERFACE;

		DEBUG(" dma src=0x%x\n", dma_info.src);
		DEBUG(" dma len =%u\n", dma_info.len);
		DEBUG(" dma dst = 0x%x\n", dma_info.dst);

		dma_3d_done = &complete;

		if (s3c2410_dma_request(DMACH_3D_M2M, &s3c6410_3d_dma_client, NULL)) {
			printk(KERN_WARNING "Unable to get DMA channel(DMACH_3D_M2M).\n");
			return -EFAULT;
		}

		s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_g3d_dma_finish);
		s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM, 1, (u_long) dma_info.src);
		s3c2410_dma_config(DMACH_3D_M2M, 4, 4);
		s3c2410_dma_setflags(DMACH_3D_M2M, S3C2410_DMAF_AUTOSTART);

		//consistent_sync((void *) dma_info.dst, dma_info.len, DMA_FROM_DEVICE);
	//	s3c2410_dma_enqueue(DMACH_3D_M2M, NULL, (dma_addr_t) virt_to_dma(NULL, dma_info.dst), dma_info.len);
		s3c2410_dma_enqueue(DMACH_3D_M2M, NULL, (dma_addr_t) dma_info.dst, dma_info.len);

	//	printk("wait for end of dma operation\n");
		wait_for_completion(&complete);
	//	printk("dma operation is performed\n");

		s3c2410_dma_free(DMACH_3D_M2M, &s3c6410_3d_dma_client);

		break;

	case S3C_3D_MEM_ALLOC:		
		mutex_lock(&mem_alloc_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_alloc_lock);			
			return -EFAULT;
		}
       
		flag = MEM_ALLOC;
		
		param.size = s3c_g3d_available_chunk_size(param.size,(unsigned int)file->private_data);

		if (param.size == 0){
			printk("S3C_3D_MEM_ALLOC FAILED because there is no block memory bigger than you request\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);			
			return -EFAULT;
		}			
             
		param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x\n", param.vir_addr);

		if(param.vir_addr == -EINVAL) {
			printk("S3C_3D_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);			
			return -EFAULT;
		}
		param.phy_addr = physical_address;

       // printk("alloc %d\n", param.size);
		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;		
		}

		flag = 0;
		
//		printk("\n\n====Success the malloc from kernel=====\n");
		mutex_unlock(&mem_alloc_lock);
		
		break;

	case S3C_3D_MEM_FREE:	
		mutex_lock(&mem_free_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}

		DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		/*
		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk("do_munmap() failed !!\n");
			mutex_unlock(&mem_free_lock);
			return -EINVAL;
		}
		*/

		s3c_g3d_release_chunk(param.phy_addr, param.size);
		//printk("KERNEL : virt_addr = 0x%X\n", virt_addr);
		//printk("free %d\n", param.size);


		param.size = 0;
		DEBUG("do_munmap() succeed !!\n");

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}
		
		mutex_unlock(&mem_free_lock);
		
		break;

	case S3C_3D_SFR_LOCK:
		mutex_lock(&mem_sfr_lock);
		mutex_lock_processID = (unsigned int)file->private_data;
		DEBUG("s3c_g3d_ioctl() : You got a muxtex lock !!\n");
		break;

	case S3C_3D_SFR_UNLOCK:
		mutex_lock_processID = 0;
		mutex_unlock(&mem_sfr_lock);
		DEBUG("s3c_g3d_ioctl() : The muxtex unlock called !!\n");
		break;

	case S3C_3D_MEM_ALLOC_SHARE:		
		mutex_lock(&mem_alloc_share_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_SHARE;

		physical_address = param.phy_addr;

		DEBUG("param.phy_addr = %08x\n", physical_address);

		param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x\n", param.vir_addr);

		if(param.vir_addr == -EINVAL) {
			printk("S3C_3D_MEM_ALLOC_SHARE FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;
		}

		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			flag = 0;
			mutex_unlock(&mem_alloc_share_lock);
			return -EFAULT;		
		}

		flag = 0;
		
		mutex_unlock(&mem_alloc_share_lock);
		
		break;

	case S3C_3D_MEM_SHARE_FREE:	
		mutex_lock(&mem_share_free_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;		
		}

		DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);

		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk("do_munmap() failed - MEM_SHARE_FREE!!\n");
			mutex_unlock(&mem_share_free_lock);
			return -EINVAL;
		}

		param.vir_addr = 0;
		DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n");

		if(copy_to_user((struct s3c_3d_mem_alloc *)arg, &param, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;		
		}

		mutex_unlock(&mem_share_free_lock);
		
		break;

	case S3C_3D_CACHE_INVALID:
		mutex_lock(&cache_invalid_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			printk("ERR: Invalid Cache Error\n");	
			mutex_unlock(&cache_invalid_lock);
			return -EFAULT;	
		}
		dmac_inv_range((unsigned int) param.vir_addr,(unsigned int)param.vir_addr + param.size);
		mutex_unlock(&cache_invalid_lock);
		break;

	case S3C_3D_CACHE_CLEAN:
		mutex_lock(&cache_clean_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			printk("ERR: Invalid Cache Error\n");	
			mutex_unlock(&cache_clean_lock);
			return -EFAULT;	
		}
		dmac_clean_range((unsigned int) param.vir_addr,(unsigned int)param.vir_addr + param.size);
		mutex_unlock(&cache_clean_lock);
		break;

	case S3C_3D_CACHE_CLEAN_INVALID:
		mutex_lock(&cache_clean_invalid_lock);
		if(copy_from_user(&param, (struct s3c_3d_mem_alloc *)arg, sizeof(struct s3c_3d_mem_alloc))){
			mutex_unlock(&cache_clean_invalid_lock);
			printk("ERR: Invalid Cache Error\n");	
			return -EFAULT;	
		}
		dmac_flush_range((unsigned int) param.vir_addr,(unsigned int)param.vir_addr + param.size);
		mutex_unlock(&cache_clean_invalid_lock);
		break;

	case S3C_3D_POWER_INIT:
		if(copy_from_user(&param_pm, (struct s3c_3d_pm_status *)arg, sizeof(struct s3c_3d_pm_status))){
			printk("ERR: Invalid Cache Error\n");	
			return -EFAULT;	
		}
		break;

	case S3C_3D_CRITICAL_SECTION:
#ifdef USE_G3D_DOMAIN_GATING
		mutex_lock(&pm_critical_section_lock);
		if(copy_from_user(&param_pm, (struct s3c_3d_pm_status *)arg, sizeof(struct s3c_3d_pm_status))){
			printk("ERR: Invalid Cache Error\n");	
			mutex_unlock(&pm_critical_section_lock);
			return -EFAULT;	
		}

//		param_pm.memStatus = check_memStatus((unsigned int)file->private_data);

		if(param_pm.criticalSection) g_G3D_CriticalFlag++;
		else g_G3D_CriticalFlag--;

		if(g_G3D_CriticalFlag==0)
		{/*kick power off*/
			/*power off*/
			/*kick timer*/
			mod_timer(&g3d_pm_timer, jiffies + TIMER_INTERVAL);
		}
		else if(g_G3D_CriticalFlag>0)
		{/*kick power on*/
			if(domain_off_check(S3C64XX_DOMAIN_G))
			{/*if powered off*/                        
				if(g_G3D_SelfPowerOFF)
				{/*powered off by 3D PM or by Resume*/
					/*power on*/
					s3c_set_normal_cfg(S3C64XX_DOMAIN_G, S3C64XX_ACTIVE_MODE, S3C64XX_3D);
					if(s3c_wait_blk_pwr_ready(S3C64XX_BLK_G)) {
						printk("[3D] s3c_wait_blk_pwr_ready err\n");
						mutex_unlock(&pm_critical_section_lock);
						return -EFAULT;	
					}
					clk_g3d_enable();
					/*Need here??*/
					softReset_g3d();
					// printk("[3D] Power on\n");  
				}
				else
				{
					/*powered off by the system :: error*/
					printk("Error on the system :: app tries to work during sleep\n");
					mutex_unlock(&pm_critical_section_lock);
					return -EFAULT;	
				}
			}
			else
			{
				/*already powered on : nothing to do*/
				//g_G3D_SelfPowerOFF=0;
			}
		}
		else if(g_G3D_CriticalFlag < 0) 
		{
			printk("Error on the system :: g_G3D_CriticalFlag < 0\n");
		}
//		printk("S3C_3D_CRITICAL_SECTION: param_pm.criticalSection=%d\n",param_pm.criticalSection);

		if (copy_to_user((void *)arg,&param_pm,sizeof(struct s3c_3d_pm_status)))
		{
			printk("G3D: copy_to_user failed to get s3c_3d_pm_status\n");

			mutex_unlock(&pm_critical_section_lock);
			return -EFAULT;		
		}
		mutex_unlock(&pm_critical_section_lock);
#endif /* USE_G3D_DOMAIN_GATING */
		break;

	default:
		DEBUG("s3c_g3d_ioctl() : default !!\n");
		return -EINVAL;
	}
	
	return 0;
}
Example #9
/* SunOS is completely broken... it returns 0 on success, otherwise
 * ENOMEM.  For sys_sbrk() it wants the new brk value as a return
 * on success and ENOMEM as before on failure.
 */
asmlinkage int sunos_brk(unsigned long brk)
{
	int freepages;
	unsigned long rlim;
	unsigned long newbrk, oldbrk;

	if (brk < current->mm->end_code)
		return -ENOMEM;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(current->mm->brk);
	if (oldbrk == newbrk) {
		current->mm->brk = brk;
		return 0;
	}

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= current->mm->brk) {
		current->mm->brk = brk;
		do_munmap(newbrk, oldbrk-newbrk);
		return 0;
	}
	/*
	 * Check against rlimit and stack..
	 */
	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim >= RLIM_INFINITY)
		rlim = ~0;
	if (brk - current->mm->end_code > rlim)
		return -ENOMEM;

	/*
	 * Check against existing mmap mappings.
	 */
	if (find_vma_intersection(current, oldbrk, newbrk+PAGE_SIZE))
		return -ENOMEM;

	/*
	 * stupid algorithm to decide if we have enough memory: while
	 * simple, it hopefully works in most obvious cases.. Easy to
	 * fool it, but this should catch most mistakes.
	 */
	freepages = buffermem >> PAGE_SHIFT;
	freepages += page_cache_size;
	freepages >>= 1;
	freepages += nr_free_pages;
	freepages += nr_swap_pages;
	freepages -= MAP_NR(high_memory) >> 4;
	freepages -= (newbrk-oldbrk) >> PAGE_SHIFT;
	if (freepages < 0)
		return -ENOMEM;
	/*
	 * Ok, we have probably got enough memory - let it rip.
	 */
	current->mm->brk = brk;
	do_mmap(NULL, oldbrk, newbrk-oldbrk,
		PROT_READ|PROT_WRITE|PROT_EXEC,
		MAP_FIXED|MAP_PRIVATE, 0);
	return 0;
}
Example #10
static int unionfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	int err = 0;
	bool willwrite;
	struct file *lower_file;
	struct dentry *dentry = file->f_path.dentry;
	struct dentry *parent;
	struct vm_operations_struct *saved_vm_ops = NULL;

	unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
	parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
	unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);

	/* This might be deferred to mmap's writepage */
	willwrite = ((vma->vm_flags | VM_SHARED | VM_WRITE) == vma->vm_flags);
	err = unionfs_file_revalidate(file, parent, willwrite);
	if (unlikely(err))
		goto out;
	unionfs_check_file(file);

	/*
	 * File systems which do not implement ->writepage may use
	 * generic_file_readonly_mmap as their ->mmap op.  If you call
	 * generic_file_readonly_mmap with VM_WRITE, you'd get an -EINVAL.
	 * But we cannot call the lower ->mmap op, so we can't tell that
	 * writeable mappings won't work.  Therefore, our only choice is to
	 * check if the lower file system supports the ->writepage, and if
	 * not, return EINVAL (the same error that
	 * generic_file_readonly_mmap returns in that case).
	 */
	lower_file = unionfs_lower_file(file);
	if (willwrite && !lower_file->f_mapping->a_ops->writepage) {
		err = -EINVAL;
		printk(KERN_ERR "unionfs: branch %d file system does not "
		       "support writeable mmap\n", fbstart(file));
		goto out;
	}

	/*
	 * find and save lower vm_ops.
	 *
	 * XXX: the VFS should have a cleaner way of finding the lower vm_ops
	 */
	if (!UNIONFS_F(file)->lower_vm_ops) {
		err = lower_file->f_op->mmap(lower_file, vma);
		if (err) {
			printk(KERN_ERR "unionfs: lower mmap failed %d\n", err);
			goto out;
		}
		saved_vm_ops = vma->vm_ops;
		err = do_munmap(current->mm, vma->vm_start,
				vma->vm_end - vma->vm_start);
		if (err) {
			printk(KERN_ERR "unionfs: do_munmap failed %d\n", err);
			goto out;
		}
	}

	file->f_mapping->a_ops = &unionfs_dummy_aops;
	err = generic_file_mmap(file, vma);
	file->f_mapping->a_ops = &unionfs_aops;
	if (err) {
		printk(KERN_ERR "unionfs: generic_file_mmap failed %d\n", err);
		goto out;
	}
	vma->vm_ops = &unionfs_vm_ops;
	if (!UNIONFS_F(file)->lower_vm_ops)
		UNIONFS_F(file)->lower_vm_ops = saved_vm_ops;

out:
	if (!err) {
		/* copyup could cause parent dir times to change */
		unionfs_copy_attr_times(parent->d_inode);
		unionfs_check_file(file);
	}
	unionfs_unlock_dentry(dentry);
	unionfs_unlock_parent(dentry, parent);
	unionfs_read_unlock(dentry->d_sb);
	return err;
}
Example #11
static ssize_t write_proc_mm(struct file *file, const char *buffer,
			     size_t count, loff_t *ppos)
{
	struct mm_struct *mm = file->private_data;
	struct proc_mm_op req;
	int n, ret;

	if(count > sizeof(req))
		return(-EINVAL);

	n = copy_from_user(&req, buffer, count);
	if(n != 0)
		return(-EFAULT);

	ret = count;
	switch(req.op){
	case MM_MMAP: {
		struct mm_mmap *map = &req.u.mmap;

		ret = do_mmap2(mm, map->addr, map->len, map->prot, 
			       map->flags, map->fd, map->offset >> PAGE_SHIFT);
		if((ret & ~PAGE_MASK) == 0)
			ret = count;
	
		break;
	}
	case MM_MUNMAP: {
		struct mm_munmap *unmap = &req.u.munmap;

		down_write(&mm->mmap_sem);
		ret = do_munmap(mm, unmap->addr, unmap->len);
		up_write(&mm->mmap_sem);

		if(ret == 0)
			ret = count;
		break;
	}
	case MM_MPROTECT: {
		struct mm_mprotect *protect = &req.u.mprotect;

		ret = do_mprotect(mm, protect->addr, protect->len, 
				  protect->prot);
		if(ret == 0)
			ret = count;
		break;
	}

	case MM_COPY_SEGMENTS: {
		struct mm_struct *from = proc_mm_get_mm(req.u.copy_segments);

		if(IS_ERR(from)){
			ret = PTR_ERR(from);
			break;
		}

		mm_copy_segments(from, mm);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	return(ret);
}
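write_proc_mm() above implements a small struct-based protocol: user space writes exactly one struct proc_mm_op per request. Below is a hedged user-space sketch of an MM_MUNMAP request; the header name and the precise struct layout are assumptions derived from the fields the kernel code reads (op, u.munmap.addr, u.munmap.len).

#include <unistd.h>
#include "proc_mm.h"	/* assumed header defining struct proc_mm_op and the MM_* ops */

static int remote_munmap(int mm_fd, unsigned long addr, unsigned long len)
{
	struct proc_mm_op req = {
		.op = MM_MUNMAP,
		.u = { .munmap = { .addr = addr, .len = len } },
	};

	/* one complete request per write(); the handler returns count on success */
	return write(mm_fd, &req, sizeof(req)) == sizeof(req) ? 0 : -1;
}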
Example #12
unsigned long do_mmap(struct file *file,unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long off)
{
    int error;
    struct vm_area_struct *vma;

    if(len <= 0)
        return -EINVAL;

    if((len = PAGE_ALIGN(len)) == 0)
        return addr;

    if(addr > PAGE_OFFSET || len > PAGE_OFFSET || (addr + len) > PAGE_OFFSET)
        return -EINVAL;

    if(file)
    {
        switch (flags & MAP_TYPE)
        {
            case MAP_SHARED:
                if((prot & PROT_WRITE) && (file->f_mode & FILE_WRITE))
                    return -EACCES;
                break;
            case MAP_PRIVATE:
                if(!(file->f_mode & FILE_READ))
                    return -EACCES;
            default:
                return -EINVAL;
        }
        if(file->f_inode->i_count > 0 && flags & MAP_DENYWRITE)
            return -ETXTBSY;
    }
    else if((flags & MAP_TYPE) != MAP_PRIVATE)
        return -EINVAL;

    if(flags & MAP_FIXED)
    {
        if(addr & ~ PAGE_MASK)
            return -EINVAL;
        if(len > PAGE_OFFSET || addr + len > PAGE_OFFSET)
            return -EINVAL;
    }
    else
    {
        addr = get_unmmapped_area(len);
        if(!addr)
            return -ENOMEM;
    }

    if(file && (!file->f_op || !file->f_op->mmap))
        return -ENODEV;

    vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),GFP_KERNEL);
    if(!vma)
        return -ENOMEM;
    vma->vm_task = current;
    vma->vm_start = addr;
    vma->vm_end = addr + len;
    vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
    vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);

    if(file)
    {
        if(file->f_mode & FILE_READ)
            vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
        if(flags & MAP_SHARED)
            vma->vm_flags |= VM_SHARED | VM_MAYSHARE;

        if(file->f_mode & FILE_WRITE)
            vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
    }
    else
        vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;


    //	vma->vm_page_prot = protection_map[vma->vm_flags & 0X0F];
    vma->vm_ops = NULL;
    vma->vm_offset = off;
    vma->vm_inode = NULL;
    vma->vm_pte = 0;

    do_munmap(addr, len);

    if(file)
        error = file->f_op->mmap(file->f_inode, file, vma);
    else 
        error = anon_map(NULL, NULL, vma);
    if(error)
    {
        kfree(vma);
        return error;
    }

    insert_vm_struct(current, vma);
    merge_segments(current, vma->vm_start, vma->vm_end);
    return addr;
}
Example #13
static unsigned long move_vma(struct vm_area_struct * vma,
	unsigned long addr, unsigned long old_len, unsigned long new_len,
	unsigned long new_addr)
{
	struct mm_struct * mm = vma->vm_mm;
	struct vm_area_struct * new_vma, * next, * prev;
	int allocated_vma;


	new_vma = NULL;
	next = find_vma_prev(mm, new_addr, &prev);
	if (next) {
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
			if (next != prev->vm_next)
				BUG();
			if (prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags)) {
				spin_lock(&mm->page_table_lock);
				prev->vm_end = next->vm_end;
				__vma_unlink(mm, next, prev);
				spin_unlock(&mm->page_table_lock);
				if (vma == next)
					vma = prev;
				mm->map_count--;
				kmem_cache_free(vm_area_cachep, next);
			}
		} else if (next->vm_start == new_addr + new_len &&
			   can_vma_merge(next, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			next->vm_start = new_addr;
			spin_unlock(&mm->page_table_lock);
			new_vma = next;
		}
	} else {
		prev = find_vma(mm, new_addr-1);
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
		}
	}

	allocated_vma = 0;
	if (!new_vma) {
		new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!new_vma)
			goto out;
		allocated_vma = 1;
	}

	if (!move_page_tables(vma, new_addr, addr, old_len)) {
		if (allocated_vma) {
			*new_vma = *vma;
			new_vma->vm_start = new_addr;
			new_vma->vm_end = new_addr+new_len;
			new_vma->vm_pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
			new_vma->vm_raend = 0;
			if (new_vma->vm_file)
				get_file(new_vma->vm_file);
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			insert_vm_struct(current->mm, new_vma);
		}
		/* The old VMA has been accounted for, don't double account */
		do_munmap(current->mm, addr, old_len, 0);
		current->mm->total_vm += new_len >> PAGE_SHIFT;
		if (new_vma->vm_flags & VM_LOCKED) {
			current->mm->locked_vm += new_len >> PAGE_SHIFT;
			make_pages_present(new_vma->vm_start,
					   new_vma->vm_end);
		}
		return new_addr;
	}
Example #14
/**
 * @brief   Chunkmem device ioctl function
 */
static long chunkmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	chunk_block_t block;
	void *ka;           /* kernel_addr */
	unsigned int va;    /* user_addr */
	unsigned int pa;    /* phy_addr*/
	long ret = 0;
	unsigned int offset = 0;

	switch (cmd) {
	case CHUNK_MEM_ALLOC:
	case CHUNK_MEM_SHARE:
	case CHUNK_MEM_MMAP:
		{
			if (copy_from_user(&block, (void __user*)arg, sizeof(block))) {
				ret = -EFAULT;
				break;
			}

			/* alloc|share|mmap memory */
			if (cmd == CHUNK_MEM_MMAP) {
				DIAG_VERB("CHUNK_MEM_MMAP:\n");
				ka = gp_chunk_va(block.phy_addr);
				if (ka == NULL) {
					DIAG_ERROR("CHUNK_MEM_MMAP: bad address! (%s:%08X)\n", current->comm, block.phy_addr);
					ret = -EFAULT; /* mmap fail */
					break;
				}
				/* page alignment */
				offset = block.phy_addr & ~PAGE_MASK;
				ka = (void *)((unsigned long)ka & PAGE_MASK);
				DIAG_VERB("CHUNK_MEM_MMAP: phy_addr                  = %08X\n", block.phy_addr);
				DIAG_VERB("CHUNK_MEM_MMAP: size                      = %08X\n", block.size);
				DIAG_VERB("CHUNK_MEM_MMAP: ka                        = %08X\n", (unsigned int)ka);
				DIAG_VERB("CHUNK_MEM_MMAP: offset                    = %08X\n", offset);
				DIAG_VERB("CHUNK_MEM_MMAP: PAGE_ALIGN(size + offset) = %08X\n", PAGE_ALIGN(block.size + offset));
			}
			else {
				if (cmd == CHUNK_MEM_ALLOC) {
					DIAG_VERB("CHUNK_MEM_ALLOC:\n");
					DIAG_VERB("size = %08X (%d)\n", block.size, block.size);
					ka = gp_chunk_malloc(current->tgid, block.size);
					DIAG_VERB("gp_chunk_malloc return ka=%08X\n", ka);
					if (ka == NULL) {
						DIAG_ERROR("CHUNK_MEM_ALLOC: out of memory! (%s:%08X)\n", current->comm, block.size);
						dlMalloc_Status(NULL);
						ret = -ENOMEM;
						break;
					}
					block.phy_addr = gp_chunk_pa(ka);
				}
				else { /* CHUNK_MEM_SHARE */
					DIAG_VERB("CHUNK_MEM_SHARE:\n");
					ka = gp_chunk_va(block.phy_addr);
					if ((ka == NULL) || (dlShare(ka) == 0)) {
						DIAG_ERROR("CHUNK_MEM_SHARE: bad address! (%s:%08X)\n", current->comm, block.phy_addr);
						ret = -EFAULT; /* share fail */
						break;
					}
				}
				block.size = dlMalloc_Usable_Size(ka) & PAGE_MASK; /* actual allocated size */
				DIAG_VERB("actual size = %08X (%d)\n", block.size, block.size);
				DIAG_VERB("ka = %08X\n", (unsigned int)ka);
			}

			/* mmap to userspace */
			down(&chunkmem->sem);
			down_write(&current->mm->mmap_sem);
			chunkmem->mmap_enable = 1; /* enable mmap in CHUNK_MEM_ALLOC */
			va = do_mmap_pgoff(
				file, 0, PAGE_ALIGN(block.size + offset),
				PROT_READ|PROT_WRITE,
				MAP_SHARED,
				(ka - chunkmem->vbase) >> PAGE_SHIFT);
			chunkmem->mmap_enable = 0; /* disable it */
			up_write(&current->mm->mmap_sem);
			up(&chunkmem->sem);
			if (IS_ERR_VALUE(va)) {
				ret = va; /* errcode */
				DIAG_ERROR("%s: chunkmem mmap fail(%d)! (%s)\n",
						   (cmd == CHUNK_MEM_MMAP) ? "CHUNK_MEM_MMAP" : ((cmd == CHUNK_MEM_ALLOC) ? "CHUNK_MEM_ALLOC" : "CHUNK_MEM_SHARE"),
						   ret, current->comm);
				break;
			}
			va += offset;
			block.addr = (void *)va;
			DIAG_VERB("va = %08X\n\n", va);

			if (copy_to_user((void __user*)arg, &block, sizeof(block))) {
				ret = -EFAULT;
				break;
			}
		}
		break;

	case CHUNK_MEM_FREE:
		{
			if (copy_from_user(&block, (void __user*)arg, sizeof(block))) {
				ret = -EFAULT;
				break;
			}

			/* translate user_va to ka */
			DIAG_VERB("CHUNK_MEM_FREE:\n");
			DIAG_VERB("va = %08X\n", (unsigned int)block.addr);
			pa = gp_user_va_to_pa(block.addr);    /* user_addr to phy_addr */
			if (pa == 0) {
				DIAG_ERROR("CHUNK_MEM_FREE: chunkmem user_va_to_pa fail! (%s:%08X)\n", current->comm, block.addr);
				ret = -EFAULT;
				break;
			}
			DIAG_VERB("pa = %08X\n", pa);
			ka = gp_chunk_va(pa);                  /* phy_addr to kernel_addr */
			if (ka == NULL) {
				DIAG_ERROR("CHUNK_MEM_FREE: not a chunkmem address! (%s:%08X)\n", current->comm, pa);
				ret = -EFAULT;
				break;
			}
			block.size = dlMalloc_Usable_Size(ka) & PAGE_MASK;
			DIAG_VERB("ka = %08X\n", (unsigned int)ka);
			DIAG_VERB("actual size = %08X (%d)\n\n", block.size, block.size);

			/* munmap memory */
			down_write(&current->mm->mmap_sem);
			do_munmap(current->mm, (unsigned int)block.addr, block.size);
			up_write(&current->mm->mmap_sem);

			/* free memory */
			gp_chunk_free(ka);
#if (DIAG_LEVEL >= DIAG_LVL_VERB) && !defined(DIAG_VERB_OFF)
			dlMalloc_Status(NULL);
#endif
		}
		break;

	case CHUNK_MEM_INFO:
		{
			chunk_info_t info;

			if (copy_from_user(&info, (void __user*)arg, sizeof(info))) {
				ret = -EFAULT;
				break;
			}

			if (info.pid == (unsigned int)(-1)) {
				info.pid = current->tgid;
			}

#if CHUNK_SUSPEND_TEST
			if (info.pid) {
				dlMalloc_Status(NULL);
			}
			else {
				gp_chunk_suspend(my_save_data);
				memset(chunkmem->vbase, 0, chunkmem->size);
				/* restore */
				while (blocks != NULL) {
					data_block_t *block = blocks;
					blocks = block->next;
					DIAG_DEBUG("restore data: %p %08X\n", block->addr, block->size);
					memcpy(block->addr, &block->data, block->size);
					kfree(block);
				}
			}
#else
			down(&chunkmem->sem);
			dlMalloc_Status((mem_info_t *)&info);
			up(&chunkmem->sem);
#endif
			if (copy_to_user((void __user*)arg, &info, sizeof(info))) {
				ret = -EFAULT;
				break;
			}
		}
		break;

	case CHUNK_MEM_VA2PA:
		{
			ret = -EFAULT;
			if (copy_from_user(&block, (void __user*)arg, sizeof(block))) {
				break;
			}

			pa = gp_user_va_to_pa(block.addr);    /* user_addr to phy_addr */
			if (pa != 0) {
				ka = gp_chunk_va(pa);             /* phy_addr to kernel_addr */
				if (ka != NULL) {
					block.phy_addr = pa;
					if (copy_to_user((void __user*)arg, &block, sizeof(block)) == 0) {
						ret = 0;
					}
				}
			}
		}
		break;

	case CHUNK_MEM_MUNMAP:
		{
			if (copy_from_user(&block, (void __user*)arg, sizeof(block))) {
				ret = -EFAULT;
				break;
			}

			va = (unsigned int)block.addr;
			/* page alignment */
			offset = va & ~PAGE_MASK;
			va &= PAGE_MASK;

			/* munmap memory */
			down_write(&current->mm->mmap_sem);
			do_munmap(current->mm, va, PAGE_ALIGN(block.size + offset));
			up_write(&current->mm->mmap_sem);
		}
		break;
	
	case CHUNK_MEM_FREEALL:
		gp_chunk_free_all((unsigned int)arg);
		printk(KERN_WARNING "CHUNK_MEM_FREEALL(%ld)\n", arg);
		break;
	
	case CHUNK_MEM_DUMP:
		dlMalloc_Status(0);
		break;
	
	default:
		ret = -ENOTTY; /* Inappropriate ioctl for device */
		break;
	}

	return ret;
}
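A user-space round trip for the chunkmem ioctls above would fill a chunk_block_t, let CHUNK_MEM_ALLOC return both the physical address and the mapped user address, and later pass the same block to CHUNK_MEM_FREE. The sketch below makes the device node path and the header holding chunk_block_t and the CHUNK_MEM_* numbers explicit assumptions.

#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "chunkmem.h"	/* assumed header with chunk_block_t and the CHUNK_MEM_* numbers */

int chunk_roundtrip(unsigned int size)
{
	chunk_block_t block;
	int fd = open("/dev/chunkmem", O_RDWR);		/* assumed device node */

	if (fd < 0)
		return -1;
	memset(&block, 0, sizeof(block));
	block.size = size;
	if (ioctl(fd, CHUNK_MEM_ALLOC, &block) < 0)	/* kernel fills addr and phy_addr */
		goto err;
	memset(block.addr, 0, block.size);		/* use the mapping */
	if (ioctl(fd, CHUNK_MEM_FREE, &block) < 0)	/* kernel munmaps and frees */
		goto err;
	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}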
Example #15
/* SunOS is completely broken... it returns 0 on success, otherwise
 * ENOMEM.  For sys_sbrk() it wants the old brk value as a return
 * on success and ENOMEM as before on failure.
 */
asmlinkage int sunos_brk(unsigned long brk)
{
    int freepages, retval = -ENOMEM;
    unsigned long rlim;
    unsigned long newbrk, oldbrk;

    down_write(&current->mm->mmap_sem);
    if (ARCH_SUN4C_SUN4) {
        if (brk >= 0x20000000 && brk < 0xe0000000) {
            goto out;
        }
    }

    if (brk < current->mm->end_code)
        goto out;

    newbrk = PAGE_ALIGN(brk);
    oldbrk = PAGE_ALIGN(current->mm->brk);
    retval = 0;
    if (oldbrk == newbrk) {
        current->mm->brk = brk;
        goto out;
    }

    /*
     * Always allow shrinking brk
     */
    if (brk <= current->mm->brk) {
        current->mm->brk = brk;
        do_munmap(current->mm, newbrk, oldbrk-newbrk);
        goto out;
    }
    /*
     * Check against rlimit and stack..
     */
    retval = -ENOMEM;
    rlim = current->rlim[RLIMIT_DATA].rlim_cur;
    if (rlim >= RLIM_INFINITY)
        rlim = ~0;
    if (brk - current->mm->end_code > rlim)
        goto out;

    /*
     * Check against existing mmap mappings.
     */
    if (find_vma_intersection(current->mm, oldbrk, newbrk+PAGE_SIZE))
        goto out;

    /*
     * stupid algorithm to decide if we have enough memory: while
     * simple, it hopefully works in most obvious cases.. Easy to
     * fool it, but this should catch most mistakes.
     */
    freepages = get_page_cache_size();
    freepages >>= 1;
    freepages += nr_free_pages();
    freepages += nr_swap_pages;
    freepages -= num_physpages >> 4;
    freepages -= (newbrk-oldbrk) >> PAGE_SHIFT;
    if (freepages < 0)
        goto out;
    /*
     * Ok, we have probably got enough memory - let it rip.
     */
    current->mm->brk = brk;
    do_brk(oldbrk, newbrk-oldbrk);
    retval = 0;
out:
    up_write(&current->mm->mmap_sem);
    return retval;
}
Example #16
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long rlim, retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;
	unsigned long min_brk;

	down_write(&mm->mmap_sem);

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the test
	 * of oldbrk with newbrk then it can escape the test and let the data
	 * segment grow beyond its set limit in the case where the limit is
	 * not page aligned -Ram Gupta
	 */
	rlim = rlimit(RLIMIT_DATA);
	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
			(mm->end_data - mm->start_data) > rlim)
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}
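From user space, the do_munmap() branch of the brk handler above is reached simply by lowering the program break again after growing it. A minimal illustration follows (standard libc calls, nothing assumed beyond that).

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	void *old = sbrk(0);			/* current program break */

	if (sbrk(1 << 20) == (void *)-1)	/* grow by 1 MiB: the do_brk() path */
		return 1;
	if (brk(old) != 0)			/* shrink back: the do_munmap() path */
		return 1;
	printf("break grown and restored at %p\n", old);
	return 0;
}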
Example #17
int s3c_mem_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	unsigned long *virt_addr;
	struct mm_struct *mm = current->mm;
	struct s3c_mem_alloc param;
	struct s3c_mem_dma_param dma_param;

	switch (cmd) {
		case S3C_MEM_ALLOC:
			mutex_lock(&mem_alloc_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_alloc_lock);
				return -EFAULT;
			}
			flag = MEM_ALLOC;
			param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
			DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
			if(param.vir_addr == -EINVAL) {
				printk("S3C_MEM_ALLOC FAILED\n");
				flag = 0;
				mutex_unlock(&mem_alloc_lock);
				return -EFAULT;
			}
			param.phy_addr = physical_address;
			DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				flag = 0;
				mutex_unlock(&mem_alloc_lock);
				return -EFAULT;
			}
			flag = 0;
			mutex_unlock(&mem_alloc_lock);

			break;

		case S3C_MEM_CACHEABLE_ALLOC:
			mutex_lock(&mem_cacheable_alloc_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_cacheable_alloc_lock);
				return -EFAULT;
			}
			flag = MEM_ALLOC_CACHEABLE;
			param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
			DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
			if(param.vir_addr == -EINVAL) {
				printk("S3C_MEM_ALLOC FAILED\n");
				flag = 0;
				mutex_unlock(&mem_cacheable_alloc_lock);
				return -EFAULT;
			}
			param.phy_addr = physical_address;
			DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				flag = 0;
				mutex_unlock(&mem_cacheable_alloc_lock);
				return -EFAULT;
			}
			flag = 0;
			mutex_unlock(&mem_cacheable_alloc_lock);

			break;

		case S3C_MEM_SHARE_ALLOC:
			mutex_lock(&mem_share_alloc_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_share_alloc_lock);
				return -EFAULT;
			}
			flag = MEM_ALLOC_SHARE;
			physical_address = param.phy_addr;
			DEBUG("param.phy_addr = %08x, %d\n", physical_address, __LINE__);
			param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
			DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
			if(param.vir_addr == -EINVAL) {
				printk("S3C_MEM_SHARE_ALLOC FAILED\n");
				flag = 0;
				mutex_unlock(&mem_share_alloc_lock);
				return -EFAULT;
			}
			DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				flag = 0;
				mutex_unlock(&mem_share_alloc_lock);
				return -EFAULT;
			}
			flag = 0;
			mutex_unlock(&mem_share_alloc_lock);

			break;

		case S3C_MEM_CACHEABLE_SHARE_ALLOC:
			mutex_lock(&mem_cacheable_share_alloc_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_cacheable_share_alloc_lock);
				return -EFAULT;
			}
			flag = MEM_ALLOC_CACHEABLE_SHARE;
			physical_address = param.phy_addr;
			DEBUG("param.phy_addr = %08x, %d\n", physical_address, __LINE__);
			param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
			DEBUG("param.vir_addr = %08x, %d\n", param.vir_addr, __LINE__);
			if(param.vir_addr == -EINVAL) {
				printk("S3C_MEM_SHARE_ALLOC FAILED\n");
				flag = 0;
				mutex_unlock(&mem_cacheable_share_alloc_lock);
				return -EFAULT;
			}
			DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				flag = 0;
				mutex_unlock(&mem_cacheable_share_alloc_lock);
				return -EFAULT;
			}
			flag = 0;
			mutex_unlock(&mem_cacheable_share_alloc_lock);

			break;

		case S3C_MEM_FREE:
			mutex_lock(&mem_free_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_free_lock);
				return -EFAULT;
			}

			DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if (do_munmap(mm, param.vir_addr, param.size) < 0) {
				printk("do_munmap() failed !!\n");
				mutex_unlock(&mem_free_lock);
				return -EINVAL;
			}
			virt_addr = (unsigned long *)phys_to_virt(param.phy_addr);

			kfree(virt_addr);
			param.size = 0;
			DEBUG("do_munmap() succeed !!\n");

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_free_lock);
				return -EFAULT;
			}

			mutex_unlock(&mem_free_lock);

			break;

		case S3C_MEM_SHARE_FREE:
			mutex_lock(&mem_share_free_lock);
			if(copy_from_user(&param, (struct s3c_mem_alloc *)arg, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_share_free_lock);
				return -EFAULT;
			}

			DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X, %d\n", param.phy_addr, param.size, param.vir_addr, __LINE__);

			if (do_munmap(mm, param.vir_addr, param.size) < 0) {
				printk("do_munmap() failed - MEM_SHARE_FREE!!\n");
				mutex_unlock(&mem_share_free_lock);
				return -EINVAL;
			}

			param.vir_addr = 0;
			DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n");

			if(copy_to_user((struct s3c_mem_alloc *)arg, &param, sizeof(struct s3c_mem_alloc))){
				mutex_unlock(&mem_share_free_lock);
				return -EFAULT;
			}

			mutex_unlock(&mem_share_free_lock);

			break;


		case S3C_MEM_DMA_COPY:
			if(copy_from_user(&dma_param, (struct s3c_mem_dma_param *)arg, sizeof(struct s3c_mem_dma_param))) {
				return -EFAULT;
			}
			//printk("S3C_MEM_DMA_COPY called\n");

			if (s3c2410_dma_request(DMACH_3D_M2M, &s3c_m2m_dma_client, NULL)) {
				printk(KERN_WARNING "Unable to get DMA channel.\n");
				return -1;
			}

			s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_m2m_dma_finish);

			//dma_cache_maint(dma_param.src_addr,sizeof(unsigned long long), DMA_BIDIRECTIONAL);

 //  		    	printk("MEMCPY src=%p,dst=%p,size=%d\n", dma_param.src_addr,dma_param.dst_addr, dma_param.size);

			/* Source address */
#ifdef CONFIG_S3C_DMA_PL080
			s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM_P, 1, dma_param.src_addr);
			s3c2410_dma_config(DMACH_3D_M2M, 4, 0);
#else
			s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM, 1, dma_param.src_addr);
			s3c2410_dma_config(DMACH_3D_M2M, 8, 0);
#endif

			/* Destination address : Data buffer address */
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, dma_param.dst_addr, dma_param.size);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);

			wait_for_completion(&s3c_m2m_dma_complete);
#if 0
			/* Destination address : Data buffer address */
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000, 0x4000);
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x10000, 0x4000);
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x20000, 0x4000);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);

			wait_for_completion(&s3c_m2m_dma_complete);
			//wait_for_completion(&s3c_m2m_dma_complete);
			//wait_for_completion(&s3c_m2m_dma_complete);

			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x30000, 0x4000);
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x40000, 0x4000);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);
			wait_for_completion(&s3c_m2m_dma_complete);
			//wait_for_completion(&s3c_m2m_dma_complete);

			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, 0x27a00000+0x50000, 0x4000);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);
			wait_for_completion(&s3c_m2m_dma_complete);
#endif

			s3c2410_dma_free(DMACH_3D_M2M, &s3c_m2m_dma_client);

			if(copy_to_user((struct s3c_mem_dma_param *)arg, &dma_param, sizeof(struct s3c_mem_dma_param))) {
				return -EFAULT;
			}

			break;

		case S3C_MEM_DMA_SET:
			if(copy_from_user(&dma_param, (struct s3c_mem_dma_param *)arg, sizeof(struct s3c_mem_dma_param))) {
				return -EFAULT;
			}

			if (s3c2410_dma_request(DMACH_3D_M2M, &s3c_m2m_dma_client, NULL)) {
				printk(KERN_WARNING "Unable to get DMA channel.\n");
				return -1;
			}

			s3c2410_dma_set_buffdone_fn(DMACH_3D_M2M, s3c_m2m_dma_finish);

			//dma_cache_maint(dma_param.src_addr,sizeof(unsigned long long), DMA_BIDIRECTIONAL);

//   		    	printk("MEMSET src=%p,dst=%p,size=%d\n", dma_param.src_addr,dma_param.dst_addr, dma_param.size);

			/* Source address */
			s3c2410_dma_devconfig(DMACH_3D_M2M, S3C_DMA_MEM2MEM_SET, 1,dma_param.src_addr); 
			s3c2410_dma_config(DMACH_3D_M2M, 8, 0);
			
			/* Destination address : Data buffer address */
			s3c2410_dma_enqueue(DMACH_3D_M2M, 0, dma_param.dst_addr, dma_param.size);
			s3c2410_dma_ctrl(DMACH_3D_M2M, S3C2410_DMAOP_START);

			wait_for_completion(&s3c_m2m_dma_complete);

			s3c2410_dma_free(DMACH_3D_M2M, &s3c_m2m_dma_client);

			if(copy_to_user((struct s3c_mem_dma_param *)arg, &dma_param, sizeof(struct s3c_mem_dma_param))) {
				return -EFAULT;
			}
			break;

		default:
			DEBUG("s3c_mem_ioctl() : default !!\n");
			return -EINVAL;
	}

	return 0;
}
Example #18
int s3c_mem_ioctl(struct inode *inode, struct file *file,
		unsigned int cmd, unsigned long arg)
{
#ifdef USE_DMA_ALLOC
	unsigned long virt_addr;
#else
	unsigned long *virt_addr;
#endif

	struct mm_struct *mm = current->mm;
	struct s3c_mem_alloc param;

	switch (cmd) {
	case S3C_MEM_ALLOC:
		mutex_lock(&mem_alloc_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC;
		param.vir_addr = do_mmap(file, 0, param.size,
				(PROT_READ|PROT_WRITE), MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n",
						param.vir_addr, __LINE__);
		if (param.vir_addr == -EINVAL) {
			printk(KERN_INFO "S3C_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		param.phy_addr = physical_address;
#ifdef USE_DMA_ALLOC
		param.kvir_addr = virtual_address;
#endif

		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			flag = 0;
			mutex_unlock(&mem_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_alloc_lock);

		break;

	case S3C_MEM_CACHEABLE_ALLOC:
		mutex_lock(&mem_cacheable_alloc_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_CACHEABLE;
		param.vir_addr = do_mmap(file, 0, param.size,
				(PROT_READ|PROT_WRITE), MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n",
				param.vir_addr, __LINE__);
		if (param.vir_addr == -EINVAL) {
			printk(KERN_INFO "S3C_MEM_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		param.phy_addr = physical_address;
		DEBUG("KERNEL MALLOC : param.phy_addr = 0x%X"
				" \t size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			flag = 0;
			mutex_unlock(&mem_cacheable_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_cacheable_alloc_lock);

		break;

	case S3C_MEM_SHARE_ALLOC:
		mutex_lock(&mem_share_alloc_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_SHARE;
		physical_address = param.phy_addr;
		DEBUG("param.phy_addr = %08x, %d\n",
				physical_address, __LINE__);
		param.vir_addr = do_mmap(file, 0, param.size,
				(PROT_READ|PROT_WRITE), MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n",
				param.vir_addr, __LINE__);
		if (param.vir_addr == -EINVAL) {
			printk(KERN_INFO "S3C_MEM_SHARE_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			flag = 0;
			mutex_unlock(&mem_share_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_share_alloc_lock);

		break;

	case S3C_MEM_CACHEABLE_SHARE_ALLOC:
		mutex_lock(&mem_cacheable_share_alloc_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		flag = MEM_ALLOC_CACHEABLE_SHARE;
		physical_address = param.phy_addr;
		DEBUG("param.phy_addr = %08x, %d\n",
				physical_address, __LINE__);
		param.vir_addr = do_mmap(file, 0, param.size,
				(PROT_READ|PROT_WRITE), MAP_SHARED, 0);
		DEBUG("param.vir_addr = %08x, %d\n",
				param.vir_addr, __LINE__);
		if (param.vir_addr == -EINVAL) {
			printk(KERN_INFO "S3C_MEM_SHARE_ALLOC FAILED\n");
			flag = 0;
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		DEBUG("MALLOC_SHARE : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			flag = 0;
			mutex_unlock(&mem_cacheable_share_alloc_lock);
			return -EFAULT;
		}
		flag = 0;
		mutex_unlock(&mem_cacheable_share_alloc_lock);

		break;

	case S3C_MEM_FREE:
		mutex_lock(&mem_free_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}

		DEBUG("KERNEL FREE : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk(KERN_INFO "do_munmap() failed !!\n");
			mutex_unlock(&mem_free_lock);
			return -EINVAL;
		}

#ifdef USE_DMA_ALLOC
		virt_addr = param.kvir_addr;
		dma_free_writecombine(NULL, param.size,
				(unsigned int *) virt_addr, param.phy_addr);
#else
		virt_addr = (unsigned long *)phys_to_virt(param.phy_addr);
		kfree(virt_addr);
#endif
		param.size = 0;
		DEBUG("do_munmap() succeed !!\n");

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_free_lock);
			return -EFAULT;
		}

		mutex_unlock(&mem_free_lock);

		break;

	case S3C_MEM_SHARE_FREE:
		mutex_lock(&mem_share_free_lock);
		if (copy_from_user(&param, (struct s3c_mem_alloc *)arg,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;
		}

		DEBUG("MEM_SHARE_FREE : param.phy_addr = 0x%X \t "
				"size = %d \t param.vir_addr = 0x%X, %d\n",
				param.phy_addr, param.size, param.vir_addr,
				__LINE__);

		if (do_munmap(mm, param.vir_addr, param.size) < 0) {
			printk(KERN_INFO "do_munmap() failed - MEM_SHARE_FREE!!\n");
			mutex_unlock(&mem_share_free_lock);
			return -EINVAL;
		}

		param.vir_addr = 0;
		DEBUG("do_munmap() succeed !! - MEM_SHARE_FREE\n");

		if (copy_to_user((struct s3c_mem_alloc *)arg, &param,
					sizeof(struct s3c_mem_alloc))) {
			mutex_unlock(&mem_share_free_lock);
			return -EFAULT;
		}

		mutex_unlock(&mem_share_free_lock);

		break;

	default:
		DEBUG("s3c_mem_ioctl() : default !!\n");
		return -EINVAL;
	}

	return 0;
}
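A user-space round trip through S3C_MEM_ALLOC and S3C_MEM_FREE would look roughly like the sketch below. The device path, the ioctl numbers and the full struct layout are assumptions for illustration; the fields used (size, vir_addr, phy_addr) and the call order mirror what the handler above reads and writes.

/* Hypothetical user-space sketch: allocate a buffer through the driver,
 * touch it through the returned mapping, then free it.  "/dev/s3c-mem" and
 * the _IOWR numbers are assumed, not taken from this source. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct s3c_mem_alloc {
	int size;			/* assumed layout */
	unsigned int vir_addr;
	unsigned int phy_addr;
	unsigned int kvir_addr;
};

#define S3C_MEM_ALLOC	_IOWR('S', 300, struct s3c_mem_alloc)	/* assumed */
#define S3C_MEM_FREE	_IOWR('S', 301, struct s3c_mem_alloc)	/* assumed */

int main(void)
{
	struct s3c_mem_alloc param = { .size = 64 * 1024 };
	int fd = open("/dev/s3c-mem", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, S3C_MEM_ALLOC, &param) < 0) {
		perror("S3C_MEM_ALLOC");
		close(fd);
		return 1;
	}
	/* vir_addr is the mapping the driver created with do_mmap(). */
	memset((void *)(unsigned long)param.vir_addr, 0, param.size);

	if (ioctl(fd, S3C_MEM_FREE, &param) < 0)
		perror("S3C_MEM_FREE");
	close(fd);
	return 0;
}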
Example #19
/* Map a freebuffer chain into a process's virtual address space
 * when the buffers are a multiple of PAGE_SIZE */
static int fb_to_user_page_size(struct file *filep,
				struct pme_fbchain *buffers,
				unsigned long *user_addr,
				size_t *size)
{
	struct vm_area_struct *vma;
	int index, ret;
	void *data;
	size_t data_size;
	struct pme_fb_vma *mem_node;

	int list_count = pme_fbchain_num(buffers);
	/* These buffers are page aligned and occupy
	 * complete pages.  This means we can mmap it all at once */
	*size = list_count * pme_fbchain_max(buffers);

	/* We need to lock the mmap_sem because other threads
	 * could be modifying the address space layout */
	down_write(&current->mm->mmap_sem);

	*user_addr = do_mmap(filep, 0, *size,
			     PROT_READ | PROT_WRITE, MAP_PRIVATE, 0);
	ret = check_mmap_result((void *) *user_addr);
	if (ret)
		goto err;

	/* Lookup the new VMA and stuff the fbchain into
	 * it so when a page fault occurs, we can find the
	 * proper page and return it */
	vma = find_vma(current->mm, (unsigned long) *user_addr);

	mem_node = vma->vm_private_data = fb_vma_create(buffers,
			fb_page_mapped, 1, *size, 0,
			(*size + PAGE_SIZE - 1) / PAGE_SIZE);
	if (!mem_node) {
		ret = -ENOMEM;
		/* Make sure we clean the mapped area out of
		 * the users process space */
		 do_munmap(current->mm, (*user_addr) & PAGE_MASK,
			 *size + offset_in_page(*user_addr));
		goto err;
	}
	/* Precompute the struct page * for each page in the buffer.  This
	 * makes the nopage implementation easy, as we have already determined
	 * which page to return */
	index = 0;
	data = pme_fbchain_current(buffers);
	data_size = pme_fbchain_current_bufflen(buffers);
	while (data_size) {
		while (data_size) {
			mem_node->page_array[index] = virt_to_page(data);
			index++;
			if (data_size > PAGE_SIZE) {
				data_size -= PAGE_SIZE;
				data += PAGE_SIZE;
			} else
				data_size = 0;
		}
		data = pme_fbchain_next(buffers);
		data_size = pme_fbchain_current_bufflen(buffers);
	}
	up_write(&current->mm->mmap_sem);
	/* Re-adjust the size to be the actual data length of the buffer */
	*size = pme_fbchain_length(buffers);
	return PME_MEM_CONTIG;
err:
	up_write(&current->mm->mmap_sem);
	return ret;
}
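The page_array filled in above exists so that the driver's fault handler can hand pages back without any lookup work. A minimal sketch of such a handler is shown below; it is not part of this source, and it assumes the older .nopage prototype and that struct pme_fb_vma exposes page_array indexed one entry per page.

/* Hedged sketch of a .nopage handler using the precomputed page_array;
 * the prototype and VM_FAULT_MINOR correspond to older kernels. */
static struct page *fb_vma_nopage(struct vm_area_struct *vma,
				  unsigned long address, int *type)
{
	struct pme_fb_vma *mem_node = vma->vm_private_data;
	unsigned long index = (address - vma->vm_start) >> PAGE_SHIFT;
	struct page *page = mem_node->page_array[index];

	get_page(page);		/* the core VM drops this reference later */
	if (type)
		*type = VM_FAULT_MINOR;
	return page;
}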
Example #20
/**
 *
 * test_fault_runtest - Allocate and free a number of pages from a zone
 * @params: Parameters read from the proc entry
 * @argc:   Number of parameters actually entered
 * @procentry: Proc buffer to write to
 *
 * If pages is set to 0, pages will be allocated until the pages_high watermark
 * is hit
 * Returns
 * 0  on success
 * -1 on failure
 *
 */
int test_fault_runtest(int *params, int argc, int procentry) {
	unsigned long nopages;		/* Number of pages to allocate */
	int nopasses;			/* Number of times to run test */
	C_ZONE *zone;			/* Zone being tested */
	unsigned long freelimit;	/* The min no. free pages in zone */
	unsigned long alloccount;	/* Number of pages alloced */
	unsigned long present;		/* Number of pages present */
	unsigned long addr=0;		/* Address mapped area starts */
	unsigned long len;		/* Length of mapped area */
	unsigned long sched_count = 0;	/* How many times schedule is called */
	unsigned long start;		/* Start of a test in jiffies */
	int totalpasses;		/* Total number of passes */
	int failed=0;			/* Failed mappings */

	/* Get the parameters */
	nopasses = params[0];
	nopages  = params[1];

	/* Make sure a buffer is available */
	if (vmrproc_checkbuffer(testinfo[procentry])) BUG();
	vmrproc_openbuffer(&testinfo[procentry]);

	/* Make sure passes is valid */
	if (nopasses <= 0) {
		vmr_printk("Cannot make 0 or negative number of passes\n");
		return -1;
	}

	/* Print header */
	printp("%s Test Results (" UTS_RELEASE ").\n\n", testinfo[procentry].name);

	/* Get the parameters for the test */
	if (test_fault_calculate_parameters(procentry, &zone, &nopages, &freelimit) == -1) {
		printp("Test failed\n");
		return -1;
	}
	len = nopages * PAGE_SIZE;

	/*
	 * Map a region of memory where our pages are going to be stored.
	 * This is equivalent to the mmap() system call.
	 */
	addr =  do_mmap(NULL,		/* No struct file */
			0,		/* No starting address */
			len,		/* Length of address space */
			PROT_WRITE | PROT_READ, /* Protection */
			MAP_PRIVATE | MAP_ANONYMOUS,	/* Private mapping */
			0);
			
	/* get_unmapped area has a horrible way of returning errors */
	if (addr == -1) {
		printp("Failed to mmap");
		return -1;
	}

	/* Print area information */
	printp("Mapped Area Information\n");
	printp("o address:  0x%lX\n", addr);
	printp("o length:   %lu (%lu pages)\n", len, nopages);
	printp("\n");

	/* Begin test */
	printp("Test Parameters\n");
	printp("o Passes:	       %d\n",  nopasses);
	printp("o Starting Free pages: %lu\n", zone->free_pages);
	printp("o Free page limit:     %lu\n", freelimit);
	printp("o References:	       %lu\n", nopages);
	printp("\n");

	printp("Test Results\n");
	printp("Pass       Refd     Present   Time\n");
	totalpasses = nopasses;

	/* Copy the string into every page once to alloc all ptes */
	alloccount=0;
	start = jiffies;
	while (nopages-- > 0) {
		check_resched(sched_count);

		/* A failed copy leaves the page untouched; count it as a
		 * failed mapping instead of ignoring the return value */
		if (copy_to_user((unsigned long *)(addr + (nopages * PAGE_SIZE)),
				test_string,
				strlen(test_string)))
			failed++;

		alloccount++;
	}

	/*
	 * Step through the page tables pass number of times swapping in
	 * pages as necessary
	 */
	for (;;) {

		/* Count the number of pages present */
		present = countpages_mm(current->mm, addr, len, &sched_count);

		/* Print test info */
		printp("%-8d %8lu %8lu %8lums\n", totalpasses-nopasses,
							alloccount,
							present,
							jiffies_to_ms(start));

		if (nopasses-- == 0) break;

		/* Touch all the pages in the mapped area */
		start = jiffies;
		alloccount = forall_pte_mm(current->mm, addr, len, 
				&sched_count, NULL, touch_pte);

	}
	
	printp("\nPost Test Information\n");
	printp("o Finishing Free pages: %lu\n", zone->free_pages);
	printp("o Schedule() calls:     %lu\n", sched_count);
	printp("o Failed mappings:      %u\n",  failed);
	printp("\n");

	printp("Test completed successfully\n");

	/* Print out a process map */
	vmr_printmap(current->mm, addr, len, &sched_count, &testinfo[procentry]);
	/* Unmap the area */
	if (do_munmap(current->mm, addr, len) == -1) {
		printp("WARNING: Failed to unmap memory area");
	}

	vmrproc_closebuffer(&testinfo[procentry]);
	return 0;
}
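The per-pass timing above is printed through jiffies_to_ms(start). A minimal sketch of such a helper, assuming it reports the milliseconds elapsed since start, would be:

/* Assumed helper: milliseconds elapsed since `start' (in jiffies). */
static inline unsigned long jiffies_to_ms(unsigned long start)
{
	return ((jiffies - start) * 1000) / HZ;
}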
Example #21
int sys_munmap(unsigned long addr, unsigned long len)
{
	return do_munmap(addr, len);
}
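The two-argument do_munmap() above belongs to a small teaching kernel. In Linux, do_munmap() takes the mm_struct as its first argument and expects the caller to hold the mmap semaphore for writing, so the equivalent wrapper would look roughly like this sketch:

/* Sketch assuming the Linux do_munmap(mm, addr, len) signature. */
int sys_munmap(unsigned long addr, unsigned long len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_munmap(current->mm, addr, len);
	up_write(&current->mm->mmap_sem);
	return ret;
}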
Example #22
static int s3c_pp_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	s3c_pp_instance_context_t *current_instance;
	s3c_pp_params_t *parg;

	unsigned int temp = 0;

	mutex_lock(h_mutex);

	current_instance = (s3c_pp_instance_context_t *) file->private_data;
	parg = (s3c_pp_params_t *) arg;

	switch (cmd) {
		case S3C_PP_SET_PARAMS:
            {
                s3c_pp_out_path_t temp_out_path; 
                unsigned int temp_src_width, temp_src_height, temp_dst_width, temp_dst_height;
                s3c_color_space_t temp_src_color_space, temp_dst_color_space;

                get_user(temp_out_path, &parg->out_path);

                if ( (-1 != s3c_pp_instance_info.fifo_mode_instance_no )
                     || ((s3c_pp_instance_info.dma_mode_instance_count) && (FIFO_FREERUN == temp_out_path)) )
                {
                    printk ( KERN_ERR "\n%s: S3C_PP_SET_PARAMS can't be executed.\n", __FUNCTION__ );
                    mutex_unlock(h_mutex);
    			    return -EINVAL; 
                }

    			get_user(temp_src_width,       &parg->src_width);
    			get_user(temp_src_height,      &parg->src_height);
    			get_user(temp_dst_width,       &parg->dst_width);
    			get_user(temp_dst_height,      &parg->dst_height);

                /* The S3C6410 supports source images up to 4096 x 4096
                 * and destination images up to 2048 x 2048. */
    			if (    (temp_src_width > 4096) || (temp_src_height > 4096) 
                     || (temp_dst_width > 2048) || (temp_dst_height > 2048) )
    			{
    				printk(KERN_ERR "\n%s: Size is too big to be supported.\n", __FUNCTION__);
    				mutex_unlock(h_mutex);
    				return -EINVAL;
    			}
                
                get_user(temp_src_color_space, &parg->src_color_space);
                get_user(temp_dst_color_space, &parg->dst_color_space);

                if (    ( (temp_src_color_space == YC420) && (temp_src_width % 8) )
                     || ( (temp_src_color_space == RGB16) && (temp_src_width % 2) ) 
                     || ( (temp_out_path == DMA_ONESHOT) && (    ((temp_dst_color_space == YC420) && (temp_dst_width % 8))
                                                              || ((temp_dst_color_space == RGB16) && (temp_dst_width % 2)))) )
                {
    				printk(KERN_ERR "\n%s: YUV420 image width must be a multiple of 8.\n", __FUNCTION__);
                    printk(KERN_ERR "%s: RGB16 must be a multiple of 2.\n", __FUNCTION__);
    				mutex_unlock(h_mutex);
    				return -EINVAL;
                } 
                
                
    			get_user(current_instance->src_full_width,  &parg->src_full_width);
    			get_user(current_instance->src_full_height, &parg->src_full_height);
    			get_user(current_instance->src_start_x,     &parg->src_start_x);
    			get_user(current_instance->src_start_y,     &parg->src_start_y);
    			current_instance->src_width                 = temp_src_width;
    			current_instance->src_height                = temp_src_height;
                current_instance->src_color_space           = temp_src_color_space;

    			get_user(current_instance->dst_full_width,  &parg->dst_full_width);
                get_user(current_instance->dst_full_height, &parg->dst_full_height);
                get_user(current_instance->dst_start_x,     &parg->dst_start_x);
    			get_user(current_instance->dst_start_y,     &parg->dst_start_y);
    			current_instance->dst_width                 = temp_dst_width;
    			current_instance->dst_height                = temp_dst_height;
                current_instance->dst_color_space           = temp_dst_color_space;

                current_instance->out_path                  = temp_out_path;

                if ( DMA_ONESHOT == current_instance->out_path )
                {
                    s3c_pp_instance_info.instance_state[current_instance->instance_no] = PP_INSTANCE_INUSE_DMA_ONESHOT;
                    s3c_pp_instance_info.dma_mode_instance_count++;               
                }
                else
                {
                    get_user(current_instance->scan_mode, &parg->scan_mode);

                    current_instance->dst_color_space = RGB30;

                    s3c_pp_instance_info.instance_state[current_instance->instance_no] = PP_INSTANCE_INUSE_FIFO_FREERUN;
                    s3c_pp_instance_info.fifo_mode_instance_no = current_instance->instance_no;
                    s3c_pp_instance_info.wincon0_value_before_fifo_mode = __raw_readl ( S3C_WINCON0 );
                
                    /* REDUCE_VCLK_SYOP_TIME: when downscaling vertically,
                     * pre-shrink the source height (and widen the full width
                     * by the same factor) before programming the scaler. */
                    if ( current_instance->src_height > current_instance->dst_height )
                    {
                        int i;

                        /* Walk i up from 2 until src_height < i * dst_height
                         * or i reaches 8; the empty loop body is intentional. */
                        for ( i = 2; (current_instance->src_height >= (i * current_instance->dst_height)) && (i < 8); i++ )
                            ;

                        current_instance->src_full_width  *= i;
                        current_instance->src_full_height /= i;
                        current_instance->src_height      /= i;
                    }
                }

                current_instance->value_changed |= PP_VALUE_CHANGED_PARAMS;
            }
			break;

		case S3C_PP_START:
            dprintk ( "%s: S3C_PP_START last_instance=%d, curr_instance=%d\n", __FUNCTION__, 
                        s3c_pp_instance_info.last_running_instance_no, current_instance->instance_no );

            if ( PP_INSTANCE_READY == s3c_pp_instance_info.instance_state[current_instance->instance_no] )
            {
                printk ( KERN_ERR "%s: S3C_PP_START must be executed after running S3C_PP_SET_PARAMS.\n", __FUNCTION__ );
                mutex_unlock(h_mutex);
			    return -EINVAL;
            }

            if ( current_instance->instance_no != s3c_pp_instance_info.last_running_instance_no )
            {
                __raw_writel(0x0<<31, s3c_pp_base + S3C_VPP_POSTENVID);
            
                temp = S3C_MODE2_ADDR_CHANGE_DISABLE | S3C_MODE2_CHANGE_AT_FRAME_END | S3C_MODE2_SOFTWARE_TRIGGER;
                __raw_writel(temp, s3c_pp_base + S3C_VPP_MODE_2);
                   
                set_clock_src(HCLK);
            
                // setting the src/dst color space
                set_data_format(current_instance);
            
                // setting the src/dst size 
                set_scaler(current_instance);
            
                // setting the src/dst buffer address
                set_src_addr(current_instance);
                set_dest_addr(current_instance);

                current_instance->value_changed = PP_VALUE_CHANGED_NONE;

                s3c_pp_instance_info.last_running_instance_no = current_instance->instance_no;
                s3c_pp_instance_info.running_instance_no = current_instance->instance_no;

                if ( PP_INSTANCE_INUSE_DMA_ONESHOT == s3c_pp_instance_info.instance_state[current_instance->instance_no] )
                { // DMA OneShot Mode
                    dprintk ( "%s: DMA_ONESHOT mode\n", __FUNCTION__ );

                    post_int_enable(1);
                    pp_dma_mode_set_and_start();


                    if ( !(file->f_flags & O_NONBLOCK) )
                    {
                        if (interruptible_sleep_on_timeout(&waitq, 500) == 0) 
                        {
                            printk(KERN_ERR "\n%s: Waiting for interrupt is timeout\n", __FUNCTION__);
                        }
                    }
                }
                else
                { // FIFO freerun Mode
                    dprintk ( "%s: FIFO_freerun mode\n", __FUNCTION__ );
                    s3c_pp_instance_info.fifo_mode_instance_no = current_instance->instance_no;

                    post_int_enable(1);
                    pp_fifo_mode_set_and_start(current_instance); 
                }
            }
            else
            {
                if ( current_instance->value_changed != PP_VALUE_CHANGED_NONE )
                {
                    __raw_writel(0x0<<31, s3c_pp_base + S3C_VPP_POSTENVID);

                    if ( current_instance->value_changed & PP_VALUE_CHANGED_PARAMS )
                    {
                        set_data_format(current_instance);
                        set_scaler(current_instance);
                    }

                    if ( current_instance->value_changed & PP_VALUE_CHANGED_SRC_BUF_ADDR_PHY )
                    {
                        set_src_addr(current_instance);
                    }

                    if ( current_instance->value_changed & PP_VALUE_CHANGED_DST_BUF_ADDR_PHY )
                    {
                        set_dest_addr(current_instance);
                    }

                    current_instance->value_changed = PP_VALUE_CHANGED_NONE;
                }

                s3c_pp_instance_info.running_instance_no = current_instance->instance_no;

                post_int_enable(1);
                start_processing();

                if ( !(file->f_flags & O_NONBLOCK) )
                {
                    if (interruptible_sleep_on_timeout(&waitq, 500) == 0) 
                    {
                        printk(KERN_ERR "\n%s: Waiting for interrupt is timeout\n", __FUNCTION__);
                    }
                }
            }
			break;

		case S3C_PP_GET_SRC_BUF_SIZE:

            if ( PP_INSTANCE_READY == s3c_pp_instance_info.instance_state[current_instance->instance_no] )
            {
                dprintk ( "%s: S3C_PP_GET_SRC_BUF_SIZE must be executed after running S3C_PP_SET_PARAMS.\n", __FUNCTION__ );
                mutex_unlock(h_mutex);
			    return -EINVAL;
            }

            temp = cal_data_size ( current_instance->src_color_space, current_instance->src_full_width, current_instance->src_full_height );

			mutex_unlock(h_mutex);
			return temp;


		case S3C_PP_SET_SRC_BUF_ADDR_PHY:

            get_user(current_instance->src_buf_addr_phy, &parg->src_buf_addr_phy);
            current_instance->value_changed |= PP_VALUE_CHANGED_SRC_BUF_ADDR_PHY;
			break;

        case S3C_PP_SET_SRC_BUF_NEXT_ADDR_PHY:

            if ( current_instance->instance_no != s3c_pp_instance_info.fifo_mode_instance_no )
            { // if FIFO Mode is not Active
                dprintk (KERN_DEBUG "%s: S3C_PP_SET_SRC_BUF_NEXT_ADDR_PHY can't be executed.\n", __FUNCTION__ );
                mutex_unlock(h_mutex);
                return -EINVAL;
            }            

            get_user(current_instance->src_next_buf_addr_phy, &parg->src_next_buf_addr_phy);

            temp = __raw_readl(s3c_pp_base + S3C_VPP_MODE_2);
            temp |= (0x1<<4);
            __raw_writel(temp, s3c_pp_base + S3C_VPP_MODE_2);
    
            set_src_next_buf_addr(current_instance);

            temp = __raw_readl(s3c_pp_base + S3C_VPP_MODE_2);
            temp &= ~(0x1<<4);
            __raw_writel(temp, s3c_pp_base + S3C_VPP_MODE_2);
            break;

		case S3C_PP_GET_DST_BUF_SIZE:
            
            if ( PP_INSTANCE_READY == s3c_pp_instance_info.instance_state[current_instance->instance_no] )
            {
                dprintk ( "%s: S3C_PP_GET_DST_BUF_SIZE must be executed after running S3C_PP_SET_PARAMS.\n", __FUNCTION__ );
                mutex_unlock(h_mutex);
			    return -EINVAL;
            }

            temp = cal_data_size ( current_instance->dst_color_space, current_instance->dst_full_width, current_instance->dst_full_height );

			mutex_unlock(h_mutex);
			return temp;

		case S3C_PP_SET_DST_BUF_ADDR_PHY:

            get_user(current_instance->dst_buf_addr_phy, &parg->dst_buf_addr_phy);
            current_instance->value_changed |= PP_VALUE_CHANGED_DST_BUF_ADDR_PHY;
			break;


        case S3C_PP_ALLOC_KMEM:
            {
                s3c_pp_mem_alloc_t param;
                
                if (copy_from_user(&param, (s3c_pp_mem_alloc_t *)arg, sizeof(s3c_pp_mem_alloc_t)))
                {
                    mutex_unlock(h_mutex);
                    return -EFAULT;
                }
                
                flag = ALLOC_KMEM;
                
                param.vir_addr = do_mmap(file, 0, param.size, PROT_READ|PROT_WRITE, MAP_SHARED, 0);
                dprintk (KERN_DEBUG "param.vir_addr = %08x\n", param.vir_addr);
                            
                flag = 0;

                if(param.vir_addr == -EINVAL) {
                    printk(KERN_ERR "%s: PP_MEM_ALLOC FAILED\n", __FUNCTION__);
                    mutex_unlock(h_mutex);
                    return -EFAULT;
                }
                param.phy_addr = physical_address;
                
                dprintk (KERN_DEBUG "KERNEL MALLOC : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);
                
                if (copy_to_user((s3c_pp_mem_alloc_t *)arg, &param, sizeof(s3c_pp_mem_alloc_t)))
                {
                    mutex_unlock(h_mutex);
                    return -EFAULT;
                }
            }
            break;

        case S3C_PP_FREE_KMEM:
            {
                s3c_pp_mem_alloc_t param;
                struct mm_struct *mm = current->mm;
                void *virt_addr;

                if ( copy_from_user(&param, (s3c_pp_mem_alloc_t *)arg, sizeof(s3c_pp_mem_alloc_t)) )
                {
                    mutex_unlock(h_mutex);
                    return -EFAULT;
                }
            
                dprintk (KERN_DEBUG "KERNEL FREE : param.phy_addr = 0x%X \t size = %d \t param.vir_addr = 0x%X\n", param.phy_addr, param.size, param.vir_addr);
            
                if ( do_munmap(mm, param.vir_addr, param.size ) < 0 ) 
                {
                    dprintk("do_munmap() failed !!\n");
                    mutex_unlock(h_mutex);
                    return -EINVAL;
                }
                virt_addr = phys_to_virt(param.phy_addr);
                dprintk ( "KERNEL : virt_addr = 0x%X\n", (unsigned int) virt_addr );
            
                kfree(virt_addr);
                param.size = 0;

                dprintk(KERN_DEBUG "do_munmap() succeed !!\n");
            }
            break;

        case S3C_PP_GET_RESERVED_MEM_SIZE:
            mutex_unlock(h_mutex);
            return PP_RESERVED_MEM_SIZE;

        case S3C_PP_GET_RESERVED_MEM_ADDR_PHY:
            mutex_unlock(h_mutex);
            return PP_RESERVED_MEM_ADDR_PHY;

		default:
			mutex_unlock(h_mutex);
			return -EINVAL;
	}

	mutex_unlock(h_mutex);
	
	return 0;
}
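Driving the post processor from user space in the DMA one-shot path comes down to S3C_PP_SET_PARAMS, the two buffer-address ioctls and S3C_PP_START. The sketch below illustrates that order; the device node, the ioctl numbers and the exact s3c_pp_params_t layout are assumptions, while the field names and enum values (YC420, RGB16, DMA_ONESHOT) come from the handler above.

/* Hypothetical user-space sketch of one DMA one-shot conversion.
 * "/dev/s3c-pp", the ioctl numbers and the header name are assumed. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "s3c_pp.h"	/* assumed UAPI header defining s3c_pp_params_t,
			 * the color-space/out-path enums and the ioctls */

int main(void)
{
	s3c_pp_params_t params = {
		.src_full_width   = 640, .src_full_height = 480,
		.src_width        = 640, .src_height      = 480,
		.src_color_space  = YC420,
		.dst_full_width   = 320, .dst_full_height = 240,
		.dst_width        = 320, .dst_height      = 240,
		.dst_color_space  = RGB16,
		.out_path         = DMA_ONESHOT,
		.src_buf_addr_phy = 0x57000000,	/* assumed reserved memory */
		.dst_buf_addr_phy = 0x57400000,
	};
	int fd = open("/dev/s3c-pp", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	ioctl(fd, S3C_PP_SET_PARAMS, &params);
	ioctl(fd, S3C_PP_SET_SRC_BUF_ADDR_PHY, &params);
	ioctl(fd, S3C_PP_SET_DST_BUF_ADDR_PHY, &params);
	/* START blocks (unless O_NONBLOCK) until the post-processing
	 * interrupt fires or the 500-jiffy timeout expires. */
	ioctl(fd, S3C_PP_START, &params);
	close(fd);
	return 0;
}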