Example #1
static int snd_hwdep_open(struct inode *inode, struct file * file)
{
	int major = imajor(inode);
	int cardnum;
	int device;
	snd_hwdep_t *hw;
	int err;
	wait_queue_t wait;

	if (major == snd_major) {
		cardnum = SNDRV_MINOR_CARD(iminor(inode));
		device = SNDRV_MINOR_DEVICE(iminor(inode)) - SNDRV_MINOR_HWDEP;
#ifdef CONFIG_SND_OSSEMUL
	} else if (major == SOUND_MAJOR) {
		cardnum = SNDRV_MINOR_OSS_CARD(iminor(inode));
		device = 0;
#endif
	} else
		return -ENXIO;
	cardnum %= SNDRV_CARDS;
	device %= SNDRV_MINOR_HWDEPS;
	hw = snd_hwdep_devices[(cardnum * SNDRV_MINOR_HWDEPS) + device];
	if (hw == NULL)
		return -ENODEV;

	if (!hw->ops.open)
		return -ENXIO;
#ifdef CONFIG_SND_OSSEMUL
	if (major == SOUND_MAJOR && hw->oss_type < 0)
		return -ENXIO;
#endif

	if (!try_module_get(hw->card->module))
		return -EFAULT;

	init_waitqueue_entry(&wait, current);
	add_wait_queue(&hw->open_wait, &wait);
	down(&hw->open_mutex);
	while (1) {
		if (hw->exclusive && hw->used > 0) {
			err = -EBUSY;
			break;
		}
		err = hw->ops.open(hw, file);
		if (err >= 0)
			break;
		if (err == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK) {
				err = -EBUSY;
				break;
			}
		} else
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		up(&hw->open_mutex);
		schedule();
		down(&hw->open_mutex);
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}
	}
	remove_wait_queue(&hw->open_wait, &wait);
	if (err >= 0) {
		err = snd_card_file_add(hw->card, file);
		if (err >= 0) {
			file->private_data = hw;
			hw->used++;
		} else {
			if (hw->ops.release)
				hw->ops.release(hw, file);
		}
	}
	up(&hw->open_mutex);
	if (err < 0)
		module_put(hw->card->module);
	return err;
}
Example #2
/*
 * edac_device_register_sysfs_main_kobj
 *
 *	perform the high level setup for the new edac_device instance
 *
 * Return:  0 SUCCESS
 *         !0 FAILURE
 */
int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
{
	struct sysdev_class *edac_class;
	int err;

	debugf1("%s()\n", __func__);

	/* get the /sys/devices/system/edac reference */
	edac_class = edac_get_sysfs_class();
	if (edac_class == NULL) {
		debugf1("%s() no edac_class error\n", __func__);
		err = -ENODEV;
		goto err_out;
	}

	/* Point to the 'edac_class' this instance 'reports' to */
	edac_dev->edac_class = edac_class;

	/* Init the device's kobject */
	memset(&edac_dev->kobj, 0, sizeof(struct kobject));

	/* Record which module 'owns' this control structure
	 * and bump the ref count of the module
	 */
	edac_dev->owner = THIS_MODULE;

	if (!try_module_get(edac_dev->owner)) {
		err = -ENODEV;
		goto err_mod_get;
	}

	/* register */
	err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl,
				   &edac_class->kset.kobj,
				   "%s", edac_dev->name);
	if (err) {
		debugf1("%s() Failed to register '.../edac/%s'\n",
			__func__, edac_dev->name);
		goto err_kobj_reg;
	}
	kobject_uevent(&edac_dev->kobj, KOBJ_ADD);

	/* At this point, to 'free' the control struct,
	 * edac_device_unregister_sysfs_main_kobj() must be used
	 */

	debugf4("%s() Registered '.../edac/%s' kobject\n",
		__func__, edac_dev->name);

	return 0;

	/* Error exit stack */
err_kobj_reg:
	module_put(edac_dev->owner);

err_mod_get:
	edac_put_sysfs_class();

err_out:
	return err;
}
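
The header comment above gives the 0/!0 return convention, and the in-code note says the control structure can only be torn down through edac_device_unregister_sysfs_main_kobj(). A minimal, hedged caller sketch follows; the wrapper names are illustrative assumptions, only the register/unregister pairing is taken from the comments above.

/* Hedged caller sketch (wrapper names are assumptions): pair the register
 * call above with the unregister helper named in its comments. */
static int example_edac_sysfs_setup(struct edac_device_ctl_info *edac_dev)
{
	int err = edac_device_register_sysfs_main_kobj(edac_dev);

	if (err)	/* !0 means failure, per the convention above */
		return err;
	return 0;
}

static void example_edac_sysfs_teardown(struct edac_device_ctl_info *edac_dev)
{
	/* expected to drop the module and sysfs class references taken on
	 * register (assumption based on the error unwind path above) */
	edac_device_unregister_sysfs_main_kobj(edac_dev);
}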
Example #3
/*
  file is opened

  we don't do anything here beyond incrementing the module's
  reference count

  returns 0 for success
*/
int procfs_open(struct inode* inode, struct file* file)
{
  try_module_get(THIS_MODULE);
  return 0;
}
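
The reference taken in procfs_open() has to be dropped again when the file is closed, or the module can never be unloaded. A hedged counterpart sketch (the release handler below is an assumption, not part of the original snippet):

/*
  file is closed

  hedged counterpart (assumed, not from the original source): drop the
  reference taken in procfs_open() so the module can be unloaded again

  returns 0 for success
*/
int procfs_release(struct inode* inode, struct file* file)
{
  module_put(THIS_MODULE);
  return 0;
}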
Example #4
int draw_rgb888_screen(void)
{
	struct fb_info *fb = registered_fb[0];
	u32 height = fb->var.yres / 5;
	u32 line = fb->fix.line_length;
	u32 i, j;
#ifndef CONFIG_FRAMEBUFFER_CONSOLE
		struct module *owner;
#endif

	pr_info( "##############%s\n", __func__);

#ifndef CONFIG_FRAMEBUFFER_CONSOLE
		owner = fb->fbops->owner;
		if (!try_module_get(owner))
			return -ENODEV;
		if (fb->fbops->fb_open && fb->fbops->fb_open(fb, 0)) {
			module_put(owner);
			return -ENODEV;
		}
#endif
	for (i = 0; i < height; i++) {
		for (j = 0; j < fb->var.xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0x00, 1);
		}
	}

	for (i = height; i < height * 2; i++) {
		for (j = 0; j < fb->var.xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0x00, 1);
		}
	}

	for (i = height * 2; i < height * 3; i++) {
		for (j = 0; j < fb->var.xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0x00, 1);
		}
	}

	for (i = height * 3; i < height * 4; i++) {
		for (j = 0; j < fb->var.xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0x00, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0xff, 1);
		}
	}

	for (i = height * 4; i < height * 5; i++) {
		for (j = 0; j < fb->var.xres; j++) {
			memset(fb->screen_base + i * line + j * 4 + 0, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 1, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 2, 0xff, 1);
			memset(fb->screen_base + i * line + j * 4 + 3, 0x00, 1);
		}
	}

#if defined(CONFIG_FB_MSM_MIPI_NOVATEK_BOE_CMD_WVGA_PT)
		flush_cache_all();
		outer_flush_all();
#endif
	return 0;
}
Example #5
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *parent_tidptr,
					int __user *child_tidptr,
					int pid)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

#ifdef CONFIG_TRACE_IRQFLAGS
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
				p->user != &root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	p->pid = pid;
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup_delays_binfmt;

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
 	p->sched_time = 0;
	p->rchar = 0;		/* I/O counter: bytes read */
	p->wchar = 0;		/* I/O counter: bytes written */
	p->syscr = 0;		/* I/O counter: read syscalls */
	p->syscw = 0;		/* I/O counter: write syscalls */
	acct_clear_integrals(p);

 	p->it_virt_expires = cputime_zero;
	p->it_prof_expires = cputime_zero;
 	p->it_sched_expires = 0;
 	INIT_LIST_HEAD(&p->cpu_timers[0]);
 	INIT_LIST_HEAD(&p->cpu_timers[1]);
 	INIT_LIST_HEAD(&p->cpu_timers[2]);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->security = NULL;
	p->io_context = NULL;
	p->io_wait = NULL;
	p->audit_context = NULL;
	cpuset_fork(p);
#ifdef CONFIG_NUMA
 	p->mempolicy = mpol_copy(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
 		goto bad_fork_cleanup_cpuset;
 	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
	p->hardirqs_enabled = 0;
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

	rt_mutex_init_task(p);

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespace(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;

	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

	/* Our parent's execution domain becomes the current domain.
	   These must match for thread signalling to apply. */
	   
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
 	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_cleanup_namespace;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

		if (!cputime_eq(current->signal->it_virt_expires,
				cputime_zero) ||
		    !cputime_eq(current->signal->it_prof_expires,
				cputime_zero) ||
		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
		    !list_empty(&current->signal->cpu_timers[0]) ||
		    !list_empty(&current->signal->cpu_timers[1]) ||
		    !list_empty(&current->signal->cpu_timers[2])) {
			/*
			 * Have child wake up on its first tick to check
			 * for process CPU timers.
			 */
			p->it_prof_expires = jiffies_to_cputime(1);
		}
	}

	/*
	 * inherit ioprio
	 */
	p->ioprio = current->ioprio;

	if (likely(p->pid)) {
		add_parent(p);
		if (unlikely(p->ptrace & PT_PTRACED))
			__ptrace_link(p, current->parent);

		if (thread_group_leader(p)) {
			p->signal->tty = current->signal->tty;
			p->signal->pgrp = process_group(current);
			p->signal->session = current->signal->session;
			attach_pid(p, PIDTYPE_PGID, process_group(p));
			attach_pid(p, PIDTYPE_SID, p->signal->session);

			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, p->pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
bad_fork_cleanup_cpuset:
#endif
	cpuset_exit(p);
bad_fork_cleanup_delays_binfmt:
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
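
The header comment notes that the actual kick-off is left to the caller. A hedged sketch of that caller, loosely modeled on a 2.6-era do_fork(); the helper names and the simplified argument handling are assumptions:

/* Hedged caller sketch (assumption, loosely modeled on 2.6-era do_fork()):
 * copy_process() only builds the task, the caller performs the kick-off. */
static long example_do_fork(unsigned long clone_flags, unsigned long stack_start,
			    struct pt_regs *regs, unsigned long stack_size,
			    int __user *parent_tidptr, int __user *child_tidptr)
{
	struct task_struct *p;
	int pid = alloc_pidmap();	/* assumed PID allocation helper */

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 parent_tidptr, child_tidptr, pid);
	if (IS_ERR(p)) {
		free_pidmap(pid);	/* assumed counterpart */
		return PTR_ERR(p);
	}

	/* the "actual kick-off" mentioned in the comment above */
	wake_up_new_task(p, clone_flags);
	return pid;
}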
Example #6
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static task_t *copy_process(unsigned long clone_flags,
				 unsigned long stack_start,
				 struct pt_regs *regs,
				 unsigned long stack_size,
				 int __user *parent_tidptr,
				 int __user *child_tidptr,
				 int pid)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
				p->user != &root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(p->thread_info->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	copy_flags(clone_flags, p);
	p->pid = pid;
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup;

	p->proc_dentry = NULL;

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);
	spin_lock_init(&p->proc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->it_real_value = 0;
	p->it_real_incr = 0;
	p->it_virt_value = cputime_zero;
	p->it_virt_incr = cputime_zero;
	p->it_prof_value = cputime_zero;
	p->it_prof_incr = cputime_zero;
	init_timer(&p->real_timer);
	p->real_timer.data = (unsigned long) p;

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->rchar = 0;		/* I/O counter: bytes read */
	p->wchar = 0;		/* I/O counter: bytes written */
	p->syscr = 0;		/* I/O counter: read syscalls */
	p->syscw = 0;		/* I/O counter: write syscalls */
	acct_clear_integrals(p);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->security = NULL;
	p->io_context = NULL;
	p->io_wait = NULL;
	p->audit_context = NULL;
#ifdef CONFIG_NUMA
 	p->mempolicy = mpol_copy(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
 		goto bad_fork_cleanup;
 	}
#endif

	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespace(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);

	/* Our parent's execution domain becomes the current domain.
	   These must match for thread signalling to apply. */
	   
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/* Perform scheduler related setup */
	sched_fork(p);

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so cpus_allowed mask cannot
	 * have changed. The cpus_allowed mask of the parent may have
	 * changed after it was copied first time, and it may then move to
	 * another CPU - so we re-copy it here and set the child's CPU to
	 * the parent's CPU. This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	set_task_cpu(p, smp_processor_id());

	/*
	 * Check for pending SIGKILL! The new thread should not be allowed
	 * to slip out of an OOM kill. (or normal SIGKILL.)
	 */
	if (sigismember(&current->pending.signal, SIGKILL)) {
		write_unlock_irq(&tasklist_lock);
		retval = -EINTR;
		goto bad_fork_cleanup_namespace;
	}

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	if (clone_flags & CLONE_THREAD) {
		spin_lock(&current->sighand->siglock);
		/*
		 * Important: if an exit-all has been started then
		 * do not create this new thread - the whole thread
		 * group is supposed to exit anyway.
		 */
		if (current->signal->flags & SIGNAL_GROUP_EXIT) {
			spin_unlock(&current->sighand->siglock);
			write_unlock_irq(&tasklist_lock);
			retval = -EAGAIN;
			goto bad_fork_cleanup_namespace;
		}
		p->group_leader = current->group_leader;

		if (current->signal->group_stop_count > 0) {
			/*
			 * There is an all-stop in progress for the group.
			 * We ourselves will stop as soon as we check signals.
			 * Make the new thread part of that group stop too.
			 */
			current->signal->group_stop_count++;
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		spin_unlock(&current->sighand->siglock);
	}

	SET_LINKS(p);
	if (unlikely(p->ptrace & PT_PTRACED))
		__ptrace_link(p, current->parent);

	attach_pid(p, PIDTYPE_PID, p->pid);
	attach_pid(p, PIDTYPE_TGID, p->tgid);
	if (thread_group_leader(p)) {
		attach_pid(p, PIDTYPE_PGID, process_group(p));
		attach_pid(p, PIDTYPE_SID, p->signal->session);
		if (p->pid)
			__get_cpu_var(process_counts)++;
	}

	nr_threads++;
	total_forks++;
	write_unlock_irq(&tasklist_lock);
	retval = 0;

fork_out:
	if (retval)
		return ERR_PTR(retval);
	return p;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	exit_signal(p);
bad_fork_cleanup_sighand:
	exit_sighand(p);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
#endif
bad_fork_cleanup:
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(p->thread_info->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
	goto fork_out;
}
Example #7
File: pty.c Project: 7799/linux
/**
 *	pty_common_install		-	set up the pty pair
 *	@driver: the pty driver
 *	@tty: the tty being instantiated
 *	@legacy: true if this is BSD style
 *
 *	Perform the initial set up for the tty/pty pair. Called from the
 *	tty layer when the port is first opened.
 *
 *	Locking: the caller must hold the tty_mutex
 */
static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty,
		bool legacy)
{
	struct tty_struct *o_tty;
	struct tty_port *ports[2];
	int idx = tty->index;
	int retval = -ENOMEM;

	o_tty = alloc_tty_struct();
	if (!o_tty)
		goto err;
	ports[0] = kmalloc(sizeof **ports, GFP_KERNEL);
	ports[1] = kmalloc(sizeof **ports, GFP_KERNEL);
	if (!ports[0] || !ports[1])
		goto err_free_tty;
	if (!try_module_get(driver->other->owner)) {
		/* This cannot in fact currently happen */
		goto err_free_tty;
	}
	initialize_tty_struct(o_tty, driver->other, idx);

	if (legacy) {
		/* We always use new tty termios data so we can do this
		   the easy way .. */
		retval = tty_init_termios(tty);
		if (retval)
			goto err_deinit_tty;

		retval = tty_init_termios(o_tty);
		if (retval)
			goto err_free_termios;

		driver->other->ttys[idx] = o_tty;
		driver->ttys[idx] = tty;
	} else {
		memset(&tty->termios_locked, 0, sizeof(tty->termios_locked));
		tty->termios = driver->init_termios;
		memset(&o_tty->termios_locked, 0, sizeof(tty->termios_locked));
		o_tty->termios = driver->other->init_termios;
	}

	/*
	 * Everything allocated ... set up the o_tty structure.
	 */
	tty_driver_kref_get(driver->other);
	if (driver->subtype == PTY_TYPE_MASTER)
		o_tty->count++;
	/* Establish the links in both directions */
	tty->link   = o_tty;
	o_tty->link = tty;
	tty_port_init(ports[0]);
	tty_port_init(ports[1]);
	o_tty->port = ports[0];
	tty->port = ports[1];
	o_tty->port->itty = o_tty;

	tty_driver_kref_get(driver);
	tty->count++;
	return 0;
err_free_termios:
	if (legacy)
		tty_free_termios(tty);
err_deinit_tty:
	deinitialize_tty_struct(o_tty);
	module_put(o_tty->driver->owner);
err_free_tty:
	kfree(ports[0]);
	kfree(ports[1]);
	free_tty_struct(o_tty);
err:
	return retval;
}
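
The kernel-doc above says the tty layer calls this helper with tty_mutex held and that @legacy selects the BSD-style path. A hedged sketch of the thin install hooks that pick each flavour (treated as an assumption rather than quoted from this file):

/* Hedged wrapper sketch (assumption): the driver's install hooks simply pick
 * the legacy (BSD) or Unix98 flavour; tty_mutex is held by the tty layer. */
static int pty_install(struct tty_driver *driver, struct tty_struct *tty)
{
	return pty_common_install(driver, tty, true);	/* BSD-style pty pair */
}

static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
{
	return pty_common_install(driver, tty, false);	/* /dev/ptmx pair */
}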
Example #8
File: pty.c Project: DenisLug/mptcp
/**
 *	pty_common_install		-	set up the pty pair
 *	@driver: the pty driver
 *	@tty: the tty being instantiated
 *	@legacy: true if this is BSD style
 *
 *	Perform the initial set up for the tty/pty pair. Called from the
 *	tty layer when the port is first opened.
 *
 *	Locking: the caller must hold the tty_mutex
 */
static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty,
		bool legacy)
{
	struct tty_struct *o_tty;
	struct tty_port *ports[2];
	int idx = tty->index;
	int retval = -ENOMEM;

	/* Opening the slave first has always returned -EIO */
	if (driver->subtype != PTY_TYPE_MASTER)
		return -EIO;

	ports[0] = kmalloc(sizeof **ports, GFP_KERNEL);
	ports[1] = kmalloc(sizeof **ports, GFP_KERNEL);
	if (!ports[0] || !ports[1])
		goto err;
	if (!try_module_get(driver->other->owner)) {
		/* This cannot in fact currently happen */
		goto err;
	}
	o_tty = alloc_tty_struct(driver->other, idx);
	if (!o_tty)
		goto err_put_module;

	tty_set_lock_subclass(o_tty);
	lockdep_set_subclass(&o_tty->termios_rwsem, TTY_LOCK_SLAVE);

	if (legacy) {
		/* We always use new tty termios data so we can do this
		   the easy way .. */
		retval = tty_init_termios(tty);
		if (retval)
			goto err_deinit_tty;

		retval = tty_init_termios(o_tty);
		if (retval)
			goto err_free_termios;

		driver->other->ttys[idx] = o_tty;
		driver->ttys[idx] = tty;
	} else {
		memset(&tty->termios_locked, 0, sizeof(tty->termios_locked));
		tty->termios = driver->init_termios;
		memset(&o_tty->termios_locked, 0, sizeof(tty->termios_locked));
		o_tty->termios = driver->other->init_termios;
	}

	/*
	 * Everything allocated ... set up the o_tty structure.
	 */
	tty_driver_kref_get(driver->other);
	/* Establish the links in both directions */
	tty->link   = o_tty;
	o_tty->link = tty;
	tty_port_init(ports[0]);
	tty_port_init(ports[1]);
	tty_buffer_set_limit(ports[0], 8192);
	tty_buffer_set_limit(ports[1], 8192);
	o_tty->port = ports[0];
	tty->port = ports[1];
	o_tty->port->itty = o_tty;

	tty_buffer_set_lock_subclass(o_tty->port);

	tty_driver_kref_get(driver);
	tty->count++;
	o_tty->count++;
	return 0;
err_free_termios:
	if (legacy)
		tty_free_termios(tty);
err_deinit_tty:
	deinitialize_tty_struct(o_tty);
	free_tty_struct(o_tty);
err_put_module:
	module_put(driver->other->owner);
err:
	kfree(ports[0]);
	kfree(ports[1]);
	return retval;
}
Example #9
static int snd_info_entry_open(struct inode *inode, struct file *file)
{
	struct snd_info_entry *entry;
	struct snd_info_private_data *data;
	struct snd_info_buffer *buffer;
	struct proc_dir_entry *p;
	int mode, err;

	mutex_lock(&info_mutex);
	p = PDE(inode);
	entry = p == NULL ? NULL : (struct snd_info_entry *)p->data;
	if (entry == NULL || ! entry->p) {
		mutex_unlock(&info_mutex);
		return -ENODEV;
	}
	if (!try_module_get(entry->module)) {
		err = -EFAULT;
		goto __error1;
	}
	mode = file->f_flags & O_ACCMODE;
	if (mode == O_RDONLY || mode == O_RDWR) {
		if ((entry->content == SNDRV_INFO_CONTENT_DATA &&
		     entry->c.ops->read == NULL)) {
		    	err = -ENODEV;
		    	goto __error;
		}
	}
	if (mode == O_WRONLY || mode == O_RDWR) {
		if ((entry->content == SNDRV_INFO_CONTENT_DATA &&
		     entry->c.ops->write == NULL)) {
		    	err = -ENODEV;
		    	goto __error;
		}
	}
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL) {
		err = -ENOMEM;
		goto __error;
	}
	data->entry = entry;
	switch (entry->content) {
	case SNDRV_INFO_CONTENT_TEXT:
		if (mode == O_RDONLY || mode == O_RDWR) {
			buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
			if (buffer == NULL)
				goto __nomem;
			data->rbuffer = buffer;
			buffer->len = PAGE_SIZE;
			buffer->buffer = kmalloc(buffer->len, GFP_KERNEL);
			if (buffer->buffer == NULL)
				goto __nomem;
		}
		if (mode == O_WRONLY || mode == O_RDWR) {
			buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
			if (buffer == NULL)
				goto __nomem;
			data->wbuffer = buffer;
			buffer->len = PAGE_SIZE;
			buffer->buffer = kmalloc(buffer->len, GFP_KERNEL);
			if (buffer->buffer == NULL)
				goto __nomem;
		}
		break;
	case SNDRV_INFO_CONTENT_DATA:	/* data */
		if (entry->c.ops->open) {
			if ((err = entry->c.ops->open(entry, mode,
						      &data->file_private_data)) < 0) {
				kfree(data);
				goto __error;
			}
		}
		break;
	}
	file->private_data = data;
	mutex_unlock(&info_mutex);
	if (entry->content == SNDRV_INFO_CONTENT_TEXT &&
	    (mode == O_RDONLY || mode == O_RDWR)) {
		if (entry->c.text.read) {
			mutex_lock(&entry->access);
			entry->c.text.read(entry, data->rbuffer);
			mutex_unlock(&entry->access);
		}
	}
	return 0;

 __nomem:
	if (data->rbuffer) {
		kfree(data->rbuffer->buffer);
		kfree(data->rbuffer);
	}
	if (data->wbuffer) {
		kfree(data->wbuffer->buffer);
		kfree(data->wbuffer);
	}
	kfree(data);
	err = -ENOMEM;
      __error:
	module_put(entry->module);
      __error1:
	mutex_unlock(&info_mutex);
	return err;
}
Example #10
static int i2s_open(struct inode *inode, struct file *filp)
{
	int Ret;
	unsigned long data;
	int minor = iminor(inode);

	if (minor >= I2S_MAX_DEV)
		return -ENODEV;
	
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
	MOD_INC_USE_COUNT;
#else
	try_module_get(THIS_MODULE);
#endif

	if (filp->f_flags & O_NONBLOCK) {
		MSG("filp->f_flags O_NONBLOCK set\n");
		return -EAGAIN;
	}

	/* set i2s_config */
	pi2s_config = (i2s_config_type*)kmalloc(sizeof(i2s_config_type), GFP_KERNEL);
	if(pi2s_config==NULL)
		return -1;

	filp->private_data = pi2s_config;
	memset(pi2s_config, 0, sizeof(i2s_config_type));
	
#ifdef I2S_STATISTIC
	pi2s_status = (i2s_status_type*)kmalloc(sizeof(i2s_status_type), GFP_KERNEL);
	if(pi2s_status==NULL)
		return -1;
	memset(pi2s_status, 0, sizeof(i2s_status_type));	
#endif

	pi2s_config->flag = 0;
	pi2s_config->dmach = GDMA_I2S_TX0;
	pi2s_config->tx_ff_thres = CONFIG_I2S_TFF_THRES;
	pi2s_config->tx_ch_swap = CONFIG_I2S_CH_SWAP;
	pi2s_config->rx_ff_thres = CONFIG_I2S_TFF_THRES;
	pi2s_config->rx_ch_swap = CONFIG_I2S_CH_SWAP;
	pi2s_config->slave_en = CONFIG_I2S_SLAVE_EN;
	
	pi2s_config->srate = 44100;
	pi2s_config->txvol = 0;
	pi2s_config->rxvol = 0;
	pi2s_config->lbk = CONFIG_I2S_INLBK;
	pi2s_config->extlbk = CONFIG_I2S_EXLBK;
	pi2s_config->fmt = CONFIG_I2S_FMT;
	
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35)
	Ret = request_irq(SURFBOARDINT_I2S, i2s_irq_isr, IRQF_DISABLED, "Ralink_I2S", NULL);
#else
	Ret = request_irq(SURFBOARDINT_I2S, i2s_irq_isr, SA_INTERRUPT, "Ralink_I2S", NULL);
#endif
	
	if(Ret){
		MSG("IRQ %d is not free.\n", SURFBOARDINT_I2S);
		i2s_release(inode, filp);
		return -1;
	}
	
	pi2s_config->dmach = GDMA_I2S_TX0;
 
    init_waitqueue_head(&(pi2s_config->i2s_tx_qh));
    init_waitqueue_head(&(pi2s_config->i2s_rx_qh));
/*
#if defined(CONFIG_RALINK_RT63365)	
	data = i2s_inw(RALINK_SYSCTL_BASE+0x834);
	data |=1<<17;
	i2s_outw(RALINK_SYSCTL_BASE+0x834, data);
	data = i2s_inw(RALINK_SYSCTL_BASE+0x834);
	data &=~(1<<17);
	i2s_outw(RALINK_SYSCTL_BASE+0x834, data);
	audiohw_preinit();
#endif	
*/
	return 0;
}
Example #11
/**
 * ubi_open_volume - open UBI volume.
 * @ubi_num: UBI device number
 * @vol_id: volume ID
 * @mode: open mode
 *
 * The @mode parameter specifies if the volume should be opened in read-only
 * mode, read-write mode, or exclusive mode. The exclusive mode guarantees that
 * nobody else will be able to open this volume. UBI allows many volume
 * readers and one writer at a time.
 *
 * If a static volume is being opened for the first time since boot, it will be
 * checked by this function, which means it will be fully read and the CRC
 * checksum of each logical eraseblock will be checked.
 *
 * This function returns volume descriptor in case of success and a negative
 * error code in case of failure.
 */
struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
{
	int err;
	struct ubi_volume_desc *desc;
	struct ubi_device *ubi;
	struct ubi_volume *vol;

	dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode);

	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
		return ERR_PTR(-EINVAL);

	if (mode != UBI_READONLY && mode != UBI_READWRITE &&
	    mode != UBI_EXCLUSIVE && mode != UBI_METAONLY)
		return ERR_PTR(-EINVAL);

	/*
	 * First of all, we have to get the UBI device to prevent its removal.
	 */
	ubi = ubi_get_device(ubi_num);
	if (!ubi)
		return ERR_PTR(-ENODEV);

	if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
		err = -EINVAL;
		goto out_put_ubi;
	}

	desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL);
	if (!desc) {
		err = -ENOMEM;
		goto out_put_ubi;
	}

	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto out_free;

	spin_lock(&ubi->volumes_lock);
	vol = ubi->volumes[vol_id];
	if (!vol)
		goto out_unlock;

	err = -EBUSY;
	switch (mode) {
	case UBI_READONLY:
		if (vol->exclusive)
			goto out_unlock;
		vol->readers += 1;
		break;

	case UBI_READWRITE:
		if (vol->exclusive || vol->writers > 0)
			goto out_unlock;
		vol->writers += 1;
		break;

	case UBI_EXCLUSIVE:
		if (vol->exclusive || vol->writers || vol->readers ||
		    vol->metaonly)
			goto out_unlock;
		vol->exclusive = 1;
		break;

	case UBI_METAONLY:
		if (vol->metaonly || vol->exclusive)
			goto out_unlock;
		vol->metaonly = 1;
		break;
	}
	get_device(&vol->dev);
	vol->ref_count += 1;
	spin_unlock(&ubi->volumes_lock);

	desc->vol = vol;
	desc->mode = mode;

	mutex_lock(&ubi->ckvol_mutex);
	if (!vol->checked) {
		/* This is the first open - check the volume */
		err = ubi_check_volume(ubi, vol_id);
		if (err < 0) {
			mutex_unlock(&ubi->ckvol_mutex);
			ubi_close_volume(desc);
			return ERR_PTR(err);
		}
		if (err == 1) {
			ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
				 vol_id, ubi->ubi_num);
			vol->corrupted = 1;
		}
		vol->checked = 1;
	}
	mutex_unlock(&ubi->ckvol_mutex);

	return desc;

out_unlock:
	spin_unlock(&ubi->volumes_lock);
	module_put(THIS_MODULE);
out_free:
	kfree(desc);
out_put_ubi:
	ubi_put_device(ubi);
	ubi_err(ubi, "cannot open device %d, volume %d, error %d",
		ubi_num, vol_id, err);
	return ERR_PTR(err);
}
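
The kernel-doc above spells out the open modes and the ERR_PTR return convention. A minimal, hedged caller sketch; the wrapper function and the device/volume numbers are assumptions:

/* Hedged caller sketch (assumption, not from the source file): open volume 0
 * on UBI device 0 read-only and drop the references with ubi_close_volume(). */
static int example_open_volume(void)
{
	struct ubi_volume_desc *desc;

	desc = ubi_open_volume(0, 0, UBI_READONLY);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* ... read the volume here, e.g. via ubi_leb_read() ... */

	ubi_close_volume(desc);
	return 0;
}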
Example #12
static int k_looper_open(struct inode * inode, struct file *filp)
{
	return try_module_get(THIS_MODULE)? 0 : -EINVAL;
}
Example #13
int load_565rle_image(char *filename, bool bf_supported)
{
    int fd, err = 0;
    unsigned count, max;
    unsigned short *data, *bits, *ptr;
    struct fb_info *info;
#if 0
    struct module *owner;
#endif
    info = registered_fb[0];

    if (!info) {
        printk(KERN_WARNING "%s: Can not access framebuffer\n",
               __func__);
        return -ENODEV;
    }
#if 0
    owner = info->fbops->owner;
    if (!try_module_get(owner))
        return NULL;
    if (info->fbops->fb_open && info->fbops->fb_open(info, 0)) {
        module_put(owner);
        return NULL;
    }
#endif
    fd = sys_open(filename, O_RDONLY, 0);
    if (fd < 0) {
        printk(KERN_WARNING "%s: Can not open %s\n",
               __func__, filename);
        return -ENOENT;
    }
    printk("%s: open OK! %s\n",__func__, filename);
    count = (unsigned)sys_lseek(fd, (off_t)0, 2);
    if (count == 0) {
        err = -EIO;
        goto err_logo_close_file;
    }
    printk("%s: count %d\n",__func__, count);
    sys_lseek(fd, (off_t)0, 0);
    data = kmalloc(count, GFP_KERNEL);
    if (!data) {
        printk(KERN_WARNING "%s: Can not alloc data\n", __func__);
        err = -ENOMEM;
        goto err_logo_close_file;
    }
    if ((unsigned)sys_read(fd, (char *)data, count) != count) {
        err = -EIO;
        goto err_logo_free_data;
    }

    max = fb_width(info) * fb_height(info);

    ptr = data;
    bits = (unsigned short *)(info->screen_base);
    printk("%s: max %d, n %d 0x%x\n",__func__, max, ptr[0], (unsigned int)bits);
    while (count > 3) {
        unsigned n = ptr[0];
        if (n > max)
            break;

        memset16_rgb8888(bits, ptr[1], n << 1);
        bits += n*2; // for rgb8888
        max -= n;
        ptr += 2;
        count -= 4;
    }
#if !defined (CONFIG_USA_OPERATOR_ATT) && !defined (CONFIG_JPN_MODEL_SC_03D) && !defined (CONFIG_CAN_OPERATOR_RWC)
    if (!is_lpcharging_state() && !sec_debug_is_recovery_mode())
        s3cfb_start_progress(info);
#endif

err_logo_free_data:
    kfree(data);
err_logo_close_file:
    sys_close(fd);
    return err;
}
Example #14
static int load_exeso_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
    struct elfhdr *elf_ex;
    struct elf_phdr *elf_phdata = NULL;
    struct mm_struct *mm;
    unsigned long load_addr = 0;
    unsigned long error;
    int retval = 0;
    unsigned long pe_entry, ntdll_load_addr = 0;
    unsigned long start_code, end_code, start_data, end_data;
    unsigned long ntdll_entry;
    int executable_stack = EXSTACK_DEFAULT;
    unsigned long def_flags = 0;
    unsigned long stack_top;
#ifdef NTDLL_SO
    unsigned long	interp_load_addr;
    unsigned long	interp_entry;
#endif
    struct eprocess	*process;
    struct ethread	*thread;
    PRTL_USER_PROCESS_PARAMETERS	ppb;
    OBJECT_ATTRIBUTES	ObjectAttributes;
    INITIAL_TEB	init_teb;

    BOOLEAN is_win32=FALSE;
    struct startup_info *info=NULL;
    struct eprocess	*parent_eprocess=NULL;
    struct ethread	*parent_ethread=NULL;
    struct w32process* child_w32process =NULL;
    struct w32process* parent_w32process =NULL;

    elf_ex = (struct elfhdr *)bprm->buf;
    retval = -ENOEXEC;
    /* First of all, some simple consistency checks */
    if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
        goto out;
    if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
        goto out;
    if (!elf_check_arch(elf_ex))
        goto out;
    if (!bprm->file->f_op||!bprm->file->f_op->mmap)
        goto out;

    if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
        goto out;
    if (elf_ex->e_phnum < 1 ||
            elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
        goto out;

    if(!check_exeso(bprm))
        goto out;

    start_code = ~0UL;
    end_code = 0;
    start_data = 0;
    end_data = 0;

    if(current->parent->ethread)
    {
        is_win32 = TRUE;
        parent_ethread = current->parent->ethread;
        parent_eprocess = parent_ethread->threads_process;
    }

    /* Flush all traces of the currently running executable */
    retval = flush_old_exec(bprm);
    if (retval) {
        goto out;
    }

    /* OK, This is the point of no return */
    mm = current->mm;
    current->flags &= ~PF_FORKNOEXEC;
    mm->def_flags = def_flags;

    current->signal->rlim[RLIMIT_STACK].rlim_cur = WIN32_STACK_LIMIT;
    current->signal->rlim[RLIMIT_STACK].rlim_max = WIN32_STACK_LIMIT;
    current->personality |= ADDR_COMPAT_LAYOUT;
    arch_pick_mmap_layout(mm);

    /* Do this so that we can load the ntdll, if need be.  We will
       change some of these later */
    mm->free_area_cache = mm->mmap_base = WIN32_UNMAPPED_BASE;
    mm->cached_hole_size = 0;
    stack_top = WIN32_STACK_LIMIT + WIN32_LOWEST_ADDR;
    retval = setup_arg_pages(bprm, stack_top, executable_stack);
    if (retval < 0)
        goto out_free_file;

    down_write(&mm->mmap_sem);
    /* reserve first 0x100000 */
    do_mmap_pgoff(NULL, 0, WIN32_LOWEST_ADDR, PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0);
    /* reserve first 0x7fff0000 - 0x80000000 */
    do_mmap_pgoff(NULL, WIN32_TASK_SIZE - 0x10000, 0x10000,
                  PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0);
    /* reserve first 0x81000000 - 0xc0000000
     * 0x80000000 - 0x81000000 used for wine SYSTEM_HEAP */
    do_mmap_pgoff(NULL, WIN32_TASK_SIZE + WIN32_SYSTEM_HEAP_SIZE,
                  TASK_SIZE - WIN32_TASK_SIZE - WIN32_SYSTEM_HEAP_SIZE,
                  PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0);
    up_write(&mm->mmap_sem);

#ifdef NTDLL_SO
    /* search ntdll.dll.so in $PATH, default is /usr/local/lib/wine/ntdll.dll.so */
    if (!*ntdll_name)
        search_ntdll();

    /* map ntdll.dll.so */
    map_system_dll(current, ntdll_name, &ntdll_load_addr, &interp_load_addr);

    pe_entry = get_pe_entry();
    ntdll_entry = get_ntdll_entry();
    interp_entry = get_interp_entry();
#endif

    set_binfmt(&exeso_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
    retval = arch_setup_additional_pages(bprm, executable_stack);
    if (retval < 0) {
        goto out_free_file;
    }
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

    install_exec_creds(bprm);
    current->flags &= ~PF_FORKNOEXEC;

#ifdef NTDLL_SO
    /* copy argv, env, and auxvec to stack, all for interpreter */
    create_elf_tables_aux(bprm,
                          ntdll_load_addr, ntdll_phoff, ntdll_phnum, get_ntdll_start_thunk(),
                          load_addr, elf_ex->e_phoff, elf_ex->e_phnum, 0,
                          interp_load_addr, interp_entry, 0);
#endif

    mm->end_code = end_code;
    mm->start_code = start_code;
    mm->start_data = start_data;
    mm->end_data = end_data;
    mm->start_stack = bprm->p;

    if (current->personality & MMAP_PAGE_ZERO) {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh.  */
        down_write(&mm->mmap_sem);
        error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
                        MAP_FIXED | MAP_PRIVATE, 0);
        up_write(&mm->mmap_sem);
    }


    /* create win-related structure */
    INIT_OBJECT_ATTR(&ObjectAttributes, NULL, 0, NULL, NULL);

    /* Create EPROCESS */
    retval = create_object(KernelMode,
                           process_object_type,
                           &ObjectAttributes,
                           KernelMode,
                           NULL,
                           sizeof(struct eprocess),
                           0,
                           0,
                           (PVOID *)&process);
    if (retval != STATUS_SUCCESS) {
        goto out_free_file;
    }

    /* init eprocess */
    eprocess_init(NULL, FALSE, process);
    process->unique_processid = create_cid_handle(process, process_object_type);
    if (!process->unique_processid)
        goto out_free_eproc;

    /* initialize EProcess and KProcess */
    process->section_base_address = (void *)load_addr;

    /* FIXME: PsCreateCidHandle */

    /* Create PEB */
    if ((retval = create_peb(process)))
        goto out_free_process_cid;

    /* Create PPB */
    if(is_win32 == FALSE)
    {
        create_ppb(&ppb, process, bprm, bprm->filename, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
        ((PEB *)process->peb)->ProcessParameters = ppb;
    }
    /* allocate a Win32 thread object */
    retval = create_object(KernelMode,
                           thread_object_type,
                           &ObjectAttributes,
                           KernelMode,
                           NULL,
                           sizeof(struct ethread),
                           0,
                           0,
                           (PVOID *)&thread);
    if (retval) {
        goto out_free_process_cid;
    }

    thread->cid.unique_thread = create_cid_handle(thread, thread_object_type);
    thread->cid.unique_process = process->unique_processid;
    if (!thread->cid.unique_thread)
        goto out_free_ethread;

    /* set the teb */
    init_teb.StackBase = (PVOID)(bprm->p);
    init_teb.StackLimit = (PVOID)WIN32_LOWEST_ADDR + PAGE_SIZE;
    thread->tcb.teb = create_teb(process, (PCLIENT_ID)&thread->cid, &init_teb);
    if (IS_ERR(thread->tcb.teb)) {
        retval = PTR_ERR(thread->tcb.teb);
        goto out_free_thread_cid;
    }

    /* Init KThreaad */
    ethread_init(thread, process, current);

    sema_init(&thread->exec_semaphore,0);
    if (is_win32 == TRUE) //parent is a windows process
    {
        down(&thread->exec_semaphore);  //wait for the parent

        child_w32process = process->win32process;
        parent_w32process = parent_eprocess->win32process;
        info = child_w32process->startup_info;

        //now parent has finished its work
        if(thread->inherit_all)
        {
            create_handle_table(parent_eprocess, TRUE, process);
            child_w32process = create_w32process(parent_w32process, TRUE, process);
        }
    }

    deref_object(process);
    deref_object(thread);

    set_teb_selector(current, (long)thread->tcb.teb);

    thread->start_address = (void *)pe_entry;	/* FIXME */

    /* save current trap frame */
    thread->tcb.trap_frame = (struct ktrap_frame *)regs;

    /* init apc, to call LdrInitializeThunk */
#if 0
    thread_apc = kmalloc(sizeof(KAPC), GFP_KERNEL);
    if (!thread_apc) {
        retval = -ENOMEM;
        goto out_free_thread_cid;
    }
    apc_init(thread_apc,
             &thread->tcb,
             OriginalApcEnvironment,
             thread_special_apc,
             NULL,
             (PKNORMAL_ROUTINE)ntdll_entry,
             UserMode,
             (void *)(bprm->p + 12));
    insert_queue_apc(thread_apc, (void *)interp_entry, (void *)extra_page, IO_NO_INCREMENT);
#ifndef TIF_APC
#define	TIF_APC	13
#endif
    set_tsk_thread_flag(current, TIF_APC);
#endif

#ifdef ELF_PLAT_INIT
    /*
     * The ABI may specify that certain registers be set up in special
     * ways (on i386 %edx is the address of a DT_FINI function, for
     * example.  In addition, it may also specify (eg, PowerPC64 ELF)
     * that the e_entry field is the address of the function descriptor
     * for the startup routine, rather than the address of the startup
     * routine itself.  This macro performs whatever initialization to
     * the regs structure is required as well as any relocations to the
     * function descriptor entries when executing dynamically links apps.
     */
    ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

    start_thread(regs, interp_entry, bprm->p);
    if (unlikely(current->ptrace & PT_PTRACED)) {
        if (current->ptrace & PT_TRACE_EXEC)
            ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
        else
            send_sig(SIGTRAP, current, 0);
    }

    retval = 0;

    try_module_get(THIS_MODULE);

    /* return from w32syscall_exit, not syscall_exit */
    ((unsigned long *)regs)[-1] = (unsigned long)w32syscall_exit;
    regs->fs = TEB_SELECTOR;

out:
    if(elf_phdata)
        kfree(elf_phdata);
    return retval;

    /* error cleanup */
out_free_thread_cid:
    delete_cid_handle(thread->cid.unique_thread, thread_object_type);
out_free_ethread:
    deref_object(thread);
out_free_process_cid:
    delete_cid_handle(process->unique_processid, process_object_type);
out_free_eproc:
    deref_object(process);
out_free_file:
    send_sig(SIGKILL, current, 0);
    goto out;
}
Example #15
static int nfnl_compat_get_rcu(struct net *net, struct sock *nfnl,
			       struct sk_buff *skb, const struct nlmsghdr *nlh,
			       const struct nlattr * const tb[],
			       struct netlink_ext_ack *extack)
{
	int ret = 0, target;
	struct nfgenmsg *nfmsg;
	const char *fmt;
	const char *name;
	u32 rev;
	struct sk_buff *skb2;

	if (tb[NFTA_COMPAT_NAME] == NULL ||
	    tb[NFTA_COMPAT_REV] == NULL ||
	    tb[NFTA_COMPAT_TYPE] == NULL)
		return -EINVAL;

	name = nla_data(tb[NFTA_COMPAT_NAME]);
	rev = ntohl(nla_get_be32(tb[NFTA_COMPAT_REV]));
	target = ntohl(nla_get_be32(tb[NFTA_COMPAT_TYPE]));

	nfmsg = nlmsg_data(nlh);

	switch(nfmsg->nfgen_family) {
	case AF_INET:
		fmt = "ipt_%s";
		break;
	case AF_INET6:
		fmt = "ip6t_%s";
		break;
	case NFPROTO_BRIDGE:
		fmt = "ebt_%s";
		break;
	case NFPROTO_ARP:
		fmt = "arpt_%s";
		break;
	default:
		pr_err("nft_compat: unsupported protocol %d\n",
			nfmsg->nfgen_family);
		return -EINVAL;
	}

	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	rcu_read_unlock();
	try_then_request_module(xt_find_revision(nfmsg->nfgen_family, name,
						 rev, target, &ret),
						 fmt, name);
	if (ret < 0)
		goto out_put;

	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb2 == NULL) {
		ret = -ENOMEM;
		goto out_put;
	}

	/* include the best revision for this extension in the message */
	if (nfnl_compat_fill_info(skb2, NETLINK_CB(skb).portid,
				  nlh->nlmsg_seq,
				  NFNL_MSG_TYPE(nlh->nlmsg_type),
				  NFNL_MSG_COMPAT_GET,
				  nfmsg->nfgen_family,
				  name, ret, target) <= 0) {
		kfree_skb(skb2);
		goto out_put;
	}

	ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
				MSG_DONTWAIT);
	if (ret > 0)
		ret = 0;
out_put:
	rcu_read_lock();
	module_put(THIS_MODULE);
	return ret == -EAGAIN ? -ENOBUFS : ret;
}
Example #16
static int check_perm(struct inode * inode, struct file * file)
{
	struct config_item *item = configfs_get_config_item(file->f_path.dentry->d_parent);
	struct configfs_attribute * attr = to_attr(file->f_path.dentry);
	struct configfs_buffer * buffer;
	struct configfs_item_operations * ops = NULL;
	int error = 0;

	if (!item || !attr)
		goto Einval;

	/* Grab the module reference for this attribute if we have one */
	if (!try_module_get(attr->ca_owner)) {
		error = -ENODEV;
		goto Done;
	}

	if (item->ci_type)
		ops = item->ci_type->ct_item_ops;
	else
		goto Eaccess;

	/* File needs write support.
	 * The inode's perms must say it's ok,
	 * and we must have a store method.
	 */
	if (file->f_mode & FMODE_WRITE) {

		if (!(inode->i_mode & S_IWUGO) || !ops->store_attribute)
			goto Eaccess;

	}

	/* File needs read support.
	 * The inode's perms must say it's ok, and there
	 * must be a show method for it.
	 */
	if (file->f_mode & FMODE_READ) {
		if (!(inode->i_mode & S_IRUGO) || !ops->show_attribute)
			goto Eaccess;
	}

	/* No error? Great, allocate a buffer for the file, and store it
	 * in file->private_data for easy access.
	 */
	buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
	if (!buffer) {
		error = -ENOMEM;
		goto Enomem;
	}
	mutex_init(&buffer->mutex);
	buffer->needs_read_fill = 1;
	buffer->ops = ops;
	file->private_data = buffer;
	goto Done;

 Einval:
	error = -EINVAL;
	goto Done;
 Eaccess:
	error = -EACCES;
 Enomem:
	module_put(attr->ca_owner);
 Done:
	if (error && item)
		config_item_put(item);
	return error;
}
Example #17
void *pil_get(const char *name)
{
	int ret;
	struct pil_device *pil;
	struct pil_device *pil_d;
	void *retval;
#ifdef CONFIG_MSM8960_ONLY
	static int modem_initialized = 0;
	int loop_count = 0;
#endif

	if (!name)
		return NULL;

	pil = retval = find_peripheral(name);
	if (!pil)
		return ERR_PTR(-ENODEV);
	if (!try_module_get(pil->owner)) {
		put_device(&pil->dev);
		return ERR_PTR(-ENODEV);
	}

	pil_d = pil_get(pil->desc->depends_on);
	if (IS_ERR(pil_d)) {
		retval = pil_d;
		goto err_depends;
	}

#ifdef CONFIG_MSM8960_ONLY
	if (!strcmp("modem", name)) {
		while (unlikely(!modem_initialized && strcmp("rmt_storage", current->comm) && loop_count++ < 10)) {
			
			printk("%s: %s(%d) waiting for rmt_storage %d\n", __func__, current->comm, current->pid, loop_count);
			msleep(500);
		}
	}
#endif
	mutex_lock(&pil->lock);
	if (!pil->count) {
		if (!strcmp("modem", name)) {
			printk("%s: %s(%d) for %s\n", __func__, current->comm, current->pid, name);
#ifdef CONFIG_MSM8960_ONLY
			modem_initialized = 1;
#endif
		}
		ret = load_image(pil);
		if (ret) {
			retval = ERR_PTR(ret);
			goto err_load;
		}
	}
	pil->count++;
	pil_set_state(pil, PIL_ONLINE);
	mutex_unlock(&pil->lock);
#if defined(CONFIG_MSM8930_ONLY)
	if (!strcmp("modem", name)) {
		complete_all(&pil_work_finished);
	}
#elif defined(CONFIG_ARCH_APQ8064)
		complete_all(&pil_work_finished);
#endif
out:
	return retval;
err_load:
	mutex_unlock(&pil->lock);
	pil_put(pil_d);
err_depends:
	put_device(&pil->dev);
	module_put(pil->owner);
	goto out;
}
Example #18
static int isert_cm_conn_req_handler(struct rdma_cm_id *cm_id,
				     struct rdma_cm_event *event)
{
	/* passed in rdma_create_id */
	struct isert_portal *portal = cm_id->context;
	struct ib_device *ib_dev = cm_id->device;
	struct isert_device *isert_dev;
	struct isert_connection *isert_conn;
	struct rdma_conn_param *ini_conn_param;
	struct rdma_conn_param tgt_conn_param;
	struct isert_cm_hdr cm_hdr = { 0 };
	int err;

	TRACE_ENTRY();

	if (unlikely(!try_module_get(THIS_MODULE))) {
		err = -EINVAL;
		goto fail_get;
	}

	mutex_lock(&dev_list_mutex);
	isert_dev = isert_device_find(ib_dev);
	if (!isert_dev) {
		isert_dev = isert_device_create(ib_dev);
		if (unlikely(IS_ERR(isert_dev))) {
			err = PTR_ERR(isert_dev);
			mutex_unlock(&dev_list_mutex);
			goto fail_dev_create;
		}
	}
	isert_dev->refcnt++;
	mutex_unlock(&dev_list_mutex);

	isert_conn = isert_conn_create(cm_id, isert_dev);
	if (unlikely(IS_ERR(isert_conn))) {
		err = PTR_ERR(isert_conn);
		goto fail_conn_create;
	}

	isert_conn->state = ISER_CONN_HANDSHAKE;
	isert_conn->portal = portal;

	mutex_lock(&dev_list_mutex);
	list_add_tail(&isert_conn->portal_node, &portal->conn_list);
	mutex_unlock(&dev_list_mutex);

	/* initiator is dst, target is src */
	memcpy(&isert_conn->peer_addr, &cm_id->route.addr.dst_addr,
	       sizeof(isert_conn->peer_addr));
	memcpy(&isert_conn->self_addr, &cm_id->route.addr.src_addr,
	       sizeof(isert_conn->self_addr));

	ini_conn_param = &event->param.conn;
	memset(&tgt_conn_param, 0, sizeof(tgt_conn_param));
	tgt_conn_param.flow_control =
		ini_conn_param->flow_control;
	tgt_conn_param.rnr_retry_count =
		ini_conn_param->rnr_retry_count;

	tgt_conn_param.initiator_depth = isert_dev->device_attr.max_qp_init_rd_atom;
	if (tgt_conn_param.initiator_depth > ini_conn_param->initiator_depth)
		tgt_conn_param.initiator_depth = ini_conn_param->initiator_depth;

	tgt_conn_param.private_data_len = sizeof(cm_hdr);
	tgt_conn_param.private_data = &cm_hdr;
	cm_hdr.flags = ISER_ZBVA_NOT_SUPPORTED | ISER_SEND_W_INV_NOT_SUPPORTED;

	kref_get(&isert_conn->kref);

	err = rdma_accept(cm_id, &tgt_conn_param);
	if (unlikely(err)) {
		pr_err("Failed to accept conn request, err:%d\n", err);
		goto fail_accept;
	}

	switch (isert_conn->peer_addr.ss_family) {
	case AF_INET:
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
		pr_info("iser accepted connection cm_id:%p "
			 NIPQUAD_FMT "->" NIPQUAD_FMT "\n", cm_id,
			 NIPQUAD(((struct sockaddr_in *)&isert_conn->peer_addr)->sin_addr.s_addr),
			 NIPQUAD(((struct sockaddr_in *)&isert_conn->self_addr)->sin_addr.s_addr));
#else
		pr_info("iser accepted connection cm_id:%p "
			"%pI4->%pI4\n", cm_id,
			&((struct sockaddr_in *)&isert_conn->peer_addr)->sin_addr.s_addr,
			&((struct sockaddr_in *)&isert_conn->self_addr)->sin_addr.s_addr);
#endif
		break;
	case AF_INET6:
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
		pr_info("iser accepted connection cm_id:%p "
			 NIP6_FMT "->" NIP6_FMT "\n", cm_id,
			 NIP6(((struct sockaddr_in6 *)&isert_conn->peer_addr)->sin6_addr),
			 NIP6(((struct sockaddr_in6 *)&isert_conn->self_addr)->sin6_addr));
#else
		pr_info("iser accepted connection cm_id:%p "
			"%pI6->%pI6\n", cm_id,
			&((struct sockaddr_in6 *)&isert_conn->peer_addr)->sin6_addr,
			&((struct sockaddr_in6 *)&isert_conn->self_addr)->sin6_addr);
#endif
		break;
	default:
		pr_info("iser accepted connection cm_id:%p\n", cm_id);
	}

out:
	TRACE_EXIT_RES(err);
	return err;

fail_accept:
	set_bit(ISERT_CONNECTION_ABORTED, &isert_conn->flags);
	isert_cm_timewait_exit_handler(cm_id, NULL);
	err = 0;
	goto out;

fail_conn_create:
	mutex_lock(&dev_list_mutex);
	isert_deref_device(isert_dev);
	mutex_unlock(&dev_list_mutex);
fail_dev_create:
	rdma_reject(cm_id, NULL, 0);
fail_get:
	module_put(THIS_MODULE);
	goto out;
}
Ejemplo n.º 19
0
static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
{
	struct sh_mobile_lcdc_chan *ch;
	struct sh_mobile_lcdc_board_cfg	*board_cfg;
	unsigned long tmp;
	int bpp = 0;
	unsigned long ldddsr;
	int k, m;
	int ret = 0;

	/* enable clocks before accessing the hardware */
	for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
		if (priv->ch[k].enabled) {
			sh_mobile_lcdc_clk_on(priv);
			if (!bpp)
				bpp = priv->ch[k].info->var.bits_per_pixel;
		}
	}

	/* reset */
	lcdc_write(priv, _LDCNT2R, lcdc_read(priv, _LDCNT2R) | LCDC_RESET);
	lcdc_wait_bit(priv, _LDCNT2R, LCDC_RESET, 0);

	/* enable LCDC channels */
	tmp = lcdc_read(priv, _LDCNT2R);
	tmp |= priv->ch[0].enabled;
	tmp |= priv->ch[1].enabled;
	lcdc_write(priv, _LDCNT2R, tmp);

	/* read data from external memory, avoid using the BEU for now */
	lcdc_write(priv, _LDCNT2R, lcdc_read(priv, _LDCNT2R) & ~DISPLAY_BEU);

	/* stop the lcdc first */
	sh_mobile_lcdc_start_stop(priv, 0);

	/* configure clocks */
	tmp = priv->lddckr;
	for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
		ch = &priv->ch[k];

		if (!priv->ch[k].enabled)
			continue;

		m = ch->cfg.clock_divider;
		if (!m)
			continue;

		if (m == 1)
			m = 1 << 6;
		tmp |= m << (lcdc_chan_is_sublcd(ch) ? 8 : 0);

		/* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider denominator */
		lcdc_write_chan(ch, LDDCKPAT1R, 0);
		lcdc_write_chan(ch, LDDCKPAT2R, (1 << (m/2)) - 1);
	}

	lcdc_write(priv, _LDDCKR, tmp);

	/* start dotclock again */
	lcdc_write(priv, _LDDCKSTPR, 0);
	lcdc_wait_bit(priv, _LDDCKSTPR, ~0, 0);

	/* interrupts are disabled to begin with */
	lcdc_write(priv, _LDINTR, 0);

	for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
		ch = &priv->ch[k];

		if (!ch->enabled)
			continue;

		sh_mobile_lcdc_geometry(ch);

		/* power supply */
		lcdc_write_chan(ch, LDPMR, 0);

		board_cfg = &ch->cfg.board_cfg;
		if (board_cfg->setup_sys)
			ret = board_cfg->setup_sys(board_cfg->board_data, ch,
						   &sh_mobile_lcdc_sys_bus_ops);
		if (ret)
			return ret;
	}

	/* word and long word swap */
	ldddsr = lcdc_read(priv, _LDDDSR);
	if (priv->ch[0].info->var.nonstd)
		lcdc_write(priv, _LDDDSR, ldddsr | 7);
	else {
		switch (bpp) {
		case 16:
			lcdc_write(priv, _LDDDSR, ldddsr | 6);
			break;
		case 24:
			lcdc_write(priv, _LDDDSR, ldddsr | 7);
			break;
		case 32:
			lcdc_write(priv, _LDDDSR, ldddsr | 4);
			break;
		}
	}

	for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
		ch = &priv->ch[k];

		if (!priv->ch[k].enabled)
			continue;

		/* set bpp format in PKF[4:0] */
		tmp = lcdc_read_chan(ch, LDDFR);
		tmp &= ~0x0003031f;
		if (ch->info->var.nonstd) {
			tmp |= (ch->info->var.nonstd << 16);
			switch (ch->info->var.bits_per_pixel) {
			case 12:
				break;
			case 16:
				tmp |= (0x1 << 8);
				break;
			case 24:
				tmp |= (0x2 << 8);
				break;
			}
		} else {
			switch (ch->info->var.bits_per_pixel) {
			case 16:
				tmp |= 0x03;
				break;
			case 24:
				tmp |= 0x0b;
				break;
			case 32:
				break;
			}
		}
		lcdc_write_chan(ch, LDDFR, tmp);

		/* point out our frame buffer */
		lcdc_write_chan(ch, LDSA1R, ch->info->fix.smem_start);
		if (ch->info->var.nonstd)
			lcdc_write_chan(ch, LDSA2R,
				ch->info->fix.smem_start +
				ch->info->var.xres *
				ch->info->var.yres_virtual);

		/* set line size */
		lcdc_write_chan(ch, LDMLSR, ch->info->fix.line_length);

		/* setup deferred io if SYS bus */
		tmp = ch->cfg.sys_bus_cfg.deferred_io_msec;
		if (ch->ldmt1r_value & (1 << 12) && tmp) {
			ch->defio.deferred_io = sh_mobile_lcdc_deferred_io;
			ch->defio.delay = msecs_to_jiffies(tmp);
			ch->info->fbdefio = &ch->defio;
			fb_deferred_io_init(ch->info);

			/* one-shot mode */
			lcdc_write_chan(ch, LDSM1R, 1);

			/* enable "Frame End Interrupt Enable" bit */
			lcdc_write(priv, _LDINTR, LDINTR_FE);

		} else {
			/* continuous read mode */
			lcdc_write_chan(ch, LDSM1R, 0);
		}
	}

	/* display output */
	lcdc_write(priv, _LDCNT1R, LCDC_ENABLE);

	/* start the lcdc */
	sh_mobile_lcdc_start_stop(priv, 1);
	priv->started = 1;

	/* tell the board code to enable the panel */
	for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
		ch = &priv->ch[k];
		if (!ch->enabled)
			continue;

		board_cfg = &ch->cfg.board_cfg;
		if (board_cfg->display_on && try_module_get(board_cfg->owner)) {
			board_cfg->display_on(board_cfg->board_data, ch->info);
			module_put(board_cfg->owner);
		}

		if (ch->bl) {
			ch->bl->props.power = FB_BLANK_UNBLANK;
			backlight_update_status(ch->bl);
		}
	}

	return 0;
}
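
The display_on block near the end of the function pins the board-code module only while its callback runs. The corresponding stop path would presumably mirror that pattern for display_off; the helper below is a hedged sketch, not part of the listing, and the display_off member and the helper name are assumptions based on the cfg layout used above.

static void sh_mobile_lcdc_display_off(struct sh_mobile_lcdc_chan *ch)
{
	struct sh_mobile_lcdc_board_cfg *board_cfg = &ch->cfg.board_cfg;

	/* pin the board code module only for the duration of the callback */
	if (board_cfg->display_off && try_module_get(board_cfg->owner)) {
		board_cfg->display_off(board_cfg->board_data);
		module_put(board_cfg->owner);
	}
}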
Ejemplo n.º 20
0
int ksnd_pcm_open(ksnd_pcm_t **kpcm,
				  int card,
				  int device,
				  snd_pcm_stream_t stream)
{
	int err = 0;
	ksnd_pcm_t *xkpcm;
	int minor;
	int stream_type, device_type;
	snd_pcm_t *pcm;
	xkpcm = kzalloc(sizeof(ksnd_pcm_t), GFP_KERNEL);
	if (!xkpcm)
	{
		err = -ENOMEM;
		goto _error_do_nothing;
	}
	if (stream == SND_PCM_STREAM_PLAYBACK)
	{
		stream_type = SNDRV_PCM_STREAM_PLAYBACK;
		device_type = SNDRV_DEVICE_TYPE_PCM_PLAYBACK;
	}
	else if (stream == SND_PCM_STREAM_CAPTURE)
	{
		stream_type = SNDRV_PCM_STREAM_CAPTURE;
		device_type = SNDRV_DEVICE_TYPE_PCM_CAPTURE;
	}
	else
	{
		err = -ENODEV;
		goto _error_do_free;
	}
#if defined(__TDT__)
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,17)
	minor = snd_get_minor(device_type, card, device);
#else
	minor = SNDRV_MINOR(card, device_type + device);
#endif
#else /* TDT */
#if defined (CONFIG_KERNELVERSION)
	minor = snd_find_minor(device_type, card, device);
#else /* STLinux 2.2 */
	minor = SNDRV_MINOR(card, device_type + device);
#endif
#endif
	if (minor < 0)
	{
		err = -ENODEV;
		goto _error_do_free;
	}
#if 0
	printk(KERN_DEBUG "Opening ALSA device hw:%d,%d for %s...\n",
		   card, device, stream == SND_PCM_STREAM_PLAYBACK ?
		   "playback" : "capture");
#endif
	pcm = snd_lookup_minor_data(minor, device_type);
	if (pcm == NULL)
	{
		err = -ENODEV;
		goto _error_do_free;
	}
	if (!try_module_get(pcm->card->module))
	{
		err = -EFAULT;
		goto _error_do_free;
	}
	mutex_lock(&pcm->open_mutex);
	err = snd_pcm_open_substream(pcm, stream_type, &default_file, &(xkpcm->substream));
	/* We don't support blocking open here, just fail if busy */
	if (err == -EAGAIN)
	{
		err = -EBUSY;
	}
	mutex_unlock(&pcm->open_mutex);
	if (err < 0)
		goto _error_do_put_and_free;
	*kpcm = xkpcm;
	return err;
_error_do_put_and_free:
	module_put(pcm->card->module);
_error_do_free:
	kfree(xkpcm);
_error_do_nothing:
	return err;
}
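
Every reference taken with try_module_get() in the open helper above has to be dropped when the handle is torn down. Below is a minimal close sketch; the function name and the exact teardown call are assumptions (snd_pcm_release_substream() is the in-kernel counterpart of snd_pcm_open_substream()), so treat it as illustrative rather than this driver's actual API.

void ksnd_pcm_close(ksnd_pcm_t *kpcm)
{
	snd_pcm_t *pcm = kpcm->substream->pcm;

	/* release the substream opened above, then drop the card module ref */
	snd_pcm_release_substream(kpcm->substream);
	module_put(pcm->card->module);
	kfree(kpcm);
}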
Ejemplo n.º 21
0
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
			struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);

	/* FIXME:
	 * Until xprtrdma supports DEVICE_REMOVAL, the provider must
	 * be pinned while there are active NFS/RDMA mounts to prevent
	 * hangs and crashes at umount time.
	 */
	if (!ia->ri_async_rc && !try_module_get(id->device->owner)) {
		dprintk("RPC:       %s: Failed to get device module\n",
			__func__);
		ia->ri_async_rc = -ENODEV;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto put;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto put;

	return id;
put:
	module_put(id->device->owner);
out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}
Ejemplo n.º 22
0
static void kgdbts_pre_exp_handler(void)
{
    /* Increment the module count when the debugger is active */
    if (!kgdb_connected)
        try_module_get(THIS_MODULE);
}
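
The pre-exception hook above only takes the reference; a matching post-exception hook would release it once the debugger detaches. A minimal sketch, assuming the same kgdb_connected guard (the function name is not taken from the listing above):

static void kgdbts_post_exp_handler(void)
{
    /* Drop the module count once the debugger is no longer active */
    if (!kgdb_connected)
        module_put(THIS_MODULE);
}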
Ejemplo n.º 23
0
/* 565RLE image format: [count(2 bytes), rle(2 bytes)] */
int load_565rle_image(char *filename, bool bf_supported)
{
	struct fb_info *info;
	int fd, count, err = 0;
	unsigned max;
	unsigned short *data, *bits, *ptr;
#ifndef CONFIG_FRAMEBUFFER_CONSOLE
	struct module *owner;
#endif
	int pad;

	info = registered_fb[0];
	if (!info) {
		printk(KERN_WARNING "%s: Can not access framebuffer\n",
			__func__);
		return -ENODEV;
	}
#ifndef CONFIG_FRAMEBUFFER_CONSOLE
	owner = info->fbops->owner;
	if (!try_module_get(owner))
		return -ENODEV;
	if (info->fbops->fb_open && info->fbops->fb_open(info, 0)) {
		module_put(owner);
		return -ENODEV;
	}
#endif

	fd = sys_open(filename, O_RDONLY, 0);
	if (fd < 0) {
		printk(KERN_WARNING "%s: Can not open %s\n",
			__func__, filename);
		return -ENOENT;
	}
	count = sys_lseek(fd, (off_t)0, 2);
	if (count <= 0) {
		err = -EIO;
		goto err_logo_close_file;
	}
	sys_lseek(fd, (off_t)0, 0);
	data = kmalloc(count, GFP_KERNEL);
	if (!data) {
		printk(KERN_WARNING "%s: Can not alloc data\n", __func__);
		err = -ENOMEM;
		goto err_logo_close_file;
	}
	if (sys_read(fd, (char *)data, count) != count) {
		err = -EIO;
		goto err_logo_free_data;
	}

	max = fb_width(info) * fb_height(info);
	ptr = data;
	if (bf_supported && (info->node == 1 || info->node == 2)) {
		err = -EPERM;
		pr_err("%s:%d no info->creen_base on fb%d!\n",
		       __func__, __LINE__, info->node);
		goto err_logo_free_data;
	}
	if (info->screen_base) {
		bits = (unsigned short *)(info->screen_base);
		while (count > 3) {
			unsigned n = ptr[0];
			if (n > max)
				break;
			if (info->var.bits_per_pixel >= 24) {
				pad = memset16_rgb8888(bits, ptr[1], n << 1, info);
				bits += n << 1;
				bits += pad;
			} else {
				memset16(bits, ptr[1], n << 1);
				bits += n;
			}
			max -= n;
			ptr += 2;
			count -= 4;
		}
	}
	err = 0;
err_logo_free_data:
	kfree(data);
err_logo_close_file:
	sys_close(fd);
	return err;
}
Ejemplo n.º 24
0
int snd_gus_use_inc(struct snd_gus_card * gus)
{
	if (!try_module_get(gus->card->module))
		return 0;
	return 1;
}
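
A caller that succeeded with the increment helper above is expected to release the card module again when it is done; a minimal decrement sketch (the helper name is assumed, not shown in the listing):

void snd_gus_use_dec(struct snd_gus_card * gus)
{
	/* drop the reference taken by snd_gus_use_inc() */
	module_put(gus->card->module);
}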
Ejemplo n.º 25
0
int acpi_processor_notify_smm(struct module *calling_module)
{
	acpi_status status;
	static int is_done = 0;

	ACPI_FUNCTION_TRACE("acpi_processor_notify_smm");

	if (!(acpi_processor_ppc_status & PPC_REGISTERED))
		return_VALUE(-EBUSY);

	if (!try_module_get(calling_module))
		return_VALUE(-EINVAL);

	/* is_done is set to negative if an error occurred,
	 * and to positive if _no_ error occurred, but SMM
	 * was already notified. This avoids double notification
	 * which might lead to unexpected results...
	 */
	if (is_done > 0) {
		module_put(calling_module);
		return_VALUE(0);
	} else if (is_done < 0) {
		module_put(calling_module);
		return_VALUE(is_done);
	}

	is_done = -EIO;

	/* Can't write pstate_cnt to smi_cmd if either value is zero */
	if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_cnt\n"));
		module_put(calling_module);
		return_VALUE(0);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n",
			  acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));

	/* FADT v1 doesn't support pstate_cnt, many BIOS vendors use
	 * it anyway, so we need to support it... */
	if (acpi_fadt_is_v1) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Using v1.0 FADT reserved value for pstate_cnt\n"));
	}

	status = acpi_os_write_port(acpi_fadt.smi_cmd,
				    (u32) acpi_fadt.pstate_cnt, 8);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "Failed to write pstate_cnt [0x%x] to "
				  "smi_cmd [0x%x]\n", acpi_fadt.pstate_cnt,
				  acpi_fadt.smi_cmd));
		module_put(calling_module);
		return_VALUE(status);
	}

	/* Success. If there's no _PPC, we need to fear nothing, so
	 * we can allow the cpufreq driver to be rmmod'ed. */
	is_done = 1;

	if (!(acpi_processor_ppc_status & PPC_IN_USE))
		module_put(calling_module);

	return_VALUE(0);
}
Ejemplo n.º 26
0
static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct rtattr **tca = arg;
	struct tcmsg *t = NLMSG_DATA(n);
	u32 protocol = TC_H_MIN(t->tcm_info);
	u32 prio = TC_H_MAJ(t->tcm_info);
	u32 nprio = prio;
	u32 parent = t->tcm_parent;
	struct net_device *dev;
	struct Qdisc  *q;
	struct tcf_proto **back, **chain;
	struct tcf_proto *tp = NULL;
	struct tcf_proto_ops *tp_ops;
	struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long fh;
	int err;

	if (prio == 0) {
		/* If no priority is given, the user wants us to allocate one. */
		if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
			return -ENOENT;
		prio = TC_H_MAKE(0x80000000U,0U);
	}

	/* Find head of filter chain. */

	/* Find link */
	if ((dev = __dev_get_by_index(t->tcm_ifindex)) == NULL)
		return -ENODEV;

	/* Find qdisc */
	if (!parent) {
		q = dev->qdisc_sleeping;
		parent = q->handle;
	} else if ((q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent))) == NULL)
		return -EINVAL;

	/* Is it classful? */
	if ((cops = q->ops->cl_ops) == NULL)
		return -EINVAL;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		cl = cops->get(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* And the last stroke */
	chain = cops->tcf_chain(q, cl);
	err = -EINVAL;
	if (chain == NULL)
		goto errout;

	/* Check the chain for existence of proto-tcf with this priority */
	for (back = chain; (tp=*back) != NULL; back = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (!nprio || (tp->protocol != protocol && protocol))
					goto errout;
			} else
				tp = NULL;
			break;
		}
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND-1] == NULL || !protocol)
			goto errout;

		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
			goto errout;


		/* Create new proto tcf */

		err = -ENOBUFS;
		if ((tp = kmalloc(sizeof(*tp), GFP_KERNEL)) == NULL)
			goto errout;
		tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND-1]);
#ifdef CONFIG_KMOD
		if (tp_ops==NULL && tca[TCA_KIND-1] != NULL) {
			struct rtattr *kind = tca[TCA_KIND-1];

			if (RTA_PAYLOAD(kind) <= IFNAMSIZ) {
				request_module("cls_%s", (char*)RTA_DATA(kind));
				tp_ops = tcf_proto_lookup_ops(kind);
			}
		}
#endif
		if (tp_ops == NULL) {
			err = -EINVAL;
			kfree(tp);
			goto errout;
		}
		memset(tp, 0, sizeof(*tp));
		tp->ops = tp_ops;
		tp->protocol = protocol;
		tp->prio = nprio ? : tcf_auto_prio(*back);
		tp->q = q;
		tp->classify = tp_ops->classify;
		tp->classid = parent;
		err = -EBUSY;
		if (!try_module_get(tp_ops->owner)) {
			kfree(tp);
			goto errout;
		}
		if ((err = tp_ops->init(tp)) != 0) {
			module_put(tp_ops->owner);
			kfree(tp);
			goto errout;
		}

		qdisc_lock_tree(dev);
		tp->next = *back;
		*back = tp;
		qdisc_unlock_tree(dev);

	} else if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], tp->ops->kind))
Ejemplo n.º 27
0
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
                                        unsigned long stack_start,
                                        struct pt_regs *regs,
                                        unsigned long stack_size,
                                        int __user *child_tidptr,
                                        struct pid *pid,
                                        int trace)
{
    int retval;
    struct task_struct *p;
    int cgroup_callbacks_done = 0;

    if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
        return ERR_PTR(-EINVAL);

    /*
     * Thread groups must share signals as well, and detached threads
     * can only be started up within the thread group.
     */
    if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
        return ERR_PTR(-EINVAL);

    /*
     * Shared signal handlers imply shared VM. By way of the above,
     * thread groups also imply shared VM. Blocking this case allows
     * for various simplifications in other code.
     */
    if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
        return ERR_PTR(-EINVAL);

    retval = security_task_create(clone_flags);
    if (retval)
        goto fork_out;

    retval = -ENOMEM;
    p = dup_task_struct(current);
    if (!p)
        goto fork_out;

    rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
    DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
    DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
    retval = -EAGAIN;
    if (atomic_read(&p->real_cred->user->processes) >=
            p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
        if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
                p->real_cred->user != INIT_USER)
            goto bad_fork_free;
    }

    retval = copy_creds(p, clone_flags);
    if (retval < 0)
        goto bad_fork_free;

    /*
     * If multiple threads are within copy_process(), then this check
     * triggers too late. This doesn't hurt, the check is only there
     * to stop root fork bombs.
     */
    retval = -EAGAIN;
    if (nr_threads >= max_threads)
        goto bad_fork_cleanup_count;

    if (!try_module_get(task_thread_info(p)->exec_domain->module))
        goto bad_fork_cleanup_count;

    if (p->binfmt && !try_module_get(p->binfmt->module))
        goto bad_fork_cleanup_put_domain;

    p->did_exec = 0;
    delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
    copy_flags(clone_flags, p);
    INIT_LIST_HEAD(&p->children);
    INIT_LIST_HEAD(&p->sibling);
#ifdef CONFIG_PREEMPT_RCU
    p->rcu_read_lock_nesting = 0;
    p->rcu_flipctr_idx = 0;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
    p->vfork_done = NULL;
    spin_lock_init(&p->alloc_lock);

    clear_tsk_thread_flag(p, TIF_SIGPENDING);
    init_sigpending(&p->pending);

    p->utime = cputime_zero;
    p->stime = cputime_zero;
    p->gtime = cputime_zero;
    p->utimescaled = cputime_zero;
    p->stimescaled = cputime_zero;
    p->prev_utime = cputime_zero;
    p->prev_stime = cputime_zero;

    p->default_timer_slack_ns = current->timer_slack_ns;

    task_io_accounting_init(&p->ioac);
    acct_clear_integrals(p);

    posix_cpu_timers_init(p);

    p->lock_depth = -1;		/* -1 = no lock */
    do_posix_clock_monotonic_gettime(&p->start_time);
    p->real_start_time = p->start_time;
    monotonic_to_bootbased(&p->real_start_time);
    p->io_context = NULL;
    p->audit_context = NULL;
    cgroup_fork(p);
#ifdef CONFIG_NUMA
    p->mempolicy = mpol_dup(p->mempolicy);
    if (IS_ERR(p->mempolicy)) {
        retval = PTR_ERR(p->mempolicy);
        p->mempolicy = NULL;
        goto bad_fork_cleanup_cgroup;
    }
    mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
    p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
    p->hardirqs_enabled = 1;
#else
    p->hardirqs_enabled = 0;
#endif
    p->hardirq_enable_ip = 0;
    p->hardirq_enable_event = 0;
    p->hardirq_disable_ip = _THIS_IP_;
    p->hardirq_disable_event = 0;
    p->softirqs_enabled = 1;
    p->softirq_enable_ip = _THIS_IP_;
    p->softirq_enable_event = 0;
    p->softirq_disable_ip = 0;
    p->softirq_disable_event = 0;
    p->hardirq_context = 0;
    p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
    p->lockdep_depth = 0; /* no locks held yet */
    p->curr_chain_key = 0;
    p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
    p->blocked_on = NULL; /* not blocked yet */
#endif
    if (unlikely(current->ptrace))
        ptrace_fork(p, clone_flags);

    /* Perform scheduler related setup. Assign this task to a CPU. */
    sched_fork(p, clone_flags);

    if ((retval = audit_alloc(p)))
        goto bad_fork_cleanup_policy;
    /* copy all the process information */
    if ((retval = copy_semundo(clone_flags, p)))
        goto bad_fork_cleanup_audit;
    if ((retval = copy_files(clone_flags, p)))
        goto bad_fork_cleanup_semundo;
    if ((retval = copy_fs(clone_flags, p)))
        goto bad_fork_cleanup_files;
    if ((retval = copy_sighand(clone_flags, p)))
        goto bad_fork_cleanup_fs;
    if ((retval = copy_signal(clone_flags, p)))
        goto bad_fork_cleanup_sighand;
    if ((retval = copy_mm(clone_flags, p)))
        goto bad_fork_cleanup_signal;
    if ((retval = copy_namespaces(clone_flags, p)))
        goto bad_fork_cleanup_mm;
    if ((retval = copy_io(clone_flags, p)))
        goto bad_fork_cleanup_namespaces;
    retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
    if (retval)
        goto bad_fork_cleanup_io;

    if (pid != &init_struct_pid) {
        retval = -ENOMEM;
        pid = alloc_pid(p->nsproxy->pid_ns);
        if (!pid)
            goto bad_fork_cleanup_io;

        if (clone_flags & CLONE_NEWPID) {
            retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
            if (retval < 0)
                goto bad_fork_free_pid;
        }
    }

    ftrace_graph_init_task(p);

    p->pid = pid_nr(pid);
    p->tgid = p->pid;
    if (clone_flags & CLONE_THREAD)
        p->tgid = current->tgid;

    if (current->nsproxy != p->nsproxy) {
        retval = ns_cgroup_clone(p, pid);
        if (retval)
            goto bad_fork_free_graph;
    }

    p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
    /*
     * Clear TID on mm_release()?
     */
    p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
    p->robust_list = NULL;
#ifdef CONFIG_COMPAT
    p->compat_robust_list = NULL;
#endif
    INIT_LIST_HEAD(&p->pi_state_list);
    p->pi_state_cache = NULL;
#endif
    /*
     * sigaltstack should be cleared when sharing the same VM
     */
    if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
        p->sas_ss_sp = p->sas_ss_size = 0;

    /*
     * Syscall tracing should be turned off in the child regardless
     * of CLONE_PTRACE.
     */
    clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
    clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
    clear_all_latency_tracing(p);

    /* ok, now we should be set up.. */
    p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
    p->pdeath_signal = 0;
    p->exit_state = 0;

    /*
     * Ok, make it visible to the rest of the system.
     * We don't wake it up yet.
     */
    p->group_leader = p;
    INIT_LIST_HEAD(&p->thread_group);

    /* Now that the task is set up, run cgroup callbacks if
     * necessary. We need to run them before the task is visible
     * on the tasklist. */
    cgroup_fork_callbacks(p);
    cgroup_callbacks_done = 1;

    /* Need tasklist lock for parent etc handling! */
    write_lock_irq(&tasklist_lock);

    /*
     * The task hasn't been attached yet, so its cpus_allowed mask will
     * not be changed, nor will its assigned CPU.
     *
     * The cpus_allowed mask of the parent may have changed after it was
     * copied first time - so re-copy it here, then check the child's CPU
     * to ensure it is on a valid CPU (and if not, just force it back to
     * parent's CPU). This avoids a lot of nasty races.
     */
    p->cpus_allowed = current->cpus_allowed;
    p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
    if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
                 !cpu_online(task_cpu(p))))
        set_task_cpu(p, smp_processor_id());

    /* CLONE_PARENT re-uses the old parent */
    if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
        p->real_parent = current->real_parent;
        p->parent_exec_id = current->parent_exec_id;
    } else {
        p->real_parent = current;
        p->parent_exec_id = current->self_exec_id;
    }

    spin_lock(&current->sighand->siglock);

    /*
     * Process group and session signals need to be delivered to just the
     * parent before the fork or both the parent and the child after the
     * fork. Restart if a signal comes in before we add the new process to
     * its process group.
     * A fatal signal pending means that current will exit, so the new
     * thread can't slip out of an OOM kill (or normal SIGKILL).
     */
    recalc_sigpending();
    if (signal_pending(current)) {
        spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
        retval = -ERESTARTNOINTR;
        goto bad_fork_free_graph;
    }

    if (clone_flags & CLONE_THREAD) {
        p->group_leader = current->group_leader;
        list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
    }

    if (likely(p->pid)) {
        list_add_tail(&p->sibling, &p->real_parent->children);
        tracehook_finish_clone(p, clone_flags, trace);

        if (thread_group_leader(p)) {
            if (clone_flags & CLONE_NEWPID)
                p->nsproxy->pid_ns->child_reaper = p;

            p->signal->leader_pid = pid;
            tty_kref_put(p->signal->tty);
            p->signal->tty = tty_kref_get(current->signal->tty);
            attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
            attach_pid(p, PIDTYPE_SID, task_session(current));
            list_add_tail_rcu(&p->tasks, &init_task.tasks);
            __get_cpu_var(process_counts)++;
        }
        attach_pid(p, PIDTYPE_PID, pid);
        nr_threads++;
    }

    total_forks++;
    spin_unlock(&current->sighand->siglock);
    write_unlock_irq(&tasklist_lock);
    proc_fork_connector(p);
    cgroup_post_fork(p);
    return p;

bad_fork_free_graph:
    ftrace_graph_exit_task(p);
bad_fork_free_pid:
    if (pid != &init_struct_pid)
        free_pid(pid);
bad_fork_cleanup_io:
    put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
    exit_task_namespaces(p);
bad_fork_cleanup_mm:
    if (p->mm)
        mmput(p->mm);
bad_fork_cleanup_signal:
    cleanup_signal(p);
bad_fork_cleanup_sighand:
    __cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
    exit_fs(p); /* blocking */
bad_fork_cleanup_files:
    exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
    exit_sem(p);
bad_fork_cleanup_audit:
    audit_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
    mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
    cgroup_exit(p, cgroup_callbacks_done);
    delayacct_tsk_free(p);
    if (p->binfmt)
        module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
    module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
    atomic_dec(&p->cred->user->processes);
    put_cred(p->real_cred);
    put_cred(p->cred);
bad_fork_free:
    free_task(p);
fork_out:
    return ERR_PTR(retval);
}
Ejemplo n.º 28
0
static int usbrh_proc_open(struct inode *inode, struct file *filp)
{
    try_module_get(THIS_MODULE);
    return 0;
}
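
The open handler above only pins the module; the reference would normally be dropped in a matching release handler registered in the same file_operations. A hedged sketch (the function name is an assumption, not taken from the listing):

static int usbrh_proc_release(struct inode *inode, struct file *filp)
{
    /* drop the reference taken in usbrh_proc_open() */
    module_put(THIS_MODULE);
    return 0;
}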
Ejemplo n.º 29
0
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
	p->utimescaled = p->stimescaled = 0;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	p->prev_utime = p->prev_stime = 0;
#endif
#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	/*
	 * Save current task's (not effective) timer slack value as default
	 * timer slack value for new task.
	 */
	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_begin(current);
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	p->memcg_batch.do_batch = 0;
	p->memcg_batch.memcg = NULL;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	if (clone_flags & CLONE_THREAD)
		p->exit_signal = -1;
	else if (clone_flags & CLONE_PARENT)
		p->exit_signal = current->group_leader->exit_signal;
	else
		p->exit_signal = (clone_flags & CSIGNAL);

	p->pdeath_signal = 0;
	p->exit_state = 0;

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	*/
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		current->signal->nr_threads++;
		atomic_inc(&current->signal->live);
		atomic_inc(&current->signal->sigcnt);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		if (thread_group_leader(p)) {
			if (is_child_reaper(pid))
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__this_cpu_inc(process_counts);
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);

	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	if (unlikely(clone_flags & CLONE_NEWPID))
		pid_ns_release_proc(p->nsproxy->pid_ns);
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
Ejemplo n.º 30
0
static int __devinit hecubafb_probe(struct platform_device *dev)
{
	struct fb_info *info;
	struct hecuba_board *board;
	int retval = -ENOMEM;
	int videomemorysize;
	unsigned char *videomemory;
	struct hecubafb_par *par;

	
	board = dev->dev.platform_data;
	if (!board)
		return -EINVAL;

	
	if (!try_module_get(board->owner))
		return -ENODEV;

	videomemorysize = (DPY_W*DPY_H)/8;

	videomemory = vzalloc(videomemorysize);
	if (!videomemory)
		goto err_videomem_alloc;

	info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
	if (!info)
		goto err_fballoc;

	info->screen_base = (char __force __iomem *)videomemory;
	info->fbops = &hecubafb_ops;

	info->var = hecubafb_var;
	info->fix = hecubafb_fix;
	info->fix.smem_len = videomemorysize;
	par = info->par;
	par->info = info;
	par->board = board;
	par->send_command = apollo_send_command;
	par->send_data = apollo_send_data;

	info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;

	info->fbdefio = &hecubafb_defio;
	fb_deferred_io_init(info);

	retval = register_framebuffer(info);
	if (retval < 0)
		goto err_fbreg;
	platform_set_drvdata(dev, info);

	printk(KERN_INFO
	       "fb%d: Hecuba frame buffer device, using %dK of video memory\n",
	       info->node, videomemorysize >> 10);

	
	retval = par->board->init(par);
	if (retval < 0)
		goto err_fbreg;

	return 0;
err_fbreg:
	framebuffer_release(info);
err_fballoc:
	vfree(videomemory);
err_videomem_alloc:
	module_put(board->owner);
	return retval;
}