Example #1
static void voice_auddev_cb_function(u32 evt_id,
			union auddev_evt_data *evt_payload,
			void *private_data)
{
	struct voice_data *v = &voice;
	int rc = 0, i;

	MM_INFO("auddev_cb_function, evt_id=%d, dev_state=%d, voc_state=%d\n",
		evt_id, v->dev_state, v->voc_state);
	if ((evt_id != AUDDEV_EVT_START_VOICE) &&
			(evt_id != AUDDEV_EVT_END_VOICE)) {
		if (evt_payload == NULL) {
			MM_ERR(" evt_payload is NULL pointer\n");
			return;
		}
	}
	switch (evt_id) {
	case AUDDEV_EVT_START_VOICE:
		if ((v->dev_state == DEV_INIT) ||
				(v->dev_state == DEV_REL_DONE)) {
			v->v_call_status = VOICE_CALL_START;
			if ((v->dev_rx.enabled == VOICE_DEV_ENABLED)
				&& (v->dev_tx.enabled == VOICE_DEV_ENABLED)) {
				v->dev_state = DEV_READY;
				MM_DBG("dev_state into ready\n");
				wake_up(&v->dev_wait);
			}
		}
		break;
	case AUDDEV_EVT_DEV_CHG_VOICE:
		if (v->dev_state == DEV_READY) {
			v->dev_rx.enabled = VOICE_DEV_DISABLED;
			v->dev_tx.enabled = VOICE_DEV_DISABLED;
			v->dev_state = DEV_CHANGE;
			mutex_lock(&voice.voc_lock);
			if (v->voc_state == VOICE_ACQUIRE) {
				/* send device change to modem */
				voice_cmd_change();
				mutex_unlock(&voice.voc_lock);
				msm_snddev_enable_sidetone(v->dev_rx.dev_id,
				0);
				/* block to wait for CHANGE_START */
				rc = wait_event_interruptible(
				v->voc_wait, (v->voc_state == VOICE_CHANGE)
				|| (atomic_read(&v->chg_start_flag) == 1)
				|| (atomic_read(&v->rel_start_flag) == 1));
			} else {
				mutex_unlock(&voice.voc_lock);
				MM_ERR(" Voice is not at ACQUIRE state\n");
			}
		} else if ((v->dev_state == DEV_INIT) ||
				(v->dev_state == DEV_REL_DONE)) {
				v->dev_rx.enabled = VOICE_DEV_DISABLED;
				v->dev_tx.enabled = VOICE_DEV_DISABLED;
		} else
			MM_ERR(" device is not at proper state\n");
		break;
	case AUDDEV_EVT_DEV_RDY:
		/* update the dev info */
		if (evt_payload->voc_devinfo.dev_type == DIR_RX) {
			for (i = 0; i < VOC_RX_VOL_ARRAY_NUM; i++) {
				v->max_rx_vol[i] =
					evt_payload->voc_devinfo.max_rx_vol[i];
				v->min_rx_vol[i] =
					evt_payload->voc_devinfo.min_rx_vol[i];
			}
		}
		if (v->dev_state == DEV_CHANGE) {
			if (evt_payload->voc_devinfo.dev_type == DIR_RX) {
				v->dev_rx.dev_acdb_id =
					evt_payload->voc_devinfo.acdb_dev_id;
				v->dev_rx.sample =
					evt_payload->voc_devinfo.dev_sample;
				v->dev_rx.dev_id =
				evt_payload->voc_devinfo.dev_id;
				v->dev_rx.enabled = VOICE_DEV_ENABLED;
			} else {
				v->dev_tx.dev_acdb_id =
					evt_payload->voc_devinfo.acdb_dev_id;
				v->dev_tx.sample =
					evt_payload->voc_devinfo.dev_sample;
				v->dev_tx.enabled = VOICE_DEV_ENABLED;
				v->dev_tx.dev_id =
				evt_payload->voc_devinfo.dev_id;
			}
			if ((v->dev_rx.enabled == VOICE_DEV_ENABLED) &&
				(v->dev_tx.enabled == VOICE_DEV_ENABLED)) {
				v->dev_state = DEV_READY;
				MM_DBG("dev state into ready\n");
				voice_cmd_device_info(v);
				wake_up(&v->dev_wait);
				mutex_lock(&voice.voc_lock);
				if (v->voc_state == VOICE_CHANGE) {
					v->dev_event = DEV_CHANGE_READY;
					complete(&v->complete);
				}
				mutex_unlock(&voice.voc_lock);
			}
		} else if ((v->dev_state == DEV_INIT) ||
			(v->dev_state == DEV_REL_DONE)) {
			if (evt_payload->voc_devinfo.dev_type == DIR_RX) {
				v->dev_rx.dev_acdb_id =
					evt_payload->voc_devinfo.acdb_dev_id;
				v->dev_rx.sample =
					evt_payload->voc_devinfo.dev_sample;
				v->dev_rx.dev_id =
				evt_payload->voc_devinfo.dev_id;
				v->dev_rx.enabled = VOICE_DEV_ENABLED;
			} else {
				v->dev_tx.dev_acdb_id =
					evt_payload->voc_devinfo.acdb_dev_id;
				v->dev_tx.sample =
					evt_payload->voc_devinfo.dev_sample;
				v->dev_tx.dev_id =
				evt_payload->voc_devinfo.dev_id;
				v->dev_tx.enabled = VOICE_DEV_ENABLED;
			}
			if ((v->dev_rx.enabled == VOICE_DEV_ENABLED) &&
				(v->dev_tx.enabled == VOICE_DEV_ENABLED) &&
				(v->v_call_status == VOICE_CALL_START)) {
				v->dev_state = DEV_READY;
				MM_DBG("dev state into ready\n");
				voice_cmd_device_info(v);
				wake_up(&v->dev_wait);
				mutex_lock(&voice.voc_lock);
				if (v->voc_state == VOICE_CHANGE) {
					v->dev_event = DEV_CHANGE_READY;
					complete(&v->complete);
				}
				mutex_unlock(&voice.voc_lock);
			}
		} else
			MM_ERR("Receive READY not at the proper state =%d\n",
				v->dev_state);
		break;
	case AUDDEV_EVT_DEVICE_VOL_MUTE_CHG:
		if (evt_payload->voc_devinfo.dev_type == DIR_TX)
			v->dev_tx.mute =
				evt_payload->voc_vm_info.dev_vm_val.mute;
		else
			v->dev_rx.volume = evt_payload->
						voc_vm_info.dev_vm_val.vol;
		/* send device info */
		voice_cmd_device_info(v);
		break;
	case AUDDEV_EVT_REL_PENDING:
		/* recover the tx mute and rx volume to the default values */
		if (v->dev_state == DEV_READY) {
			if (atomic_read(&v->rel_start_flag)) {
				atomic_dec(&v->rel_start_flag);
				if (evt_payload->voc_devinfo.dev_type == DIR_RX)
					v->dev_rx.enabled = VOICE_DEV_DISABLED;
				else
					v->dev_tx.enabled = VOICE_DEV_DISABLED;
				v->dev_state = DEV_REL_DONE;
				wake_up(&v->dev_wait);
				break;
			}
			mutex_lock(&voice.voc_lock);
			if ((v->voc_state == VOICE_RELEASE) ||
					(v->voc_state == VOICE_INIT)) {
				if (evt_payload->voc_devinfo.dev_type
							== DIR_RX) {
					v->dev_rx.enabled = VOICE_DEV_DISABLED;
				} else {
					v->dev_tx.enabled = VOICE_DEV_DISABLED;
				}
				v->dev_state = DEV_REL_DONE;
				mutex_unlock(&voice.voc_lock);
				wake_up(&v->dev_wait);
			} else {
				/* send device change to modem */
				voice_cmd_change();
				mutex_unlock(&voice.voc_lock);
				rc = wait_event_interruptible(
				v->voc_wait, (v->voc_state == VOICE_CHANGE)
				|| (atomic_read(&v->chg_start_flag) == 1)
				|| (atomic_read(&v->rel_start_flag) == 1));
				if (atomic_read(&v->rel_start_flag) == 1)
					atomic_dec(&v->rel_start_flag);
				/* clear Rx/Tx to Disable */
				if (evt_payload->voc_devinfo.dev_type == DIR_RX)
					v->dev_rx.enabled = VOICE_DEV_DISABLED;
				else
					v->dev_tx.enabled = VOICE_DEV_DISABLED;
				v->dev_state = DEV_REL_DONE;
				wake_up(&v->dev_wait);
			}
		} else if ((v->dev_state == DEV_INIT) ||
				(v->dev_state == DEV_REL_DONE)) {
			if (evt_payload->voc_devinfo.dev_type == DIR_RX)
				v->dev_rx.enabled = VOICE_DEV_DISABLED;
			else
				v->dev_tx.enabled = VOICE_DEV_DISABLED;
		}
		break;
	case AUDDEV_EVT_END_VOICE:
		/* recover the tx mute and rx volume to the default values */
		v->dev_tx.mute = v->default_mute_val;
		v->dev_rx.volume = v->default_vol_val;

		if (v->dev_rx.enabled == VOICE_DEV_ENABLED)
			msm_snddev_enable_sidetone(v->dev_rx.dev_id, 0);

		if ((v->dev_state == DEV_READY) ||
			(v->dev_state == DEV_CHANGE)) {
			if (atomic_read(&v->rel_start_flag)) {
				atomic_dec(&v->rel_start_flag);
				v->v_call_status = VOICE_CALL_END;
				v->dev_state = DEV_REL_DONE;
				wake_up(&v->dev_wait);
				break;
			}
			mutex_lock(&voice.voc_lock);
			if ((v->voc_state == VOICE_RELEASE) ||
					(v->voc_state == VOICE_INIT)) {
				v->v_call_status = VOICE_CALL_END;
				v->dev_state = DEV_REL_DONE;
				mutex_unlock(&voice.voc_lock);
				wake_up(&v->dev_wait);
			} else {
				/* send mute and default volume value to MCAD */
				voice_cmd_device_info(v);
				/* send device change to modem */
				voice_cmd_change();
				mutex_unlock(&voice.voc_lock);
				/* block to wait for RELEASE_START
						or CHANGE_START */
				rc = wait_event_interruptible(
				v->voc_wait, (v->voc_state == VOICE_CHANGE)
				|| (atomic_read(&v->chg_start_flag) == 1)
				|| (atomic_read(&v->rel_start_flag) == 1));
				if (atomic_read(&v->rel_start_flag) == 1)
					atomic_dec(&v->rel_start_flag);
				/* set voice call to END state */
				v->v_call_status = VOICE_CALL_END;
				v->dev_state = DEV_REL_DONE;
				wake_up(&v->dev_wait);
			}
		} else
			v->v_call_status = VOICE_CALL_END;
		break;
	case AUDDEV_EVT_FREQ_CHG:
		MM_DBG("Voice Driver got sample rate change Event\n");
		MM_DBG("sample rate %d\n", evt_payload->freq_info.sample_rate);
		MM_DBG("dev_type %d\n", evt_payload->freq_info.dev_type);
		MM_DBG("acdb_dev_id %d\n", evt_payload->freq_info.acdb_dev_id);
		if (v->dev_state == DEV_READY) {
			v->dev_tx.enabled = VOICE_DEV_DISABLED;
			v->dev_state = DEV_CHANGE;
			mutex_lock(&voice.voc_lock);
			if (v->voc_state == VOICE_ACQUIRE) {
				msm_snddev_enable_sidetone(v->dev_rx.dev_id,
				0);
				/* send device change to modem */
				voice_cmd_change();
				mutex_unlock(&voice.voc_lock);
				/* block to wait for CHANGE_START */
				rc = wait_event_interruptible(
				v->voc_wait, (v->voc_state == VOICE_CHANGE)
				|| (atomic_read(&v->chg_start_flag) == 1)
				|| (atomic_read(&v->rel_start_flag) == 1));
			} else {
				mutex_unlock(&voice.voc_lock);
				MM_ERR(" Voice is not at ACQUIRE state\n");
			}
		} else if ((v->dev_state == DEV_INIT) ||
				(v->dev_state == DEV_REL_DONE)) {
				v->dev_tx.enabled = VOICE_DEV_DISABLED;
		} else
			MM_ERR("Event not at the proper state =%d\n",
				v->dev_state);
		break;
	default:
		MM_ERR("UNKNOWN EVENT\n");
	}
	return;
}
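
The callback above hands state changes to the voice-call thread through a wait-queue handshake: one side updates the state (or an atomic flag) and calls wake_up(), while the other side blocks in wait_event_interruptible() until the condition becomes true. A minimal, self-contained sketch of that idiom follows; the names are illustrative and not part of the MSM driver.

#include <linux/wait.h>
#include <linux/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static atomic_t demo_flag = ATOMIC_INIT(0);

/* producer side: publish the event, then wake any waiter */
static void demo_signal(void)
{
	atomic_set(&demo_flag, 1);
	wake_up(&demo_wait);
}

/* consumer side: sleep until the flag is set or a signal arrives */
static int demo_wait_for_event(void)
{
	int rc;

	rc = wait_event_interruptible(demo_wait, atomic_read(&demo_flag) == 1);
	if (rc)
		return rc;	/* -ERESTARTSYS: interrupted by a signal */
	atomic_set(&demo_flag, 0);	/* consume the event */
	return 0;
}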
Example #2
/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct sighand_struct *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
		goto bad_unshare_out;

	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
			new_fs)))
		goto bad_unshare_cleanup_fd;

	if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_mm) {
			mm = current->mm;
			active_mm = current->active_mm;
			current->mm = new_mm;
			current->active_mm = new_mm;
			if (current->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
				atomic_dec(&mm->oom_disable_count);
				atomic_inc(&new_mm->oom_disable_count);
			}
			activate_mm(active_mm, new_mm);
			new_mm = mm;
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_vm:
	if (new_mm)
		mmput(new_mm);

bad_unshare_cleanup_sigh:
	if (new_sigh)
		if (atomic_dec_and_test(&new_sigh->count))
			kmem_cache_free(sighand_cachep, new_sigh);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_cleanup_thread:
bad_unshare_out:
	return err;
}
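
For context, the syscall above is reached from user space through unshare(2). A minimal user-space sketch (the flag choice is only an illustration; error handling is reduced to perror):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* give this process a private fs_struct (cwd, umask) and a private
	 * file descriptor table, without creating a new task */
	if (unshare(CLONE_FS | CLONE_FILES) == -1) {
		perror("unshare");
		exit(EXIT_FAILURE);
	}
	/* chdir() calls or fd changes made from here on no longer affect
	 * any task this process previously shared them with */
	return 0;
}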
Example #3
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	/* dup_task_struct() allocates the kernel stack, task_struct, etc. for the child; their contents start out identical to the parent's */
	p = dup_task_struct(current);
	/* task_struct allocation failed */
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	/* make sure the total number of tasks in the system has not exceeded the limit */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	/* initialize the children and sibling lists as empty */
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);
	/* initialize the parts of the child that must differ from the parent */
	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;
#endif
#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
 		goto bad_fork_cleanup_cgroup;
 	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	p->memcg_batch.do_batch = 0;
	p->memcg_batch.memcg = NULL;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;

	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_policy;
	/* copy all the process information from the parent to the child */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		/* allocate a PID for the new process */
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;

		if (clone_flags & CLONE_NEWPID) {
			retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
			if (retval < 0)
				goto bad_fork_free_pid;
		}
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if (current->nsproxy != p->nsproxy) {
		retval = ns_cgroup_clone(p, pid);
		if (retval)
			goto bad_fork_free_pid;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		current->signal->nr_threads++;
		atomic_inc(&current->signal->live);
		atomic_inc(&current->signal->sigcnt);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (clone_flags & CLONE_NEWPID)
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__this_cpu_inc(process_counts);
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	perf_event_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm) {
		task_lock(p);
		if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
			atomic_dec(&p->mm->oom_disable_count);
		task_unlock(p);
		mmput(p->mm);
	}
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
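
The long bad_fork_* tail above is the kernel's usual goto-based unwind: every resource gets a cleanup label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A stripped-down sketch of the pattern with generic names (plain C, not kernel APIs):

#include <stdlib.h>

struct ctx {
	void *a, *b, *c;
};

static int ctx_setup(struct ctx *ctx)
{
	int err = -1;	/* pessimistic default, like retval in copy_process() */

	ctx->a = malloc(16);
	if (!ctx->a)
		goto out;
	ctx->b = malloc(16);
	if (!ctx->b)
		goto free_a;
	ctx->c = malloc(16);
	if (!ctx->c)
		goto free_b;
	return 0;	/* success: the caller now owns a, b and c */

free_b:
	free(ctx->b);
free_a:
	free(ctx->a);
out:
	return err;
}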
Example #4
static void avc_node_delete(struct avc_node *node)
{
	hlist_del_rcu(&node->list);
	call_rcu(&node->rhead, avc_node_free);
	atomic_dec(&avc_cache.active_nodes);
}
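
avc_node_delete() is the standard RCU removal pattern: unlink the node with the _rcu list primitive so concurrent readers keep seeing a consistent list, then defer the actual free with call_rcu() so it only runs after all pre-existing readers have finished. A generic sketch of the same pattern (the structure and callback names are illustrative, not SELinux code):

#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {
	struct hlist_node list;
	struct rcu_head rhead;
};

static void demo_node_free(struct rcu_head *rhead)
{
	/* runs after a grace period: no reader can still reference the node */
	kfree(container_of(rhead, struct demo_node, rhead));
}

static void demo_node_delete(struct demo_node *node)
{
	hlist_del_rcu(&node->list);		/* readers stop finding it */
	call_rcu(&node->rhead, demo_node_free);	/* free it once it is safe */
}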
Example #5
/*
 * Rx I/O daemon
 */
static int rxrpc_krxiod(void *arg)
{
	DECLARE_WAITQUEUE(krxiod,current);

	printk("Started krxiod %d\n",current->pid);

	daemonize("krxiod");

	/* loop around waiting for work to do */
	do {
		/* wait for work or to be told to exit */
		_debug("### Begin Wait");
		if (!atomic_read(&rxrpc_krxiod_qcount)) {
			set_current_state(TASK_INTERRUPTIBLE);

			add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);

			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (atomic_read(&rxrpc_krxiod_qcount) ||
				    rxrpc_krxiod_die ||
				    signal_pending(current))
					break;

				schedule();
			}

			remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
			set_current_state(TASK_RUNNING);
		}
		_debug("### End Wait");

		/* do work if been given some to do */
		_debug("### Begin Work");

		/* see if there's a transport in need of attention */
		if (!list_empty(&rxrpc_krxiod_transportq)) {
			struct rxrpc_transport *trans = NULL;

			spin_lock_irq(&rxrpc_krxiod_transportq_lock);

			if (!list_empty(&rxrpc_krxiod_transportq)) {
				trans = list_entry(
					rxrpc_krxiod_transportq.next,
					struct rxrpc_transport,
					krxiodq_link);

				list_del_init(&trans->krxiodq_link);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&trans->usage)>0)
					rxrpc_get_transport(trans);
				else
					trans = NULL;
			}

			spin_unlock_irq(&rxrpc_krxiod_transportq_lock);

			if (trans) {
				rxrpc_trans_receive_packet(trans);
				rxrpc_put_transport(trans);
			}
		}

		/* see if there's a call in need of attention */
		if (!list_empty(&rxrpc_krxiod_callq)) {
			struct rxrpc_call *call = NULL;

			spin_lock_irq(&rxrpc_krxiod_callq_lock);

			if (!list_empty(&rxrpc_krxiod_callq)) {
				call = list_entry(rxrpc_krxiod_callq.next,
						  struct rxrpc_call,
						  rcv_krxiodq_lk);
				list_del_init(&call->rcv_krxiodq_lk);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&call->usage) > 0) {
					_debug("@@@ KRXIOD"
					       " Begin Attend Call %p", call);
					rxrpc_get_call(call);
				}
				else {
					call = NULL;
				}
			}

			spin_unlock_irq(&rxrpc_krxiod_callq_lock);

			if (call) {
				rxrpc_call_do_stuff(call);
				rxrpc_put_call(call);
				_debug("@@@ KRXIOD End Attend Call %p", call);
			}
		}
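
The snippet above is cut off before the end of the daemon loop, but both work branches use the same dequeue idiom: peek at the list under the lock, detach the entry, take a reference while the lock is still held so the object cannot disappear, then drop the lock before doing the actual work. A generic sketch of that idiom (illustrative types, not the rxrpc structures):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/kref.h>

struct demo_item {
	struct list_head link;
	struct kref ref;
};

static LIST_HEAD(demo_queue);
static DEFINE_SPINLOCK(demo_lock);

static struct demo_item *demo_dequeue(void)
{
	struct demo_item *item = NULL;

	spin_lock_irq(&demo_lock);
	if (!list_empty(&demo_queue)) {
		item = list_first_entry(&demo_queue, struct demo_item, link);
		list_del_init(&item->link);
		kref_get(&item->ref);	/* pin it before dropping the lock */
	}
	spin_unlock_irq(&demo_lock);

	return item;	/* the caller processes it and then drops the reference */
}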
Example #6
/**
 * Transmit a packet.
 * This is a helper function for ctcm_tx().
 *
 *  ch		Channel to be used for sending.
 *  skb		Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *            by ctcm_tx().
 *
 * returns 0 on success, -ERRNO on failure.
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/*
	 * Protect skb against being freed by upper
	 * layers.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;

	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb. trans_skb usually has a pre-allocated
		 * idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/*
		 * Remove our header. It gets added
		 * again on retransmit.
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
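
The header handling above relies on the sk_buff headroom API: skb_push() grows the data area toward the head of the buffer and returns a pointer to the new start, so the link-level header and the 2-byte block length can be prepended in place. A minimal illustration of just that step (it assumes enough headroom was reserved; the header struct mirrors the fields used above and is not the driver's real ll_header definition):

#include <linux/skbuff.h>
#include <linux/string.h>

static void demo_prepend_ll_header(struct sk_buff *skb, __u16 type)
{
	struct {
		__u16 length;
		__u16 type;
		__u16 unused;
	} header;
	__u16 block_len;

	header.length = skb->len + sizeof(header);
	header.type = type;
	header.unused = 0;

	/* reserve room at the front and copy the header into it */
	memcpy(skb_push(skb, sizeof(header)), &header, sizeof(header));

	/* finally prepend the 2-byte block length covering everything */
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;
}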
Example #7
static int shutdown(struct socket *sock, int how)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sk_buff *buf;
	int res;

	/* Could return -EINVAL for an invalid "how", but why bother? */

	if (down_interruptible(&tsock->sem))
		return -ERESTARTSYS;

	sock_lock(tsock);

	switch (sock->state) {
	case SS_CONNECTED:

		/* Send 'FIN+' or 'FIN-' message to peer */

		sock_unlock(tsock);
restart:
		if ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
			atomic_dec(&tipc_queue_size);
			if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) {
				buf_discard(buf);
				goto restart;
			}
			tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
		} else {
			tipc_shutdown(tsock->p->ref);
		}
		sock_lock(tsock);

		/* fall through */

	case SS_DISCONNECTING:

		/* Discard any unreceived messages */

		while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
			atomic_dec(&tipc_queue_size);
			buf_discard(buf);
		}
		tsock->p->conn_unacked = 0;

		/* fall through */

	case SS_CONNECTING:
		sock->state = SS_DISCONNECTING;
		res = 0;
		break;

	default:
		res = -ENOTCONN;
	}

	sock_unlock(tsock);

	up(&tsock->sem);
	return res;
}
Example #8
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;
	int cgroup_callbacks_done = 0;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->user != current->nsproxy->user_ns->root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_flipctr_idx = 0;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
	p->gtime = cputime_zero;
	p->utimescaled = cputime_zero;
	p->stimescaled = cputime_zero;
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;

#ifdef CONFIG_DETECT_SOFTLOCKUP
	p->last_switch_count = 0;
	p->last_switch_timestamp = 0;
#endif

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	p->it_virt_expires = cputime_zero;
	p->it_prof_expires = cputime_zero;
	p->it_sched_expires = 0;
	INIT_LIST_HEAD(&p->cpu_timers[0]);
	INIT_LIST_HEAD(&p->cpu_timers[1]);
	INIT_LIST_HEAD(&p->cpu_timers[2]);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
#ifdef CONFIG_SECURITY
	p->security = NULL;
#endif
	p->cap_bset = current->cap_bset;
	p->io_context = NULL;
	p->audit_context = NULL;
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
 		goto bad_fork_cleanup_cgroup;
 	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	if ((retval = copy_io(clone_flags, p)))
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(task_active_pid_ns(p));
		if (!pid)
			goto bad_fork_cleanup_io;

		if (clone_flags & CLONE_NEWPID) {
			retval = pid_ns_prepare_proc(task_active_pid_ns(p));
			if (retval < 0)
				goto bad_fork_free_pid;
		}
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if (current->nsproxy != p->nsproxy) {
		retval = ns_cgroup_clone(p, pid);
		if (retval)
			goto bad_fork_free_pid;
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);

	/* Now that the task is set up, run cgroup callbacks if
	 * necessary. We need to run them before the task is visible
	 * on the tasklist. */
	cgroup_fork_callbacks(p);
	cgroup_callbacks_done = 1;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied first time - so re-copy it here, then check the child's CPU
	 * to ensure it is on a valid CPU (and if not, just force it back to
	 * parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

		if (!cputime_eq(current->signal->it_virt_expires,
				cputime_zero) ||
		    !cputime_eq(current->signal->it_prof_expires,
				cputime_zero) ||
		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
		    !list_empty(&current->signal->cpu_timers[0]) ||
		    !list_empty(&current->signal->cpu_timers[1]) ||
		    !list_empty(&current->signal->cpu_timers[2])) {
			/*
			 * Have child wake up on its first tick to check
			 * for process CPU timers.
			 */
			p->it_prof_expires = jiffies_to_cputime(1);
		}
	}

	if (likely(p->pid)) {
		list_add_tail(&p->sibling, &p->real_parent->children);
		tracehook_finish_clone(p, clone_flags, trace);

		if (thread_group_leader(p)) {
			if (clone_flags & CLONE_NEWPID)
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			p->signal->tty = current->signal->tty;
			set_task_pgrp(p, task_pgrp_nr(current));
			set_task_session(p, task_session_nr(current));
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	cgroup_exit(p, cgroup_callbacks_done);
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
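
The flag checks at the top of copy_process() (CLONE_THREAD needs CLONE_SIGHAND, CLONE_SIGHAND needs CLONE_VM) are exactly what user space must respect when calling clone(2) directly. A small user-space sketch using the glibc clone() wrapper; the stack size and flag set are chosen only for illustration:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

static int child_fn(void *arg)
{
	printf("child running, arg=%s\n", (const char *)arg);
	return 0;
}

int main(void)
{
	const size_t stack_size = 64 * 1024;
	char *stack = malloc(stack_size);
	int pid;

	if (!stack)
		exit(EXIT_FAILURE);

	/* CLONE_VM alone is fine; CLONE_SIGHAND without CLONE_VM, or
	 * CLONE_THREAD without CLONE_SIGHAND, would be rejected with
	 * -EINVAL by the checks shown above */
	pid = clone(child_fn, stack + stack_size, CLONE_VM | SIGCHLD, "hello");
	if (pid == -1) {
		perror("clone");
		exit(EXIT_FAILURE);
	}
	waitpid(pid, NULL, 0);	/* SIGCHLD as termination signal allows this */
	free(stack);
	return 0;
}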
Example #9
static void pcan_usb_read_notify(purb_t purb)
{
	int err = 0;
	struct pcan_usb_interface *usb_if = purb->context;
	struct pcandev *dev = &usb_if->dev[0];

#if 0
	DPRINTK(KERN_DEBUG "%s: %s() status=%d\n",
	        DEVICE_NAME, __func__, purb->status);
#endif

	// un-register outstanding urb
	atomic_dec(&usb_if->active_urbs);

	// do interleaving read
	// stop with first error
	if (!purb->status && dev->ucPhysicallyInstalled) {
		uint8_t *read_buffer_addr = purb->transfer_buffer;
		const int read_buffer_len = purb->actual_length;
		int read_buffer_size;

		// buffer interleave to increase speed
		if (read_buffer_addr == usb_if->read_buffer_addr[0]) {
			FILL_BULK_URB(purb, usb_if->usb_dev,
				           usb_rcvbulkpipe(usb_if->usb_dev,
			                              usb_if->pipe_read.ucNumber),
				           usb_if->read_buffer_addr[1], usb_if->read_buffer_size,
			              pcan_usb_read_notify, usb_if);
		} else {
			FILL_BULK_URB(purb, usb_if->usb_dev,
				           usb_rcvbulkpipe(usb_if->usb_dev,
			                              usb_if->pipe_read.ucNumber),
				           usb_if->read_buffer_addr[0], usb_if->read_buffer_size,
			              pcan_usb_read_notify, usb_if);
		}

		// start next urb
		if ((err = __usb_submit_urb(purb))) {
			dev->nLastError = err;
			dev->dwErrorCounter++;

			printk(KERN_ERR "%s: %s() URB submit failure %d\n",
			       DEVICE_NAME, __func__, err);
		} else
			atomic_inc(&usb_if->active_urbs);

#ifdef PCAN_USB_DEBUG_DECODE
		printk(KERN_INFO "%s: got %u bytes URB, "
		                 "decoding it by packets of %u bytes:\n",
		       DEVICE_NAME, read_buffer_len, usb_if->read_packet_size);
#endif

		for (read_buffer_size=0; read_buffer_size < read_buffer_len; ) {
#ifdef PCAN_USB_DEBUG_DECODE
			printk(KERN_INFO "%s: decoding @offset %u:\n",
			       DEVICE_NAME, read_buffer_size);
#endif
			err = usb_if->device_msg_decode(usb_if,
			                              read_buffer_addr,
			                              usb_if->read_packet_size);
			if (err < 0) {
				dev->nLastError = err;
				dev->wCANStatus |= CAN_ERR_QOVERRUN;
				dev->dwErrorCounter++;

				if (net_ratelimit())
					printk(KERN_DEBUG "%s: @offset %d: message decoding error %d\n",
					       DEVICE_NAME, read_buffer_size, err);
			}

			read_buffer_addr += usb_if->read_packet_size;
			read_buffer_size += usb_if->read_packet_size;
		}
	} else {
		if (purb->status != -ENOENT) {
			printk(KERN_ERR
				"%s: read data stream turned off caused by ",
				DEVICE_NAME);

			if (!dev->ucPhysicallyInstalled) {
				printk("device plug out!\n");
			} else {

				switch (purb->status) {

				case -ESHUTDOWN:
					printk("endpoint shutdown\n");
					break;

#ifdef PCAN_USB_FAST_CLOSE_NMI
				/*
				 * here are cases that have been seen to occur
				 */
				case -EILSEQ:
				case -EOVERFLOW:

				case -EPIPE:
				case -EPROTO:
#endif
				default:
					printk("err %d!\n", purb->status);
					break;
				}

				if (dev->nOpenPaths) {

#ifdef PCAN_USB_FAST_CLOSE_NMI
					/*
					 * seems that this is the most
					 * reasonable thing to do most of the
					 * times...
					 */
					dev->ucPhysicallyInstalled = 0;
#endif
					printk("err %d: considering unplugged device\n", purb->status);
				} else {
					/*
					 * Otherwise, do nothing but wait for
					 * the usb core to disconnect then
					 * reconnect the device.
					 */
				}
			}
		}
	}
}
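
The completion handler above re-arms the same URB with the other one of two read buffers before resubmitting it, so one buffer can be decoded while the next transfer is already in flight (FILL_BULK_URB is the legacy spelling of usb_fill_bulk_urb). A hedged sketch of that resubmit step with illustrative fields; the real driver keeps this state in struct pcan_usb_interface:

#include <linux/usb.h>

struct demo_if {
	struct usb_device *udev;
	unsigned int bulk_in_ep;
	void *buf[2];		/* ping-pong read buffers */
	int buf_size;
	int cur;		/* index of the buffer the URB is using */
};

static void demo_read_complete(struct urb *urb);

/* re-arm the URB with the other buffer and resubmit it */
static int demo_resubmit(struct demo_if *di, struct urb *urb)
{
	di->cur ^= 1;
	usb_fill_bulk_urb(urb, di->udev,
			  usb_rcvbulkpipe(di->udev, di->bulk_in_ep),
			  di->buf[di->cur], di->buf_size,
			  demo_read_complete, di);
	return usb_submit_urb(urb, GFP_ATOMIC);	/* we are in completion context */
}

static void demo_read_complete(struct urb *urb)
{
	struct demo_if *di = urb->context;

	if (urb->status)
		return;		/* stop the read stream on the first error */
	if (demo_resubmit(di, urb))
		return;
	/* decode di->buf[di->cur ^ 1], the buffer that just completed */
}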
Example #10
static bool vbuffer_data_ctl_release(struct vbuffer_data *_buf)
{
	VBUFFER_DATA_CTL;
	return atomic_dec(&buf->ref) == 0;
}
Example #11
static bool vbuffer_data_basic_release(struct vbuffer_data *_buf)
{
	VBUFFER_DATA_BASIC;
	return atomic_dec(&buf->ref) == 0;
}
/*
 * iowarrior_write
 */
static ssize_t iowarrior_write(struct file *file,
			       const char __user *user_buffer,
			       size_t count, loff_t *ppos)
{
	struct iowarrior *dev;
	int retval = 0;
	char *buf = NULL;	/* for IOW24 and IOW56 we need a buffer */
	struct urb *int_out_urb = NULL;

	dev = file->private_data;

	mutex_lock(&dev->mutex);
	/* verify that the device wasn't unplugged */
	if (!dev->present) {
		retval = -ENODEV;
		goto exit;
	}
	dbg("%s - minor %d, count = %zd", __func__, dev->minor, count);
	/* if count is 0 we're already done */
	if (count == 0) {
		retval = 0;
		goto exit;
	}
	/* We only accept full reports */
	if (count != dev->report_size) {
		retval = -EINVAL;
		goto exit;
	}
	switch (dev->product_id) {
	case USB_DEVICE_ID_CODEMERCS_IOW24:
	case USB_DEVICE_ID_CODEMERCS_IOWPV1:
	case USB_DEVICE_ID_CODEMERCS_IOWPV2:
	case USB_DEVICE_ID_CODEMERCS_IOW40:
		/* IOW24 and IOW40 use a synchronous call */
		buf = kmalloc(count, GFP_KERNEL);
		if (!buf) {
			retval = -ENOMEM;
			goto exit;
		}
		if (copy_from_user(buf, user_buffer, count)) {
			retval = -EFAULT;
			kfree(buf);
			goto exit;
		}
		retval = usb_set_report(dev->interface, 2, 0, buf, count);
		kfree(buf);
		goto exit;
		break;
	case USB_DEVICE_ID_CODEMERCS_IOW56:
		/* The IOW56 uses asynchronous IO and more urbs */
		if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) {
			/* Wait until we are below the limit for submitted urbs */
			if (file->f_flags & O_NONBLOCK) {
				retval = -EAGAIN;
				goto exit;
			} else {
				retval = wait_event_interruptible(dev->write_wait,
								  (!dev->present || (atomic_read (&dev-> write_busy) < MAX_WRITES_IN_FLIGHT)));
				if (retval) {
					/* we were interrupted by a signal */
					retval = -ERESTART;
					goto exit;
				}
				if (!dev->present) {
					/* The device was unplugged */
					retval = -ENODEV;
					goto exit;
				}
				if (!dev->opened) {
					/* We were closed while waiting for an URB */
					retval = -ENODEV;
					goto exit;
				}
			}
		}
		atomic_inc(&dev->write_busy);
		int_out_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!int_out_urb) {
			retval = -ENOMEM;
			dbg("%s Unable to allocate urb ", __func__);
			goto error_no_urb;
		}
		buf = usb_alloc_coherent(dev->udev, dev->report_size,
					 GFP_KERNEL, &int_out_urb->transfer_dma);
		if (!buf) {
			retval = -ENOMEM;
			dbg("%s Unable to allocate buffer ", __func__);
			goto error_no_buffer;
		}
		usb_fill_int_urb(int_out_urb, dev->udev,
				 usb_sndintpipe(dev->udev,
						dev->int_out_endpoint->bEndpointAddress),
				 buf, dev->report_size,
				 iowarrior_write_callback, dev,
				 dev->int_out_endpoint->bInterval);
		int_out_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		if (copy_from_user(buf, user_buffer, count)) {
			retval = -EFAULT;
			goto error;
		}
		retval = usb_submit_urb(int_out_urb, GFP_KERNEL);
		if (retval) {
			dbg("%s submit error %d for urb nr.%d", __func__,
			    retval, atomic_read(&dev->write_busy));
			goto error;
		}
		/* submit was ok */
		retval = count;
		usb_free_urb(int_out_urb);
		goto exit;
		break;
	default:
		/* what do we have here? An unsupported Product-ID? */
		dev_err(&dev->interface->dev, "%s - not supported for product=0x%x\n",
			__func__, dev->product_id);
		retval = -EFAULT;
		goto exit;
		break;
	}
error:
	usb_free_coherent(dev->udev, dev->report_size, buf,
			  int_out_urb->transfer_dma);
error_no_buffer:
	usb_free_urb(int_out_urb);
error_no_urb:
	atomic_dec(&dev->write_busy);
	wake_up_interruptible(&dev->write_wait);
exit:
	mutex_unlock(&dev->mutex);
	return retval;
}
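
The IOW56 branch above throttles itself with an atomic in-flight counter: writers block in wait_event_interruptible() once MAX_WRITES_IN_FLIGHT URBs are pending, and the write-completion handler decrements the counter and wakes them up. A condensed sketch of just that throttling logic; the names and the limit are illustrative, and callers are assumed to be serialized the way dev->mutex serializes them above:

#include <linux/atomic.h>
#include <linux/wait.h>

#define DEMO_MAX_IN_FLIGHT 4	/* illustrative limit */

static atomic_t demo_in_flight = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(demo_write_wait);

/* submit path: wait until we are below the limit, then take a slot */
static int demo_write_throttle(void)
{
	int rc;

	rc = wait_event_interruptible(demo_write_wait,
			atomic_read(&demo_in_flight) < DEMO_MAX_IN_FLIGHT);
	if (rc)
		return rc;	/* interrupted by a signal */
	atomic_inc(&demo_in_flight);
	return 0;
}

/* completion path: release the slot and wake one blocked writer */
static void demo_write_done(void)
{
	atomic_dec(&demo_in_flight);
	wake_up_interruptible(&demo_write_wait);
}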
static int audio_dev_ctrl_release(struct inode *inode, struct file *file)
{
	MM_DBG("release audio_dev_ctrl\n");
	atomic_dec(&audio_dev_ctrl.opened);
	return 0;
}
Example #14
VOS_VOID HPA_TransferTaskEntry(VOS_VOID)
{
    for ( ; ; )
    {
        if (VOS_OK != VOS_SmP(g_ulHpaTransferSem, 0))
        {
            LogPrint("HPA_TransferTaskEntry: VOS_SmP Fail.\r\n");
#if (VOS_WIN32 != VOS_OS_VER)
            continue;
#else
            break;
#endif
        }

        if ( 0 < (atomic_read(&g_stDspMailBoxTransferCount)) )
        {
            atomic_dec(&g_stDspMailBoxTransferCount);

            PsRegCapture(0, g_usHpaSfnRead, (VOS_UINT32)g_ucHpaCfnRead, PS_REG_SYS_MODE_WCDMA);

            if ( MAIL_BOX_PROTECTWORD_SND == g_ulOmNosigEnable )
            {
                OM_LoopTestProc();
            }

#if (VOS_WIN32 != VOS_OS_VER)
            continue;
#else
            break;
#endif
        }

        if ( 0 < (atomic_read(&g_stGDspMailBoxTransferCount)) )
        {
            atomic_dec(&g_stGDspMailBoxTransferCount);

            PsRegCapture(0, 0, GHPA_GetRealFN(MODEM_ID_0), PS_REG_SYS_MODE_GSM);

#if (VOS_WIN32 != VOS_OS_VER)
            continue;
#else
            break;
#endif
        }

#if  ( FEATURE_MULTI_MODEM == FEATURE_ON )
        if ( 0 < (atomic_read(&g_stGDsp1MailBoxTransferCount)) )
        {
            atomic_dec(&g_stGDsp1MailBoxTransferCount);

            PsRegCapture(0, 0, GHPA_GetRealFN(MODEM_ID_1), PS_REG_SYS_MODE_GSM1);

#if (VOS_WIN32 != VOS_OS_VER)
            continue;
#else
            break;
#endif
        }
#endif
        LogPrint("HPA_TransferTaskEntry: should not.\r\n");
    }
}
Example #15
static void advance_rx_queue(struct sock *sk)
{
	buf_discard(__skb_dequeue(&sk->sk_receive_queue));
	atomic_dec(&tipc_queue_size);
}
Example #16
static void put_compound_page(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/* __split_huge_page_refcount can run under us */
		struct page *page_head = compound_head(page);

		if (likely(page != page_head &&
			   get_page_unless_zero(page_head))) {
			unsigned long flags;

			/*
			 * THP can not break up slab pages so avoid taking
			 * compound_lock().  Slab performs non-atomic bit ops
			 * on page->flags for better performance.  In particular
			 * slab_unlock() in slub used to be a hot path.  It is
			 * still hot on arches that do not support
			 * this_cpu_cmpxchg_double().
			 */
			if (PageSlab(page_head) || PageHeadHuge(page_head)) {
				if (likely(PageTail(page))) {
					/*
					 * __split_huge_page_refcount
					 * cannot race here.
					 */
					VM_BUG_ON(!PageHead(page_head));
					atomic_dec(&page->_mapcount);
					if (put_page_testzero(page_head))
						VM_BUG_ON(1);
					if (put_page_testzero(page_head))
						__put_compound_page(page_head);
					return;
				} else
					/*
					 * __split_huge_page_refcount
					 * run before us, "page" was a
					 * THP tail. The split
					 * page_head has been freed
					 * and reallocated as slab or
					 * hugetlbfs page of smaller
					 * order (only possible if
					 * reallocated as slab on
					 * x86).
					 */
					goto skip_lock;
			}
			/*
			 * page_head wasn't a dangling pointer but it
			 * may not be a head page anymore by the time
			 * we obtain the lock. That is ok as long as it
			 * can't be freed from under us.
			 */
			flags = compound_lock_irqsave(page_head);
			if (unlikely(!PageTail(page))) {
				/* __split_huge_page_refcount run before us */
				compound_unlock_irqrestore(page_head, flags);
skip_lock:
				if (put_page_testzero(page_head)) {
					/*
					 * The head page may have been
					 * freed and reallocated as a
					 * compound page of smaller
					 * order and then freed again.
					 * All we know is that it
					 * cannot have become: a THP
					 * page, a compound page of
					 * higher order, a tail page.
					 * That is because we still
					 * hold the refcount of the
					 * split THP tail and
					 * page_head was the THP head
					 * before the split.
					 */
					if (PageHead(page_head))
						__put_compound_page(page_head);
					else
						__put_single_page(page_head);
				}
out_put_single:
				if (put_page_testzero(page))
					__put_single_page(page);
				return;
			}
			VM_BUG_ON(page_head != page->first_page);
			/*
			 * We can release the refcount taken by
			 * get_page_unless_zero() now that
			 * __split_huge_page_refcount() is blocked on
			 * the compound_lock.
			 */
			if (put_page_testzero(page_head))
				VM_BUG_ON(1);
			/* __split_huge_page_refcount will wait now */
			VM_BUG_ON(page_mapcount(page) <= 0);
			atomic_dec(&page->_mapcount);
			VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
			VM_BUG_ON(atomic_read(&page->_count) != 0);
			compound_unlock_irqrestore(page_head, flags);

			if (put_page_testzero(page_head)) {
				if (PageHead(page_head))
					__put_compound_page(page_head);
				else
					__put_single_page(page_head);
			}
		} else {
			/* page_head is a dangling pointer */
			VM_BUG_ON(PageTail(page));
			goto out_put_single;
		}
	} else if (put_page_testzero(page)) {
		if (PageHead(page))
			__put_compound_page(page);
		else
			__put_single_page(page);
	}
}
Example #17
static int release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport;
	struct sk_buff *buf;
	int res;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */

	if (sk == NULL)
		return 0;

	tport = tipc_sk_port(sk);
	lock_sock(sk);

	/*
	 * Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer)
	 */

	while (sock->state != SS_DISCONNECTING) {
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf == NULL)
			break;
		atomic_dec(&tipc_queue_size);
		if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf)))
			buf_discard(buf);
		else {
			if ((sock->state == SS_CONNECTING) ||
			    (sock->state == SS_CONNECTED)) {
				sock->state = SS_DISCONNECTING;
				tipc_disconnect(tport->ref);
			}
			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		}
	}

	/*
	 * Delete TIPC port; this ensures no more messages are queued
	 * (also disconnects an active connection & sends a 'FIN-' to peer)
	 */

	res = tipc_deleteport(tport->ref);

	/* Discard any remaining (connection-based) messages in receive queue */

	discard_rx_queue(sk);

	/* Reject any messages that accumulated in backlog queue */

	sock->state = SS_DISCONNECTING;
	release_sock(sk);

	sock_put(sk);
	sock->sk = NULL;

	atomic_dec(&tipc_user_count);
	return res;
}
/**
 * map_extent_mft_record - load an extent inode and attach it to its base
 * @base_ni:	base ntfs inode
 * @mref:	mft reference of the extent inode to load (in little endian)
 * @ntfs_ino:	on successful return, pointer to the ntfs_inode structure
 *
 * Load the extent mft record @mref and attach it to its base inode @base_ni.
 * Return the mapped extent mft record if IS_ERR(result) is false. Otherwise
 * PTR_ERR(result) gives the negative error code.
 *
 * On successful return, @ntfs_ino contains a pointer to the ntfs_inode
 * structure of the mapped extent inode.
 */
MFT_RECORD *map_extent_mft_record(ntfs_inode *base_ni, MFT_REF mref,
		ntfs_inode **ntfs_ino)
{
	MFT_RECORD *m;
	ntfs_inode *ni = NULL;
	ntfs_inode **extent_nis = NULL;
	int i;
	unsigned long mft_no = MREF_LE(mref);
	u16 seq_no = MSEQNO_LE(mref);
	BOOL destroy_ni = FALSE;

	ntfs_debug("Mapping extent mft record 0x%lx (base mft record 0x%lx).",
			mft_no, base_ni->mft_no);
	/* Make sure the base ntfs inode doesn't go away. */
	atomic_inc(&base_ni->count);
	/*
	 * Check if this extent inode has already been added to the base inode,
	 * in which case just return it. If not found, add it to the base
	 * inode before returning it.
	 */
	down(&base_ni->extent_lock);
	if (base_ni->nr_extents > 0) {
		extent_nis = base_ni->ext.extent_ntfs_inos;
		for (i = 0; i < base_ni->nr_extents; i++) {
			if (mft_no != extent_nis[i]->mft_no)
				continue;
			ni = extent_nis[i];
			/* Make sure the ntfs inode doesn't go away. */
			atomic_inc(&ni->count);
			break;
		}
	}
	if (likely(ni != NULL)) {
		up(&base_ni->extent_lock);
		atomic_dec(&base_ni->count);
		/* We found the record; just have to map and return it. */
		m = map_mft_record(ni);
		/* map_mft_record() has incremented this on success. */
		atomic_dec(&ni->count);
		if (likely(!IS_ERR(m))) {
			/* Verify the sequence number. */
			if (likely(le16_to_cpu(m->sequence_number) == seq_no)) {
				ntfs_debug("Done 1.");
				*ntfs_ino = ni;
				return m;
			}
			unmap_mft_record(ni);
			ntfs_error(base_ni->vol->sb, "Found stale extent mft "
					"reference! Corrupt file system. "
					"Run chkdsk.");
			return ERR_PTR(-EIO);
		}
map_err_out:
		ntfs_error(base_ni->vol->sb, "Failed to map extent "
				"mft record, error code %ld.", -PTR_ERR(m));
		return m;
	}
	/* Record wasn't there. Get a new ntfs inode and initialize it. */
	ni = ntfs_new_extent_inode(base_ni->vol->sb, mft_no);
	if (unlikely(!ni)) {
		up(&base_ni->extent_lock);
		atomic_dec(&base_ni->count);
		return ERR_PTR(-ENOMEM);
	}
	ni->vol = base_ni->vol;
	ni->seq_no = seq_no;
	ni->nr_extents = -1;
	ni->ext.base_ntfs_ino = base_ni;
	/* Now map the record. */
	m = map_mft_record(ni);
	if (unlikely(IS_ERR(m))) {
		up(&base_ni->extent_lock);
		atomic_dec(&base_ni->count);
		ntfs_clear_extent_inode(ni);
		goto map_err_out;
	}
	/* Verify the sequence number. */
	if (unlikely(le16_to_cpu(m->sequence_number) != seq_no)) {
		ntfs_error(base_ni->vol->sb, "Found stale extent mft "
				"reference! Corrupt file system. Run chkdsk.");
		destroy_ni = TRUE;
		m = ERR_PTR(-EIO);
		goto unm_err_out;
	}
	/* Attach extent inode to base inode, reallocating memory if needed. */
	if (!(base_ni->nr_extents & 3)) {
		ntfs_inode **tmp;
		int new_size = (base_ni->nr_extents + 4) * sizeof(ntfs_inode *);

		tmp = (ntfs_inode **)kmalloc(new_size, GFP_NOFS);
		if (unlikely(!tmp)) {
			ntfs_error(base_ni->vol->sb, "Failed to allocate "
					"internal buffer.");
			destroy_ni = TRUE;
			m = ERR_PTR(-ENOMEM);
			goto unm_err_out;
		}
		if (base_ni->ext.extent_ntfs_inos) {
			memcpy(tmp, base_ni->ext.extent_ntfs_inos, new_size -
					4 * sizeof(ntfs_inode *));
			kfree(base_ni->ext.extent_ntfs_inos);
		}
		base_ni->ext.extent_ntfs_inos = tmp;
	}
	base_ni->ext.extent_ntfs_inos[base_ni->nr_extents++] = ni;
	up(&base_ni->extent_lock);
	atomic_dec(&base_ni->count);
	ntfs_debug("Done 2.");
	*ntfs_ino = ni;
	return m;
unm_err_out:
	unmap_mft_record(ni);
	up(&base_ni->extent_lock);
	atomic_dec(&base_ni->count);
	/*
	 * If the extent inode was not attached to the base inode we need to
	 * release it or we will leak memory.
	 */
	if (destroy_ni)
		ntfs_clear_extent_inode(ni);
	return m;
}
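A minimal caller sketch for the interface documented above. This is hypothetical and not part of the source; the wrapper name and the surrounding error handling are assumptions. It shows the IS_ERR()/PTR_ERR() return convention and the unmap_mft_record() call that balances a successful mapping.

static int example_use_extent_record(ntfs_inode *base_ni, MFT_REF mref)
{
	ntfs_inode *ext_ni;
	MFT_RECORD *m;

	m = map_extent_mft_record(base_ni, mref, &ext_ni);
	if (IS_ERR(m))
		return PTR_ERR(m);	/* negative error code */
	/* ... read or modify the mapped extent mft record here ... */
	unmap_mft_record(ext_ni);	/* release the mapping taken above */
	return 0;
}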
Example #19
/*
 * spin_lock_irqsave() is expected to be held on entry.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_DISCONNECTING))
		return;

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->kthreads_assigned) > 0 ||
	    atomic_read(&ch->references) > 0) {
		return;
	}
	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
		if (xpc_arch_ops.partition_engaged(ch->partid))
			return;

	} else {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
			return;

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_arch_ops.send_chctl_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY))
			return;
	}

	/* wake those waiting for notify completion */
	if (atomic_read(&ch->n_to_notify) > 0) {
		/* we do callout while holding ch->lock, callout can't block */
		xpc_arch_ops.notify_senders_of_disconnect(ch);
	}

	/* both sides are disconnected now */

	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		xpc_disconnect_callout(ch, xpDisconnected);
		spin_lock_irqsave(&ch->lock, *irq_flags);
	}

	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

	/* it's now safe to free the channel's message queues */
	xpc_arch_ops.teardown_msg_structures(ch);

	ch->func = NULL;
	ch->key = NULL;
	ch->entry_size = 0;
	ch->local_nentries = 0;
	ch->remote_nentries = 0;
	ch->kthreads_assigned_limit = 0;
	ch->kthreads_idle_limit = 0;

	/*
	 * Mark the channel disconnected and clear all other flags, including
	 * XPC_C_SETUP (because of call to
	 * xpc_arch_ops.teardown_msg_structures()) but not including
	 * XPC_C_WDISCONNECT (if it was set).
	 */
	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

	atomic_dec(&part->nchannels_active);

	if (channel_was_connected) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			 "reason=%d\n", ch->number, ch->partid, ch->reason);
	}

	if (ch->flags & XPC_C_WDISCONNECT) {
		/* we won't lose the CPU since we're holding ch->lock */
		complete(&ch->wdisconnect_wait);
	} else if (ch->delayed_chctl_flags) {
		if (part->act_state != XPC_P_AS_DEACTIVATING) {
			/* time to take action on any delayed chctl flags */
			spin_lock(&part->chctl_lock);
			part->chctl.flags[ch->number] |=
			    ch->delayed_chctl_flags;
			spin_unlock(&part->chctl_lock);
		}
		ch->delayed_chctl_flags = 0;
	}
}
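A hypothetical caller sketch, not taken from the source, illustrating the locking contract stated in the comment above xpc_process_disconnect(): ch->lock is acquired with spin_lock_irqsave() before the call, and a pointer to the same irq_flags is passed in because the function may drop and re-acquire the lock around the disconnect callout.

static void example_process_disconnect_locked(struct xpc_channel *ch)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	xpc_process_disconnect(ch, &irq_flags);
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}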
Example #20
static int sock_ep_close(struct fid *fid)
{
	struct sock_ep *sock_ep;
	char c = 0;

	switch(fid->fclass) {
	case FI_CLASS_EP:
		sock_ep = container_of(fid, struct sock_ep, ep.fid);
		break;

	case FI_CLASS_SEP:
		sock_ep = container_of(fid, struct sock_ep, ep.fid);
		break;

	default:
		return -FI_EINVAL;
	}

	if (atomic_get(&sock_ep->ref) || atomic_get(&sock_ep->num_rx_ctx) ||
	    atomic_get(&sock_ep->num_tx_ctx))
		return -FI_EBUSY;

	if (sock_ep->fclass != FI_CLASS_SEP && !sock_ep->tx_shared) {
		sock_pe_remove_tx_ctx(sock_ep->tx_array[0]);
		sock_tx_ctx_free(sock_ep->tx_array[0]);
	}

	if (sock_ep->fclass != FI_CLASS_SEP && !sock_ep->rx_shared) {
		sock_pe_remove_rx_ctx(sock_ep->rx_array[0]);
		sock_rx_ctx_free(sock_ep->rx_array[0]);
	}

	free(sock_ep->tx_array);
	free(sock_ep->rx_array);
	
	if (sock_ep->src_addr)
		free(sock_ep->src_addr);
	if (sock_ep->dest_addr)
		free(sock_ep->dest_addr);

	if (sock_ep->ep_type == FI_EP_MSG) {

		sock_ep->cm.do_listen = 0;

		if (write(sock_ep->cm.signal_fds[0], &c, 1) != 1) {
			SOCK_LOG_INFO("Failed to signal\n");
		}

		if (sock_ep->cm.listener_thread && 
		    pthread_join(sock_ep->cm.listener_thread, NULL)) {
			SOCK_LOG_ERROR("pthread join failed (%d)\n", errno);
		}

		close(sock_ep->cm.signal_fds[0]);
		close(sock_ep->cm.signal_fds[1]);
	}
	
	sock_ep->listener.do_listen = 0;
	if (write(sock_ep->listener.signal_fds[0], &c, 1) != 1) {
		SOCK_LOG_INFO("Failed to signal\n");
	}
	
	if (pthread_join(sock_ep->listener.listener_thread, NULL)) {
		SOCK_LOG_ERROR("pthread join failed (%d)\n", errno);
	}
	
	close(sock_ep->listener.signal_fds[0]);
	close(sock_ep->listener.signal_fds[1]);
	sock_fabric_remove_service(sock_ep->domain->fab, 
				   atoi(sock_ep->listener.service));

	atomic_dec(&sock_ep->domain->ref);
	free(sock_ep);
	return 0;
}
Example #21
static int send_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t total_len)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
	struct sk_buff *buf;
	int needs_conn;
	int res = -EINVAL;

	if (unlikely(!dest))
		return -EDESTADDRREQ;
	if (unlikely((m->msg_namelen < sizeof(*dest)) ||
		     (dest->family != AF_TIPC)))
		return -EINVAL;

	needs_conn = (sock->state != SS_READY);
	if (unlikely(needs_conn)) {
		if (sock->state == SS_LISTENING)
			return -EPIPE;
		if (sock->state != SS_UNCONNECTED)
			return -EISCONN;
		if ((tsock->p->published) ||
		    ((sock->type == SOCK_STREAM) && (total_len != 0)))
			return -EOPNOTSUPP;
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tsock->p->conn_type = dest->addr.name.name.type;
			tsock->p->conn_instance = dest->addr.name.name.instance;
		}
	}

	if (down_interruptible(&tsock->sem))
		return -ERESTARTSYS;

	if (needs_conn) {

		/* Abort any pending connection attempts (very unlikely) */

		while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
			atomic_dec(&tipc_queue_size);
		}

		sock->state = SS_CONNECTING;
	}

	do {
		if (dest->addrtype == TIPC_ADDR_NAME) {
			if ((res = dest_name_check(dest, m)))
				goto exit;
			res = tipc_send2name(tsock->p->ref,
					     &dest->addr.name.name,
					     dest->addr.name.domain,
					     m->msg_iovlen,
					     m->msg_iov);
		}
		else if (dest->addrtype == TIPC_ADDR_ID) {
			res = tipc_send2port(tsock->p->ref,
					     &dest->addr.id,
					     m->msg_iovlen,
					     m->msg_iov);
		}
		else if (dest->addrtype == TIPC_ADDR_MCAST) {
			if (needs_conn) {
				res = -EOPNOTSUPP;
				goto exit;
			}
			if ((res = dest_name_check(dest, m)))
				goto exit;
			res = tipc_multicast(tsock->p->ref,
					     &dest->addr.nameseq,
					     0,
					     m->msg_iovlen,
					     m->msg_iov);
		}
		if (likely(res != -ELINKCONG)) {
exit:
			up(&tsock->sem);
			return res;
		}
		if (m->msg_flags & MSG_DONTWAIT) {
			res = -EWOULDBLOCK;
			goto exit;
		}
		if (wait_event_interruptible(*sock->sk->sk_sleep,
					     !tsock->p->congested)) {
		    res = -ERESTARTSYS;
		    goto exit;
		}
	} while (1);
}
Example #22
static void mwifiex_usb_rx_complete(struct urb *urb)
{
	struct urb_context *context = (struct urb_context *)urb->context;
	struct mwifiex_adapter *adapter = context->adapter;
	struct sk_buff *skb = context->skb;
	struct usb_card_rec *card;
	int recv_length = urb->actual_length;
	int size, status;

	if (!adapter || !adapter->card) {
		pr_err("mwifiex adapter or card structure is not valid\n");
		return;
	}

	card = (struct usb_card_rec *)adapter->card;
	if (card->rx_cmd_ep == context->ep)
		atomic_dec(&card->rx_cmd_urb_pending);
	else
		atomic_dec(&card->rx_data_urb_pending);

	if (recv_length) {
		if (urb->status || (adapter->surprise_removed)) {
			dev_err(adapter->dev,
				"URB status is failed: %d\n", urb->status);
			/* Do not free skb in case of command ep */
			if (card->rx_cmd_ep != context->ep)
				dev_kfree_skb_any(skb);
			goto setup_for_next;
		}
		if (skb->len > recv_length)
			skb_trim(skb, recv_length);
		else
			skb_put(skb, recv_length - skb->len);

		atomic_inc(&adapter->rx_pending);
		status = mwifiex_usb_recv(adapter, skb, context->ep);

		dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
			recv_length, status);
		if (status == -EINPROGRESS) {
			queue_work(adapter->workqueue, &adapter->main_work);

			/* urb for data_ep is re-submitted now;
			 * urb for cmd_ep will be re-submitted in callback
			 * mwifiex_usb_recv_complete
			 */
			if (card->rx_cmd_ep == context->ep)
				return;
		} else {
			atomic_dec(&adapter->rx_pending);
			if (status == -1)
				dev_err(adapter->dev,
					"received data processing failed!\n");

			/* Do not free skb in case of command ep */
			if (card->rx_cmd_ep != context->ep)
				dev_kfree_skb_any(skb);
		}
	} else if (urb->status) {
		if (!adapter->is_suspended) {
			dev_warn(adapter->dev,
				 "Card is removed: %d\n", urb->status);
			adapter->surprise_removed = true;
		}
		dev_kfree_skb_any(skb);
		return;
	} else {
		/* Do not free skb in case of command ep */
		if (card->rx_cmd_ep != context->ep)
			dev_kfree_skb_any(skb);

		/* fall through setup_for_next */
	}

setup_for_next:
	if (card->rx_cmd_ep == context->ep)
		size = MWIFIEX_RX_CMD_BUF_SIZE;
	else
		size = MWIFIEX_RX_DATA_BUF_SIZE;

	mwifiex_usb_submit_rx_urb(context, size);

	return;
}
Example #23
static void avc_node_kill(struct avc_node *node)
{
	kmem_cache_free(avc_node_cachep, node);
	avc_cache_stats_incr(frees);
	atomic_dec(&avc_cache.active_nodes);
}
Example #24
/* This function writes a command/data packet to the card. */
static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
				    struct sk_buff *skb,
				    struct mwifiex_tx_param *tx_param)
{
	struct usb_card_rec *card = adapter->card;
	struct urb_context *context;
	u8 *data = (u8 *)skb->data;
	struct urb *tx_urb;

	if (adapter->is_suspended) {
		dev_err(adapter->dev,
			"%s: not allowed while suspended\n", __func__);
		return -1;
	}

	if (adapter->surprise_removed) {
		dev_err(adapter->dev, "%s: device removed\n", __func__);
		return -1;
	}

	if (ep == card->tx_data_ep &&
	    atomic_read(&card->tx_data_urb_pending) >= MWIFIEX_TX_DATA_URB) {
		return -EBUSY;
	}

	dev_dbg(adapter->dev, "%s: ep=%d\n", __func__, ep);

	if (ep == card->tx_cmd_ep) {
		context = &card->tx_cmd;
	} else {
		if (card->tx_data_ix >= MWIFIEX_TX_DATA_URB)
			card->tx_data_ix = 0;
		context = &card->tx_data_list[card->tx_data_ix++];
	}

	context->adapter = adapter;
	context->ep = ep;
	context->skb = skb;
	tx_urb = context->urb;

	usb_fill_bulk_urb(tx_urb, card->udev, usb_sndbulkpipe(card->udev, ep),
			  data, skb->len, mwifiex_usb_tx_complete,
			  (void *)context);

	tx_urb->transfer_flags |= URB_ZERO_PACKET;

	if (ep == card->tx_cmd_ep)
		atomic_inc(&card->tx_cmd_urb_pending);
	else
		atomic_inc(&card->tx_data_urb_pending);

	if (usb_submit_urb(tx_urb, GFP_ATOMIC)) {
		dev_err(adapter->dev, "%s: usb_submit_urb failed\n", __func__);
		if (ep == card->tx_cmd_ep) {
			atomic_dec(&card->tx_cmd_urb_pending);
		} else {
			atomic_dec(&card->tx_data_urb_pending);
			if (card->tx_data_ix)
				card->tx_data_ix--;
			else
				card->tx_data_ix = MWIFIEX_TX_DATA_URB;
		}

		return -1;
	} else {
		if (ep == card->tx_data_ep &&
		    atomic_read(&card->tx_data_urb_pending) ==
							MWIFIEX_TX_DATA_URB)
			return -ENOSR;
	}

	return -EINPROGRESS;
}
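A hypothetical caller sketch, not from the source; the wrapper name and the NULL tx_param are assumptions. It shows one plausible reading of the return codes seen above: -EINPROGRESS means the URB was queued, -ENOSR means it was queued but the data pipe is now full, and -EBUSY or -1 mean the packet was not handed to the USB core.

static int example_usb_send(struct mwifiex_adapter *adapter, u8 ep,
			    struct sk_buff *skb)
{
	int ret = mwifiex_usb_host_to_card(adapter, ep, skb, NULL);

	if (ret == -EINPROGRESS || ret == -ENOSR)
		return 0;	/* packet queued; skb now owned by the driver */
	return ret;		/* -EBUSY or -1: skb was not queued */
}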
Example #25
int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
{
	struct ivtv *itv = s->itv;
	DECLARE_WAITQUEUE(wait, current);
	int cap_type;
	int stopmode;

	if (s->v4l2dev == NULL)
		return -EINVAL;

	/* This function assumes that you are allowed to stop the capture
	   and that we are actually capturing */

	IVTV_DEBUG_INFO("Stop Capture\n");

	if (s->type == IVTV_DEC_STREAM_TYPE_VOUT)
		return 0;
	if (atomic_read(&itv->capturing) == 0)
		return 0;

	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_YUV:
		cap_type = 1;
		break;
	case IVTV_ENC_STREAM_TYPE_PCM:
		cap_type = 1;
		break;
	case IVTV_ENC_STREAM_TYPE_VBI:
		cap_type = 1;
		break;
	case IVTV_ENC_STREAM_TYPE_MPG:
	default:
		cap_type = 0;
		break;
	}

	/* Stop Capture Mode */
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) {
		stopmode = 0;
	} else {
		stopmode = 1;
	}

	/* end_capture */
	/* when: 0 =  end of GOP  1 = NOW!, type: 0 = mpeg, subtype: 3 = video+audio */
	ivtv_vapi(itv, CX2341X_ENC_STOP_CAPTURE, 3, stopmode, cap_type, s->subtype);

	if (!test_bit(IVTV_F_S_PASSTHROUGH, &s->s_flags)) {
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG && gop_end) {
			/* only run these if we're shutting down the last cap */
			unsigned long duration;
			unsigned long then = jiffies;

			add_wait_queue(&itv->eos_waitq, &wait);

			set_current_state(TASK_INTERRUPTIBLE);

			/* wait 2s for EOS interrupt */
			while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
				time_before(jiffies,
					    then + msecs_to_jiffies(2000))) {
				schedule_timeout(msecs_to_jiffies(10));
			}

			/* To convert jiffies to ms, we must multiply by 1000
			 * and divide by HZ.  To avoid runtime division, we
			 * convert this to multiplication by 1000/HZ.
			 * Since integer division truncates, we get the best
			 * accuracy if we do a rounding calculation of the constant.
			 * Think of the case where HZ is 1024.
			 */
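			/*
			 * Illustrative arithmetic (not from the source): with
			 * HZ == 1024 the rounded constant is
			 * (1000 + 512) / 1024 == 1, so each jiffy counts as
			 * 1 ms, whereas plain 1000 / 1024 would truncate to 0
			 * and always report a duration of 0 ms.
			 */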
			duration = ((1000 + HZ / 2) / HZ) * (jiffies - then);

			if (!test_bit(IVTV_F_I_EOS, &itv->i_flags)) {
				IVTV_DEBUG_WARN("%s: EOS interrupt not received! stopping anyway.\n", s->name);
				IVTV_DEBUG_WARN("%s: waited %lu ms.\n", s->name, duration);
			} else {
				IVTV_DEBUG_INFO("%s: EOS took %lu ms to occur.\n", s->name, duration);
			}
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&itv->eos_waitq, &wait);
			set_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
		}

		/* Handle any pending interrupts */
		ivtv_msleep_timeout(100, 1);
	}

	atomic_dec(&itv->capturing);

	/* Clear capture and no-read bits */
	clear_bit(IVTV_F_S_STREAMING, &s->s_flags);

	if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
		ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VBI_CAP);

	if (atomic_read(&itv->capturing) > 0) {
		return 0;
	}

	/* Set the following Interrupt mask bits for capture */
	ivtv_set_irq_mask(itv, IVTV_IRQ_MASK_CAPTURE);
	del_timer(&itv->dma_timer);

	/* event notification (off) */
	if (test_and_clear_bit(IVTV_F_I_DIG_RST, &itv->i_flags)) {
		/* type: 0 = refresh */
		/* on/off: 0 = off, intr: 0x10000000, mbox_id: -1: none */
		ivtv_vapi(itv, CX2341X_ENC_SET_EVENT_NOTIFICATION, 4, 0, 0, IVTV_IRQ_ENC_VIM_RST, -1);
		ivtv_set_irq_mask(itv, IVTV_IRQ_ENC_VIM_RST);
	}

	wake_up(&s->waitq);

	return 0;
}
Example #26
static int mwifiex_usb_data_complete(struct mwifiex_adapter *adapter)
{
	atomic_dec(&adapter->rx_pending);

	return 0;
}
Example #27
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_dup_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpumask_clear(mm_cpumask(mm));
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;
	retval = ksm_fork(mm, oldmm);
	if (retval)
		goto out;
	retval = khugepaged_fork(mm, oldmm);
	if (retval)
		goto out;

	prev = NULL;
	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		INIT_LIST_HEAD(&tmp->anon_vma_chain);
		pol = mpol_dup(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_mm = mm;
		if (anon_vma_fork(tmp, mpnt))
			goto fail_nomem_anon_vma_fork;
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_next = tmp->vm_prev = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_path.dentry->d_inode;
			struct address_space *mapping = file->f_mapping;

			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
			spin_lock(&mapping->i_mmap_lock);
			if (tmp->vm_flags & VM_SHARED)
				mapping->i_mmap_writable++;
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(mapping);
			/* insert tmp into the share list, just after mpnt */
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(mapping);
			spin_unlock(&mapping->i_mmap_lock);
		}

		/*
		 * Clear hugetlb-related page reserves for children. This only
		 * affects MAP_PRIVATE mappings. Faults generated by the child
		 * are not guaranteed to succeed, even if read-only
		 */
		if (is_vm_hugetlb_page(tmp))
			reset_vma_resv_huge_pages(tmp);

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;
		tmp->vm_prev = prev;
		prev = tmp;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
Example #28
/* returns 0 if the request was queued OK, <0 on error (-EAGAIN on send queue overflow) */
int ix_sa_crypto_perform(struct ix_sa_ctx *sa_ctx, u8 *data, void *ptr,
	int datalen, int c_offs, int c_len, int a_offs, int a_len,
	int hmac, char *iv, int encrypt)
{
	struct npe_crypt_cont *cr_cont;
	struct npe_cont *cont;
	u32 data_phys;
	int ret = -ENOMEM;
	struct ix_sa_dir *dir;

	dir = encrypt ? &sa_ctx->encrypt : &sa_ctx->decrypt;

	if (sa_ctx->state != STATE_REGISTERED)
		return -ENOENT;

	cr_cont = ix_sa_get_cont(sa_ctx->master);
	if (!cr_cont)
		return ret;

	cr_cont->ctl.crypt.sa_ctx = sa_ctx;
	cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(dir->npe_ctx_phys);
	cr_cont->ctl.crypt.oper_type = OP_PERFORM;
	cr_cont->ctl.crypt.mode = dir->npe_mode;
	cr_cont->ctl.crypt.init_len = dir->npe_ctx_idx;

	if (sa_ctx->c_algo) {
		cr_cont->ctl.crypt.crypt_offs = cpu_to_npe16(c_offs);
		cr_cont->ctl.crypt.crypt_len = cpu_to_npe16(c_len);
		if (sa_ctx->c_algo->iv_len) {
			if (!iv) {
				ret = -EINVAL;
				goto err_cr;
			}
			memcpy(cr_cont->ctl.crypt.iv, iv,
					sa_ctx->c_algo->iv_len);
		}
	}

	if (sa_ctx->h_algo) {
		/* prepare hashing */
		cr_cont->ctl.crypt.auth_offs = cpu_to_npe16(a_offs);
		cr_cont->ctl.crypt.auth_len = cpu_to_npe16(a_len);
	}

	data_phys = dma_map_single(sa_ctx->master->npe_dev,
			data, datalen, DMA_BIDIRECTIONAL);
	if (hmac)
		cr_cont->ctl.crypt.addr.icv = cpu_to_npe32(data_phys + hmac);

	/* Prepare the data ptr */
	cont = qmgr_get_cont(dev_get_drvdata(sa_ctx->master->sendq->dev));
	if (!cont) {
		goto err_unmap;
	}

	cont->data = ptr;
	cont->eth.next = 0;
	cont->eth.buf_len = cpu_to_npe16(datalen);
	cont->eth.pkt_len = 0;

	cont->eth.phys_addr = cpu_to_npe32(data_phys);
	cr_cont->ctl.crypt.src_buf = cpu_to_npe32(cont->phys);

	atomic_inc(&sa_ctx->use_cnt);
	queue_put_entry(sa_ctx->master->sendq, cr_cont->phys);
	if (queue_stat(sa_ctx->master->sendq) != 2) {
		return 0;
	}

	/* overflow */
	printk("%s: Overflow\n", __FUNCTION__);
	ret = -EAGAIN;
	atomic_dec(&sa_ctx->use_cnt);
	qmgr_return_cont(dev_get_drvdata(sa_ctx->master->sendq->dev), cont);

err_unmap:
	dma_unmap_single(sa_ctx->master->npe_dev, data_phys, datalen,
			DMA_BIDIRECTIONAL);
err_cr:
	ix_sa_return_cont(sa_ctx->master, cr_cont);

	return ret;
}
Example #29
int rtlx_open(int index, int can_sleep)
{
	struct rtlx_info **p;
	struct rtlx_channel *chan;
	enum rtlx_state state;
	int ret = 0;

	if (index >= RTLX_CHANNELS) {
		printk(KERN_DEBUG "rtlx_open index out of range\n");
		return -ENOSYS;
	}

	if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
		printk(KERN_DEBUG "rtlx_open channel %d already opened\n",
		       index);
		ret = -EBUSY;
		goto out_fail;
	}

	if (rtlx == NULL) {
		if( (p = vpe_get_shared(tclimit)) == NULL) {
		    if (can_sleep) {
			__wait_event_interruptible(channel_wqs[index].lx_queue,
				(p = vpe_get_shared(tclimit)), ret);
			if (ret)
				goto out_fail;
		    } else {
			printk(KERN_DEBUG "No SP program loaded, and device "
					"opened with O_NONBLOCK\n");
			ret = -ENOSYS;
			goto out_fail;
		    }
		}

		smp_rmb();
		if (*p == NULL) {
			if (can_sleep) {
				DEFINE_WAIT(wait);

				for (;;) {
					prepare_to_wait(
						&channel_wqs[index].lx_queue,
						&wait, TASK_INTERRUPTIBLE);
					smp_rmb();
					if (*p != NULL)
						break;
					if (!signal_pending(current)) {
						schedule();
						continue;
					}
					ret = -ERESTARTSYS;
					goto out_fail;
				}
				finish_wait(&channel_wqs[index].lx_queue, &wait);
			} else {
				pr_err(" *vpe_get_shared is NULL. "
				       "Has an SP program been loaded?\n");
				ret = -ENOSYS;
				goto out_fail;
			}
		}

		if ((unsigned int)*p < KSEG0) {
			printk(KERN_WARNING "vpe_get_shared returned an "
			       "invalid pointer maybe an error code %d\n",
			       (int)*p);
			ret = -ENOSYS;
			goto out_fail;
		}

		if ((ret = rtlx_init(*p)) < 0)
			goto out_ret;
	}

	chan = &rtlx->channel[index];

	state = xchg(&chan->lx_state, RTLX_STATE_OPENED);
	if (state == RTLX_STATE_OPENED) {
		ret = -EBUSY;
		goto out_fail;
	}

out_fail:
	smp_mb();
	atomic_dec(&channel_wqs[index].in_open);
	smp_mb();

out_ret:
	return ret;
}
Example #30
static int voice_thread(void *data)
{
	struct voice_data *v = (struct voice_data *)data;
	int rc = 0;

	MM_INFO("voice_thread() start\n");

	while (!kthread_should_stop()) {
		wait_for_completion(&v->complete);
		init_completion(&v->complete);

		MM_DBG(" voc_event=%d, voice state =%d, dev_event=%d\n",
				v->voc_event, v->voc_state, v->dev_event);
		switch (v->voc_event) {
		case VOICE_ACQUIRE_START:
			/* check if dev_state = READY */
			/* if ready, send device_info and acquire_done */
			/* if not ready, block to wait the dev_state = READY */
			if ((v->voc_state == VOICE_INIT) ||
				(v->voc_state == VOICE_RELEASE)) {
				if (v->dev_state == DEV_READY) {
					mutex_lock(&voice.voc_lock);
					voice_change_sample_rate(v);
					rc = voice_cmd_device_info(v);
					rc = voice_cmd_acquire_done(v);
					v->voc_state = VOICE_ACQUIRE;
					mutex_unlock(&voice.voc_lock);
					broadcast_event(
					AUDDEV_EVT_VOICE_STATE_CHG,
					VOICE_STATE_INCALL, SESSION_IGNORE);
				} else {
					rc = wait_event_interruptible(
					v->dev_wait,
					(v->dev_state == DEV_READY)
					|| (atomic_read(&v->rel_start_flag)
						== 1));
					if (atomic_read(&v->rel_start_flag)
						== 1) {
						v->voc_state = VOICE_RELEASE;
						atomic_dec(&v->rel_start_flag);
						msm_snddev_withdraw_freq(0,
						SNDDEV_CAP_TX, AUDDEV_CLNT_VOC);
						broadcast_event(
						AUDDEV_EVT_VOICE_STATE_CHG,
						VOICE_STATE_OFFCALL,
						SESSION_IGNORE);
					} else {
						mutex_lock(&voice.voc_lock);
						voice_change_sample_rate(v);
						rc = voice_cmd_device_info(v);
						rc = voice_cmd_acquire_done(v);
						v->voc_state = VOICE_ACQUIRE;
						mutex_unlock(&voice.voc_lock);
						broadcast_event(
						AUDDEV_EVT_VOICE_STATE_CHG,
						VOICE_STATE_INCALL,
						SESSION_IGNORE);
					}
				}
			} else
				MM_ERR("Get this event at the wrong state\n");
			if (atomic_read(&v->acq_start_flag))
				atomic_dec(&v->acq_start_flag);
			break;
		case VOICE_RELEASE_START:
			MM_DBG("broadcast voice call end\n");
			broadcast_event(AUDDEV_EVT_VOICE_STATE_CHG,
					VOICE_STATE_OFFCALL, SESSION_IGNORE);
			if ((v->dev_state == DEV_REL_DONE) ||
					(v->dev_state == DEV_INIT)) {
				v->voc_state = VOICE_RELEASE;
				msm_snddev_withdraw_freq(0, SNDDEV_CAP_TX,
					AUDDEV_CLNT_VOC);
			} else {
				/* wait for the dev_state = RELEASE */
				rc = wait_event_interruptible(v->dev_wait,
					(v->dev_state == DEV_REL_DONE)
				|| (atomic_read(&v->acq_start_flag) == 1));
				if (atomic_read(&v->acq_start_flag) == 1)
					atomic_dec(&v->acq_start_flag);
				v->voc_state = VOICE_RELEASE;
				msm_snddev_withdraw_freq(0, SNDDEV_CAP_TX,
					AUDDEV_CLNT_VOC);
			}
			if (atomic_read(&v->rel_start_flag))
				atomic_dec(&v->rel_start_flag);
			break;
		case VOICE_CHANGE_START:
			if (v->voc_state == VOICE_ACQUIRE)
				v->voc_state = VOICE_CHANGE;
			else
				MM_ERR("Get this event at the wrong state\n");
			wake_up(&v->voc_wait);
			if (atomic_read(&v->chg_start_flag))
				atomic_dec(&v->chg_start_flag);
			break;
		case VOICE_NETWORK_RECONFIG:
			if ((v->voc_state == VOICE_ACQUIRE)
				|| (v->voc_state == VOICE_CHANGE)) {
				voice_change_sample_rate(v);
				rc = voice_cmd_device_info(v);
				rc = voice_cmd_acquire_done(v);
			}
			break;
		default:
			break;
		}

		switch (v->dev_event) {
		case DEV_CHANGE_READY:
			if (v->voc_state == VOICE_CHANGE) {
				mutex_lock(&voice.voc_lock);
				msm_snddev_enable_sidetone(v->dev_rx.dev_id,
				1);
				/* update voice state */
				v->voc_state = VOICE_ACQUIRE;
				v->dev_event = 0;
				mutex_unlock(&voice.voc_lock);
				broadcast_event(AUDDEV_EVT_VOICE_STATE_CHG,
					VOICE_STATE_INCALL, SESSION_IGNORE);
			} else {
				mutex_lock(&voice.voc_lock);
				v->dev_event = 0;
				mutex_unlock(&voice.voc_lock);
				MM_ERR("Get this event at the wrong state\n");
			}
			break;
		default:
			mutex_lock(&voice.voc_lock);
			v->dev_event = 0;
			mutex_unlock(&voice.voc_lock);
			break;
		}
	}
	return 0;
}