Example #1
/*
 * This function is called at module load.
 */
static int __init sync_init(void)
{
    DBG(0, KERN_INFO, "Sync init\n");
    DBG(1, KERN_DEBUG, "debug level %d\n", debug_level);

    mutex_init(&sync.mutex);

    init_completion(&sync.completion);

    /* init_MUTEX(&sync.sem); */
    /* init_MUTEX_LOCKED(&sync.sem); */
    sema_init(&sync.sem, 2);

    init_rwsem(&sync.rw_sem);

    seqlock_init(&sync.seqlock);

    spin_lock_init(&sync.spinlock);

    rwlock_init(&sync.rwlock);

    sync_proc_init(&sync);

    return 0;
}
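Example #1 initializes the lock but never touches it again. As a reference for what seqlock_init() is preparing, here is a minimal sketch of the classic reader/writer pattern (the demo_* names are hypothetical and not part of the module above):

#include <linux/seqlock.h>

static seqlock_t demo_lock;	/* set up with seqlock_init(&demo_lock) */
static u64 demo_a, demo_b;

/* Writer: takes the embedded spinlock and bumps the sequence count. */
static void demo_write(u64 a, u64 b)
{
	write_seqlock(&demo_lock);
	demo_a = a;
	demo_b = b;
	write_sequnlock(&demo_lock);
}

/* Reader: lockless; retries if a writer ran concurrently. */
static u64 demo_read(void)
{
	unsigned int seq;
	u64 sum;

	do {
		seq = read_seqbegin(&demo_lock);
		sum = demo_a + demo_b;
	} while (read_seqretry(&demo_lock, seq));

	return sum;
}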
Example #2
static struct fblock *fb_udp_ctor(char *name)
{
	int ret = 0;
	struct fblock *fb;
	struct fb_udp_priv *fb_priv;

	fb = alloc_fblock(GFP_ATOMIC);
	if (!fb)
		return NULL;
	fb_priv = kzalloc(sizeof(*fb_priv), GFP_ATOMIC);
	if (!fb_priv)
		goto err;
	seqlock_init(&fb_priv->lock);
	fb_priv->port[0] = IDP_UNKNOWN;
	fb_priv->port[1] = IDP_UNKNOWN;
	ret = init_fblock(fb, name, fb_priv);
	if (ret)
		goto err2;
	fb->netfb_rx = fb_udp_netrx;
	fb->event_rx = fb_udp_event;
	ret = register_fblock_namespace(fb);
	if (ret)
		goto err3;
	__module_get(THIS_MODULE);

	return fb;
err3:
	cleanup_fblock_ctor(fb);
err2:
	kfree(fb_priv);
err:
	kfree_fblock(fb);
	return NULL;
}
Example #3
static struct fblock *fb_counter_ctor(char *name)
{
	int ret = 0;
	unsigned int cpu;
	struct fblock *fb;
	struct fb_counter_priv __percpu *fb_priv;
	struct proc_dir_entry *fb_proc;

	fb = alloc_fblock(GFP_ATOMIC);
	if (!fb)
		return NULL;

	fb_priv = alloc_percpu(struct fb_counter_priv);
	if (!fb_priv)
		goto err;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_counter_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		seqlock_init(&fb_priv_cpu->lock);
		fb_priv_cpu->port[0] = IDP_UNKNOWN;
		fb_priv_cpu->port[1] = IDP_UNKNOWN;
		fb_priv_cpu->packets = 0;
		fb_priv_cpu->bytes = 0;
	}
	put_online_cpus();

	ret = init_fblock(fb, name, fb_priv);
	if (ret)
		goto err2;
	fb->netfb_rx = fb_counter_netrx;
	fb->event_rx = fb_counter_event;

	fb_proc = proc_create_data(fb->name, 0444, fblock_proc_dir,
				   &fb_counter_proc_fops,
				   (void *)(long) fb);
	if (!fb_proc)
		goto err3;

	ret = register_fblock_namespace(fb);
	if (ret)
		goto err4;

	__module_get(THIS_MODULE);
	return fb;
err4:
	remove_proc_entry(fb->name, fblock_proc_dir);
err3:
	cleanup_fblock_ctor(fb);
err2:
	free_percpu(fb_priv);
err:
	kfree_fblock(fb);
	return NULL;
}
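The constructor registers per-CPU counters but omits the reader side. A hedged sketch of how they could be summed consistently — fb_counter_sum() is a hypothetical helper, and the u64 counter types are an assumption — might be:

/* Hypothetical reader: sum the per-CPU counters, retrying any CPU whose
 * writer was mid-update; mirrors the per-CPU seqlock layout above. */
static void fb_counter_sum(struct fb_counter_priv __percpu *fb_priv,
			   u64 *packets, u64 *bytes)
{
	unsigned int cpu;

	*packets = 0;
	*bytes = 0;
	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_counter_priv *fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		unsigned int seq;
		u64 p, b;

		do {
			seq = read_seqbegin(&fb_priv_cpu->lock);
			p = fb_priv_cpu->packets;
			b = fb_priv_cpu->bytes;
		} while (read_seqretry(&fb_priv_cpu->lock, seq));

		*packets += p;
		*bytes += b;
	}
	put_online_cpus();
}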
Example #4
//[*]--------------------------------------------------------------------------------------------------[*]
static void bma150_sensor_config(unsigned char state)
{
	unsigned char	rd = 0;

	// GPIO Init 
	bma150_sensor_port_init();
	
	// Setting I2C Protocol
	bma150_sensor_write(BMA150_REGx15, 0);	// 3 wire

	// EEPROM Unlock
	bma150_sensor_read(BMA150_REGx0A, &rd, sizeof(rd));
	bma150_sensor_write(BMA150_REGx0A, (rd | 0x10));
	bma150_sensor_write(0x35, 0x00);
	// EEPROM Lock
	bma150_sensor_write(BMA150_REGx0A, (rd & (~0x10)));
	
	mdelay(10);	// wait

	// BIT7 of REGx15 must be 0 (SPI4 = 0 -> 3-wire; default is 4-wire): I2C mode
	bma150_sensor_read(BMA150_REGx15, &rd, sizeof(rd));
	
	if (rd & 0x80)
		printk("BMA150 Sensor SPI 4 Wire Mode [rd = 0x%02X]\r\n", rd);
	else
		printk("BMA150 Sensor I2C 3 Wire Mode [rd = 0x%02X]\r\n", rd);

	bma150_sensor_read(BMA150_REGx01, &rd, sizeof(rd));
	printk("BMA150 Sensor Chip version [%d.%d]\r\n", (rd >> 4) & 0x0F, rd & 0x0F);
	
	bma150_sensor_read(BMA150_REGx00, &rd, sizeof(rd));
	printk("BMA150 Sensor Chip ID [%d] \r\n", rd);

	// set all interrupt disable (hw bug) : wrong interrupt connected (bma interrupt -> pin 4)
	bma150_sensor_write(BMA150_REGx0B, 0x00);

	if (state == SENSOR_STATE_BOOT) {
		// set default config
		bma150_sensor.acc_range = ACC_RANGE_2G;	// +/- 2g
		bma150_sensor.mode      = BMA150_MODE_NORMAL;

		bma150_sensor.modify_range = 0;	// acc range modify flag clear
	} else {	// SENSOR_STATE_RESUME
		if (bma150_sensor.acc_range != ACC_RANGE_2G)
			bma150_sensor.modify_range = 1;	// acc range modify flag set
	}

	init_timer(&bma150_sensor.rd_timer);
	bma150_sensor.rd_timer.data 		= (unsigned long)&bma150_sensor.rd_timer;
	bma150_sensor.rd_timer.function 	= bma150_sensor_timer_irq;

	seqlock_init(&bma150_sensor.lock);	
	
	bma150_sensor_timer_start();
}
Example #5
static struct fblock *fb_huf_ctor(char *name)
{
	int ret = 0;
	struct fblock *fb;
	struct fb_huf_priv *fb_priv;
	struct proc_dir_entry *fb_proc;
	Node *tree;
	fb = alloc_fblock(GFP_ATOMIC);
	if (!fb)
		return NULL;
	fb_priv = kzalloc(sizeof(*fb_priv), GFP_ATOMIC);
	if (!fb_priv)
		goto err;
	seqlock_init(&fb_priv->lock);
	rwlock_init(&fb_priv->klock);
	fb_priv->port[0] = IDP_UNKNOWN;
	fb_priv->port[1] = IDP_UNKNOWN;

	ret = init_fblock(fb, name, fb_priv);
	if (ret)
		goto err2;

	fb->netfb_rx = fb_huf_netrx;
	fb->event_rx = fb_huf_event;
//	fb->linearize = fb_aes_linearize;
//	fb->delinearize = fb_aes_delinearize;
	fb_proc = proc_create_data(fb->name, 0444, fblock_proc_dir,
				   &fb_huf_proc_fops, (void *)(long) fb);
	if (!fb_proc)
		goto err3;

	ret = register_fblock_namespace(fb);
	if (ret)
		goto err4;

	__module_get(THIS_MODULE);

	buildHuffmanTree(&tree);
	fillTable(tree, 0);
	invertCodes();

	return fb;
err4:
	remove_proc_entry(fb->name, fblock_proc_dir);
err3:
	cleanup_fblock_ctor(fb);
err2:
	kfree(fb_priv);
err:
	kfree_fblock(fb);
	return NULL;
}
Example #6
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	atomic_set(&sig->live, 1);
	atomic_set(&sig->sigcnt, 1);

	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

	init_waitqueue_head(&sig->wait_chldexit);
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);
	seqlock_init(&sig->stats_lock);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

#ifdef CONFIG_CGROUPS
	init_rwsem(&sig->group_rwsem);
#endif

	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	sig->has_child_subreaper = current->signal->has_child_subreaper ||
				   current->signal->is_child_subreaper;

	mutex_init(&sig->cred_guard_mutex);

	return 0;
}
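Readers of the stats_lock initialized here typically use the lockless-then-locked variant of the seqlock API. A rough sketch, with the u64 field types being an assumption for older kernels:

/* Sketch: snapshot two fields guarded by sig->stats_lock. The first pass
 * is lockless; if it keeps failing, the retry takes the lock outright
 * (seq becomes odd) and done_seqretry() drops it. */
static void read_signal_stats(struct signal_struct *sig,
			      u64 *utime, u64 *stime)
{
	int seq, nextseq;

	nextseq = 0;	/* 0 => start with a lockless pass */
	do {
		seq = nextseq;
		read_seqbegin_or_lock(&sig->stats_lock, &seq);
		*utime = sig->utime;
		*stime = sig->stime;
		/* a retry, if needed, escalates to the locked pass */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry(&sig->stats_lock, seq);
}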
Example #7
static struct inode *ufs_alloc_inode(struct super_block *sb)
{
	struct ufs_inode_info *ei;

	ei = kmem_cache_alloc(ufs_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->vfs_inode.i_version = 1;
	seqlock_init(&ei->meta_lock);
	mutex_init(&ei->truncate_mutex);
	return &ei->vfs_inode;
}
Example #8
void perftestinit(int nthreads)
{
	int i;

	init_per_thread(n_reads_pt, 0LL);
	init_per_thread(n_read_retries_pt, 0LL);
	init_per_thread(n_read_errs_pt, 0LL);
	init_per_thread(n_writes_pt, 0LL);
	atomic_set(&nthreadsrunning, 0);
	nthreadsexpected = nthreads;
	seqlock_init(&test_seqlock);
	for (i = 0; i < TESTARRAY_SIZE; i++)
		testarray[i] = i;
}
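perftestinit() only sets up the test state; the reader pass is not shown. Assuming perfbook's per-thread accessor __get_thread_var() for the counters initialized above, one plausible sketch (perftest_read_once() is a hypothetical name) is:

/* One reader pass over testarray: under a consistent snapshot every slot
 * equals its index; count retries, and errors that survive a clean read. */
static void perftest_read_once(void)
{
	int i;
	int errs = 0;
	unsigned long seq;

	for (;;) {
		seq = read_seqbegin(&test_seqlock);
		errs = 0;
		for (i = 0; i < TESTARRAY_SIZE; i++)
			if (testarray[i] != i)
				errs++;	/* torn read unless a writer intervened */
		if (!read_seqretry(&test_seqlock, seq))
			break;
		__get_thread_var(n_read_retries_pt)++;
	}
	if (errs)
		__get_thread_var(n_read_errs_pt)++;
	__get_thread_var(n_reads_pt)++;
}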
Example #9
bool_t register_clocksource(struct device_t ** device, struct clocksource_t * cs)
{
	struct device_t * dev;
	irq_flags_t flags;

	if(!cs || !cs->name || !cs->read)
		return FALSE;

	dev = malloc(sizeof(struct device_t));
	if(!dev)
		return FALSE;

	cs->keeper.interval = clocksource_deferment(cs) >> 1;
	cs->keeper.last = clocksource_cycle(cs);
	cs->keeper.nsec = 0;
	seqlock_init(&cs->keeper.lock);
	timer_init(&cs->keeper.timer, clocksource_keeper_timer_function, cs);

	dev->name = strdup(cs->name);
	dev->type = DEVICE_TYPE_CLOCKSOURCE;
	dev->driver = NULL;
	dev->priv = cs;
	dev->kobj = kobj_alloc_directory(dev->name);
	kobj_add_regular(dev->kobj, "mult", clocksource_read_mult, NULL, cs);
	kobj_add_regular(dev->kobj, "shift", clocksource_read_shift, NULL, cs);
	kobj_add_regular(dev->kobj, "period", clocksource_read_period, NULL, cs);
	kobj_add_regular(dev->kobj, "deferment", clocksource_read_deferment, NULL, cs);
	kobj_add_regular(dev->kobj, "cycle", clocksource_read_cycle, NULL, cs);
	kobj_add_regular(dev->kobj, "time", clocksource_read_time, NULL, cs);

	if(!register_device(dev))
	{
		kobj_remove_self(dev->kobj);
		free(dev->name);
		free(dev);
		return FALSE;
	}

	if(__clocksource == &__cs_dummy)
	{
		spin_lock_irqsave(&__clocksource_lock, flags);
		__clocksource = cs;
		spin_unlock_irqrestore(&__clocksource_lock, flags);
	}
	timer_start_now(&cs->keeper.timer, ns_to_ktime(cs->keeper.interval));

	if(device)
		*device = dev;
	return TRUE;
}
Example #10
/*
 * Initialise an inode cache slab element prior to any use.  Note that
 * afs_alloc_inode() *must* reset anything that could incorrectly leak from one
 * inode to another.
 */
static void afs_i_init_once(void *_vnode)
{
	struct afs_vnode *vnode = _vnode;

	memset(vnode, 0, sizeof(*vnode));
	inode_init_once(&vnode->vfs_inode);
	mutex_init(&vnode->io_lock);
	init_rwsem(&vnode->validate_lock);
	spin_lock_init(&vnode->wb_lock);
	spin_lock_init(&vnode->lock);
	INIT_LIST_HEAD(&vnode->wb_keys);
	INIT_LIST_HEAD(&vnode->pending_locks);
	INIT_LIST_HEAD(&vnode->granted_locks);
	INIT_DELAYED_WORK(&vnode->lock_work, afs_lock_work);
	seqlock_init(&vnode->cb_lock);
}
Example #11
static struct fblock *fb_counter_ctor(char *name)
{
	int ret = 0;
	struct fblock *fb;
	struct fb_counter_priv *fb_priv;
	struct proc_dir_entry *fb_proc;

	fb = alloc_fblock(GFP_ATOMIC);
	if (!fb)
		return NULL;
	fb_priv = kzalloc(sizeof(*fb_priv), GFP_ATOMIC);
	if (!fb_priv)
		goto err;
	seqlock_init(&fb_priv->lock);
	fb_priv->port[0] = IDP_UNKNOWN;
	fb_priv->port[1] = IDP_UNKNOWN;
	fb_priv->packets = 0;
	fb_priv->bytes = 0;
	ret = init_fblock(fb, name, fb_priv);
	if (ret)
		goto err2;
	fb->netfb_rx = fb_counter_netrx;
	fb->event_rx = fb_counter_event;
	fb_proc = proc_create_data(fb->name, 0444, fblock_proc_dir,
				   &fb_counter_proc_fops,
				   (void *)(long) fb);
	if (!fb_proc)
		goto err3;
	ret = register_fblock_namespace(fb);
	if (ret)
		goto err4;
	__module_get(THIS_MODULE);
	return fb;
err4:
	remove_proc_entry(fb->name, fblock_proc_dir);
err3:
	cleanup_fblock_ctor(fb);
err2:
	kfree(fb_priv);
err:
	kfree_fblock(fb);
	return NULL;
}
Example #12
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	/*
	 * If the new process will be in a different pid or user namespace
	 * do not allow it to share a thread group or signal handlers or
	 * parent with the forking task.
	 */
	if (clone_flags & CLONE_SIGHAND) {
		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
		    (task_active_pid_ns(current) !=
				current->nsproxy->pid_ns_for_children))
			return ERR_PTR(-EINVAL);
	}

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (p->real_cred->user != INIT_USER &&
		    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
	p->flags |= PF_FORKNOEXEC;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
	p->utimescaled = p->stimescaled = 0;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	p->prev_cputime.utime = p->prev_cputime.stime = 0;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqlock_init(&p->vtime_seqlock);
	p->vtime_snap = 0;
	p->vtime_snap_whence = VTIME_SLEEPING;
#endif

#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	p->start_time = ktime_get_ns();
	p->real_start_time = ktime_get_boot_ns();
	p->io_context = NULL;
	p->audit_context = NULL;
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_begin(current);
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_threadgroup_lock;
	}
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
	p->hardirqs_enabled = 0;
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_BCACHE
	p->sequential_io	= 0;
	p->sequential_io_avg	= 0;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	retval = sched_fork(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_policy;

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_perf;
	/* copy all the process information */
	shm_init_task(p);
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		pid = alloc_pid(p->nsproxy->pid_ns_for_children);
		if (IS_ERR(pid)) {
			retval = PTR_ERR(pid);
			goto bad_fork_cleanup_io;
		}
	}

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	p->pid = pid_nr(pid);
	if (clone_flags & CLONE_THREAD) {
		p->exit_signal = -1;
		p->group_leader = current->group_leader;
		p->tgid = current->tgid;
	} else {
		if (clone_flags & CLONE_PARENT)
			p->exit_signal = current->group_leader->exit_signal;
		else
			p->exit_signal = (clone_flags & CSIGNAL);
		p->group_leader = p;
		p->tgid = p->pid;
	}

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	p->pdeath_signal = 0;
	INIT_LIST_HEAD(&p->thread_group);
	p->task_works = NULL;

	/*
	 * Make it visible to the rest of the system, but don't wake it up yet.
	 * Need tasklist lock for parent etc handling!
	 */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Copy seccomp details explicitly here, in case they were changed
	 * before holding sighand lock.
	 */
	copy_seccomp(p);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	*/
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		init_task_pid(p, PIDTYPE_PID, pid);
		if (thread_group_leader(p)) {
			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
			init_task_pid(p, PIDTYPE_SID, task_session(current));

			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			attach_pid(p, PIDTYPE_PGID);
			attach_pid(p, PIDTYPE_SID);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			atomic_inc(&current->signal->live);
			atomic_inc(&current->signal->sigcnt);
			list_add_tail_rcu(&p->thread_group,
					  &p->group_leader->thread_group);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);
	uprobe_copy_process(p, clone_flags);

	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
#endif
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	delayacct_tsk_free(p);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
Example #13
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *ext3_new_inode(handle_t *handle, struct inode * dir, int mode)
{
	struct super_block *sb;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	int group;
	unsigned long ino = 0;
	struct inode * inode;
	struct ext3_group_desc * gdp = NULL;
	struct ext3_super_block * es;
	struct ext3_inode_info *ei;
	struct ext3_sb_info *sbi;
	int err = 0;
	struct inode *ret;
	int i;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT3_I(inode);

	sbi = EXT3_SB(sb);
	es = sbi->s_es;
	if (S_ISDIR(mode)) {
		if (test_opt (sb, OLDALLOC))
			group = find_group_dir(sb, dir);
		else
			group = find_group_orlov(sb, dir);
	} else 
		group = find_group_other(sb, dir);

	err = -ENOSPC;
	if (group == -1)
		goto out;

	for (i = 0; i < sbi->s_groups_count; i++) {
		gdp = ext3_get_group_desc(sb, group, &bh2);

		err = -EIO;
		brelse(bitmap_bh);
		bitmap_bh = read_inode_bitmap(sb, group);
		if (!bitmap_bh)
			goto fail;

		ino = 0;

repeat_in_this_group:
		ino = ext3_find_next_zero_bit((unsigned long *)
				bitmap_bh->b_data, EXT3_INODES_PER_GROUP(sb), ino);
		if (ino < EXT3_INODES_PER_GROUP(sb)) {
			int credits = 0;

			BUFFER_TRACE(bitmap_bh, "get_write_access");
			err = ext3_journal_get_write_access_credits(handle,
							bitmap_bh, &credits);
			if (err)
				goto fail;

			if (!ext3_set_bit_atomic(sb_bgl_lock(sbi, group),
						ino, bitmap_bh->b_data)) {
				/* we won it */
				BUFFER_TRACE(bitmap_bh,
					"call ext3_journal_dirty_metadata");
				err = ext3_journal_dirty_metadata(handle,
								bitmap_bh);
				if (err)
					goto fail;
				goto got;
			}
			/* we lost it */
			journal_release_buffer(handle, bitmap_bh, credits);

			if (++ino < EXT3_INODES_PER_GROUP(sb))
				goto repeat_in_this_group;
		}

		/*
		 * This case is possible in concurrent environment.  It is very
		 * rare.  We cannot repeat the find_group_xxx() call because
		 * that will simply return the same blockgroup, because the
		 * group descriptor metadata has not yet been updated.
		 * So we just go onto the next blockgroup.
		 */
		if (++group == sbi->s_groups_count)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	ino += group * EXT3_INODES_PER_GROUP(sb) + 1;
	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext3_error (sb, "ext3_new_inode",
			    "reserved inode or inode > inodes count - "
			    "block_group = %d, inode=%lu", group, ino);
		err = -EIO;
		goto fail;
	}

	BUFFER_TRACE(bh2, "get_write_access");
	err = ext3_journal_get_write_access(handle, bh2);
	if (err) goto fail;
	spin_lock(sb_bgl_lock(sbi, group));
	gdp->bg_free_inodes_count =
		cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
	if (S_ISDIR(mode)) {
		gdp->bg_used_dirs_count =
			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
	}
	spin_unlock(sb_bgl_lock(sbi, group));
	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
	err = ext3_journal_dirty_metadata(handle, bh2);
	if (err) goto fail;

	percpu_counter_dec(&sbi->s_freeinodes_counter);
	if (S_ISDIR(mode))
		percpu_counter_inc(&sbi->s_dirs_counter);
	sb->s_dirt = 1;

	inode->i_uid = current->fsuid;
	if (test_opt (sb, GRPID))
		inode->i_gid = dir->i_gid;
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current->fsgid;
	inode->i_mode = mode;

	inode->i_ino = ino;
	/* This is the optimal IO size (for stat), not the fs block size */
	inode->i_blksize = PAGE_SIZE;
	inode->i_blocks = 0;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	memset(ei->i_data, 0, sizeof(ei->i_data));
	ei->i_next_alloc_block = 0;
	ei->i_next_alloc_goal = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_disksize = 0;

	ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL;
	if (S_ISLNK(mode))
		ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
	/* dirsync only applies to directories */
	if (!S_ISDIR(mode))
		ei->i_flags &= ~EXT3_DIRSYNC_FL;
#ifdef EXT3_FRAGMENTS
	ei->i_faddr = 0;
	ei->i_frag_no = 0;
	ei->i_frag_size = 0;
#endif
	ei->i_file_acl = 0;
	ei->i_dir_acl = 0;
	ei->i_dtime = 0;
	ei->i_rsv_window.rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
	ei->i_rsv_window.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
	atomic_set(&ei->i_rsv_window.rsv_goal_size, EXT3_DEFAULT_RESERVE_BLOCKS);
	atomic_set(&ei->i_rsv_window.rsv_alloc_hit, 0);
	seqlock_init(&ei->i_rsv_window.rsv_seqlock);
	ei->i_block_group = group;

	ext3_set_inode_flags(inode);
	if (IS_DIRSYNC(inode))
		handle->h_sync = 1;
	insert_inode_hash(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);

	ei->i_state = EXT3_STATE_NEW;

	ret = inode;
	if(DQUOT_ALLOC_INODE(inode)) {
		DQUOT_DROP(inode);
		err = -EDQUOT;
		goto fail2;
	}
	err = ext3_init_acl(handle, inode, dir);
	if (err) {
		DQUOT_FREE_INODE(inode);
		goto fail2;
	}
	err = ext3_mark_inode_dirty(handle, inode);
	if (err) {
		ext3_std_error(sb, err);
		DQUOT_FREE_INODE(inode);
		goto fail2;
	}

	ext3_debug("allocating inode %lu\n", inode->i_ino);
	goto really_out;
fail:
	ext3_std_error(sb, err);
out:
	iput(inode);
	ret = ERR_PTR(err);
really_out:
	brelse(bitmap_bh);
	return ret;

fail2:
	inode->i_flags |= S_NOQUOTA;
	inode->i_nlink = 0;
	iput(inode);
	brelse(bitmap_bh);
	return ERR_PTR(err);
}
Example #14
static struct fblock *fb_crr_rx_ctor(char *name)
{
	int ret = 0;
	unsigned int cpu, *tmp_rx_bitstream;
	unsigned char *tmp_expected_seq_nr, *tmp_rx_win_nr;
	struct sk_buff_head *tmp_list;
	struct fblock *fb;
	struct fb_crr_rx_priv __percpu *fb_priv;
	rwlock_t *tmp_rx_lock;

	fb = alloc_fblock(GFP_ATOMIC);
	if (!fb)
		return NULL;

	fb_priv = alloc_percpu(struct fb_crr_rx_priv);
	if (!fb_priv)
		goto err;

	if (unlikely((tmp_rx_bitstream = kzalloc(sizeof(unsigned int), GFP_ATOMIC)) == NULL))
		goto err_;

	if (unlikely((tmp_rx_win_nr = kzalloc(sizeof(unsigned char), GFP_ATOMIC)) == NULL))
		goto err__;

	if (unlikely((tmp_rx_lock = kzalloc(sizeof(rwlock_t), GFP_ATOMIC)) == NULL))
		goto err0;

	if (unlikely((tmp_list = kzalloc(sizeof(struct sk_buff_head), GFP_ATOMIC)) == NULL))
		goto err1;

	if (unlikely((tmp_expected_seq_nr = kzalloc(sizeof(unsigned char), GFP_ATOMIC)) == NULL))
		goto err1a;

	rwlock_init(tmp_rx_lock);

	*tmp_rx_bitstream = 0;
	*tmp_rx_win_nr = 0;

	*tmp_expected_seq_nr = 1;

	skb_queue_head_init(tmp_list);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct fb_crr_rx_priv *fb_priv_cpu;
		fb_priv_cpu = per_cpu_ptr(fb_priv, cpu);
		seqlock_init(&fb_priv_cpu->lock);
		//rwlock_init(&fb_priv_cpu->rx_lock);
		fb_priv_cpu->rx_lock = tmp_rx_lock;		
		fb_priv_cpu->port[0] = IDP_UNKNOWN;
		fb_priv_cpu->port[1] = IDP_UNKNOWN;
		fb_priv_cpu->rx_seq_nr = tmp_expected_seq_nr;
		fb_priv_cpu->list = tmp_list;
		fb_priv_cpu->rx_bitstream = tmp_rx_bitstream;
		fb_priv_cpu->rx_win_nr = tmp_rx_win_nr;
	}
	put_online_cpus();

	ret = init_fblock(fb, name, fb_priv);
	if (ret)
		goto err2;
	fb->netfb_rx = fb_crr_rx_netrx;
	fb->event_rx = fb_crr_rx_event;
	ret = register_fblock_namespace(fb);
	if (ret)
		goto err3;
	__module_get(THIS_MODULE);
	printk(KERN_ERR "[CRR_RX] Initialization passed!\n");
	return fb;
err3:
	cleanup_fblock_ctor(fb);
err2:
	kfree(tmp_expected_seq_nr);
err1a:
	kfree(tmp_list);
err1:
	kfree(tmp_rx_lock);
err0:
	kfree(tmp_rx_win_nr);
err__:
	kfree(tmp_rx_bitstream);
err_:
	free_percpu(fb_priv);
err:
	kfree_fblock(fb);
	printk(KERN_ERR "[CRR_RX] Initialization failed!\n");
	return NULL;
}
Example #15
static __net_init int ipv4_sysctl_init_net(struct net *net)
{
    struct ctl_table *table;

    table = ipv4_net_table;
    if (!net_eq(net, &init_net)) {
        table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
        if (table == NULL)
            goto err_alloc;

        table[0].data =
            &net->ipv4.sysctl_icmp_echo_ignore_all;
        table[1].data =
            &net->ipv4.sysctl_icmp_echo_ignore_broadcasts;
        table[2].data =
            &net->ipv4.sysctl_icmp_ignore_bogus_error_responses;
        table[3].data =
            &net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr;
        table[4].data =
            &net->ipv4.sysctl_icmp_ratelimit;
        table[5].data =
            &net->ipv4.sysctl_icmp_ratemask;
        table[6].data =
            &net->ipv4.sysctl_ping_group_range;
        table[7].data =
            &net->ipv4.sysctl_tcp_ecn;
        table[8].data =
            &net->ipv4_sysctl_local_ports.range;

        /* Don't export sysctls to unprivileged users */
        if (net->user_ns != &init_user_ns)
            table[0].procname = NULL;
    }

    /*
     * Sane defaults - nobody may create ping sockets.
     * Boot scripts should set this to distro-specific group.
     */
    net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
    net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);

    /*
     * Set defaults for local port range
     */
    seqlock_init(&net->ipv4_sysctl_local_ports.lock);
    net->ipv4_sysctl_local_ports.range[0] =  32768;
    net->ipv4_sysctl_local_ports.range[1] =  61000;

    tcp_init_mem(net);

    net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
    if (net->ipv4.ipv4_hdr == NULL)
        goto err_reg;

    return 0;

err_reg:
    if (!net_eq(net, &init_net))
        kfree(table);
err_alloc:
    return -ENOMEM;
}
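The mainline kernel reads the port-range pair back under the same seqlock (see inet_get_local_port_range()); adapted to this snippet's ipv4_sysctl_local_ports layout, the reader would be roughly:

/* Sketch of the reader side: snapshot both ends of the port range
 * consistently, retrying if a sysctl write raced with us. */
static void get_local_port_range(struct net *net, int *low, int *high)
{
    unsigned int seq;

    do {
        seq = read_seqbegin(&net->ipv4_sysctl_local_ports.lock);
        *low = net->ipv4_sysctl_local_ports.range[0];
        *high = net->ipv4_sysctl_local_ports.range[1];
    } while (read_seqretry(&net->ipv4_sysctl_local_ports.lock, seq));
}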
Example #16
/*
 * Initialise an AFS network namespace record.
 */
static int __net_init afs_net_init(struct afs_net *net)
{
	struct afs_sysnames *sysnames;
	int ret;

	net->live = true;
	generate_random_uuid((unsigned char *)&net->uuid);

	INIT_WORK(&net->charge_preallocation_work, afs_charge_preallocation);
	mutex_init(&net->socket_mutex);

	net->cells = RB_ROOT;
	seqlock_init(&net->cells_lock);
	INIT_WORK(&net->cells_manager, afs_manage_cells);
	timer_setup(&net->cells_timer, afs_cells_timer, 0);

	spin_lock_init(&net->proc_cells_lock);
	INIT_LIST_HEAD(&net->proc_cells);

	seqlock_init(&net->fs_lock);
	net->fs_servers = RB_ROOT;
	INIT_LIST_HEAD(&net->fs_updates);
	INIT_HLIST_HEAD(&net->fs_proc);

	INIT_HLIST_HEAD(&net->fs_addresses4);
	INIT_HLIST_HEAD(&net->fs_addresses6);
	seqlock_init(&net->fs_addr_lock);

	INIT_WORK(&net->fs_manager, afs_manage_servers);
	timer_setup(&net->fs_timer, afs_servers_timer, 0);

	ret = -ENOMEM;
	sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL);
	if (!sysnames)
		goto error_sysnames;
	sysnames->subs[0] = (char *)&afs_init_sysname;
	sysnames->nr = 1;
	refcount_set(&sysnames->usage, 1);
	net->sysnames = sysnames;
	rwlock_init(&net->sysnames_lock);

	/* Register the /proc stuff */
	ret = afs_proc_init(net);
	if (ret < 0)
		goto error_proc;

	/* Initialise the cell DB */
	ret = afs_cell_init(net, rootcell);
	if (ret < 0)
		goto error_cell_init;

	/* Create the RxRPC transport */
	ret = afs_open_socket(net);
	if (ret < 0)
		goto error_open_socket;

	return 0;

error_open_socket:
	net->live = false;
	afs_cell_purge(net);
	afs_purge_servers(net);
error_cell_init:
	net->live = false;
	afs_proc_cleanup(net);
error_proc:
	afs_put_sysnames(net->sysnames);
error_sysnames:
	net->live = false;
	return ret;
}