Example #1
File: iriap.c Project: CSCLOG/beaglebone
/*
 * Function iriap_init (void)
 *
 *    Initializes the IrIAP layer, called by the module initialization code
 *    in irmod.c
 */
int __init iriap_init(void)
{
	struct ias_object *obj;
	struct iriap_cb *server;
	__u8 oct_seq[6];
	__u16 hints;

	/* Allocate master array */
	iriap = hashbin_new(HB_LOCK);
	if (!iriap)
		return -ENOMEM;

	/* Object repository - defined in irias_object.c */
	irias_objects = hashbin_new(HB_LOCK);
	if (!irias_objects) {
		IRDA_WARNING("%s: Can't allocate irias_objects hashbin!\n",
			     __func__);
		hashbin_delete(iriap, NULL);
		return -ENOMEM;
	}

	lockdep_set_class_and_name(&irias_objects->hb_spinlock, &irias_objects_key,
				   "irias_objects");

	/*
	 *  Register some default services for IrLMP
	 */
	hints  = irlmp_service_to_hint(S_COMPUTER);
	service_handle = irlmp_register_service(hints);

	/* Register the Device object with LM-IAS */
	obj = irias_new_object("Device", IAS_DEVICE_ID);
	irias_add_string_attrib(obj, "DeviceName", "Linux", IAS_KERNEL_ATTR);

	oct_seq[0] = 0x01;  /* Version 1 */
	oct_seq[1] = 0x00;  /* IAS support bits */
	oct_seq[2] = 0x00;  /* LM-MUX support bits */
#ifdef CONFIG_IRDA_ULTRA
	oct_seq[2] |= 0x04; /* Connectionless Data support */
#endif
	irias_add_octseq_attrib(obj, "IrLMPSupport", oct_seq, 3,
				IAS_KERNEL_ATTR);
	irias_insert_object(obj);

	/*
	 *  Register server support with IrLMP so we can accept incoming
	 *  connections
	 */
	server = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
	if (!server) {
		IRDA_DEBUG(0, "%s(), unable to open server\n", __func__);
		return -1;
	}
	iriap_register_lsap(server, LSAP_IAS, IAS_SERVER);

	return 0;
}
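The static key is what gives the hashbin lock a stable lockdep class across all instances. A minimal sketch of the file-scope declaration this example assumes for the key used above (the placement is an assumption):

static struct lock_class_key irias_objects_key;	/* one lockdep class for the irias_objects hashbin lock */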
Example #2
void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
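Callers do not normally invoke __init_kthread_worker() directly: a wrapper macro mints one static lock_class_key per call site, so each worker gets its own lockdep class and a name derived from the variable. A sketch following the pattern in linux/kthread.h of this era (treat the exact text as an assumption):

#define init_kthread_worker(worker)					\
	do {								\
		static struct lock_class_key __key;			\
		__init_kthread_worker((worker), "("#worker")->lock", &__key); \
	} while (0)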
Example #3
File: xfs_iget.c Project: longqzh/chronnOS
/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_active, "xfs_iolock_active");

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;

	return ip;
}
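Every inode allocated here shares the same key, so lockdep folds all active-inode iolocks into a single class rather than creating one class per inode. A sketch of the declaration the call above assumes (placement in xfs_iget.c is an assumption):

struct lock_class_key xfs_iolock_active;	/* shared lockdep class for all active-inode iolocks */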
Example #4
File: sock.c Project: Voskrese/mipsonqemu
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	spin_lock_init(&sk->sk_lock.slock);
	sk->sk_lock.owner = NULL;
	init_waitqueue_head(&sk->sk_lock.wq);
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));

	/*
	 * Mark both the sk_lock and the sk_lock.slock as a
	 * per-address-family lock class:
	 */
	lockdep_set_class_and_name(&sk->sk_lock.slock,
				   af_family_slock_keys + sk->sk_family,
				   af_family_slock_key_strings[sk->sk_family]);
	lockdep_init_map(&sk->sk_lock.dep_map,
			 af_family_key_strings[sk->sk_family],
			 af_family_keys + sk->sk_family, 0);
}
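The keys and name strings indexed by sk->sk_family are file-scope arrays with one slot per address family, so AF_INET socket locks and AF_UNIX socket locks land in different lockdep classes. A sketch of the declarations this function assumes (string initializers omitted; the real tables spell out names like "slock-AF_INET"):

static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static const char *const af_family_key_strings[AF_MAX];
static const char *const af_family_slock_key_strings[AF_MAX];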
Example #5
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_active, "xfs_iolock_active");

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	return ip;
}
Example #6
File: kthread.c Project: 454053205/linux
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);
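A hypothetical caller, to make the contract concrete: the thread comes back stopped, carries the formatted name in its comm field, and runs threadfn() only after wake_up_process() (my_threadfn and the thread name are invented for illustration):

static int my_threadfn(void *data)
{
	while (!kthread_should_stop())
		msleep(100);		/* stand-in for real work */
	return 0;
}

/* call site, e.g. in a module init path: */
	struct task_struct *task;

	task = kthread_create_on_node(my_threadfn, NULL, -1, "mythread/%d", 0);
	if (IS_ERR(task))
		return PTR_ERR(task);
	wake_up_process(task);	/* the thread was created stopped */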

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
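The intended sequence, sketched with hypothetical names: create the thread (it returns stopped), bind it while it is still inactive, then wake it. Binding after the thread has already run would trip the wait_task_inactive() check above.

	struct task_struct *t;

	t = kthread_create_on_node(my_threadfn, NULL, cpu_to_node(cpu),
				   "worker/%u", cpu);
	if (!IS_ERR(t)) {
		kthread_bind(t, cpu);	/* safe: the thread has not run yet */
		wake_up_process(t);
	}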

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);
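For this handshake to work, threadfn() must poll kthread_should_stop() and return rather than exiting on its own. A minimal loop shape (hypothetical), setting the task state before the flag check so a wakeup from kthread_stop() cannot be lost:

static int my_threadfn(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();			/* sleep until woken */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;	/* handed back as kthread_stop()'s return value */
}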

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
Example #7
/* Create a new IPv4 subflow.
 *
 * We are in user context and the meta-sock lock is held.
 */
int mptcp_init4_subsockets(struct sock *meta_sk, const struct mptcp_loc4 *loc,
			   struct mptcp_rem4 *rem)
{
	struct tcp_sock *tp;
	struct sock *sk;
	struct sockaddr_in loc_in, rem_in;
	struct socket_alloc sock_full;
	struct socket *sock = (struct socket *)&sock_full;
	int ret;

	/** First, create and prepare the new socket */
	memcpy(&sock_full, meta_sk->sk_socket, sizeof(sock_full));
	sock->state = SS_UNCONNECTED;
	sock->ops = NULL;

	ret = inet_create(sock_net(meta_sk), sock, IPPROTO_TCP, 1);
	if (unlikely(ret < 0)) {
		net_err_ratelimited("%s inet_create failed ret: %d\n",
				    __func__, ret);
		return ret;
	}

	sk = sock->sk;
	tp = tcp_sk(sk);

	/* All subsockets need the MPTCP-lock-class */
	lockdep_set_class_and_name(&(sk)->sk_lock.slock, &meta_slock_key, meta_slock_key_name);
	lockdep_init_map(&(sk)->sk_lock.dep_map, meta_key_name, &meta_key, 0);

	ret = mptcp_add_sock(meta_sk, sk, loc->loc4_id, rem->rem4_id, GFP_KERNEL);
	if (ret) {
		net_err_ratelimited("%s mptcp_add_sock failed ret: %d\n",
				    __func__, ret);
		goto error;
	}

	tp->mptcp->slave_sk = 1;

	/* Initializing the timer for an MPTCP subflow */
	timer_setup(&tp->mptcp->mptcp_ack_timer, mptcp_ack_handler, 0);

	/** Then, connect the socket to the peer */
	loc_in.sin_family = AF_INET;
	rem_in.sin_family = AF_INET;
	loc_in.sin_port = 0;
	if (rem->port)
		rem_in.sin_port = rem->port;
	else
		rem_in.sin_port = inet_sk(meta_sk)->inet_dport;
	loc_in.sin_addr = loc->addr;
	rem_in.sin_addr = rem->addr;

	if (loc->if_idx)
		sk->sk_bound_dev_if = loc->if_idx;

	ret = kernel_bind(sock, (struct sockaddr *)&loc_in,
			  sizeof(struct sockaddr_in));
	if (ret < 0) {
		net_err_ratelimited("%s: token %#x bind() to %pI4 index %d failed, error %d\n",
				    __func__, tcp_sk(meta_sk)->mpcb->mptcp_loc_token,
				    &loc_in.sin_addr, loc->if_idx, ret);
		goto error;
	}

	mptcp_debug("%s: token %#x pi %d src_addr:%pI4:%d dst_addr:%pI4:%d ifidx: %d\n",
		    __func__, tcp_sk(meta_sk)->mpcb->mptcp_loc_token,
		    tp->mptcp->path_index, &loc_in.sin_addr,
		    ntohs(loc_in.sin_port), &rem_in.sin_addr,
		    ntohs(rem_in.sin_port), loc->if_idx);

	ret = kernel_connect(sock, (struct sockaddr *)&rem_in,
			     sizeof(struct sockaddr_in), O_NONBLOCK);
	if (ret < 0 && ret != -EINPROGRESS) {
		net_err_ratelimited("%s: MPTCP subsocket connect() failed, error %d\n",
				    __func__, ret);
		goto error;
	}

	MPTCP_INC_STATS(sock_net(meta_sk), MPTCP_MIB_JOINSYNTX);

	sk_set_socket(sk, meta_sk->sk_socket);
	sk->sk_wq = meta_sk->sk_wq;

	return 0;

error:
	/* May happen if mptcp_add_sock fails first */
	if (!mptcp(tp)) {
		tcp_close(sk, 0);
	} else {
		local_bh_disable();
		mptcp_sub_force_close(sk);
		local_bh_enable();
	}
	return ret;
}
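The meta keys and name strings used at the two lockdep calls above are presumably defined once at file scope, so every subflow's lock joins the same class as the meta-socket's; a sketch under that assumption (the string values are guesses):

static struct lock_class_key meta_key;
static struct lock_class_key meta_slock_key;
static const char meta_key_name[] = "sk_lock-AF_INET-MPTCP";
static const char meta_slock_key_name[] = "slock-AF_INET-MPTCP";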
Example #8
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[],
					   ...)
{
	struct kthread_create_info create;

	create.threadfn = threadfn;
	create.data = data;
	create.node = node;
	init_completion(&create.done);

	spin_lock(&kthread_create_lock);
	list_add_tail(&create.list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	wait_for_completion(&create.done);

	if (!IS_ERR(create.result)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(create.result->comm, sizeof(create.result->comm),
			  namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(create.result, cpu_all_mask);
	}
	return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);
	get_task_struct(k);

	kthread = to_kthread(k);
	barrier(); /* it might have exited */
	if (k->vfork_done != NULL) {
		kthread->should_stop = 1;
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;

	put_task_struct(k);
	trace_sched_kthread_stop_ret(ret);

	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_HIGH_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
				const char *name,
				struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
Example #9
void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}
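As with the kthread worker earlier, this low-level initializer is normally reached through a macro that supplies a static key and the variable's own name per call site; a sketch following the pattern in linux/wait.h (exact text is an assumption):

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)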
Example #10
static void mdd_lockdep_init(struct mdd_object *obj)
{
        lockdep_set_class_and_name(obj, &mdd_pdirop_key, "pdir");
}
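Taken together, the examples share one pattern: initialize the lock, then immediately re-key it with a static lock_class_key and a readable name before anyone can take it. A condensed, self-contained sketch (all names hypothetical):

static struct lock_class_key demo_key;

static void demo_lock_init(spinlock_t *lock)
{
	spin_lock_init(lock);
	lockdep_set_class_and_name(lock, &demo_key, "demo_lock");
}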