Example #1
File: srcu.c  Project: 383530895/linux
int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
Example #2
int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
	return init_srcu_struct_fields(sp);
}
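The pattern in Examples #1 and #2 recurs throughout this page: a per-call-site struct lock_class_key gives every object initialized there a single lockdep class, and lockdep_init_map() registers the object's dep_map under a human-readable name. Below is a minimal sketch of wiring the same pattern into one's own structure; the type and identifiers (my_object, my_object_key) are hypothetical, not taken from any example here.

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct my_object {				/* hypothetical type */
	spinlock_t lock;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;		/* only exists under lockdep */
#endif
};

static void my_object_init(struct my_object *obj)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* one static key => one lockdep class shared by all instances */
	static struct lock_class_key my_object_key;

	lockdep_init_map(&obj->dep_map, "my_object", &my_object_key, 0);
#endif
	spin_lock_init(&obj->lock);
}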
Example #3
void debug_mutex_init(struct mutex *lock, const char *name,
		      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->magic = lock;
}
Example #4
/*
 * struct mutex functions
 */
void __mutex_init(struct mutex *lock, char *name, struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	__rt_mutex_init(&lock->lock, name);
}
Example #5
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->activity = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
Example #6
File: rt.c  Project: mrtos/Logitech-Revue
void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
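	/* note: this older tree's lockdep_init_map() takes no subclass argument */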
	lockdep_init_map(&rwlock->dep_map, name, key);
#endif
	__rt_mutex_init(&rwlock->lock, name);
	rwlock->read_depth = 0;
}
Example #7
void fastcall __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
			      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
	lockdep_init_map(&rwsem->dep_map, name, key, 0);
#endif
	__rt_mutex_init(&rwsem->lock, name);
	rwsem->read_depth = 0;
}
Example #8
void debug_mutex_init(struct mutex *lock, const char *name,
		      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
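	/* note: as in Example #6, this older lockdep_init_map() takes no subclass argument */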
	lockdep_init_map(&lock->dep_map, name, key);
#endif
	lock->owner = NULL;
	lock->magic = lock;
}
Example #9
/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
	handle_t *handle = jbd_alloc_handle(GFP_NOFS);
	if (!handle)
		return NULL;
	memset(handle, 0, sizeof(*handle));
	handle->h_buffer_credits = nblocks;
	handle->h_ref = 1;

	lockdep_init_map(&handle->h_lockdep_map, "jbd_handle", &jbd_handle_key, 0);

	return handle;
}
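Example #9 is notable because the jbd handle is not a lock at all: lockdep_init_map() turns it into a pseudo-lock so lockdep can detect deadlocks between journal transactions and real locks. A hedged sketch of how such a map is typically exercised, using the generic lock_map_acquire()/lock_map_release() wrappers from <linux/lockdep.h> (jbd itself calls lock_acquire()/lock_release() directly):

static void handle_usage_sketch(handle_t *handle)
{
	lock_map_acquire(&handle->h_lockdep_map);	/* handle "held" */
	/* ... journaled work that must not deadlock ... */
	lock_map_release(&handle->h_lockdep_map);	/* handle "released" */
}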
Example #10
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
Example #11
void __rwlock_init(rwlock_t *lock, const char *name,
		   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
	lock->magic = RWLOCK_MAGIC;
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
}
Example #12
/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->activity = 0;
#ifdef CONFIG_BRCM_DEBUG_RWSEM
	sem->wr_owner = NULL;
#endif
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
Example #13
static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb,
						   int type,
						   u32 slot)
{
	char namebuf[40];
	struct inode *inode = NULL;
	u64 blkno;
	int status = 0;

	ocfs2_sprintf_system_inode_name(namebuf,
					sizeof(namebuf),
					type, slot);

	status = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
					    strlen(namebuf), &blkno);
	if (status < 0)
		goto bail;

	inode = ocfs2_iget(osb, blkno, OCFS2_FI_FLAG_SYSFILE, type);
	if (IS_ERR(inode)) {
		mlog_errno(PTR_ERR(inode));
		inode = NULL;
		goto bail;
	}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (type == LOCAL_USER_QUOTA_SYSTEM_INODE ||
	    type == LOCAL_GROUP_QUOTA_SYSTEM_INODE ||
	    type == JOURNAL_SYSTEM_INODE) {
		/* Ignore inode lock on these inodes as the lock does not
		 * really belong to any process and lockdep cannot handle
		 * that */
		OCFS2_I(inode)->ip_inode_lockres.l_lockdep_map.key = NULL;
	} else {
		lockdep_init_map(&OCFS2_I(inode)->ip_inode_lockres.
				 l_lockdep_map,
				 ocfs2_system_inodes[type].si_name,
				 &ocfs2_sysfile_cluster_lock_key[type], 0);
	}
#endif
bail:
	return inode;
}
Example #14
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg *cfg,
				  const struct iwl_trans_ops *ops,
				  size_t dev_cmd_headroom)
{
	struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key __key;
#endif

	trans = kzalloc(sizeof(*trans) + priv_size, GFP_KERNEL);
	if (!trans)
		return NULL;

#ifdef CONFIG_LOCKDEP
	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
			 &__key, 0);
#endif

	trans->dev = dev;
	trans->cfg = cfg;
	trans->ops = ops;
	trans->dev_cmd_headroom = dev_cmd_headroom;
	trans->num_rx_queues = 1;

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd)
				  + trans->dev_cmd_headroom,
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	if (!trans->dev_cmd_pool)
		goto free;

	return trans;
 free:
	kfree(trans);
	return NULL;
}
Example #15
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	spin_lock_init(&sk->sk_lock.slock);
	sk->sk_lock.owner = NULL;
	init_waitqueue_head(&sk->sk_lock.wq);
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));

	/*
	 * Mark both the sk_lock and the sk_lock.slock as a
	 * per-address-family lock class:
	 */
	lockdep_set_class_and_name(&sk->sk_lock.slock,
				   af_family_slock_keys + sk->sk_family,
				   af_family_slock_key_strings[sk->sk_family]);
	lockdep_init_map(&sk->sk_lock.dep_map,
			 af_family_key_strings[sk->sk_family],
			 af_family_keys + sk->sk_family, 0);
}
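The per-family tables referenced above live in net/core/sock.c; the trimmed sketch below only shows their shape (the real tables enumerate every AF_* family; the designated initializers here are for brevity, not how sock.c writes them):

static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

static const char *const af_family_key_strings[AF_MAX] = {
	[AF_UNIX] = "sk_lock-AF_UNIX",
	[AF_INET] = "sk_lock-AF_INET",
	/* ... */
};
static const char *const af_family_slock_key_strings[AF_MAX] = {
	[AF_UNIX] = "slock-AF_UNIX",
	[AF_INET] = "slock-AF_INET",
	/* ... */
};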
Example #16
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg *cfg,
				  const struct iwl_trans_ops *ops)
{
	struct iwl_trans *trans;
#ifdef CONFIG_LOCKDEP
	static struct lock_class_key __key;
#endif

	trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
	if (!trans)
		return NULL;

#ifdef CONFIG_LOCKDEP
	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
			 &__key, 0);
#endif

	trans->dev = dev;
	trans->cfg = cfg;
	trans->ops = ops;
	trans->num_rx_queues = 1;

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd),
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	if (!trans->dev_cmd_pool)
		return NULL;

	WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);

	return trans;
}
Example #17
File: preload.c  Project: 908626950/linux
/**
 * __get_lock - find or create a lock instance
 * @lock: pointer to a pthread lock function
 *
 * Try to find an existing lock in the rbtree using the provided pointer. If
 * one wasn't found - create it.
 */
static struct lock_lookup *__get_lock(void *lock)
{
	struct rb_node **node, *parent;
	struct lock_lookup *l;

	ll_pthread_rwlock_rdlock(&locks_rwlock);
	node = __get_lock_node(lock, &parent);
	ll_pthread_rwlock_unlock(&locks_rwlock);
	if (*node) {
		return rb_entry(*node, struct lock_lookup, node);
	}

	/* We didn't find the lock, let's create it */
	l = alloc_lock();
	if (l == NULL)
		return NULL;

	l->orig = lock;
	/*
	 * Currently the name of the lock is the ptr value of the pthread lock,
	 * while not optimal, it makes debugging a bit easier.
	 *
	 * TODO: Get the real name of the lock using libdwarf
	 */
	sprintf(l->name, "%p", lock);
	lockdep_init_map(&l->dep_map, l->name, &l->key, 0);

	ll_pthread_rwlock_wrlock(&locks_rwlock);
	/* This might have changed since the last time we fetched it */
	node = __get_lock_node(lock, &parent);
	rb_link_node(&l->node, parent, node);
	rb_insert_color(&l->node, &locks);
	ll_pthread_rwlock_unlock(&locks_rwlock);

	return l;
}
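For context, a simplified sketch of how the preload library consumes the map created by __get_lock(): each wrapped pthread entry point reports to lockdep before delegating to the real implementation. The argument lists of lock_acquire()/lock_release() have varied across kernel versions; this follows the older 7- and 3-argument forms used alongside this code.

int pthread_mutex_lock(pthread_mutex_t *mutex)
{
	int r;

	/* tell lockdep we are about to acquire this lock */
	lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
		     (unsigned long)_RET_IP_);
	r = ll_pthread_mutex_lock(mutex);	/* the real pthread call */
	if (r)
		/* acquisition failed: roll back lockdep's held-lock state */
		lock_release(&__get_lock(mutex)->dep_map, 0,
			     (unsigned long)_RET_IP_);
	return r;
}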
Example #18
/* Create a new IPv4 subflow.
 *
 * We are in user-context and the meta-sock lock is held.
 */
int mptcp_init4_subsockets(struct sock *meta_sk, const struct mptcp_loc4 *loc,
			   struct mptcp_rem4 *rem)
{
	struct tcp_sock *tp;
	struct sock *sk;
	struct sockaddr_in loc_in, rem_in;
	struct socket_alloc sock_full;
	struct socket *sock = (struct socket *)&sock_full;
	int ret;

	/** First, create and prepare the new socket */
	memcpy(&sock_full, meta_sk->sk_socket, sizeof(sock_full));
	sock->state = SS_UNCONNECTED;
	sock->ops = NULL;

	ret = inet_create(sock_net(meta_sk), sock, IPPROTO_TCP, 1);
	if (unlikely(ret < 0)) {
		net_err_ratelimited("%s inet_create failed ret: %d\n",
				    __func__, ret);
		return ret;
	}

	sk = sock->sk;
	tp = tcp_sk(sk);

	/* All subsockets need the MPTCP-lock-class */
	lockdep_set_class_and_name(&(sk)->sk_lock.slock, &meta_slock_key, meta_slock_key_name);
	lockdep_init_map(&(sk)->sk_lock.dep_map, meta_key_name, &meta_key, 0);

	ret = mptcp_add_sock(meta_sk, sk, loc->loc4_id, rem->rem4_id, GFP_KERNEL);
	if (ret) {
		net_err_ratelimited("%s mptcp_add_sock failed ret: %d\n",
				    __func__, ret);
		goto error;
	}

	tp->mptcp->slave_sk = 1;

	/* Initializing the timer for an MPTCP subflow */
	timer_setup(&tp->mptcp->mptcp_ack_timer, mptcp_ack_handler, 0);

	/** Then, connect the socket to the peer */
	loc_in.sin_family = AF_INET;
	rem_in.sin_family = AF_INET;
	loc_in.sin_port = 0;
	if (rem->port)
		rem_in.sin_port = rem->port;
	else
		rem_in.sin_port = inet_sk(meta_sk)->inet_dport;
	loc_in.sin_addr = loc->addr;
	rem_in.sin_addr = rem->addr;

	if (loc->if_idx)
		sk->sk_bound_dev_if = loc->if_idx;

	ret = kernel_bind(sock, (struct sockaddr *)&loc_in,
			  sizeof(struct sockaddr_in));
	if (ret < 0) {
		net_err_ratelimited("%s: token %#x bind() to %pI4 index %d failed, error %d\n",
				    __func__, tcp_sk(meta_sk)->mpcb->mptcp_loc_token,
				    &loc_in.sin_addr, loc->if_idx, ret);
		goto error;
	}

	mptcp_debug("%s: token %#x pi %d src_addr:%pI4:%d dst_addr:%pI4:%d ifidx: %d\n",
		    __func__, tcp_sk(meta_sk)->mpcb->mptcp_loc_token,
		    tp->mptcp->path_index, &loc_in.sin_addr,
		    ntohs(loc_in.sin_port), &rem_in.sin_addr,
		    ntohs(rem_in.sin_port), loc->if_idx);

	ret = kernel_connect(sock, (struct sockaddr *)&rem_in,
			     sizeof(struct sockaddr_in), O_NONBLOCK);
	if (ret < 0 && ret != -EINPROGRESS) {
		net_err_ratelimited("%s: MPTCP subsocket connect() failed, error %d\n",
				    __func__, ret);
		goto error;
	}

	MPTCP_INC_STATS(sock_net(meta_sk), MPTCP_MIB_JOINSYNTX);

	sk_set_socket(sk, meta_sk->sk_socket);
	sk->sk_wq = meta_sk->sk_wq;

	return 0;

error:
	/* May happen if mptcp_add_sock fails first */
	if (!mptcp(tp)) {
		tcp_close(sk, 0);
	} else {
		local_bh_disable();
		mptcp_sub_force_close(sk);
		local_bh_enable();
	}
	return ret;
}