static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	spin_lock_init(&desc->lock);
	desc->irq = irq;
#ifdef CONFIG_SMP
	desc->node = node;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, node, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, node);
}
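/*
 * For context: lockdep normally assigns every lock initialized at the same
 * source location to one class, so the explicit lockdep_set_class() call
 * above pins all irq_desc locks to a single dedicated key regardless of
 * which init path ran. A minimal sketch of the key backing it (the name
 * matches the snippets in this section; the exact declaration site is an
 * assumption):
 */
static struct lock_class_key irq_desc_lock_class;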
/**
 * nilfs_dat_read - read or get dat inode
 * @sb: super block instance
 * @entry_size: size of a dat entry
 * @raw_inode: on-disk dat inode
 * @inodep: buffer to store the inode
 */
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
		   struct nilfs_inode *raw_inode, struct inode **inodep)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
	if (unlikely(!dat))
		return -ENOMEM;
	if (!(dat->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
	if (err)
		goto failed;

	err = nilfs_palloc_init_blockgroup(dat, entry_size);
	if (err)
		goto failed;

	di = NILFS_DAT_I(dat);
	lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
	nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	nilfs_mdt_setup_shadow_map(dat, &di->shadow);

	err = nilfs_read_inode_common(dat, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(dat);
 out:
	*inodep = dat;
	return 0;
 failed:
	iget_failed(dat);
	return err;
}
int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		mutex_init(&desc[i].request_mutex);
		desc_set_defaults(i, &desc[i], node, NULL, NULL);
	}
	return arch_early_irq_init();
}
int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq_data.irq = i;
		desc[i].irq_data.chip = &no_irq_chip;
		desc[i].kstat_irqs = kstat_irqs_all[i];
		alloc_masks(desc + i, GFP_KERNEL, node);
		desc_smp_init(desc + i, node);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
	}
	return arch_early_irq_init();
}
/**
 * nilfs_bmap_read - read a bmap from an inode
 * @bmap: bmap to initialize
 * @raw_inode: on-disk inode
 *
 * Description: nilfs_bmap_read() initializes the bmap @bmap.
 *
 * Return Value: On success, 0 is returned. On error, the following negative
 * error code is returned.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
{
	if (raw_inode == NULL)
		memset(bmap->b_u.u_data, 0, NILFS_BMAP_SIZE);
	else
		memcpy(bmap->b_u.u_data, raw_inode->i_bmap, NILFS_BMAP_SIZE);

	init_rwsem(&bmap->b_sem);
	bmap->b_state = 0;
	bmap->b_inode = &NILFS_BMAP_I(bmap)->vfs_inode;
	switch (bmap->b_inode->i_ino) {
	case NILFS_DAT_INO:
		bmap->b_pops = &nilfs_bmap_ptr_ops_p;
		bmap->b_last_allocated_key = 0;	/* XXX: use macro */
		bmap->b_last_allocated_ptr = NILFS_BMAP_NEW_PTR_INIT;
		lockdep_set_class(&bmap->b_sem, &nilfs_bmap_dat_lock_key);
		break;
	case NILFS_CPFILE_INO:
	case NILFS_SUFILE_INO:
		bmap->b_pops = &nilfs_bmap_ptr_ops_vmdt;
		bmap->b_last_allocated_key = 0;	/* XXX: use macro */
		bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
		break;
	default:
		bmap->b_pops = &nilfs_bmap_ptr_ops_v;
		bmap->b_last_allocated_key = 0;	/* XXX: use macro */
		bmap->b_last_allocated_ptr = NILFS_BMAP_INVALID_PTR;
		break;
	}

	return (bmap->b_u.u_flags & NILFS_BMAP_LARGE) ?
		nilfs_btree_init(bmap, NILFS_BMAP_LARGE_LOW,
				 NILFS_BMAP_LARGE_HIGH) :
		nilfs_direct_init(bmap, NILFS_BMAP_SMALL_LOW,
				  NILFS_BMAP_SMALL_HIGH);
}
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int node;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);
	node = first_online_node;

	/* allocate irq_desc_ptrs array based on nr_irqs */
	irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);

	/* allocate based on nr_cpu_ids */
	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
					  sizeof(int), GFP_NOWAIT, node);

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		alloc_desc_masks(&desc[i], node, true);
		init_desc_masks(&desc[i]);
		irq_desc_ptrs[i] = desc + i;
	}

	for (i = legacy_count; i < nr_irqs; i++)
		irq_desc_ptrs[i] = NULL;

	return arch_early_irq_init();
}
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);

	/* allocate irq_desc_ptrs array based on nr_irqs */
	irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));

	/* allocate based on nr_cpu_ids */
	/* FIXME: invert kstat_irqs, and it'd be a per_cpu_alloc'd thing */
	kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
					  sizeof(int));

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		init_alloc_desc_masks(&desc[i], 0, true);
		irq_desc_ptrs[i] = desc + i;
	}

	for (i = legacy_count; i < nr_irqs; i++)
		irq_desc_ptrs[i] = NULL;

	return arch_early_irq_init();
}
/*
 * Initialize the Linux inode, set up the operation vectors and
 * unlock the inode.
 *
 * When reading existing inodes from disk this is called directly
 * from xfs_iget; when creating a new inode it is called from
 * xfs_ialloc after setting up the inode.
 *
 * We are always called with an uninitialised linux inode here.
 * We need to initialise the necessary fields and take a reference
 * on it.
 */
void
xfs_setup_inode(
	struct xfs_inode	*ip)
{
	struct inode		*inode = &ip->i_vnode;
	gfp_t			gfp_mask;

	inode->i_ino = ip->i_ino;
	inode->i_state = I_NEW;

	inode_sb_list_add(inode);

	/* make the inode look hashed for the writeback code */
	hlist_add_fake(&inode->i_hash);

	inode->i_mode	= ip->i_d.di_mode;
	set_nlink(inode, ip->i_d.di_nlink);
	inode->i_uid	= xfs_uid_to_kuid(ip->i_d.di_uid);
	inode->i_gid	= xfs_gid_to_kgid(ip->i_d.di_gid);

	switch (inode->i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		inode->i_rdev =
			MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
			      sysv_minor(ip->i_df.if_u2.if_rdev));
		break;
	default:
		inode->i_rdev = 0;
		break;
	}

	inode->i_generation = ip->i_d.di_gen;
	i_size_write(inode, ip->i_d.di_size);
	inode->i_atime.tv_sec	= ip->i_d.di_atime.t_sec;
	inode->i_atime.tv_nsec	= ip->i_d.di_atime.t_nsec;
	inode->i_mtime.tv_sec	= ip->i_d.di_mtime.t_sec;
	inode->i_mtime.tv_nsec	= ip->i_d.di_mtime.t_nsec;
	inode->i_ctime.tv_sec	= ip->i_d.di_ctime.t_sec;
	inode->i_ctime.tv_nsec	= ip->i_d.di_ctime.t_nsec;
	xfs_diflags_to_iflags(inode, ip);

	ip->d_ops = ip->i_mount->m_nondir_inode_ops;
	lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class);
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &xfs_inode_operations;
		inode->i_fop = &xfs_file_operations;
		inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	case S_IFDIR:
		lockdep_set_class(&ip->i_lock.mr_lock, &xfs_dir_ilock_class);
		if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
			inode->i_op = &xfs_dir_ci_inode_operations;
		else
			inode->i_op = &xfs_dir_inode_operations;
		inode->i_fop = &xfs_dir_file_operations;
		ip->d_ops = ip->i_mount->m_dir_inode_ops;
		break;
	case S_IFLNK:
		inode->i_op = &xfs_symlink_inode_operations;
		if (!(ip->i_df.if_flags & XFS_IFINLINE))
			inode->i_mapping->a_ops = &xfs_address_space_operations;
		break;
	default:
		inode->i_op = &xfs_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}

	/*
	 * Ensure all page cache allocations are done from GFP_NOFS context to
	 * prevent direct reclaim recursion back into the filesystem and blowing
	 * stacks or deadlocking.
	 */
	gfp_mask = mapping_gfp_mask(inode->i_mapping);
	mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS)));

	/*
	 * If there is no attribute fork no ACL can exist on this inode,
	 * and it can't have any file capabilities attached to it either.
	 */
	if (!XFS_IFORK_Q(ip)) {
		inode_has_no_xattr(inode);
		cache_no_acl(inode);
	}

	xfs_iflags_clear(ip, XFS_INEW);
	barrier();

	unlock_new_inode(inode);
}
struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);

	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);

		atomic_set(&newsk->sk_rmem_alloc, 0);
		atomic_set(&newsk->sk_wmem_alloc, 0);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		rwlock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class(&newsk->sk_callback_lock,
				  af_callback_keys + newsk->sk_family);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		newsk->sk_socket = NULL;
		newsk->sk_sleep	 = NULL;

		if (newsk->sk_prot->sockets_allocated)
			atomic_inc(newsk->sk_prot->sockets_allocated);
	}
out:
	return newsk;
}
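/*
 * sk_clone() picks its key with pointer arithmetic: af_callback_keys +
 * newsk->sk_family indexes an array holding one key per address family, so
 * sk_callback_lock gets a distinct lockdep class per protocol family. A
 * minimal sketch of the backing array, assuming the conventional AF_MAX
 * bound from <linux/socket.h>:
 */
static struct lock_class_key af_callback_keys[AF_MAX];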
void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
{
	spin_lock_init(&q->lock);
	lockdep_set_class(&q->lock, key);
	INIT_LIST_HEAD(&q->task_list);
}
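/*
 * __init_waitqueue_head() leaves key selection to its caller. The usual
 * wrapper allocates one static key per call site, so every distinct
 * init_waitqueue_head() location becomes its own lockdep class. A sketch
 * modeled on the kernel's wrapper macro (exact details vary by version):
 */
#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), &__key);	\
	} while (0)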
static void nr_set_lockdep_key(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
}
static void nr_set_lockdep_one(struct net_device *dev,
			       struct netdev_queue *txq,
			       void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
}
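/*
 * The two NET/ROM helpers above rely on file-scope keys so that NET/ROM
 * devices' addr_list_lock and per-queue _xmit_lock are classed apart from
 * the same locks on other netdev types. The assumed backing declarations
 * (names taken directly from the calls above):
 */
static struct lock_class_key nr_netdev_xmit_lock_key;
static struct lock_class_key nr_netdev_addr_lock_key;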
// ARM10C 20141004
// i: 0, node: 0, null
// ARM10C 20141115
// 16, node: 0, owner: NULL
// ARM10C 20141115
// 64, node: 0, owner: NULL
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	// GFP_KERNEL: 0xD0
	gfp_t gfp = GFP_KERNEL;

	// gfp: GFP_KERNEL: 0xD0
	// sizeof(struct irq_desc): 156 bytes, gfp: GFP_KERNEL: 0xD0, node: 0
	// kzalloc_node(156, GFP_KERNEL: 0xD0, 0): kmem_cache#28-o0
	desc = kzalloc_node(sizeof(*desc), gfp, node);
	// desc: kmem_cache#28-o0

	// desc: kmem_cache#28-o0
	if (!desc)
		return NULL;

	/* allocate based on nr_cpu_ids */
	// desc->kstat_irqs: (kmem_cache#28-o0)->kstat_irqs
	// alloc_percpu(unsigned int): allocates a 4-byte per-cpu area
	desc->kstat_irqs = alloc_percpu(unsigned int);
	// desc->kstat_irqs: (kmem_cache#28-o0)->kstat_irqs: 4-byte per-cpu area

	// desc->kstat_irqs: (kmem_cache#28-o0)->kstat_irqs: 4-byte per-cpu area
	if (!desc->kstat_irqs)
		goto err_desc;

	// desc: kmem_cache#28-o0, gfp: GFP_KERNEL: 0xD0, node: 0
	// alloc_masks(kmem_cache#28-o0, GFP_KERNEL: 0xD0, 0): 0
	if (alloc_masks(desc, gfp, node))
		goto err_kstat;
	// what alloc_masks did:
	// (kmem_cache#28-o0)->irq_data.affinity.bits[0]: 0

	// desc->lock: (kmem_cache#28-o0)->lock
	raw_spin_lock_init(&desc->lock);
	// spinlock initialization performed on (kmem_cache#28-o0)->lock

	// desc->lock: (kmem_cache#28-o0)->lock
	lockdep_set_class(&desc->lock, &irq_desc_lock_class); // null function

	// irq: 0, desc: kmem_cache#28-o0, node: 0, owner: null
	desc_set_defaults(irq, desc, node, owner);
	// what desc_set_defaults did:
	// (kmem_cache#28-o0)->irq_data.irq: 0
	// (kmem_cache#28-o0)->irq_data.chip: &no_irq_chip
	// (kmem_cache#28-o0)->irq_data.chip_data: NULL
	// (kmem_cache#28-o0)->irq_data.handler_data: NULL
	// (kmem_cache#28-o0)->irq_data.msi_desc: NULL
	// (kmem_cache#28-o0)->status_use_accessors: 0xc00
	// (&(kmem_cache#28-o0)->irq_data)->state_use_accessors: 0x10000
	// (kmem_cache#28-o0)->handle_irq: handle_bad_irq
	// (kmem_cache#28-o0)->depth: 1
	// (kmem_cache#28-o0)->irq_count: 0
	// (kmem_cache#28-o0)->irqs_unhandled: 0
	// (kmem_cache#28-o0)->name: NULL
	// (kmem_cache#28-o0)->owner: null
	// [pcp0...3] (kmem_cache#28-o0)->kstat_irqs: 0
	// (kmem_cache#28-o0)->irq_data.node: 0
	// (kmem_cache#28-o0)->irq_data.affinity.bits[0]: 0xF

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}
/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	struct xfs_trans	*tp = NULL;
	int			error;
	int			cancelflags = 0;

	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQ_USER:
		/* uses the default lock class */
		break;
	case XFS_DQ_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQ_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	XFS_STATS_INC(xs_qm_dquot);

	trace_xfs_dqread(dqp);

	if (flags & XFS_QMOPT_DQALLOC) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_dqalloc,
					  XFS_QM_DQALLOC_SPACE_RES(mp), 0);
		if (error)
			goto error1;
		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
	}

	/*
	 * get a pointer to the on-disk dquot and the buffer containing it
	 * dqp already knows its own type (GROUP/USER).
	 */
	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
	if (error) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		cancelflags |= XFS_TRANS_ABORT;
		goto error1;
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);

	/* Mark the buf so that this will stay incore a little longer */
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * We got the buffer with a xfs_trans_read_buf() (in dqtobp())
	 * So we need to release with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	if (tp) {
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		if (error)
			goto error0;
	}

	*O_dqpp = dqp;
	return error;

error1:
	if (tp)
		xfs_trans_cancel(tp, cancelflags);
error0:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return error;
}
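/*
 * The switch in xfs_qm_dqread() assumes one key per quota type, so that a
 * user dquot lock can be held together with a group or project dquot lock
 * without lockdep flagging it as recursive locking of one class. The
 * assumed backing declarations (names taken directly from the calls above;
 * the declaration site is an assumption):
 */
static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;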
/**
 * batadv_set_lockdep_class - Set txq and addr_list lockdep class
 * @dev: network device to modify
 */
static void batadv_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
}
/*
 * Allocate and initialize a dquot. We don't always allocate fresh memory;
 * we try to reclaim a free dquot if the number of incore dquots is above
 * a threshold.
 * The only field inside the core that gets initialized at this point
 * is the d_id field. The idea is to fill in the entire q_core
 * when we read in the on-disk dquot.
 */
STATIC xfs_dquot_t *
xfs_qm_dqinit(
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type)
{
	xfs_dquot_t	*dqp;
	boolean_t	brandnewdquot;

	brandnewdquot = xfs_qm_dqalloc_incore(&dqp);
	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;

	/*
	 * No need to re-initialize these if this is a reclaimed dquot.
	 */
	if (brandnewdquot) {
		dqp->dq_flnext = dqp->dq_flprev = dqp;
		mutex_init(&dqp->q_qlock);
		init_waitqueue_head(&dqp->q_pinwait);

		/*
		 * Because we want to use a counting completion, complete
		 * the flush completion once to allow a single access to
		 * the flush completion without blocking.
		 */
		init_completion(&dqp->q_flush);
		complete(&dqp->q_flush);

#ifdef XFS_DQUOT_TRACE
		dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_NOFS);
		xfs_dqtrace_entry(dqp, "DQINIT");
#endif
	} else {
		/*
		 * Only the q_core portion was zeroed in dqreclaim_one().
		 * So, we need to reset others.
		 */
		dqp->q_nrefs = 0;
		dqp->q_blkno = 0;
		dqp->MPL_NEXT = dqp->HL_NEXT = NULL;
		dqp->HL_PREVP = dqp->MPL_PREVP = NULL;
		dqp->q_bufoffset = 0;
		dqp->q_fileoffset = 0;
		dqp->q_transp = NULL;
		dqp->q_gdquot = NULL;
		dqp->q_res_bcount = 0;
		dqp->q_res_icount = 0;
		dqp->q_res_rtbcount = 0;
		atomic_set(&dqp->q_pincount, 0);
		dqp->q_hash = NULL;
		ASSERT(dqp->dq_flnext == dqp->dq_flprev);

#ifdef XFS_DQUOT_TRACE
		ASSERT(dqp->q_trace);
		xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT");
#endif
	}

	/*
	 * In either case we need to make sure group quotas have a different
	 * lock class than user quotas, to make sure lockdep knows we can
	 * take locks of one of each at the same time.
	 */
	if (!(type & XFS_DQ_USER))
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);

	/*
	 * log item gets initialized later
	 */
	return (dqp);
}