Example #1
0
static void migrate_hrtimers(int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = &__get_cpu_var(hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);

	/* Check, if we got expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
}
Example #2
0
static void migrate_hrtimers(int cpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &get_cpu_var(hrtimer_bases);

	tick_cancel_sched_timer(cpu);

	local_irq_disable();
	spin_lock(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	local_irq_enable();
	put_cpu_var(hrtimer_bases);
}
Example #3
0
static void sel_remove_entries(struct dentry *de)
{
	struct list_head *node;

	spin_lock(&de->d_lock);
	node = de->d_subdirs.next;
	while (node != &de->d_subdirs) {
		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);

		spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
		list_del_init(node);

		if (d->d_inode) {
			dget_dlock(d);
			spin_unlock(&de->d_lock);
			spin_unlock(&d->d_lock);
			d_delete(d);
			simple_unlink(de->d_inode, d);
			dput(d);
			spin_lock(&de->d_lock);
		} else
			spin_unlock(&d->d_lock);
		node = de->d_subdirs.next;
	}

	spin_unlock(&de->d_lock);
}
Example #4
0
/*
 * Calculate and dget next entry in top down tree traversal.
 */
static struct dentry *get_next_positive_dentry(struct dentry *prev,
					       struct dentry *root)
{
	struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
	struct list_head *next;
	struct dentry *p, *ret;

	if (prev == NULL)
		return dget(root);

	spin_lock(&sbi->lookup_lock);
relock:
	p = prev;
	spin_lock(&p->d_lock);
again:
	next = p->d_subdirs.next;
	if (next == &p->d_subdirs) {
		while (1) {
			struct dentry *parent;

			if (p == root) {
				spin_unlock(&p->d_lock);
				spin_unlock(&sbi->lookup_lock);
				dput(prev);
				return NULL;
			}

			parent = p->d_parent;
			if (!spin_trylock(&parent->d_lock)) {
				spin_unlock(&p->d_lock);
				cpu_relax();
				goto relock;
			}
			spin_unlock(&p->d_lock);
			next = p->d_child.next;
			p = parent;
			if (next != &parent->d_subdirs)
				break;
		}
	}
	ret = list_entry(next, struct dentry, d_child);

	spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
	/* Negative dentry - try next */
	if (!simple_positive(ret)) {
		spin_unlock(&p->d_lock);
		lock_set_subclass(&ret->d_lock.dep_map, 0, _RET_IP_);
		p = ret;
		goto again;
	}
	dget_dlock(ret);
	spin_unlock(&ret->d_lock);
	spin_unlock(&p->d_lock);
	spin_unlock(&sbi->lookup_lock);

	dput(prev);

	return ret;
}
Example #5
0
static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
{
	int err = 0, subclass;

	subclass = vlan_calculate_locking_subclass(to);

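	/*
	 * addr_list_lock is shared as a lock class by stacked devices; the
	 * computed subclass keeps lockdep from reporting this nested
	 * acquisition as recursive locking.
	 */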
	spin_lock_nested(&to->addr_list_lock, subclass);
	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	spin_unlock(&to->addr_list_lock);
}
Example #6
0
/*
 * we need to notify the parent when an op completes that we had outstanding
 * upon it
 */
static inline void fscache_done_parent_op(struct fscache_object *object)
{
	struct fscache_object *parent = object->parent;

	_enter("OBJ%x {OBJ%x,%x}",
	       object->debug_id, parent->debug_id, parent->n_ops);

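	/*
	 * parent->lock belongs to the same lock class as object->lock;
	 * subclass 1 tells lockdep this is the parent's instance rather
	 * than recursive locking of the same lock.
	 */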
	spin_lock_nested(&parent->lock, 1);
	parent->n_ops--;
	parent->n_obj_ops--;
	if (parent->n_ops == 0)
		fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
	spin_unlock(&parent->lock);
}
Example #7
0
static struct dentry *get_next_positive_subdir(struct dentry *prev,
						struct dentry *root)
{
	struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
	struct list_head *next;
	struct dentry *p, *q;

	spin_lock(&sbi->lookup_lock);

	if (prev == NULL) {
		spin_lock(&root->d_lock);
		prev = dget_dlock(root);
		next = prev->d_subdirs.next;
		p = prev;
		goto start;
	}

	p = prev;
	spin_lock(&p->d_lock);
again:
	next = p->d_u.d_child.next;
start:
	if (next == &root->d_subdirs) {
		spin_unlock(&p->d_lock);
		spin_unlock(&sbi->lookup_lock);
		dput(prev);
		return NULL;
	}

	q = list_entry(next, struct dentry, d_u.d_child);

	spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
	/* Negative dentry - try next */
	if (!simple_positive(q)) {
		spin_unlock(&p->d_lock);
		lock_set_subclass(&q->d_lock.dep_map, 0, _RET_IP_);
		p = q;
		goto again;
	}
	dget_dlock(q);
	spin_unlock(&q->d_lock);
	spin_unlock(&p->d_lock);
	spin_unlock(&sbi->lookup_lock);

	dput(prev);

	return q;
}
Example #8
0
/*
 * Calculate and dget next entry in the subdirs list under root.
 */
static struct dentry *get_next_positive_subdir(struct dentry *prev,
					       struct dentry *root)
{
	struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb);
	struct list_head *next;
	struct dentry *q;

	spin_lock(&sbi->lookup_lock);
	spin_lock(&root->d_lock);

	if (prev)
		next = prev->d_child.next;
	else {
		prev = dget_dlock(root);
		next = prev->d_subdirs.next;
	}

cont:
	if (next == &root->d_subdirs) {
		spin_unlock(&root->d_lock);
		spin_unlock(&sbi->lookup_lock);
		dput(prev);
		return NULL;
	}

	q = list_entry(next, struct dentry, d_child);

	spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
	/* Already gone or negative dentry (under construction) - try next */
	if (!d_count(q) || !simple_positive(q)) {
		spin_unlock(&q->d_lock);
		next = q->d_child.next;
		goto cont;
	}
	dget_dlock(q);
	spin_unlock(&q->d_lock);
	spin_unlock(&root->d_lock);
	spin_unlock(&sbi->lookup_lock);

	dput(prev);

	return q;
}
Example #9
0
loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
{
	struct dentry *dentry = file->f_path.dentry;
	mutex_lock(&dentry->d_inode->i_mutex);
	switch (whence) {
		case 1:
			offset += file->f_pos;
		case 0:
			if (offset >= 0)
				break;
		default:
			mutex_unlock(&dentry->d_inode->i_mutex);
			return -EINVAL;
	}
	if (offset != file->f_pos) {
		file->f_pos = offset;
		if (file->f_pos >= 2) {
			struct list_head *p;
			struct dentry *cursor = file->private_data;
			loff_t n = file->f_pos - 2;

			spin_lock(&dentry->d_lock);
			/* d_lock not required for cursor */
			list_del(&cursor->d_u.d_child);
			p = dentry->d_subdirs.next;
			while (n && p != &dentry->d_subdirs) {
				struct dentry *next;
				next = list_entry(p, struct dentry, d_u.d_child);
				spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
				if (simple_positive(next))
					n--;
				spin_unlock(&next->d_lock);
				p = p->next;
			}
			list_add_tail(&cursor->d_u.d_child, p);
			spin_unlock(&dentry->d_lock);
		}
	}
	mutex_unlock(&dentry->d_inode->i_mutex);
	return offset;
}
Example #10
0
/*
 * initialise an object
 * - check the specified object's parent to see if we can make use of it
 *   immediately to do a creation
 * - we may need to start the process of creating a parent and we need to wait
 *   for the parent's lookup and creation to complete if it's not there yet
 * - an object's cookie is pinned until we clear FSCACHE_COOKIE_CREATING on the
 *   leaf-most cookies of the object and all its children
 */
static void fscache_initialise_object(struct fscache_object *object)
{
	struct fscache_object *parent;

	_enter("");
	ASSERT(object->cookie != NULL);
	ASSERT(object->cookie->parent != NULL);
	ASSERT(list_empty(&object->work.link));

	if (object->events & ((1 << FSCACHE_OBJECT_EV_ERROR) |
			      (1 << FSCACHE_OBJECT_EV_RELEASE) |
			      (1 << FSCACHE_OBJECT_EV_RETIRE) |
			      (1 << FSCACHE_OBJECT_EV_WITHDRAW))) {
		_debug("abort init %lx", object->events);
		spin_lock(&object->lock);
		object->state = FSCACHE_OBJECT_ABORT_INIT;
		spin_unlock(&object->lock);
		return;
	}

	spin_lock(&object->cookie->lock);
	spin_lock_nested(&object->cookie->parent->lock, 1);

	parent = object->parent;
	if (!parent) {
		_debug("no parent");
		set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events);
	} else {
		spin_lock(&object->lock);
		spin_lock_nested(&parent->lock, 1);
		_debug("parent %s", fscache_object_states[parent->state]);

		if (parent->state >= FSCACHE_OBJECT_DYING) {
			_debug("bad parent");
			set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events);
		} else if (parent->state < FSCACHE_OBJECT_AVAILABLE) {
			_debug("wait");

			/* we may get woken up in this state by child objects
			 * binding on to us, so we need to make sure we don't
			 * add ourself to the list multiple times */
			if (list_empty(&object->dep_link)) {
				fscache_stat(&fscache_n_cop_grab_object);
				object->cache->ops->grab_object(object);
				fscache_stat_d(&fscache_n_cop_grab_object);
				list_add(&object->dep_link,
					 &parent->dependents);

				/* fscache_acquire_non_index_cookie() uses this
				 * to wake the chain up */
				if (parent->state == FSCACHE_OBJECT_INIT)
					fscache_enqueue_object(parent);
			}
		} else {
			_debug("go");
			parent->n_ops++;
			parent->n_obj_ops++;
			object->lookup_jif = jiffies;
			object->state = FSCACHE_OBJECT_LOOKING_UP;
			set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
		}

		spin_unlock(&parent->lock);
		spin_unlock(&object->lock);
	}

	spin_unlock(&object->cookie->parent->lock);
	spin_unlock(&object->cookie->lock);
	_leave("");
}
Example #11
0
/* try d_walk() in linux/fs/dcache.c */
int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root,
		   au_dpages_test test, void *arg)
{
	int err;
	struct dentry *this_parent;
	struct list_head *next;
	struct super_block *sb = root->d_sb;

	err = 0;
	write_seqlock(&rename_lock);
	this_parent = root;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	if (this_parent->d_sb == sb
	    && !IS_ROOT(this_parent)
	    && au_di(this_parent)
	    && d_count(this_parent)
	    && (!test || test(this_parent, arg))) {
		err = au_dpages_append(dpages, this_parent, GFP_ATOMIC);
		if (unlikely(err))
			goto out;
	}

	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry,
						   d_u.d_child);

		next = tmp->next;
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (d_count(dentry)) {
			if (!list_empty(&dentry->d_subdirs)) {
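				/*
				 * Descend: dentry's lock stays held, but its
				 * lockdep annotation is handed over from the
				 * nested subclass to subclass 0, as it now
				 * acts as the parent lock for the next level.
				 */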
				spin_unlock(&this_parent->d_lock);
				spin_release(&dentry->d_lock.dep_map, 1,
					     _RET_IP_);
				this_parent = dentry;
				spin_acquire(&this_parent->d_lock.dep_map, 0, 1,
					     _RET_IP_);
				goto repeat;
			}
			if (dentry->d_sb == sb
			    && au_di(dentry)
			    && (!test || test(dentry, arg)))
				err = au_dpages_append(dpages, dentry,
						       GFP_ATOMIC);
		}
		spin_unlock(&dentry->d_lock);
		if (unlikely(err))
			goto out;
	}

	if (this_parent != root) {
		struct dentry *tmp;
		struct dentry *child;

		tmp = this_parent->d_parent;
		rcu_read_lock();
		spin_unlock(&this_parent->d_lock);
		child = this_parent;
		this_parent = tmp;
		spin_lock(&this_parent->d_lock);
		rcu_read_unlock();
		next = child->d_u.d_child.next;
		goto resume;
	}

out:
	spin_unlock(&this_parent->d_lock);
	write_sequnlock(&rename_lock);
	return err;
}
Example #12
0
struct dentry *autofs4_expire_indirect(struct super_block *sb,
				       struct vfsmount *mnt,
				       struct autofs_sb_info *sbi,
				       int how)
{
	unsigned long timeout;
	unsigned long now;
	struct dentry *root = sb->s_root;
	struct dentry *dentry;
	struct dentry *expired = NULL;
	int do_now = how & AUTOFS_EXP_IMMEDIATE;
	int exp_leaves = how & AUTOFS_EXP_LEAVES;
	struct autofs_info *ino;
	unsigned int ino_count;

	if (!root)
		return NULL;

	now = jiffies;
	timeout = sbi->exp_timeout;

	dentry = NULL;
	while ((dentry = get_next_positive_subdir(dentry, root))) {
		spin_lock(&sbi->fs_lock);
		ino = autofs4_dentry_ino(dentry);
		/* No point expiring a pending mount */
		if (ino->flags & AUTOFS_INF_PENDING)
			goto next;

		if (d_mountpoint(dentry)) {
			DPRINTK("checking mountpoint %p %.*s",
				dentry, (int)dentry->d_name.len, dentry->d_name.name);

			/* Path walk currently on this dentry? */
			ino_count = atomic_read(&ino->count) + 2;
			if (dentry->d_count > ino_count)
				goto next;

			/* Can we umount this guy */
			if (autofs4_mount_busy(mnt, dentry))
				goto next;

			/* Can we expire this guy */
			if (autofs4_can_expire(dentry, timeout, do_now)) {
				expired = dentry;
				goto found;
			}
			goto next;
		}

		if (simple_empty(dentry))
			goto next;

		/* Case 2: tree mount, expire iff entire tree is not busy */
		if (!exp_leaves) {
			/* Path walk currently on this dentry? */
			ino_count = atomic_read(&ino->count) + 1;
			if (dentry->d_count > ino_count)
				goto next;

			if (!autofs4_tree_busy(mnt, dentry, timeout, do_now)) {
				expired = dentry;
				goto found;
			}
		} else {
			/* Path walk currently on this dentry? */
			ino_count = atomic_read(&ino->count) + 1;
			if (dentry->d_count > ino_count)
				goto next;

			expired = autofs4_check_leaves(mnt, dentry, timeout, do_now);
			if (expired) {
				dput(dentry);
				goto found;
			}
		}
next:
		spin_unlock(&sbi->fs_lock);
	}
	return NULL;

found:
	DPRINTK("returning %p %.*s",
		expired, (int)expired->d_name.len, expired->d_name.name);
	ino = autofs4_dentry_ino(expired);
	ino->flags |= AUTOFS_INF_EXPIRING;
	init_completion(&ino->expire_complete);
	spin_unlock(&sbi->fs_lock);
	spin_lock(&sbi->lookup_lock);
	spin_lock(&expired->d_parent->d_lock);
	spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
	spin_unlock(&expired->d_lock);
	spin_unlock(&expired->d_parent->d_lock);
	spin_unlock(&sbi->lookup_lock);
	return expired;
}
Example #13
/*
 * Find an eligible tree to time-out
 * A tree is eligible if :-
 *  - it is unused by any user process
 *  - it has been unused for exp_timeout time
 */
struct dentry *autofs4_expire_indirect(struct super_block *sb,
				       struct vfsmount *mnt,
				       struct autofs_sb_info *sbi,
				       int how)
{
	unsigned long timeout;
	unsigned long now;
	struct dentry *root = sb->s_root;
	struct dentry *dentry;
	struct dentry *expired = NULL;
	int do_now = how & AUTOFS_EXP_IMMEDIATE;
	int exp_leaves = how & AUTOFS_EXP_LEAVES;
	struct autofs_info *ino;
	unsigned int ino_count;

	if (!root)
		return NULL;

	now = jiffies;
	timeout = sbi->exp_timeout;

	dentry = NULL;
	while ((dentry = get_next_positive_subdir(dentry, root))) {
		spin_lock(&sbi->fs_lock);
		ino = autofs4_dentry_ino(dentry);
		/* No point expiring a pending mount */
		if (ino->flags & AUTOFS_INF_PENDING)
			goto next;

		/*
		 * Case 1: (i) indirect mount or top level pseudo direct mount
		 *	   (autofs-4.1).
		 *	   (ii) indirect mount with offset mount, check the "/"
		 *	   offset (autofs-5.0+).
		 */
		if (d_mountpoint(dentry)) {
			DPRINTK("checking mountpoint %p %.*s",
				dentry, (int)dentry->d_name.len, dentry->d_name.name);

			/* Path walk currently on this dentry? */
			ino_count = atomic_read(&ino->count) + 2;
			if (dentry->d_count > ino_count)
				goto next;

			/* Can we umount this guy */
			if (autofs4_mount_busy(mnt, dentry))
				goto next;

			/* Can we expire this guy */
			if (autofs4_can_expire(dentry, timeout, do_now)) {
				expired = dentry;
				goto found;
			}
			goto next;
		}

		if (simple_empty(dentry))
			goto next;

		/* Case 2: tree mount, expire iff entire tree is not busy */
		if (!exp_leaves) {
			/* Path walk currently on this dentry? */
			ino_count = atomic_read(&ino->count) + 1;
			if (dentry->d_count > ino_count)
				goto next;

			if (!autofs4_tree_busy(mnt, dentry, timeout, do_now)) {
				expired = dentry;
				goto found;
			}
		/*
		 * Case 3: pseudo direct mount, expire individual leaves
		 *	   (autofs-4.1).
		 */
		} else {
			/* Path walk currently on this dentry? */
			ino_count = atomic_read(&ino->count) + 1;
			if (dentry->d_count > ino_count)
				goto next;

			expired = autofs4_check_leaves(mnt, dentry, timeout, do_now);
			if (expired) {
				dput(dentry);
				goto found;
			}
		}
next:
		spin_unlock(&sbi->fs_lock);
	}
	return NULL;

found:
	DPRINTK("returning %p %.*s",
		expired, (int)expired->d_name.len, expired->d_name.name);
	ino = autofs4_dentry_ino(expired);
	ino->flags |= AUTOFS_INF_EXPIRING;
	init_completion(&ino->expire_complete);
	spin_unlock(&sbi->fs_lock);
	spin_lock(&sbi->lookup_lock);
	spin_lock(&expired->d_parent->d_lock);
	spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&expired->d_parent->d_subdirs, &expired->d_child);
	spin_unlock(&expired->d_lock);
	spin_unlock(&expired->d_parent->d_lock);
	spin_unlock(&sbi->lookup_lock);
	return expired;
}
Example #14
0
static void fscache_initialise_object(struct fscache_object *object)
{
	struct fscache_object *parent;

	_enter("");
	ASSERT(object->cookie != NULL);
	ASSERT(object->cookie->parent != NULL);

	if (object->events & ((1 << FSCACHE_OBJECT_EV_ERROR) |
			      (1 << FSCACHE_OBJECT_EV_RELEASE) |
			      (1 << FSCACHE_OBJECT_EV_RETIRE) |
			      (1 << FSCACHE_OBJECT_EV_WITHDRAW))) {
		_debug("abort init %lx", object->events);
		spin_lock(&object->lock);
		object->state = FSCACHE_OBJECT_ABORT_INIT;
		spin_unlock(&object->lock);
		return;
	}

	spin_lock(&object->cookie->lock);
	spin_lock_nested(&object->cookie->parent->lock, 1);

	parent = object->parent;
	if (!parent) {
		_debug("no parent");
		set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events);
	} else {
		spin_lock(&object->lock);
		spin_lock_nested(&parent->lock, 1);
		_debug("parent %s", fscache_object_states[parent->state]);

		if (parent->state >= FSCACHE_OBJECT_DYING) {
			_debug("bad parent");
			set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events);
		} else if (parent->state < FSCACHE_OBJECT_AVAILABLE) {
			_debug("wait");

			if (list_empty(&object->dep_link)) {
				fscache_stat(&fscache_n_cop_grab_object);
				object->cache->ops->grab_object(object);
				fscache_stat_d(&fscache_n_cop_grab_object);
				list_add(&object->dep_link,
					 &parent->dependents);

				if (parent->state == FSCACHE_OBJECT_INIT)
					fscache_enqueue_object(parent);
			}
		} else {
			_debug("go");
			parent->n_ops++;
			parent->n_obj_ops++;
			object->lookup_jif = jiffies;
			object->state = FSCACHE_OBJECT_LOOKING_UP;
			set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
		}

		spin_unlock(&parent->lock);
		spin_unlock(&object->lock);
	}

	spin_unlock(&object->cookie->parent->lock);
	spin_unlock(&object->cookie->lock);
	_leave("");
}
Example #15
0
/*
 * returns: -ERRNO if error (returned to user)
 *          0: tell VFS to invalidate dentry
 *          1: dentry is valid
 */
static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
    int err = 1;
    struct path parent_lower_path, lower_path;
    struct dentry *parent_dentry = NULL;
    struct dentry *parent_lower_dentry = NULL;
    struct dentry *lower_cur_parent_dentry = NULL;
    struct dentry *lower_dentry = NULL;

    if (flags & LOOKUP_RCU)
        return -ECHILD;

    spin_lock(&dentry->d_lock);
    if (IS_ROOT(dentry)) {
        spin_unlock(&dentry->d_lock);
        return 1;
    }
    spin_unlock(&dentry->d_lock);

    /* check uninitialized obb_dentry and
     * whether the base obbpath has been changed or not */
    if (is_obbpath_invalid(dentry)) {
        d_drop(dentry);
        return 0;
    }

    parent_dentry = dget_parent(dentry);
    sdcardfs_get_lower_path(parent_dentry, &parent_lower_path);
    sdcardfs_get_real_lower(dentry, &lower_path);
    parent_lower_dentry = parent_lower_path.dentry;
    lower_dentry = lower_path.dentry;
    lower_cur_parent_dentry = dget_parent(lower_dentry);

    spin_lock(&lower_dentry->d_lock);
    if (d_unhashed(lower_dentry)) {
        spin_unlock(&lower_dentry->d_lock);
        d_drop(dentry);
        err = 0;
        goto out;
    }
    spin_unlock(&lower_dentry->d_lock);

    if (parent_lower_dentry != lower_cur_parent_dentry) {
        d_drop(dentry);
        err = 0;
        goto out;
    }

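    /*
     * Take both d_locks in pointer order so all callers agree on the
     * ordering; the second lock uses DENTRY_D_LOCK_NESTED so lockdep
     * accepts holding two locks of the same class.
     */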
    if (dentry < lower_dentry) {
        spin_lock(&dentry->d_lock);
        spin_lock_nested(&lower_dentry->d_lock, DENTRY_D_LOCK_NESTED);
    } else {
        spin_lock(&lower_dentry->d_lock);
        spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
    }

    if (dentry->d_name.len != lower_dentry->d_name.len) {
        __d_drop(dentry);
        err = 0;
    } else if (strncasecmp(dentry->d_name.name, lower_dentry->d_name.name,
                           dentry->d_name.len) != 0) {
        __d_drop(dentry);
        err = 0;
    }

    if (dentry < lower_dentry) {
        spin_unlock(&lower_dentry->d_lock);
        spin_unlock(&dentry->d_lock);
    } else {
        spin_unlock(&dentry->d_lock);
        spin_unlock(&lower_dentry->d_lock);
    }

out:
    dput(parent_dentry);
    dput(lower_cur_parent_dentry);
    sdcardfs_put_lower_path(parent_dentry, &parent_lower_path);
    sdcardfs_put_real_lower(dentry, &lower_path);
    return err;
}