Example #1
/* Must be called with interrupts off and with the kcs_lock held. */
static void kcs_restart_short_timer(struct kcs_info *kcs_info)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	unsigned long jiffies_now;

	if (del_timer(&(kcs_info->kcs_timer))) {
		/* If we don't delete the timer, then it will go off
		   immediately, anyway.  So we only process if we
		   actually delete the timer. */

		/* We already have irqsave on, so no need for it
		   here. */
		br_read_lock(BR_XTIME_LOCK);
		jiffies_now = jiffies;
		kcs_info->kcs_timer.expires = jiffies_now;

		kcs_info->kcs_timer.sub_expires
			= quick_update_jiffies_sub(jiffies_now);
		br_read_unlock(BR_XTIME_LOCK);

		kcs_info->kcs_timer.sub_expires
			+= usec_to_arch_cycles(KCS_SHORT_TIMEOUT_USEC);
		while (kcs_info->kcs_timer.sub_expires >= cycles_per_jiffies) {
			kcs_info->kcs_timer.expires++;
			kcs_info->kcs_timer.sub_expires -= cycles_per_jiffies;
		}
		add_timer(&(kcs_info->kcs_timer));
	}
#endif
}
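The interesting part of this example is the carry loop: the short timeout is added to the sub-jiffy field, and any overflow is folded back into whole jiffies. Below is a minimal editorial sketch of that arithmetic in isolation; the struct and the cycles_per_jiffy parameter are placeholder names standing in for the high-res-timers patch internals, not code from the driver.

/* Editorial sketch, not driver code: the same jiffies/sub-jiffies carry
 * arithmetic with placeholder names. */
struct hrt_expiry {
	unsigned long expires;      /* whole jiffies */
	unsigned long sub_expires;  /* arch cycles inside the current jiffy */
};

static void hrt_expiry_add_cycles(struct hrt_expiry *t,
				  unsigned long cycles,
				  unsigned long cycles_per_jiffy)
{
	t->sub_expires += cycles;
	/* Fold any whole jiffies' worth of cycles back into ->expires. */
	while (t->sub_expires >= cycles_per_jiffy) {
		t->expires++;
		t->sub_expires -= cycles_per_jiffy;
	}
}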
Example #2
File: glue.c Project: sophos/talpa
/*
 * hidden vfsmnt_lock handling
 */
void talpa_vfsmount_lock(unsigned* m_seq)
{
#if defined TALPA_USE_VFSMOUNT_LOCK
#   if defined TALPA_VFSMOUNT_LG_BRLOCK
    br_read_lock(&vfsmount_lock);
#   elif defined TALPA_VFSMOUNT_LOCK_BRLOCK
    br_read_lock(vfsmount_lock);
#   else
    spinlock_t* talpa_vfsmount_lock_addr = (spinlock_t *)talpa_get_symbol("vfsmount_lock", (void *)TALPA_VFSMOUNT_LOCK_ADDR);

    spin_lock(talpa_vfsmount_lock_addr);
#   endif
#elif defined TALPA_USE_MOUNT_LOCK
    seqlock_t* mount_lock_addr = (seqlock_t *)talpa_get_symbol("mount_lock", (void *)TALPA_MOUNT_LOCK_ADDR);
    read_seqbegin_or_lock(mount_lock_addr, m_seq);
#else
    // On 2.4 we don't have vfsmount_lock - we use dcache_lock instead
    spin_lock(&dcache_lock);
#endif

}
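Every branch taken in talpa_vfsmount_lock() has to be undone by a matching unlock. The sketch below shows what that counterpart could look like under the same configuration macros; the function name talpa_vfsmount_unlock, the done_seqretry() call and the repeated talpa_get_symbol() lookup are assumptions made for illustration, not code taken from the project.

/* Editorial sketch, not glue.c: a possible unlock counterpart. */
void talpa_vfsmount_unlock(unsigned* m_seq)
{
#if defined TALPA_USE_VFSMOUNT_LOCK
#   if defined TALPA_VFSMOUNT_LG_BRLOCK
    br_read_unlock(&vfsmount_lock);
#   elif defined TALPA_VFSMOUNT_LOCK_BRLOCK
    br_read_unlock(vfsmount_lock);
#   else
    spinlock_t* talpa_vfsmount_lock_addr = (spinlock_t *)talpa_get_symbol("vfsmount_lock", (void *)TALPA_VFSMOUNT_LOCK_ADDR);

    spin_unlock(talpa_vfsmount_lock_addr);
#   endif
#elif defined TALPA_USE_MOUNT_LOCK
    seqlock_t* mount_lock_addr = (seqlock_t *)talpa_get_symbol("mount_lock", (void *)TALPA_MOUNT_LOCK_ADDR);

    /* Close the read section opened with read_seqbegin_or_lock(). */
    done_seqretry(mount_lock_addr, *m_seq);
#else
    // On 2.4 the matching unlock is for dcache_lock
    spin_unlock(&dcache_lock);
#endif
}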
Example #3
static unsigned mounts_poll(struct file *file, poll_table *wait)
{
	struct proc_mounts *p = proc_mounts(file->private_data);
	struct mnt_namespace *ns = p->ns;
	unsigned res = POLLIN | POLLRDNORM;

	poll_wait(file, &p->ns->poll, wait);

	br_read_lock(&vfsmount_lock);
	if (p->m.poll_event != ns->event) {
		p->m.poll_event = ns->event;
		res |= POLLERR | POLLPRI;
	}
	br_read_unlock(&vfsmount_lock);

	return res;
}
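mounts_poll() reports POLLERR | POLLPRI whenever the namespace's event counter has changed since the last read, which is how userspace notices mount-table changes. A small userspace sketch of the consumer side follows; it is illustrative only and simply polls /proc/self/mounts for that notification.

/* Editorial sketch: a userspace consumer of the mounts_poll() notification. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/self/mounts", O_RDONLY);
	struct pollfd pfd;

	if (fd < 0)
		return 1;
	pfd.fd = fd;
	pfd.events = POLLPRI;
	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		if (pfd.revents & (POLLERR | POLLPRI))
			printf("mount table changed\n");
		/* A real consumer would re-read the file here to pick up the
		 * new mount list before polling again. */
	}
	close(fd);
	return 0;
}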
Example #4
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
    LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
extern void
mvfs_linux_umount_begin(SUPER_T *super_p)
#else
extern void
mvfs_linux_umount_begin(
    struct vfsmount * mnt,
    int flags
)
#endif
{
    VNODE_T *vp;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
    LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
    struct vfsmount *mnt;
#else
    /*
     * Since 2.6.18 and before 2.6.27 we have mnt as a parameter.
     * But we still need super_p.
     */
    SUPER_T *super_p = mnt->mnt_sb;
#endif
    int mount_count = 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) && defined(CONFIG_SMP)
    int cpu;
#endif

    ASSERT(super_p != NULL);
    ASSERT(super_p->s_root != NULL);
    vp = ITOV(super_p->s_root->d_inode);
    ASSERT(vp != NULL);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
    LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
    mnt = VTOVFSMNT(vp);
#else
    /* Check that the mountpoint passed in matches the one
     * from the vp that we are going to clear.  Skip it otherwise.
     * We know from experience that this can happen when unmounting
     * loopback (bind) mounts.
     */
     if (mnt != VTOVFSMNT(vp))
         return;
#endif
    /* Note that there is no mechanism for restoring the mount pointer
     * in the vnode if an error happens later on in the umount.  This is
     * the only callback into the mvfs during umount.  So far this has not
     * been a problem and if we don't do this here, the umount will never
     * succeed because the Linux code expects the mnt_count to be 2.
     * The count is 3 at this point from the initial allocation of the 
     * vfsmnt structure, the path_lookup call in this umount call and 
     * from when we placed the pointer in the vp.  
     */
    if (mnt == NULL) {
        MDKI_VFS_LOG(VFS_LOG_ERR, "%s: mnt is NULL\n", __FUNCTION__);
        return;
    }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
    mount_count = atomic_read(&mnt->mnt_count);
#else
# ifdef CONFIG_SMP
    br_read_lock(vfsmount_lock);
    for_each_possible_cpu(cpu) {
        mount_count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
    }
    br_read_unlock(vfsmount_lock);
# else /* CONFIG_SMP */
    mount_count = mnt->mnt_count;
# endif /* else CONFIG_SMP */
#endif /* else < KERNEL_VERSION(2,6,38) */
    if (mount_count == 3) {
        MDKI_MNTPUT(mnt);
        SET_VTOVFSMNT(vp, NULL);
    }
}
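On kernels from 2.6.38 onwards the reference count lives in per-CPU slots, so the example has to walk every possible CPU while holding vfsmount_lock for read. Factored out, that walk would look roughly like the helper below; the helper name is hypothetical and the body is only a restatement of the SMP branch above, not MVFS or kernel code.

/* Editorial sketch: the per-CPU mnt_count walk from above as a helper. */
#ifdef CONFIG_SMP
static int mvfs_read_mnt_count(struct vfsmount *mnt)
{
    int cpu;
    int count = 0;

    /* Hold the mount brlock for read so the per-CPU counts stay stable
     * while they are summed. */
    br_read_lock(vfsmount_lock);
    for_each_possible_cpu(cpu) {
        count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
    }
    br_read_unlock(vfsmount_lock);
    return count;
}
#endif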