static int
id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
{
    int rc = 0;
    struct key *sidkey;
    const struct cred *saved_cred;
    struct cifs_sid *lsid;
    struct cifs_sid_id *psidid, *npsidid;
    struct rb_root *cidtree;
    spinlock_t *cidlock;

    if (sidtype == SIDOWNER) {
        cidlock = &siduidlock;
        cidtree = &uidtree;
    } else if (sidtype == SIDGROUP) {
        cidlock = &sidgidlock;
        cidtree = &gidtree;
    } else
        return -EINVAL;

    spin_lock(cidlock);
    psidid = sid_rb_search(cidtree, cid);

    if (!psidid) { /* node does not exist, allocate one & attempt adding */
        spin_unlock(cidlock);
        npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
        if (!npsidid)
            return -ENOMEM;

        npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
        if (!npsidid->sidstr) {
            kfree(npsidid);
            return -ENOMEM;
        }

        spin_lock(cidlock);
        psidid = sid_rb_search(cidtree, cid);
        if (psidid) { /* node happened to get inserted meanwhile */
            ++psidid->refcount;
            spin_unlock(cidlock);
            kfree(npsidid->sidstr);
            kfree(npsidid);
        } else {
            psidid = npsidid;
            sid_rb_insert(cidtree, cid, &psidid,
                          sidtype == SIDOWNER ? "oi:" : "gi:");
            ++psidid->refcount;
            spin_unlock(cidlock);
        }
    } else {
        ++psidid->refcount;
        spin_unlock(cidlock);
    }

    /*
     * If we are here, it is safe to access psidid and its fields
     * since a reference was taken earlier while holding the spinlock.
     * A reference on the node is put without holding the spinlock
     * and it is OK to do so in this case, shrinker will not erase
     * this node until all references are put and we do not access
     * any fields of the node after a reference is put.
     */
    if (test_bit(SID_ID_MAPPED, &psidid->state)) {
        cifs_copy_sid(ssid, &psidid->sid);
        psidid->time = jiffies; /* update ts for accessing */
        goto id_sid_out;
    }

    if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
        rc = -EINVAL;
        goto id_sid_out;
    }

    if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
        saved_cred = override_creds(root_cred);
        sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
        if (IS_ERR(sidkey)) {
            rc = -EINVAL;
            cFYI(1, "%s: Can't map and id to a SID", __func__);
        } else if (sidkey->datalen < sizeof(struct cifs_sid)) {
            rc = -EIO;
            cFYI(1, "%s: Downcall contained malformed key "
                 "(datalen=%hu)", __func__, sidkey->datalen);
        } else {
            lsid = (struct cifs_sid *)sidkey->payload.data;
            cifs_copy_sid(&psidid->sid, lsid);
            cifs_copy_sid(ssid, &psidid->sid);
            set_bit(SID_ID_MAPPED, &psidid->state);
            key_put(sidkey);
            kfree(psidid->sidstr);
        }
        psidid->time = jiffies; /* update ts for accessing */
        revert_creds(saved_cred);
        clear_bit(SID_ID_PENDING, &psidid->state);
        wake_up_bit(&psidid->state, SID_ID_PENDING);
    } else {
        rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
                         sidid_pending_wait, TASK_INTERRUPTIBLE);
        if (rc) {
            cFYI(1, "%s: sidid_pending_wait interrupted %d",
                 __func__, rc);
            --psidid->refcount;
            return rc;
        }
        if (test_bit(SID_ID_MAPPED, &psidid->state))
            cifs_copy_sid(ssid, &psidid->sid);
        else
            rc = -EINVAL;
    }
id_sid_out:
    --psidid->refcount;
    return rc;
}
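Both id_to_sid() and sid_to_id() in this listing block in wait_on_bit() with a callback named sidid_pending_wait, which is not shown. A minimal sketch of such a callback, under the assumption that it only needs to reschedule until wake_up_bit() clears SID_ID_PENDING (not the driver's verbatim helper):

static int
sidid_pending_wait(void *unused)
{
	/* give up the CPU until the bit waitqueue is woken */
	schedule();
	return signal_pending(current) ? -ERESTARTSYS : 0;
}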
Example #2
static int decode_sfu_inode(struct inode *inode, __u64 size,
			    const unsigned char *path,
			    struct cifs_sb_info *cifs_sb, int xid)
{
	int rc;
	int oplock = FALSE;
	__u16 netfid;
	struct cifsTconInfo *pTcon = cifs_sb->tcon;
	char buf[24];
	unsigned int bytes_read;
	char *pbuf;

	pbuf = buf;

	if (size == 0) {
		inode->i_mode |= S_IFIFO;
		return 0;
	} else if (size < 8) {
		return -EINVAL;	 /* EOPNOTSUPP? */
	}

	rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ, FILE_SHARE_ALL,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc == 0) {
		int buf_type = CIFS_NO_BUFFER;
			/* Read header */
		rc = CIFSSMBRead(xid, pTcon,
				 netfid, current->tgid,
				 24 /* length */, 0 /* offset */,
				 &bytes_read, &pbuf, &buf_type);
		if ((rc == 0) && (bytes_read >= 8)) {
			if (memcmp("IntxBLK", pbuf, 8) == 0) {
				cFYI(1, ("Block device"));
				inode->i_mode |= S_IFBLK;
				if (bytes_read == 24) {
					/* we have enough to decode dev num */
					__u64 mjr; /* major */
					__u64 mnr; /* minor */
					mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
					mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
					inode->i_rdev = MKDEV(mjr, mnr);
				}
			} else if (memcmp("IntxCHR", pbuf, 8) == 0) {
				cFYI(1, ("Char device"));
				inode->i_mode |= S_IFCHR;
				if (bytes_read == 24) {
					/* we have enough to decode dev num */
					__u64 mjr; /* major */
					__u64 mnr; /* minor */
					mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
					mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
					inode->i_rdev = MKDEV(mjr, mnr);
				}
			} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
				cFYI(1, ("Symlink"));
				inode->i_mode |= S_IFLNK;
			} else {
				inode->i_mode |= S_IFREG; /* file? */
				rc = -EOPNOTSUPP;
			}
		} else {
			inode->i_mode |= S_IFREG; /* then it is a file */
			rc = -EOPNOTSUPP; /* or some unknown SFU type */
		}
		CIFSSMBClose(xid, pTcon, netfid);
	}
	return rc;
}
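The 24-byte header read above is the SFU (Services for Unix) special-file marker: an 8-byte type tag followed by two little-endian 64-bit device numbers at offsets 8 and 16. A sketch of that layout as a struct, inferred from the offsets used here and by cifs_mknod() later in this listing (the driver defines an equivalent struct win_dev; treat this as illustrative):

struct win_dev {
	unsigned char type[8];	/* "IntxBLK", "IntxCHR" or "IntxLNK" */
	__le64 major;		/* little-endian device major */
	__le64 minor;		/* little-endian device minor */
} __attribute__((packed));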
Example #3
int cifs_get_inode_info(struct inode **pinode,
	const unsigned char *search_path, FILE_ALL_INFO *pfindData,
	struct super_block *sb, int xid, const __u16 *pfid)
{
	int rc = 0;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	const unsigned char *full_path = NULL;
	char *buf = NULL;
	int adjustTZ = FALSE;
	bool is_dfs_referral = false;

	pTcon = cifs_sb->tcon;
	cFYI(1, ("Getting info on %s", search_path));

	if ((pfindData == NULL) && (*pinode != NULL)) {
		if (CIFS_I(*pinode)->clientCanCacheRead) {
			cFYI(1, ("No need to revalidate cached inode sizes"));
			return rc;
		}
	}

	/* if file info not passed in then get it from server */
	if (pfindData == NULL) {
		buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;
		pfindData = (FILE_ALL_INFO *)buf;

		full_path = cifs_get_search_path(cifs_sb, search_path);

try_again_CIFSSMBQPathInfo:
		/* could do find first instead but this returns more info */
		rc = CIFSSMBQPathInfo(xid, pTcon, full_path, pfindData,
			      0 /* not legacy */,
			      cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
		/* BB optimize code so we do not make the above call
		when server claims no NT SMB support and the above call
		failed at least once - set flag in tcon or mount */
		if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
			rc = SMBQueryInformation(xid, pTcon, full_path,
					pfindData, cifs_sb->local_nls,
					cifs_sb->mnt_cifs_flags &
					  CIFS_MOUNT_MAP_SPECIAL_CHR);
			adjustTZ = TRUE;
		}
	}
	/* dump_mem("\nQPathInfo return data",&findData, sizeof(findData)); */
	if (rc) {
		if (rc == -EREMOTE && !is_dfs_referral) {
			is_dfs_referral = true;
			if (full_path != search_path) {
				kfree(full_path);
				full_path = search_path;
			}
			goto try_again_CIFSSMBQPathInfo;
		}
		goto cgii_exit;
	} else {
		struct cifsInodeInfo *cifsInfo;
		__u32 attr = le32_to_cpu(pfindData->Attributes);

		/* get new inode */
		if (*pinode == NULL) {
			*pinode = new_inode(sb);
			if (*pinode == NULL) {
				rc = -ENOMEM;
				goto cgii_exit;
			}
			/* Is an i_ino of zero legal? Can we use that to check
			   if the server supports returning inode numbers?  Are
			   there other sanity checks we can use to ensure that
			   the server is really filling in that field? */

			/* We can not use the IndexNumber field by default from
			   Windows or Samba (in ALL_INFO buf) but we can request
			   it explicitly.  It may not be unique presumably if
			   the server has multiple devices mounted under one
			   share */

			/* There may be higher info levels that work but are
			   there Windows server or network appliances for which
			   IndexNumber field is not guaranteed unique? */

			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
				int rc1 = 0;
				__u64 inode_num;

				rc1 = CIFSGetSrvInodeNumber(xid, pTcon,
					search_path, &inode_num,
					cifs_sb->local_nls,
					cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
				if (rc1) {
					cFYI(1, ("GetSrvInodeNum rc %d", rc1));
					/* BB EOPNOSUPP disable SERVER_INUM? */
				} else /* do we need cast or hash to ino? */
					(*pinode)->i_ino = inode_num;
			} /* else ino incremented to unique num in new_inode*/
			if (sb->s_flags & MS_NOATIME)
				(*pinode)->i_flags |= S_NOATIME | S_NOCMTIME;
			insert_inode_hash(*pinode);
		}
		inode = *pinode;
		cifsInfo = CIFS_I(inode);
		cifsInfo->cifsAttrs = attr;
		cFYI(1, ("Old time %ld", cifsInfo->time));
		cifsInfo->time = jiffies;
		cFYI(1, ("New time %ld", cifsInfo->time));

		/* blksize needs to be multiple of two. So safer to default to
		blksize and blkbits set in superblock so 2**blkbits and blksize
		will match rather than setting to:
		(pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/

		/* Linux can not store file creation time so ignore it */
		if (pfindData->LastAccessTime)
			inode->i_atime = cifs_NTtimeToUnix
				(le64_to_cpu(pfindData->LastAccessTime));
		else /* do not need to use current_fs_time - time not stored */
			inode->i_atime = CURRENT_TIME;
		inode->i_mtime =
		    cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime));
		inode->i_ctime =
		    cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
		cFYI(0, ("Attributes came in as 0x%x", attr));
		if (adjustTZ && (pTcon->ses) && (pTcon->ses->server)) {
			inode->i_ctime.tv_sec += pTcon->ses->server->timeAdj;
			inode->i_mtime.tv_sec += pTcon->ses->server->timeAdj;
		}

		/* set default mode. will override for dirs below */
		if (atomic_read(&cifsInfo->inUse) == 0)
			/* new inode, can safely set these fields */
			inode->i_mode = cifs_sb->mnt_file_mode;
		else /* since we set the inode type below we need to mask off
		     to avoid strange results if type changes and both
		     get orred in */
			inode->i_mode &= ~S_IFMT;
/*		if (attr & ATTR_REPARSE)  */
		/* We no longer handle these as symlinks because we could not
		   follow them due to the absolute path with drive letter */
		if (attr & ATTR_DIRECTORY) {
		/* override default perms since we do not do byte range locking
		   on dirs */
			inode->i_mode = cifs_sb->mnt_dir_mode;
			inode->i_mode |= S_IFDIR;
		} else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) &&
			   (cifsInfo->cifsAttrs & ATTR_SYSTEM) &&
			   /* No need to le64 convert size of zero */
			   (pfindData->EndOfFile == 0)) {
			inode->i_mode = cifs_sb->mnt_file_mode;
			inode->i_mode |= S_IFIFO;
/* BB Finish for SFU style symlinks and devices */
		} else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) &&
			   (cifsInfo->cifsAttrs & ATTR_SYSTEM)) {
			if (decode_sfu_inode(inode,
					 le64_to_cpu(pfindData->EndOfFile),
					 search_path,
					 cifs_sb, xid))
				cFYI(1, ("Unrecognized sfu inode type"));

			cFYI(1, ("sfu mode 0%o", inode->i_mode));
		} else {
			inode->i_mode |= S_IFREG;
			/* treat the dos attribute of read-only as read-only
			   mode e.g. 555 */
			if (cifsInfo->cifsAttrs & ATTR_READONLY)
				inode->i_mode &= ~(S_IWUGO);
			else if ((inode->i_mode & S_IWUGO) == 0)
				/* the ATTR_READONLY flag may have been	*/
				/* changed on server -- set any w bits	*/
				/* allowed by mnt_file_mode		*/
				inode->i_mode |= (S_IWUGO &
						  cifs_sb->mnt_file_mode);
		/* BB add code here -
		   validate if device or weird share or device type? */
		}

		spin_lock(&inode->i_lock);
		if (is_size_safe_to_change(cifsInfo,
					   le64_to_cpu(pfindData->EndOfFile))) {
			/* can not safely shrink the file size here if the
			   client is writing to it due to potential races */
			i_size_write(inode, le64_to_cpu(pfindData->EndOfFile));

			/* 512 bytes (2**9) is the fake blocksize that must be
			   used for this calculation */
			inode->i_blocks = (512 - 1 + le64_to_cpu(
					   pfindData->AllocationSize)) >> 9;
		}
		spin_unlock(&inode->i_lock);

		inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks);

		/* BB fill in uid and gid here? with help from winbind?
		   or retrieve from NTFS stream extended attribute */
#ifdef CONFIG_CIFS_EXPERIMENTAL
		/* fill in 0777 bits from ACL */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
			cFYI(1, ("Getting mode bits from ACL"));
			acl_to_uid_mode(inode, search_path, pfid);
		}
#endif
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
			/* fill in remaining high mode bits e.g. SUID, VTX */
			get_sfu_mode(inode, search_path, cifs_sb, xid);
		} else if (atomic_read(&cifsInfo->inUse) == 0) {
			inode->i_uid = cifs_sb->mnt_uid;
			inode->i_gid = cifs_sb->mnt_gid;
			/* set so we do not keep refreshing these fields with
			   bad data after user has changed them in memory */
			atomic_set(&cifsInfo->inUse, 1);
		}

		cifs_set_ops(inode, is_dfs_referral);
	}

cgii_exit:
	if (full_path != search_path)
		kfree(full_path);
	kfree(buf);
	return rc;
}
Example #4
static int
cifs_init_request_bufs(void)
{
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*	cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, "cifs_min_rcv set to maximum (64)");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, "cifs_min_small set to maximum (256)");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
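The pools created above are consumed elsewhere in the driver; a simplified sketch of the allocate/release pattern, assuming direct mempool use (the driver wraps this in its own buffer helpers):

	void *buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
	if (buf) {
		/* ... build and send one large SMB request ... */
		mempool_free(buf, cifs_req_poolp);
	}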
Example #5
int cifs_revalidate(struct dentry *direntry)
{
	int xid;
	int rc = 0, wbrc = 0;
	char *full_path;
	struct cifs_sb_info *cifs_sb;
	struct cifsInodeInfo *cifsInode;
	loff_t local_size;
	struct timespec local_mtime;
	int invalidate_inode = FALSE;

	if (direntry->d_inode == NULL)
		return -ENOENT;

	cifsInode = CIFS_I(direntry->d_inode);

	if (cifsInode == NULL)
		return -ENOENT;

	/* no sense revalidating inode info on file that no one can write */
	if (CIFS_I(direntry->d_inode)->clientCanCacheRead)
		return rc;

	xid = GetXid();

	cifs_sb = CIFS_SB(direntry->d_sb);

	/* can not safely grab the rename sem here if rename calls revalidate
	   since that would deadlock */
	full_path = build_path_from_dentry(direntry);
	if (full_path == NULL) {
		FreeXid(xid);
		return -ENOMEM;
	}
	cFYI(1, ("Revalidate: %s inode 0x%p count %d dentry: 0x%p d_time %ld "
		 "jiffies %ld", full_path, direntry->d_inode,
		 direntry->d_inode->i_count.counter, direntry,
		 direntry->d_time, jiffies));

	if (cifsInode->time == 0) {
		/* was set to zero previously to force revalidate */
	} else if (time_before(jiffies, cifsInode->time + HZ) &&
		   lookupCacheEnabled) {
		if ((S_ISREG(direntry->d_inode->i_mode) == 0) ||
		    (direntry->d_inode->i_nlink == 1)) {
			kfree(full_path);
			FreeXid(xid);
			return rc;
		} else {
			cFYI(1, ("Have to revalidate file due to hardlinks"));
		}
	}

	/* save mtime and size */
	local_mtime = direntry->d_inode->i_mtime;
	local_size = direntry->d_inode->i_size;

	if (cifs_sb->tcon->unix_ext) {
		rc = cifs_get_inode_info_unix(&direntry->d_inode, full_path,
					      direntry->d_sb, xid);
		if (rc) {
			cFYI(1, ("error on getting revalidate info %d", rc));
/*			if (rc != -ENOENT)
				rc = 0; */	/* BB should we cache info on
						   certain errors? */
		}
	} else {
		rc = cifs_get_inode_info(&direntry->d_inode, full_path, NULL,
					 direntry->d_sb, xid, NULL);
		if (rc) {
			cFYI(1, ("error on getting revalidate info %d", rc));
/*			if (rc != -ENOENT)
				rc = 0; */	/* BB should we cache info on
						   certain errors? */
		}
	}
	/* should we remap certain errors, access denied?, to zero */

	/* if not oplocked, we invalidate inode pages if mtime or file size
	   had changed on server */

	if (timespec_equal(&local_mtime, &direntry->d_inode->i_mtime) &&
	    (local_size == direntry->d_inode->i_size)) {
		cFYI(1, ("cifs_revalidate - inode unchanged"));
	} else {
		/* file may have changed on server */
		if (cifsInode->clientCanCacheRead) {
			/* no need to invalidate inode pages since we were the
			   only ones who could have modified the file and the
			   server copy is staler than ours */
		} else {
			invalidate_inode = TRUE;
		}
	}

	/* can not grab this sem since kernel filesys locking documentation
	   indicates i_mutex may be taken by the kernel on lookup and rename
	   which could deadlock if we grab the i_mutex here as well */
/*	mutex_lock(&direntry->d_inode->i_mutex);*/
	/* need to write out dirty pages here  */
	if (direntry->d_inode->i_mapping) {
		/* do we need to lock inode until after invalidate completes
		   below? */
		wbrc = filemap_fdatawrite(direntry->d_inode->i_mapping);
		if (wbrc)
			CIFS_I(direntry->d_inode)->write_behind_rc = wbrc;
	}
	if (invalidate_inode) {
	/* shrink_dcache not necessary now that cifs dentry ops
	are exported for negative dentries */
/*		if (S_ISDIR(direntry->d_inode->i_mode))
			shrink_dcache_parent(direntry); */
		if (S_ISREG(direntry->d_inode->i_mode)) {
			if (direntry->d_inode->i_mapping) {
				wbrc = filemap_fdatawait(direntry->d_inode->i_mapping);
				if (wbrc)
					CIFS_I(direntry->d_inode)->write_behind_rc = wbrc;
			}
			cFYI(1, ("Invalidating read ahead data on "
				 "closed file"));
			invalidate_remote_inode(direntry->d_inode);
		}
	}
/*	mutex_unlock(&direntry->d_inode->i_mutex); */

	kfree(full_path);
	FreeXid(xid);
	return rc;
}
Example #6
/* Note: caller must free return buffer */
char *
build_path_from_dentry(struct dentry *direntry)
{
	struct dentry *temp;
	int namelen;
	int dfsplen;
	char *full_path;
	char dirsep;
	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	unsigned seq;

	dirsep = CIFS_DIR_SEP(cifs_sb);
	if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
		dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
	else
		dfsplen = 0;
cifs_bp_rename_retry:
	namelen = dfsplen;
	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	for (temp = direntry; !IS_ROOT(temp);) {
		namelen += (1 + temp->d_name.len);
		temp = temp->d_parent;
		if (temp == NULL) {
			cERROR(1, "corrupt dentry");
			rcu_read_unlock();
			return NULL;
		}
	}
	rcu_read_unlock();

	full_path = kmalloc(namelen+1, GFP_KERNEL);
	if (full_path == NULL)
		return full_path;
	full_path[namelen] = 0;	/* trailing null */
	rcu_read_lock();
	for (temp = direntry; !IS_ROOT(temp);) {
		spin_lock(&temp->d_lock);
		namelen -= 1 + temp->d_name.len;
		if (namelen < 0) {
			spin_unlock(&temp->d_lock);
			break;
		} else {
			full_path[namelen] = dirsep;
			strncpy(full_path + namelen + 1, temp->d_name.name,
				temp->d_name.len);
			cFYI(0, "name: %s", full_path + namelen);
		}
		spin_unlock(&temp->d_lock);
		temp = temp->d_parent;
		if (temp == NULL) {
			cERROR(1, "corrupt dentry");
			rcu_read_unlock();
			kfree(full_path);
			return NULL;
		}
	}
	rcu_read_unlock();
	if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
		cFYI(1, "did not end path lookup where expected. namelen=%d "
			"dfsplen=%d", namelen, dfsplen);
		/* presumably this is only possible if racing with a rename
		of one of the parent directories  (we can not lock the dentries
		above us to prevent this, but retrying should be harmless) */
		kfree(full_path);
		goto cifs_bp_rename_retry;
	}
	/* DIR_SEP already set for byte  0 / vs \ but not for
	   subsequent slashes in prepath which currently must
	   be entered the right way - not sure if there is an alternative
	   since the '\' is a valid posix character so we can not switch
	   those safely to '/' if any are found in the middle of the prepath */
	/* BB test paths to Windows with '/' in the midst of prepath */

	if (dfsplen) {
		strncpy(full_path, tcon->treeName, dfsplen);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
			int i;
			for (i = 0; i < dfsplen; i++) {
				if (full_path[i] == '\\')
					full_path[i] = '/';
			}
		}
	}
	return full_path;
}
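As the comment above the function says, the caller owns the returned buffer. A usage sketch (an assumption that mirrors callers elsewhere in this listing, e.g. cifs_revalidate() and cifs_lookup()):

	char *full_path = build_path_from_dentry(direntry);
	if (full_path == NULL)
		return -ENOMEM;
	/* ... pass full_path to a path-based SMB call ... */
	kfree(full_path);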
Example #7
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= MS_POSIXACL;

	if (cifs_sb_master_tcon(cifs_sb)->ses->capabilities & CAP_LARGE_FILES)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/* BB FIXME fix time_gran to be larger for LANMAN sessions */
	sb->s_time_gran = 100;

	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
	sb->s_bdi = &cifs_sb->bdi;
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		inode = NULL;
		goto out_no_root;
	}

	sb->s_root = d_alloc_root(inode);

	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

	/* do that *after* d_alloc_root() - we want NULL ->d_op for root here */
	if (cifs_sb_master_tcon(cifs_sb)->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

#ifdef CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cFYI(1, "export ops supported");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cERROR(1, "cifs_read_super: get root inode failed");
	if (inode)
		iput(inode);

	return rc;
}
int
init_cifs_idmap(void)
{
	struct cred *cred;
	struct key *keyring;
	int ret;

	cFYI(1, "Registering the %s key type\n", cifs_idmap_key_type.name);

	/* create an override credential set with a special thread keyring in
	 * which requests are cached
	 *
	 * this is used to prevent malicious redirections from being installed
	 * with add_key().
	 */
	cred = prepare_kernel_cred(NULL);
	if (!cred)
		return -ENOMEM;

	keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
			    (KEY_POS_ALL & ~KEY_POS_SETATTR) |
			    KEY_USR_VIEW | KEY_USR_READ,
			    KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto failed_put_cred;
	}

	ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
	if (ret < 0)
		goto failed_put_key;

	ret = register_key_type(&cifs_idmap_key_type);
	if (ret < 0)
		goto failed_put_key;

	/* instruct request_key() to use this special keyring as a cache for
	 * the results it looks up */
	set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
	cred->thread_keyring = keyring;
	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
	root_cred = cred;

	spin_lock_init(&siduidlock);
	uidtree = RB_ROOT;
	spin_lock_init(&sidgidlock);
	gidtree = RB_ROOT;

	spin_lock_init(&uidsidlock);
	siduidtree = RB_ROOT;
	spin_lock_init(&gidsidlock);
	sidgidtree = RB_ROOT;
	register_shrinker(&cifs_shrinker);

	cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
	return 0;

failed_put_key:
	key_put(keyring);
failed_put_cred:
	put_cred(cred);
	return ret;
}
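init_cifs() further below unwinds this setup via exit_cifs_idmap(), which is not part of this listing. A hedged sketch of the matching teardown (an approximation, not the driver's verbatim function):

void
exit_cifs_idmap(void)
{
	key_revoke(root_cred->thread_keyring);		/* drop the cache keyring */
	unregister_key_type(&cifs_idmap_key_type);
	put_cred(root_cred);				/* pairs with prepare_kernel_cred() */
	unregister_shrinker(&cifs_shrinker);
}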
static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
		       struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
		       struct cifs_fattr *fattr)
{
	int i;
	int num_aces = 0;
	int acl_size;
	char *acl_base;
	struct cifs_ace **ppace;

	/* BB need to add parm so we can store the SID BB */

	if (!pdacl) {
		/* no DACL in the security descriptor, set
		   all the permissions for user/group/other */
		fattr->cf_mode |= S_IRWXUGO;
		return;
	}

	/* validate that we do not go past end of acl */
	if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
		cERROR(1, "ACL too small to parse DACL");
		return;
	}

	cFYI(DBG2, "DACL revision %d size %d num aces %d",
		le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
		le32_to_cpu(pdacl->num_aces));

	/* reset rwx permissions for user/group/other.
	   Also, if num_aces is 0 i.e. DACL has no ACEs,
	   user/group/other have no permissions */
	fattr->cf_mode &= ~(S_IRWXUGO);

	acl_base = (char *)pdacl;
	acl_size = sizeof(struct cifs_acl);

	num_aces = le32_to_cpu(pdacl->num_aces);
	if (num_aces > 0) {
		umode_t user_mask = S_IRWXU;
		umode_t group_mask = S_IRWXG;
		umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;

		if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
			return;
		ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
				GFP_KERNEL);
		if (!ppace) {
			cERROR(1, "DACL memory allocation error");
			return;
		}

		for (i = 0; i < num_aces; ++i) {
			ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
#ifdef CONFIG_CIFS_DEBUG2
			dump_ace(ppace[i], end_of_acl);
#endif
			if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &user_mask);
			if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &group_mask);
			if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &other_mask);
			if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &other_mask);


/*			memcpy((void *)(&(cifscred->aces[i])),
				(void *)ppace[i],
				sizeof(struct cifs_ace)); */

			acl_base = (char *)ppace[i];
			acl_size = le16_to_cpu(ppace[i]->size);
		}

		kfree(ppace);
	}

	return;
}
static int
id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
{
	int rc = 0;
	struct key *sidkey;
	const struct cred *saved_cred;
	struct cifs_sid *lsid;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	if (sidtype == SIDOWNER) {
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -EINVAL;

	spin_lock(cidlock);
	psidid = sid_rb_search(cidtree, cid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		spin_lock(cidlock);
		psidid = sid_rb_search(cidtree, cid);
		if (psidid) { /* node happened to get inserted meanwhile */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			sid_rb_insert(cidtree, cid, &psidid,
					sidtype == SIDOWNER ? "oi:" : "gi:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock
	 * and it is OK to do so in this case, shrinker will not erase
	 * this node until all references are put and we do not access
	 * any fields of the node after a reference is put.
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
		psidid->time = jiffies; /* update ts for accessing */
		goto id_sid_out;
	}

	if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
		rc = -EINVAL;
		goto id_sid_out;
	}

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		saved_cred = override_creds(root_cred);
		sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(sidkey)) {
			rc = -EINVAL;
			cFYI(1, "%s: Can't map and id to a SID", __func__);
		} else {
			lsid = (struct cifs_sid *)sidkey->payload.data;
			memcpy(&psidid->sid, lsid,
				sidkey->datalen < sizeof(struct cifs_sid) ?
				sidkey->datalen : sizeof(struct cifs_sid));
			memcpy(ssid, &psidid->sid,
				sidkey->datalen < sizeof(struct cifs_sid) ?
				sidkey->datalen : sizeof(struct cifs_sid));
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(sidkey);
			kfree(psidid->sidstr);
		}
		psidid->time = jiffies; /* update ts for accessing */
		revert_creds(saved_cred);
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount;
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
		else
			rc = -EINVAL;
	}
id_sid_out:
	--psidid->refcount;
	return rc;
}
static int
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
		struct cifs_fattr *fattr, uint sidtype)
{
	int rc;
	unsigned long cid;
	struct key *idkey;
	const struct cred *saved_cred;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	if (sidtype == SIDOWNER) {
		cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -ENOENT;

	spin_lock(cidlock);
	psidid = id_rb_search(cidtree, psid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		spin_lock(cidlock);
		psidid = id_rb_search(cidtree, psid);
		if (psidid) { /* node happened to get inserted meanwhile */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			id_rb_insert(cidtree, psid, &psidid,
					sidtype == SIDOWNER ? "os:" : "gs:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock
	 * and it is OK to do so in this case, shrinker will not erase
	 * this node until all references are put and we do not access
	 * any fields of the node after a reference is put.
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		cid = psidid->id;
		psidid->time = jiffies; /* update ts for accessing */
		goto sid_to_id_out;
	}

	if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
		goto sid_to_id_out;

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		saved_cred = override_creds(root_cred);
		idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(idkey))
			cFYI(1, "%s: Can't map SID to an id", __func__);
		else {
			cid = *(unsigned long *)idkey->payload.value;
			psidid->id = cid;
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(idkey);
			kfree(psidid->sidstr);
		}
		revert_creds(saved_cred);
		psidid->time = jiffies; /* update ts for accessing */
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount; /* decremented without spinlock */
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			cid = psidid->id;
	}

sid_to_id_out:
	--psidid->refcount; /* decremented without spinlock */
	if (sidtype == SIDOWNER)
		fattr->cf_uid = cid;
	else
		fattr->cf_gid = cid;

	return 0;
}
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
	__u32 secdesclen, __u64 nmode, uid_t uid, gid_t gid, int *aclflag)
{
	int rc = 0;
	__u32 dacloffset;
	__u32 ndacloffset;
	__u32 sidsoffset;
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
	struct cifs_acl *dacl_ptr = NULL;  /* no need for SACL ptr */
	struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */

	if (nmode != NO_CHANGE_64) { /* chmod */
		owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
		group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
		dacloffset = le32_to_cpu(pntsd->dacloffset);
		dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
		ndacloffset = sizeof(struct cifs_ntsd);
		ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
		ndacl_ptr->revision = dacl_ptr->revision;
		ndacl_ptr->size = 0;
		ndacl_ptr->num_aces = 0;

		rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,
					nmode);
		sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
		/* copy sec desc control portion & owner and group sids */
		copy_sec_desc(pntsd, pnntsd, sidsoffset);
		*aclflag = CIFS_ACL_DACL;
	} else {
		memcpy(pnntsd, pntsd, secdesclen);
		if (uid != NO_CHANGE_32) { /* chown */
			owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
					le32_to_cpu(pnntsd->osidoffset));
			nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
								GFP_KERNEL);
			if (!nowner_sid_ptr)
				return -ENOMEM;
			rc = id_to_sid(uid, SIDOWNER, nowner_sid_ptr);
			if (rc) {
				cFYI(1, "%s: Mapping error %d for owner id %d",
						__func__, rc, uid);
				kfree(nowner_sid_ptr);
				return rc;
			}
			memcpy(owner_sid_ptr, nowner_sid_ptr,
					sizeof(struct cifs_sid));
			kfree(nowner_sid_ptr);
			*aclflag = CIFS_ACL_OWNER;
		}
		if (gid != NO_CHANGE_32) { /* chgrp */
			group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
					le32_to_cpu(pnntsd->gsidoffset));
			ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
								GFP_KERNEL);
			if (!ngroup_sid_ptr)
				return -ENOMEM;
			rc = id_to_sid(gid, SIDGROUP, ngroup_sid_ptr);
			if (rc) {
				cFYI(1, "%s: Mapping error %d for group id %d",
						__func__, rc, gid);
				kfree(ngroup_sid_ptr);
				return rc;
			}
			memcpy(group_sid_ptr, ngroup_sid_ptr,
					sizeof(struct cifs_sid));
			kfree(ngroup_sid_ptr);
			*aclflag = CIFS_ACL_GROUP;
		}
	}

	return rc;
}
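build_sec_desc() is only the middle step of a chmod/chown over CIFS ACLs. A fragment sketching the surrounding calling sequence (an assumption for illustration; get_cifs_acl()/set_cifs_acl() are the fetch/store helpers, and the error handling here is simplified):

	__u32 secdesclen = 0;
	int aclflag = CIFS_ACL_DACL;
	struct cifs_ntsd *pntsd, *pnntsd;
	int rc;

	pntsd = get_cifs_acl(cifs_sb, inode, path, &secdesclen);	/* current SD */
	if (IS_ERR(pntsd))
		return PTR_ERR(pntsd);
	pnntsd = kmalloc(secdesclen, GFP_KERNEL);			/* modified copy */
	if (!pnntsd) {
		kfree(pntsd);
		return -ENOMEM;
	}
	rc = build_sec_desc(pntsd, pnntsd, secdesclen, nmode, uid, gid, &aclflag);
	if (!rc)
		rc = set_cifs_acl(pnntsd, secdesclen, inode, path, aclflag);
	kfree(pnntsd);
	kfree(pntsd);
	return rc;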
static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
                       struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
                       struct cifs_fattr *fattr)
{
    int i;
    int num_aces = 0;
    int acl_size;
    char *acl_base;
    struct cifs_ace **ppace;

    /* BB need to add parm so we can store the SID BB */

    if (!pdacl) {
        /* no DACL in the security descriptor, set
           all the permissions for user/group/other */
        fattr->cf_mode |= S_IRWXUGO;
        return;
    }

    /* validate that we do not go past end of acl */
    if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
        cERROR(1, "ACL too small to parse DACL");
        return;
    }

    cFYI(DBG2, "DACL revision %d size %d num aces %d",
         le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
         le32_to_cpu(pdacl->num_aces));

    /* reset rwx permissions for user/group/other.
       Also, if num_aces is 0 i.e. DACL has no ACEs,
       user/group/other have no permissions */
    fattr->cf_mode &= ~(S_IRWXUGO);

    acl_base = (char *)pdacl;
    acl_size = sizeof(struct cifs_acl);

    num_aces = le32_to_cpu(pdacl->num_aces);
    if (num_aces > 0) {
        umode_t user_mask = S_IRWXU;
        umode_t group_mask = S_IRWXG;
        umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;

        if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
            return;
        ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
                        GFP_KERNEL);
        if (!ppace) {
            cERROR(1, "DACL memory allocation error");
            return;
        }

        for (i = 0; i < num_aces; ++i) {
            ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
#ifdef CONFIG_CIFS_DEBUG2
            dump_ace(ppace[i], end_of_acl);
#endif
            if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
                access_flags_to_mode(ppace[i]->access_req,
                                     ppace[i]->type,
                                     &fattr->cf_mode,
                                     &user_mask);
            if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
                access_flags_to_mode(ppace[i]->access_req,
                                     ppace[i]->type,
                                     &fattr->cf_mode,
                                     &group_mask);
            if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
                access_flags_to_mode(ppace[i]->access_req,
                                     ppace[i]->type,
                                     &fattr->cf_mode,
                                     &other_mask);
            if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
                access_flags_to_mode(ppace[i]->access_req,
                                     ppace[i]->type,
                                     &fattr->cf_mode,
                                     &other_mask);


            /*			memcpy((void *)(&(cifscred->aces[i])),
            				(void *)ppace[i],
            				sizeof(struct cifs_ace)); */

            acl_base = (char *)ppace[i];
            acl_size = le16_to_cpu(ppace[i]->size);
        }

        kfree(ppace);
    }

    return;
}
static int
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
          struct cifs_fattr *fattr, uint sidtype)
{
    int rc;
    unsigned long cid;
    struct key *idkey;
    const struct cred *saved_cred;
    struct cifs_sid_id *psidid, *npsidid;
    struct rb_root *cidtree;
    spinlock_t *cidlock;

    if (sidtype == SIDOWNER) {
        cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
        cidlock = &siduidlock;
        cidtree = &uidtree;
    } else if (sidtype == SIDGROUP) {
        cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
        cidlock = &sidgidlock;
        cidtree = &gidtree;
    } else
        return -ENOENT;

    spin_lock(cidlock);
    psidid = id_rb_search(cidtree, psid);

    if (!psidid) { /* node does not exist, allocate one & attempt adding */
        spin_unlock(cidlock);
        npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
        if (!npsidid)
            return -ENOMEM;

        npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
        if (!npsidid->sidstr) {
            kfree(npsidid);
            return -ENOMEM;
        }

        spin_lock(cidlock);
        psidid = id_rb_search(cidtree, psid);
        if (psidid) { /* node happened to get inserted meanwhile */
            ++psidid->refcount;
            spin_unlock(cidlock);
            kfree(npsidid->sidstr);
            kfree(npsidid);
        } else {
            psidid = npsidid;
            id_rb_insert(cidtree, psid, &psidid,
                         sidtype == SIDOWNER ? "os:" : "gs:");
            ++psidid->refcount;
            spin_unlock(cidlock);
        }
    } else {
        ++psidid->refcount;
        spin_unlock(cidlock);
    }

    /*
     * If we are here, it is safe to access psidid and its fields
     * since a reference was taken earlier while holding the spinlock.
     * A reference on the node is put without holding the spinlock
     * and it is OK to do so in this case, shrinker will not erase
     * this node until all references are put and we do not access
     * any fields of the node after a reference is put.
     */
    if (test_bit(SID_ID_MAPPED, &psidid->state)) {
        cid = psidid->id;
        psidid->time = jiffies; /* update ts for accessing */
        goto sid_to_id_out;
    }

    if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
        goto sid_to_id_out;

    if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
        saved_cred = override_creds(root_cred);
        idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
        if (IS_ERR(idkey))
            cFYI(1, "%s: Can't map SID to an id", __func__);
        else {
            cid = *(unsigned long *)idkey->payload.value;
            psidid->id = cid;
            set_bit(SID_ID_MAPPED, &psidid->state);
            key_put(idkey);
            kfree(psidid->sidstr);
        }
        revert_creds(saved_cred);
        psidid->time = jiffies; /* update ts for accessing */
        clear_bit(SID_ID_PENDING, &psidid->state);
        wake_up_bit(&psidid->state, SID_ID_PENDING);
    } else {
        rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
                         sidid_pending_wait, TASK_INTERRUPTIBLE);
        if (rc) {
            cFYI(1, "%s: sidid_pending_wait interrupted %d",
                 __func__, rc);
            --psidid->refcount; /* decremented without spinlock */
            return rc;
        }
        if (test_bit(SID_ID_MAPPED, &psidid->state))
            cid = psidid->id;
    }

sid_to_id_out:
    --psidid->refcount; /* decremented without spinlock */
    if (sidtype == SIDOWNER)
        fattr->cf_uid = cid;
    else
        fattr->cf_gid = cid;

    return 0;
}
Example #15
int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
		dev_t device_number)
{
	int rc = -EPERM;
	unsigned int xid;
	int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
	struct cifs_sb_info *cifs_sb;
	struct tcon_link *tlink;
	struct cifs_tcon *pTcon;
	struct cifs_io_parms io_parms;
	char *full_path = NULL;
	struct inode *newinode = NULL;
	int oplock = 0;
	u16 fileHandle;
	FILE_ALL_INFO *buf = NULL;
	unsigned int bytes_written;
	struct win_dev *pdev;

	if (!old_valid_dev(device_number))
		return -EINVAL;

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	pTcon = tlink_tcon(tlink);

	xid = get_xid();

	full_path = build_path_from_dentry(direntry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto mknod_out;
	}

	if (pTcon->unix_ext) {
		struct cifs_unix_set_info_args args = {
			.mode	= mode & ~current_umask(),
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= device_number,
		};
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
			args.uid = (__u64) current_fsuid();
			args.gid = (__u64) current_fsgid();
		} else {
			args.uid = NO_CHANGE_64;
			args.gid = NO_CHANGE_64;
		}
		rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args,
					    cifs_sb->local_nls,
					    cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
		if (rc)
			goto mknod_out;

		rc = cifs_get_inode_info_unix(&newinode, full_path,
						inode->i_sb, xid);

		if (rc == 0)
			d_instantiate(direntry, newinode);
		goto mknod_out;
	}

	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
		goto mknod_out;


	cFYI(1, "sfu compat create special file");

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (buf == NULL) {
		kfree(full_path);
		rc = -ENOMEM;
		free_xid(xid);
		return rc;
	}

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE,
			 GENERIC_WRITE, create_options,
			 &fileHandle, &oplock, buf, cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc)
		goto mknod_out;

	/* BB Do not bother to decode buf since no local inode yet to put
	 * timestamps in, but we can reuse it safely */

	pdev = (struct win_dev *)buf;
	io_parms.netfid = fileHandle;
	io_parms.pid = current->tgid;
	io_parms.tcon = pTcon;
	io_parms.offset = 0;
	io_parms.length = sizeof(struct win_dev);
	if (S_ISCHR(mode)) {
		memcpy(pdev->type, "IntxCHR", 8);
		pdev->major =
		      cpu_to_le64(MAJOR(device_number));
		pdev->minor =
		      cpu_to_le64(MINOR(device_number));
		rc = CIFSSMBWrite(xid, &io_parms,
			&bytes_written, (char *)pdev,
			NULL, 0);
	} else if (S_ISBLK(mode)) {
		memcpy(pdev->type, "IntxBLK", 8);
		pdev->major =
		      cpu_to_le64(MAJOR(device_number));
		pdev->minor =
		      cpu_to_le64(MINOR(device_number));
		rc = CIFSSMBWrite(xid, &io_parms,
			&bytes_written, (char *)pdev,
			NULL, 0);
	} /* else if (S_ISFIFO) */
	CIFSSMBClose(xid, pTcon, fileHandle);
	d_drop(direntry);

	/* FIXME: add code here to set EAs */

mknod_out:
	kfree(full_path);
	kfree(buf);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
Example #16
ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
	void *ea_value, size_t buf_size)
{
	ssize_t rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
	int xid;
	struct cifs_sb_info *cifs_sb;
	struct tcon_link *tlink;
	struct cifsTconInfo *pTcon;
	struct super_block *sb;
	char *full_path;

	if (direntry == NULL)
		return -EIO;
	if (direntry->d_inode == NULL)
		return -EIO;
	sb = direntry->d_inode->i_sb;
	if (sb == NULL)
		return -EIO;

	cifs_sb = CIFS_SB(sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	pTcon = tlink_tcon(tlink);

	xid = GetXid();

	full_path = build_path_from_dentry(direntry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto get_ea_exit;
	}
	/* return dos attributes as pseudo xattr */
	/* return alt name if available as pseudo attr */
	if (ea_name == NULL) {
		cFYI(1, "Null xattr names not supported");
	} else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
			goto get_ea_exit;

		if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) {
			cFYI(1, "attempt to query cifs inode metadata");
			/* revalidate/getattr then populate from inode */
		} /* BB add else when above is implemented */
		ea_name += 5; /* skip past user. prefix */
		rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
			buf_size, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	} else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
			goto get_ea_exit;

		ea_name += 4; /* skip past os2. prefix */
		rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
			buf_size, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	} else if (strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
			  strlen(POSIX_ACL_XATTR_ACCESS)) == 0) {
#ifdef CONFIG_CIFS_POSIX
		if (sb->s_flags & MS_POSIXACL)
			rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
				ea_value, buf_size, ACL_TYPE_ACCESS,
				cifs_sb->local_nls,
				cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
#ifdef CONFIG_CIFS_EXPERIMENTAL
		else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
			__u16 fid;
			int oplock = 0;
			struct cifs_ntsd *pacl = NULL;
			__u32 buflen = 0;
			if (experimEnabled)
				rc = CIFSSMBOpen(xid, pTcon, full_path,
					FILE_OPEN, GENERIC_READ, 0, &fid,
					&oplock, NULL, cifs_sb->local_nls,
					cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
			/* else rc is EOPNOTSUPP from above */

			if (rc == 0) {
				rc = CIFSSMBGetCIFSACL(xid, pTcon, fid, &pacl,
						      &buflen);
				CIFSSMBClose(xid, pTcon, fid);
			}
		}
#endif /* EXPERIMENTAL */
#else
		cFYI(1, "query POSIX ACL not supported yet");
#endif /* CONFIG_CIFS_POSIX */
	} else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
			  strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
#ifdef CONFIG_CIFS_POSIX
		if (sb->s_flags & MS_POSIXACL)
			rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
				ea_value, buf_size, ACL_TYPE_DEFAULT,
				cifs_sb->local_nls,
				cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
#else
		cFYI(1, "query POSIX default ACL not supported yet");
#endif
	} else if (strncmp(ea_name,
		  CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
		cFYI(1, "Trusted xattr namespace not supported yet");
	} else if (strncmp(ea_name,
		  CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
		cFYI(1, "Security xattr namespace not supported yet");
	} else
		cFYI(1,
		    "illegal xattr request %s (only user namespace supported)",
		     ea_name);

	/* We could add an additional check for streams ie
	    if proc/fs/cifs/streamstoxattr is set then
		search server for EAs or streams to
		returns as xattrs */

	if (rc == -EINVAL)
		rc = -EOPNOTSUPP;

get_ea_exit:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
#endif
	return rc;
}
Example #17
struct dentry *
cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
	    unsigned int flags)
{
	unsigned int xid;
	int rc = 0; /* to get around spurious gcc warning, set to zero here */
	struct cifs_sb_info *cifs_sb;
	struct tcon_link *tlink;
	struct cifs_tcon *pTcon;
	struct inode *newInode = NULL;
	char *full_path = NULL;

	xid = get_xid();

	cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p",
	      parent_dir_inode, direntry->d_name.name, direntry);

	/* check whether path exists */

	cifs_sb = CIFS_SB(parent_dir_inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return (struct dentry *)tlink;
	}
	pTcon = tlink_tcon(tlink);

	rc = check_name(direntry);
	if (rc)
		goto lookup_out;

	/* can not grab the rename sem here since it would
	deadlock in the cases (beginning of sys_rename itself)
	in which we already have the sb rename sem */
	full_path = build_path_from_dentry(direntry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto lookup_out;
	}

	if (direntry->d_inode != NULL) {
		cFYI(1, "non-NULL inode in lookup");
	} else {
		cFYI(1, "NULL inode in lookup");
	}
	cFYI(1, "Full path: %s inode = 0x%p", full_path, direntry->d_inode);

	if (pTcon->unix_ext) {
		rc = cifs_get_inode_info_unix(&newInode, full_path,
					      parent_dir_inode->i_sb, xid);
	} else {
		rc = cifs_get_inode_info(&newInode, full_path, NULL,
				parent_dir_inode->i_sb, xid, NULL);
	}

	if ((rc == 0) && (newInode != NULL)) {
		d_add(direntry, newInode);
		/* since paths are not looked up by component - the parent
		   directories are presumed to be good here */
		renew_parental_timestamps(direntry);

	} else if (rc == -ENOENT) {
		rc = 0;
		direntry->d_time = jiffies;
		d_add(direntry, NULL);
	/*	if it was once a directory (but how can we tell?) we could do
		shrink_dcache_parent(direntry); */
	} else if (rc != -EACCES) {
		cERROR(1, "Unexpected lookup error %d", rc);
		/* We special case check for Access Denied - since that
		is a common return code */
	}

lookup_out:
	kfree(full_path);
	cifs_put_tlink(tlink);
	free_xid(xid);
	return ERR_PTR(rc);
}
static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	int rc = 0;
	int i = 0;
	struct msghdr smb_msg;
	__be32 *buf_len = (__be32 *)(iov[0].iov_base);
	unsigned int len = iov[0].iov_len;
	unsigned int total_len;
	int first_vec = 0;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	struct socket *ssocket = server->ssocket;

	if (ssocket == NULL)
		return -ENOTSOCK; /* BB eventually add reconnect code here */

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	total_len = 0;
	for (i = 0; i < n_vec; i++)
		total_len += iov[i].iov_len;

	cFYI(1, "Sending smb:  total_len %d", total_len);
	dump_smb(iov[0].iov_base, len);

	i = 0;
	while (total_len) {
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, total_len);
		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
			i++;
			/*
			 * If blocking send we try 3 times, since each can block
			 * for 5 seconds. For nonblocking  we have to try more
			 * but wait increasing amounts of time allowing time for
			 * socket to clear.  The overall time we wait in either
			 * case to send on the socket is about 15 seconds.
			 * Similarly we wait for 15 seconds for a response from
			 * the server in SendReceive[2] for the server to send
			 * a response back for most types of requests (except
			 * SMB Write past end of file which can be slow, and
			 * blocking lock operations). NFS waits slightly longer
			 * than CIFS, but this can make it take longer for
			 * nonresponsive servers to be detected and 15 seconds
			 * is more than enough time for modern networks to
			 * send a packet.  In most cases if we fail to send
			 * after the retries we will kill the socket and
			 * reconnect which may clear the network problem.
			 */
			if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
				cERROR(1, "sends on sock %p stuck for 15 seconds",
				    ssocket);
				rc = -EAGAIN;
				break;
			}
			msleep(1 << i);
			continue;
		}
		if (rc < 0)
			break;

		if (rc == total_len) {
			total_len = 0;
			break;
		} else if (rc > total_len) {
			cERROR(1, "sent %d requested %d", rc, total_len);
			break;
		}
		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cERROR(1, "tcp sent no data");
			msleep(500);
			continue;
		}
		total_len -= rc;
		/* the line below resets i */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}
		i = 0; /* in case we get ENOSPC on the next send */
	}

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cFYI(1, "partial send (%d remaining), terminating session",
			total_len);
		/* If we have only sent part of an SMB then the next SMB
		   could be taken as the remainder of this one.  We need
		   to kill the socket so the server throws away the partial
		   SMB */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cERROR(1, "Error %d sending data on socket to server", rc);
	else
		rc = 0;

	/* Don't want to modify the buffer as a side effect of this call. */
	*buf_len = cpu_to_be32(smb_buf_length);

	return rc;
}
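Worked out, the nonblocking retry path above sleeps msleep(1 << i) for i = 1..13 before bailing at i >= 14, i.e. 2 + 4 + ... + 8192 ms = 16382 ms, which is where the "about 15 seconds" in the comment comes from; in the blocking case each kernel_sendmsg() can itself block for roughly 5 seconds, so the three attempts give a similar bound.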
Example #19
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&cifs_file_list_lock);
	spin_lock_init(&GlobalMid_Lock);

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, "cifs_max_pending set to min of 2");
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, "cifs_max_pending set to max of 256");
	}

	rc = cifs_fscache_register();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_inodecache();
	if (rc)
		goto out_unreg_fscache;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_UPCALL
	rc = register_key_type(&cifs_spnego_key_type);
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_UPCALL */

#ifdef CONFIG_CIFS_ACL
	rc = init_cifs_idmap();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_ACL */

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	return 0;

out_init_cifs_idmap:
#ifdef CONFIG_CIFS_ACL
	exit_cifs_idmap();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	unregister_key_type(&cifs_spnego_key_type);
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	cifs_destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_unreg_fscache:
	cifs_fscache_unregister();
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
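init_cifs() is a textbook example of staged initialization with goto-based unwinding: every step that succeeds gains a matching teardown label, and a failure jumps to the label that undoes only what has already been set up, in reverse order. A minimal standalone sketch of that pattern, with made-up step names, looks like this:

#include <stdio.h>

static int step_ok(const char *name) { printf("init %s\n", name); return 0; }
static void undo(const char *name)   { printf("undo %s\n", name); }

static int init_module_sketch(void)
{
	int rc;

	rc = step_ok("cache");
	if (rc)
		goto out;

	rc = step_ok("workqueue");
	if (rc)
		goto out_destroy_cache;

	rc = step_ok("register_fs");
	if (rc)
		goto out_destroy_workqueue;

	return 0;

out_destroy_workqueue:
	undo("workqueue");
out_destroy_cache:
	undo("cache");
out:
	return rc;
}

int main(void)
{
	return init_module_sketch();
}

Here every step succeeds, so no label runs; forcing step_ok("register_fs") to return an error would run undo("workqueue") and then undo("cache"), mirroring the unwind order in init_cifs().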
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
	     const int flags)
{
	int rc = 0;
	int long_op;
	struct mid_q_entry *midQ = NULL;
	char *buf = iov[0].iov_base;

	long_op = flags & CIFS_TIMEOUT_MASK;

	*pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(buf);
		cERROR(1, "Null session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(buf);
		return -ENOENT;
	}

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	rc = wait_for_free_request(ses->server, long_op);
	if (rc) {
		cifs_small_buf_release(buf);
		return rc;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	rc = cifs_setup_request(ses, iov, n_vec, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(buf);
		/* Update # of requests on wire to server */
		cifs_add_credits(ses->server, 1);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(buf);
		goto out;
	}

	if (long_op == CIFS_ASYNC_OP) {
		cifs_small_buf_release(buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_nt_cancel(ses->server, (struct smb_hdr *)buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(buf);
			cifs_add_credits(ses->server, 1);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		cifs_add_credits(ses->server, 1);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cFYI(1, "Bad MID state?");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	iov[0].iov_base = buf;
	iov[0].iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*pRespBufType = CIFS_LARGE_BUFFER;
	else
		*pRespBufType = CIFS_SMALL_BUFFER;

	rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	delete_mid(midQ);
	cifs_add_credits(ses->server, 1);

	return rc;
}
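The srv_mutex comment above captures an ordering invariant: the request must be signed (its sequence number assigned) and handed to the socket under the same lock, or two threads could sign in one order and transmit in another, breaking signature verification. A tiny pthread sketch of that rule, with illustrative names, follows; it is not CIFS code.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t srv_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t sign_seq;

static void sign_and_send(const char *req)
{
	pthread_mutex_lock(&srv_mutex);
	uint32_t seq = sign_seq++;	/* "sign": stamp the sequence number */
	/* "send": wire order matches signing order because both happen here */
	printf("send seq=%u req=%s\n", (unsigned)seq, req);
	pthread_mutex_unlock(&srv_mutex);
}

int main(void)
{
	sign_and_send("SMB_COM_READ");
	sign_and_send("SMB_COM_WRITE");
	return 0;
}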
Example #21
static struct dentry *
cifs_do_mount(struct file_system_type *fs_type,
	      int flags, const char *dev_name, void *data)
{
	int rc;
	struct super_block *sb;
	struct cifs_sb_info *cifs_sb;
	struct smb_vol *volume_info;
	struct cifs_mnt_data mnt_data;
	struct dentry *root;

	cFYI(1, "Devname: %s flags: %d ", dev_name, flags);

	volume_info = cifs_get_volume_info((char *)data, dev_name);
	if (IS_ERR(volume_info))
		return ERR_CAST(volume_info);

	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	if (cifs_sb == NULL) {
		root = ERR_PTR(-ENOMEM);
		goto out_nls;
	}

	cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
	if (cifs_sb->mountdata == NULL) {
		root = ERR_PTR(-ENOMEM);
		goto out_cifs_sb;
	}

	cifs_setup_cifs_sb(volume_info, cifs_sb);

	rc = cifs_mount(cifs_sb, volume_info);
	if (rc) {
		if (!(flags & MS_SILENT))
			cERROR(1, "cifs_mount failed w/return code = %d", rc);
		root = ERR_PTR(rc);
		goto out_mountdata;
	}

	mnt_data.vol = volume_info;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, &mnt_data);
	if (IS_ERR(sb)) {
		root = ERR_CAST(sb);
		cifs_umount(cifs_sb);
		goto out;
	}

	if (sb->s_root) {
		cFYI(1, "Use existing superblock");
		cifs_umount(cifs_sb);
	} else {
		sb->s_flags = flags;
		/* BB should we make this contingent on mount parm? */
		sb->s_flags |= MS_NODIRATIME | MS_NOATIME;

		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= MS_ACTIVE;
	}

	root = cifs_get_root(volume_info, sb);
	if (IS_ERR(root))
		goto out_super;

	cFYI(1, "dentry root is: %p", root);
	goto out;

out_super:
	deactivate_locked_super(sb);
out:
	cifs_cleanup_volume_info(volume_info);
	return root;

out_mountdata:
	kfree(cifs_sb->mountdata);
out_cifs_sb:
	kfree(cifs_sb);
out_nls:
	unload_nls(volume_info->local_nls);
	goto out;
}
Example #22
int cifs_setxattr(struct dentry *direntry, const char *ea_name,
		  const void *ea_value, size_t value_size, int flags)
{
	int rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
	int xid;
	struct cifs_sb_info *cifs_sb;
	struct tcon_link *tlink;
	struct cifs_tcon *pTcon;
	struct super_block *sb;
	char *full_path;
	struct cifs_ntsd *pacl;

	if (direntry == NULL)
		return -EIO;
	if (direntry->d_inode == NULL)
		return -EIO;
	sb = direntry->d_inode->i_sb;
	if (sb == NULL)
		return -EIO;

	cifs_sb = CIFS_SB(sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	pTcon = tlink_tcon(tlink);

	xid = GetXid();

	full_path = build_path_from_dentry(direntry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto set_ea_exit;
	}
	/* return dos attributes as pseudo xattr */
	/* return alt name if available as pseudo attr */

	/* if proc/fs/cifs/streamstoxattr is set then
	   search the server for EAs or streams to
	   return as xattrs */
	if (value_size > MAX_EA_VALUE_SIZE) {
		cFYI(1, "size of EA value too large");
		rc = -EOPNOTSUPP;
		goto set_ea_exit;
	}

	if (ea_name == NULL) {
		cFYI(1, "Null xattr names not supported");
	} else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
			goto set_ea_exit;
		if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0)
			cFYI(1, "attempt to set cifs inode metadata");

		ea_name += 5; /* skip past user. prefix */
		rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
			(__u16)value_size, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	} else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
			goto set_ea_exit;

		ea_name += 4; /* skip past os2. prefix */
		rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
			(__u16)value_size, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	} else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL,
			strlen(CIFS_XATTR_CIFS_ACL)) == 0) {
		pacl = kmalloc(value_size, GFP_KERNEL);
		if (!pacl) {
			cFYI(1, "%s: Can't allocate memory for ACL",
					__func__);
			rc = -ENOMEM;
		} else {
#ifdef CONFIG_CIFS_ACL
			memcpy(pacl, ea_value, value_size);
			rc = set_cifs_acl(pacl, value_size,
				direntry->d_inode, full_path);
			if (rc == 0) /* force revalidate of the inode */
				CIFS_I(direntry->d_inode)->time = 0;
			kfree(pacl);
#else
			cFYI(1, "Set CIFS ACL not supported yet");
			kfree(pacl); /* free the unused copy when ACL support is compiled out */
#endif /* CONFIG_CIFS_ACL */
		}
	} else {
		int temp;
		temp = strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
			strlen(POSIX_ACL_XATTR_ACCESS));
		if (temp == 0) {
#ifdef CONFIG_CIFS_POSIX
			if (sb->s_flags & MS_POSIXACL)
				rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
					ea_value, (const int)value_size,
					ACL_TYPE_ACCESS, cifs_sb->local_nls,
					cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
			cFYI(1, "set POSIX ACL rc %d", rc);
#else
			cFYI(1, "set POSIX ACL not supported");
#endif
		} else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
				   strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
#ifdef CONFIG_CIFS_POSIX
			if (sb->s_flags & MS_POSIXACL)
				rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
					ea_value, (const int)value_size,
					ACL_TYPE_DEFAULT, cifs_sb->local_nls,
					cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
			cFYI(1, "set POSIX default ACL rc %d", rc);
#else
			cFYI(1, "set default POSIX ACL not supported");
#endif
		} else {
			cFYI(1, "illegal xattr request %s (only user namespace"
				" supported)", ea_name);
		  /* BB what if no namespace prefix? */
		  /* Should we just pass them to server, except for
		  system and perhaps security prefixes? */
		}
	}

set_ea_exit:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
#endif
	return rc;
}
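cifs_setxattr() dispatches on the xattr namespace prefix with strncmp() and strips the prefix before sending the remainder to the server. A userspace sketch of that classification, using a hypothetical classify_xattr() helper and the standard namespace strings, might look like this:

#include <stdio.h>
#include <string.h>

enum xattr_ns { XNS_USER, XNS_OS2, XNS_CIFS_ACL, XNS_POSIX_ACL, XNS_UNSUPPORTED };

/* Classify an xattr name by namespace and return the part after the prefix. */
static enum xattr_ns classify_xattr(const char *name, const char **rest)
{
	*rest = name;
	if (strncmp(name, "user.", 5) == 0) {
		*rest = name + 5;
		return XNS_USER;
	}
	if (strncmp(name, "os2.", 4) == 0) {
		*rest = name + 4;
		return XNS_OS2;
	}
	if (strcmp(name, "system.cifs_acl") == 0)
		return XNS_CIFS_ACL;
	if (strcmp(name, "system.posix_acl_access") == 0 ||
	    strcmp(name, "system.posix_acl_default") == 0)
		return XNS_POSIX_ACL;
	return XNS_UNSUPPORTED;
}

int main(void)
{
	const char *rest;

	printf("%d %s\n", classify_xattr("user.comment", &rest), rest);
	printf("%d %s\n", classify_xattr("trusted.foo", &rest), rest);
	return 0;
}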
Example #23
int cifs_rename(struct inode *source_inode, struct dentry *source_direntry,
	struct inode *target_inode, struct dentry *target_direntry)
{
	char *fromName;
	char *toName;
	struct cifs_sb_info *cifs_sb_source;
	struct cifs_sb_info *cifs_sb_target;
	struct cifsTconInfo *pTcon;
	int xid;
	int rc = 0;

	xid = GetXid();

	cifs_sb_target = CIFS_SB(target_inode->i_sb);
	cifs_sb_source = CIFS_SB(source_inode->i_sb);
	pTcon = cifs_sb_source->tcon;

	if (pTcon != cifs_sb_target->tcon) {
		FreeXid(xid);
		return -EXDEV;	/* BB actually could be allowed if same server,
				   but different share.
				   Might eventually add support for this */
	}

	/* we already  have the rename sem so we do not need to grab it again
	   here to protect the path integrity */
	fromName = build_path_from_dentry(source_direntry);
	toName = build_path_from_dentry(target_direntry);
	if ((fromName == NULL) || (toName == NULL)) {
		rc = -ENOMEM;
		goto cifs_rename_exit;
	}

	rc = CIFSSMBRename(xid, pTcon, fromName, toName,
			   cifs_sb_source->local_nls,
			   cifs_sb_source->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc == -EEXIST) {
		/* check if they are the same file because rename of hardlinked
		   files is a noop */
		FILE_UNIX_BASIC_INFO *info_buf_source;
		FILE_UNIX_BASIC_INFO *info_buf_target;

		info_buf_source =
			kmalloc(2 * sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
		if (info_buf_source != NULL) {
			info_buf_target = info_buf_source + 1;
			if (pTcon->unix_ext)
				rc = CIFSSMBUnixQPathInfo(xid, pTcon, fromName,
					info_buf_source,
					cifs_sb_source->local_nls,
					cifs_sb_source->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
			/* else rc is still EEXIST so will fall through to
			   unlink the target and retry rename */
			if (rc == 0) {
				rc = CIFSSMBUnixQPathInfo(xid, pTcon, toName,
						info_buf_target,
						cifs_sb_target->local_nls,
						/* remap based on source sb */
						cifs_sb_source->mnt_cifs_flags &
						    CIFS_MOUNT_MAP_SPECIAL_CHR);
			}
			if ((rc == 0) &&
			    (info_buf_source->UniqueId ==
			     info_buf_target->UniqueId)) {
			/* do not rename since the files are hardlinked which
			   is a noop */
			} else {
			/* we either can not tell the files are hardlinked
			   (as with Windows servers) or files are not
			   hardlinked so delete the target manually before
			   renaming to follow POSIX rather than Windows
			   semantics */
				cifs_unlink(target_inode, target_direntry);
				rc = CIFSSMBRename(xid, pTcon, fromName,
						   toName,
						   cifs_sb_source->local_nls,
						   cifs_sb_source->mnt_cifs_flags
						   & CIFS_MOUNT_MAP_SPECIAL_CHR);
			}
			kfree(info_buf_source);
		} /* if we can not get memory just leave rc as EEXIST */
	}

	if (rc)
		cFYI(1, ("rename rc %d", rc));

	if ((rc == -EIO) || (rc == -EEXIST)) {
		int oplock = FALSE;
		__u16 netfid;

		/* BB FIXME Is Generic Read correct for rename? */
		/* if renaming directory - we should not say CREATE_NOT_DIR,
		   need to test renaming open directory, also GENERIC_READ
		   might not be the right access to request */
		rc = CIFSSMBOpen(xid, pTcon, fromName, FILE_OPEN, GENERIC_READ, FILE_SHARE_ALL,
				 CREATE_NOT_DIR, &netfid, &oplock, NULL,
				 cifs_sb_source->local_nls,
				 cifs_sb_source->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
		if (rc == 0) {
			rc = CIFSSMBRenameOpenFile(xid, pTcon, netfid, toName,
					      cifs_sb_source->local_nls,
					      cifs_sb_source->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
			CIFSSMBClose(xid, pTcon, netfid);
		}
	}

cifs_rename_exit:
	kfree(fromName);
	kfree(toName);
	FreeXid(xid);
	return rc;
}
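The -EEXIST branch above only unlinks the target after checking whether source and target are in fact the same file, since renaming one hard link over another is a no-op. The userspace analogue of that check compares device and inode numbers from stat(); same_file() below is an illustrative helper, not the kernel logic itself.

#include <stdio.h>
#include <sys/stat.h>

/* Return 1 if both paths resolve to the same underlying file. */
static int same_file(const char *a, const char *b)
{
	struct stat sa, sb;

	if (stat(a, &sa) != 0 || stat(b, &sb) != 0)
		return 0;	/* cannot tell; treat as different */
	return sa.st_dev == sb.st_dev && sa.st_ino == sb.st_ino;
}

int main(int argc, char **argv)
{
	if (argc == 3)
		printf("%s and %s are %s\n", argv[1], argv[2],
		       same_file(argv[1], argv[2]) ?
		       "the same file" : "different files");
	return 0;
}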
Example #24
ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
	void *ea_value, size_t buf_size)
{
	ssize_t rc = -EOPNOTSUPP;
#ifdef CONFIG_CIFS_XATTR
	int xid;
	struct cifs_sb_info *cifs_sb;
	struct tcon_link *tlink;
	struct cifs_tcon *pTcon;
	struct super_block *sb;
	char *full_path;

	if (direntry == NULL)
		return -EIO;
	if (direntry->d_inode == NULL)
		return -EIO;
	sb = direntry->d_inode->i_sb;
	if (sb == NULL)
		return -EIO;

	cifs_sb = CIFS_SB(sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	pTcon = tlink_tcon(tlink);

	xid = GetXid();

	full_path = build_path_from_dentry(direntry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto get_ea_exit;
	}
	/* return dos attributes as pseudo xattr */
	/* return alt name if available as pseudo attr */
	if (ea_name == NULL) {
		cFYI(1, "Null xattr names not supported");
	} else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
			goto get_ea_exit;

		if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) {
			cFYI(1, "attempt to query cifs inode metadata");
			/* revalidate/getattr then populate from inode */
		} /* BB add else when above is implemented */
		ea_name += 5; /* skip past user. prefix */
		rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
			buf_size, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	} else if (strncmp(ea_name, CIFS_XATTR_OS2_PREFIX, 4) == 0) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
			goto get_ea_exit;

		ea_name += 4; /* skip past os2. prefix */
		rc = CIFSSMBQAllEAs(xid, pTcon, full_path, ea_name, ea_value,
			buf_size, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	} else if (strncmp(ea_name, POSIX_ACL_XATTR_ACCESS,
			  strlen(POSIX_ACL_XATTR_ACCESS)) == 0) {
#ifdef CONFIG_CIFS_POSIX
		if (sb->s_flags & MS_POSIXACL)
			rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
				ea_value, buf_size, ACL_TYPE_ACCESS,
				cifs_sb->local_nls,
				cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
#else
		cFYI(1, "Query POSIX ACL not supported yet");
#endif /* CONFIG_CIFS_POSIX */
	} else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
			  strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
#ifdef CONFIG_CIFS_POSIX
		if (sb->s_flags & MS_POSIXACL)
			rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
				ea_value, buf_size, ACL_TYPE_DEFAULT,
				cifs_sb->local_nls,
				cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
#else
		cFYI(1, "Query POSIX default ACL not supported yet");
#endif /* CONFIG_CIFS_POSIX */
	} else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL,
				strlen(CIFS_XATTR_CIFS_ACL)) == 0) {
#ifdef CONFIG_CIFS_ACL
			u32 acllen;
			struct cifs_ntsd *pacl;

			pacl = get_cifs_acl(cifs_sb, direntry->d_inode,
						full_path, &acllen);
			if (IS_ERR(pacl)) {
				rc = PTR_ERR(pacl);
				cERROR(1, "%s: error %zd getting sec desc",
						__func__, rc);
			} else {
				if (ea_value && acllen > buf_size) {
					/*
					 * acllen is unsigned, so assign rc
					 * directly rather than letting -ERANGE
					 * wrap to a large positive value.
					 */
					rc = -ERANGE;
				} else {
					if (ea_value)
						memcpy(ea_value, pacl, acllen);
					rc = acllen;
				}
				kfree(pacl);
			}
#else
		cFYI(1, "Query CIFS ACL not supported yet");
#endif /* CONFIG_CIFS_ACL */
	} else if (strncmp(ea_name,
		  CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN_LOCAL) == 0) {
		cFYI(1, "Trusted xattr namespace not supported yet");
	} else if (strncmp(ea_name,
		  CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN_LOCAL) == 0) {
		cFYI(1, "Security xattr namespace not supported yet");
	} else
		cFYI(1,
		    "illegal xattr request %s (only user namespace supported)",
		     ea_name);

	/* We could add an additional check for streams, i.e.
	   if proc/fs/cifs/streamstoxattr is set then
	   search the server for EAs or streams to
	   return as xattrs */

	if (rc == -EINVAL)
		rc = -EOPNOTSUPP;

get_ea_exit:
	kfree(full_path);
	FreeXid(xid);
	cifs_put_tlink(tlink);
#endif
	return rc;
}
Example #25
int cifs_get_inode_info_unix(struct inode **pinode,
	const unsigned char *search_path, struct super_block *sb, int xid)
{
	int rc = 0;
	FILE_UNIX_BASIC_INFO findData;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	const unsigned char *full_path;
	bool is_dfs_referral = false;

	pTcon = cifs_sb->tcon;
	cFYI(1, ("Getting info on %s", search_path));

	full_path = cifs_get_search_path(cifs_sb, search_path);

try_again_CIFSSMBUnixQPathInfo:
	/* could have done a find first instead but this returns more info */
	rc = CIFSSMBUnixQPathInfo(xid, pTcon, full_path, &findData,
				  cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
/*	dump_mem("\nUnixQPathInfo return data", &findData,
		 sizeof(findData)); */
	if (rc) {
		if (rc == -EREMOTE && !is_dfs_referral) {
			is_dfs_referral = true;
			if (full_path != search_path) {
				kfree(full_path);
				full_path = search_path;
			}
			goto try_again_CIFSSMBUnixQPathInfo;
		}
		goto cgiiu_exit;
	} else {
		struct cifsInodeInfo *cifsInfo;
		__u64 num_of_bytes = le64_to_cpu(findData.NumOfBytes);
		__u64 end_of_file = le64_to_cpu(findData.EndOfFile);

		/* get new inode */
		if (*pinode == NULL) {
			*pinode = new_inode(sb);
			if (*pinode == NULL) {
				rc = -ENOMEM;
				goto cgiiu_exit;
			}
			/* Is an i_ino of zero legal? */
			/* Are there sanity checks we can use to ensure that
			   the server is really filling in that field? */
			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
				(*pinode)->i_ino =
					(unsigned long)findData.UniqueId;
			} /* note ino incremented to unique num in new_inode */
			if (sb->s_flags & MS_NOATIME)
				(*pinode)->i_flags |= S_NOATIME | S_NOCMTIME;

			insert_inode_hash(*pinode);
		}

		inode = *pinode;
		cifsInfo = CIFS_I(inode);

		cFYI(1, ("Old time %ld", cifsInfo->time));
		cifsInfo->time = jiffies;
		cFYI(1, ("New time %ld", cifsInfo->time));
		/* this is ok to set on every inode revalidate */
		atomic_set(&cifsInfo->inUse, 1);

		cifs_unix_info_to_inode(inode, &findData, 0);


		if (num_of_bytes < end_of_file)
			cFYI(1, ("allocation size less than end of file"));
		cFYI(1, ("Size %ld and blocks %llu",
			(unsigned long) inode->i_size,
			(unsigned long long)inode->i_blocks));

		cifs_set_ops(inode, is_dfs_referral);
	}
cgiiu_exit:
	if (full_path != search_path)
		kfree(full_path);
	return rc;
}
Example #26
static int
cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
	       struct tcon_link *tlink, unsigned oflags, umode_t mode,
	       __u32 *oplock, struct cifs_fid *fid, int *created)
{
	int rc = -ENOENT;
	int create_options = CREATE_NOT_DIR;
	int desired_access;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	char *full_path = NULL;
	FILE_ALL_INFO *buf = NULL;
	struct inode *newinode = NULL;
	int disposition;
	struct TCP_Server_Info *server = tcon->ses->server;

	*oplock = 0;
	if (tcon->ses->server->oplocks)
		*oplock = REQ_OPLOCK;

	full_path = build_path_from_dentry(direntry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		rc = cifs_posix_open(full_path, &newinode, inode->i_sb, mode,
				     oflags, oplock, &fid->netfid, xid);
		switch (rc) {
		case 0:
			if (newinode == NULL) {
				/* query inode info */
				goto cifs_create_get_file_info;
			}

			if (!S_ISREG(newinode->i_mode)) {
				/*
				 * The server may allow us to open things like
				 * FIFOs, but the client isn't set up to deal
				 * with that. If it's not a regular file, just
				 * close it and proceed as if it were a normal
				 * lookup.
				 */
				CIFSSMBClose(xid, tcon, fid->netfid);
				goto cifs_create_get_file_info;
			}
			/* success, no need to query */
			goto cifs_create_set_dentry;

		case -ENOENT:
			goto cifs_create_get_file_info;

		case -EIO:
		case -EINVAL:
			/*
			 * EIO could indicate that (posix open) operation is not
			 * supported, despite what server claimed in capability
			 * negotiation.
			 *
			 * POSIX open in samba versions 3.3.1 and earlier could
			 * incorrectly fail with invalid parameter.
			 */
			tcon->broken_posix_open = true;
			break;

		case -EREMOTE:
		case -EOPNOTSUPP:
			/*
			 * EREMOTE indicates DFS junction, which is not handled
			 * in posix open.  If either that or op not supported
			 * returned, follow the normal lookup.
			 */
			break;

		default:
			goto out;
		}
		/*
		 * fall through to retry using the older open call; this is the
		 * case where the server does not support this SMB level and
		 * falsely claims the capability (we also get here for the DFS
		 * case, which should be rare for paths not covered on files)
		 */
	}

	desired_access = 0;
	if (OPEN_FMODE(oflags) & FMODE_READ)
		desired_access |= GENERIC_READ; /* is this too little? */
	if (OPEN_FMODE(oflags) & FMODE_WRITE)
		desired_access |= GENERIC_WRITE;

	disposition = FILE_OVERWRITE_IF;
	if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		disposition = FILE_CREATE;
	else if ((oflags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		disposition = FILE_OVERWRITE_IF;
	else if ((oflags & O_CREAT) == O_CREAT)
		disposition = FILE_OPEN_IF;
	else
		cFYI(1, "Create flag not set in create function");

	/*
	 * BB add processing to set equivalent of mode - e.g. via CreateX with
	 * ACLs
	 */

	if (!server->ops->open) {
		rc = -ENOSYS;
		goto out;
	}

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (buf == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * if we're not using unix extensions, see if we need to set
	 * ATTR_READONLY on the create call
	 */
	if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
		create_options |= CREATE_OPTION_READONLY;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, fid, oplock,
			       buf, cifs_sb);
	if (rc) {
		cFYI(1, "cifs_create returned 0x%x", rc);
		goto out;
	}

	/*
	 * If Open reported that we actually created a file then we now have to
	 * set the mode if possible.
	 */
	if ((tcon->unix_ext) && (*oplock & CIFS_CREATE_ACTION)) {
		struct cifs_unix_set_info_args args = {
				.mode	= mode,
				.ctime	= NO_CHANGE_64,
				.atime	= NO_CHANGE_64,
				.mtime	= NO_CHANGE_64,
				.device	= 0,
		};

		*created |= FILE_CREATED;
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
			args.uid = (__u64) current_fsuid();
			if (inode->i_mode & S_ISGID)
				args.gid = (__u64) inode->i_gid;
			else
				args.gid = (__u64) current_fsgid();
		} else {
			args.uid = NO_CHANGE_64;
			args.gid = NO_CHANGE_64;
		}
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid->netfid,
				       current->tgid);
	} else {
		/*
		 * BB implement mode setting via Windows security
		 * descriptors e.g.
		 */
		/* CIFSSMBWinSetPerms(xid,tcon,path,mode,-1,-1,nls);*/

		/* Could set r/o dos attribute if mode & 0222 == 0 */
	}

cifs_create_get_file_info:
	/* server might mask mode so we have to query for it */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb,
					      xid);
	else {
		rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);
		if (newinode) {
			if (server->ops->set_lease_key)
				server->ops->set_lease_key(newinode, fid);
			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
				newinode->i_mode = mode;
			if ((*oplock & CIFS_CREATE_ACTION) &&
			    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)) {
				newinode->i_uid = current_fsuid();
				if (inode->i_mode & S_ISGID)
					newinode->i_gid = inode->i_gid;
				else
					newinode->i_gid = current_fsgid();
			}
		}
	}

cifs_create_set_dentry:
	if (rc != 0) {
		cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
		if (server->ops->close)
			server->ops->close(xid, tcon, fid);
		goto out;
	}
	d_drop(direntry);
	d_add(direntry, newinode);

out:
	kfree(buf);
	kfree(full_path);
	return rc;
}
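The disposition logic in cifs_do_create() maps POSIX open flags onto SMB create dispositions. A standalone sketch of that mapping, with illustrative enum names in place of the SMB constants, is shown below; as in the function above, the fallback is overwrite-if because this path is only reached for creates.

#include <fcntl.h>
#include <stdio.h>

enum smb_disposition {
	DISP_FILE_OPEN,		/* fail if the file does not exist */
	DISP_FILE_CREATE,	/* fail if the file already exists */
	DISP_FILE_OPEN_IF,	/* open if present, create otherwise */
	DISP_FILE_OVERWRITE_IF,	/* truncate if present, create otherwise */
};

static enum smb_disposition disposition_from_flags(int oflags)
{
	if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return DISP_FILE_CREATE;
	if ((oflags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return DISP_FILE_OVERWRITE_IF;
	if (oflags & O_CREAT)
		return DISP_FILE_OPEN_IF;
	/* create path only, so default to overwrite-if like the code above */
	return DISP_FILE_OVERWRITE_IF;
}

int main(void)
{
	printf("%d\n", disposition_from_flags(O_CREAT | O_EXCL | O_WRONLY));
	printf("%d\n", disposition_from_flags(O_CREAT | O_TRUNC | O_WRONLY));
	return 0;
}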
Example #27
static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &cifs_file_inode_ops;
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
				inode->i_fop = &cifs_file_direct_nobrl_ops;
			else
				inode->i_fop = &cifs_file_direct_ops;
		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			inode->i_fop = &cifs_file_nobrl_ops;
		else { /* not direct, send byte range locks */
			inode->i_fop = &cifs_file_ops;
		}


		/* check if server can support readpages */
		if (cifs_sb->tcon->ses->server->maxBuf <
				PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
			inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
		else
			inode->i_data.a_ops = &cifs_addr_ops;
		break;
	case S_IFDIR:
#ifdef CONFIG_CIFS_DFS_UPCALL
		if (is_dfs_referral) {
			inode->i_op = &cifs_dfs_referral_inode_operations;
		} else {
#else /* NO DFS support, treat as a directory */
		{
#endif
			inode->i_op = &cifs_dir_inode_ops;
			inode->i_fop = &cifs_dir_ops;
		}
		break;
	case S_IFLNK:
		inode->i_op = &cifs_symlink_inode_ops;
		break;
	default:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}
}

static void cifs_unix_info_to_inode(struct inode *inode,
		FILE_UNIX_BASIC_INFO *info, int force_uid_gid)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cifsInfo = CIFS_I(inode);
	__u64 num_of_bytes = le64_to_cpu(info->NumOfBytes);
	__u64 end_of_file = le64_to_cpu(info->EndOfFile);

	inode->i_atime = cifs_NTtimeToUnix(le64_to_cpu(info->LastAccessTime));
	inode->i_mtime =
		cifs_NTtimeToUnix(le64_to_cpu(info->LastModificationTime));
	inode->i_ctime = cifs_NTtimeToUnix(le64_to_cpu(info->LastStatusChange));
	inode->i_mode = le64_to_cpu(info->Permissions);

	/*
	 * Since we set the inode type below we need to mask off
	 * to avoid strange results if bits set above.
	 */
	inode->i_mode &= ~S_IFMT;
	switch (le32_to_cpu(info->Type)) {
	case UNIX_FILE:
		inode->i_mode |= S_IFREG;
		break;
	case UNIX_SYMLINK:
		inode->i_mode |= S_IFLNK;
		break;
	case UNIX_DIR:
		inode->i_mode |= S_IFDIR;
		break;
	case UNIX_CHARDEV:
		inode->i_mode |= S_IFCHR;
		inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor),
				      le64_to_cpu(info->DevMinor) & MINORMASK);
		break;
	case UNIX_BLOCKDEV:
		inode->i_mode |= S_IFBLK;
		inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor),
				      le64_to_cpu(info->DevMinor) & MINORMASK);
		break;
	case UNIX_FIFO:
		inode->i_mode |= S_IFIFO;
		break;
	case UNIX_SOCKET:
		inode->i_mode |= S_IFSOCK;
		break;
	default:
		/* safest to call it a file if we do not know */
		inode->i_mode |= S_IFREG;
		cFYI(1, ("unknown type %d", le32_to_cpu(info->Type)));
		break;
	}

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) &&
	    !force_uid_gid)
		inode->i_uid = cifs_sb->mnt_uid;
	else
		inode->i_uid = le64_to_cpu(info->Uid);

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) &&
	    !force_uid_gid)
		inode->i_gid = cifs_sb->mnt_gid;
	else
		inode->i_gid = le64_to_cpu(info->Gid);

	inode->i_nlink = le64_to_cpu(info->Nlinks);

	spin_lock(&inode->i_lock);
	if (is_size_safe_to_change(cifsInfo, end_of_file)) {
		/*
		 * We cannot safely change the file size here while the client
		 * is writing to it; is_size_safe_to_change() above guards
		 * against that race.
		 */
		i_size_write(inode, end_of_file);

		/*
		 * i_blocks is not related to (i_size / i_blksize),
		 * but instead 512 byte (2**9) size is required for
		 * calculating num blocks.
		 */
		inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
	}
	spin_unlock(&inode->i_lock);
}
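The timestamp fields above are converted by cifs_NTtimeToUnix(), which turns Windows NT time (100-nanosecond ticks since 1601-01-01) into Unix time. A plain userspace sketch of that conversion, assuming the usual 11644473600-second epoch offset, is:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NT_EPOCH_DELTA_SECS 11644473600ULL	/* 1601-01-01 .. 1970-01-01 */
#define NT_TICKS_PER_SEC    10000000ULL		/* 100 ns ticks per second */

/* Convert NT time (100 ns ticks since 1601) to a Unix timespec. */
static struct timespec nt_time_to_unix(uint64_t nt_ticks)
{
	struct timespec ts;

	ts.tv_sec  = (time_t)(nt_ticks / NT_TICKS_PER_SEC - NT_EPOCH_DELTA_SECS);
	ts.tv_nsec = (long)(nt_ticks % NT_TICKS_PER_SEC) * 100;
	return ts;
}

int main(void)
{
	/* 2009-02-13 23:31:30 UTC (Unix time 1234567890) expressed as NT time */
	struct timespec ts = nt_time_to_unix(128790414900000000ULL);

	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}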

static const unsigned char *cifs_get_search_path(struct cifs_sb_info *cifs_sb,
						const char *search_path)
{
	int tree_len;
	int path_len;
	int i;
	char *tmp_path;
	struct cifsTconInfo *pTcon = cifs_sb->tcon;

	if (!(pTcon->Flags & SMB_SHARE_IS_IN_DFS))
		return search_path;

	/* use full path name for working with DFS */
	tree_len = strnlen(pTcon->treeName, MAX_TREE_SIZE + 1);
	path_len = strnlen(search_path, MAX_PATHCONF);

	tmp_path = kmalloc(tree_len+path_len+1, GFP_KERNEL);
	if (tmp_path == NULL)
		return search_path;

	strncpy(tmp_path, pTcon->treeName, tree_len);
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		for (i = 0; i < tree_len; i++) {
			if (tmp_path[i] == '\\')
				tmp_path[i] = '/';
		}
	strncpy(tmp_path+tree_len, search_path, path_len);
	tmp_path[tree_len+path_len] = 0;
	return tmp_path;
}
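cifs_get_search_path() prefixes the tree name to the path for DFS shares and, when POSIX paths are in use, flips the backslashes in that prefix to forward slashes. The userspace sketch below (build_dfs_path() is a hypothetical helper) shows the same construction without the kernel allocation and length-limit details:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build "<tree_name><search_path>", optionally converting '\' to '/'. */
static char *build_dfs_path(const char *tree_name, const char *search_path,
			    int posix_paths)
{
	size_t tree_len = strlen(tree_name);
	size_t path_len = strlen(search_path);
	char *full;
	size_t i;

	full = malloc(tree_len + path_len + 1);
	if (!full)
		return NULL;

	memcpy(full, tree_name, tree_len);
	if (posix_paths)
		for (i = 0; i < tree_len; i++)
			if (full[i] == '\\')
				full[i] = '/';
	memcpy(full + tree_len, search_path, path_len + 1);	/* copies the NUL */
	return full;
}

int main(void)
{
	char *p = build_dfs_path("\\\\server\\share", "/dir/file.txt", 1);

	if (p) {
		printf("%s\n", p);	/* prints //server/share/dir/file.txt */
		free(p);
	}
	return 0;
}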
Example #28
int
cifs_atomic_open(struct inode *inode, struct dentry *direntry,
		 struct file *file, unsigned oflags, umode_t mode,
		 int *opened)
{
	int rc;
	unsigned int xid;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	__u32 oplock;
	struct cifsFileInfo *file_info;

	/*
	 * Posix open is only called (at lookup time) for file create now. For
	 * opens (rather than creates), because we do not know if it is a file
	 * or directory yet, and current Samba no longer allows us to do posix
	 * open on dirs, we could end up wasting an open call on what turns out
	 * to be a dir. For file opens, we wait to call posix open till
	 * cifs_open.  It could be added to atomic_open in the future but the
	 * performance tradeoff of the extra network request when EISDIR or
	 * EACCES is returned would have to be weighed against the 50% reduction
	 * in network traffic in the other paths.
	 */
	if (!(oflags & O_CREAT)) {
		struct dentry *res;

		/*
		 * Check for hashed negative dentry. We have already revalidated
		 * the dentry and it is fine. No need to perform another lookup.
		 */
		if (!d_unhashed(direntry))
			return -ENOENT;

		res = cifs_lookup(inode, direntry, 0);
		if (IS_ERR(res))
			return PTR_ERR(res);

		return finish_no_open(file, res);
	}

	rc = check_name(direntry);
	if (rc)
		return rc;

	xid = get_xid();

	cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p",
	     inode, direntry->d_name.name, direntry);

	tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto out_free_xid;
	}

	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	if (server->ops->new_lease_key)
		server->ops->new_lease_key(&fid);

	cifs_add_pending_open(&fid, tlink, &open);

	rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
			    &oplock, &fid, opened);

	if (rc) {
		cifs_del_pending_open(&open);
		goto out;
	}

	rc = finish_open(file, direntry, generic_file_open, opened);
	if (rc) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		goto out;
	}

	file_info = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (file_info == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
	}

out:
	cifs_put_tlink(tlink);
out_free_xid:
	free_xid(xid);
	return rc;
}
Example #29
int cifs_unlink(struct inode *inode, struct dentry *direntry)
{
	int rc = 0;
	int xid;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	char *full_path = NULL;
	struct cifsInodeInfo *cifsInode;
	FILE_BASIC_INFO *pinfo_buf;

	cFYI(1, ("cifs_unlink, inode = 0x%p", inode));

	xid = GetXid();

	if (inode)
		cifs_sb = CIFS_SB(inode->i_sb);
	else
		cifs_sb = CIFS_SB(direntry->d_sb);
	pTcon = cifs_sb->tcon;

	/* Unlink can be called from rename so we can not grab the sem here
	   since we deadlock otherwise */
/*	mutex_lock(&direntry->d_sb->s_vfs_rename_mutex);*/
	full_path = build_path_from_dentry(direntry);
/*	mutex_unlock(&direntry->d_sb->s_vfs_rename_mutex);*/
	if (full_path == NULL) {
		FreeXid(xid);
		return -ENOMEM;
	}

	if ((pTcon->ses->capabilities & CAP_UNIX) &&
		(CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(pTcon->fsUnixInfo.Capability))) {
		rc = CIFSPOSIXDelFile(xid, pTcon, full_path,
			SMB_POSIX_UNLINK_FILE_TARGET, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
		cFYI(1, ("posix del rc %d", rc));
		if ((rc == 0) || (rc == -ENOENT))
			goto psx_del_no_retry;
	}

	rc = CIFSSMBDelFile(xid, pTcon, full_path, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
psx_del_no_retry:
	if (!rc) {
		if (direntry->d_inode)
			drop_nlink(direntry->d_inode);
	} else if (rc == -ENOENT) {
		d_drop(direntry);
	} else if (rc == -ETXTBSY) {
		int oplock = FALSE;
		__u16 netfid;

		rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, DELETE, FILE_SHARE_ALL,
				 CREATE_NOT_DIR | CREATE_DELETE_ON_CLOSE,
				 &netfid, &oplock, NULL, cifs_sb->local_nls,
				 cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
		if (rc == 0) {
			CIFSSMBRenameOpenFile(xid, pTcon, netfid, NULL,
					      cifs_sb->local_nls,
					      cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
			CIFSSMBClose(xid, pTcon, netfid);
			if (direntry->d_inode)
				drop_nlink(direntry->d_inode);
		}
	} else if (rc == -EACCES) {
		/* try only if r/o attribute set in local lookup data? */
		pinfo_buf = kzalloc(sizeof(FILE_BASIC_INFO), GFP_KERNEL);
		if (pinfo_buf) {
			/* ATTRS set to normal clears r/o bit */
			pinfo_buf->Attributes = cpu_to_le32(ATTR_NORMAL);
			if (!(pTcon->ses->flags & CIFS_SES_NT4))
				rc = CIFSSMBSetTimes(xid, pTcon, full_path,
						     pinfo_buf,
						     cifs_sb->local_nls,
						     cifs_sb->mnt_cifs_flags &
							CIFS_MOUNT_MAP_SPECIAL_CHR);
			else
				rc = -EOPNOTSUPP;

			if (rc == -EOPNOTSUPP) {
				int oplock = FALSE;
				__u16 netfid;
			/*	rc = CIFSSMBSetAttrLegacy(xid, pTcon,
							  full_path,
							  (__u16)ATTR_NORMAL,
							  cifs_sb->local_nls);
			   For some strange reason it seems that NT4 eats the
			   old setattr call without actually setting the
			   attributes so on to the third attempted workaround
			   */

			/* BB could scan to see if we already have it open
			   and pass in pid of opener to function */
				rc = CIFSSMBOpen(xid, pTcon, full_path,
						 FILE_OPEN, SYNCHRONIZE |
						 FILE_WRITE_ATTRIBUTES, FILE_SHARE_ALL, 0,
						 &netfid, &oplock, NULL,
						 cifs_sb->local_nls,
						 cifs_sb->mnt_cifs_flags &
						    CIFS_MOUNT_MAP_SPECIAL_CHR);
				if (rc == 0) {
					rc = CIFSSMBSetFileTimes(xid, pTcon,
								 pinfo_buf,
								 netfid);
					CIFSSMBClose(xid, pTcon, netfid);
				}
			}
			kfree(pinfo_buf);
		}
		if (rc == 0) {
			rc = CIFSSMBDelFile(xid, pTcon, full_path,
					    cifs_sb->local_nls,
					    cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
			if (!rc) {
				if (direntry->d_inode)
					drop_nlink(direntry->d_inode);
			} else if (rc == -ETXTBSY) {
				int oplock = FALSE;
				__u16 netfid;

				rc = CIFSSMBOpen(xid, pTcon, full_path,
						 FILE_OPEN, DELETE, FILE_SHARE_ALL,
						 CREATE_NOT_DIR |
						 CREATE_DELETE_ON_CLOSE,
						 &netfid, &oplock, NULL,
						 cifs_sb->local_nls,
						 cifs_sb->mnt_cifs_flags &
						    CIFS_MOUNT_MAP_SPECIAL_CHR);
				if (rc == 0) {
					CIFSSMBRenameOpenFile(xid, pTcon,
						netfid, NULL,
						cifs_sb->local_nls,
						cifs_sb->mnt_cifs_flags &
						    CIFS_MOUNT_MAP_SPECIAL_CHR);
					CIFSSMBClose(xid, pTcon, netfid);
					if (direntry->d_inode)
						drop_nlink(direntry->d_inode);
				}
			/* BB if rc = -ETXTBUSY goto the rename logic BB */
			}
		}
	}
	if (direntry->d_inode) {
		cifsInode = CIFS_I(direntry->d_inode);
		cifsInode->time = 0;	/* will force revalidate to get info
					   when needed */
		direntry->d_inode->i_ctime = current_fs_time(inode->i_sb);
	}
	if (inode) {
		inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
		cifsInode = CIFS_I(inode);
		cifsInode->time = 0;	/* force revalidate of dir as well */
	}

	kfree(full_path);
	FreeXid(xid);
	return rc;
}
Example #30
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	memset(Local_System_Name, 0, 15);
	rwlock_init(&GlobalSMBSeslock);
	rwlock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_destroy_request_bufs;
#ifdef CONFIG_CIFS_UPCALL
	rc = register_key_type(&cifs_spnego_key_type);
	if (rc)
		goto out_unregister_filesystem;
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = register_key_type(&key_type_dns_resolver);
	if (rc)
		goto out_unregister_key_type;
#endif
	rc = slow_work_register_user(THIS_MODULE);
	if (rc)
		goto out_unregister_resolver_key;

	return 0;

 out_unregister_resolver_key:
#ifdef CONFIG_CIFS_DFS_UPCALL
	unregister_key_type(&key_type_dns_resolver);
 out_unregister_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	unregister_key_type(&cifs_spnego_key_type);
 out_unregister_filesystem:
#endif
	unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
	cifs_destroy_request_bufs();
 out_destroy_mids:
	cifs_destroy_mids();
 out_destroy_inodecache:
	cifs_destroy_inodecache();
 out_clean_proc:
	cifs_proc_clean();
	return rc;
}