Example #1
0
int wrapfs_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied,
		struct page *page, void *fsdata)
{
	char *page_data = (char *)kmap(page);
	struct file *lower_file = wrapfs_lower_file(file);
	struct inode *inode = page->mapping->host;
	struct inode *lower_inode = NULL;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	mm_segment_t old_fs;
	int err = 0;
#ifdef WRAPFS_CRYPTO
	struct inode *cur_inode = page->mapping->host;
	pgoff_t index = pos >> (PAGE_CACHE_SHIFT);
	loff_t cur_inode_size = cur_inode->i_size;
	pgoff_t cur_inode_last_index = cur_inode_size >> (PAGE_CACHE_SHIFT);
	unsigned int cur_inode_end_offset;
	loff_t extra_padding = pos - cur_inode_size;
	char *encrypted_buf = NULL;
	unsigned copied1 = copied;
	cur_inode_end_offset = cur_inode_size & (PAGE_CACHE_SIZE - 1);
#endif
	wrapfs_debug_aops(WRAPFS_SB(file->f_dentry->d_sb)->wrapfs_debug_a_ops,
				"");
	wrapfs_debug("");
	if (lower_file == NULL) {
		wrapfs_debug("lower_file is NULL!!\n");
		err = -EACCES;
		goto out;
	}

	wrapfs_debug("pos : %lld", pos);
	wrapfs_debug("from : %u", from);
	wrapfs_debug("copied : %u", copied);
	wrapfs_debug("lower_file->f_pos : %lld", lower_file->f_pos);
#ifdef WRAPFS_CRYPTO
	if (extra_padding > 0 && (cur_inode_last_index == index)) {
		copied = copied + pos - cur_inode_size;
		from = cur_inode_end_offset;
	}
	encrypted_buf = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
	if (encrypted_buf == NULL) {
		wrapfs_debug("encrypted_buf is NULL!!");
		err = -ENOMEM;
		goto out;
	}
	err = my_encrypt(page_data, PAGE_CACHE_SIZE, encrypted_buf,
				PAGE_CACHE_SIZE,
				WRAPFS_SB(file->f_dentry->d_sb)->key,
				WRAPFS_CRYPTO_KEY_LEN);
	if (err < 0) {
		wrapfs_debug("Encrypt failed!!");
		err = -EINVAL;
		kfree(encrypted_buf);
		goto out;
	}
#endif
	lower_file->f_pos = page_offset(page) + from;

	if (!PageUptodate(page))
		SetPageUptodate(page);

	wrapfs_debug("pos : %lld", pos);
	wrapfs_debug("from : %u", from);
	wrapfs_debug("copied : %u", copied);
	wrapfs_debug("lower_file->f_pos : %lld", lower_file->f_pos);

	old_fs = get_fs();
	set_fs(KERNEL_DS);
#ifndef WRAPFS_CRYPTO
	err = vfs_write(lower_file, page_data + from, copied,
				&lower_file->f_pos);
#else
	err = vfs_write(lower_file, encrypted_buf + from, copied,
				&lower_file->f_pos);
	/* If zeroes need to be placed, then err exceeds copied.
	 * In this case, we need to make err=copied1 to avoid oops in iov_iter
	 */
	if (err > 0 && extra_padding > 0)
		err = copied1;
#endif
	wrapfs_debug("err : %d", err);
	set_fs(old_fs);
	if (err < 0) {
		wrapfs_debug("vfs_write error : %d!!\n", err);
		err = -EINVAL;
#ifdef WRAPFS_CRYPTO
		kfree(encrypted_buf);
#endif
		goto out;
	}

	lower_inode = lower_file->f_path.dentry->d_inode;
	if (!lower_inode)
		lower_inode = wrapfs_lower_inode(inode);
	BUG_ON(!lower_inode);
	fsstack_copy_inode_size(inode, lower_inode);
	fsstack_copy_attr_times(inode, lower_inode);

out:
	kunmap(page);
	unlock_page(page);
	page_cache_release(page);
	wrapfs_debug_aops(WRAPFS_SB(file->f_dentry->d_sb)->wrapfs_debug_a_ops,
					"err : %d", err);
	return err;
}
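The wrapfs example above maps the page once at the top of wrapfs_write_end() and funnels every return, including the error paths, through the single out: label that unmaps it. Below is a minimal sketch of that pairing discipline only, assuming a hypothetical my_lower_write() helper in place of the real write to the lower filesystem; it is an illustration, not wrapfs code.

#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Hypothetical stand-in for the write to the lower file. */
static int my_lower_write(char *buf, unsigned int len)
{
	return (int)len;	/* pretend everything was written */
}

/* Sketch only: map once, use the mapping, let one shared exit label unmap. */
static int sketch_write_end(struct page *page, unsigned int from,
			    unsigned int copied)
{
	char *kaddr = kmap(page);
	int err;

	err = my_lower_write(kaddr + from, copied);
	if (err < 0)
		goto out;		/* error path still reaches kunmap() */

	SetPageUptodate(page);
	err = copied;
out:
	kunmap(page);			/* one unmap covers every exit path */
	unlock_page(page);
	page_cache_release(page);
	return err;
}

Keeping the single kunmap() at a shared exit label is what keeps the map/unmap calls balanced when new error paths are added later.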
Example #2
/*
 * Read a directory, using filldir to fill the dirent memory.
 * smb_proc_readdir does the actual reading from the smb server.
 *
 * The cache code is almost directly taken from ncpfs
 */
static int 
smb_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct dentry *dentry = filp->f_dentry;
	struct inode *dir = dentry->d_inode;
	struct smb_sb_info *server = server_from_dentry(dentry);
	union  smb_dir_cache *cache = NULL;
	struct smb_cache_control ctl;
	struct page *page = NULL;
	int result;

	ctl.page  = NULL;
	ctl.cache = NULL;

	VERBOSE("reading %s/%s, f_pos=%d\n",
		DENTRY_PATH(dentry),  (int) filp->f_pos);

	result = 0;

	lock_kernel();

	switch ((unsigned int) filp->f_pos) {
	case 0:
		if (filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR) < 0)
			goto out;
		filp->f_pos = 1;
		/* fallthrough */
	case 1:
		if (filldir(dirent, "..", 2, 1, parent_ino(dentry), DT_DIR) < 0)
			goto out;
		filp->f_pos = 2;
	}

	/*
	 * Make sure our inode is up-to-date.
	 */
	result = smb_revalidate_inode(dentry);
	if (result)
		goto out;


	page = grab_cache_page(&dir->i_data, 0);
	if (!page)
		goto read_really;

	ctl.cache = cache = kmap(page);
	ctl.head  = cache->head;

	if (!PageUptodate(page) || !ctl.head.eof) {
		VERBOSE("%s/%s, page uptodate=%d, eof=%d\n",
			 DENTRY_PATH(dentry), PageUptodate(page),ctl.head.eof);
		goto init_cache;
	}

	if (filp->f_pos == 2) {
		if (jiffies - ctl.head.time >= SMB_MAX_AGE(server))
			goto init_cache;

		/*
		 * N.B. ncpfs checks mtime of dentry too here, we don't.
		 *   1. common smb servers do not update mtime on dir changes
		 *   2. it requires an extra smb request
		 *      (revalidate has the same timeout as ctl.head.time)
		 *
		 * Instead smbfs invalidates its own cache on local changes
		 * and remote changes are not seen until timeout.
		 */
	}

	if (filp->f_pos > ctl.head.end)
		goto finished;

	ctl.fpos = filp->f_pos + (SMB_DIRCACHE_START - 2);
	ctl.ofs  = ctl.fpos / SMB_DIRCACHE_SIZE;
	ctl.idx  = ctl.fpos % SMB_DIRCACHE_SIZE;

	for (;;) {
		if (ctl.ofs != 0) {
			ctl.page = find_lock_page(&dir->i_data, ctl.ofs);
			if (!ctl.page)
				goto invalid_cache;
			ctl.cache = kmap(ctl.page);
			if (!PageUptodate(ctl.page))
				goto invalid_cache;
		}
		while (ctl.idx < SMB_DIRCACHE_SIZE) {
			struct dentry *dent;
			int res;

			dent = smb_dget_fpos(ctl.cache->dentry[ctl.idx],
					     dentry, filp->f_pos);
			if (!dent)
				goto invalid_cache;

			res = filldir(dirent, dent->d_name.name,
				      dent->d_name.len, filp->f_pos,
				      dent->d_inode->i_ino, DT_UNKNOWN);
			dput(dent);
			if (res)
				goto finished;
			filp->f_pos += 1;
			ctl.idx += 1;
			if (filp->f_pos > ctl.head.end)
				goto finished;
		}
		if (ctl.page) {
			kunmap(ctl.page);
			SetPageUptodate(ctl.page);
			unlock_page(ctl.page);
			page_cache_release(ctl.page);
			ctl.page = NULL;
		}
		ctl.idx  = 0;
		ctl.ofs += 1;
	}
invalid_cache:
	if (ctl.page) {
		kunmap(ctl.page);
		unlock_page(ctl.page);
		page_cache_release(ctl.page);
		ctl.page = NULL;
	}
	ctl.cache = cache;
init_cache:
	smb_invalidate_dircache_entries(dentry);
	ctl.head.time = jiffies;
	ctl.head.eof = 0;
	ctl.fpos = 2;
	ctl.ofs = 0;
	ctl.idx = SMB_DIRCACHE_START;
	ctl.filled = 0;
	ctl.valid  = 1;
read_really:
	result = server->ops->readdir(filp, dirent, filldir, &ctl);
	if (result == -ERESTARTSYS && page)
		ClearPageUptodate(page);
	if (ctl.idx == -1)
		goto invalid_cache;	/* retry */
	ctl.head.end = ctl.fpos - 1;
	ctl.head.eof = ctl.valid;
finished:
	if (page) {
		cache->head = ctl.head;
		kunmap(page);
		if (result != -ERESTARTSYS)
			SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (ctl.page) {
		kunmap(ctl.page);
		SetPageUptodate(ctl.page);
		unlock_page(ctl.page);
		page_cache_release(ctl.page);
	}
out:
	unlock_kernel();
	return result;
}
Example #3
0
static int
HgfsDoRead(HgfsHandle handle,             // IN:  Handle for this file
           HgfsDataPacket dataPacket[],   // IN/OUT: Data description
           uint32 numEntries,             // IN: Number of entries in dataPacket
           loff_t offset)                 // IN:  Offset at which to read
{
   HgfsReq *req;
   HgfsOp opUsed;
   int result = 0;
   uint32 actualSize = 0;
   char *payload = NULL;
   HgfsStatus replyStatus;
   char *buf;
   uint32 count;
   ASSERT(numEntries == 1);

   count = dataPacket[0].len;

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: out of memory while "
              "getting new request\n"));
      result = -ENOMEM;
      goto out;
   }

 retry:
   opUsed = hgfsVersionRead;
   if (opUsed == HGFS_OP_READ_FAST_V4) {
      HgfsRequest *header;
      HgfsRequestReadV3 *request;

      header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req));
      header->id = req->id;
      header->op = opUsed;

      request = (HgfsRequestReadV3 *)(HGFS_REQ_PAYLOAD_V3(req));
      request->file = handle;
      request->offset = offset;
      request->requiredSize = count;
      request->reserved = 0;
      req->dataPacket = kmalloc(numEntries * sizeof req->dataPacket[0],
                                GFP_KERNEL);
      if (!req->dataPacket) {
         LOG(4, (KERN_WARNING "%s: Failed to allocate mem\n", __func__));
         result = -ENOMEM;
         goto out;
      }
      memcpy(req->dataPacket, dataPacket, numEntries * sizeof req->dataPacket[0]);
      req->numEntries = numEntries;

      LOG(4, (KERN_WARNING "VMware hgfs: Fast Read V4\n"));
   } else if (opUsed == HGFS_OP_READ_V3) {
      HgfsRequest *header;
      HgfsRequestReadV3 *request;

      header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req));
      header->id = req->id;
      header->op = opUsed;

      request = (HgfsRequestReadV3 *)(HGFS_REQ_PAYLOAD_V3(req));
      request->file = handle;
      request->offset = offset;
      request->requiredSize = MIN(req->bufferSize - sizeof *request -
                                  sizeof *header, count);
      request->reserved = 0;
      req->dataPacket = NULL;
      req->numEntries = 0;
      req->payloadSize = HGFS_REQ_PAYLOAD_SIZE_V3(request);
   } else {
      HgfsRequestRead *request;

      request = (HgfsRequestRead *)(HGFS_REQ_PAYLOAD(req));
      request->header.id = req->id;
      request->header.op = opUsed;
      request->file = handle;
      request->offset = offset;
      request->requiredSize = MIN(req->bufferSize - sizeof *request, count);
      req->dataPacket = NULL;
      req->numEntries = 0;
      req->payloadSize = sizeof *request;
   }

   /* Send the request and process the reply. */
   result = HgfsSendRequest(req);
   if (result == 0) {
      /* Get the reply. */
      replyStatus = HgfsReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);

      switch (result) {
      case 0:
         if (opUsed == HGFS_OP_READ_FAST_V4) {
            actualSize = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize;
         } else if (opUsed == HGFS_OP_READ_V3) {
            actualSize = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize;
            payload = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->payload;
         } else {
            actualSize = ((HgfsReplyRead *)HGFS_REQ_PAYLOAD(req))->actualSize;
            payload = ((HgfsReplyRead *)HGFS_REQ_PAYLOAD(req))->payload;
         }

         /* Sanity check on read size. */
         if (actualSize > count) {
            LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: read too big!\n"));
            result = -EPROTO;
            goto out;
         }

         if (!actualSize) {
            /* We got no bytes. */
            LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoRead: server returned "
                   "zero\n"));
            result = actualSize;
            goto out;
         }

         /* Return result. */
         if (opUsed == HGFS_OP_READ_V3 || opUsed == HGFS_OP_READ) {
            buf = kmap(dataPacket[0].page) + dataPacket[0].offset;
            ASSERT(buf);
            memcpy(buf, payload, actualSize);
            LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoRead: copied %u\n",
                    actualSize));
            kunmap(dataPacket[0].page);
         }
         result = actualSize;
	      break;

      case -EPROTO:
         /* Retry with older version(s). Set globally. */
         switch (opUsed) {
         case HGFS_OP_READ_FAST_V4:
            LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: Fast Read V4 not "
                    "supported. Falling back to V3 Read.\n"));
            if (req->dataPacket) {
               kfree(req->dataPacket);
               req->dataPacket = NULL;
            }
            hgfsVersionRead = HGFS_OP_READ_V3;
            goto retry;

         case HGFS_OP_READ_V3:
            LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: Version 3 not "
                    "supported. Falling back to version 1.\n"));
            hgfsVersionRead = HGFS_OP_READ;
            goto retry;

         default:
            break;
         }
	      break;

      default:
         break;
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: timed out\n"));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: server "
              "returned error: %d\n", result));
   } else {
      LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: unknown error: "
              "%d\n", result));
   }

out:
   if (req->dataPacket) {
      kfree(req->dataPacket);
   }
   HgfsFreeRequest(req);
   return result;
}
Example #4
0
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned int i;
	size_t total_len = 0, sent;
	struct socket *ssocket = server->ssocket;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	cFYI(1, "Sending smb: smb_len=%u", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);

	/* cork the socket */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	rc = smb_send_kvec(server, iov, n_vec, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		struct kvec p_iov;

		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
		rc = smb_send_kvec(server, &p_iov, 1, &sent);
		kunmap(rqst->rq_pages[i]);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cFYI(1, "partial send (wanted=%u sent=%zu): terminating "
			"session", smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cERROR(1, "Error %d sending data on socket to server", rc);
	else
		rc = 0;

	return rc;
}
Example #5
0
static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
		      struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct page *dir_page = NULL;
	struct ufs_dir_entry * dir_de = NULL;
	struct page *old_page;
	struct ufs_dir_entry *old_de;
	int err = -ENOENT;

	old_de = ufs_find_entry(old_dir, old_dentry, &old_page);
	if (!old_de)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		dir_de = ufs_dotdot(old_inode, &dir_page);
		if (!dir_de)
			goto out_old;
	}

	if (new_inode) {
		struct page *new_page;
		struct ufs_dir_entry *new_de;

		err = -ENOTEMPTY;
		if (dir_de && !ufs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_de = ufs_find_entry(new_dir, new_dentry, &new_page);
		if (!new_de)
			goto out_dir;
		inode_inc_link_count(old_inode);
		ufs_set_link(new_dir, new_de, new_page, old_inode);
		new_inode->i_ctime = CURRENT_TIME_SEC;
		if (dir_de)
			drop_nlink(new_inode);
		inode_dec_link_count(new_inode);
	} else {
		if (dir_de) {
			err = -EMLINK;
			if (new_dir->i_nlink >= UFS_LINK_MAX)
				goto out_dir;
		}
		inode_inc_link_count(old_inode);
		err = ufs_add_link(new_dentry, old_inode);
		if (err) {
			inode_dec_link_count(old_inode);
			goto out_dir;
		}
		if (dir_de)
			inode_inc_link_count(new_dir);
	}

	/*
	 * Like most other Unix systems, set the ctime for inodes on a
 	 * rename.
	 * inode_dec_link_count() will mark the inode dirty.
	 */
	old_inode->i_ctime = CURRENT_TIME_SEC;

	ufs_delete_entry(old_dir, old_de, old_page);
	inode_dec_link_count(old_inode);

	if (dir_de) {
		ufs_set_link(old_inode, dir_de, dir_page, new_dir);
		inode_dec_link_count(old_dir);
	}
	return 0;


out_dir:
	if (dir_de) {
		kunmap(dir_page);
		page_cache_release(dir_page);
	}
out_old:
	kunmap(old_page);
	page_cache_release(old_page);
out:
	return err;
}
Example #6
0
void
userinit(void)
{
	Mach *m = machp();
	Proc *p;
	Segment *s;
	KMap *k;
	Page *pg;

	p = newproc();
	p->pgrp = newpgrp();
	p->egrp = smalloc(sizeof(Egrp));
	p->egrp->ref = 1;
	p->fgrp = dupfgrp(nil);
	p->rgrp = newrgrp();
	p->procmode = 0640;

	kstrdup(&eve, "");
	kstrdup(&p->text, "*init*");
	kstrdup(&p->user, eve);

	/*
	 * Kernel Stack
	 *
	 * N.B. make sure there's enough space for syscall to check
	 *	for valid args and
	 *	space for gotolabel's return PC
	 * AMD64 stack must be quad-aligned.
	 */
	p->sched.pc = PTR2UINT(init0);
	p->sched.sp = PTR2UINT(p->kstack+KSTACK-sizeof(m->externup->arg)-sizeof(uintptr_t));
	p->sched.sp = STACKALIGN(p->sched.sp);

	/*
	 * User Stack
	 *
	 * Technically, newpage can't be called here because it
	 * should only be called when in a user context as it may
	 * try to sleep if there are no pages available, but that
	 * shouldn't be the case here.
	 */
	s = newseg(SG_STACK, USTKTOP-USTKSIZE, USTKSIZE/ BIGPGSZ);
	p->seg[SSEG] = s;
	pg = newpage(1, 0, USTKTOP-BIGPGSZ, BIGPGSZ, -1);
	segpage(s, pg);
	k = kmap(pg);
	bootargs(VA(k));
	kunmap(k);

	/*
	 * Text
	 */
	s = newseg(SG_TEXT, UTZERO, 1);
	s->flushme++;
	p->seg[TSEG] = s;
	pg = newpage(1, 0, UTZERO, BIGPGSZ, -1);
	memset(pg->cachectl, PG_TXTFLUSH, sizeof(pg->cachectl));
	segpage(s, pg);
	k = kmap(s->map[0]->pages[0]);
	/* UTZERO is only needed until we make init not have 2M block of zeros at the front. */
	memmove(UINT2PTR(VA(k) + init_code_start - UTZERO), init_code_out, sizeof(init_code_out));
	kunmap(k);

	/*
	 * Data
	 */
	s = newseg(SG_DATA, UTZERO + BIGPGSZ, 1);
	s->flushme++;
	p->seg[DSEG] = s;
	pg = newpage(1, 0, UTZERO + BIGPGSZ, BIGPGSZ, -1);
	memset(pg->cachectl, PG_TXTFLUSH, sizeof(pg->cachectl));
	segpage(s, pg);
	k = kmap(s->map[0]->pages[0]);
	/* This depends on init having a text segment < 2M. */
	memmove(UINT2PTR(VA(k) + init_data_start - (UTZERO + BIGPGSZ)), init_data_out, sizeof(init_data_out));
	kunmap(k);
	ready(p);
}
Example #7
0
struct dentry *ext2_get_parent(struct dentry *child)
{
	struct qstr dotdot = {.name = "..", .len = 2};
	unsigned long ino = ext2_inode_by_name(child->d_inode, &dotdot);
	if (!ino)
		return ERR_PTR(-ENOENT);
	return d_obtain_alias(ext2_iget(child->d_inode->i_sb, ino));
} 

/*
 * By the time this is called, we already have created
 * the directory cache entry for the new file, but it
 * is so far negative - it has no inode.
 *
 * If the create succeeds, we fill in the inode information
 * with d_instantiate(). 
 */
static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode, struct nameidata *nd)
{
	struct inode *inode;

	dquot_initialize(dir);

	inode = ext2_new_inode(dir, mode, &dentry->d_name);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &ext2_file_inode_operations;
	if (ext2_use_xip(inode->i_sb)) {
		inode->i_mapping->a_ops = &ext2_aops_xip;
		inode->i_fop = &ext2_xip_file_operations;
	} else if (test_opt(inode->i_sb, NOBH)) {
		inode->i_mapping->a_ops = &ext2_nobh_aops;
		inode->i_fop = &ext2_file_operations;
	} else {
		inode->i_mapping->a_ops = &ext2_aops;
		inode->i_fop = &ext2_file_operations;
	}
	mark_inode_dirty(inode);
	return ext2_add_nondir(dentry, inode);
}

static int ext2_mknod (struct inode * dir, struct dentry *dentry, umode_t mode, dev_t rdev)
{
	struct inode * inode;
	int err;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	dquot_initialize(dir);

	inode = ext2_new_inode (dir, mode, &dentry->d_name);
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		init_special_inode(inode, inode->i_mode, rdev);
#ifdef CONFIG_EXT2_FS_XATTR
		inode->i_op = &ext2_special_inode_operations;
#endif
		mark_inode_dirty(inode);
		err = ext2_add_nondir(dentry, inode);
	}
	return err;
}

static int ext2_symlink (struct inode * dir, struct dentry * dentry,
	const char * symname)
{
	struct super_block * sb = dir->i_sb;
	int err = -ENAMETOOLONG;
	unsigned l = strlen(symname)+1;
	struct inode * inode;

	if (l > sb->s_blocksize)
		goto out;

	dquot_initialize(dir);

	inode = ext2_new_inode (dir, S_IFLNK | S_IRWXUGO, &dentry->d_name);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out;

	if (l > sizeof (EXT2_I(inode)->i_data)) {
		/* slow symlink */
		inode->i_op = &ext2_symlink_inode_operations;
		if (test_opt(inode->i_sb, NOBH))
			inode->i_mapping->a_ops = &ext2_nobh_aops;
		else
			inode->i_mapping->a_ops = &ext2_aops;
		err = page_symlink(inode, symname, l);
		if (err)
			goto out_fail;
	} else {
		/* fast symlink */
		inode->i_op = &ext2_fast_symlink_inode_operations;
		memcpy((char*)(EXT2_I(inode)->i_data),symname,l);
		inode->i_size = l-1;
	}
	mark_inode_dirty(inode);

	err = ext2_add_nondir(dentry, inode);
out:
	return err;

out_fail:
	inode_dec_link_count(inode);
	unlock_new_inode(inode);
	iput (inode);
	goto out;
}

static int ext2_link (struct dentry * old_dentry, struct inode * dir,
	struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int err;

	dquot_initialize(dir);

	inode->i_ctime = CURRENT_TIME_SEC;
	inode_inc_link_count(inode);
	ihold(inode);

	err = ext2_add_link(dentry, inode);
	if (!err) {
		d_instantiate(dentry, inode);
		return 0;
	}
	inode_dec_link_count(inode);
	iput(inode);
	return err;
}

static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
{
	struct inode * inode;
	int err;

	dquot_initialize(dir);

	inode_inc_link_count(dir);

	inode = ext2_new_inode(dir, S_IFDIR | mode, &dentry->d_name);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_dir;

	inode->i_op = &ext2_dir_inode_operations;
	inode->i_fop = &ext2_dir_operations;
	if (test_opt(inode->i_sb, NOBH))
		inode->i_mapping->a_ops = &ext2_nobh_aops;
	else
		inode->i_mapping->a_ops = &ext2_aops;

	inode_inc_link_count(inode);

	err = ext2_make_empty(inode, dir);
	if (err)
		goto out_fail;

	err = ext2_add_link(dentry, inode);
	if (err)
		goto out_fail;

	unlock_new_inode(inode);
	d_instantiate(dentry, inode);
out:
	return err;

out_fail:
	inode_dec_link_count(inode);
	inode_dec_link_count(inode);
	unlock_new_inode(inode);
	iput(inode);
out_dir:
	inode_dec_link_count(dir);
	goto out;
}

static int ext2_unlink(struct inode * dir, struct dentry *dentry)
{
	struct inode * inode = dentry->d_inode;
	struct ext2_dir_entry_2 * de;
	struct page * page;
	int err = -ENOENT;

	dquot_initialize(dir);

	de = ext2_find_entry (dir, &dentry->d_name, &page);
	if (!de)
		goto out;

	err = ext2_delete_entry (de, page);
	if (err)
		goto out;

	inode->i_ctime = dir->i_ctime;
	inode_dec_link_count(inode);
	err = 0;
out:
	return err;
}

static int ext2_rmdir (struct inode * dir, struct dentry *dentry)
{
	struct inode * inode = dentry->d_inode;
	int err = -ENOTEMPTY;

	if (ext2_empty_dir(inode)) {
		err = ext2_unlink(dir, dentry);
		if (!err) {
			inode->i_size = 0;
			inode_dec_link_count(inode);
			inode_dec_link_count(dir);
		}
	}
	return err;
}

static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
	struct inode * new_dir,	struct dentry * new_dentry )
{
	struct inode * old_inode = old_dentry->d_inode;
	struct inode * new_inode = new_dentry->d_inode;
	struct page * dir_page = NULL;
	struct ext2_dir_entry_2 * dir_de = NULL;
	struct page * old_page;
	struct ext2_dir_entry_2 * old_de;
	int err = -ENOENT;

	dquot_initialize(old_dir);
	dquot_initialize(new_dir);

	old_de = ext2_find_entry (old_dir, &old_dentry->d_name, &old_page);
	if (!old_de)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		dir_de = ext2_dotdot(old_inode, &dir_page);
		if (!dir_de)
			goto out_old;
	}

	if (new_inode) {
		struct page *new_page;
		struct ext2_dir_entry_2 *new_de;

		err = -ENOTEMPTY;
		if (dir_de && !ext2_empty_dir (new_inode))
			goto out_dir;

		err = -ENOENT;
		new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
		if (!new_de)
			goto out_dir;
		ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
		new_inode->i_ctime = CURRENT_TIME_SEC;
		if (dir_de)
			drop_nlink(new_inode);
		inode_dec_link_count(new_inode);
	} else {
		err = ext2_add_link(new_dentry, old_inode);
		if (err)
			goto out_dir;
		if (dir_de)
			inode_inc_link_count(new_dir);
	}

	/*
	 * Like most other Unix systems, set the ctime for inodes on a
 	 * rename.
	 */
	old_inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(old_inode);

	ext2_delete_entry (old_de, old_page);

	if (dir_de) {
		if (old_dir != new_dir)
			ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0);
		else {
			kunmap(dir_page);
			page_cache_release(dir_page);
		}
		inode_dec_link_count(old_dir);
	}
	return 0;


out_dir:
	if (dir_de) {
		kunmap(dir_page);
		page_cache_release(dir_page);
	}
out_old:
	kunmap(old_page);
	page_cache_release(old_page);
out:
	return err;
}

const struct inode_operations ext2_dir_inode_operations = {
	.create		= ext2_create,
	.lookup		= ext2_lookup,
	.link		= ext2_link,
	.unlink		= ext2_unlink,
	.symlink	= ext2_symlink,
	.mkdir		= ext2_mkdir,
	.rmdir		= ext2_rmdir,
	.mknod		= ext2_mknod,
	.rename		= ext2_rename,
#ifdef CONFIG_EXT2_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext2_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.setattr	= ext2_setattr,
	.get_acl	= ext2_get_acl,
};

const struct inode_operations ext2_special_inode_operations = {
#ifdef CONFIG_EXT2_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext2_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.setattr	= ext2_setattr,
	.get_acl	= ext2_get_acl,
};
Example #8
int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_node_frag *frag = f->fraglist;
	__u32 offset = pg->index << PAGE_CACHE_SHIFT;
	__u32 end = offset + PAGE_CACHE_SIZE;
	unsigned char *pg_buf;
	int ret;

	D1(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%x\n", inode->i_ino, offset));

	if (!PageLocked(pg))
                PAGE_BUG(pg);

	while(frag && frag->ofs + frag->size  <= offset) {
		//		D1(printk(KERN_DEBUG "skipping frag %d-%d; before the region we care about\n", frag->ofs, frag->ofs + frag->size));
		frag = frag->next;
	}

	pg_buf = kmap(pg);

	/* XXX FIXME: Where a single physical node actually shows up in two
	   frags, we read it twice. Don't do that. */
	/* Now we're pointing at the first frag which overlaps our page */
	while(offset < end) {
		D2(printk(KERN_DEBUG "jffs2_readpage: offset %d, end %d\n", offset, end));
		if (!frag || frag->ofs > offset) {
			__u32 holesize = end - offset;
			if (frag) {
				D1(printk(KERN_NOTICE "Eep. Hole in ino %ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", inode->i_ino, frag->ofs, offset));
				holesize = min(holesize, frag->ofs - offset);
				D1(jffs2_print_frag_list(f));
			}
			D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize));
			memset(pg_buf, 0, holesize);
			pg_buf += holesize;
			offset += holesize;
			continue;
		} else if (frag->ofs < offset && (offset & (PAGE_CACHE_SIZE-1)) != 0) {
			D1(printk(KERN_NOTICE "Eep. Overlap in ino #%ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n",
				  inode->i_ino, frag->ofs, offset));
			D1(jffs2_print_frag_list(f));
			memset(pg_buf, 0, end - offset);
			ClearPageUptodate(pg);
			SetPageError(pg);
			kunmap(pg);
			return -EIO;
		} else if (!frag->node) {
			__u32 holeend = min(end, frag->ofs + frag->size);
			D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size));
			memset(pg_buf, 0, holeend - offset);
			pg_buf += holeend - offset;
			offset = holeend;
			frag = frag->next;
			continue;
		} else {
			__u32 readlen;
			__u32 fragofs; /* offset within the frag to start reading */

			fragofs = offset - frag->ofs;
			readlen = min(frag->size - fragofs, end - offset);
			D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%x\n", frag->ofs+fragofs, 
				  fragofs+frag->ofs+readlen, frag->node->raw->flash_offset & ~3));
			ret = jffs2_read_dnode(c, frag->node, pg_buf, fragofs + frag->ofs - frag->node->ofs, readlen);
			D2(printk(KERN_DEBUG "node read done\n"));
			if (ret) {
				D1(printk(KERN_DEBUG"jffs2_readpage error %d\n",ret));
				memset(pg_buf, 0, readlen);
				ClearPageUptodate(pg);
				SetPageError(pg);
				kunmap(pg);
				return ret;
			}
		
			pg_buf += readlen;
			offset += readlen;
			frag = frag->next;
			D2(printk(KERN_DEBUG "node read was OK. Looping\n"));
		}
	}
	D2(printk(KERN_DEBUG "readpage finishing\n"));
	SetPageUptodate(pg);
	ClearPageError(pg);

	flush_dcache_page(pg);

	kunmap(pg);
	D1(printk(KERN_DEBUG "readpage finished\n"));
	return 0;
}
Example #9
0
/*
 * discard a page cached in the pagecache
 */
static inline void afs_dir_put_page(struct page *page)
{
	kunmap(page);
	put_page(page);
}
Example #10
0
static void anon_pipe_buf_unmap(struct pipe_inode_info *info, struct pipe_buffer *buf)
{
	kunmap(buf->page);
}
Example #11
0
    /* filemap_write_and_wait(inode->i_mapping); */
    if (   inode->i_mapping->nrpages
        && filemap_fdatawrite(inode->i_mapping) != -EIO)
        filemap_fdatawait(inode->i_mapping);
#endif
    rc = VbglR0SfClose(&client_handle, &sf_g->map, sf_r->handle);
    if (RT_FAILURE(rc))
        LogFunc(("VbglR0SfClose failed rc=%Rrc\n", rc));

    kfree(sf_r);
    sf_i->file = NULL;
    sf_i->handle = SHFL_HANDLE_NIL;
    file->private_data = NULL;
    return 0;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int *type)
# define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int unused)
# define SET_TYPE(t)
#endif
{
    struct page *page;
    char *buf;
    loff_t off;
    uint32_t nread = PAGE_SIZE;
    int err;
    struct file *file = vma->vm_file;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;

    TRACE();
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    if (vmf->pgoff > vma->vm_end)
        return VM_FAULT_SIGBUS;
#else
    if (vaddr > vma->vm_end)
    {
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
    }
#endif

    /* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls VbglR0SfRead()
     * which works on virtual addresses. On Linux cannot reliably determine the
     * physical address for high memory, see rtR0MemObjNativeLockKernel(). */
    page = alloc_page(GFP_USER);
    if (!page) {
        LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_OOM;
#else
        SET_TYPE(VM_FAULT_OOM);
        return NOPAGE_OOM;
#endif
    }

    buf = kmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    off = (vmf->pgoff << PAGE_SHIFT);
#else
    off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
    err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (err)
    {
        kunmap(page);
        put_page(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_SIGBUS;
#else
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
#endif
    }

    BUG_ON (nread > PAGE_SIZE);
    if (!nread)
    {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        clear_user_page(page_address(page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
        clear_user_page(page_address(page), vaddr, page);
#else
        clear_user_page(page_address(page), vaddr);
#endif
    }
    else
        memset(buf + nread, 0, PAGE_SIZE - nread);

    flush_dcache_page(page);
    kunmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    vmf->page = page;
    return 0;
#else
    SET_TYPE(VM_FAULT_MAJOR);
    return page;
#endif
}
Example #12
0
static int jffs2_commit_write (struct file *filp, struct page *pg,
			       unsigned start, unsigned end)
{
	/* Actually commit the write from the page cache page we're looking at.
	 * For now, we write the full page out each time. It sucks, but it's simple
	 */
	struct inode *inode = pg->mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	unsigned aligned_start = start & ~3;
	int ret = 0;
	uint32_t writtenlen = 0;

	D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
		  inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));

	if (!start && end == PAGE_CACHE_SIZE) {
		/* We need to avoid deadlock with page_cache_read() in
		   jffs2_garbage_collect_pass(). So we have to mark the
		   page up to date, to prevent page_cache_read() from
		   trying to re-lock it. */
		SetPageUptodate(pg);
	}

	ri = jffs2_alloc_raw_inode();

	if (!ri) {
		D1(printk(KERN_DEBUG "jffs2_commit_write(): Allocation of raw inode failed\n"));
		return -ENOMEM;
	}

	/* Set the fields that the generic jffs2_write_inode_range() code can't find */
	ri->ino = cpu_to_je32(inode->i_ino);
	ri->mode = cpu_to_jemode(inode->i_mode);
	ri->uid = cpu_to_je16(inode->i_uid);
	ri->gid = cpu_to_je16(inode->i_gid);
	ri->isize = cpu_to_je32((uint32_t)inode->i_size);
	ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds());

	/* In 2.4, it was already kmapped by generic_file_write(). Doesn't
	   hurt to do it again. The alternative is ifdefs, which are ugly. */
	kmap(pg);

	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
				      (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
				      end - aligned_start, &writtenlen);

	kunmap(pg);

	if (ret) {
		/* There was an error writing. */
		SetPageError(pg);
	}

	/* Adjust writtenlen for the padding we did, so we don't confuse our caller */
	if (writtenlen < (start&3))
		writtenlen = 0;
	else
		writtenlen -= (start&3);

	if (writtenlen) {
		if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) {
			inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen;
			inode->i_blocks = (inode->i_size + 511) >> 9;

			inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
		}
	}
Example #13
0
static int jffs2_write_end(struct file *filp, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *pg, void *fsdata)
{
	/* Actually commit the write from the page cache page we're looking at.
	 * For now, we write the full page out each time. It sucks, but it's simple
	 */
	struct inode *inode = mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned end = start + copied;
	unsigned aligned_start = start & ~3;
	int ret = 0;
	uint32_t writtenlen = 0;

	D1(printk(KERN_DEBUG "jffs2_write_end(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
		  inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));

	/* We need to avoid deadlock with page_cache_read() in
	   jffs2_garbage_collect_pass(). So the page must be
	   up to date to prevent page_cache_read() from trying
	   to re-lock it. */
	BUG_ON(!PageUptodate(pg));

	if (end == PAGE_CACHE_SIZE) {
		/* When writing out the end of a page, write out the
		   _whole_ page. This helps to reduce the number of
		   nodes in files which have many short writes, like
		   syslog files. */
		aligned_start = 0;
	}

	ri = jffs2_alloc_raw_inode();

	if (!ri) {
		D1(printk(KERN_DEBUG "jffs2_write_end(): Allocation of raw inode failed\n"));
		unlock_page(pg);
		page_cache_release(pg);
		return -ENOMEM;
	}

	/* Set the fields that the generic jffs2_write_inode_range() code can't find */
	ri->ino = cpu_to_je32(inode->i_ino);
	ri->mode = cpu_to_jemode(inode->i_mode);
	ri->uid = cpu_to_je16(inode->i_uid);
	ri->gid = cpu_to_je16(inode->i_gid);
	ri->isize = cpu_to_je32((uint32_t)inode->i_size);
	ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds());

	/* In 2.4, it was already kmapped by generic_file_write(). Doesn't
	   hurt to do it again. The alternative is ifdefs, which are ugly. */
	kmap(pg);

	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
				      (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
				      end - aligned_start, &writtenlen);

	kunmap(pg);

	if (ret) {
		/* There was an error writing. */
		SetPageError(pg);
	}

	/* Adjust writtenlen for the padding we did, so we don't confuse our caller */
	writtenlen -= min(writtenlen, (start - aligned_start));

	if (writtenlen) {
		if (inode->i_size < pos + writtenlen) {
			inode->i_size = pos + writtenlen;
			inode->i_blocks = (inode->i_size + 511) >> 9;

			inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
		}
	}
Example #14
0
static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct super_block *sb = old_dir->i_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct page *old_dir_page;
	struct page *old_page, *new_page;
	struct f2fs_dir_entry *old_dir_entry = NULL;
	struct f2fs_dir_entry *old_entry;
	struct f2fs_dir_entry *new_entry;
	int err = -ENOENT;

	f2fs_balance_fs(sbi);

	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
	if (!old_entry)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
		if (!old_dir_entry)
			goto out_old;
	}

	f2fs_lock_op(sbi);

	if (new_inode) {

		err = -ENOTEMPTY;
		if (old_dir_entry && !f2fs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
						&new_page);
		if (!new_entry)
			goto out_dir;

		err = acquire_orphan_inode(sbi);
		if (err)
			goto put_out_dir;

		if (update_dent_inode(old_inode, &new_dentry->d_name)) {
			release_orphan_inode(sbi);
			goto put_out_dir;
		}

		f2fs_set_link(new_dir, new_entry, new_page, old_inode);

		new_inode->i_ctime = CURRENT_TIME;
		if (old_dir_entry)
			drop_nlink(new_inode);
		drop_nlink(new_inode);

		if (!new_inode->i_nlink)
			add_orphan_inode(sbi, new_inode->i_ino);
		else
			release_orphan_inode(sbi);

		update_inode_page(old_inode);
		update_inode_page(new_inode);
	} else {
		err = f2fs_add_link(new_dentry, old_inode);
		if (err)
			goto out_dir;

		if (old_dir_entry) {
			inc_nlink(new_dir);
			update_inode_page(new_dir);
		}
	}

	old_inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(old_inode);

	f2fs_delete_entry(old_entry, old_page, NULL);

	if (old_dir_entry) {
		if (old_dir != new_dir) {
			f2fs_set_link(old_inode, old_dir_entry,
						old_dir_page, new_dir);
		} else {
			kunmap(old_dir_page);
			f2fs_put_page(old_dir_page, 0);
		}
		drop_nlink(old_dir);
		update_inode_page(old_dir);
	}

	f2fs_unlock_op(sbi);
	return 0;

put_out_dir:
	if (PageLocked(new_page))
		f2fs_put_page(new_page, 1);
	else
		f2fs_put_page(new_page, 0);
out_dir:
	if (old_dir_entry) {
		kunmap(old_dir_page);
		f2fs_put_page(old_dir_page, 0);
	}
	f2fs_unlock_op(sbi);
out_old:
	kunmap(old_page);
	f2fs_put_page(old_page, 0);
out:
	return err;
}
Example #15
0
/*
 * attach the data from a bunch of pages on an inode to a call
 */
static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
			  struct kvec *iov)
{
	struct page *pages[8];
	unsigned count, n, loop, offset, to;
	pgoff_t first = call->first, last = call->last;
	int ret;

	_enter("");

	offset = call->first_offset;
	call->first_offset = 0;

	do {
		_debug("attach %lx-%lx", first, last);

		count = last - first + 1;
		if (count > ARRAY_SIZE(pages))
			count = ARRAY_SIZE(pages);
		n = find_get_pages_contig(call->mapping, first, count, pages);
		ASSERTCMP(n, ==, count);

		loop = 0;
		do {
			msg->msg_flags = 0;
			to = PAGE_SIZE;
			if (first + loop >= last)
				to = call->last_to;
			else
				msg->msg_flags = MSG_MORE;
			iov->iov_base = kmap(pages[loop]) + offset;
			iov->iov_len = to - offset;
			offset = 0;

			_debug("- range %u-%u%s",
			       offset, to, msg->msg_flags ? " [more]" : "");
			msg->msg_iov = (struct iovec *) iov;
			msg->msg_iovlen = 1;

			/* have to change the state *before* sending the last
			 * packet as RxRPC might give us the reply before it
			 * returns from sending the request */
			if (first + loop >= last)
				call->state = AFS_CALL_AWAIT_REPLY;
			ret = rxrpc_kernel_send_data(call->rxcall, msg,
						     to - offset);
			kunmap(pages[loop]);
			if (ret < 0)
				break;
		} while (++loop < count);
		first += count;

		for (loop = 0; loop < count; loop++)
			put_page(pages[loop]);
		if (ret < 0)
			break;
	} while (first <= last);

	_leave(" = %d", ret);
	return ret;
}
Example #16
0
/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_datagram_iovec(list,
							    offset - start,
							    to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
Example #17
0
static inline void dir_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}
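dir_put_page() bundles the kunmap() with the page-cache reference drop so a caller cannot forget half of the pair. A minimal sketch of a hypothetical lookup-side counterpart (not taken from any filesystem above) that leaves the page mapped for dir_put_page() to undo:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Sketch only: hypothetical counterpart to dir_put_page().  Reads page n
 * of a directory and returns it mapped; the caller releases it with
 * dir_put_page(), which supplies the matching kunmap() and
 * page_cache_release(). */
static struct page *sketch_dir_get_page(struct inode *dir, unsigned long n)
{
	struct page *page = read_mapping_page(dir->i_mapping, n, NULL);

	if (!IS_ERR(page))
		kmap(page);
	return page;
}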
Example #18
0
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int pos = 0;
	int i, copy = start - offset;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			int err = 0;
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
							frag->page_offset +
							offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list=list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				__wsum csum2 = 0;
				if (copy > len)
					copy = len;
				if (skb_copy_and_csum_datagram(list,
							       offset - start,
							       to, copy,
							       &csum2))
					goto fault;
				*csump = csum_block_add(*csump, csum2, pos);
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
Example #19
0
/*
 * Returns a pointer to a buffer containing at least LEN bytes of
 * filesystem starting at byte offset OFFSET into the filesystem.
 */
static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned int len)
{
	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *pages[BLKS_PER_BUF];
	unsigned i, blocknr, buffer;
	unsigned long devsize;
	char *data;

	if (!len)
		return NULL;
	blocknr = offset >> PAGE_CACHE_SHIFT;
	offset &= PAGE_CACHE_SIZE - 1;

	/* Check if an existing buffer already has the data.. */
	for (i = 0; i < READ_BUFFERS; i++) {
		unsigned int blk_offset;

		if (buffer_dev[i] != sb)
			continue;
		if (blocknr < buffer_blocknr[i])
			continue;
		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
		blk_offset += offset;
		if (blk_offset + len > BUFFER_SIZE)
			continue;
		return read_buffers[i] + blk_offset;
	}

	devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT;

	/* Ok, read in BLKS_PER_BUF pages completely first. */
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = NULL;

		if (blocknr + i < devsize) {
			page = read_mapping_page_async(mapping, blocknr + i,
									NULL);
			/* synchronous error? */
			if (IS_ERR(page))
				page = NULL;
		}
		pages[i] = page;
	}

	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];
		if (page) {
			wait_on_page_locked(page);
			if (!PageUptodate(page)) {
				/* asynchronous error */
				page_cache_release(page);
				pages[i] = NULL;
			}
		}
	}

	buffer = next_buffer;
	next_buffer = NEXT_BUFFER(buffer);
	buffer_blocknr[buffer] = blocknr;
	buffer_dev[buffer] = sb;

	data = read_buffers[buffer];
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];
		if (page) {
			memcpy(data, kmap(page), PAGE_CACHE_SIZE);
			kunmap(page);
			page_cache_release(page);
		} else
			memset(data, 0, PAGE_CACHE_SIZE);
		data += PAGE_CACHE_SIZE;
	}
	return read_buffers[buffer] + offset;
}
Example #20
0
/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			int err;
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iovec(frag_iter,
						    offset - start,
						    to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
Example #21
static int __recover_dot_dentries(struct inode *dir, nid_t pino)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct qstr dot = {.len = 1, .name = "."};
	struct qstr dotdot = {.len = 2, .name = ".."};
	struct f2fs_dir_entry *de;
	struct page *page;
	int err = 0;

	f2fs_lock_op(sbi);

	de = f2fs_find_entry(dir, &dot, &page);
	if (de) {
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);
	} else {
		err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
		if (err)
			goto out;
	}

	de = f2fs_find_entry(dir, &dotdot, &page);
	if (de) {
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);
	} else {
		err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
	}
out:
	if (!err) {
		clear_inode_flag(F2FS_I(dir), FI_INLINE_DOTS);
		mark_inode_dirty(dir);
	}

	f2fs_unlock_op(sbi);
	return err;
}

static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
		unsigned int flags)
{
	struct inode *inode = NULL;
	struct f2fs_dir_entry *de;
	struct page *page;
	nid_t ino;
	int err = 0;

	if (dentry->d_name.len > F2FS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	de = f2fs_find_entry(dir, &dentry->d_name, &page);
	if (!de)
		return d_splice_alias(inode, dentry);

	ino = le32_to_cpu(de->ino);
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);

	inode = f2fs_iget(dir->i_sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	if (f2fs_has_inline_dots(inode)) {
		err = __recover_dot_dentries(inode, dir->i_ino);
		if (err)
			goto err_out;
	}
	return d_splice_alias(inode, dentry);

err_out:
	iget_failed(inode);
	return ERR_PTR(err);
}

static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode = dentry->d_inode;
	struct f2fs_dir_entry *de;
	struct page *page;
	int err = -ENOENT;

	trace_f2fs_unlink_enter(dir, dentry);
	f2fs_balance_fs(sbi);

	de = f2fs_find_entry(dir, &dentry->d_name, &page);
	if (!de)
		goto fail;

	f2fs_lock_op(sbi);
	err = acquire_orphan_inode(sbi);
	if (err) {
		f2fs_unlock_op(sbi);
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);
		goto fail;
	}
	f2fs_delete_entry(de, page, dir, inode);
	f2fs_unlock_op(sbi);

	/* In order to evict this inode, we set it dirty */
	mark_inode_dirty(inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
fail:
	trace_f2fs_unlink_exit(inode, err);
	return err;
}

static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page;

	page = page_follow_link_light(dentry, nd);
	if (IS_ERR(page))
		return page;

	/* this is broken symlink case */
	if (*nd_get_link(nd) == 0) {
		kunmap(page);
		page_cache_release(page);
		return ERR_PTR(-ENOENT);
	}
	return page;
}

static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
					const char *symname)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	size_t len = strlen(symname);
	size_t p_len;
	char *p_str;
	struct f2fs_str disk_link = FSTR_INIT(NULL, 0);
	struct f2fs_encrypted_symlink_data *sd = NULL;
	int err;

	if (len > dir->i_sb->s_blocksize)
		return -ENAMETOOLONG;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (f2fs_encrypted_inode(inode))
		inode->i_op = &f2fs_encrypted_symlink_inode_operations;
	else
		inode->i_op = &f2fs_symlink_inode_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out;
	f2fs_unlock_op(sbi);
	alloc_nid_done(sbi, inode->i_ino);

	if (f2fs_encrypted_inode(dir)) {
		struct qstr istr = QSTR_INIT(symname, len);

		err = f2fs_get_encryption_info(inode);
		if (err)
			goto err_out;

		err = f2fs_fname_crypto_alloc_buffer(inode, len, &disk_link);
		if (err)
			goto err_out;

		err = f2fs_fname_usr_to_disk(inode, &istr, &disk_link);
		if (err < 0)
			goto err_out;

		p_len = encrypted_symlink_data_len(disk_link.len) + 1;

		if (p_len > dir->i_sb->s_blocksize) {
			err = -ENAMETOOLONG;
			goto err_out;
		}

		sd = kzalloc(p_len, GFP_NOFS);
		if (!sd) {
			err = -ENOMEM;
			goto err_out;
		}
		memcpy(sd->encrypted_path, disk_link.name, disk_link.len);
		sd->len = cpu_to_le16(disk_link.len);
		p_str = (char *)sd;
	} else {
		p_len = len + 1;
		p_str = (char *)symname;
	}

	err = page_symlink(inode, p_str, p_len);

err_out:
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	/*
	 * Let's flush symlink data in order to avoid broken symlink as much as
	 * possible. Nevertheless, fsyncing is the best way, but there is no
	 * way to get a file descriptor in order to flush that.
	 *
	 * Note that, it needs to do dir->fsync to make this recoverable.
	 * If the symlink path is stored into inline_data, there is no
	 * performance regression.
	 */
	if (!err)
		filemap_write_and_wait_range(inode->i_mapping, 0, p_len - 1);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);

	kfree(sd);
	f2fs_fname_crypto_free_buffer(&disk_link);
	return err;
out:
	handle_failed_inode(inode);
	return err;
}

static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFDIR | mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_dir_inode_operations;
	inode->i_fop = &f2fs_dir_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);

	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out_fail;
	f2fs_unlock_op(sbi);

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;

out_fail:
	clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
	handle_failed_inode(inode);
	return err;
}

static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	if (f2fs_empty_dir(inode))
		return f2fs_unlink(dir, dentry);
	return -ENOTEMPTY;
}

static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
				umode_t mode, dev_t rdev)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	init_special_inode(inode, inode->i_mode, rdev);
	inode->i_op = &f2fs_special_inode_operations;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out;
	f2fs_unlock_op(sbi);

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;
out:
	handle_failed_inode(inode);
	return err;
}

static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct page *old_dir_page;
	struct page *old_page, *new_page;
	struct f2fs_dir_entry *old_dir_entry = NULL;
	struct f2fs_dir_entry *old_entry;
	struct f2fs_dir_entry *new_entry;
	int err = -ENOENT;

	if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
		!f2fs_is_child_context_consistent_with_parent(new_dir,
							old_inode)) {
		err = -EPERM;
		goto out;
	}

	f2fs_balance_fs(sbi);

	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
	if (!old_entry)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
		if (!old_dir_entry)
			goto out_old;
	}

	if (new_inode) {

		err = -ENOTEMPTY;
		if (old_dir_entry && !f2fs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
						&new_page);
		if (!new_entry)
			goto out_dir;

		f2fs_lock_op(sbi);

		err = acquire_orphan_inode(sbi);
		if (err)
			goto put_out_dir;

		if (update_dent_inode(old_inode, new_inode,
						&new_dentry->d_name)) {
			release_orphan_inode(sbi);
			goto put_out_dir;
		}

		f2fs_set_link(new_dir, new_entry, new_page, old_inode);

		new_inode->i_ctime = CURRENT_TIME;
		down_write(&F2FS_I(new_inode)->i_sem);
		if (old_dir_entry)
			drop_nlink(new_inode);
		drop_nlink(new_inode);
		up_write(&F2FS_I(new_inode)->i_sem);

		mark_inode_dirty(new_inode);

		if (!new_inode->i_nlink)
			add_orphan_inode(sbi, new_inode->i_ino);
		else
			release_orphan_inode(sbi);

		update_inode_page(old_inode);
		update_inode_page(new_inode);
	} else {
		f2fs_lock_op(sbi);

		err = f2fs_add_link(new_dentry, old_inode);
		if (err) {
			f2fs_unlock_op(sbi);
			goto out_dir;
		}

		if (old_dir_entry) {
			inc_nlink(new_dir);
			update_inode_page(new_dir);
		}
	}

	down_write(&F2FS_I(old_inode)->i_sem);
	file_lost_pino(old_inode);
	if (new_inode && file_enc_name(new_inode))
		file_set_enc_name(old_inode);
	up_write(&F2FS_I(old_inode)->i_sem);

	old_inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(old_inode);

	f2fs_delete_entry(old_entry, old_page, old_dir, NULL);

	if (old_dir_entry) {
		if (old_dir != new_dir) {
			f2fs_set_link(old_inode, old_dir_entry,
						old_dir_page, new_dir);
			update_inode_page(old_inode);
		} else {
			f2fs_dentry_kunmap(old_inode, old_dir_page);
			f2fs_put_page(old_dir_page, 0);
		}
		drop_nlink(old_dir);
		mark_inode_dirty(old_dir);
		update_inode_page(old_dir);
	}

	f2fs_unlock_op(sbi);

	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;

put_out_dir:
	f2fs_unlock_op(sbi);
	f2fs_dentry_kunmap(new_dir, new_page);
	f2fs_put_page(new_page, 0);
out_dir:
	if (old_dir_entry) {
		f2fs_dentry_kunmap(old_inode, old_dir_page);
		f2fs_put_page(old_dir_page, 0);
	}
out_old:
	f2fs_dentry_kunmap(old_dir, old_page);
	f2fs_put_page(old_page, 0);
out:
	return err;
}

#ifdef CONFIG_F2FS_FS_ENCRYPTION
static void *f2fs_encrypted_follow_link(struct dentry *dentry,
						struct nameidata *nd)
{
	struct page *cpage = NULL;
	char *caddr, *paddr = NULL;
	struct f2fs_str cstr;
	struct f2fs_str pstr = FSTR_INIT(NULL, 0);
	struct inode *inode = dentry->d_inode;
	struct f2fs_encrypted_symlink_data *sd;
	loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
	u32 max_size = inode->i_sb->s_blocksize;
	int res;

	res = f2fs_get_encryption_info(inode);
	if (res)
		return ERR_PTR(res);

	cpage = read_mapping_page(inode->i_mapping, 0, NULL);
	if (IS_ERR(cpage))
		return cpage;
	caddr = kmap(cpage);
	caddr[size] = 0;

	/* Symlink is encrypted */
	sd = (struct f2fs_encrypted_symlink_data *)caddr;
	cstr.len = le16_to_cpu(sd->len);
	cstr.name = kmalloc(cstr.len, GFP_NOFS);
	if (!cstr.name) {
		res = -ENOMEM;
		goto errout;
	}
	memcpy(cstr.name, sd->encrypted_path, cstr.len);

	/* this is broken symlink case */
	if (cstr.name[0] == 0 && cstr.len == 0) {
		res = -ENOENT;
		goto errout;
	}

	if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) >
								max_size) {
		/* Symlink data on the disk is corrupted */
		res = -EIO;
		goto errout;
	}
	res = f2fs_fname_crypto_alloc_buffer(inode, cstr.len, &pstr);
	if (res)
		goto errout;

	res = f2fs_fname_disk_to_usr(inode, NULL, &cstr, &pstr);
	if (res < 0)
		goto errout;

	kfree(cstr.name);

	paddr = pstr.name;

	/* Null-terminate the name */
	paddr[res] = '\0';
	nd_set_link(nd, paddr);

	kunmap(cpage);
	page_cache_release(cpage);
	return NULL;
errout:
	kfree(cstr.name);
	f2fs_fname_crypto_free_buffer(&pstr);
	kunmap(cpage);
	page_cache_release(cpage);
	return ERR_PTR(res);
}
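
The corruption check above (cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1 > max_size) only makes sense against the on-disk layout of the encrypted symlink payload. A minimal sketch of that layout, reconstructed from the fields this function touches rather than copied from the f2fs headers, so the trailing "- 1" (the one-byte flexible tail) is visible:

/* Sketch only: layout implied by sd->len / sd->encrypted_path above.
 * The real definition lives in the f2fs crypto headers. */
struct f2fs_encrypted_symlink_data_sketch {
	__le16 len;		/* byte length of the ciphertext that follows */
	char encrypted_path[1];	/* ciphertext; 'len' bytes on disk */
} __packed;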

void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
		void *cookie)
{
	char *s = nd_get_link(nd);
	if (!IS_ERR(s))
		kfree(s);
}

const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
	.readlink       = generic_readlink,
	.follow_link    = f2fs_encrypted_follow_link,
	.put_link       = kfree_put_link,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
};
#endif

const struct inode_operations f2fs_dir_inode_operations = {
	.create		= f2fs_create,
	.lookup		= f2fs_lookup,
	.link		= f2fs_link,
	.unlink		= f2fs_unlink,
	.symlink	= f2fs_symlink,
	.mkdir		= f2fs_mkdir,
	.rmdir		= f2fs_rmdir,
	.mknod		= f2fs_mknod,
	.rename		= f2fs_rename,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_symlink_inode_operations = {
	.readlink       = generic_readlink,
	.follow_link    = f2fs_follow_link,
	.put_link       = page_put_link,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_special_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr        = f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr       = generic_setxattr,
	.getxattr       = generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr    = generic_removexattr,
#endif
};
Exemple #22
0
static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
		      struct inode *new_dir, struct dentry *new_dentry,
		      unsigned int flags)
{
	struct inode *old_inode = d_inode(old_dentry);
	struct inode *new_inode = d_inode(new_dentry);
	struct page *dir_page = NULL;
	struct ufs_dir_entry * dir_de = NULL;
	struct page *old_page;
	struct ufs_dir_entry *old_de;
	int err = -ENOENT;

	if (flags & ~RENAME_NOREPLACE)
		return -EINVAL;

	old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
	if (!old_de)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		dir_de = ufs_dotdot(old_inode, &dir_page);
		if (!dir_de)
			goto out_old;
	}

	if (new_inode) {
		struct page *new_page;
		struct ufs_dir_entry *new_de;

		err = -ENOTEMPTY;
		if (dir_de && !ufs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
		if (!new_de)
			goto out_dir;
		ufs_set_link(new_dir, new_de, new_page, old_inode, 1);
		new_inode->i_ctime = current_time(new_inode);
		if (dir_de)
			drop_nlink(new_inode);
		inode_dec_link_count(new_inode);
	} else {
		err = ufs_add_link(new_dentry, old_inode);
		if (err)
			goto out_dir;
		if (dir_de)
			inode_inc_link_count(new_dir);
	}

	/*
	 * Like most other Unix systems, set the ctime for inodes on a
	 * rename.
	 */
	old_inode->i_ctime = current_time(old_inode);

	ufs_delete_entry(old_dir, old_de, old_page);
	mark_inode_dirty(old_inode);

	if (dir_de) {
		if (old_dir != new_dir)
			ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0);
		else {
			kunmap(dir_page);
			put_page(dir_page);
		}
		inode_dec_link_count(old_dir);
	}
	return 0;

out_dir:
	if (dir_de) {
		kunmap(dir_page);
		put_page(dir_page);
	}
out_old:
	kunmap(old_page);
	put_page(old_page);
out:
	return err;
}
Exemple #23
0
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
	int err;
	struct page *ipage;
	struct dnode_of_data dn;
	void *src_addr, *dst_addr;
	block_t new_blk_addr;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

	f2fs_lock_op(sbi);
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		f2fs_unlock_op(sbi);
		return PTR_ERR(ipage);
	}

	/*
	 * i_addr[0] is not used for inline data,
	 * so reserving new block will not destroy inline data
	 */
	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err) {
		f2fs_unlock_op(sbi);
		return err;
	}

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	SetPageUptodate(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	write_data_page(page, &dn, &new_blk_addr, &fio);
	update_extent_cache(new_blk_addr, &dn);
	f2fs_wait_on_page_writeback(page, DATA);

	/* clear inline data and flag after data writeback */
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
	stat_dec_inline_inode(inode);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
	return err;
}

int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
{
	struct page *page;
	int err;

	if (!f2fs_has_inline_data(inode))
		return 0;
	else if (to_size <= MAX_INLINE_DATA)
		return 0;

	page = grab_cache_page_write_begin(inode->i_mapping, 0, AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	err = __f2fs_convert_inline_data(inode, page);
	f2fs_put_page(page, 1);
	return err;
}
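
f2fs_convert_inline_data() is the entry point a write path would call before extending a file past the inline area. A hedged sketch of such a call site; pos and len are illustrative names for the pending write and are not taken from this file:

/* Illustrative only: make sure inline data is moved to a regular block
 * before a write of 'len' bytes at 'pos' would overflow MAX_INLINE_DATA. */
static int example_prepare_write(struct inode *inode, loff_t pos, size_t len)
{
	return f2fs_convert_inline_data(inode, pos + len);
}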

int f2fs_write_inline_data(struct inode *inode,
			   struct page *page, unsigned size)
{
	void *src_addr, *dst_addr;
	struct page *ipage;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;
	ipage = dn.inode_page;

	zero_user_segment(ipage, INLINE_DATA_OFFSET,
				 INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	src_addr = kmap(page);
	dst_addr = inline_data_addr(ipage);
	memcpy(dst_addr, src_addr, size);
	kunmap(page);

	/* Release the first data block if it is allocated */
	if (!f2fs_has_inline_data(inode)) {
		truncate_data_blocks_range(&dn, 1);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		stat_inc_inline_inode(inode);
	}

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	return 0;
}
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
				 struct buffer_head *bh)
{
	kunmap(bh->b_page);
	brelse(bh);
}
Exemple #25
0
/**
 * Compute the speed of specified hash function
 *
 * Run a speed test of the given hash algorithm.  Each iteration hashes
 * buf_len = max(PAGE_SIZE, 1 MB) bytes, read repeatedly from a single
 * internally allocated page.  The speed is stored internally in the
 * cfs_crypto_hash_speeds[] array, and is available through the
 * cfs_crypto_hash_speed() function.
 *
 * \param[in] hash_alg	hash algorithm id (CFS_HASH_ALG_*)
 */
static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg)
{
	int			buf_len = max(PAGE_SIZE, 1048576UL);
	void			*buf;
	unsigned long		start, end;
	int			bcount, err = 0;
	struct page		*page;
	unsigned char		hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
	unsigned int		hash_len = sizeof(hash);

	page = alloc_page(GFP_KERNEL);
	if (page == NULL) {
		err = -ENOMEM;
		goto out_err;
	}

	buf = kmap(page);
	memset(buf, 0xAD, PAGE_SIZE);
	kunmap(page);

	for (start = jiffies, end = start + msecs_to_jiffies(MSEC_PER_SEC),
	     bcount = 0;
	     time_before(jiffies, end) && err == 0; bcount++) {
		struct cfs_crypto_hash_desc *hdesc;
		int i;

		hdesc = cfs_crypto_hash_init(hash_alg, NULL, 0);
		if (IS_ERR(hdesc)) {
			err = PTR_ERR(hdesc);
			break;
		}

		for (i = 0; i < buf_len / PAGE_SIZE; i++) {
			err = cfs_crypto_hash_update_page(hdesc, page, 0,
							  PAGE_SIZE);
			if (err != 0)
				break;
		}

		err = cfs_crypto_hash_final(hdesc, hash, &hash_len);
		if (err != 0)
			break;
	}
	end = jiffies;
	__free_page(page);
out_err:
	if (err != 0) {
		cfs_crypto_hash_speeds[hash_alg] = err;
		CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n",
		       cfs_crypto_hash_name(hash_alg), err);
	} else {
		unsigned long   tmp;

		tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) *
		       1000) / (1024 * 1024);
		cfs_crypto_hash_speeds[hash_alg] = (int)tmp;
		CDEBUG(D_CONFIG, "Crypto hash algorithm %s speed = %d MB/s\n",
		       cfs_crypto_hash_name(hash_alg),
		       cfs_crypto_hash_speeds[hash_alg]);
	}
}
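
The MB/s value cached in cfs_crypto_hash_speeds[] comes from the integer arithmetic at the end of the function: total bytes hashed, divided by elapsed milliseconds, scaled to bytes per second, then to MiB. A worked example with made-up numbers (200 iterations over the 1 MiB buffer in 1000 ms):

/* Worked example of the speed computation above, with illustrative inputs:
 * bcount = 200, buf_len = 1048576, elapsed = 1000 ms
 * -> ((200 * 1048576 / 1000) * 1000) / (1024 * 1024) = 199 (~200 MB/s;
 *    the small loss comes from the integer divisions). */
static unsigned long example_hash_speed_mbs(unsigned long bcount,
					    unsigned long buf_len,
					    unsigned long elapsed_ms)
{
	return ((bcount * buf_len / elapsed_ms) * 1000) / (1024 * 1024);
}
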
void
VMCIHost_ReleaseUserMemory(PageStoreAttachInfo *attach,      // IN/OUT
                           VMCIQueue *produceQ,              // OUT
                           VMCIQueue *consumeQ)              // OUT
{

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    int i;

    ASSERT(attach->producePages);
    ASSERT(attach->consumePages);

    kunmap(attach->producePages[0]);
    kunmap(attach->consumePages[0]);

    for (i = 0; i < attach->numProducePages; i++) {
        ASSERT(attach->producePages[i]);

        set_page_dirty(attach->producePages[i]);
        page_cache_release(attach->producePages[i]);
    }

    for (i = 0; i < attach->numConsumePages; i++) {
        ASSERT(attach->consumePages[i]);

        set_page_dirty(attach->consumePages[i]);
        page_cache_release(attach->consumePages[i]);
    }

    VMCI_FreeKernelMem(attach->producePages,
                       attach->numProducePages *
                       sizeof attach->producePages[0]);
    VMCI_FreeKernelMem(attach->consumePages,
                       attach->numConsumePages *
                       sizeof attach->consumePages[0]);
#else
    /*
     * Host queue pair support for earlier kernels temporarily
     * disabled. See bug 365496.
     */

    ASSERT_NOT_IMPLEMENTED(FALSE);
#if 0
    kunmap(attach->produceIoBuf->maplist[0]);
    kunmap(attach->consumeIoBuf->maplist[0]);

    mark_dirty_kiobuf(attach->produceIoBuf,
                      attach->numProducePages * PAGE_SIZE);
    unmap_kiobuf(attach->produceIoBuf);

    mark_dirty_kiobuf(attach->consumeIoBuf,
                      attach->numConsumePages * PAGE_SIZE);
    unmap_kiobuf(attach->consumeIoBuf);

    VMCI_FreeKernelMem(attach->produceIoBuf,
                       sizeof *attach->produceIoBuf);
    VMCI_FreeKernelMem(attach->consumeIoBuf,
                       sizeof *attach->consumeIoBuf);
#endif
#endif
}
/**
 * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
 * @inode: inode of metadata file using this allocator
 * @req: nilfs_palloc_req structure exchanged for the allocation
 */
int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
				     struct nilfs_palloc_req *req)
{
	struct buffer_head *desc_bh, *bitmap_bh;
	struct nilfs_palloc_group_desc *desc;
	unsigned char *bitmap;
	void *desc_kaddr, *bitmap_kaddr;
	unsigned long group, maxgroup, ngroups;
	unsigned long group_offset, maxgroup_offset;
	unsigned long n, entries_per_group, groups_per_desc_block;
	unsigned long i, j;
	int pos, ret;

	ngroups = nilfs_palloc_groups_count(inode);
	maxgroup = ngroups - 1;
	group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset);
	entries_per_group = nilfs_palloc_entries_per_group(inode);
	groups_per_desc_block = nilfs_palloc_groups_per_desc_block(inode);

	for (i = 0; i < ngroups; i += n) {
		if (group >= ngroups) {
			/* wrap around */
			group = 0;
			maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
						      &maxgroup_offset) - 1;
		}
		ret = nilfs_palloc_get_desc_block(inode, group, 1, &desc_bh);
		if (ret < 0)
			return ret;
		desc_kaddr = kmap(desc_bh->b_page);
		desc = nilfs_palloc_block_get_group_desc(
			inode, group, desc_bh, desc_kaddr);
		n = nilfs_palloc_rest_groups_in_desc_block(inode, group,
							   maxgroup);
		for (j = 0; j < n; j++, desc++, group++) {
			if (nilfs_palloc_group_desc_nfrees(inode, group, desc)
			    > 0) {
				ret = nilfs_palloc_get_bitmap_block(
					inode, group, 1, &bitmap_bh);
				if (ret < 0)
					goto out_desc;
				bitmap_kaddr = kmap(bitmap_bh->b_page);
				bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
				pos = nilfs_palloc_find_available_slot(
					inode, group, group_offset, bitmap,
					entries_per_group);
				if (pos >= 0) {
					/* found a free entry */
					nilfs_palloc_group_desc_add_entries(
						inode, group, desc, -1);
					req->pr_entry_nr =
						entries_per_group * group + pos;
					kunmap(desc_bh->b_page);
					kunmap(bitmap_bh->b_page);

					req->pr_desc_bh = desc_bh;
					req->pr_bitmap_bh = bitmap_bh;
					return 0;
				}
				kunmap(bitmap_bh->b_page);
				brelse(bitmap_bh);
			}

			group_offset = 0;
		}

		kunmap(desc_bh->b_page);
		brelse(desc_bh);
	}

	/* no entries left */
	return -ENOSPC;

 out_desc:
	kunmap(desc_bh->b_page);
	brelse(desc_bh);
	return ret;
}
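
On success the request keeps both buffer heads (req->pr_desc_bh and req->pr_bitmap_bh) pinned for the caller, so every prepare must be paired with a commit or an abort. A hedged sketch of that pairing; nilfs_palloc_commit_alloc_entry() and nilfs_palloc_abort_alloc_entry() are assumed from the nilfs2 allocator's usual prepare/commit naming and should be checked against alloc.h before relying on the exact signatures:

/* Illustrative pairing, assuming the usual nilfs2 prepare/commit helpers. */
static int example_alloc_entry(struct inode *inode, __u64 *out_nr)
{
	struct nilfs_palloc_req req = { .pr_entry_nr = 0 };
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(inode, &req);
	if (ret < 0)
		return ret;	/* -ENOSPC when every group is full */

	*out_nr = req.pr_entry_nr;
	/* ...initialize the new entry; on failure call
	 * nilfs_palloc_abort_alloc_entry(inode, &req) instead... */
	nilfs_palloc_commit_alloc_entry(inode, &req);
	return 0;
}
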
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                                        struct iov_iter *i)
{
    size_t skip, copy, left, wanted;
    const struct iovec *iov;
    char __user *buf;
    void *kaddr, *to;

    if (unlikely(bytes > i->count))
        bytes = i->count;

    if (unlikely(!bytes))
        return 0;

    wanted = bytes;
    iov = i->iov;
    skip = i->iov_offset;
    buf = iov->iov_base + skip;
    copy = min(bytes, iov->iov_len - skip);

    if (!fault_in_pages_readable(buf, copy)) {
        kaddr = kmap_atomic(page);
        to = kaddr + offset;

        /* first chunk, usually the only one */
        left = __copy_from_user_inatomic(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;

        while (unlikely(!left && bytes)) {
            iov++;
            buf = iov->iov_base;
            copy = min(bytes, iov->iov_len);
            left = __copy_from_user_inatomic(to, buf, copy);
            copy -= left;
            skip = copy;
            to += copy;
            bytes -= copy;
        }
        if (likely(!bytes)) {
            kunmap_atomic(kaddr);
            goto done;
        }
        offset = to - kaddr;
        buf += copy;
        kunmap_atomic(kaddr);
        copy = min(bytes, iov->iov_len - skip);
    }
    /* Too bad - revert to non-atomic kmap */
    kaddr = kmap(page);
    to = kaddr + offset;
    left = __copy_from_user(to, buf, copy);
    copy -= left;
    skip += copy;
    to += copy;
    bytes -= copy;
    while (unlikely(!left && bytes)) {
        iov++;
        buf = iov->iov_base;
        copy = min(bytes, iov->iov_len);
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip = copy;
        to += copy;
        bytes -= copy;
    }
    kunmap(page);
done:
    if (skip == iov->iov_len) {
        iov++;
        skip = 0;
    }
    i->count -= wanted - bytes;
    i->nr_segs -= iov - i->iov;
    i->iov = iov;
    i->iov_offset = skip;
    return wanted - bytes;
}
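
This is the iovec slow path; in mainline the generic copy_page_from_iter() wrapper dispatches to it, and callers normally see only the wrapper. A hedged sketch of a caller, assuming the usual wrapper signature from lib/iov_iter.c:

/* Illustrative caller: copy user data for one page of a buffered write.
 * A short return value means the user buffer faulted part-way through. */
static size_t example_fill_page(struct page *page, size_t offset,
                                size_t bytes, struct iov_iter *iter)
{
    return copy_page_from_iter(page, offset, bytes, iter);
}
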
Exemple #29
0
static int
HgfsDoWrite(HgfsHandle handle,             // IN: Handle for this file
            HgfsDataPacket dataPacket[],   // IN: Data description
            uint32 numEntries,             // IN: Number of entries in dataPacket
            loff_t offset)                 // IN: Offset to begin writing at
{
   HgfsReq *req;
   int result = 0;
   HgfsOp opUsed;
   uint32 requiredSize = 0;
   uint32 actualSize = 0;
   char *payload = NULL;
   uint32 reqSize;
   HgfsStatus replyStatus;
   char *buf;
   uint32 count;
   ASSERT(numEntries == 1);

   count = dataPacket[0].len;

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: out of memory while "
              "getting new request\n"));
      result = -ENOMEM;
      goto out;
   }

 retry:
   opUsed = hgfsVersionWrite;
   if (opUsed == HGFS_OP_WRITE_FAST_V4) {
      HgfsRequest *header;
      HgfsRequestWriteV3 *request;

      header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req));
      header->id = req->id;
      header->op = opUsed;

      request = (HgfsRequestWriteV3 *)(HGFS_REQ_PAYLOAD_V3(req));
      request->file = handle;
      request->flags = 0;
      request->offset = offset;
      request->requiredSize = count;
      request->reserved = 0;
      payload = request->payload;
      requiredSize = request->requiredSize;

      req->dataPacket = kmalloc(numEntries * sizeof req->dataPacket[0],
                                GFP_KERNEL);
      if (!req->dataPacket) {
         LOG(4, (KERN_WARNING "%s: Failed to allocate mem\n", __func__));
         result = -ENOMEM;
         goto out;
      }
      memcpy(req->dataPacket, dataPacket, numEntries * sizeof req->dataPacket[0]);
      req->numEntries = numEntries;
      reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(request);
      req->payloadSize = reqSize;
      LOG(4, (KERN_WARNING "VMware hgfs: Fast Write V4\n"));
   } else if (opUsed == HGFS_OP_WRITE_V3) {
      HgfsRequest *header;
      HgfsRequestWriteV3 *request;

      header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req));
      header->id = req->id;
      header->op = opUsed;

      request = (HgfsRequestWriteV3 *)(HGFS_REQ_PAYLOAD_V3(req));
      request->file = handle;
      request->flags = 0;
      request->offset = offset;
      request->requiredSize = MIN(req->bufferSize - sizeof *header -
                                  sizeof *request, count);
      LOG(4, (KERN_WARNING "VMware hgfs: Using write V3\n"));
      request->reserved = 0;
      payload = request->payload;
      requiredSize = request->requiredSize;
      reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(request);
      req->dataPacket = NULL;
      req->numEntries = 0;
      buf = kmap(dataPacket[0].page) + dataPacket[0].offset;
      memcpy(payload, buf, requiredSize);
      kunmap(dataPacket[0].page);

      req->payloadSize = reqSize + requiredSize - 1;
   } else {
      HgfsRequestWrite *request;

      request = (HgfsRequestWrite *)(HGFS_REQ_PAYLOAD(req));
      request->header.id = req->id;
      request->header.op = opUsed;
      request->file = handle;
      request->flags = 0;
      request->offset = offset;
      request->requiredSize = MIN(req->bufferSize - sizeof *request, count);
      payload = request->payload;
      requiredSize = request->requiredSize;
      reqSize = sizeof *request;
      req->dataPacket = NULL;
      req->numEntries = 0;
      buf = kmap(dataPacket[0].page) + dataPacket[0].offset;
      memcpy(payload, buf, requiredSize);
      kunmap(dataPacket[0].page);

      req->payloadSize = reqSize + requiredSize - 1;
   }

   /* Send the request and process the reply. */
   result = HgfsSendRequest(req);
   if (result == 0) {
      /* Get the reply. */
      replyStatus = HgfsReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);

      LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: res %u\n", result));
      switch (result) {
      case 0:
         if (opUsed == HGFS_OP_WRITE_V3 || opUsed == HGFS_OP_WRITE_FAST_V4) {
            actualSize = ((HgfsReplyWriteV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize;
         } else {
            actualSize = ((HgfsReplyWrite *)HGFS_REQ_PAYLOAD(req))->actualSize;
         }

         /* Return result. */
         LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoWrite: wrote %u bytes\n",
                 actualSize));
         result = actualSize;
         break;

      case -EPROTO:
         /* Retry with older version(s). Set globally. */
         switch (opUsed) {
         case HGFS_OP_WRITE_FAST_V4:
            LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: Fast Write V4 not "
                    "supported. Falling back to V3 write.\n"));
            if (req->dataPacket) {
               kfree(req->dataPacket);
               req->dataPacket = NULL;
            }
            hgfsVersionWrite = HGFS_OP_WRITE_V3;
            goto retry;

         case HGFS_OP_WRITE_V3:
            LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: Version 3 not "
                    "supported. Falling back to version 1.\n"));
            hgfsVersionWrite = HGFS_OP_WRITE;
            goto retry;

         default:
            break;
         }
         break;

      default:
         LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: server "
                 "returned error: %d\n", result));
         break;
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: timed out\n"));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: server "
              "returned error: %d\n", result));
   } else {
      LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: unknown error: "
              "%d\n", result));
   }

out:
   if (req->dataPacket) {
      kfree(req->dataPacket);
   }
   HgfsFreeRequest(req);
   return result;
}
Exemple #30
0
/* I have followed the behavior of ecryptfs: write_begin sets up the page
 * for writing. The following changes are made:
 * 1. If Encrypt is not enabled, just grab the page and set it up for
 *    write_begin, much as ecryptfs does. When we seek to a position past
 *    EOF and write, the copied byte count is adjusted accordingly and
 *    passed down. For example, if the file contains 2000 bytes and we write
 *    1000 bytes from the 3000th position (by lseeking), then from contains
 *    3000 and copied contains 1000, so we can directly copy 1000 bytes to
 *    the lower file.
 * 2. When Encrypt is enabled, three cases are possible; they are commented
 *    below. The zero-byte (hole-filling) cases must be handled explicitly.
 */
int wrapfs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct page *page;
	char *page_data;
	pgoff_t index;
	int err = 0;
	struct inode *cur_inode, *lower_inode;
	unsigned int offset = 0;

#ifdef WRAPFS_CRYPTO
	/* pgoff_t is unsigned long, loff_t is long long */
	loff_t cur_inode_size;
	pgoff_t cur_inode_last_index;
	unsigned int cur_inode_end_offset;
	unsigned int zero_count;
	char *page_data_zeros;
	struct page *page_to_zeros = NULL;
	pgoff_t tempindex;
	pgoff_t tempoffset;
	pgoff_t bytes_to_write;
	struct file *lower_file = wrapfs_lower_file(file);
	char *encrypted_buf;
	mm_segment_t old_fs;
#endif

	wrapfs_debug("");
	wrapfs_debug_aops(WRAPFS_SB(file->f_dentry->d_sb)->wrapfs_debug_a_ops,
				"");

	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & (PAGE_CACHE_SIZE - 1);
	wrapfs_debug("index : %lu, offset : %d\n", index, offset);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		wrapfs_debug("grab_cache_page_write_begin returned NULL!!");
		err = -ENOMEM;
		goto out;
	}
	page_data = (char *)kmap(page);
	*pagep = page;

	cur_inode = file->f_path.dentry->d_inode;
	if (cur_inode)
		lower_inode = wrapfs_lower_inode(cur_inode);

#ifdef WRAPFS_CRYPTO
	/* cur_inode* refers to the file's existing attributes */
	cur_inode_size = cur_inode->i_size;
	cur_inode_last_index = cur_inode_size >> (PAGE_CACHE_SHIFT);
	cur_inode_end_offset = cur_inode_size & (PAGE_CACHE_SIZE - 1);

	wrapfs_debug(
	"cur_inode->i_size : %lu, i_size_read(page->mapping->host) : %lu\n",
	(unsigned long)cur_inode->i_size,
	(unsigned long)i_size_read(page->mapping->host));

	if (index == cur_inode_last_index) {
		/* The page to write is same as last page in file */
		wrapfs_debug("");
		if (pos > cur_inode_size) {
			/* Need to fill zeroes upto pos,
			 * from cur_inode_size */
			wrapfs_debug("");
			zero_count = pos - cur_inode_size;
			memset(page_data + cur_inode_end_offset, 0x00,
				zero_count);
		} else if (pos == cur_inode_size) {
			wrapfs_debug("");
			/* Fine. Do a normal encryption in write_end */
		} else if (pos < cur_inode_size) {
			/* Fine. Do a normal encryption in write_end */
			wrapfs_debug("");

		}
	} else if (index < cur_inode_last_index) {
		/* The page to write is an intermediate file page.
		 * No special cases need to be handled here.
		 */
		wrapfs_debug("");
	} else if (index > cur_inode_last_index) {
		/* If we skip to a page more than the last page in file.
		 * Need to fill holes between cur_inode_last_index and index.
		 * First filling hole in the new index page upto offset.
		 */
		wrapfs_debug("");
		memset(page_data, 0x00, offset);
		tempoffset = cur_inode_end_offset;
		tempindex = cur_inode_last_index;
		lower_file->f_pos = cur_inode_size;
		encrypted_buf = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (encrypted_buf == NULL) {
			wrapfs_debug("kmalloc failed!!");
			err = -ENOMEM;
			goto out_holes;
		}
		/* Fill zeroes in page cur_inode_last_index from cur off to end
		 * Then fill all pages from (cur_inode_last_index + 1) to index
		 * These must also be encrypted and written to lower file here
		 * itself as they are not reflected in write_end.
		 */
		while (tempindex < index) {
			page_to_zeros =
			grab_cache_page_write_begin(cur_inode->i_mapping,
							tempindex, flags);
			if (page_to_zeros == NULL) {
				wrapfs_debug("grab_cache_page failed!!");
				kfree(encrypted_buf);
				err = -ENOMEM;
				goto out_holes;
			}
			page_data_zeros = (char *)kmap(page_to_zeros);
			bytes_to_write = PAGE_CACHE_SIZE - tempoffset;
			memset(page_data_zeros + tempoffset, 0x00,
				bytes_to_write);
			err = my_encrypt(page_data_zeros, PAGE_CACHE_SIZE,
				encrypted_buf,
				PAGE_CACHE_SIZE,
				WRAPFS_SB(file->f_dentry->d_sb)->key,
				WRAPFS_CRYPTO_KEY_LEN);
			if (err < 0) {
				wrapfs_debug("Encryption failed!!");
				kfree(encrypted_buf);
				err = -EINVAL;
				goto free_pages_holes;
			}
			flush_dcache_page(page_to_zeros);

			old_fs = get_fs();
			set_fs(KERNEL_DS);
			err = vfs_write(lower_file,
					encrypted_buf + tempoffset,
					bytes_to_write,
					&lower_file->f_pos);
			set_fs(old_fs);
free_pages_holes:
			kunmap(page_to_zeros);
			unlock_page(page_to_zeros);
			page_cache_release(page_to_zeros);
			if (err < 0) {
				kfree(encrypted_buf);
				goto out_holes;
			}
			err = 0;
			mark_inode_dirty_sync(cur_inode);
			tempoffset = 0;
			tempindex++;
		} /* while ends */
out_holes:
		if ((err < 0) && (page_to_zeros != NULL))
			ClearPageUptodate(page_to_zeros);
	}
#endif

out:
	if (page)
		kunmap(page);
	if (unlikely(err)) {
		if (page) {
			unlock_page(page);
			page_cache_release(page);
		}
		*pagep = NULL;
	}
	wrapfs_debug_aops(WRAPFS_SB(file->f_dentry->d_sb)->wrapfs_debug_a_ops,
				"err : %d", err);
	return err;
}
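
The zero-filling cases in write_begin reduce to a little page arithmetic: where the old EOF sits, which page the new write lands in, and how many zero bytes lie between the two. A minimal sketch of that arithmetic, using the same shifts and masks as the function above (illustrative, not part of wrapfs):

/* Illustrative only: geometry of the hole between the old EOF and a write
 * at 'pos', mirroring the index/offset arithmetic in wrapfs_write_begin. */
static void example_hole_geometry(loff_t i_size, loff_t pos)
{
	pgoff_t last_index = i_size >> PAGE_CACHE_SHIFT;	/* page holding old EOF */
	pgoff_t write_index = pos >> PAGE_CACHE_SHIFT;		/* page being written */
	unsigned int end_off = i_size & (PAGE_CACHE_SIZE - 1);	/* offset of old EOF */
	loff_t zero_bytes = (pos > i_size) ? pos - i_size : 0;

	if (write_index == last_index) {
		/* all zero_bytes fit in the last page, starting at end_off */
	} else if (write_index > last_index) {
		/* zeroes cover the tail of last_index, any whole pages in
		 * between, and the head of write_index up to pos's offset */
	}
	(void)zero_bytes;
	(void)end_off;
}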