Example #1
// -------------------------------------------------------------------------
// jffs2_fo_write()
// Write data to file.
static int jffs2_extend_file (struct _inode *inode, struct jffs2_raw_inode *ri,
		       unsigned long offset)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_full_dnode *fn;
	uint32_t alloc_len;
	int ret = 0;

	/* Make new hole frag from old EOF to new page */
	D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
		  (unsigned int)inode->i_size, offset));

	ret = jffs2_reserve_space(c, sizeof(*ri), &alloc_len, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret)
		return ret;

	mutex_lock(&f->sem);

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri));
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->version = cpu_to_je32(++f->highest_version);
	ri->isize = cpu_to_je32(max((uint32_t)inode->i_size, offset));

	ri->offset = cpu_to_je32(inode->i_size);
	ri->dsize = cpu_to_je32(offset - inode->i_size);
	ri->csize = cpu_to_je32(0);
	ri->compr = JFFS2_COMPR_ZERO;
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	ri->data_crc = cpu_to_je32(0);
		
	fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL);
	jffs2_complete_reservation(c);
	if (IS_ERR(fn)) {
		ret = PTR_ERR(fn);
		mutex_unlock(&f->sem);
		return ret;
	}
	ret = jffs2_add_full_dnode_to_inode(c, f, fn);
	if (f->metadata) {
		jffs2_mark_node_obsolete(c, f->metadata->raw);
		jffs2_free_full_dnode(f->metadata);
		f->metadata = NULL;
	}
	if (ret) {
		D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in prepare_write, returned %d\n", ret));
		jffs2_mark_node_obsolete(c, fn->raw);
		jffs2_free_full_dnode(fn);
		mutex_unlock(&f->sem);
		return ret;
	}
	inode->i_size = offset;
	mutex_unlock(&f->sem);
	return 0;
}
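A short usage sketch (editorial addition, not taken from the original source): a write path that is about to place data beyond the current i_size calls the helper above first, so that the fragtree gains a zero-filled hole covering the gap between the old EOF and the new write position. The caller name below is hypothetical; compare the inline equivalent in jffs2_write_begin() (Example #15).

/* Sketch only: extend the file with a hole before writing past EOF.
   Assumes 'inode' and 'ri' are set up exactly as in Example #1. */
static int example_prepare_write(struct _inode *inode, struct jffs2_raw_inode *ri,
				 unsigned long offset)
{
	int ret = 0;

	if (offset > inode->i_size)
		/* The range old-EOF..offset is covered by a JFFS2_COMPR_ZERO hole node */
		ret = jffs2_extend_file(inode, ri, offset);

	return ret;
}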
Example #2
static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
					struct jffs2_inode_cache *ic)
{
	struct jffs2_full_dirent *fd;

	dbg_fsbuild("building directory inode #%u\n", ic->ino);

	/* For each child, increase nlink */
	for(fd = ic->scan_dents; fd; fd = fd->next) {
		struct jffs2_inode_cache *child_ic;
		if (!fd->ino)
			continue;

		/* we can get high latency here with huge directories */

		child_ic = jffs2_get_ino_cache(c, fd->ino);
		if (!child_ic) {
			dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
				  fd->name, fd->ino, ic->ino);
			jffs2_mark_node_obsolete(c, fd->raw);
			continue;
		}

		if (child_ic->nlink++ && fd->type == DT_DIR) {
			JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
				fd->name, fd->ino, ic->ino);
			/* TODO: What do we do about it? */
		}
		dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
		/* Can't free scan_dents so far. We might need them in pass 2 */
	}
}
Example #3
static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
	struct jffs2_full_dirent *fd;

	D1(printk(KERN_DEBUG "jffs2_build_inode building directory inode #%u\n", ic->ino));

	/* For each child, increase nlink */
	for(fd = ic->scan_dents; fd; fd = fd->next) {
		struct jffs2_inode_cache *child_ic;
		if (!fd->ino)
			continue;

		/* XXX: Can get high latency here with huge directories */

		child_ic = jffs2_get_ino_cache(c, fd->ino);
		if (!child_ic) {
			printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
				  fd->name, fd->ino, ic->ino);
			jffs2_mark_node_obsolete(c, fd->raw);
			continue;
		}

		if (child_ic->nlink++ && fd->type == DT_DIR) {
			printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino);
			if (fd->ino == 1 && ic->ino == 1) {
				printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n");
				printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n");
			}
			/* What do we do about it? */
		}
		D1(printk(KERN_DEBUG "Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino));
		/* Can't free them. We might need them in pass 2 */
	}
}
Example #4
/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an unknown node is found.
 *
 * Returns: 0 on success;
 * 	    negative error code on failure.
 */
static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
{
	/* We don't mark unknown nodes as REF_UNCHECKED */
	if (ref_flags(ref) == REF_UNCHECKED) {
		JFFS2_ERROR("REF_UNCHECKED but unknown node at %#08x\n",
			    ref_offset(ref));
		JFFS2_ERROR("Node is {%04x,%04x,%08x,%08x}. Please report this error.\n",
			    je16_to_cpu(un->magic), je16_to_cpu(un->nodetype),
			    je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc));
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));

	switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {

	case JFFS2_FEATURE_INCOMPAT:
		JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
			    je16_to_cpu(un->nodetype), ref_offset(ref));
		/* EEP */
		BUG();
		break;

	case JFFS2_FEATURE_ROCOMPAT:
		JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
			    je16_to_cpu(un->nodetype), ref_offset(ref));
		BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
		break;

	case JFFS2_FEATURE_RWCOMPAT_COPY:
		JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
			     je16_to_cpu(un->nodetype), ref_offset(ref));
		break;

	case JFFS2_FEATURE_RWCOMPAT_DELETE:
		JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
			     je16_to_cpu(un->nodetype), ref_offset(ref));
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	return 0;
}
Example #5
void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
{
	struct jffs2_full_dirent *fd, *fds;
	int deleted;

	jffs2_clear_acl(f);
	jffs2_xattr_delete_inode(c, f->inocache);
	mutex_lock(&f->sem);
	deleted = f->inocache && !f->inocache->pino_nlink;

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);

	if (f->metadata) {
		if (deleted)
			jffs2_mark_node_obsolete(c, f->metadata->raw);
		jffs2_free_full_dnode(f->metadata);
	}

	jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);

	if (f->target) {
		kfree(f->target);
		f->target = NULL;
	}

	fds = f->dents;
	while(fds) {
		fd = fds;
		fds = fd->next;
		jffs2_free_full_dirent(fd);
	}

	if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		if (f->inocache->nodes == (void *)f->inocache)
			jffs2_del_ino_cache(c, f->inocache);
	}

	mutex_unlock(&f->sem);
}
Example #6
/*
 * Helper function for jffs2_add_older_frag_to_fragtree().
 *
 * Checks the node if we are in the checking stage.
 */
static int check_tn_node(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	int ret;

	BUG_ON(ref_obsolete(tn->fn->raw));

	/* We only check the data CRC of unchecked nodes */
	if (ref_flags(tn->fn->raw) != REF_UNCHECKED)
		return 0;

	dbg_readinode("check node %#04x-%#04x, phys offs %#08x\n",
		      tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw));

	ret = check_node_data(c, tn);
	if (unlikely(ret < 0)) {
		JFFS2_ERROR("check_node_data() returned error: %d.\n",
			ret);
	} else if (unlikely(ret > 0)) {
		dbg_readinode("CRC error, mark it obsolete.\n");
		jffs2_mark_node_obsolete(c, tn->fn->raw);
	}

	return ret;
}
Example #7
int jffs2_setattr (struct dentry *dentry, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct inode *inode = dentry->d_inode;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	unsigned short dev;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	__u32 phys_ofs, alloclen;
	int ret;
	D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
	ret = inode_change_ok(inode, iattr);
	if (ret) 
		return ret;

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		/* For these, we don't actually need to read the old node */
		dev =  (MAJOR(to_kdev_t(dentry->d_inode->i_rdev)) << 8) | 
			MINOR(to_kdev_t(dentry->d_inode->i_rdev));
		mdata = (char *)&dev;
		mdatalen = sizeof(dev);
		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
	} else if (S_ISLNK(inode->i_mode)) {
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata)
			return -ENOMEM;
		ret = jffs2_read_dnode(c, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			kfree(mdata);
			return ret;
		}
		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}
		
	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, ALLOC_NORMAL);
	if (ret) {
		jffs2_free_raw_inode(ri);
		if (S_ISLNK(inode->i_mode))
			 kfree(mdata);
		return ret;
	}
	down(&f->sem);
	ivalid = iattr->ia_valid;
	
	ri->magic = JFFS2_MAGIC_BITMASK;
	ri->nodetype = JFFS2_NODETYPE_INODE;
	ri->totlen = sizeof(*ri) + mdatalen;
	ri->hdr_crc = crc32(0, ri, sizeof(struct jffs2_unknown_node)-4);

	ri->ino = inode->i_ino;
	ri->version = ++f->highest_version;

	ri->mode = (ivalid & ATTR_MODE)?iattr->ia_mode:inode->i_mode;
	ri->uid = (ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid;
	ri->gid = (ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid;

	if (ivalid & ATTR_MODE && ri->mode & S_ISGID &&
	    !in_group_p(ri->gid) && !capable(CAP_FSETID))
		ri->mode &= ~S_ISGID;

	ri->isize = (ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size;
	ri->atime = (ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime;
	ri->mtime = (ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime;
	ri->ctime = (ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime;

	ri->offset = 0;
	ri->csize = ri->dsize = mdatalen;
	ri->compr = JFFS2_COMPR_NONE;
	if (inode->i_size < ri->isize) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = ri->isize - inode->i_size;
		ri->offset = inode->i_size;
	}
	ri->node_crc = crc32(0, ri, sizeof(*ri)-8);
	if (mdatalen)
		ri->data_crc = crc32(0, mdata, mdatalen);
	else
		ri->data_crc = 0;

	new_metadata = jffs2_write_dnode(inode, ri, mdata, mdatalen, phys_ofs, NULL);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);

	jffs2_complete_reservation(c);
	
	if (IS_ERR(new_metadata)) {
		jffs2_free_raw_inode(ri);
		up(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode->i_atime = ri->atime;
	inode->i_ctime = ri->ctime;
	inode->i_mtime = ri->mtime;
	inode->i_mode = ri->mode;
	inode->i_uid = ri->uid;
	inode->i_gid = ri->gid;


	old_metadata = f->metadata;

	if (inode->i_size > ri->isize) {
		vmtruncate(inode, ri->isize);
		jffs2_truncate_fraglist (c, &f->fraglist, ri->isize);
	}

	if (inode->i_size < ri->isize) {
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = ri->isize;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);
	up(&f->sem);
	return 0;
}
Example #8
int jffs2_commit_write (struct file *filp, struct page *pg, unsigned start, unsigned end)
{
	/* Actually commit the write from the page cache page we're looking at.
	 * For now, we write the full page out each time. It sucks, but it's simple
	 */
	struct inode *inode = pg->mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	__u32 newsize = max_t(__u32, filp->f_dentry->d_inode->i_size, (pg->index << PAGE_CACHE_SHIFT) + end);
	__u32 file_ofs = (pg->index << PAGE_CACHE_SHIFT);
	__u32 writelen = min((__u32)PAGE_CACHE_SIZE, newsize - file_ofs);
	struct jffs2_raw_inode *ri;
	int ret = 0;
	ssize_t writtenlen = 0;

	D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));

	if (!start && end == PAGE_CACHE_SIZE) {
		/* We need to avoid deadlock with page_cache_read() in
		   jffs2_garbage_collect_pass(). So we have to mark the
		   page up to date, to prevent page_cache_read() from 
		   trying to re-lock it. */
		SetPageUptodate(pg);
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri)
		return -ENOMEM;

	while(writelen) {
		struct jffs2_full_dnode *fn;
		unsigned char *comprbuf = NULL;
		unsigned char comprtype = JFFS2_COMPR_NONE;
		__u32 phys_ofs, alloclen;
		__u32 datalen, cdatalen;

		D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, file_ofs));

		ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen, ALLOC_NORMAL);
		if (ret) {
			SetPageError(pg);
			D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
			break;
		}
		down(&f->sem);
		datalen = writelen;
		cdatalen = min(alloclen - sizeof(*ri), writelen);

		comprbuf = kmalloc(cdatalen, GFP_KERNEL);
		if (comprbuf) {
//			jffs2_bbc_model_set_act_sb(c); /**BBC**/
			comprtype = jffs2_compress(page_address(pg)+ (file_ofs & (PAGE_CACHE_SIZE-1)), comprbuf, &datalen, &cdatalen);
		}
		if (comprtype == JFFS2_COMPR_NONE) {
			/* Either compression failed, or the allocation of comprbuf failed */
			if (comprbuf)
				kfree(comprbuf);
			comprbuf = page_address(pg) + (file_ofs & (PAGE_CACHE_SIZE -1));
			datalen = cdatalen;
		}
		/* Now comprbuf points to the data to be written, be it compressed or not.
		   comprtype holds the compression type, and comprtype == JFFS2_COMPR_NONE means
		   that the comprbuf doesn't need to be kfree()d. 
		*/

		ri->magic = JFFS2_MAGIC_BITMASK;
		ri->nodetype = JFFS2_NODETYPE_INODE;
		ri->totlen = sizeof(*ri) + cdatalen;
		ri->hdr_crc = crc32(0, ri, sizeof(struct jffs2_unknown_node)-4);

		ri->ino = inode->i_ino;
		ri->version = ++f->highest_version;
		ri->mode = inode->i_mode;
		ri->uid = inode->i_uid;
		ri->gid = inode->i_gid;
		ri->isize = max((__u32)inode->i_size, file_ofs + datalen);
		ri->atime = ri->ctime = ri->mtime = CURRENT_TIME;
		ri->offset = file_ofs;
		ri->csize = cdatalen;
		ri->dsize = datalen;
		ri->compr = comprtype;
		ri->node_crc = crc32(0, ri, sizeof(*ri)-8);
		ri->data_crc = crc32(0, comprbuf, cdatalen);

		fn = jffs2_write_dnode(inode, ri, comprbuf, cdatalen, phys_ofs, NULL);

		jffs2_complete_reservation(c);

		if (comprtype != JFFS2_COMPR_NONE)
			kfree(comprbuf);

		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			up(&f->sem);
			SetPageError(pg);
			break;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		up(&f->sem);
		if (ret) {
			/* Eep */
			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", ret));
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);
			SetPageError(pg);
			break;
		}
		inode->i_size = ri->isize;
		inode->i_blocks = (inode->i_size + 511) >> 9;
		inode->i_ctime = inode->i_mtime = ri->ctime;
		if (!datalen) {
			printk(KERN_WARNING "Eep. We didn't actually write any bloody data\n");
			ret = -EIO;
			SetPageError(pg);
			break;
		}
		D1(printk(KERN_DEBUG "increasing writtenlen by %d\n", datalen));
		writtenlen += datalen;
		file_ofs += datalen;
		writelen -= datalen;
	}

	jffs2_free_raw_inode(ri);

	if (writtenlen < end) {
		/* generic_file_write has written more to the page cache than we've
		   actually written to the medium. Mark the page !Uptodate so that 
		   it gets reread */
		D1(printk(KERN_DEBUG "jffs2_commit_write(): Not all bytes written. Marking page !uptodate\n"));
		SetPageError(pg);
		ClearPageUptodate(pg);
	}
	if (writtenlen <= start) {
		/* We didn't even get to the start of the affected part */
		ret = ret?ret:-ENOSPC;
		D1(printk(KERN_DEBUG "jffs2_commit_write(): Only %x bytes written to page. start (%x) not reached, returning %d\n", writtenlen, start, ret));
	}
	writtenlen = min(end-start, writtenlen-start);

	D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d. nrpages is %ld\n",writtenlen?writtenlen:ret, inode->i_mapping->nrpages));
	return writtenlen?writtenlen:ret;
}
Example #9
/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	
 restart:
	if (jeb && minsize > jeb->free_size) {
		/* Skip the end of this block and file it as having some dirty space */
		/* If there's a pending write to it, flush now */
		if (jffs2_wbuf_dirty(c)) {
			spin_unlock(&c->erase_completion_lock);
			D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));			    
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			jeb = c->nextblock;
			goto restart;
		}
		c->wasted_size += jeb->free_size;
		c->free_size -= jeb->free_size;
		jeb->wasted_size += jeb->free_size;
		jeb->free_size = 0;
		
		/* Check if we have a dirty block now, or if it was dirty already */
		if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
			c->dirty_size += jeb->wasted_size;
			c->wasted_size -= jeb->wasted_size;
			jeb->dirty_size += jeb->wasted_size;
			jeb->wasted_size = 0;
			if (VERYDIRTY(c, jeb->dirty_size)) {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->very_dirty_list);
			} else {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->dirty_list);
			}
		} else { 
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->clean_list);
		}
		c->nextblock = jeb = NULL;
	}
	
	if (!jeb) {
		struct list_head *next;
		/* Take the next block off the 'free' list */

		if (list_empty(&c->free_list)) {

			if (!c->nr_erasing_blocks && 
			    !list_empty(&c->erasable_list)) {
				struct jffs2_eraseblock *ejeb;

				ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
				list_del(&ejeb->list);
				list_add_tail(&ejeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
					  ejeb->offset));
			}

			if (!c->nr_erasing_blocks && 
			    !list_empty(&c->erasable_pending_wbuf_list)) {
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				/* c->nextblock is NULL, no update to c->nextblock allowed */			    
				spin_unlock(&c->erase_completion_lock);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				/* Have another go. It'll be on the erasable_list now */
				return -EAGAIN;
			}

			if (!c->nr_erasing_blocks) {
				/* Ouch. We're in GC, or we wouldn't have got here.
				   And there's no space left. At all. */
				printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", 
				       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", 
				       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
				return -ENOSPC;
			}

			spin_unlock(&c->erase_completion_lock);
			/* Don't wait for it; just erase one right now */
			jffs2_erase_pending_blocks(c, 1);
			spin_lock(&c->erase_completion_lock);

			/* An erase may have failed, decreasing the
			   amount of free space available. So we must
			   restart from the beginning */
			return -EAGAIN;
		}

		next = c->free_list.next;
		list_del(next);
		c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
		c->nr_free_blocks--;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think). 
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}
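An editorial note on the -EAGAIN convention above: the function may drop and retake the erase_completion_lock while flushing the write buffer or triggering erases, and it returns -EAGAIN whenever the free and erasable lists may have changed underneath it. The caller is expected to simply retry; a minimal, hedged sketch of that loop, assuming the alloc semaphore and erase_completion_lock are held as the comment above the function requires:

/* Hedged sketch of the caller-side retry loop (not verbatim from the driver). */
do {
	ret = jffs2_do_reserve_space(c, minsize, &ofs, &len);
} while (ret == -EAGAIN);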
Example #10
static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
					struct jffs2_inode_info *f,
					struct jffs2_raw_inode *latest_node)
{
	struct jffs2_tmp_dnode_info *tn;
	struct rb_root tn_list;
	struct rb_node *rb, *repl_rb;
	struct jffs2_full_dirent *fd_list;
	struct jffs2_full_dnode *fn, *first_fn = NULL;
	uint32_t crc;
	uint32_t latest_mctime, mctime_ver;
	size_t retlen;
	int ret;

	dbg_readinode("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink);

	/* Grab all nodes relevant to this ino */
	ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);

	if (ret) {
		JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
		return ret;
	}
	f->dents = fd_list;

	rb = rb_first(&tn_list);

	while (rb) {
		cond_resched();
		tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb);
		fn = tn->fn;
		ret = 1;
		dbg_readinode("consider node ver %u, phys offset "
			"%#08x(%d), range %u-%u.\n", tn->version,
			ref_offset(fn->raw), ref_flags(fn->raw),
			fn->ofs, fn->ofs + fn->size);

		if (fn->size) {
			ret = jffs2_add_older_frag_to_fragtree(c, f, tn);
			/* TODO: the error code isn't checked, check it */
			jffs2_dbg_fragtree_paranoia_check_nolock(f);
			BUG_ON(ret < 0);
			if (!first_fn && ret == 0)
				first_fn = fn;
		} else if (!first_fn) {
			first_fn = fn;
			f->metadata = fn;
			ret = 0; /* Prevent freeing the metadata update node */
		} else
			jffs2_mark_node_obsolete(c, fn->raw);

		BUG_ON(rb->rb_left);
		if (rb->rb_parent && rb->rb_parent->rb_left == rb) {
			/* We were then left-hand child of our parent. We need
			 * to move our own right-hand child into our place. */
			repl_rb = rb->rb_right;
			if (repl_rb)
				repl_rb->rb_parent = rb->rb_parent;
		} else
			repl_rb = NULL;

		rb = rb_next(rb);

		/* Remove the spent tn from the tree; don't bother rebalancing
		 * but put our right-hand child in our own place. */
		if (tn->rb.rb_parent) {
			if (tn->rb.rb_parent->rb_left == &tn->rb)
				tn->rb.rb_parent->rb_left = repl_rb;
			else if (tn->rb.rb_parent->rb_right == &tn->rb)
				tn->rb.rb_parent->rb_right = repl_rb;
			else BUG();
		} else if (tn->rb.rb_right)
			tn->rb.rb_right->rb_parent = NULL;

		jffs2_free_tmp_dnode_info(tn);
		if (ret) {
			dbg_readinode("delete dnode %u-%u.\n",
				fn->ofs, fn->ofs + fn->size);
			jffs2_free_full_dnode(fn);
		}
	}
	jffs2_dbg_fragtree_paranoia_check_nolock(f);

	BUG_ON(first_fn && ref_obsolete(first_fn->raw));

	fn = first_fn;
	if (unlikely(!first_fn)) {
		/* No data nodes for this inode. */
		if (f->inocache->ino != 1) {
			JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
			if (!fd_list) {
				if (f->inocache->state == INO_STATE_READING)
					jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
				return -EIO;
			}
			JFFS2_NOTICE("but it has children so we fake some modes for it\n");
		}
		latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
		latest_node->version = cpu_to_je32(0);
		latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
		latest_node->isize = cpu_to_je32(0);
		latest_node->gid = cpu_to_je16(0);
		latest_node->uid = cpu_to_je16(0);
		if (f->inocache->state == INO_STATE_READING)
			jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
		return 0;
	}

	ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node);
	if (ret || retlen != sizeof(*latest_node)) {
		JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
			ret, retlen, sizeof(*latest_node));
		/* FIXME: If this fails, there seems to be a memory leak. Find it. */
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
		return ret?ret:-EIO;
	}

	crc = crc32(0, latest_node, sizeof(*latest_node)-8);
	if (crc != je32_to_cpu(latest_node->node_crc)) {
		JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
			f->inocache->ino, ref_offset(fn->raw));
		up(&f->sem);
		jffs2_do_clear_inode(c, f);
		return -EIO;
	}

	switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
	case S_IFDIR:
		if (mctime_ver > je32_to_cpu(latest_node->version)) {
			/* The times in the latest_node are actually older than
			   mctime in the latest dirent. Cheat. */
			latest_node->ctime = latest_node->mtime = cpu_to_je32(latest_mctime);
		}
		break;


	case S_IFREG:
		/* If it was a regular file, truncate it to the latest node's isize */
		jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
		break;

	case S_IFLNK:
		/* Hack to work around broken isize in old symlink code.
		   Remove this when dwmw2 comes to his senses and stops
		   symlinks from being an entirely gratuitous special
		   case. */
		if (!je32_to_cpu(latest_node->isize))
			latest_node->isize = latest_node->dsize;

		if (f->inocache->state != INO_STATE_CHECKING) {
			/* The symlink's inode data is the target path. Read it and
			 * keep it in RAM to make following the symlink quick. */
			f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
			if (!f->target) {
				JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
				up(&f->sem);
				jffs2_do_clear_inode(c, f);
				return -ENOMEM;
			}

			ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node),
						je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);

			if (ret  || retlen != je32_to_cpu(latest_node->csize)) {
				if (retlen != je32_to_cpu(latest_node->csize))
					ret = -EIO;
				kfree(f->target);
				f->target = NULL;
				up(&f->sem);
				jffs2_do_clear_inode(c, f);
				return -ret;
			}

			f->target[je32_to_cpu(latest_node->csize)] = '\0';
			dbg_readinode("symlink's target '%s' cached\n", f->target);
		}

		/* fall through... */

	case S_IFBLK:
	case S_IFCHR:
		/* Certain inode types should have only one data node, and it's
		   kept as the metadata node */
		if (f->metadata) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
			       f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		if (!frag_first(&f->fragtree)) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
			       f->inocache->ino, jemode_to_cpu(latest_node->mode));
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* ASSERT: f->fraglist != NULL */
		if (frag_next(frag_first(&f->fragtree))) {
			JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
			       f->inocache->ino, jemode_to_cpu(latest_node->mode));
			/* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
			up(&f->sem);
			jffs2_do_clear_inode(c, f);
			return -EIO;
		}
		/* OK. We're happy */
		f->metadata = frag_first(&f->fragtree)->node;
		jffs2_free_node_frag(frag_first(&f->fragtree));
		f->fragtree = RB_ROOT;
		break;
	}
	if (f->inocache->state == INO_STATE_READING)
		jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);

	return 0;
}
Example #11
static void jffs2_kill_tn(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
{
	jffs2_mark_node_obsolete(c, tn->fn->raw);
	jffs2_free_full_dnode(tn->fn);
	jffs2_free_tmp_dnode_info(tn);
}
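The helpers in Examples #6 and #11 are used together by the read-inode code: each temporary dnode is CRC-checked, and one that fails the check is obsoleted and freed instead of being merged into the fragtree. A condensed editorial sketch of that pattern (the full version appears in Example #17):

/* Condensed sketch of the check-and-discard pattern (cf. Example #17);
   error handling on the add path is omitted here. */
if (check_tn_node(c, this)) {
	/* I/O error or data CRC failure: drop the node entirely. */
	jffs2_kill_tn(c, this);
} else {
	/* Node is valid: merge its data range into the inode's fragtree. */
	ret = jffs2_add_full_dnode_to_inode(c, f, this->fn);
	jffs2_free_tmp_dnode_info(this);
}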
Example #12
static int jffs2_do_unlink(struct inode *dir_i, struct dentry *dentry, int rename)
{
	struct jffs2_inode_info *dir_f, *f;
	struct jffs2_sb_info *c;
	struct jffs2_raw_dirent *rd;
	struct jffs2_full_dirent *fd;
	__u32 alloclen, phys_ofs;
	int ret;

	c = JFFS2_SB_INFO(dir_i->i_sb);

	rd = jffs2_alloc_raw_dirent();
	if (!rd)
		return -ENOMEM;

	ret = jffs2_reserve_space(c, sizeof(*rd)+dentry->d_name.len, &phys_ofs, &alloclen, ALLOC_DELETION);
	if (ret) {
		jffs2_free_raw_dirent(rd);
		return ret;
	}

	dir_f = JFFS2_INODE_INFO(dir_i);
	down(&dir_f->sem);

	/* Build a deletion node */
	rd->magic = JFFS2_MAGIC_BITMASK;
	rd->nodetype = JFFS2_NODETYPE_DIRENT;
	rd->totlen = sizeof(*rd) + dentry->d_name.len;
	rd->hdr_crc = crc32(0, rd, sizeof(struct jffs2_unknown_node)-4);

	rd->pino = dir_i->i_ino;
	rd->version = ++dir_f->highest_version;
	rd->ino = 0;
	rd->mctime = CURRENT_TIME;
	rd->nsize = dentry->d_name.len;
	rd->type = DT_UNKNOWN;
	rd->node_crc = crc32(0, rd, sizeof(*rd)-8);
	rd->name_crc = crc32(0, dentry->d_name.name, dentry->d_name.len);

	fd = jffs2_write_dirent(dir_i, rd, dentry->d_name.name, dentry->d_name.len, phys_ofs, NULL);
	
	jffs2_complete_reservation(c);
	jffs2_free_raw_dirent(rd);

	if (IS_ERR(fd)) {
		up(&dir_f->sem);
		return PTR_ERR(fd);
	}

	/* File it. This will mark the old one obsolete. */
	jffs2_add_fd_to_list(c, fd, &dir_f->dents);
	up(&dir_f->sem);
	
	if (!rename) {
		f = JFFS2_INODE_INFO(dentry->d_inode);
		down(&f->sem);

		while (f->dents) {
			/* There can be only deleted ones */
			fd = f->dents;
			
			f->dents = fd->next;
			
			if (fd->ino) {
				printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n",
				       f->inocache->ino, fd->name, fd->ino);
			} else {
				D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", fd->name, f->inocache->ino));
			}
			jffs2_mark_node_obsolete(c, fd->raw);
			jffs2_free_full_dirent(fd);
		}
		/* Don't oops on unlinking a bad inode */
		if (f->inocache)
			f->inocache->nlink--;
		dentry->d_inode->i_nlink--;
		up(&f->sem);
	}

	return 0;
}
Example #13
/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size; 			/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
							/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
						"summary->size=%d , sumsize=%d\n",
						minsize, jeb->free_size,
						c->summary->sum_size, sumsize);
		}

		/* Is there enough space to write out the current node, or do we have to
		   write out the summary information now, close this jeb and select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information; summary is disabled for this jeb and the
				   collected information has been freed */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			c->wasted_size += jeb->free_size;
			c->free_size -= jeb->free_size;
			jeb->wasted_size += jeb->free_size;
			jeb->free_size = 0;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}
Example #14
static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	unsigned short dev;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	uint32_t phys_ofs, alloclen;
	int ret;
	D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
	ret = inode_change_ok(inode, iattr);
	if (ret)
		return ret;

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		/* For these, we don't actually need to read the old node */
		dev = old_encode_dev(inode->i_rdev);
		mdata = (char *)&dev;
		mdatalen = sizeof(dev);
		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
	} else if (S_ISLNK(inode->i_mode)) {
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata)
			return -ENOMEM;
		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			kfree(mdata);
			return ret;
		}
		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}

	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen,
				ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		jffs2_free_raw_inode(ri);
		if (S_ISLNK(inode->i_mode & S_IFMT))
			 kfree(mdata);
		return ret;
	}
	down(&f->sem);
	ivalid = iattr->ia_valid;

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->ino = cpu_to_je32(inode->i_ino);
	ri->version = cpu_to_je32(++f->highest_version);

	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid);
	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);

	if (ivalid & ATTR_MODE)
		if (iattr->ia_mode & S_ISGID &&
		    !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID))
			ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID);
		else
			ri->mode = cpu_to_jemode(iattr->ia_mode);
	else
		ri->mode = cpu_to_jemode(inode->i_mode);


	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

	ri->offset = cpu_to_je32(0);
	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
	ri->compr = JFFS2_COMPR_NONE;
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
		ri->offset = cpu_to_je32(inode->i_size);
	}
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	if (mdatalen)
		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
	else
		ri->data_crc = cpu_to_je32(0);

	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);

	if (IS_ERR(new_metadata)) {
		jffs2_complete_reservation(c);
		jffs2_free_raw_inode(ri);
		up(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
	inode->i_mode = jemode_to_cpu(ri->mode);
	inode->i_uid = je16_to_cpu(ri->uid);
	inode->i_gid = je16_to_cpu(ri->gid);


	old_metadata = f->metadata;

	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = iattr->ia_size;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);

	up(&f->sem);
	jffs2_complete_reservation(c);

	/* We have to do the vmtruncate() without f->sem held, since
	   some pages may be locked and waiting for it in readpage().
	   We are protected from a simultaneous write() extending i_size
	   back past iattr->ia_size, because do_truncate() holds the
	   generic inode semaphore. */
	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
		vmtruncate(inode, iattr->ia_size);

	return 0;
}
Example #15
static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct page *pg;
	struct inode *inode = mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	uint32_t pageofs = index << PAGE_CACHE_SHIFT;
	int ret = 0;

	pg = grab_cache_page_write_begin(mapping, index, flags);
	if (!pg)
		return -ENOMEM;
	*pagep = pg;

	jffs2_dbg(1, "%s()\n", __func__);

	if (pageofs > inode->i_size) {
		/* Make new hole frag from old EOF to new page */
		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
		struct jffs2_raw_inode ri;
		struct jffs2_full_dnode *fn;
		uint32_t alloc_len;

		jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
			  (unsigned int)inode->i_size, pageofs);

		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
		if (ret)
			goto out_page;

		mutex_lock(&f->sem);
		memset(&ri, 0, sizeof(ri));

		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri));
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.mode = cpu_to_jemode(inode->i_mode);
		ri.uid = cpu_to_je16(inode->i_uid);
		ri.gid = cpu_to_je16(inode->i_gid);
		ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
		ri.atime = ri.ctime = ri.mtime = cpu_to_je32(get_seconds());
		ri.offset = cpu_to_je32(inode->i_size);
		ri.dsize = cpu_to_je32(pageofs - inode->i_size);
		ri.csize = cpu_to_je32(0);
		ri.compr = JFFS2_COMPR_ZERO;
		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
		ri.data_crc = cpu_to_je32(0);

		fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_NORMAL);

		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			jffs2_complete_reservation(c);
			mutex_unlock(&f->sem);
			goto out_page;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		if (ret) {
			jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n",
				  ret);
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);
			jffs2_complete_reservation(c);
			mutex_unlock(&f->sem);
			goto out_page;
		}
		jffs2_complete_reservation(c);
		inode->i_size = pageofs;
		mutex_unlock(&f->sem);
	}

	/*
	 * Read in the page if it wasn't already present. Cannot optimize away
	 * the whole page write case until jffs2_write_end can handle the
	 * case of a short-copy.
	 */
	if (!PageUptodate(pg)) {
		mutex_lock(&f->sem);
		ret = jffs2_do_readpage_nolock(inode, pg);
		mutex_unlock(&f->sem);
		if (ret)
			goto out_page;
	}
	jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
	return ret;

out_page:
	unlock_page(pg);
	page_cache_release(pg);
	return ret;
}
Example #16
static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				 struct rb_root *tnp, struct jffs2_full_dirent **fdp,
				 uint32_t *highest_version, uint32_t *latest_mctime,
				 uint32_t *mctime_ver)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	struct rb_root ret_tn = RB_ROOT;
	struct jffs2_full_dirent *ret_fd = NULL;
	union jffs2_node_union node;
	size_t retlen;
	int err;

	*mctime_ver = 0;
	
	JFFS2_DBG_READINODE("ino #%u\n", f->inocache->ino);

	spin_lock(&c->erase_completion_lock);

	valid_ref = jffs2_first_valid_node(f->inocache->nodes);

	if (!valid_ref && (f->inocache->ino != 1))
		JFFS2_WARNING("no valid nodes for ino #%u\n", f->inocache->ino);

	while (valid_ref) {
		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/* FIXME: point() */
		err = jffs2_flash_read(c, (ref_offset(ref)), 
				       min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)),
				       &retlen, (void *)&node);
		if (err) {
			JFFS2_ERROR("error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref));
			goto free_out;
		}
			
		switch (je16_to_cpu(node.u.nodetype)) {
			
		case JFFS2_NODETYPE_DIRENT:
			JFFS2_DBG_READINODE("node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref));
			
			if (retlen < sizeof(node.d)) {
				JFFS2_ERROR("short read dirent at %#08x\n", ref_offset(ref));
				err = -EIO;
				goto free_out;
			}

			err = read_direntry(c, ref, &node.d, retlen, &ret_fd, (int32_t *)latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;
			
			if (je32_to_cpu(node.d.version) > *highest_version)
				*highest_version = je32_to_cpu(node.d.version);

			break;

		case JFFS2_NODETYPE_INODE:
			JFFS2_DBG_READINODE("node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref));
			
			if (retlen < sizeof(node.i)) {
				JFFS2_ERROR("short read dnode at %#08x\n", ref_offset(ref));
				err = -EIO;
				goto free_out;
			}

			err = read_dnode(c, ref, &node.i, retlen, &ret_tn, (int32_t *)latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node.i.version) > *highest_version)
				*highest_version = je32_to_cpu(node.i.version);
			
			JFFS2_DBG_READINODE("version %d, highest_version now %d\n",
					je32_to_cpu(node.i.version), *highest_version);

			break;

		default:
			/* Check we've managed to read at least the common node header */
			if (retlen < sizeof(struct jffs2_unknown_node)) {
				JFFS2_ERROR("short read unknown node at %#08x\n", ref_offset(ref));
				return -EIO;
			}

			err = read_unknown(c, ref, &node.u, retlen);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

		}
		spin_lock(&c->erase_completion_lock);

	}
	spin_unlock(&c->erase_completion_lock);
	*tnp = ret_tn;
	*fdp = ret_fd;

	return 0;

 free_out:
	jffs2_free_tmp_dnode_info_list(&ret_tn);
	jffs2_free_full_dirent_list(ret_fd);
	return err;
}
Example #17
/* Build final, normal fragtree from tn tree. It doesn't matter which order
   we add nodes to the real fragtree, as long as they don't overlap. And
   having thrown away the majority of overlapped nodes as we went, there
   really shouldn't be many sets of nodes which do overlap. If we start at
   the end, we can use the overlap markers -- we can just eat nodes which
   aren't overlapped, and when we encounter nodes which _do_ overlap we
   sort them all into a temporary tree in version order before replaying them. */
static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c,
				      struct jffs2_inode_info *f,
				      struct jffs2_readinode_info *rii)
{
	struct jffs2_tmp_dnode_info *pen, *last, *this;
	struct rb_root ver_root = RB_ROOT;
	uint32_t high_ver = 0;

	if (rii->mdata_tn) {
		dbg_readinode("potential mdata is ver %d at %p\n", rii->mdata_tn->version, rii->mdata_tn);
		high_ver = rii->mdata_tn->version;
		rii->latest_ref = rii->mdata_tn->fn->raw;
	}
#ifdef JFFS2_DBG_READINODE_MESSAGES
	this = tn_last(&rii->tn_root);
	while (this) {
		dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d\n", this, this->version, this->fn->ofs,
			      this->fn->ofs+this->fn->size, this->overlapped);
		this = tn_prev(this);
	}
#endif
	pen = tn_last(&rii->tn_root);
	while ((last = pen)) {
		pen = tn_prev(last);

		eat_last(&rii->tn_root, &last->rb);
		ver_insert(&ver_root, last);

		if (unlikely(last->overlapped)) {
			if (pen)
				continue;
			/*
			 * We killed a node which set the overlapped
			 * flags during the scan. Fix it up.
			 */
			last->overlapped = 0;
		}

		/* Now we have a bunch of nodes in reverse version
		   order, in the tree at ver_root. Most of the time,
		   there'll actually be only one node in the 'tree',
		   in fact. */
		this = tn_last(&ver_root);

		while (this) {
			struct jffs2_tmp_dnode_info *vers_next;
			int ret;
			vers_next = tn_prev(this);
			eat_last(&ver_root, &this->rb);
			if (check_tn_node(c, this)) {
				dbg_readinode("node ver %d, 0x%x-0x%x failed CRC\n",
					     this->version, this->fn->ofs,
					     this->fn->ofs+this->fn->size);
				jffs2_kill_tn(c, this);
			} else {
				if (this->version > high_ver) {
					/* Note that this is different from the other
					   highest_version, because this one is only
					   counting _valid_ nodes which could give the
					   latest inode metadata */
					high_ver = this->version;
					rii->latest_ref = this->fn->raw;
				}
				dbg_readinode("Add %p (v %d, 0x%x-0x%x, ov %d) to fragtree\n",
					     this, this->version, this->fn->ofs,
					     this->fn->ofs+this->fn->size, this->overlapped);

				ret = jffs2_add_full_dnode_to_inode(c, f, this->fn);
				if (ret) {
					/* Free the nodes in vers_root; let the caller
					   deal with the rest */
					JFFS2_ERROR("Add node to tree failed %d\n", ret);
					while (1) {
						vers_next = tn_prev(this);
						if (check_tn_node(c, this))
							jffs2_mark_node_obsolete(c, this->fn->raw);
						jffs2_free_full_dnode(this->fn);
						jffs2_free_tmp_dnode_info(this);
						this = vers_next;
						if (!this)
							break;
						eat_last(&ver_root, &vers_next->rb);
					}
					return ret;
				}
				jffs2_free_tmp_dnode_info(this);
			}
			this = vers_next;
		}
	}
	return 0;
}
Example #18
/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino, returning the former in order of version */
static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				 struct rb_root *tnp, struct jffs2_full_dirent **fdp,
				 uint32_t *highest_version, uint32_t *latest_mctime,
				 uint32_t *mctime_ver)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	struct rb_root ret_tn = RB_ROOT;
	struct jffs2_full_dirent *ret_fd = NULL;
	unsigned char *buf = NULL;
	union jffs2_node_union *node;
	size_t retlen;
	int len, err;

	*mctime_ver = 0;

	dbg_readinode("ino #%u\n", f->inocache->ino);

	if (jffs2_is_writebuffered(c)) {
		/*
		 * If we have the write buffer, we assume the minimal I/O unit
		 * is c->wbuf_pagesize. We implement some optimizations for
		 * this case and need a temporary buffer of size
		 * 2*c->wbuf_pagesize bytes (see the comments in read_dnode()).
		 * Basically, we want to read not only the node header, but the
		 * whole wbuf (a NAND page in the case of NAND), or two wbufs
		 * if the node header overlaps the border between them.
		 */
		len = 2*c->wbuf_pagesize;
	} else {
		/*
		 * When there is no write buffer, the size of the temporary
		 * buffer is the size of the largest node header.
		 */
		len = sizeof(union jffs2_node_union);
	}

	/* FIXME: in case of NOR and available ->point() this
	 * needs to be fixed. */
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock(&c->erase_completion_lock);
	valid_ref = jffs2_first_valid_node(f->inocache->nodes);
	if (!valid_ref && f->inocache->ino != 1)
		JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
	while (valid_ref) {
		unsigned char *bufstart;

		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/*
		 * At this point we don't know the type of the node we're going
		 * to read, so we do not know the size of its header. In order
		 * to minimize the amount of flash IO we assume the node has
		 * size = JFFS2_MIN_NODE_HEADER.
		 */
		if (jffs2_is_writebuffered(c)) {
			/*
			 * We treat 'buf' as 2 adjacent wbufs. We want to
			 * adjust bufstart so that it points to the
			 * beginning of the node within this wbuf.
			 */
			bufstart = buf + (ref_offset(ref) % c->wbuf_pagesize);
			/* We will read either one wbuf or 2 wbufs. */
			len = c->wbuf_pagesize - (bufstart - buf);
			if (JFFS2_MIN_NODE_HEADER + (int)(bufstart - buf) > c->wbuf_pagesize) {
				/* The header spans the border of the first wbuf */
				len += c->wbuf_pagesize;
			}
		} else {
			bufstart = buf;
			len = JFFS2_MIN_NODE_HEADER;
		}

		dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));

		/* FIXME: point() */
		err = jffs2_flash_read(c, ref_offset(ref), len,
				       &retlen, bufstart);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err);
			goto free_out;
		}

		if (retlen < len) {
			JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ref_offset(ref), retlen, len);
			err = -EIO;
			goto free_out;
		}

		node = (union jffs2_node_union *)bufstart;

		switch (je16_to_cpu(node->u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_direntry(c, ref, &node->d, retlen, &ret_fd, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node->d.version) > *highest_version)
				*highest_version = je32_to_cpu(node->d.version);

			break;

		case JFFS2_NODETYPE_INODE:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_dnode(c, ref, &node->i, &ret_tn, len, latest_mctime, mctime_ver);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

			if (je32_to_cpu(node->i.version) > *highest_version)
				*highest_version = je32_to_cpu(node->i.version);

			break;

		default:
			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) {
				err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf, bufstart);
				if (unlikely(err))
					goto free_out;
			}

			err = read_unknown(c, ref, &node->u);
			if (err == 1) {
				jffs2_mark_node_obsolete(c, ref);
				break;
			} else if (unlikely(err))
				goto free_out;

		}
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	*tnp = ret_tn;
	*fdp = ret_fd;
	kfree(buf);

	dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
			f->inocache->ino, *highest_version, *latest_mctime, *mctime_ver);
	return 0;

 free_out:
	jffs2_free_tmp_dnode_info_list(&ret_tn);
	jffs2_free_full_dirent_list(ret_fd);
	kfree(buf);
	return err;
}
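A worked illustration of the bufstart/len computation above (editorial addition; the concrete numbers are purely illustrative): with a write-buffer page size of 512 bytes, a node starting at flash offset 0x1234 lies 52 bytes into its wbuf, so the first read covers the remaining 460 bytes of that wbuf; a second wbuf is read only when fewer than JFFS2_MIN_NODE_HEADER bytes remain before the wbuf boundary.

/* Illustration only -- assumed value: c->wbuf_pagesize == 512. */
uint32_t off     = 0x1234;               /* example node offset on flash  */
uint32_t in_wbuf = off % 512;            /* 0x34 = 52 bytes into the wbuf */
unsigned char *bufstart = buf + in_wbuf; /* node header lands here in buf */
int len = 512 - in_wbuf;                 /* 460 bytes up to the wbuf end  */
if (JFFS2_MIN_NODE_HEADER + (int)in_wbuf > 512)
	len += 512;                      /* header straddles two wbufs    */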
Example #19
/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time a directory entry node is found.
 *
 * Returns: 0 on success;
 * 	    negative error code on failure.
 */
static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
				struct jffs2_raw_dirent *rd, size_t read,
				struct jffs2_readinode_info *rii)
{
	struct jffs2_full_dirent *fd;
	uint32_t crc;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	crc = crc32(0, rd, sizeof(*rd) - 8);
	if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
		JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n",
			     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {
		struct jffs2_eraseblock *jeb;
		int len;

		/* Sanity check */
		if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
			JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
				    ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
			jffs2_mark_node_obsolete(c, ref);
			return 0;
		}

		jeb = &c->blocks[ref->flash_offset / c->sector_size];
		len = ref_totlen(c, jeb, ref);

		spin_lock(&c->erase_completion_lock);
		jeb->used_size += len;
		jeb->unchecked_size -= len;
		c->used_size += len;
		c->unchecked_size -= len;
		ref->flash_offset = ref_offset(ref) | dirent_node_state(rd);
		spin_unlock(&c->erase_completion_lock);
	}

	fd = jffs2_alloc_full_dirent(rd->nsize + 1);
	if (unlikely(!fd))
		return -ENOMEM;

	fd->raw = ref;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->type = rd->type;

	if (fd->version > rii->highest_version)
		rii->highest_version = fd->version;

	/* Pick out the mctime of the latest dirent */
	if(fd->version > rii->mctime_ver && je32_to_cpu(rd->mctime)) {
		rii->mctime_ver = fd->version;
		rii->latest_mctime = je32_to_cpu(rd->mctime);
	}

	/*
	 * Copy as much of the name as possible from the raw
	 * dirent we've already read from the flash.
	 */
	if (read > sizeof(*rd))
		memcpy(&fd->name[0], &rd->name[0],
		       min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));

	/* Do we need to copy any more of the name directly from the flash? */
	if (rd->nsize + sizeof(*rd) > read) {
		/* FIXME: point() */
		int err;
		int already = read - sizeof(*rd);

		err = jffs2_flash_read(c, (ref_offset(ref)) + read,
				rd->nsize - already, &read, &fd->name[already]);
		if (unlikely(read != rd->nsize - already) && likely(!err)) {
			/* Short read with no error reported: free the dirent
			   we just allocated before bailing out, so it does
			   not leak. */
			jffs2_free_full_dirent(fd);
			return -EIO;
		}

		if (unlikely(err)) {
			JFFS2_ERROR("read remainder of name: error %d\n", err);
			jffs2_free_full_dirent(fd);
			return -EIO;
		}
	}

	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->next = NULL;
	fd->name[rd->nsize] = '\0';

	/*
	 * Wheee. We now have a complete jffs2_full_dirent structure, with
	 * the name in it and everything. Link it into the list
	 */
	jffs2_add_fd_to_list(c, fd, &rii->fds);

	return 0;
}
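
/*
 * Illustrative sketch, not JFFS2 code: the crc32(0, rd, sizeof(*rd) - 8)
 * check above covers the whole fixed-size dirent header except its last two
 * 32-bit fields (node_crc and name_crc), which is why those can be filled in
 * after the rest of the header is laid out. The standalone example below
 * shows the same layout trick with a made-up header and zlib's crc32(); the
 * struct and field names are assumptions for this sketch only.
 */
#include <stdint.h>
#include <zlib.h>

struct demo_hdr {
	uint16_t magic;
	uint16_t nodetype;
	uint32_t totlen;
	uint32_t hdr_crc;
	uint32_t node_crc;	/* trailing CRC fields are excluded below */
	uint32_t name_crc;
};				/* no padding with these field sizes on common ABIs */

static void seal_header(struct demo_hdr *h)
{
	/* checksum everything up to, but not including, the two CRC fields */
	h->node_crc = (uint32_t)crc32(0L, (const Bytef *)h, sizeof(*h) - 8);
}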
Beispiel #20
0
static int jffs2_do_setattr (struct _inode *inode, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	uint32_t alloclen;
	int ret;
	int alloc_type = ALLOC_NORMAL;

	jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		return -EIO;
	} else if (S_ISLNK(inode->i_mode)) {
		mutex_lock(&f->sem);
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata) {
			mutex_unlock(&f->sem);
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			mutex_unlock(&f->sem);
			kfree(mdata);
			return ret;
		}
		mutex_unlock(&f->sem);
		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
			  __func__, mdatalen);
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}

	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		jffs2_free_raw_inode(ri);
		if (S_ISLNK(inode->i_mode))
			 kfree(mdata);
		return ret;
	}
	mutex_lock(&f->sem);
	ivalid = iattr->ia_valid;

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->ino = cpu_to_je32(inode->i_ino);
	ri->version = cpu_to_je32(++f->highest_version);

	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
		from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
		from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));

	if (ivalid & ATTR_MODE)
		ri->mode = cpu_to_jemode(iattr->ia_mode);
	else
		ri->mode = cpu_to_jemode(inode->i_mode);


	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

	ri->offset = cpu_to_je32(0);
	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
	ri->compr = JFFS2_COMPR_NONE;
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
		ri->offset = cpu_to_je32(inode->i_size);
	} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
		/* For truncate-to-zero, treat it as deletion because
		   it'll always be obsoleting all previous nodes */
		alloc_type = ALLOC_DELETION;
	}
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	if (mdatalen)
		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
	else
		ri->data_crc = cpu_to_je32(0);

	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);

	if (IS_ERR(new_metadata)) {
		jffs2_complete_reservation(c);
		jffs2_free_raw_inode(ri);
		mutex_unlock(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_uid_write(inode, je16_to_cpu(ri->uid));
	i_gid_write(inode, je16_to_cpu(ri->gid));


	old_metadata = f->metadata;

	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = iattr->ia_size;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);

	mutex_unlock(&f->sem);
	jffs2_complete_reservation(c);

	/* We have to do the truncate_setsize() without f->sem held, since
	   some pages may be locked and waiting for it in readpage().
	   We are protected from a simultaneous write() extending i_size
	   back past iattr->ia_size, because do_truncate() holds the
	   generic inode semaphore. */
	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
		truncate_setsize(inode, iattr->ia_size);
	}	

	return 0;
}
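
/*
 * Illustrative sketch, not JFFS2 code: when ATTR_SIZE grows the file, the
 * code above turns the metadata node into a hole node (JFFS2_COMPR_ZERO)
 * covering the gap from the old EOF to the requested size. The helper below
 * just restates that offset/dsize arithmetic in isolation; the struct and
 * function names are invented for this sketch.
 */
#include <stdint.h>

struct hole_range {
	uint32_t offset;	/* where the hole starts: the old i_size */
	uint32_t dsize;		/* how much it covers: new size - old size */
};

static struct hole_range hole_for_extension(uint32_t old_isize, uint32_t new_isize)
{
	struct hole_range h = {
		.offset = old_isize,
		.dsize  = new_isize - old_isize,
	};
	return h;
}

/* e.g. extending a 4096-byte file to 10000 bytes yields offset = 4096 and
 * dsize = 5904, written out with compr = JFFS2_COMPR_ZERO. */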
Beispiel #21
0
/*
 * Helper function for jffs2_get_inode_nodes().
 * It is called every time an inode node is found.
 *
 * Returns: 0 on success (possibly after marking a bad node obsolete);
 * 	    negative error code on failure.
 */
static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
			     struct jffs2_raw_inode *rd, int rdlen,
			     struct jffs2_readinode_info *rii)
{
	struct jffs2_tmp_dnode_info *tn;
	uint32_t len, csize;
	int ret = 0;
	uint32_t crc;

	/* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
	BUG_ON(ref_obsolete(ref));

	crc = crc32(0, rd, sizeof(*rd) - 8);
	if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
		JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n",
			     ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
		jffs2_mark_node_obsolete(c, ref);
		return 0;
	}

	tn = jffs2_alloc_tmp_dnode_info();
	if (!tn) {
		JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn));
		return -ENOMEM;
	}

	tn->partial_crc = 0;
	csize = je32_to_cpu(rd->csize);

	/* If we've never checked the CRCs on this node, check them now */
	if (ref_flags(ref) == REF_UNCHECKED) {

		/* Sanity checks */
		if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
		    unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
			JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto free_out;
		}

		if (jffs2_is_writebuffered(c) && csize != 0) {
			/* At this point we are supposed to check the data CRC
			 * of our unchecked node. But thus far, we do not
			 * know whether the node is valid or obsolete. To
			 * figure this out, we need to walk all the nodes of
			 * the inode and build the inode fragtree. We don't
			 * want to spend time checking data of nodes which may
			 * later be found to be obsolete. So we put off the full
			 * data CRC checking until we have read all the inode
			 * nodes and have started building the fragtree.
			 *
			 * The fragtree is being built starting with nodes
			 * having the highest version number, so we'll be able
			 * to detect whether a node is valid (i.e., it is not
			 * overlapped by a node with higher version) or not.
			 * And we'll be able to check only those nodes, which
			 * are not obsolete.
			 *
			 * Of course, this optimization only makes sense in case
			 * of NAND flashes (or other flashes with
			 * !jffs2_can_mark_obsolete()), since on NOR flashes
			 * nodes are marked obsolete physically.
			 *
			 * Since NAND flashes (or other flashes with
			 * jffs2_is_writebuffered(c)) are anyway read by
			 * fractions of c->wbuf_pagesize, and we have just read
			 * the node header, it is likely that the starting part
			 * of the node data is also read when we read the
			 * header. So we don't mind to check the CRC of the
			 * starting part of the data of the node now, and check
			 * the second part later (in jffs2_check_node_data()).
			 * Of course, we will not need to re-read and re-check
			 * the NAND page which we have just read. This is why we
			 * read the whole NAND page at jffs2_get_inode_nodes(),
			 * while we needed only the node header.
			 */
			unsigned char *buf;

			/* 'buf' will point to the start of data */
			buf = (unsigned char *)rd + sizeof(*rd);
			/* len will be the read data length */
			len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
			tn->partial_crc = crc32(0, buf, len);

			dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);

			/* If we actually calculated the whole data CRC
			 * and it is wrong, drop the node. */
			if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
				JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
					ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
				jffs2_mark_node_obsolete(c, ref);
				goto free_out;
			}

		} else if (csize == 0) {
			/*
			 * We checked the header CRC. If the node has no data, adjust
			 * the space accounting now. For other nodes this will be done
			 * later either when the node is marked obsolete or when its
			 * data is checked.
			 */
			struct jffs2_eraseblock *jeb;

			dbg_readinode("the node has no data.\n");
			jeb = &c->blocks[ref->flash_offset / c->sector_size];
			len = ref_totlen(c, jeb, ref);

			spin_lock(&c->erase_completion_lock);
			jeb->used_size += len;
			jeb->unchecked_size -= len;
			c->used_size += len;
			c->unchecked_size -= len;
			ref->flash_offset = ref_offset(ref) | REF_NORMAL;
			spin_unlock(&c->erase_completion_lock);
		}
	}

	tn->fn = jffs2_alloc_full_dnode();
	if (!tn->fn) {
		JFFS2_ERROR("alloc fn failed\n");
		ret = -ENOMEM;
		goto free_out;
	}

	tn->version = je32_to_cpu(rd->version);
	tn->fn->ofs = je32_to_cpu(rd->offset);
	tn->data_crc = je32_to_cpu(rd->data_crc);
	tn->csize = csize;
	tn->fn->raw = ref;
	tn->overlapped = 0;

	if (tn->version > rii->highest_version)
		rii->highest_version = tn->version;

	/* There was a bug where we wrote hole nodes out with
	   csize/dsize swapped. Deal with it */
	if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
		tn->fn->size = csize;
	else // normal case...
		tn->fn->size = je32_to_cpu(rd->dsize);

	dbg_readinode2("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
		       ref_offset(ref), je32_to_cpu(rd->version),
		       je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);

	ret = jffs2_add_tn_to_tree(c, rii, tn);

	if (ret) {
		jffs2_free_full_dnode(tn->fn);
	free_out:
		jffs2_free_tmp_dnode_info(tn);
		return ret;
	}
#ifdef JFFS2_DBG_READINODE2_MESSAGES
	dbg_readinode2("After adding ver %d:\n", je32_to_cpu(rd->version));
	tn = tn_first(&rii->tn_root);
	while (tn) {
		dbg_readinode2("%p: v %d r 0x%x-0x%x ov %d\n",
			       tn, tn->version, tn->fn->ofs,
			       tn->fn->ofs+tn->fn->size, tn->overlapped);
		tn = tn_next(tn);
	}
#endif
	return 0;
}
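
/*
 * Illustrative sketch, not JFFS2 code: tn->partial_crc works because CRC32
 * can be computed incrementally - checksumming the part of the data that was
 * already read now, and feeding the running value back in for the remainder
 * later, gives the same result as one pass over the whole buffer. The
 * standalone check below demonstrates that property with zlib's crc32().
 */
#include <assert.h>
#include <stddef.h>
#include <zlib.h>

int main(void)
{
	const unsigned char data[] = "some node payload read in two parts";
	size_t first = 10;			/* bytes that were in the first read */
	size_t rest = sizeof(data) - first;	/* read and checked later */

	uLong whole = crc32(0L, data, (uInt)sizeof(data));
	uLong partial = crc32(0L, data, (uInt)first);		/* now */
	partial = crc32(partial, data + first, (uInt)rest);	/* later */

	assert(whole == partial);
	return 0;
}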
Beispiel #22
0
static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
					struct jffs2_inode_cache *ic,
					struct jffs2_full_dirent **dead_fds)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_full_dirent *fd;

	dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino);

	raw = ic->nodes;
	while (raw != (void *)ic) {
		struct jffs2_raw_node_ref *next = raw->next_in_ino;
		dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw));
		jffs2_mark_node_obsolete(c, raw);
		raw = next;
	}

	if (ic->scan_dents) {
		int whinged = 0;
		dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino);

		while(ic->scan_dents) {
			struct jffs2_inode_cache *child_ic;

			fd = ic->scan_dents;
			ic->scan_dents = fd->next;

			if (!fd->ino) {
				/* It's a deletion dirent. Ignore it */
				dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name);
				jffs2_free_full_dirent(fd);
				continue;
			}
			if (!whinged)
				whinged = 1;

			dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino);

			child_ic = jffs2_get_ino_cache(c, fd->ino);
			if (!child_ic) {
				dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n",
						fd->name, fd->ino);
				jffs2_free_full_dirent(fd);
				continue;
			}

			/* Reduce nlink of the child. If it's now zero, stick it on the
			   dead_fds list to be cleaned up later. Else just free the fd */

			if (fd->type == DT_DIR)
				child_ic->pino_nlink = 0;
			else
				child_ic->pino_nlink--;

			if (!child_ic->pino_nlink) {
				dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
					  fd->ino, fd->name);
				fd->next = *dead_fds;
				*dead_fds = fd;
			} else {
				dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
					  fd->ino, fd->name, child_ic->pino_nlink);
				jffs2_free_full_dirent(fd);
			}
		}
	}

	/*
	   We don't delete the inocache from the hash list and free it yet.
	   The erase code will do that, when all the nodes are completely gone.
	*/
}
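
/*
 * Illustrative sketch, not JFFS2 code: dead_fds is used as a simple work
 * list - dirents whose inode just lost its last link are pushed on the head
 * here, and the caller later pops them one at a time and repeats the removal
 * for each. The fragment below shows only that push/pop pattern with a
 * stripped-down node type; it is not the actual caller.
 */
struct dead_fd {
	struct dead_fd *next;
	unsigned int ino;
};

static void push_dead(struct dead_fd **list, struct dead_fd *fd)
{
	fd->next = *list;	/* head insertion, as in the code above */
	*list = fd;
}

static struct dead_fd *pop_dead(struct dead_fd **list)
{
	struct dead_fd *fd = *list;

	if (fd)
		*list = fd->next;
	return fd;		/* caller processes, then frees, this entry */
}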
Beispiel #23
0
/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
   with this ino. Perform a preliminary ordering on data nodes, throwing away
   those which are completely obsoleted by newer ones. The naïve approach we
   used to take, of just returning them _all_ in version order, would cause
   us to run out of memory in certain degenerate cases. */
static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				 struct jffs2_readinode_info *rii)
{
	struct jffs2_raw_node_ref *ref, *valid_ref;
	unsigned char *buf = NULL;
	union jffs2_node_union *node;
	size_t retlen;
	int len, err;

	rii->mctime_ver = 0;

	dbg_readinode("ino #%u\n", f->inocache->ino);

	/* FIXME: in case of NOR and available ->point() this
	 * needs to be fixed. */
	len = sizeof(union jffs2_node_union) + c->wbuf_pagesize;
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	spin_lock(&c->erase_completion_lock);
	valid_ref = jffs2_first_valid_node(f->inocache->nodes);
	if (!valid_ref && f->inocache->ino != 1)
		JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
	while (valid_ref) {
		/* We can hold a pointer to a non-obsolete node without the spinlock,
		   but _obsolete_ nodes may disappear at any time, if the block
		   they're in gets erased. So if we mark 'ref' obsolete while we're
		   not holding the lock, it can go away immediately. For that reason,
		   we find the next valid node first, before processing 'ref'.
		*/
		ref = valid_ref;
		valid_ref = jffs2_first_valid_node(ref->next_in_ino);
		spin_unlock(&c->erase_completion_lock);

		cond_resched();

		/*
		 * At this point we don't know the type of the node we're going
		 * to read, so we do not know the size of its header. In order
		 * to minimize the amount of flash IO we assume the header is
		 * of size = JFFS2_MIN_NODE_HEADER.
		 */
		len = JFFS2_MIN_NODE_HEADER;
		if (jffs2_is_writebuffered(c)) {
			int end, rem;

			/*
			 * We are about to read JFFS2_MIN_NODE_HEADER bytes,
			 * but this flash has some minimal I/O unit. It is
			 * possible that we'll need to read more soon, so read
			 * up to the next min. I/O unit, in order not to
			 * re-read the same min. I/O unit twice.
			 */
			end = ref_offset(ref) + len;
			rem = end % c->wbuf_pagesize;
			if (rem)
				end += c->wbuf_pagesize - rem;
			len = end - ref_offset(ref);
		}

		dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));

		/* FIXME: point() */
		err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf);
		if (err) {
			JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err);
			goto free_out;
		}

		if (retlen < len) {
			JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len);
			err = -EIO;
			goto free_out;
		}

		node = (union jffs2_node_union *)buf;

		/* No need to mask in the valid bit; it shouldn't be invalid */
		if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
			JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n",
				     ref_offset(ref), je16_to_cpu(node->u.magic),
				     je16_to_cpu(node->u.nodetype),
				     je32_to_cpu(node->u.totlen),
				     je32_to_cpu(node->u.hdr_crc));
			jffs2_dbg_dump_node(c, ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}
		if (je16_to_cpu(node->u.magic) != JFFS2_MAGIC_BITMASK) {
			/* Not a JFFS2 node, whinge and move on */
			JFFS2_NOTICE("Wrong magic bitmask 0x%04x in node header at %#08x.\n",
				     je16_to_cpu(node->u.magic), ref_offset(ref));
			jffs2_mark_node_obsolete(c, ref);
			goto cont;
		}

		switch (je16_to_cpu(node->u.nodetype)) {

		case JFFS2_NODETYPE_DIRENT:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent) &&
			    len < sizeof(struct jffs2_raw_dirent)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_direntry(c, ref, &node->d, retlen, rii);
			if (unlikely(err))
				goto free_out;

			break;

		case JFFS2_NODETYPE_INODE:

			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode) &&
			    len < sizeof(struct jffs2_raw_inode)) {
				err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_dnode(c, ref, &node->i, len, rii);
			if (unlikely(err))
				goto free_out;

			break;

		default:
			if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node) &&
			    len < sizeof(struct jffs2_unknown_node)) {
				err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf);
				if (unlikely(err))
					goto free_out;
			}

			err = read_unknown(c, ref, &node->u);
			if (unlikely(err))
				goto free_out;

		}
	cont:
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	kfree(buf);

	f->highest_version = rii->highest_version;

	dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
		      f->inocache->ino, rii->highest_version, rii->latest_mctime,
		      rii->mctime_ver);
	return 0;

 free_out:
	jffs2_free_tmp_dnode_info_list(&rii->tn_root);
	jffs2_free_full_dirent_list(rii->fds);
	rii->fds = NULL;
	kfree(buf);
	return err;
}
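
/*
 * Illustrative sketch, not JFFS2 code: the write-buffer rounding above reads
 * up to the next multiple of c->wbuf_pagesize so that a later read_more()
 * for a larger header does not have to touch the same minimal I/O unit
 * twice. The standalone helper below repeats that arithmetic with a worked
 * example.
 */
#include <stdint.h>

static uint32_t rounded_read_len(uint32_t ofs, uint32_t len, uint32_t wbuf_pagesize)
{
	uint32_t end = ofs + len;
	uint32_t rem = end % wbuf_pagesize;

	if (rem)
		end += wbuf_pagesize - rem;
	return end - ofs;
}

/* e.g. ofs = 0x1234, len = 12, wbuf_pagesize = 512:
 * end = 0x1240, rem = 64, rounded end = 0x1400, so 460 bytes are read and
 * the whole minimal I/O unit containing the node header ends up in 'buf'. */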
Beispiel #24
0
int jffs2_prepare_write (struct file *filp, struct page *pg, unsigned start, unsigned end)
{
	struct inode *inode = pg->mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	uint32_t pageofs = pg->index << PAGE_CACHE_SHIFT;
	int ret = 0;

	D1(printk(KERN_DEBUG "jffs2_prepare_write()\n"));

	if (pageofs > inode->i_size) {
		/* Make new hole frag from old EOF to new page */
		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
		struct jffs2_raw_inode ri;
		struct jffs2_full_dnode *fn;
		uint32_t phys_ofs, alloc_len;
		
		D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
			  (unsigned int)inode->i_size, pageofs));

		ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, ALLOC_NORMAL);
		if (ret)
			return ret;

		down(&f->sem);
		memset(&ri, 0, sizeof(ri));

		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri));
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.mode = cpu_to_jemode(inode->i_mode);
		ri.uid = cpu_to_je16(inode->i_uid);
		ri.gid = cpu_to_je16(inode->i_gid);
		ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
		ri.atime = ri.ctime = ri.mtime = cpu_to_je32(get_seconds());
		ri.offset = cpu_to_je32(inode->i_size);
		ri.dsize = cpu_to_je32(pageofs - inode->i_size);
		ri.csize = cpu_to_je32(0);
		ri.compr = JFFS2_COMPR_ZERO;
		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
		ri.data_crc = cpu_to_je32(0);
		
		fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL);

		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			jffs2_complete_reservation(c);
			up(&f->sem);
			return ret;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		if (ret) {
			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in prepare_write, returned %d\n", ret));
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);
			jffs2_complete_reservation(c);
			up(&f->sem);
			return ret;
		}
		jffs2_complete_reservation(c);
		inode->i_size = pageofs;
		up(&f->sem);
	}
	
	/* Read in the page if it wasn't already present, unless it's a whole page */
	if (!PageUptodate(pg) && (start || end < PAGE_CACHE_SIZE)) {
		down(&f->sem);
		ret = jffs2_do_readpage_nolock(inode, pg);
		up(&f->sem);
	}
	D1(printk(KERN_DEBUG "end prepare_write(). pg->flags %lx\n", pg->flags));
	return ret;
}
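
/*
 * Illustrative sketch, not JFFS2 code: pageofs above is simply the byte
 * offset of the page being prepared (its index shifted by the page size),
 * and the hole frag is only needed when that offset lies beyond the current
 * EOF. The standalone restatement below assumes 4 KiB pages purely for the
 * worked numbers.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT 12u	/* 4096-byte pages, an assumption for this demo */

static bool needs_hole_frag(uint32_t page_index, uint32_t i_size, uint32_t *pageofs)
{
	*pageofs = page_index << DEMO_PAGE_SHIFT;
	return *pageofs > i_size;	/* gap between EOF and the new page */
}

/* e.g. page_index = 3, i_size = 5000: pageofs = 12288 > 5000, so a hole node
 * covering bytes 5000..12288 is written before the page can be prepared. */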
Beispiel #25
0
int jffs2_prepare_write (struct file *filp, struct page *pg, unsigned start, unsigned end)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	__u32 pageofs = pg->index << PAGE_CACHE_SHIFT;
	int ret = 0;

	down(&f->sem);
	D1(printk(KERN_DEBUG "jffs2_prepare_write() nrpages %ld\n", inode->i_mapping->nrpages));

	if (pageofs > inode->i_size) {
		/* Make new hole frag from old EOF to new page */
		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
		struct jffs2_raw_inode ri;
		struct jffs2_full_dnode *fn;
		__u32 phys_ofs, alloc_len;
		
		D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
			  (unsigned int)inode->i_size, pageofs));

		ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, ALLOC_NORMAL);
		if (ret) {
			up(&f->sem);
			return ret;
		}
		memset(&ri, 0, sizeof(ri));

		ri.magic = JFFS2_MAGIC_BITMASK;
		ri.nodetype = JFFS2_NODETYPE_INODE;
		ri.totlen = sizeof(ri);
		ri.hdr_crc = crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4);

		ri.ino = f->inocache->ino;
		ri.version = ++f->highest_version;
		ri.mode = inode->i_mode;
		ri.uid = inode->i_uid;
		ri.gid = inode->i_gid;
		ri.isize = max((__u32)inode->i_size, pageofs);
		ri.atime = ri.ctime = ri.mtime = CURRENT_TIME;
		ri.offset = inode->i_size;
		ri.dsize = pageofs - inode->i_size;
		ri.csize = 0;
		ri.compr = JFFS2_COMPR_ZERO;
		ri.node_crc = crc32(0, &ri, sizeof(ri)-8);
		ri.data_crc = 0;
		
		fn = jffs2_write_dnode(inode, &ri, NULL, 0, phys_ofs, NULL);
		jffs2_complete_reservation(c);
		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			up(&f->sem);
			return ret;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		if (ret) {
			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in prepare_write, returned %d\n", ret));
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);
			up(&f->sem);
			return ret;
		}
		inode->i_size = pageofs;
	}
	

	/* Read in the page if it wasn't already present */
	if (!Page_Uptodate(pg) && (start || end < PAGE_SIZE))
		ret = jffs2_do_readpage_nolock(inode, pg);
	D1(printk(KERN_DEBUG "end prepare_write(). nrpages %ld\n", inode->i_mapping->nrpages));
	up(&f->sem);
	return ret;
}