Example #1
/*
 * there is no public solution that meets our requirement yet.
 * see: <required buffer size for LZ4_decompress_safe_partial>
 *      https://groups.google.com/forum/#!topic/lz4c/_3kkz5N6n00
 * (an illustrative usage sketch follows this example)
 */
static FORCE_INLINE int customized_lz4_decompress_safe_partial(
	const void * const source,
	void * const dest,
	int inputSize,
	int outputSize)
{
	/* Local Variables */
	const BYTE *ip = (const BYTE *) source;
	const BYTE * const iend = ip + inputSize;

	BYTE *op = (BYTE *) dest;
	BYTE * const oend = op + outputSize;
	BYTE *cpy;

	/* helper tables for copying overlapping matches when offset < 8 */
	static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
	static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };

	/* Empty output buffer */
	if (unlikely(outputSize == 0))
		return ((inputSize == 1) && (*ip == 0)) ? 0 : -1;

	/* Main Loop : decode sequences */
	while (1) {
		size_t length;
		const BYTE *match;
		size_t offset;

		/* get literal length */
		unsigned int const token = *ip++;

		length = token>>ML_BITS;

		if (length == RUN_MASK) {
			unsigned int s;

			do {
				s = *ip++;
				length += s;
			} while ((ip < iend - RUN_MASK) & (s == 255));

			if (unlikely((size_t)(op + length) < (size_t)(op))) {
				/* overflow detection */
				goto _output_error;
			}
			if (unlikely((size_t)(ip + length) < (size_t)(ip))) {
				/* overflow detection */
				goto _output_error;
			}
		}

		/* copy literals */
		cpy = op + length;
		if ((cpy > oend - WILDCOPYLENGTH) ||
			(ip + length > iend - (2 + 1 + LASTLITERALS))) {
			if (cpy > oend) {
				/* flush only the literals that still fit */
				length = oend - op;
				memcpy(op, ip, length);
				op += length;
				break;
			}

			if (unlikely(ip + length > iend)) {
				/*
				 * Error :
				 * read attempt beyond
				 * end of input buffer
				 */
				goto _output_error;
			}

			memcpy(op, ip, length);
			ip += length;
			op += length;

			/*
			 * fewer than two input bytes remain, so no offset can
			 * follow: necessarily EOF due to parsing restrictions
			 */
			if (ip > iend - 2)
				break;
		} else {
			LZ4_wildCopy(op, ip, cpy);
			ip += length;
			op = cpy;
		}

		/* get offset */
		offset = LZ4_readLE16(ip);
		ip += 2;
		match = op - offset;

		if (unlikely(match < (const BYTE *)dest)) {
			/* Error : offset outside buffers */
			goto _output_error;
		}

		/* get matchlength */
		length = token & ML_MASK;
		if (length == ML_MASK) {
			unsigned int s;

			do {
				s = *ip++;

				if (ip > iend - LASTLITERALS)
					goto _output_error;

				length += s;
			} while (s == 255);

			if (unlikely((size_t)(op + length) < (size_t)op)) {
				/* overflow detection */
				goto _output_error;
			}
		}

		length += MINMATCH;

		/* copy match within block */
		cpy = op + length;

		if (unlikely(cpy >= oend - WILDCOPYLENGTH)) {
			if (cpy >= oend) {
				while (op < oend)
					*op++ = *match++;
				break;
			}
			goto __match;
		}

		/* costs ~1%; silence an msan warning when offset == 0 */
		LZ4_write32(op, (U32)offset);

		if (unlikely(offset < 8)) {
			const int dec64 = dec64table[offset];

			op[0] = match[0];
			op[1] = match[1];
			op[2] = match[2];
			op[3] = match[3];
			match += dec32table[offset];
			memcpy(op + 4, match, 4);
			match -= dec64;
		} else {
			LZ4_copy8(op, match);
			match += 8;
		}

		op += 8;

		if (unlikely(cpy > oend - 12)) {
			BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);

			if (op < oCopyLimit) {
				LZ4_wildCopy(op, match, oCopyLimit);
				match += oCopyLimit - op;
				op = oCopyLimit;
			}
__match:
			while (op < cpy)
				*op++ = *match++;
		} else {
			LZ4_copy8(op, match);

			if (length > 16)
				LZ4_wildCopy(op + 8, match + 8, cpy);
		}

		op = cpy; /* correction */
	}
	DBG_BUGON((void *)ip - source > inputSize);
	DBG_BUGON((void *)op - dest > outputSize);

	/* Nb of output bytes decoded */
	return (int) ((void *)op - dest);

	/* Overflow error detected */
_output_error:
	return -ERANGE;
}
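
For reference, here is a minimal caller sketch. It is not taken from the kernel sources: it assumes only the signature shown above and the return convention visible in the code (number of decoded bytes on success, a negative value such as -ERANGE on corrupted or overflowing input). The helper name and buffer parameters are hypothetical.

/* hypothetical illustration: decode at most out_len bytes from 'in' */
static int demo_partial_decode(const void *in, int in_len,
			       void *out, int out_len)
{
	/*
	 * returns the number of bytes written to 'out', or a negative
	 * value (e.g. -ERANGE) if the compressed stream is malformed
	 */
	int nr = customized_lz4_decompress_safe_partial(in, out,
							in_len, out_len);

	if (nr < 0)
		return nr;

	/* only the first 'nr' bytes of 'out' hold decoded data */
	return nr;
}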
Example #2
File: xattr.c Project: avagin/linux
static int init_inode_xattrs(struct inode *inode)
{
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct xattr_iter it;
	unsigned int i;
	struct erofs_xattr_ibody_header *ih;
	struct super_block *sb;
	struct erofs_sb_info *sbi;
	bool atomic_map;
	int ret = 0;

	/* in most cases, the xattrs of this inode are already initialized */
	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
		return 0;

	if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	/* someone has initialized xattrs for us? */
	if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
		goto out_unlock;

	/*
	 * bypass all xattr operations if ->xattr_isize is not greater than
	 * sizeof(struct erofs_xattr_ibody_header), in detail:
	 * 1) if it is too small to contain erofs_xattr_ibody_header, then
	 *    ->xattr_isize must be 0 (i.e. the inode has no xattrs);
	 * 2) if it is exactly sizeof(erofs_xattr_ibody_header), the on-disk
	 *    meaning is currently undefined (it may be used later together
	 *    with a new superblock feature).
	 */
	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
		errln("xattr_isize %d of nid %llu is not supported yet",
		      vi->xattr_isize, vi->nid);
		ret = -ENOTSUPP;
		goto out_unlock;
	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
		if (unlikely(vi->xattr_isize)) {
			DBG_BUGON(1);
			ret = -EIO;
			goto out_unlock;	/* xattr ondisk layout error */
		}
		ret = -ENOATTR;
		goto out_unlock;
	}

	sb = inode->i_sb;
	sbi = EROFS_SB(sb);
	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);

	it.page = erofs_get_inline_page(inode, it.blkaddr);
	if (IS_ERR(it.page)) {
		ret = PTR_ERR(it.page);
		goto out_unlock;
	}

	/*
	 * read in the shared xattr id array (non-atomic, see the kmalloc
	 * below; the layout is sketched after this function)
	 */
	it.kaddr = kmap(it.page);
	atomic_map = false;

	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);

	vi->xattr_shared_count = ih->h_shared_count;
	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
						sizeof(uint), GFP_KERNEL);
	if (!vi->xattr_shared_xattrs) {
		xattr_iter_end(&it, atomic_map);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* let's skip ibody header */
	it.ofs += sizeof(struct erofs_xattr_ibody_header);

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		if (unlikely(it.ofs >= EROFS_BLKSIZ)) {
			/* cannot be unaligned */
			BUG_ON(it.ofs != EROFS_BLKSIZ);
			xattr_iter_end(&it, atomic_map);

			it.page = erofs_get_meta_page(sb, ++it.blkaddr,
						      S_ISDIR(inode->i_mode));
			if (IS_ERR(it.page)) {
				kfree(vi->xattr_shared_xattrs);
				vi->xattr_shared_xattrs = NULL;
				ret = PTR_ERR(it.page);
				goto out_unlock;
			}

			it.kaddr = kmap_atomic(it.page);
			atomic_map = true;
			it.ofs = 0;
		}
		vi->xattr_shared_xattrs[i] =
			le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
		it.ofs += sizeof(__le32);
	}
	xattr_iter_end(&it, atomic_map);

	set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);

out_unlock:
	clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
	return ret;
}
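
To make the loop above easier to follow, here is an illustrative sketch (not part of xattr.c) of the inline area it parses: an erofs_xattr_ibody_header whose h_shared_count field is followed by that many little-endian 32-bit shared xattr ids. The helper name and the flat 'buf' argument are hypothetical, and unlike the real code it assumes the whole array sits in a single already-mapped block, so the kmap/kmap_atomic and block-crossing handling are omitted.

/* hypothetical sketch: copy the shared xattr id array out of a mapped buffer */
static int demo_read_shared_xattr_ids(const void *buf, unsigned int max_ids,
				      unsigned int *ids)
{
	const struct erofs_xattr_ibody_header *ih = buf;
	const __le32 *shared = (const __le32 *)(ih + 1);
	unsigned int i, count = ih->h_shared_count;

	if (count > max_ids)
		return -ERANGE;

	for (i = 0; i < count; ++i)
		ids[i] = le32_to_cpu(shared[i]);

	return count;	/* number of shared xattr ids copied */
}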