Code example #1
File: zram_drv.c  Project: Scorpio92/mediatek
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (meta->table[index].size == PAGE_SIZE)
		memcpy(mem, cmem, PAGE_SIZE);
	else
#ifdef CONFIG_MT_ENG_BUILD
	{
		/* Skip the head guide bytes to reach the start of the bitstream */
		cmem += GUIDE_BYTES_HALFLEN;
#endif
		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
						mem, &clen);
#ifdef CONFIG_MT_ENG_BUILD
	}
#endif

	zs_unmap_object(meta->mem_pool, handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
#ifdef CONFIG_MT_ENG_BUILD
		{
			int idx;
			size_t tlen;
			printk(KERN_ALERT "\n@@@@@@@@@@\n");
			tlen = meta->table[index].size + GUIDE_BYTES_LENGTH;
			cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
			/* Head guide bytes */
			for (idx = 0; idx < GUIDE_BYTES_HALFLEN; idx++)
				printk(KERN_ALERT "%x ", (int)*cmem++);
			printk(KERN_ALERT "\n=========\n");
			/* Bitstream, then the tail guide bytes */
			for (; idx < tlen; idx++)
				printk(KERN_ALERT "%x ", (int)*cmem++);
			zs_unmap_object(meta->mem_pool, handle);
			printk(KERN_ALERT "\n!!!!!!!!!\n");
		}
#endif
		return ret;
	}

	return 0;
}
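
Example #1 is MediaTek's vendor variant: on CONFIG_MT_ENG_BUILD (engineering) builds, each compressed object is stored with guard bytes ("guide bytes") on both sides of the bitstream, so a failed decompression can dump the object together with its guards and show whether a neighbouring write clobbered it. The runnable user-space sketch below reproduces that idea; the half-length of 4, the 0x5a fill pattern, and the helper names are assumptions for illustration, not the project's actual GUIDE_BYTES values.

/*
 * Minimal sketch of the guard-byte scheme, assuming a half-length of
 * 4 and a 0x5a fill pattern; the real GUIDE_BYTES_HALFLEN,
 * GUIDE_BYTES_LENGTH, and pattern in the MediaTek tree may differ.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define GUIDE_BYTES_HALFLEN 4
#define GUIDE_BYTES_LENGTH  (2 * GUIDE_BYTES_HALFLEN)
#define GUIDE_PATTERN       0x5a

/* Store: wrap the compressed bitstream in head and tail guards. */
static unsigned char *store_with_guards(const unsigned char *src, size_t len)
{
	unsigned char *obj = malloc(len + GUIDE_BYTES_LENGTH);

	if (!obj)
		return NULL;
	memset(obj, GUIDE_PATTERN, GUIDE_BYTES_HALFLEN);
	memcpy(obj + GUIDE_BYTES_HALFLEN, src, len);
	memset(obj + GUIDE_BYTES_HALFLEN + len, GUIDE_PATTERN,
	       GUIDE_BYTES_HALFLEN);
	return obj;
}

/* Check: a clobbered guard means something wrote past the object. */
static int guards_intact(const unsigned char *obj, size_t len)
{
	size_t i;

	for (i = 0; i < GUIDE_BYTES_HALFLEN; i++)
		if (obj[i] != GUIDE_PATTERN ||
		    obj[GUIDE_BYTES_HALFLEN + len + i] != GUIDE_PATTERN)
			return 0;
	return 1;
}

int main(void)
{
	const unsigned char payload[] = "compressed bitstream";
	unsigned char *obj = store_with_guards(payload, sizeof(payload));

	if (!obj)
		return 1;
	printf("guards intact: %d\n", guards_intact(obj, sizeof(payload)));
	obj[1] = 0;	/* simulate a stray write into the head guard */
	printf("guards intact: %d\n", guards_intact(obj, sizeof(payload)));
	free(obj);
	return 0;
}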
Code example #2
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (meta->table[index].size == PAGE_SIZE)
		memcpy(mem, cmem, PAGE_SIZE);
	else
		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
						mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}
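
Example #2 is the plain upstream form of the same function, without the engineering-build instrumentation. For context, a read-side caller of zram_decompress_page() looks roughly like the sketch below, modeled on the zram_bvec_read() of the same kernel generation; the exact signature and the is_partial_io()/handle_zero_page() helpers vary between trees, so treat this as an outline rather than this project's code.

/*
 * Sketch of a read-side caller, loosely based on the upstream
 * zram_bvec_read() of the same era. is_partial_io() and
 * handle_zero_page() are the usual zram_drv.c helpers.
 */
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page = bvec->bv_page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	/* Unallocated or all-zero pages read back as zeroes. */
	if (unlikely(!meta->table[index].handle) ||
	    zram_test_flag(meta, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	/* Partial reads decompress into a bounce buffer first. */
	if (is_partial_io(bvec))
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	if (unlikely(ret != LZO_E_OK))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
		       bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}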
Code example #3
static struct zram_table_entry *search_node_in_zram_list(struct zram *zram, struct zram_meta *meta,
	struct zram_table_entry *input_node, struct zram_table_entry *found_node, unsigned char *match_content,
	u32 clen)
{
	struct list_head *list_node = NULL;
	struct zram_table_entry *current_node = NULL;
	unsigned char *cmem;
	int one_node_in_list = 0;
	int compare_count = 0;
	int ret;

	list_node = found_node->head.next;
	/*
	 * If the ring contains only found_node itself, force one pass
	 * through the loop so that found_node is still compared.
	 */
	if (list_node == &(found_node->head))
		one_node_in_list = 1;
	while ((list_node != &(found_node->head)) || one_node_in_list) {
		one_node_in_list = 0;
		current_node = list_entry(list_node, struct zram_table_entry, head);
		if ((clen != TABLE_GET_SIZE(current_node->value)) || !zsm_test_flag(meta, current_node,
			ZRAM_FIRST_NODE)) {
			list_node = list_node->next;
		} else {
			if (zsm_test_flag(meta, current_node, ZRAM_ZSM_DONE_NODE) && (current_node->handle != 0)) {
				cmem = zs_map_object(meta->mem_pool, current_node->handle, ZS_MM_RO);
				ret = memcmp(cmem, match_content, TABLE_GET_SIZE(input_node->value));
				compare_count++;
				if (ret == 0) {
					zs_unmap_object(meta->mem_pool, current_node->handle);
					return current_node;
				}
				list_node = list_node->next;
				zs_unmap_object(meta->mem_pool, current_node->handle);
			} else {
				pr_warn("[ZSM] current node %x is not ready, handle is %lx\n",
					current_node->copy_index, current_node->handle);
				list_node = list_node->next;
			}
		}
	}
	return NULL;
}
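
Example #3 is part of MediaTek's ZSM (zram same-page merging) support: it walks a circular list of entries whose compressed size equals the incoming page's and byte-compares each finished candidate, returning the first duplicate so the new page can share its storage. The runnable user-space reduction below shows the same traverse-and-memcmp pattern; the struct and helper names are illustrative stand-ins, not the kernel's <linux/list.h>.

/*
 * User-space reduction of the search loop: walk a circular,
 * doubly-linked list of same-size candidates and memcmp() each
 * payload against the new one.
 */
#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct list_head {
	struct list_head *next, *prev;
};

struct entry {
	struct list_head head;
	const unsigned char *data;
	size_t len;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Return the first entry whose payload matches, or NULL. */
static struct entry *search_duplicate(struct list_head *head,
				      const unsigned char *data, size_t len)
{
	struct list_head *pos;

	for (pos = head->next; pos != head; pos = pos->next) {
		struct entry *e = container_of(pos, struct entry, head);

		if (e->len == len && memcmp(e->data, data, len) == 0)
			return e;
	}
	return NULL;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct entry a = { .data = (const unsigned char *)"abc", .len = 3 };
	struct entry b = { .data = (const unsigned char *)"xyz", .len = 3 };

	list_add_tail(&a.head, &head);
	list_add_tail(&b.head, &head);
	printf("match: %s\n",
	       search_duplicate(&head, (const unsigned char *)"xyz", 3)
	       ? "yes" : "no");
	return 0;
}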
Code example #4
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
	 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (meta->table[index].handle ||
	    zram_test_flag(meta, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		zram->stats.pages_zero++;
		zram_set_flag(meta, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       meta->compress_workmem);

	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		zram->stats.bad_compress++;
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
		src = kmap_atomic(page);
	memcpy(cmem, src, clen);
	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
		kunmap_atomic(src);

	zs_unmap_object(meta->mem_pool, handle);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;

	/* Update stats */
	zram_stat64_add(zram, &zram->stats.compr_size, clen);
	zram->stats.pages_stored++;
	if (clen <= PAGE_SIZE / 2)
		zram->stats.good_compress++;

out:
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		zram_stat64_inc(zram, &zram->stats.failed_writes);
	return ret;
}
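
Example #4 leans on two small predicates that this page does not show: is_partial_io() decides whether a bio_vec covers less than a full page, and page_zero_filled() detects all-zero pages so they can be recorded with the ZRAM_ZERO flag instead of being stored. The definitions below follow the upstream zram_drv.c of the same generation, reproduced from memory as a reference sketch; check the actual tree before relying on the exact form.

static inline int is_partial_io(struct bio_vec *bvec)
{
	/* Anything smaller than a full page is a partial I/O. */
	return bvec->bv_len != PAGE_SIZE;
}

static inline int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page = (unsigned long *)ptr;

	/* Scan one word at a time; any set bit disqualifies the page. */
	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}
	return 1;
}

Note also the fallback at the max_zpage_size check in example #4: a page that compresses poorly is stored raw at PAGE_SIZE, trading a little space for a plain memcpy() instead of a pointless decompression on the read path.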