static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;
	u16 size = meta->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			zram->stats.pages_zero--;
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		zram->stats.bad_compress--;

	/* Return the compressed object to the zsmalloc pool. */
	zs_free(meta->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		zram->stats.good_compress--;

	/* Update size accounting and clear the table entry. */
	zram_stat64_sub(zram, &zram->stats.compr_size,
			meta->table[index].size);
	zram->stats.pages_stored--;

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}
static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	/*
	 * Pages that compressed poorly were stored as-is in a full page;
	 * just release that page and fix up the stats.
	 */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	/* Look up the object's size before returning it to the xvmalloc pool. */
	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	/* Update size accounting and clear the table entry. */
	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}