/*
 * returns 0 if the page was successfully decompressed
 * return -1 on entry not found or error
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	if (entry->zero_flag == 1) {
		dst = kmap_atomic(page);
		memset(dst, 0, PAGE_SIZE);
		kunmap_atomic(dst);
		goto zeropage_out;
	}

	/* decompress */
	dlen = PAGE_SIZE;
	src = (u8 *)zpool_map_handle(zswap_pool, entry->handle,
			ZPOOL_MM_RO) + sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length,
			    dst, &dlen);
	if (ret) {
		hexdump("src buffer", src, entry->length);
		if (dlen)
			hexdump("dest buffer", dst, dlen);
		printk("zswap_comp_op returned %d\n", ret);
	}
	kunmap_atomic(dst);
	zpool_unmap_handle(zswap_pool, entry->handle);
	BUG_ON(ret);

zeropage_out:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return 0;
}
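The error path above calls a local hexdump() helper that is not a stock kernel function. A minimal sketch of such a helper, assuming it is just a thin wrapper around the real print_hex_dump() API with a label, could look like this:

/* Hypothetical debug helper (not part of mainline zswap): dump a
 * labelled buffer so a failed decompression can be inspected.
 * print_hex_dump() from <linux/printk.h> is the real API being wrapped. */
static void hexdump(const char *title, u8 *buf, unsigned int len)
{
	printk(KERN_ERR "%s (%u bytes):\n", title, len);
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET,
		       16, 1, buf, len, false);
}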
/*
 * returns 0 if the page was successfully decompressed
 * return -1 on entry not found or error
 */
static int zswap_frontswap_load(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;

	/* find */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was written back */
		spin_unlock(&tree->lock);
		return -1;
	}
	spin_unlock(&tree->lock);

	/* decompress */
	dlen = PAGE_SIZE;
	src = (u8 *)zpool_map_handle(zswap_pool, entry->handle,
			ZPOOL_MM_RO) + sizeof(struct zswap_header);
	dst = kmap_atomic(page);
	ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length,
			    dst, &dlen);
	kunmap_atomic(dst);
	zpool_unmap_handle(zswap_pool, entry->handle);
	BUG_ON(ret);

	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return 0;
}
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* if this page got EIO on pageout before, give up immediately */
	if (PageError(page)) {
		ret = -ENOMEM;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zpool_shrink(zswap_pool, 1, NULL)) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* compress */
	src = kmap_atomic(page);
	if (page_zero_filled(src)) {
		atomic_inc(&zswap_zero_pages);
		entry->zero_flag = 1;
		kunmap_atomic(src);

		handle = 0;
		dlen = PAGE_SIZE;
		goto zeropage_out;
	}
	dst = get_cpu_var(zswap_dstmem);
	ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	if (ret) {
		ret = -EINVAL;
		goto freepage;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zpool_malloc(zswap_pool, len, __GFP_NORETRY | __GFP_NOWARN,
			   &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto freepage;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto freepage;
	}
	zhdr = zpool_map_handle(zswap_pool, handle, ZPOOL_MM_RW);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zpool_unmap_handle(zswap_pool, handle);
	put_cpu_var(zswap_dstmem);

zeropage_out:
	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_pool_total_size = zpool_get_total_size(zswap_pool);
	zswap_pool_pages = zswap_pool_total_size >> PAGE_SHIFT;

	return 0;

freepage:
	put_cpu_var(zswap_dstmem);
	zswap_entry_cache_free(entry);
reject:
	return ret;
}
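The store path above relies on a page_zero_filled() helper that is not shown in this listing. A minimal sketch, assuming it simply scans the mapped page one unsigned long at a time and bails out on the first non-zero word, might be:

/* Assumed helper: return true if the mapped page contains only zeroes.
 * The name and signature are taken from the call site above; the body
 * is an illustrative sketch, not the exact implementation. */
static bool page_zero_filled(void *ptr)
{
	unsigned long *page = (unsigned long *)ptr;
	unsigned int pos;

	for (pos = 0; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return false;
	}
	return true;
}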
/*
 * Attempts to free an entry by adding a page to the swap cache,
 * decompressing the entry data into the page, and issuing a
 * bio write to write the page back to the swap device.
 *
 * This can be thought of as a "resumed writeback" of the page
 * to the swap device. We are basically resuming the same swap
 * writeback path that was intercepted with the frontswap_store()
 * in the first place. After the page has been decompressed into
 * the swap cache, the compressed version stored by zswap can be
 * freed.
 */
static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
{
	struct zswap_header *zhdr;
	swp_entry_t swpentry;
	struct zswap_tree *tree;
	pgoff_t offset;
	struct zswap_entry *entry;
	struct page *page;
	u8 *src, *dst;
	unsigned int dlen;
	int ret;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
	};

	/* extract swpentry from data */
	zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry; /* here */
	zpool_unmap_handle(pool, handle);
	tree = zswap_trees[swp_type(swpentry)];
	offset = swp_offset(swpentry);

	/* find and ref zswap entry */
	spin_lock(&tree->lock);
	entry = zswap_entry_find_get(&tree->rbroot, offset);
	if (!entry) {
		/* entry was invalidated */
		spin_unlock(&tree->lock);
		return 0;
	}
	spin_unlock(&tree->lock);
	BUG_ON(offset != entry->offset);

	/* try to allocate swap cache page */
	switch (zswap_get_swap_cache_page(swpentry, &page)) {
	case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
		ret = -ENOMEM;
		goto fail;

	case ZSWAP_SWAPCACHE_EXIST:
		/* page is already in the swap cache, ignore for now */
		page_cache_release(page);
		ret = -EEXIST;
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zpool_map_handle(zswap_pool, entry->handle,
				ZPOOL_MM_RO) + sizeof(struct zswap_header);
		dst = kmap_atomic(page);
		ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src,
				entry->length, dst, &dlen);
		kunmap_atomic(dst);
		zpool_unmap_handle(zswap_pool, entry->handle);
		BUG_ON(ret);
		BUG_ON(dlen != PAGE_SIZE);

		/* page is up to date */
		SetPageUptodate(page);
	}

	/* move it to the tail of the inactive list after end_writeback */
	SetPageReclaim(page);

	/* start writeback */
	__swap_writepage(page, &wbc, end_swap_bio_write);
	page_cache_release(page);
	zswap_written_back_pages++;

	spin_lock(&tree->lock);
	/* drop local reference */
	zswap_entry_put(tree, entry);

	/*
	 * There are two possible situations for entry here:
	 * (1) refcount is 1 (normal case), entry is valid and on the tree
	 * (2) refcount is 0, entry is freed and not on the tree
	 *     because invalidate happened during writeback
	 * search the tree and free the entry if found
	 */
	if (entry == zswap_rb_search(&tree->rbroot, offset))
		zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	goto end;

	/*
	 * if we get here due to ZSWAP_SWAPCACHE_EXIST
	 * a load may be happening concurrently
	 * it is safe and okay to not free the entry
	 * if we free the entry in the following put
	 * it is also okay to return !0
	 */
fail:
	spin_lock(&tree->lock);
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

end:
	return ret;
}
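For reference, the ZSWAP_SWAPCACHE_* values switched on above are the return codes of zswap_get_swap_cache_page(); in mm/zswap.c they are defined as a small enum along these lines (the trailing comments here just restate how each case is handled above):

enum zswap_get_swap_ret {
	ZSWAP_SWAPCACHE_NEW,	/* page newly added to the swap cache, locked */
	ZSWAP_SWAPCACHE_EXIST,	/* page was already in the swap cache */
	ZSWAP_SWAPCACHE_FAIL,	/* no memory or entry was invalidated */
};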
/* attempts to compress and store a single page */
static int zswap_frontswap_store(unsigned type, pgoff_t offset,
				struct page *page)
{
	struct zswap_tree *tree = zswap_trees[type];
	struct zswap_entry *entry, *dupentry;
	int ret;
	unsigned int dlen = PAGE_SIZE, len;
	unsigned long handle;
	char *buf;
	u8 *src, *dst;
	struct zswap_header *zhdr;

	if (!tree) {
		ret = -ENODEV;
		goto reject;
	}

	/* reclaim space if needed */
	if (zswap_is_full()) {
		zswap_pool_limit_hit++;
		if (zbud_reclaim_page(tree->pool, 8)) {
			zswap_reject_reclaim_fail++;
			ret = -ENOMEM;
			goto reject;
		}
	}

	/* allocate entry */
	entry = zswap_entry_cache_alloc(GFP_KERNEL);
	if (!entry) {
		zswap_reject_kmemcache_fail++;
		ret = -ENOMEM;
		goto reject;
	}

	/* compress */
	dst = get_cpu_var(zswap_dstmem);
	src = kmap_atomic(page);
	ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen);
	kunmap_atomic(src);
	if (ret) {
		ret = -EINVAL;
		goto freepage;
	}

	/* store */
	len = dlen + sizeof(struct zswap_header);
	ret = zbud_alloc(tree->pool, len, __GFP_NORETRY | __GFP_NOWARN,
			 &handle);
	if (ret == -ENOSPC) {
		zswap_reject_compress_poor++;
		goto freepage;
	}
	if (ret) {
		zswap_reject_alloc_fail++;
		goto freepage;
	}
	zhdr = zbud_map(tree->pool, handle);
	zhdr->swpentry = swp_entry(type, offset);
	buf = (u8 *)(zhdr + 1);
	memcpy(buf, dst, dlen);
	zbud_unmap(tree->pool, handle);
	put_cpu_var(zswap_dstmem);

	/* populate entry */
	entry->offset = offset;
	entry->handle = handle;
	entry->length = dlen;

	/* map */
	spin_lock(&tree->lock);
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			zswap_duplicate_entry++;
			/* remove from rbtree */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);
	spin_unlock(&tree->lock);

	/* update stats */
	atomic_inc(&zswap_stored_pages);
	zswap_pool_pages = zbud_get_pool_size(tree->pool);

	return 0;

freepage:
	put_cpu_var(zswap_dstmem);
	zswap_entry_cache_free(entry);
reject:
	return ret;
}