/*
 * Render the list of supported compressors into @buf, bracketing the
 * currently selected one ("[lzo] lz4 ...").  Returns the number of
 * bytes written, including the trailing newline.
 */
ssize_t zcomp_available_show(const char *comp, char *buf)
{
	bool matched = false;
	ssize_t off = 0;
	int idx;

	/* Reserve two bytes throughout so the final "\n" always fits. */
	for (idx = 0; backends[idx]; idx++) {
		if (strcmp(comp, backends[idx]) == 0) {
			matched = true;
			off += scnprintf(buf + off, PAGE_SIZE - off - 2,
					 "[%s] ", backends[idx]);
		} else {
			off += scnprintf(buf + off, PAGE_SIZE - off - 2,
					 "%s ", backends[idx]);
		}
	}

	/*
	 * Out-of-tree module known to crypto api or a missing
	 * entry in `backends'.
	 */
	if (!matched && crypto_has_comp(comp, 0, 0) == 1)
		off += scnprintf(buf + off, PAGE_SIZE - off - 2,
				 "[%s] ", comp);

	off += scnprintf(buf + off, PAGE_SIZE - off, "\n");
	return off;
}
/*
 * Validate the configured zswap compressor against the crypto API,
 * falling back to the compile-time default, then allocate the per-CPU
 * transform pointer array.
 *
 * Returns 0 on success, -ENODEV when no usable compressor exists, or
 * -ENOMEM when the percpu allocation fails.
 */
static int __init zswap_comp_init(void)
{
	if (crypto_has_comp(zswap_compressor, 0, 0))
		goto found;

	pr_info("%s compressor not available\n", zswap_compressor);
	/* fall back to default compressor */
	zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT;
	if (!crypto_has_comp(zswap_compressor, 0, 0))
		/* can't even load the default compressor */
		return -ENODEV;
found:
	pr_info("using %s compressor\n", zswap_compressor);

	/* alloc percpu transforms */
	zswap_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
	return zswap_comp_pcpu_tfms ? 0 : -ENOMEM;
}
/*
 * Validate the configured zram compressor against the crypto API,
 * falling back to the compile-time default, then allocate the per-CPU
 * transform pointer array.
 *
 * Returns 0 on success, -ENODEV when no usable compressor exists, or
 * -ENOMEM when the percpu allocation fails.
 *
 * Structured to match zswap_comp_init(); the redundant `ret` temporary
 * (only ever tested for zero) is dropped.
 */
static int __init zram_comp_init(void)
{
	if (!crypto_has_comp(zram_compressor, 0, 0)) {
		pr_info("%s is not available\n", zram_compressor);
		/* fall back to the default compressor */
		zram_compressor = ZRAM_COMPRESSOR_DEFAULT;
		if (!crypto_has_comp(zram_compressor, 0, 0))
			/* can't even load the default compressor */
			return -ENODEV;
	}
	pr_info("using %s compressor\n", zram_compressor);

	/* alloc percpu transforms */
	zram_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
	if (!zram_comp_pcpu_tfms)
		return -ENOMEM;
	return 0;
}
static void allocate_buf_for_compression(void) { struct crypto_comp *ctx; int size; char *buf; /* Skip if not built-in or compression backend not selected yet. */ if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend) return; /* Skip if no pstore backend yet or compression init already done. */ if (!psinfo || tfm) return; if (!crypto_has_comp(zbackend->name, 0, 0)) { pr_err("Unknown compression: %s\n", zbackend->name); return; } size = zbackend->zbufsize(psinfo->bufsize); if (size <= 0) { pr_err("Invalid compression size for %s: %d\n", zbackend->name, size); return; } buf = kmalloc(size, GFP_KERNEL); if (!buf) { pr_err("Failed %d byte compression buffer allocation for: %s\n", size, zbackend->name); return; } ctx = crypto_alloc_comp(zbackend->name, 0, 0); if (IS_ERR_OR_NULL(ctx)) { kfree(buf); pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name, PTR_ERR(ctx)); return; } /* A non-NULL big_oops_buf indicates compression is available. */ tfm = ctx; big_oops_buf_sz = size; big_oops_buf = buf; pr_info("Using crash dump compression: %s\n", zbackend->name); }
/*
 * Return true when @comp names a compressor we can use: either one of
 * the entries in `backends', or any algorithm the crypto API reports
 * as available.
 */
bool zcomp_available_algorithm(const char *comp)
{
	int idx;

	for (idx = 0; backends[idx]; idx++) {
		if (sysfs_streq(comp, backends[idx]))
			return true;
	}

	/*
	 * Crypto does not ignore a trailing new line symbol,
	 * so make sure you don't supply a string containing
	 * one.
	 * This also means that we permit zcomp initialisation
	 * with any compressing algorithm known to crypto api.
	 */
	return crypto_has_comp(comp, 0, 0) == 1;
}