static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, .update = ghash_update, .final = ghash_final, .setkey = ghash_setkey, .descsize = sizeof(struct ghash_desc_ctx), .base = { .cra_name = "__ghash", .cra_driver_name = "__ghash-pclmulqdqni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_ctxsize = sizeof(struct ghash_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(ghash_alg.base.cra_list), }, }; static int ghash_async_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct ahash_request *cryptd_req = ahash_request_ctx(req); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; if (!irq_fpu_usable()) { memcpy(cryptd_req, req, sizeof(*req)); ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); return crypto_ahash_init(cryptd_req); } else {
.sync_page = block_sync_page, .set_page_dirty = __set_page_dirty_nobuffers, .migratepage = migrate_page, }; static struct backing_dev_info swap_backing_dev_info = { .name = "swap", .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED, .unplug_io_fn = swap_unplug_io_fn, }; struct address_space swapper_space = { .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN), .tree_lock = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock), .a_ops = &swap_aops, .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear), .backing_dev_info = &swap_backing_dev_info, }; #define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0) static struct { unsigned long add_total; unsigned long del_total; unsigned long find_success; unsigned long find_total; } swap_cache_info; void show_swap_cache_info(void) { printk("%lu pages in swap cache\n", total_swapcache_pages);
if (walk.src.virt.addr != walk.dst.virt.addr) memcpy(walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes); err = blkcipher_walk_done(desc, &walk, 0); } return err; } static struct crypto_alg compress_null = { .cra_name = "compress_null", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_blocksize = NULL_BLOCK_SIZE, .cra_ctxsize = 0, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(compress_null.cra_list), .cra_u = { .compress = { .coa_compress = null_compress, .coa_decompress = null_compress } } }; static struct shash_alg digest_null = { .digestsize = NULL_DIGEST_SIZE, .setkey = null_hash_setkey, .init = null_init, .update = null_update, .finup = null_digest, .digest = null_digest, .final = null_final, .base = { .cra_name = "digest_null",
AES_BLOCK_SIZE); break; } } static struct crypto_alg aes_alg = { .cra_name = "aes", .cra_driver_name = "aes-s390", .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_aes_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = aes_set_key, .cia_encrypt = aes_encrypt, .cia_decrypt = aes_decrypt, } } }; static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
#include <linux/rculist.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/percpu.h> #include <linux/init.h> #include <linux/smp.h> #include <linux/cpu.h> static DEFINE_PER_CPU(struct call_single_queue, call_single_queue); static struct { struct list_head queue; spinlock_t lock; } call_function __cacheline_aligned_in_smp = { .queue = LIST_HEAD_INIT(call_function.queue), .lock = __SPIN_LOCK_UNLOCKED(call_function.lock), }; enum { CSD_FLAG_LOCK = 0x01, }; struct call_function_data { struct call_single_data csd; spinlock_t lock; unsigned int refs; cpumask_var_t cpumask; }; struct call_single_queue {
.trace = print_syscall_exit, }; struct ftrace_event_class __refdata event_class_syscall_enter = { .system = "syscalls", .reg = syscall_enter_register, .define_fields = syscall_enter_define_fields, .get_fields = syscall_get_enter_fields, .raw_init = init_syscall_trace, }; struct ftrace_event_class __refdata event_class_syscall_exit = { .system = "syscalls", .reg = syscall_exit_register, .define_fields = syscall_exit_define_fields, .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields), .raw_init = init_syscall_trace, }; unsigned long __init __weak arch_syscall_addr(int nr) { return (unsigned long)sys_call_table[nr]; } void __init init_ftrace_syscalls(void) { struct syscall_metadata *meta; unsigned long addr; int i; syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
/* NOTE(review): boundary-truncated fragment (function start and map_tboot_page body are cut off).
 * tboot_mm's .mmap_sem, .page_table_lock and .mmlist initializers name init_mm, not tboot_mm:
 * LIST_HEAD_INIT(init_mm.mmlist) makes tboot_mm.mmlist point at init_mm's member rather than
 * self-linking, and the lock initializers tag the locks with init_mm's names. Presumably harmless
 * here because tboot_mm is only used as a page-table container for switch_to_tboot_pt(), but
 * TODO confirm this is intentional and not a copy-paste from the init_mm initializer. */
pr_debug("version: %d\n", tboot->version); pr_debug("log_addr: 0x%08x\n", tboot->log_addr); pr_debug("shutdown_entry: 0x%x\n", tboot->shutdown_entry); pr_debug("tboot_base: 0x%08x\n", tboot->tboot_base); pr_debug("tboot_size: 0x%x\n", tboot->tboot_size); } static pgd_t *tboot_pg_dir; static struct mm_struct tboot_mm = { .mm_rb = RB_ROOT, .pgd = swapper_pg_dir, .mm_users = ATOMIC_INIT(2), .mm_count = ATOMIC_INIT(1), .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem), .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), .mmlist = LIST_HEAD_INIT(init_mm.mmlist), }; static inline void switch_to_tboot_pt(void) { write_cr3(virt_to_phys(tboot_pg_dir)); } static int map_tboot_page(unsigned long vaddr, unsigned long pfn, pgprot_t prot) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte;
u8 hash[SHA512_DIGEST_SIZE]; sha512_final(tfm, hash); memcpy(out, hash, SHA384_DIGEST_SIZE); /* copy digest to out */ memset(sctx, 0, sizeof *sctx); /* wipe context */ } static struct crypto_alg sha512_alg = { .cra_name = "sha512", .cra_driver_name = "sha512-s390", .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_sha512_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(sha512_alg.cra_list), .cra_u = { .digest = { .dia_digestsize = SHA512_DIGEST_SIZE, .dia_init = sha512_init, .dia_update = sha512_update, .dia_final = sha512_final } } }; static struct crypto_alg sha384_alg = { .cra_name = "sha384", .cra_driver_name = "sha384-s390", .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct s390_sha512_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(sha384_alg.cra_list),
.end = IRQ_LCDIF_DMA, }, { .flags = IORESOURCE_IRQ, .start = IRQ_LCDIF_ERROR, .end = IRQ_LCDIF_ERROR, }, { .flags = IORESOURCE_IRQ, .start = IRQ_PXP, .end = IRQ_PXP, }, }; static struct stmp3xxx_platform_fb_data stmp3xxx_framebuffer_pdata = { .list = LIST_HEAD_INIT(stmp3xxx_framebuffer_pdata.list), }; struct platform_device stmp3xxx_framebuffer = { .name = "stmp3xxx-fb", .id = -1, .dev = { .dma_mask = &common_dmamask, .coherent_dma_mask = COMMON_COHERENT_DMAMASK, .platform_data = &stmp3xxx_framebuffer_pdata, }, .num_resources = ARRAY_SIZE(fb_resource), .resource = fb_resource, }; /* PxP */
.bAssocTerminal = SPEAKER_OUTPUT_TERMINAL_ID, .wChannelConfig = 0x3, }; struct uac1_output_terminal_descriptor speaker_output_terminal_desc = { .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, .bTerminalID = SPEAKER_OUTPUT_TERMINAL_ID, .wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER, .bAssocTerminal = SPEAKER_INPUT_TERMINAL_ID, .bSourceID = SPEAKER_INPUT_TERMINAL_ID, }; static struct usb_audio_control speaker_mute_control = { .list = LIST_HEAD_INIT(speaker_mute_control.list), .name = "Speaker Mute Control", .type = UAC_FU_MUTE, /* Todo: add real Mute control code */ .set = generic_set_cmd, .get = generic_get_cmd, }; static struct usb_audio_control speaker_volume_control = { .list = LIST_HEAD_INIT(speaker_volume_control.list), .name = "Speaker Volume Control", .type = UAC_FU_VOLUME, /* Todo: add real Volume control code */ .set = generic_set_cmd, .get = generic_get_cmd, };
cma_map_length = len; return 0; } #if defined CONFIG_CMA_CMDLINE early_param("cma.map", cma_map_param); #endif /************************* Early regions *************************/ struct list_head cma_early_regions __initdata = LIST_HEAD_INIT(cma_early_regions); #ifdef CONFIG_CMA_CMDLINE /* * regions-attr ::= [ regions [ ';' ] ] * regions ::= region [ ';' regions ] * * region ::= [ '-' ] reg-name * '=' size * [ '@' start ] * [ '/' alignment ] * [ ':' alloc-name ] * * See Documentation/contiguous-memory.txt for details. *
/* * Locking model: * * audit_filter_mutex: * Synchronizes writes and blocking reads of audit's filterlist * data. Rcu is used to traverse the filterlist and access * contents of structs audit_entry, audit_watch and opaque * LSM rules during filtering. If modified, these structures * must be copied and replace their counterparts in the filterlist. * An audit_parent struct is not accessed during filtering, so may * be written directly provided audit_filter_mutex is held. */ /* Audit filter lists, defined in <linux/audit.h> */ struct list_head audit_filter_list[AUDIT_NR_FILTERS] = { LIST_HEAD_INIT(audit_filter_list[0]), LIST_HEAD_INIT(audit_filter_list[1]), LIST_HEAD_INIT(audit_filter_list[2]), LIST_HEAD_INIT(audit_filter_list[3]), LIST_HEAD_INIT(audit_filter_list[4]), LIST_HEAD_INIT(audit_filter_list[5]), #if AUDIT_NR_FILTERS != 6 #error Fix audit_filter_list initialiser #endif }; static struct list_head audit_rules_list[AUDIT_NR_FILTERS] = { LIST_HEAD_INIT(audit_rules_list[0]), LIST_HEAD_INIT(audit_rules_list[1]), LIST_HEAD_INIT(audit_rules_list[2]), LIST_HEAD_INIT(audit_rules_list[3]), LIST_HEAD_INIT(audit_rules_list[4]),
void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs) { irq_err_count += 1; printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq); } static struct irqchip bad_chip = { .ack = dummy_mask_unmask_irq, .mask = dummy_mask_unmask_irq, .unmask = dummy_mask_unmask_irq, }; static struct irqdesc bad_irq_desc = { .chip = &bad_chip, .handle = do_bad_IRQ, .pend = LIST_HEAD_INIT(bad_irq_desc.pend), .disable_depth = 1, }; #ifdef CONFIG_SMP void synchronize_irq(unsigned int irq) { struct irqdesc *desc = irq_desc + irq; while (desc->running) barrier(); } EXPORT_SYMBOL(synchronize_irq); #define smp_set_running(desc) do { desc->running = 1; } while (0) #define smp_clear_running(desc) do { desc->running = 0; } while (0)
#include <linux/fs.h> #include <linux/version.h> #include <asm/uaccess.h> #include <linux/file.h> #include <linux/kmod.h> #include "llite_internal.h" /* for obd_capa.c_list, client capa might stay in three places: * 1. ll_capa_list. * 2. ll_idle_capas. * 3. stand alone: just allocated. */ /* capas for oss writeback and those failed to renew */ static struct list_head ll_idle_capas = LIST_HEAD_INIT(ll_idle_capas); static struct ptlrpc_thread ll_capa_thread; static struct list_head *ll_capa_list = &capa_list[CAPA_SITE_CLIENT]; /* llite capa renewal timer */ struct timer_list ll_capa_timer; /* for debug: indicate whether capa on llite is enabled or not */ static atomic_t ll_capa_debug = ATOMIC_INIT(0); static unsigned long long ll_capa_renewed = 0; static unsigned long long ll_capa_renewal_noent = 0; static unsigned long long ll_capa_renewal_failed = 0; static unsigned long long ll_capa_renewal_retries = 0; static int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa); static inline void update_capa_timer(struct obd_capa *ocapa, cfs_time_t expiry)
const struct ip_vs_iphdr *iph, __be16 dport, __be16 cport) { if (likely(iph->protocol == IPPROTO_UDP)) return ip_vs_new_conn_out(svc, dest, skb, iph, dport, cport); /* currently no need to handle other than UDP */ return NULL; } static struct ip_vs_pe ip_vs_sip_pe = { .name = "sip", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, .n_list = LIST_HEAD_INIT(ip_vs_sip_pe.n_list), .fill_param = ip_vs_sip_fill_param, .ct_match = ip_vs_sip_ct_match, .hashkey_raw = ip_vs_sip_hashkey_raw, .show_pe_data = ip_vs_sip_show_pe_data, .conn_out = ip_vs_sip_conn_out, }; static int __init ip_vs_sip_init(void) { return register_ip_vs_pe(&ip_vs_sip_pe); } static void __exit ip_vs_sip_cleanup(void) { unregister_ip_vs_pe(&ip_vs_sip_pe);
/* NOTE(review): everything after the #include is guarded by "#if 0", i.e. compiled out; the
 * closing #endif and the body of btstack_run_loop_schedule() are cut off by the chunk boundary.
 * Two review points if this code is ever re-enabled:
 *  1. timer_head/run_loop_head are statically initialized with LIST_HEAD_INIT and then
 *     re-initialized with INIT_LIST_HEAD in btstack_run_loop_init() — the runtime init is
 *     redundant (harmless, but one of the two should go).
 *  2. btstack_run_loop_register's callback parameter is declared "void *(process)(data_source_t*)"
 *     — a function returning void* — while the timer callback returns plain void; presumably
 *     "void (*process)(data_source_t *)" was intended. TODO confirm against
 *     __run_loop_register()'s prototype. */
#include "ble/btstack_run_loop.h" #if 0 static struct list_head timer_head = LIST_HEAD_INIT(timer_head); void btstack_timer_remove(timer_source_t *timer) { } void btstack_timer_register(timer_source_t *timer, u32 msecs, void (*process)(timer_source_t *timer)) { } static struct list_head run_loop_head = LIST_HEAD_INIT(run_loop_head); void btstack_run_loop_init() { INIT_LIST_HEAD(&timer_head); INIT_LIST_HEAD(&run_loop_head); } void btstack_run_loop_register(data_source_t *loop, void *(process)(data_source_t*)) { __run_loop_register(&run_loop_head, loop, process); } void btstack_run_loop_schedule() {
IP_VS_DBG_BUF(6, "DH: destination IP address %s --> server %s:%d\n", IP_VS_DBG_ADDR(svc->af, &iph.daddr), IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port)); return dest; } static struct ip_vs_scheduler ip_vs_dh_scheduler = { .name = "dh", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list), .init_service = ip_vs_dh_init_svc, .done_service = ip_vs_dh_done_svc, .update_service = ip_vs_dh_update_svc, .schedule = ip_vs_dh_schedule, }; static int __init ip_vs_dh_init(void) { return register_ip_vs_scheduler(&ip_vs_dh_scheduler); } static void __exit ip_vs_dh_cleanup(void) {
* */ #include <stdio.h> #include <string.h> #include <common/config.h> #include <common/errors.h> #include <common/mini-clist.h> #include <common/standard.h> #include <proto/acl.h> #include <proto/fd.h> /* List head of all registered protocols */ static struct list protocols = LIST_HEAD_INIT(protocols); /* This function adds the specified listener's file descriptor to the polling * lists if it is in the LI_LISTEN state. The listener enters LI_READY or * LI_FULL state depending on its number of connections. */ void enable_listener(struct listener *listener) { if (listener->state == LI_LISTEN) { if (listener->nbconn < listener->maxconn) { EV_FD_SET(listener->fd, DIR_RD); listener->state = LI_READY; } else { listener->state = LI_FULL; } }
ok ( store_setting ( _settings, _setting, raw, \ sizeof ( raw ) ) == 0 ); \ ok ( fetchn_setting ( _settings, _setting, NULL, NULL, \ &actual ) == 0 ); \ DBGC ( _settings, "Fetched %s %#lx from:\n", \ (_setting)->type->name, actual ); \ DBGC_HDA ( _settings, 0, raw, sizeof ( raw ) ); \ ok ( actual == ( unsigned long ) _numeric ); \ } while ( 0 ) /** Test generic settings block */ struct generic_settings test_generic_settings = { .settings = { .refcnt = NULL, .siblings = LIST_HEAD_INIT ( test_generic_settings.settings.siblings ), .children = LIST_HEAD_INIT ( test_generic_settings.settings.children ), .op = &generic_settings_operations, }, .list = LIST_HEAD_INIT ( test_generic_settings.list ), }; /** Test settings block */ #define test_settings test_generic_settings.settings /** Test string setting */ static struct setting test_string_setting = { .name = "test_string", .type = &setting_type_string, };
} crypto_free_hash(ctx(tfm)->fallback.tfm); ctx(tfm)->fallback.tfm = NULL; } static struct crypto_alg sha1_alg = { .cra_name = "sha1", .cra_driver_name = "sha1-padlock", .cra_priority = PADLOCK_CRA_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_DIGEST | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct padlock_sha_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(sha1_alg.cra_list), .cra_init = padlock_sha1_cra_init, .cra_exit = padlock_cra_exit, .cra_u = { .digest = { .dia_digestsize = SHA1_DIGEST_SIZE, .dia_init = padlock_sha_init, .dia_update = padlock_sha_update, .dia_final = padlock_sha_final, } } }; static struct crypto_alg sha256_alg = { .cra_name = "sha256", .cra_driver_name = "sha256-padlock",
#endif #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT) #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT) /* * These are the page tables (2MB each) covering uncached, DMA consistent allocations */ static pte_t *consistent_pte[NUM_CONSISTENT_PTES]; #include "vmregion.h" static struct arm_vmregion_head consistent_head = { .vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock), .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), .vm_start = CONSISTENT_BASE, .vm_end = CONSISTENT_END, }; #ifdef CONFIG_HUGETLB_PAGE #error ARM Coherent DMA allocator does not (yet) support huge TLB #endif /* * Initialise the consistent memory allocation. */ static int __init consistent_init(void) { int ret = 0; pgd_t *pgd;
* * Portal-RPC reconnection and replay operations, for use in recovery. */ #define DEBUG_SUBSYSTEM S_RPC #include <obd_support.h> #include <obd_class.h> #include "ptlrpc_internal.h" static int suppress_pings; CFS_MODULE_PARM(suppress_pings, "i", int, 0644, "Suppress pings"); struct mutex pinger_mutex; static struct list_head pinger_imports = LIST_HEAD_INIT(pinger_imports); static struct list_head timeout_list = LIST_HEAD_INIT(timeout_list); int ptlrpc_pinger_suppress_pings() { return suppress_pings; } EXPORT_SYMBOL(ptlrpc_pinger_suppress_pings); struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp) { struct ptlrpc_request *req; req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
/* Const, except for .mc.list */ static struct line_driver driver = { .name = "UML serial line", .device_name = "ttyS", .major = TTY_MAJOR, .minor_start = 64, .type = TTY_DRIVER_TYPE_SERIAL, .subtype = 0, .read_irq = SSL_IRQ, .read_irq_name = "ssl", .write_irq = SSL_WRITE_IRQ, .write_irq_name = "ssl-write", .mc = { .list = LIST_HEAD_INIT(driver.mc.list), .name = "ssl", .config = ssl_config, .get_config = ssl_get_config, .id = line_id, .remove = ssl_remove, }, }; /* The array is initialized by line_init, at initcall time. The * elements are locked individually as needed. */ static struct line serial_lines[NR_PORTS] = { [0 ... NR_PORTS - 1] = LINE_INIT(CONFIG_SSL_CHAN, &driver) }; static int ssl_config(char *str, char **error_out)
struct result { uint32_t rx_ts; uint32_t tx_ts; }; #define FR_N_RES 128 struct result_frame { struct result r[FR_N_RES]; uint8_t key; uint64_t ts; } __attribute__ ((packed)); struct list_head g_pkt_queue[2] = { LIST_HEAD_INIT(g_pkt_queue[0]), LIST_HEAD_INIT(g_pkt_queue[1]) }; struct enqueued_frame { struct list_node node; struct result_frame fr; } __attribute__ ((packed)); #define DIST_SZ (1 << 25) unsigned long long dist1[DIST_SZ], dist2[DIST_SZ], dist_min[DIST_SZ]; void result_ntoh(struct result *r) { r->rx_ts = ntohl(r->rx_ts); r->tx_ts = ntohl(r->tx_ts);
watchdog. */ if ((!handled) && (preop_val == WDOG_PREOP_PANIC)) { /* On some machines, the heartbeat will give an error and not work unless we re-enable the timer. So do so. */ pretimeout_since_last_heartbeat = 1; if (atomic_inc_and_test(&preop_panic_excl)) panic(PFX "pre-timeout"); } return NOTIFY_DONE; } static struct nmi_handler ipmi_nmi_handler = { .link = LIST_HEAD_INIT(ipmi_nmi_handler.link), .dev_name = "ipmi_watchdog", .dev_id = NULL, .handler = ipmi_nmi, .priority = 0, /* Call us last. */ }; int nmi_handler_registered; #endif static int wdog_reboot_handler(struct notifier_block *this, unsigned long code, void *unused) { static int reboot_event_handled = 0; if ((watchdog_user) && (!reboot_event_handled)) {
size_t __slen = slen; err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len); if (err < 0) return -EINVAL; *dlen = tmp_len; return err; } static struct crypto_alg alg_lz4 = { .cra_name = "lz4", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct lz4_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(alg_lz4.cra_list), .cra_init = lz4_init, .cra_exit = lz4_exit, .cra_u = { .compress = { .coa_compress = lz4_compress_crypto, .coa_decompress = lz4_decompress_crypto } } }; static int __init lz4_mod_init(void) { return crypto_register_alg(&alg_lz4); } static void __exit lz4_mod_fini(void) { crypto_unregister_alg(&alg_lz4);
Km = c->Km[0]; Kr = c->Kr[0]; QBAR (block, Kr, Km); dst[0] = cpu_to_be32(block[0]); dst[1] = cpu_to_be32(block[1]); dst[2] = cpu_to_be32(block[2]); dst[3] = cpu_to_be32(block[3]); } static struct crypto_alg alg = { .cra_name = "cast6", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = CAST6_BLOCK_SIZE, .cra_ctxsize = sizeof(struct cast6_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = CAST6_MIN_KEY_SIZE, .cia_max_keysize = CAST6_MAX_KEY_SIZE, .cia_setkey = cast6_setkey, .cia_encrypt = cast6_encrypt, .cia_decrypt = cast6_decrypt} } }; static int __init init(void) { return crypto_register_alg(&alg); }
.lookup_cred = nul_lookup_cred, }; static struct rpc_auth null_auth = { .au_cslack = NUL_CALLSLACK, .au_rslack = NUL_REPLYSLACK, .au_ops = &authnull_ops, .au_flavor = RPC_AUTH_NULL, .au_count = REFCOUNT_INIT(1), }; static const struct rpc_credops null_credops = { .cr_name = "AUTH_NULL", .crdestroy = nul_destroy_cred, .crmatch = nul_match, .crmarshal = nul_marshal, .crrefresh = nul_refresh, .crvalidate = nul_validate, }; static struct rpc_cred null_cred = { .cr_lru = LIST_HEAD_INIT(null_cred.cr_lru), .cr_auth = &null_auth, .cr_ops = &null_credops, .cr_count = REFCOUNT_INIT(2), .cr_flags = 1UL << RPCAUTH_CRED_UPTODATE, };
kernel_fpu_begin(); aesni_dec(ctx, dst, src); kernel_fpu_end(); } } static struct crypto_alg aesni_alg = { .cra_name = "aes", .cra_driver_name = "aes-aesni", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1, .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(aesni_alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = aes_set_key, .cia_encrypt = aes_encrypt, .cia_decrypt = aes_decrypt } } }; static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
#include "sysfs.h" /* Random magic number */ #define SYSFS_MAGIC 0x62656572 struct vfsmount *sysfs_mount; struct super_block * sysfs_sb = NULL; kmem_cache_t *sysfs_dir_cachep; static struct super_operations sysfs_ops = { .statfs = simple_statfs, .drop_inode = generic_delete_inode, }; static struct sysfs_dirent sysfs_root = { .s_sibling = LIST_HEAD_INIT(sysfs_root.s_sibling), .s_children = LIST_HEAD_INIT(sysfs_root.s_children), .s_element = NULL, .s_type = SYSFS_ROOT, .s_iattr = NULL, }; static int sysfs_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; struct dentry *root; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = SYSFS_MAGIC; sb->s_op = &sysfs_ops;