/**
 * fcoe_sw_exit - unregisters fcoe_sw_transport
 *
 * Returns: 0 on success
 */
int __exit fcoe_sw_exit(void)
{
	/* detach the transport */
	fc_release_transport(scsi_transport_fcoe_sw);
	fcoe_transport_unregister(&fcoe_sw_transport);
	return 0;
}
/*
 * zfcp_module_init - zfcp driver entry point
 *
 * Allocates the slab caches, initializes the global adapter lists and
 * their protection, attaches the FC transport template, registers the
 * cfdc misc device and hooks the driver into the common I/O layer.
 * On failure everything set up so far is torn down in reverse order.
 *
 * Returns: 0 on success, negative errno otherwise.
 */
static int __init zfcp_module_init(void)
{
	int rc = -ENOMEM;

	zfcp_data.fsf_req_qtcb_cache = zfcp_cache_create(
			sizeof(struct zfcp_fsf_req_qtcb), "zfcp_fsf");
	if (!zfcp_data.fsf_req_qtcb_cache)
		goto out;

	zfcp_data.sr_buffer_cache = zfcp_cache_create(
			sizeof(struct fsf_status_read_buffer), "zfcp_sr");
	if (!zfcp_data.sr_buffer_cache)
		goto out_sr_cache;

	zfcp_data.gid_pn_cache = zfcp_cache_create(
			sizeof(struct zfcp_gid_pn_data), "zfcp_gid");
	if (!zfcp_data.gid_pn_cache)
		goto out_gid_cache;

	/* global adapter bookkeeping and its protection */
	INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
	INIT_LIST_HEAD(&zfcp_data.adapter_remove_lh);
	sema_init(&zfcp_data.config_sema, 1);
	rwlock_init(&zfcp_data.config_lock);

	zfcp_data.scsi_transport_template =
		fc_attach_transport(&zfcp_transport_functions);
	if (!zfcp_data.scsi_transport_template)
		goto out_transport;

	rc = misc_register(&zfcp_cfdc_misc);
	if (rc) {
		pr_err("zfcp: registration of misc device zfcp_cfdc failed\n");
		goto out_misc;
	}

	rc = zfcp_ccw_register();
	if (rc) {
		pr_err("zfcp: Registration with common I/O layer failed.\n");
		goto out_ccw_register;
	}

	/* optionally bring up a device named on the module command line */
	if (zfcp_device_setup(device))
		zfcp_init_device_configure();

	goto out;

out_ccw_register:
	misc_deregister(&zfcp_cfdc_misc);
out_misc:
	fc_release_transport(zfcp_data.scsi_transport_template);
out_transport:
	kmem_cache_destroy(zfcp_data.gid_pn_cache);
out_gid_cache:
	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
out_sr_cache:
	kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache);
out:
	return rc;
}
/*
 * zfcp_module_exit - undo everything zfcp_module_init() set up, in
 * reverse order of registration.
 *
 * Fix: zfcp_module_init() registers the zfcp_cfdc misc device, but this
 * exit path never deregistered it, leaking the misc minor (and a stale
 * /dev node) on every module unload.
 */
static void __exit zfcp_module_exit(void)
{
	ccw_driver_unregister(&zfcp_ccw_driver);
	/* pairs with misc_register(&zfcp_cfdc_misc) in zfcp_module_init() */
	misc_deregister(&zfcp_cfdc_misc);
	fc_release_transport(zfcp_scsi_transport_template);
	kmem_cache_destroy(zfcp_fc_req_cache);
	kmem_cache_destroy(zfcp_fsf_qtcb_cache);
}
/*
 * zfcp_module_init - zfcp driver entry point
 *
 * Allocates the slab caches, creates the driver work queue, initializes
 * global locks, attaches the FC transport template, registers the cfdc
 * misc device and hooks the driver into the common I/O layer.
 *
 * Fix: the return value of create_singlethread_workqueue() was never
 * checked (a later use of a NULL work queue would oops), and the work
 * queue was never destroyed on any subsequent failure path (leak).
 * Both are handled now via the new out_wq unwind label.
 *
 * Returns: 0 on success, negative errno otherwise.
 */
static int __init zfcp_module_init(void)
{
	int retval = -ENOMEM;

	zfcp_data.fsf_req_qtcb_cache = zfcp_cache_create(
			sizeof(struct zfcp_fsf_req_qtcb), "zfcp_fsf");
	if (!zfcp_data.fsf_req_qtcb_cache)
		goto out;

	zfcp_data.sr_buffer_cache = zfcp_cache_create(
			sizeof(struct fsf_status_read_buffer), "zfcp_sr");
	if (!zfcp_data.sr_buffer_cache)
		goto out_sr_cache;

	zfcp_data.gid_pn_cache = zfcp_cache_create(
			sizeof(struct zfcp_gid_pn_data), "zfcp_gid");
	if (!zfcp_data.gid_pn_cache)
		goto out_gid_cache;

	zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
	if (!zfcp_data.work_queue)	/* was unchecked */
		goto out_wq;

	sema_init(&zfcp_data.config_sema, 1);
	rwlock_init(&zfcp_data.config_lock);

	zfcp_data.scsi_transport_template =
		fc_attach_transport(&zfcp_transport_functions);
	if (!zfcp_data.scsi_transport_template)
		goto out_transport;

	retval = misc_register(&zfcp_cfdc_misc);
	if (retval) {
		pr_err("Registering the misc device zfcp_cfdc failed\n");
		goto out_misc;
	}

	retval = zfcp_ccw_register();
	if (retval) {
		pr_err("The zfcp device driver could not register with "
		       "the common I/O layer\n");
		goto out_ccw_register;
	}

	/* optionally bring up a device named on the module command line */
	if (init_device)
		zfcp_init_device_setup(init_device);
	return 0;

out_ccw_register:
	misc_deregister(&zfcp_cfdc_misc);
out_misc:
	fc_release_transport(zfcp_data.scsi_transport_template);
out_transport:
	destroy_workqueue(zfcp_data.work_queue);	/* was leaked */
out_wq:
	kmem_cache_destroy(zfcp_data.gid_pn_cache);
out_gid_cache:
	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
out_sr_cache:
	kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache);
out:
	return retval;
}
/*
 * fnic_cleanup_module - module unload path.
 *
 * Unregisters the PCI driver first so no new adapters/I-O appear, then
 * releases the event work queue, the slab caches and finally the FC
 * transport template — the reverse of fnic_init_module().
 */
static void __exit fnic_cleanup_module(void)
{
	pci_unregister_driver(&fnic_driver);
	destroy_workqueue(fnic_event_queue);
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	kmem_cache_destroy(fnic_io_req_cache);
	fc_release_transport(fnic_fc_transport);
}
/*
 * zfcp_module_exit - tear down the driver on module unload.
 *
 * Deregisters from the common I/O layer and the misc subsystem,
 * releases the FC transport template and destroys all slab caches —
 * the reverse order of zfcp_module_init().
 */
static void __exit zfcp_module_exit(void)
{
	ccw_driver_unregister(&zfcp_ccw_driver);
	misc_deregister(&zfcp_cfdc_misc);
	fc_release_transport(zfcp_data.scsi_transport_template);
	kmem_cache_destroy(zfcp_data.adisc_cache);
	kmem_cache_destroy(zfcp_data.gid_pn_cache);
	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
	kmem_cache_destroy(zfcp_data.qtcb_cache);
	kmem_cache_destroy(zfcp_data.gpn_ft_cache);
}
/*
 * zfcp_module_init - zfcp driver entry point
 *
 * Creates the hardware-aligned slab caches, attaches the FC transport
 * template (reserving per-device driver data), registers the cfdc misc
 * device and the ccw driver.  All failures unwind in reverse order.
 *
 * Returns: 0 on success, negative errno otherwise.
 */
static int __init zfcp_module_init(void)
{
	int ret = -ENOMEM;

	zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
						  sizeof(struct fsf_qtcb));
	if (!zfcp_fsf_qtcb_cache)
		goto out_qtcb_cache;

	zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req",
						sizeof(struct zfcp_fc_req));
	if (!zfcp_fc_req_cache)
		goto out_fc_cache;

	zfcp_scsi_transport_template =
		fc_attach_transport(&zfcp_transport_functions);
	if (!zfcp_scsi_transport_template)
		goto out_transport;
	/* reserve room for struct zfcp_scsi_dev in each scsi_device */
	scsi_transport_reserve_device(zfcp_scsi_transport_template,
				      sizeof(struct zfcp_scsi_dev));

	ret = misc_register(&zfcp_cfdc_misc);
	if (ret) {
		pr_err("Registering the misc device zfcp_cfdc failed\n");
		goto out_misc;
	}

	ret = ccw_driver_register(&zfcp_ccw_driver);
	if (ret) {
		pr_err("The zfcp device driver could not register with "
		       "the common I/O layer\n");
		goto out_ccw_register;
	}

	/* optionally bring up a device named on the module command line */
	if (init_device)
		zfcp_init_device_setup(init_device);
	return 0;

out_ccw_register:
	misc_deregister(&zfcp_cfdc_misc);
out_misc:
	fc_release_transport(zfcp_scsi_transport_template);
out_transport:
	kmem_cache_destroy(zfcp_fc_req_cache);
out_fc_cache:
	kmem_cache_destroy(zfcp_fsf_qtcb_cache);
out_qtcb_cache:
	return ret;
}
/*
 * fnic_cleanup_module - module unload path.
 *
 * Tears down procfs entries and the PCI driver first, then drains and
 * destroys the work queues, frees the slab caches, releases the FC
 * transport template and finally the trace buffers.
 */
static void __exit fnic_cleanup_module(void)
{
	teardown_fnic_procfs();
	pci_unregister_driver(&fnic_driver);
	destroy_workqueue(fnic_event_queue);
	/* the FIP queue is only created when FIP is in use */
	if (fnic_fip_queue) {
		flush_workqueue(fnic_fip_queue);
		destroy_workqueue(fnic_fip_queue);
	}
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	kmem_cache_destroy(fnic_io_req_cache);
	fc_release_transport(fnic_fc_transport);
	fnic_trace_free();
}
/*
 * zfcp_module_init - zfcp driver entry point
 *
 * Creates the hardware-aligned slab caches, initializes the global
 * configuration lock/mutex, attaches the FC transport template,
 * registers the cfdc misc device and hooks the driver into the common
 * I/O layer.  All failures unwind in reverse order.
 *
 * Returns: 0 on success, negative errno otherwise.
 */
static int __init zfcp_module_init(void)
{
	int rc = -ENOMEM;

	zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
					sizeof(struct ct_iu_gpn_ft_req));
	if (!zfcp_data.gpn_ft_cache)
		goto out;

	zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb",
					sizeof(struct fsf_qtcb));
	if (!zfcp_data.qtcb_cache)
		goto out_qtcb_cache;

	zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
					sizeof(struct fsf_status_read_buffer));
	if (!zfcp_data.sr_buffer_cache)
		goto out_sr_cache;

	zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
					sizeof(struct zfcp_gid_pn_data));
	if (!zfcp_data.gid_pn_cache)
		goto out_gid_cache;

	mutex_init(&zfcp_data.config_mutex);
	rwlock_init(&zfcp_data.config_lock);

	zfcp_data.scsi_transport_template =
		fc_attach_transport(&zfcp_transport_functions);
	if (!zfcp_data.scsi_transport_template)
		goto out_transport;

	rc = misc_register(&zfcp_cfdc_misc);
	if (rc) {
		pr_err("Registering the misc device zfcp_cfdc failed\n");
		goto out_misc;
	}

	rc = zfcp_ccw_register();
	if (rc) {
		pr_err("The zfcp device driver could not register with "
		       "the common I/O layer\n");
		goto out_ccw_register;
	}

	/* optionally bring up a device named on the module command line */
	if (init_device)
		zfcp_init_device_setup(init_device);
	return 0;

out_ccw_register:
	misc_deregister(&zfcp_cfdc_misc);
out_misc:
	fc_release_transport(zfcp_data.scsi_transport_template);
out_transport:
	kmem_cache_destroy(zfcp_data.gid_pn_cache);
out_gid_cache:
	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
out_sr_cache:
	kmem_cache_destroy(zfcp_data.qtcb_cache);
out_qtcb_cache:
	kmem_cache_destroy(zfcp_data.gpn_ft_cache);
out:
	return rc;
}
/*
 * fnic_init_module - module load path for the fnic driver.
 *
 * Sets up (in order): debugfs and trace buffers (best-effort — failures
 * are logged but do not abort the load), the SGL and io_req slab
 * caches, the event and FIP work queues, the FC transport template and
 * finally the PCI driver registration.  Hard failures unwind everything
 * allocated so far in reverse order.
 *
 * Returns: 0 on success, negative errno otherwise.
 */
static int __init fnic_init_module(void)
{
	size_t sz;
	int rc = 0;

	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	/* Create debugfs entries for fnic */
	rc = fnic_debugfs_init();
	if (rc < 0) {
		printk(KERN_ERR PFX "Failed to create fnic directory "
				"for tracing and stats logging\n");
		fnic_debugfs_terminate();
	}

	/* Allocate memory for trace buffer */
	rc = fnic_trace_buf_init();
	if (rc < 0) {
		printk(KERN_ERR PFX "Trace buffer initialization Failed "
				"Fnic Tracing utility is disabled\n");
		fnic_trace_free();
	}

	/* Create a cache for allocation of default size sgls */
	sz = sizeof(struct fnic_dflt_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
		("fnic_sgl_dflt", sz + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN,
		 NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
		printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
		rc = -ENOMEM;
		goto err_create_fnic_sgl_slab_dflt;
	}

	/* Create a cache for allocation of max size sgls*/
	sz = sizeof(struct fnic_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
		("fnic_sgl_max", sz + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN,
		 NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
		printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
		rc = -ENOMEM;
		goto err_create_fnic_sgl_slab_max;
	}

	/* Create a cache of io_req structs for use via mempool */
	fnic_io_req_cache = kmem_cache_create("fnic_io_req",
					      sizeof(struct fnic_io_req),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fnic_io_req_cache) {
		printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
		rc = -ENOMEM;
		goto err_create_fnic_ioreq_slab;
	}

	fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
	if (!fnic_event_queue) {
		printk(KERN_ERR PFX "fnic work queue create failed\n");
		rc = -ENOMEM;
		goto err_create_fnic_workq;
	}

	spin_lock_init(&fnic_list_lock);
	INIT_LIST_HEAD(&fnic_list);

	fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
	if (!fnic_fip_queue) {
		printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
		rc = -ENOMEM;
		goto err_create_fip_workq;
	}

	fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
	if (!fnic_fc_transport) {
		printk(KERN_ERR PFX "fc_attach_transport error\n");
		rc = -ENOMEM;
		goto err_fc_transport;
	}

	/* register the driver with PCI system */
	rc = pci_register_driver(&fnic_driver);
	if (rc < 0) {
		printk(KERN_ERR PFX "pci register error\n");
		goto err_pci_register;
	}
	return rc;

err_pci_register:
	fc_release_transport(fnic_fc_transport);
err_fc_transport:
	destroy_workqueue(fnic_fip_queue);
err_create_fip_workq:
	destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
	kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
err_create_fnic_sgl_slab_max:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
	fnic_trace_free();
	fnic_debugfs_terminate();
	return rc;
}
/*
 * fnic_init_module - module load path for the fnic driver.
 *
 * Sets up (in order): DMA-capable SGL slab caches, the io_req slab
 * cache, the event work queue, the FC transport template and finally
 * the PCI driver registration.  Any failure unwinds everything
 * allocated so far in reverse order.
 *
 * Returns: 0 on success, negative errno otherwise.
 */
static int __init fnic_init_module(void)
{
	size_t sz;
	int rc = 0;

	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	/* slab cache for default-size scatter-gather lists */
	sz = sizeof(struct fnic_dflt_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
		("fnic_sgl_dflt", sz + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
		 NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
		printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
		rc = -ENOMEM;
		goto err_create_fnic_sgl_slab_dflt;
	}

	/* slab cache for max-size scatter-gather lists */
	sz = sizeof(struct fnic_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
		("fnic_sgl_max", sz + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
		 NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
		printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
		rc = -ENOMEM;
		goto err_create_fnic_sgl_slab_max;
	}

	/* slab cache backing the io_req mempool */
	fnic_io_req_cache = kmem_cache_create("fnic_io_req",
					      sizeof(struct fnic_io_req),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fnic_io_req_cache) {
		printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
		rc = -ENOMEM;
		goto err_create_fnic_ioreq_slab;
	}

	fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
	if (!fnic_event_queue) {
		printk(KERN_ERR PFX "fnic work queue create failed\n");
		rc = -ENOMEM;
		goto err_create_fnic_workq;
	}

	spin_lock_init(&fnic_list_lock);
	INIT_LIST_HEAD(&fnic_list);

	fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
	if (!fnic_fc_transport) {
		printk(KERN_ERR PFX "fc_attach_transport error\n");
		rc = -ENOMEM;
		goto err_fc_transport;
	}

	rc = pci_register_driver(&fnic_driver);
	if (rc < 0) {
		printk(KERN_ERR PFX "pci register error\n");
		goto err_pci_register;
	}
	return rc;

err_pci_register:
	fc_release_transport(fnic_fc_transport);
err_fc_transport:
	destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
	kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
err_create_fnic_sgl_slab_max:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
	return rc;
}
/*
 * fnic_init_module - module load path for the fnic driver.
 *
 * Fix: the load banner and every failure branch contained only an empty
 * statement (";"), so allocation and registration failures were
 * completely silent.  The diagnostic printk()s are restored (matching
 * the driver's established message strings) so a failed load can be
 * diagnosed from the kernel log.
 *
 * Returns: 0 on success, negative errno otherwise.
 */
static int __init fnic_init_module(void)
{
	size_t len;
	int err = 0;

	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	/* Create a cache for allocation of default size sgls */
	len = sizeof(struct fnic_dflt_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
		("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
		 NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
		printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
		err = -ENOMEM;
		goto err_create_fnic_sgl_slab_dflt;
	}

	/* Create a cache for allocation of max size sgls*/
	len = sizeof(struct fnic_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
		("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA,
		 NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
		printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
		err = -ENOMEM;
		goto err_create_fnic_sgl_slab_max;
	}

	/* Create a cache of io_req structs for use via mempool */
	fnic_io_req_cache = kmem_cache_create("fnic_io_req",
					      sizeof(struct fnic_io_req),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fnic_io_req_cache) {
		printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
		err = -ENOMEM;
		goto err_create_fnic_ioreq_slab;
	}

	fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
	if (!fnic_event_queue) {
		printk(KERN_ERR PFX "fnic work queue create failed\n");
		err = -ENOMEM;
		goto err_create_fnic_workq;
	}

	spin_lock_init(&fnic_list_lock);
	INIT_LIST_HEAD(&fnic_list);

	fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
	if (!fnic_fc_transport) {
		printk(KERN_ERR PFX "fc_attach_transport error\n");
		err = -ENOMEM;
		goto err_fc_transport;
	}

	/* register the driver with PCI system */
	err = pci_register_driver(&fnic_driver);
	if (err < 0) {
		printk(KERN_ERR PFX "pci register error\n");
		goto err_pci_register;
	}
	return err;

err_pci_register:
	fc_release_transport(fnic_fc_transport);
err_fc_transport:
	destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
	kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
err_create_fnic_sgl_slab_max:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
	return err;
}