/*
 * mlx4_ib_destroy_alias_guid_service - tear down the per-port alias-GUID
 * machinery created by mlx4_ib_init_alias_guid_service().
 *
 * For each port: stop the delayed work, cancel every in-flight SA query on
 * the port's callback list, then (in a second pass) flush and destroy the
 * port workqueues.  Finally the SA client registered at init time is
 * unregistered and freed.
 *
 * NOTE(review): this assumes init succeeded for all ports (every
 * ports_guid[i].wq is non-NULL) — the init error path NULLs and destroys
 * its own workqueues, so this must not be called after a failed init.
 */
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
{
	int i;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct mlx4_alias_guid_work_context *cb_ctx;
	struct mlx4_sriov_alias_guid_port_rec_det *det;
	struct ib_sa_query *sa_query;
	unsigned long flags;

	for (i = 0 ; i < dev->num_ports; i++) {
		/* Stop new SA queries from being issued by the worker. */
		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
		det = &sriov->alias_guid.ports_guid[i];
		spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		while (!list_empty(&det->cb_list)) {
			cb_ctx = list_entry(det->cb_list.next,
					    struct mlx4_alias_guid_work_context,
					    list);
			/*
			 * Snapshot and clear sa_query under the lock so the
			 * SA completion callback sees NULL and knows the
			 * query is being torn down.
			 */
			sa_query = cb_ctx->sa_query;
			cb_ctx->sa_query = NULL;
			list_del(&cb_ctx->list);
			/*
			 * Drop the lock before cancelling: the cancel path
			 * invokes the SA callback, which takes ag_work_lock
			 * itself — holding it here would deadlock.
			 */
			spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
			ib_sa_cancel_query(cb_ctx->query_id, sa_query);
			/* Wait for the callback to finish before freeing. */
			wait_for_completion(&cb_ctx->done);
			kfree(cb_ctx);
			/* Re-take the lock to re-check the (shared) list. */
			spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
		}
		spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
	}
	/* Second pass: all queries are gone, workqueues can go away. */
	for (i = 0 ; i < dev->num_ports; i++) {
		flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
		destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
	}
	ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
	kfree(dev->sriov.alias_guid.sa_client);
}
/*
 * Module exit: release resources in the reverse order of module init.
 * The socket is released only if it was actually created.
 */
static void __exit verbs_exit(void)
{
	if (sock != NULL)
		sock_release(sock);

	del_timer(&verbs_timer);

	/* Detach from the IB core, then drop our SA client registration. */
	ib_unregister_client(&client);
	ib_sa_unregister_client(&verbs_sa_client);
}
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev) { char alias_wq_name[15]; int ret = 0; int i, j, k; union ib_gid gid; if (!mlx4_is_master(dev->dev)) return 0; dev->sriov.alias_guid.sa_client = kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL); if (!dev->sriov.alias_guid.sa_client) return -ENOMEM; ib_sa_register_client(dev->sriov.alias_guid.sa_client); spin_lock_init(&dev->sriov.alias_guid.ag_work_lock); for (i = 1; i <= dev->num_ports; ++i) { if (dev->ib_dev.query_gid(&dev->ib_dev , i, 0, &gid)) { ret = -EFAULT; goto err_unregister; } } for (i = 0 ; i < dev->num_ports; i++) { memset(&dev->sriov.alias_guid.ports_guid[i], 0, sizeof (struct mlx4_sriov_alias_guid_port_rec_det)); /*Check if the SM doesn't need to assign the GUIDs*/ for (j = 0; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) { if (mlx4_ib_sm_guid_assign) { dev->sriov.alias_guid.ports_guid[i]. all_rec_per_port[j]. ownership = MLX4_GUID_DRIVER_ASSIGN; continue; } dev->sriov.alias_guid.ports_guid[i].all_rec_per_port[j]. ownership = MLX4_GUID_NONE_ASSIGN; /*mark each val as it was deleted, till the sysAdmin will give it valid val*/ for (k = 0; k < NUM_ALIAS_GUID_IN_REC; k++) { *(__be64 *)&dev->sriov.alias_guid.ports_guid[i]. 
all_rec_per_port[j].all_recs[GUID_REC_SIZE * k] = cpu_to_be64(MLX4_GUID_FOR_DELETE_VAL); } } INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list); /*prepare the records, set them to be allocated by sm*/ for (j = 0 ; j < NUM_ALIAS_GUID_REC_IN_PORT; j++) invalidate_guid_record(dev, i + 1, j); dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid; dev->sriov.alias_guid.ports_guid[i].port = i; if (mlx4_ib_sm_guid_assign) set_all_slaves_guids(dev, i); snprintf(alias_wq_name, sizeof alias_wq_name, "alias_guid%d", i); dev->sriov.alias_guid.ports_guid[i].wq = create_singlethread_workqueue(alias_wq_name); if (!dev->sriov.alias_guid.ports_guid[i].wq) { ret = -ENOMEM; goto err_thread; } INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work, alias_guid_work); } return 0; err_thread: for (--i; i >= 0; i--) { destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); dev->sriov.alias_guid.ports_guid[i].wq = NULL; } err_unregister: ib_sa_unregister_client(dev->sriov.alias_guid.sa_client); kfree(dev->sriov.alias_guid.sa_client); dev->sriov.alias_guid.sa_client = NULL; pr_err("init_alias_guid_service: Failed. (ret:%d)\n", ret); return ret; }