/*
 * Tear down the per-port GID tables of @ib_dev (legacy cache layout).
 *
 * Walks every physical port and runs cleanup_gid_table_port() on the
 * table stored in cache.ports[].  Cache slots are zero-based while the
 * RDMA core numbers ports from rdma_start_port(), hence the offset
 * passed to the helper.
 */
static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	u8 port;

	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
		struct ib_gid_table *table = ib_dev->cache.ports[port].gid;

		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
				       table);
	}
}
/*
 * Release the GID table state for every port of @ib_dev.
 *
 * A NULL cache.gid_cache means setup never completed (or already ran
 * down), so there is nothing to clean.  Cache slots are zero-based;
 * rdma_start_port() translates a slot index to the RDMA port number
 * expected by cleanup_gid_table_port().
 */
static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
	struct ib_gid_table **tables = ib_dev->cache.gid_cache;
	u8 p = 0;

	if (!tables)
		return;

	while (p < ib_dev->phys_port_cnt) {
		cleanup_gid_table_port(ib_dev, p + rdma_start_port(ib_dev),
				       tables[p]);
		p++;
	}
}
static int _gid_table_setup_one(struct ib_device *ib_dev) { u8 port; struct ib_gid_table **table; int err = 0; table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL); if (!table) { pr_warn("failed to allocate ib gid cache for %s\n", ib_dev->name); return -ENOMEM; } for (port = 0; port < ib_dev->phys_port_cnt; port++) { u8 rdma_port = port + rdma_start_port(ib_dev); table[port] = alloc_gid_table( ib_dev->port_immutable[rdma_port].gid_tbl_len); if (!table[port]) { err = -ENOMEM; goto rollback_table_setup; } err = gid_table_reserve_default(ib_dev, port + rdma_start_port(ib_dev), table[port]); if (err) goto rollback_table_setup; } ib_dev->cache.gid_cache = table; return 0; rollback_table_setup: for (port = 0; port < ib_dev->phys_port_cnt; port++) { cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev), table[port]); release_gid_table(table[port]); } kfree(table); return err; }