/*
 * ib_cache_update() - refresh the cached attributes for one port of @device:
 * the P_Key table plus the lmc, port_state and subnet_prefix fields.
 *
 * The fresh P_Key table is built outside the lock from ib_query_pkey()
 * results, then swapped into device->cache under cache.lock so readers
 * never see a partially-filled table; the old table is freed after the
 * lock is dropped.  On any failure the function quietly leaves the
 * existing cache in place.
 */
static void ib_cache_update(struct ib_device *device, u8 port,
			    bool enforce_security)
{
	struct ib_port_attr *tprops = NULL;
	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
	int i;
	int ret;

	if (!rdma_is_port_valid(device, port))
		return;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
		goto err;
	}

	/* Non-RoCE ports keep a separately-managed GID cache. */
	if (!rdma_protocol_roce(device, port)) {
		ret = config_non_roce_gid_cache(device, port,
						tprops->gid_tbl_len);
		if (ret)
			goto err;
	}

	/* Trailing flexible array: header + pkey_tbl_len entries. */
	pkey_cache = kmalloc(struct_size(pkey_cache, table,
					 tprops->pkey_tbl_len), GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			dev_warn(&device->dev,
				 "ib_query_pkey failed (%d) for index %d\n",
				 ret, i);
			goto err;
		}
	}

	/* Publish the new table and scalar attributes atomically. */
	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.ports[port -
		rdma_start_port(device)].pkey;

	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
	device->cache.ports[port - rdma_start_port(device)].port_state =
		tprops->state;

	device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
		tprops->subnet_prefix;
	write_unlock_irq(&device->cache.lock);

	/* Let the security layer react to a possible subnet-prefix change. */
	if (enforce_security)
		ib_security_cache_change(device, port, tprops->subnet_prefix);

	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(tprops);
}
/*
 * Refresh the cached P_Key table, GID table and LMC for one port of
 * @device.  Fresh tables are built outside the lock from ib_query_*()
 * results, swapped in under cache.lock, and the stale tables freed
 * afterwards.  On any failure the existing cache is left untouched.
 */
static void ib_cache_update(struct ib_device *device, u8 port)
{
	struct ib_port_attr *attr = NULL;
	struct ib_pkey_cache *new_pkey = NULL, *stale_pkey;
	struct ib_gid_cache *new_gid = NULL, *stale_gid;
	int idx;
	int rc;

	attr = kmalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return;

	rc = ib_query_port(device, port, attr);
	if (rc) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       rc, device->name);
		goto out_free;
	}

	/* Header plus one trailing entry per table slot. */
	new_pkey = kmalloc(sizeof(*new_pkey) +
			   attr->pkey_tbl_len * sizeof(new_pkey->table[0]),
			   GFP_KERNEL);
	if (!new_pkey)
		goto out_free;

	new_pkey->table_len = attr->pkey_tbl_len;

	new_gid = kmalloc(sizeof(*new_gid) +
			  attr->gid_tbl_len * sizeof(new_gid->table[0]),
			  GFP_KERNEL);
	if (!new_gid)
		goto out_free;

	new_gid->table_len = attr->gid_tbl_len;

	for (idx = 0; idx < new_pkey->table_len; ++idx) {
		rc = ib_query_pkey(device, port, idx, &new_pkey->table[idx]);
		if (rc) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       rc, device->name, idx);
			goto out_free;
		}
	}

	for (idx = 0; idx < new_gid->table_len; ++idx) {
		rc = ib_query_gid(device, port, idx, &new_gid->table[idx]);
		if (rc) {
			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
			       rc, device->name, idx);
			goto out_free;
		}
	}

	/* Swap the new tables in atomically with respect to readers. */
	write_lock_irq(&device->cache.lock);

	stale_pkey = device->cache.pkey_cache[port - start_port(device)];
	stale_gid = device->cache.gid_cache[port - start_port(device)];

	device->cache.pkey_cache[port - start_port(device)] = new_pkey;
	device->cache.gid_cache[port - start_port(device)] = new_gid;
	device->cache.lmc_cache[port - start_port(device)] = attr->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(stale_pkey);
	kfree(stale_gid);
	kfree(attr);
	return;

out_free:
	kfree(new_pkey);
	kfree(new_gid);
	kfree(attr);
}
/*
 * FreeBSD flavour of ib_cache_update(): rebuild the cached P_Key table,
 * GID table and LMC for @port of @device.  New tables are filled from
 * ib_query_pkey()/ib_query_gid() outside the lock, then swapped into
 * device->cache under cache.lock; the old tables are freed after the
 * lock is released.  On failure the existing cache stays in place.
 */
static void ib_cache_update(struct ib_device *device, u8 port)
{
	struct ib_port_attr *tprops = NULL;
	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache *gid_cache = NULL, *old_gid_cache;
	int i;
	int ret;

	/* M_NOWAIT: allocation must not sleep; bail out quietly on failure. */
	tprops = malloc(sizeof *tprops, M_DEVBUF, M_NOWAIT);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		log(LOG_WARNING, "ib_query_port failed (%d) for %s\n",
		    ret, device->name);
		goto err;
	}

	/* Header plus pkey_tbl_len trailing entries. */
	pkey_cache = malloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			    sizeof *pkey_cache->table, M_DEVBUF, M_NOWAIT);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	gid_cache = malloc(sizeof *gid_cache + tprops->gid_tbl_len *
			   sizeof *gid_cache->table, M_DEVBUF, M_NOWAIT);
	if (!gid_cache)
		goto err;

	gid_cache->table_len = tprops->gid_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			log(LOG_WARNING, "ib_query_pkey failed (%d) for %s (index %d)\n",
			    ret, device->name, i);
			goto err;
		}
	}

	for (i = 0; i < gid_cache->table_len; ++i) {
		ret = ib_query_gid(device, port, i, gid_cache->table + i);
		if (ret) {
			log(LOG_WARNING, "ib_query_gid failed (%d) for %s (index %d)\n",
			    ret, device->name, i);
			goto err;
		}
	}

	/* Publish the new tables atomically with respect to readers. */
	mtx_lock(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
	old_gid_cache = device->cache.gid_cache [port - start_port(device)];

	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - start_port(device)] = gid_cache;
	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;

	mtx_unlock(&device->cache.lock);

	free(old_pkey_cache, M_DEVBUF);
	free(old_gid_cache, M_DEVBUF);
	free(tprops, M_DEVBUF);
	return;

err:
	free(pkey_cache, M_DEVBUF);
	free(gid_cache, M_DEVBUF);
	free(tprops, M_DEVBUF);
}
/*
 * Rebuild the cached P_Key table for @port of @device and, on devices
 * that do not manage their own RoCE GID table, re-populate the shared
 * GID table via modify_gid().  The P_Key table is built outside the
 * lock and swapped in under cache.lock; on failure the existing cache
 * is left untouched.
 */
static void ib_cache_update(struct ib_device *device, u8 port)
{
	struct ib_port_attr *tprops = NULL;
	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
	/*
	 * Scratch buffer for raw GIDs read from the device.  Fixed: use a
	 * C99 flexible array member instead of the deprecated zero-length
	 * array ("table[0]").
	 */
	struct ib_gid_cache {
		int table_len;
		union ib_gid table[];
	} *gid_cache = NULL;
	int i;
	int ret;
	struct ib_gid_table *table;
	struct ib_gid_table **ports_table = device->cache.gid_cache;
	bool use_roce_gid_table = rdma_cap_roce_gid_table(device, port);

	if (port < rdma_start_port(device) || port > rdma_end_port(device))
		return;

	table = ports_table[port - rdma_start_port(device)];

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	/* Header plus pkey_tbl_len trailing entries. */
	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	/* RoCE-GID-table devices maintain the GID cache themselves. */
	if (!use_roce_gid_table) {
		gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
				    sizeof(*gid_cache->table), GFP_KERNEL);
		if (!gid_cache)
			goto err;

		gid_cache->table_len = tprops->gid_tbl_len;
	}

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; ++i) {
			ret = ib_query_gid(device, port, i,
					   gid_cache->table + i, NULL);
			if (ret) {
				printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
				       ret, device->name, i);
				goto err;
			}
		}
	}

	/* Publish the new pkey table and GID entries atomically. */
	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port -
						  rdma_start_port(device)];
	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;

	if (!use_roce_gid_table) {
		for (i = 0; i < gid_cache->table_len; i++) {
			modify_gid(device, port, table, i,
				   gid_cache->table + i, &zattr, false);
		}
	}

	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
	write_unlock_irq(&device->cache.lock);

	kfree(gid_cache);
	kfree(old_pkey_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}
/*
 * Rebuild the cached P_Key and GID tables for @port of @device, keeping
 * only valid entries (non-zero pkeys, GIDs with a non-zero GUID half).
 * Worst-case-sized tables are filled first, then shrunk (best effort)
 * to the valid-entry count before being swapped in under cache.lock.
 * On failure the existing cache is left untouched.
 */
static void ib_cache_update(struct ib_device *device, u8 port)
{
	struct ib_port_attr *tprops = NULL;
	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache *gid_cache = NULL, *old_gid_cache;
	int i, j;
	int ret;
	union ib_gid gid, empty_gid;
	u16 pkey;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	/* Worst-case sizes; the tables are shrunk after filtering below. */
	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->entry, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = 0;

	gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
			    sizeof *gid_cache->entry, GFP_KERNEL);
	if (!gid_cache)
		goto err;

	gid_cache->table_len = 0;

	for (i = 0, j = 0; i < tprops->pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port, i, &pkey);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
		/*
		 * pkey 0xffff must be the default pkey and 0x0000 must be
		 * the invalid pkey per IBTA spec, so skip zero entries.
		 */
		if (pkey) {
			pkey_cache->entry[j].index = i;
			pkey_cache->entry[j++].pkey = pkey;
		}
	}
	pkey_cache->table_len = j;

	memset(&empty_gid, 0, sizeof empty_gid);
	for (i = 0, j = 0; i < tprops->gid_tbl_len; ++i) {
		ret = ib_query_gid(device, port, i, &gid);
		if (ret) {
			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
		/*
		 * If bytes 8..15 of the GID entry are all zero, the entry
		 * is a blank, invalid entry.  Depending on the device, the
		 * upper 8 bytes might or might not be prefilled with a
		 * valid subnet prefix, so don't rely on them for
		 * determining a valid GID entry.
		 *
		 * Fixed: the original "&gid + 8" advanced the pointer by
		 * 8 whole union ib_gid objects (128 bytes, far out of
		 * bounds), not by 8 bytes.  Compare at byte granularity.
		 */
		if (memcmp((u8 *)&gid + 8, (u8 *)&empty_gid + 8,
			   sizeof gid - 8)) {
			gid_cache->entry[j].index = i;
			gid_cache->entry[j++].gid = gid;
		}
	}
	gid_cache->table_len = j;

	/* Best-effort shrink of the pkey table to its final size. */
	old_pkey_cache = pkey_cache;
	pkey_cache = kmalloc(sizeof *pkey_cache + old_pkey_cache->table_len *
			     sizeof *pkey_cache->entry, GFP_KERNEL);
	if (!pkey_cache)
		pkey_cache = old_pkey_cache;
	else {
		pkey_cache->table_len = old_pkey_cache->table_len;
		memcpy(&pkey_cache->entry[0], &old_pkey_cache->entry[0],
		       pkey_cache->table_len * sizeof *pkey_cache->entry);
		kfree(old_pkey_cache);
	}

	/* Likewise for the GID table. */
	old_gid_cache = gid_cache;
	gid_cache = kmalloc(sizeof *gid_cache + old_gid_cache->table_len *
			    sizeof *gid_cache->entry, GFP_KERNEL);
	if (!gid_cache)
		gid_cache = old_gid_cache;
	else {
		gid_cache->table_len = old_gid_cache->table_len;
		memcpy(&gid_cache->entry[0], &old_gid_cache->entry[0],
		       gid_cache->table_len * sizeof *gid_cache->entry);
		kfree(old_gid_cache);
	}

	/* Publish the new tables atomically with respect to readers. */
	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
	old_gid_cache = device->cache.gid_cache [port - start_port(device)];

	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - start_port(device)] = gid_cache;
	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(old_pkey_cache);
	kfree(old_gid_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}
static void verbs_add_device (struct ib_device *dev) { int ret; struct ib_qp_init_attr attrs; if (ib_dev) return; /* durty hack for ib_dma_map_single not to segfault */ dev->dma_ops = NULL; ib_dev = dev; printk (KERN_INFO "IB add device called. Name = %s\n", dev->name); ret = ib_query_device (dev, &dev_attr); if (ret) { printk (KERN_INFO "ib_quer_device failed: %d\n", ret); return; } printk (KERN_INFO "IB device caps: max_qp %d, max_mcast_grp: %d, max_pkeys: %d\n", dev_attr.max_qp, dev_attr.max_mcast_grp, (int)dev_attr.max_pkeys); /* We'll work with first port. It's a sample module, anyway. Who is that moron which decided * to count ports from one? */ ret = ib_query_port (dev, 1, &port_attr); if (ret) { printk (KERN_INFO "ib_query_port failed: %d\n", ret); return; } printk (KERN_INFO "Port info: lid: %u, sm_lid: %u, max_msg_size: %u\n", (unsigned)port_attr.lid, (unsigned)port_attr.sm_lid, port_attr.max_msg_sz); pd = ib_alloc_pd (dev); if (IS_ERR (pd)) { ret = PTR_ERR (pd); printk (KERN_INFO "pd allocation failed: %d\n", ret); return; } printk (KERN_INFO "PD allocated\n"); mr = ib_get_dma_mr (pd, IB_ACCESS_LOCAL_WRITE); if (IS_ERR (mr)) { ret = PTR_ERR (mr); printk (KERN_INFO "get_dma_mr failed: %d\n", ret); return; } send_cq = ib_create_cq (dev, NULL, NULL, NULL, 1, 1); if (IS_ERR (send_cq)) { ret = PTR_ERR (send_cq); printk (KERN_INFO "ib_create_cq failed: %d\n", ret); return; } recv_cq = ib_create_cq (dev, verbs_comp_handler_recv, NULL, NULL, 1, 1); if (IS_ERR (recv_cq)) { ret = PTR_ERR (recv_cq); printk (KERN_INFO "ib_create_cq failed: %d\n", ret); return; } ib_req_notify_cq (recv_cq, IB_CQ_NEXT_COMP); printk (KERN_INFO "CQs allocated\n"); ib_query_pkey (dev, 1, 0, &pkey); /* allocate memory */ send_buf = kmalloc (buf_size + 40, GFP_KERNEL); recv_buf = kmalloc (buf_size + 40, GFP_KERNEL); if (!send_buf || !recv_buf) { printk (KERN_INFO "Memory allocation error\n"); return; } printk (KERN_INFO "Trying to register regions\n"); if (ib_dev->dma_ops) printk 
(KERN_INFO "DMA ops are defined\n"); memset (send_buf, 0, buf_size+40); memset (send_buf, 0, buf_size+40); send_key = ib_dma_map_single (ib_dev, send_buf, buf_size, DMA_FROM_DEVICE); printk (KERN_INFO "send_key obtained %llx\n", send_key); recv_key = ib_dma_map_single (ib_dev, recv_buf, buf_size, DMA_TO_DEVICE); printk (KERN_INFO "recv_key obtained %llx\n", recv_key); if (ib_dma_mapping_error (ib_dev, send_key)) { printk (KERN_INFO "Error mapping send buffer\n"); return; } if (ib_dma_mapping_error (ib_dev, recv_key)) { printk (KERN_INFO "Error mapping recv buffer\n"); return; } memset (&attrs, 0, sizeof (attrs)); attrs.qp_type = IB_QPT_UD; attrs.sq_sig_type = IB_SIGNAL_ALL_WR; attrs.event_handler = verbs_qp_event; attrs.cap.max_send_wr = CQ_SIZE; attrs.cap.max_recv_wr = CQ_SIZE; attrs.cap.max_send_sge = 1; attrs.cap.max_recv_sge = 1; attrs.send_cq = send_cq; attrs.recv_cq = recv_cq; qp = ib_create_qp (pd, &attrs); if (IS_ERR (qp)) { ret = PTR_ERR (qp); printk (KERN_INFO "qp allocation failed: %d\n", ret); return; } printk (KERN_INFO "Create QP with num %x\n", qp->qp_num); if (init_qp (qp)) { printk (KERN_INFO "Failed to initialize QP\n"); return; } ret = ib_query_gid (ib_dev, 1, 0, &local_info.gid); if (ret) { printk (KERN_INFO "query_gid failed %d\n", ret); return; } local_info.qp_num = qp->qp_num; local_info.lid = port_attr.lid; /* now we are ready to send our QP number and other stuff to other party */ if (!server_addr) { schedule_work (&sock_accept); flush_scheduled_work (); } else exchange_info (server_addr); if (!have_remote_info) { printk (KERN_INFO "Have no remote info, give up\n"); return; } ret = path_rec_lookup_start (); if (ret) { printk (KERN_INFO "path_rec lookup start failed: %d\n", ret); return; } /* post receive request */ verbs_post_recv_req (); mod_timer (&verbs_timer, NEXTJIFF(1)); }