/*
 * Allocate page pods for DDP buffer 1 (the user buffer) and set up the tag in
 * the TCB.  We allocate page pods in multiples of PPOD_CLUSTER_SIZE.  First we
 * try to allocate enough page pods to accommodate the whole buffer, subject to
 * the MAX_PPODS limit.  If that fails we try to allocate PPOD_CLUSTER_SIZE page
 * pods before failing entirely.
 */
static int
alloc_buf1_ppods(struct toepcb *toep, struct ddp_state *p,
    unsigned long addr, unsigned int len)
{
	int err, tag, npages, nppods;
	struct tom_data *d = TOM_DATA(toep->tp_toedev);

#if 0
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
	npages = ((addr & PAGE_MASK) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nppods = min(pages2ppods(npages), MAX_PPODS);
	nppods = roundup2(nppods, PPOD_CLUSTER_SIZE);
	err = t3_alloc_ppods(d, nppods, &tag);
	if (err && nppods > PPOD_CLUSTER_SIZE) {
		nppods = PPOD_CLUSTER_SIZE;
		err = t3_alloc_ppods(d, nppods, &tag);
	}
	if (err)
		return (ENOMEM);

	p->ubuf_nppods = nppods;
	p->ubuf_tag = tag;
#if NUM_DDP_KBUF == 1
	t3_set_ddp_tag(toep, 1, tag << 6);
#endif
	return (0);
}
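/*
 * Worked example of the sizing math above, assuming 4 KB pages
 * (PAGE_SHIFT == 12): a 10000-byte buffer starting at offset 0x100 into
 * its first page spans npages = (0x100 + 10000 + 4095) >> 12 = 3 pages.
 * pages2ppods() converts that page count into page pods, and the result
 * is rounded up to the next PPOD_CLUSTER_SIZE multiple (after capping at
 * MAX_PPODS) before the allocation is attempted.
 */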
/*
 * Add an mbuf to the deferred mbuf queue for processing from process context.
 */
void
t3_defer_reply(struct mbuf *m, struct toedev *dev, defer_handler_t handler)
{
	struct tom_data *td = TOM_DATA(dev);

	m_set_handler(m, handler);
	mtx_lock(&td->deferq.lock);
	mbufq_tail(&td->deferq, m);
	if (mbufq_len(&td->deferq) == 1)
		taskqueue_enqueue(td->tq, &td->deferq_task);
	mtx_unlock(&td->deferq.lock);
}
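/*
 * Note: t3_defer_reply() enqueues the deferq task only when the queue
 * transitions from empty to non-empty.  This assumes the task handler
 * drains the entire queue in one pass, so a single wakeup also covers
 * any mbufs queued before the task runs.
 */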
static inline int
lookup_port(struct ifnet *slave_dev)
{
	int i, port = -1;
	struct toedev *tdev = TOEDEV(slave_dev);
	struct adap_ports *port_info = TOM_DATA(tdev)->ports;

	for (i = 0; i < port_info->nports; i++) {
		if (slave_dev != port_info->lldevs[i])
			continue;
		port = i;
		break;
	}
	return (port);
}
/*
 * This is a companion to t3_cleanup_ddp() and releases the HW resources
 * associated with a connection's DDP state, such as the page pods.
 * It's called when HW is done with a connection.  The rest of the state
 * remains available until both HW and the app are done with the connection.
 */
void
t3_release_ddp_resources(struct toepcb *toep)
{
	struct ddp_state *p = &toep->tp_ddp_state;
	struct tom_data *d = TOM_DATA(toep->tp_toedev);
	int idx;

	for (idx = 0; idx < NUM_DDP_KBUF; idx++) {
		t3_free_ppods(d, p->kbuf_tag[idx], p->kbuf_nppods[idx]);
		unmap_ddp_gl(p->kbuf[idx]);
	}

	if (p->ubuf_nppods) {
		t3_free_ppods(d, p->ubuf_tag, p->ubuf_nppods);
		p->ubuf_nppods = 0;
	}
	if (p->ubuf)
		unmap_ddp_gl(p->ubuf);
}
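/*
 * Note on the release above: clearing ubuf_nppods makes the user-buffer
 * (buffer 1) release safe to reach twice, since a second pass skips the
 * t3_free_ppods() call.  The kbuf pods are freed unconditionally, so the
 * function is presumably reached once per connection.
 */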
/*
 * Prepare a socket for DDP.  Must be called when the socket is known to be
 * open.
 */
int
t3_enter_ddp(struct toepcb *toep, unsigned int kbuf_size, unsigned int waitall,
    int nonblock)
{
	int i, err = ENOMEM;
	static vm_pindex_t color;
	unsigned int nppods, kbuf_pages, idx = 0;
	struct ddp_state *p = &toep->tp_ddp_state;
	struct tom_data *d = TOM_DATA(toep->tp_toedev);

	if (kbuf_size > M_TCB_RX_DDP_BUF0_LEN)
		return (EINVAL);

#ifdef notyet
	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
#endif
	kbuf_pages = (kbuf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nppods = pages2ppods(kbuf_pages);

	p->kbuf_noinval = !!waitall;
	p->kbuf_tag[NUM_DDP_KBUF - 1] = -1;
	for (idx = 0; idx < NUM_DDP_KBUF; idx++) {
		p->kbuf[idx] = malloc(sizeof (struct ddp_gather_list) +
		    kbuf_pages * sizeof(vm_page_t *), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (p->kbuf[idx] == NULL)
			goto err;
		err = t3_alloc_ppods(d, nppods, &p->kbuf_tag[idx]);
		if (err) {
			printf("t3_alloc_ppods failed err=%d\n", err);
			goto err;
		}

		p->kbuf_nppods[idx] = nppods;
		p->kbuf[idx]->dgl_length = kbuf_size;
		p->kbuf[idx]->dgl_offset = 0;
		p->kbuf[idx]->dgl_nelem = kbuf_pages;

		for (i = 0; i < kbuf_pages; ++i) {
			p->kbuf[idx]->dgl_pages[i] = vm_page_alloc(NULL, color,
			    VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
			    VM_ALLOC_ZERO);
			if (p->kbuf[idx]->dgl_pages[i] == NULL) {
				p->kbuf[idx]->dgl_nelem = i;
				printf("failed to allocate kbuf pages\n");
				goto err;
			}
		}
#ifdef NEED_BUSDMA
		/*
		 * XXX we'll need this for VT-d or any platform with an iommu :-/
		 */
		for (i = 0; i < kbuf_pages; ++i)
			p->kbuf[idx]->phys_addr[i] = pci_map_page(p->pdev,
			    p->kbuf[idx]->pages[i], 0, PAGE_SIZE,
			    PCI_DMA_FROMDEVICE);
#endif
		t3_setup_ppods(toep, p->kbuf[idx], nppods, p->kbuf_tag[idx],
		    p->kbuf[idx]->dgl_length, 0, 0);
	}
	cxgb_log_tcb(TOEP_T3C_DEV(toep)->adapter, toep->tp_tid);

	t3_set_ddp_tag(toep, 0, p->kbuf_tag[0] << 6);
	t3_set_ddp_buf(toep, 0, 0, p->kbuf[0]->dgl_length);
	t3_repost_kbuf(toep, 0, 0, 1, nonblock);
	t3_set_rcv_coalesce_enable(toep,
	    TOM_TUNABLE(toep->tp_toedev, ddp_rcvcoalesce));
	t3_set_dack_mss(toep, TOM_TUNABLE(toep->tp_toedev, delack) >> 1);

#ifdef T3_TRACE
	T3_TRACE4(TIDTB(so),
	    "t3_enter_ddp: kbuf_size %u waitall %u tag0 %d tag1 %d",
	    kbuf_size, waitall, p->kbuf_tag[0], p->kbuf_tag[1]);
#endif
	CTR4(KTR_TOM,
	    "t3_enter_ddp: kbuf_size %u waitall %u tag0 %d tag1 %d",
	    kbuf_size, waitall, p->kbuf_tag[0], p->kbuf_tag[1]);
	cxgb_log_tcb(TOEP_T3C_DEV(toep)->adapter, toep->tp_tid);

	return (0);

err:
	t3_release_ddp_resources(toep);
	t3_cleanup_ddp(toep);
	return (err);
}
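/*
 * Sizing example for the kbuf setup above, assuming 4 KB pages: a 64 KB
 * kbuf_size yields kbuf_pages = (65536 + 4095) >> 12 = 16 wired, zeroed
 * pages per kernel buffer, and pages2ppods(16) page pods are written for
 * each of the NUM_DDP_KBUF buffers via t3_setup_ppods().
 */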
/*
 * Allocate a TOM data structure, initialize its cpl_handlers, and register
 * it as a T3C client.
 */
static void
t3c_tom_add(struct t3cdev *cdev)
{
	int i;
	unsigned int wr_len;
	struct tom_data *t;
	struct toedev *tdev;
	struct adap_ports *port_info;

	t = malloc(sizeof(*t), M_CXGB, M_NOWAIT | M_ZERO);
	if (t == NULL)
		return;

	cdev->send = t3_offload_tx;
	cdev->ctl = cxgb_offload_ctl;

	if (cdev->ctl(cdev, GET_WR_LEN, &wr_len) < 0)
		goto out_free_tom;

	port_info = malloc(sizeof(*port_info), M_CXGB, M_NOWAIT | M_ZERO);
	if (port_info == NULL)
		goto out_free_tom;

	if (cdev->ctl(cdev, GET_PORTS, port_info) < 0)
		goto out_free_all;

	t3_init_wr_tab(wr_len);
	t->cdev = cdev;
	t->client = &t3c_tom_client;

	/* Register TCP offload device */
	tdev = &t->tdev;
	tdev->tod_ttid = cdev2type(cdev);
	tdev->tod_lldev = cdev->lldev;

	if (register_toedev(tdev, "toe%d")) {
		printf("unable to register offload device\n");
		goto out_free_all;
	}
	TOM_DATA(tdev) = t;

	for (i = 0; i < port_info->nports; i++) {
		struct ifnet *ifp = port_info->lldevs[i];

		TOEDEV(ifp) = tdev;
		CTR1(KTR_TOM, "enabling toe on %p", ifp);
		ifp->if_capabilities |= IFCAP_TOE4;
		ifp->if_capenable |= IFCAP_TOE4;
	}
	t->ports = port_info;

	/* Add device to the list of offload devices */
	t3cdev_add(t);

	/* Activate TCP offload device */
	cxgb_offload_activate(TOM_DATA(tdev)->cdev->adapter);
	activate_offload(tdev);
	cxgb_register_listeners();
	return;

out_free_all:
	printf("out_free_all fail\n");
	free(port_info, M_CXGB);
out_free_tom:
	printf("out_free_tom fail\n");
	free(t, M_CXGB);
	return;
}
static inline int
total_ports(struct toedev *tdev)
{
	struct adap_ports *port_info = TOM_DATA(tdev)->ports;

	return (port_info->nports);
}