static void amd_handle_event(struct amd_ntb_dev *ndev, int vec)
{
	void __iomem *mmio = ndev->self_mmio;
	struct device *dev = &ndev->ntb.pdev->dev;
	u32 status;

	status = readl(mmio + AMD_INTSTAT_OFFSET);
	if (!(status & AMD_EVENT_INTMASK))
		return;

	dev_dbg(dev, "status = 0x%x and vec = %d\n", status, vec);

	status &= AMD_EVENT_INTMASK;
	switch (status) {
	case AMD_PEER_FLUSH_EVENT:
		dev_info(dev, "Flush is done.\n");
		break;
	case AMD_PEER_RESET_EVENT:
		amd_ack_smu(ndev, AMD_PEER_RESET_EVENT);

		/* link down first */
		ntb_link_event(&ndev->ntb);
		/* polling peer status */
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);

		break;
	case AMD_PEER_D3_EVENT:
	case AMD_PEER_PMETO_EVENT:
	case AMD_LINK_UP_EVENT:
	case AMD_LINK_DOWN_EVENT:
		amd_ack_smu(ndev, status);

		/* link down */
		ntb_link_event(&ndev->ntb);

		break;
	case AMD_PEER_D0_EVENT:
		mmio = ndev->peer_mmio;
		status = readl(mmio + AMD_PMESTAT_OFFSET);
		/* check if this is WAKEUP event */
		if (status & 0x1)
			dev_info(dev, "Wakeup is done.\n");

		amd_ack_smu(ndev, AMD_PEER_D0_EVENT);

		/* start a timer to poll link status */
		schedule_delayed_work(&ndev->hb_timer,
				      AMD_LINK_HB_TIMEOUT);
		break;
	default:
		dev_info(dev, "event status = 0x%x.\n", status);
		break;
	}
}
static void amd_link_hb(struct work_struct *work)
{
	struct amd_ntb_dev *ndev = hb_ndev(work);

	if (amd_poll_link(ndev))
		ntb_link_event(&ndev->ntb);

	if (!amd_link_is_up(ndev))
		schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT);
}
static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct pci_dev *pdev = ntb->pdev;
	struct perf_ctx *perf;
	int node;
	int rc = 0;

	if (ntb_spad_count(ntb) < MAX_SPAD) {
		dev_err(&ntb->dev, "Not enough scratch pad registers for %s",
			DRIVER_NAME);
		return -EIO;
	}

	node = dev_to_node(&pdev->dev);

	perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node);
	if (!perf) {
		rc = -ENOMEM;
		goto err_perf;
	}

	perf->ntb = ntb;
	perf->perf_threads = 1;
	atomic_set(&perf->tsync, 0);
	mutex_init(&perf->run_mutex);
	spin_lock_init(&perf->db_lock);
	perf_setup_mw(ntb, perf);
	init_waitqueue_head(&perf->link_wq);
	INIT_DELAYED_WORK(&perf->link_work, perf_link_work);

	rc = ntb_set_ctx(ntb, perf, &perf_ops);
	if (rc)
		goto err_ctx;

	perf->link_is_up = false;
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);

	rc = perf_debugfs_setup(perf);
	if (rc)
		goto err_ctx;

	perf_clear_thread_status(perf);

	return 0;

err_ctx:
	cancel_delayed_work_sync(&perf->link_work);
	kfree(perf);
err_perf:
	return rc;
}
static int pp_setup_ctx(struct pp_ctx *pp)
{
	int ret;

	ret = ntb_set_ctx(pp->ntb, pp, &pp_ops);
	if (ret)
		return ret;

	ntb_link_enable(pp->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	/* Might not be necessary */
	ntb_link_event(pp->ntb);

	return 0;
}
static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct pci_dev *pdev = ntb->pdev;
	struct perf_ctx *perf;
	int node;
	int rc = 0;

	node = dev_to_node(&pdev->dev);

	perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node);
	if (!perf) {
		rc = -ENOMEM;
		goto err_perf;
	}

	perf->ntb = ntb;
	perf->perf_threads = 1;
	atomic_set(&perf->tsync, 0);
	perf->run = false;
	spin_lock_init(&perf->db_lock);
	perf_setup_mw(ntb, perf);
	INIT_DELAYED_WORK(&perf->link_work, perf_link_work);
	INIT_WORK(&perf->link_cleanup, perf_link_cleanup);

	rc = ntb_set_ctx(ntb, perf, &perf_ops);
	if (rc)
		goto err_ctx;

	perf->link_is_up = false;
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);

	rc = perf_debugfs_setup(perf);
	if (rc)
		goto err_ctx;

	return 0;

err_ctx:
	cancel_delayed_work_sync(&perf->link_work);
	cancel_work_sync(&perf->link_cleanup);
	kfree(perf);
err_perf:
	return rc;
}
static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb)
{
	struct tool_ctx *tc;
	int rc;
	int i;

	if (!ntb->ops->mw_set_trans) {
		dev_dbg(&ntb->dev, "need inbound MW based NTB API\n");
		rc = -EINVAL;
		goto err_tc;
	}

	if (ntb_spad_count(ntb) < 1) {
		dev_dbg(&ntb->dev, "not enough scratchpads\n");
		rc = -EINVAL;
		goto err_tc;
	}

	if (ntb_db_is_unsafe(ntb))
		dev_dbg(&ntb->dev, "doorbell is unsafe\n");

	if (ntb_spad_is_unsafe(ntb))
		dev_dbg(&ntb->dev, "scratchpad is unsafe\n");

	if (ntb_peer_port_count(ntb) != NTB_DEF_PEER_CNT)
		dev_warn(&ntb->dev, "multi-port NTB is unsupported\n");

	tc = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc) {
		rc = -ENOMEM;
		goto err_tc;
	}

	tc->ntb = ntb;
	init_waitqueue_head(&tc->link_wq);

	tc->mw_count = min(ntb_peer_mw_count(tc->ntb), MAX_MWS);
	for (i = 0; i < tc->mw_count; i++) {
		rc = tool_init_mw(tc, i);
		if (rc)
			goto err_ctx;
	}

	tool_setup_dbgfs(tc);

	rc = ntb_set_ctx(ntb, tc, &tool_ops);
	if (rc)
		goto err_ctx;

	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);

	return 0;

err_ctx:
	tool_free_mws(tc);
	debugfs_remove_recursive(tc->dbgfs);
	kfree(tc);
err_tc:
	return rc;
}
static int
ntb_transport_probe(struct ntb_softc *ntb)
{
	struct ntb_transport_ctx *nt = &net_softc;
	struct ntb_transport_mw *mw;
	uint64_t qp_bitmap;
	int rc;
	unsigned i;

	nt->mw_count = ntb_mw_count(ntb);
	for (i = 0; i < nt->mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_mw_get_range(ntb, i, &mw->phys_addr, &mw->vbase,
		    &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size,
		    &mw->addr_limit);
		if (rc != 0)
			goto err;

		mw->buff_size = 0;
		mw->xlat_size = 0;
		mw->virt_addr = NULL;
		mw->dma_addr = 0;
	}

	qp_bitmap = ntb_db_valid_mask(ntb);
	nt->qp_count = flsll(qp_bitmap);
	KASSERT(nt->qp_count != 0, ("bogus db bitmap"));
	nt->qp_count -= 1;

	if (max_num_clients != 0 && max_num_clients < nt->qp_count)
		nt->qp_count = max_num_clients;
	else if (nt->mw_count < nt->qp_count)
		nt->qp_count = nt->mw_count;
	KASSERT(nt->qp_count <= QP_SETSIZE, ("invalid qp_count"));

	mtx_init(&nt->tx_lock, "ntb transport tx", NULL, MTX_DEF);
	mtx_init(&nt->rx_lock, "ntb transport rx", NULL, MTX_DEF);

	nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_IF,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < nt->qp_count; i++) {
		set_bit(i, &nt->qp_bitmap);
		set_bit(i, &nt->qp_bitmap_free);
		ntb_transport_init_queue(nt, i);
	}

	callout_init(&nt->link_work, 0);
	callout_init(&nt->link_watchdog, 0);
	TASK_INIT(&nt->link_cleanup, 0, ntb_transport_link_cleanup_work, nt);

	rc = ntb_set_ctx(ntb, nt, &ntb_transport_ops);
	if (rc != 0)
		goto err;

	nt->link_is_up = false;
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);

	callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);
	if (enable_xeon_watchdog != 0)
		callout_reset(&nt->link_watchdog, 0, xeon_link_watchdog_hb, nt);
	return (0);

err:
	free(nt->qp_vec, M_NTB_IF);
	nt->qp_vec = NULL;
	return (rc);
}