static int pp_check_ntb(struct ntb_dev *ntb)
{
	u64 pmask;

	if (ntb_db_is_unsafe(ntb)) {
		dev_dbg(&ntb->dev, "Doorbell is unsafe\n");
		if (!unsafe)
			return -EINVAL;
	}

	if (ntb_spad_is_unsafe(ntb)) {
		dev_dbg(&ntb->dev, "Scratchpad is unsafe\n");
		if (!unsafe)
			return -EINVAL;
	}

	/* One doorbell bit is needed per peer port plus one for ourselves */
	pmask = GENMASK_ULL(ntb_peer_port_count(ntb), 0);
	if ((ntb_db_valid_mask(ntb) & pmask) != pmask) {
		dev_err(&ntb->dev, "Unsupported DB configuration\n");
		return -EINVAL;
	}

	/* Either scratchpads or message registers must be available */
	if (ntb_spad_count(ntb) < 1 && ntb_msg_count(ntb) < 1) {
		dev_err(&ntb->dev, "Scratchpads and messages unsupported\n");
		return -EINVAL;
	} else if (ntb_spad_count(ntb) < 1) {
		dev_dbg(&ntb->dev, "Scratchpads unsupported\n");
	} else if (ntb_msg_count(ntb) < 1) {
		dev_dbg(&ntb->dev, "Messages unsupported\n");
	}

	return 0;
}
void fjes_hw_init_command_registers(struct fjes_hw *hw,
				    struct fjes_device_command_param *param)
{
	/* Request Buffer length */
	wr32(XSCT_REQBL, (__le32)(param->req_len));
	/* Response Buffer Length */
	wr32(XSCT_RESPBL, (__le32)(param->res_len));

	/* Request Buffer Address */
	wr32(XSCT_REQBAL,
	     (__le32)(param->req_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_REQBAH,
	     (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));

	/* Response Buffer Address */
	wr32(XSCT_RESPBAL,
	     (__le32)(param->res_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_RESPBAH,
	     (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));

	/* Share status address */
	wr32(XSCT_SHSTSAL,
	     (__le32)(param->share_start & GENMASK_ULL(31, 0)));
	wr32(XSCT_SHSTSAH,
	     (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
}
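/*
 * The GENMASK_ULL(31, 0) / GENMASK_ULL(63, 32) pairs above implement the
 * common split of a 64-bit buffer address into two 32-bit register writes.
 * A minimal standalone sketch of the same split follows; the helper names
 * are made up for illustration, and the kernel's lower_32_bits()/
 * upper_32_bits() helpers express the same operation.
 */
static inline u32 addr_lo32(u64 addr)
{
	return (u32)(addr & GENMASK_ULL(31, 0));
}

static inline u32 addr_hi32(u64 addr)
{
	return (u32)((addr & GENMASK_ULL(63, 32)) >> 32);
}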
static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
				     u8 interrupt_num)
{
	struct cdns_pcie *pcie = &ep->pcie;
	u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
	u16 flags, mme, data, data_mask;
	u8 msi_count;
	u64 pci_addr, pci_addr_mask = 0xff;

	/* Check whether the MSI feature has been enabled by the PCI host. */
	flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS);
	if (!(flags & PCI_MSI_FLAGS_ENABLE))
		return -EINVAL;

	/* Get the number of enabled MSIs */
	mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4;
	msi_count = 1 << mme;
	if (!interrupt_num || interrupt_num > msi_count)
		return -EINVAL;

	/* Compute the data value to be written. */
	data_mask = msi_count - 1;
	data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64);
	data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask);

	/* Get the PCI address to write the data to. */
	pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI);
	pci_addr <<= 32;
	pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO);
	pci_addr &= GENMASK_ULL(63, 2);

	/* Set the outbound region if needed. */
	if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
		     ep->irq_pci_fn != fn)) {
		/* Last region was reserved for IRQ writes. */
		cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1,
					      false,
					      ep->irq_phys_addr,
					      pci_addr & ~pci_addr_mask,
					      pci_addr_mask + 1);
		ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
		ep->irq_pci_fn = fn;
	}
	writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));

	return 0;
}
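/*
 * Worked example (illustrative values, not taken from the driver): with
 * Multiple Message Enable mme = 2 the function has msi_count = 4 vectors,
 * so data_mask = 0x3.  Raising interrupt_num = 3 when the host programmed
 * an MSI data value of 0x4360 gives
 *
 *   data = (0x4360 & ~0x3) | ((3 - 1) & 0x3) = 0x4362
 *
 * i.e. the low log2(msi_count) bits of the MSI data word select the vector.
 */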
static void pp_init_flds(struct pp_ctx *pp)
{
	int pidx, lport, pcnt;

	/* Find global port index */
	lport = ntb_port_number(pp->ntb);
	pcnt = ntb_peer_port_count(pp->ntb);
	for (pidx = 0; pidx < pcnt; pidx++) {
		if (lport < ntb_peer_port_number(pp->ntb, pidx))
			break;
	}

	/* Doorbells are indexed by the global port index, not by the raw
	 * port number.
	 */
	pp->in_db = BIT_ULL(pidx);
	pp->pmask = GENMASK_ULL(pidx, 0) >> 1;
	pp->nmask = GENMASK_ULL(pcnt - 1, pidx);

	dev_dbg(&pp->ntb->dev, "Inbound db %#llx, prev %#llx, next %#llx\n",
		pp->in_db, pp->pmask, pp->nmask);
}
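/*
 * Worked example (hypothetical values): on a system with four peer ports
 * (pcnt = 4) where the local port sorts in at index pidx = 2:
 *
 *   in_db = BIT_ULL(2)              = 0x4
 *   pmask = GENMASK_ULL(2, 0) >> 1  = 0x3   (doorbells of earlier ports)
 *   nmask = GENMASK_ULL(3, 2)       = 0xc   (our doorbell and later ports)
 */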
/*
 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t max_zone_dma_phys(void)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);

	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}
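/*
 * Worked example (illustrative addresses): if DRAM starts at 0x8080000000,
 * masking with GENMASK_ULL(63, 32) yields an offset of 0x8000000000, so the
 * zone limit becomes min(0x8100000000, memblock_end_of_DRAM()); if DRAM
 * starts below 4G the offset is 0 and the limit is the usual 4G boundary
 * (or the end of DRAM, whichever is lower).
 */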
static int get_bus_id(u64 reg)
{
	return FIELD_GET(GENMASK_ULL(61, 60), reg);
}
static void clear_bus_id(u64 *reg)
{
	u64 bus_id_mask = GENMASK_ULL(61, 60);

	*reg &= ~bus_id_mask;
}
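/*
 * A hypothetical setter completing the accessor pair above (not part of
 * the original driver), using FIELD_PREP() from <linux/bitfield.h> as the
 * counterpart to the FIELD_GET() in get_bus_id():
 */
static void set_bus_id(u64 *reg, int bus_id)
{
	*reg &= ~GENMASK_ULL(61, 60);
	*reg |= FIELD_PREP(GENMASK_ULL(61, 60), (u64)bus_id);
}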
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
{
	u64 mask = GENMASK_ULL(h, l);

	*raw_cmd &= ~mask;
	*raw_cmd |= (val << l) & mask;
}
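/*
 * Sketch of how per-field wrappers typically build on its_mask_encode() in
 * the GICv3 ITS driver.  The struct definition here is a trimmed-down
 * stand-in added only to keep the sketch self-contained, and the field
 * positions assume the ITS command layout with the command number in bits
 * 7:0 and the device ID in bits 63:32 of the first doubleword.
 */
struct its_cmd_block {
	u64 raw_cmd[4];
};

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}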