Example #1
static u64 amd_ntb_link_is_up(struct ntb_dev *ntb,
			      enum ntb_speed *speed,
			      enum ntb_width *width)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int ret = 0;

	if (amd_link_is_up(ndev)) {
		if (speed)
			*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
		if (width)
			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);

		dev_dbg(&ntb->pdev->dev, "link is up.\n");

		ret = 1;
	} else {
		if (speed)
			*speed = NTB_SPEED_NONE;
		if (width)
			*width = NTB_WIDTH_NONE;

		dev_dbg(&ntb->pdev->dev, "link is down.\n");
	}

	return ret;
}
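These callbacks are normally reached through the generic wrappers in include/linux/ntb.h rather than called directly. A minimal client-side sketch, assuming a struct ntb_dev obtained from the NTB core (example_check_link is a hypothetical helper):

/* Poll link state through the generic NTB API; ntb_link_is_up()
 * dispatches to amd_ntb_link_is_up() above.  A nonzero return means
 * the link is up and *speed/*width hold the negotiated values.
 */
static void example_check_link(struct ntb_dev *ntb)
{
	enum ntb_speed speed;
	enum ntb_width width;

	if (ntb_link_is_up(ntb, &speed, &width))
		dev_info(&ntb->dev, "link up: speed %d, width %d\n",
			 speed, width);
	else
		dev_info(&ntb->dev, "link down\n");
}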
Example #2
static u64 amd_ntb_db_read(struct ntb_dev *ntb)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	return (u64)readw(mmio + AMD_DBSTAT_OFFSET);
}
Example #3
static int amd_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
				phys_addr_t *base,
				resource_size_t *size,
				resource_size_t *align,
				resource_size_t *align_size)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar);

	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar);

	if (align)
		*align = SZ_4K;

	if (align_size)
		*align_size = 1;

	return 0;
}
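This callback belongs to the older mw_get_range() generation of the NTB API. A hedged sketch of how a client of that API generation would query the window geometry (example_query_mw is a hypothetical helper):

/* Query inbound window geometry before programming a translation.
 * For this driver, align is the 4 KiB DMA address alignment and
 * align_size the size granularity (1, i.e. unrestricted).
 */
static int example_query_mw(struct ntb_dev *ntb, int idx)
{
	phys_addr_t base;
	resource_size_t size, align, align_size;
	int rc;

	rc = ntb_mw_get_range(ntb, idx, &base, &size, &align, &align_size);
	if (rc)
		return rc;

	dev_dbg(&ntb->dev, "mw%d: base %pap size %pap align %pap\n",
		idx, &base, &size, &align);
	return 0;
}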
Example #4
static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	return ntb_ndev(ntb)->mw_count;
}
Example #5
static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;

	writew((u16)db_bits, mmio + AMD_DBREQ_OFFSET);

	return 0;
}
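A hedged sketch of ringing the peer from a client (example_ring_peer is a hypothetical helper); note the driver truncates db_bits to the 16 doorbell bits it implements via the writew() above:

/* Ring one peer doorbell bit, rejecting bits outside the valid mask. */
static int example_ring_peer(struct ntb_dev *ntb, int bit)
{
	u64 db = BIT_ULL(bit);

	if (db & ~ntb_db_valid_mask(ntb))
		return -EINVAL;

	return ntb_peer_db_set(ntb, db);
}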
Example #6
static u32 amd_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (idx < 0 || idx >= ndev->spad_count)
		return 0;

	offset = ndev->self_spad + (idx << 2);
	return readl(mmio + AMD_SPAD_OFFSET + offset);
}
Example #7
static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (sidx < 0 || sidx >= ndev->spad_count)
		return -EINVAL;

	offset = ndev->peer_spad + (sidx << 2);
	return readl(mmio + AMD_SPAD_OFFSET + offset);
}
Example #8
static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
				   int sidx, u32 val)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 offset;

	if (sidx < 0 || sidx >= ndev->spad_count)
		return -EINVAL;

	offset = ndev->peer_spad + (sidx << 2);
	writel(val, mmio + AMD_SPAD_OFFSET + offset);

	return 0;
}
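Together with the local-read callback above, this supports the usual scratchpad handshake: each side writes into the peer's scratchpads and polls its own. A minimal sketch; VERSION_SPAD and both helpers are hypothetical names chosen for illustration:

#define VERSION_SPAD	0	/* illustrative scratchpad index */

/* Publish a value into the peer's scratchpad. */
static int example_publish_version(struct ntb_dev *ntb, u32 ver)
{
	return ntb_peer_spad_write(ntb, NTB_DEF_PEER_IDX, VERSION_SPAD, ver);
}

/* Read what the peer published into our local scratchpad. */
static u32 example_read_version(struct ntb_dev *ntb)
{
	return ntb_spad_read(ntb, VERSION_SPAD);
}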
Example #9
static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	unsigned long flags;

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, flags);
	ndev->db_mask &= ~db_bits;
	writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET);
	spin_unlock_irqrestore(&ndev->db_mask_lock, flags);

	return 0;
}
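Clearing mask bits re-enables the corresponding doorbell interrupts; the spinlock keeps the read-modify-write of db_mask atomic against concurrent callers. A hedged sketch of a client unmasking everything once its event callback is registered (example_enable_doorbells is hypothetical):

/* Unmask all valid doorbells so peer rings raise interrupts. */
static int example_enable_doorbells(struct ntb_dev *ntb)
{
	return ntb_db_clear_mask(ntb, ntb_db_valid_mask(ntb));
}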
Example #10
static int amd_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
				    phys_addr_t *base, resource_size_t *size)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar);

	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar);

	return 0;
}
Example #11
static int amd_ntb_link_disable(struct ntb_dev *ntb)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 ntb_ctl;

	/* Disable event interrupt */
	ndev->int_mask |= AMD_EVENT_INTMASK;
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;
	dev_dbg(&ntb->pdev->dev, "Disabling Link.\n");

	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
	ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);

	return 0;
}
Example #12
static int amd_ntb_link_enable(struct ntb_dev *ntb,
			       enum ntb_speed max_speed,
			       enum ntb_width max_width)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	void __iomem *mmio = ndev->self_mmio;
	u32 ntb_ctl;

	/* Enable event interrupt */
	ndev->int_mask &= ~AMD_EVENT_INTMASK;
	writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;
	dev_dbg(&ntb->pdev->dev, "Enabling Link.\n");

	ntb_ctl = readl(mmio + AMD_CNTL_OFFSET);
	ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
	writel(ntb_ctl, mmio + AMD_CNTL_OFFSET);

	return 0;
}
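A hedged sketch of the typical client bring-up order, assuming auto-negotiated speed and width (example_bring_up is a hypothetical helper):

/* Enable the link unrestricted; the hardware negotiates speed/width.
 * Clients then wait for the link-up event (or poll ntb_link_is_up())
 * before programming translations.
 */
static int example_bring_up(struct ntb_dev *ntb)
{
	int rc;

	rc = ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	if (rc)
		return rc;

	/* ... wait for the link event, then set up memory windows ... */
	return 0;
}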
Example #13
static int amd_ntb_db_vector_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_count;
}
Example #14
static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
				dma_addr_t addr, resource_size_t size)
{
	struct amd_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long xlat_reg, limit_reg = 0;
	resource_size_t mw_size;
	void __iomem *mmio, *peer_mmio;
	u64 base_addr, limit, reg_val;
	int bar;

	if (pidx != NTB_DEF_PEER_IDX)
		return -EINVAL;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	mw_size = pci_resource_len(ntb->pdev, bar);

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	peer_mmio = ndev->peer_mmio;

	base_addr = pci_resource_start(ntb->pdev, bar);

	if (bar != 1) {
		xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2);
		limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 2);

		/* Set the limit if supported */
		limit = size;

		/* set and verify setting the translation address */
		write64(addr, peer_mmio + xlat_reg);
		reg_val = read64(peer_mmio + xlat_reg);
		if (reg_val != addr) {
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		write64(limit, peer_mmio + limit_reg);
		reg_val = read64(peer_mmio + limit_reg);
		if (reg_val != limit) {
			write64(base_addr, mmio + limit_reg);
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}
	} else {
		xlat_reg = AMD_BAR1XLAT_OFFSET;
		limit_reg = AMD_BAR1LMT_OFFSET;

		/* Set the limit if supported */
		limit = size;

		/* set and verify setting the translation address */
		write64(addr, peer_mmio + xlat_reg);
		reg_val = read64(peer_mmio + xlat_reg);
		if (reg_val != addr) {
			write64(0, peer_mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		writel(limit, peer_mmio + limit_reg);
		reg_val = readl(peer_mmio + limit_reg);
		if (reg_val != limit) {
			writel(base_addr, mmio + limit_reg);
			writel(0, peer_mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}
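A hedged sketch of the caller's side, in the style of NTB transport clients: back the inbound window with a coherent DMA buffer that satisfies the size check above and the driver's 4 KiB alignment (example_setup_mw is a hypothetical helper):

/* Allocate a local buffer and expose it to the peer through window idx. */
static int example_setup_mw(struct ntb_dev *ntb, int idx,
			    resource_size_t size)
{
	struct device *dma_dev = &ntb->pdev->dev;
	dma_addr_t dma_addr;
	void *buf;
	int rc;

	buf = dma_alloc_coherent(dma_dev, size, &dma_addr, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = ntb_mw_set_trans(ntb, NTB_DEF_PEER_IDX, idx, dma_addr, size);
	if (rc)
		dma_free_coherent(dma_dev, size, buf, dma_addr);

	return rc;
}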
Example #15
static int amd_ntb_peer_mw_count(struct ntb_dev *ntb)
{
	/* The same as for inbound MWs */
	return ntb_ndev(ntb)->mw_count;
}
Example #16
static u64 amd_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_valid_mask;
}
Example #17
static int amd_ntb_spad_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->spad_count;
}
Example #18
static int amd_ntb_mw_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->mw_count;
}
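Taken together, these callbacks are what the driver registers with the NTB core. A partial, illustrative sketch of the ops table, listing only the callbacks shown above (the real driver also wires up further callbacks, e.g. db_clear and spad_write, not reproduced here):

static const struct ntb_dev_ops amd_ntb_ops = {
	.mw_count		= amd_ntb_mw_count,
	.mw_set_trans		= amd_ntb_mw_set_trans,
	.peer_mw_count		= amd_ntb_peer_mw_count,
	.peer_mw_get_addr	= amd_ntb_peer_mw_get_addr,
	.link_is_up		= amd_ntb_link_is_up,
	.link_enable		= amd_ntb_link_enable,
	.link_disable		= amd_ntb_link_disable,
	.db_valid_mask		= amd_ntb_db_valid_mask,
	.db_vector_count	= amd_ntb_db_vector_count,
	.db_read		= amd_ntb_db_read,
	.db_clear_mask		= amd_ntb_db_clear_mask,
	.peer_db_set		= amd_ntb_peer_db_set,
	.spad_count		= amd_ntb_spad_count,
	.spad_read		= amd_ntb_spad_read,
	.peer_spad_read		= amd_ntb_peer_spad_read,
	.peer_spad_write	= amd_ntb_peer_spad_write,
};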