Example #1
static void perf_link_work(struct work_struct *work)
{
	struct perf_ctx *perf =
		container_of(work, struct perf_ctx, link_work.work);
	struct ntb_dev *ndev = perf->ntb;
	struct pci_dev *pdev = ndev->pdev;
	u32 val;
	u64 size;
	int rc;

	dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);

	size = perf->mw.phys_size;
	ntb_peer_spad_write(ndev, MW_SZ_HIGH, upper_32_bits(size));
	ntb_peer_spad_write(ndev, MW_SZ_LOW, lower_32_bits(size));
	ntb_peer_spad_write(ndev, VERSION, PERF_VERSION);

	/* now read what peer wrote */
	val = ntb_spad_read(ndev, VERSION);
	if (val != PERF_VERSION) {
		dev_dbg(&pdev->dev, "Remote version = %#x\n", val);
		goto out;
	}

	val = ntb_spad_read(ndev, MW_SZ_HIGH);
	size = (u64)val << 32;

	val = ntb_spad_read(ndev, MW_SZ_LOW);
	size |= val;

	dev_dbg(&pdev->dev, "Remote MW size = %#llx\n", size);

	rc = perf_set_mw(perf, size);
	if (rc)
		goto out1;

	perf->link_is_up = true;

	return;

out1:
	perf_free_mw(perf);

out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&perf->link_work,
				      msecs_to_jiffies(PERF_LINK_DOWN_TIMEOUT));
}
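Every example in this section revolves around the same two helpers. For orientation, here is a minimal standalone sketch of the split-and-recombine idiom behind the scratchpad exchange above; the two macro definitions mirror the kernel's include/linux/kernel.h, everything else is illustrative userspace code, not NTB API:

#include <stdint.h>
#include <stdio.h>

/* Definitions matching the kernel's include/linux/kernel.h helpers.
 * The double 16-bit shift avoids undefined behavior when the macro is
 * handed a 32-bit value, where a single >> 32 would be UB. */
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))

int main(void)
{
	uint64_t size = 0x123456789abcdef0ULL;

	/* Split, as done before the two ntb_peer_spad_write() calls... */
	uint32_t hi = upper_32_bits(size);
	uint32_t lo = lower_32_bits(size);

	/* ...and recombine, as perf_link_work() does after reading
	 * MW_SZ_HIGH and MW_SZ_LOW back from the peer. */
	uint64_t joined = ((uint64_t)hi << 32) | lo;

	printf("match: %d\n", size == joined);	/* prints "match: 1" */
	return 0;
}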
Example #2
static void lx_trigger_start(struct lx6464es *chip, struct lx_stream *lx_stream)
{
	struct snd_pcm_substream *substream = lx_stream->stream;
	const unsigned int is_capture = lx_stream->is_capture;

	int err;

	const u32 channels = substream->runtime->channels;
	const u32 bytes_per_frame = channels * 3;
	const u32 period_size = substream->runtime->period_size;
	const u32 periods = substream->runtime->periods;
	const u32 period_bytes = period_size * bytes_per_frame;

	dma_addr_t buf = substream->dma_buffer.addr;
	int i;

	u32 needed, freed;
	u32 size_array[5];

	for (i = 0; i != periods; ++i) {
		u32 buffer_index = 0;

		err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed,
				    size_array);
		dev_dbg(chip->card->dev, "starting: needed %d, freed %d\n",
			    needed, freed);

		err = lx_buffer_give(chip, 0, is_capture, period_bytes,
				     lower_32_bits(buf), upper_32_bits(buf),
				     &buffer_index);

		dev_dbg(chip->card->dev, "starting: buffer index %x on 0x%lx (%d bytes)\n",
			    buffer_index, (unsigned long)buf, period_bytes);
		buf += period_bytes;
	}

	err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
	dev_dbg(chip->card->dev, "starting: needed %d, freed %d\n", needed, freed);

	dev_dbg(chip->card->dev, "starting: starting stream\n");
	err = lx_stream_start(chip, 0, is_capture);
	if (err < 0)
		dev_err(chip->card->dev, "couldn't start stream\n");
	else
		lx_stream->status = LX_STREAM_STATUS_RUNNING;

	lx_stream->frame_pos = 0;
}
Example #3
static int
gf100_sw_vblsem_release(struct nvkm_notify *notify)
{
	struct nv50_sw_chan *chan =
		container_of(notify, typeof(*chan), vblank.notify[notify->index]);
	struct nv50_sw_priv *priv = (void *)nv_object(chan)->engine;
	struct nvkm_bar *bar = nvkm_bar(priv);

	nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
	bar->flush(bar);
	nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
	nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
	nv_wr32(priv, 0x060014, chan->vblank.value);

	return NVKM_NOTIFY_DROP;
}
Example #4
static int
nvc0_software_vblsem_release(struct nouveau_eventh *event, int head)
{
	struct nouveau_software_chan *chan =
		container_of(event, struct nouveau_software_chan, vblank.event);
	struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
	struct nouveau_bar *bar = nouveau_bar(priv);

	nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
	bar->flush(bar);
	nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
	nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
	nv_wr32(priv, 0x060014, chan->vblank.value);

	return NVKM_EVENT_DROP;
}
Example #5
static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;

	memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));

	sb_info->index = 0;
	sb_info->known_attn = 0;

	/* Configure Attention Status Block in IGU */
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
	       lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
	qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
	       upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
}
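Examples #3-#5 above (and #8 and #17 below) are the register-pair variant: a 64-bit DMA address programmed into adjacent 32-bit LO/HI registers. A hedged sketch of that idiom follows; mmio_write32(), the register offsets, and program_dma_base() are hypothetical stand-ins, not the qed API:

#include <stdint.h>

#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))

/* Hypothetical register offsets and MMIO helper, for illustration only. */
#define DMA_ADDR_L_REG	0x10
#define DMA_ADDR_H_REG	0x14

static void mmio_write32(volatile uint32_t *regs, uint32_t off, uint32_t val)
{
	regs[off / 4] = val;
}

static void program_dma_base(volatile uint32_t *regs, uint64_t dma)
{
	/* Two 32-bit writes, mirroring the IGU_REG_ATTN_MSG_ADDR_L/_H
	 * pair above; the device reassembles the 64-bit address. */
	mmio_write32(regs, DMA_ADDR_L_REG, lower_32_bits(dma));
	mmio_write32(regs, DMA_ADDR_H_REG, upper_32_bits(dma));
}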
Example #6
/**
 * Debug a segment with an xHCI ring.
 *
 * Prints out all TRBs in the segment, even those after the Link TRB.
 *
 * XXX: should we print out TRBs that the HC owns?  As long as we don't
 * write, that should be fine...  We shouldn't expect that the memory pointed to
 * by the TRB is valid at all.  Do we care about ones the HC owns?  Probably,
 * for HC debugging.
 */
void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	int i;
	u64 addr = seg->dma;
	union xhci_trb *trb = seg->trbs;

	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
		trb = &seg->trbs[i];
		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr,
			 lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
			 upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
			 le32_to_cpu(trb->link.intr_target),
			 le32_to_cpu(trb->link.control));
		addr += sizeof(*trb);
	}
}
Example #7
/**
 * Debug a segment with an xHCI ring.
 *
 * Prints out all TRBs in the segment, even those after the Link TRB.
 *
 * XXX: should we print out TRBs that the HC owns?  As long as we don't
 * write, that should be fine...  We shouldn't expect that the memory pointed to
 * by the TRB is valid at all.  Do we care about ones the HC owns?  Probably,
 * for HC debugging.
 */
void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	int i;
	u32 addr = (u32) seg->dma;
	union xhci_trb *trb = seg->trbs;

	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
		trb = &seg->trbs[i];
		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
				lower_32_bits(trb->link.segment_ptr),
				upper_32_bits(trb->link.segment_ptr),
				(unsigned int) trb->link.intr_target,
				(unsigned int) trb->link.control);
		addr += sizeof(*trb);
	}
}
Example #8
/**
 * dwc3_event_buffers_setup - setup our allocated event buffers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
static int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer	*evt;

	evt = dwc->ev_buf;
	evt->lpos = 0;
	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
			lower_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
			upper_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
			DWC3_GEVNTSIZ_SIZE(evt->length));
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);

	return 0;
}
Example #9
/**
 * cik_sdma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (CIK).
 */
static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0,
					    SDMA_POLL_REG_MEM_EXTRA_OP(0) |
					    SDMA_POLL_REG_MEM_EXTRA_FUNC(3) | /* equal */
					    SDMA_POLL_REG_MEM_EXTRA_M));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xfffffff); /* mask */
	amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
}
Example #10
/**
 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, upper_32_bits(addr));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
	/* flush HDP */
	cik_sdma_hdp_flush_ring_emit(rdev, fence->ring);
}
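Examples #9-#11 (and #13 and #18 below) emit a 64-bit GPU address into a dword-wide command ring as two consecutive writes; Example #14 does the same into an indirect-buffer dword array. A minimal sketch of that layout, with a hypothetical fixed-size ring standing in for radeon_ring_write()/amdgpu_ring_write():

#include <stdint.h>

#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))

#define RING_DWORDS 256

/* Hypothetical dword ring standing in for the radeon/amdgpu rings. */
struct ring {
	uint32_t buf[RING_DWORDS];
	unsigned int wptr;
};

static void ring_write(struct ring *r, uint32_t v)
{
	r->buf[r->wptr++ % RING_DWORDS] = v;
}

/* Emit a 64-bit address as two dwords, low half first, which is the
 * order the SDMA fence packet above expects. */
static void ring_emit_addr64(struct ring *r, uint64_t addr)
{
	ring_write(r, lower_32_bits(addr));
	ring_write(r, upper_32_bits(addr));
}

The half ordering is packet-specific. Note also that Example #9 writes addr & 0xfffffffc for the low half: that truncates to 32 bits like lower_32_bits() but additionally clears the two low bits, since the poll packet wants a dword-aligned address.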
Example #11
static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}
Example #12
int mgmt_check_supported_fw(struct be_ctrl_info *ctrl)
{
	struct be_dma_mem nonemb_cmd;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_mgmt_controller_attributes *req;
	struct be_sge *sge = nonembedded_sgl(wrb);
	int status = 0;

	nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
				sizeof(struct be_mgmt_controller_attributes),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		SE_DEBUG(DBG_LVL_1,
			 "Failed to allocate memory for mgmt_check_supported_fw"
			 "\n");
		return -1;
	}
	nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
	req = nonemb_cmd.va;
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd.size);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
		SE_DEBUG(DBG_LVL_8, "Firmware version of CMD: %s\n",
			resp->params.hba_attribs.flashrom_version_string);
		SE_DEBUG(DBG_LVL_8, "Firmware version is : %s\n",
			resp->params.hba_attribs.firmware_version_string);
		SE_DEBUG(DBG_LVL_8,
			"Developer Build, not performing version check...\n");

	} else {
		SE_DEBUG(DBG_LVL_1, "Failed in mgmt_check_supported_fw\n");
	}
	if (nonemb_cmd.va)
		pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
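Examples #12, #23, #27 and #30 are the scatter-gather variant: each 32-bit half additionally passes through cpu_to_le32() because the firmware reads the element little-endian. A standalone sketch under those assumptions, with glibc's htole32() from <endian.h> standing in for the kernel's cpu_to_le32() and a simplified struct sge:

#include <endian.h>
#include <stdint.h>

#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))

/* Simplified scatter-gather element as the firmware sees it:
 * every field is stored little-endian. */
struct sge {
	uint32_t pa_hi;
	uint32_t pa_lo;
	uint32_t len;
};

static void sge_fill(struct sge *sge, uint64_t dma, uint32_t len)
{
	/* "dma & 0xFFFFFFFF" in the example above is exactly what
	 * lower_32_bits() expands to; the two spellings are equivalent. */
	sge->pa_hi = htole32(upper_32_bits(dma));
	sge->pa_lo = htole32(lower_32_bits(dma));
	sge->len = htole32(len);
}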
Example #13
/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value
 * to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	unsigned index;
	u32 tmp;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

	r = radeon_ring_lock(rdev, ring, 5);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	radeon_ring_write(ring, lower_32_bits(gpu_addr));
	radeon_ring_write(ring, upper_32_bits(gpu_addr));
	radeon_ring_write(ring, 1); /* number of DWs to follow */
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
Example #14
/**
 * radeon_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	dummy = ib.gpu_addr + 1024;

	/* stitch together a VCE destroy msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = lower_32_bits(dummy);
	ib.ptr[ib.length_dw++] = 0x00000001;

	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r)
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}
Example #15
static int
nv84_fence_sync(struct nouveau_fence *fence,
		struct nouveau_channel *prev, struct nouveau_channel *chan)
{
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING  (chan, NvSema);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(prev->id * 16));
		OUT_RING  (chan, lower_32_bits(prev->id * 16));
		OUT_RING  (chan, fence->sequence);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
		FIRE_RING (chan);
	}
	return ret;
}
Example #16
static int q6usm_memory_map(phys_addr_t buf_add, int dir, uint32_t bufsz,
		uint32_t bufcnt, uint32_t session, uint32_t *mem_handle)
{
	struct usm_cmd_memory_map_region mem_region_map;
	int rc = 0;

	if (this_mmap.apr == NULL) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	q6usm_add_mmaphdr(&mem_region_map.hdr,
			  sizeof(struct usm_cmd_memory_map_region), true,
			  ((session << 8) | dir));

	mem_region_map.hdr.opcode = USM_CMD_SHARED_MEM_MAP_REGION;
	mem_region_map.mempool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;

	mem_region_map.num_regions = 1;
	mem_region_map.flags = 0;

	mem_region_map.shm_addr_lsw = lower_32_bits(buf_add);
	mem_region_map.shm_addr_msw = upper_32_bits(buf_add);
	mem_region_map.mem_size_bytes = bufsz * bufcnt;

	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_region_map);
	if (rc < 0) {
		pr_err("%s: mem_map op[0x%x]rc[%d]\n",
		       __func__, mem_region_map.hdr.opcode, rc);
		rc = -EINVAL;
		goto fail_cmd;
	}

	rc = wait_event_timeout(this_mmap.cmd_wait,
				(atomic_read(&this_mmap.cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for memory_map\n", __func__);
	} else {
		*mem_handle = this_mmap.mem_handle;
		rc = 0;
	}
fail_cmd:
	return rc;
}
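The q6usm commands use explicit _lsw/_msw payload fields in native byte order, unlike the earlier be2iscsi SGEs, which byte-swap each half. A small sketch of that variant; the struct and function here are illustrative stand-ins, not the msm APR API:

#include <stdint.h>

#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))

/* Illustrative command payload with explicit low/high address words. */
struct map_cmd {
	uint32_t shm_addr_lsw;
	uint32_t shm_addr_msw;
	uint32_t mem_size_bytes;
};

static void map_cmd_fill(struct map_cmd *cmd, uint64_t phys, uint32_t size)
{
	/* No endian conversion here: the DSP side expects host order. */
	cmd->shm_addr_lsw = lower_32_bits(phys);
	cmd->shm_addr_msw = upper_32_bits(phys);
	cmd->mem_size_bytes = size;
}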
Example #17
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hclge_dev *hdev = ring->dev;
	struct hclge_hw *hw = &hdev->hw;

	if (ring->ring_type == HCLGE_TYPE_CSQ) {
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
	}
}
Example #18
/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
Example #19
void ms_xhci_dump_erst(struct xhci_erst *pErst)
{
	u64 addr = pErst->dma_addr;
	int i;
	struct xhci_event_ring_seg_table_entry *pErst_entry;

	for (i = 0; i < pErst->entry_count; ++i) {
		pErst_entry = &pErst->entries[i];
		ms_dbg("%016llx %08x %08x %08x %08x\n",
			 addr,
			 lower_32_bits(pErst_entry->ring_base_addr),
			 upper_32_bits(pErst_entry->ring_base_addr),
			 (unsigned int) pErst_entry->ring_seg_size,
			 (unsigned int) pErst_entry->reserved);
		addr += sizeof(*pErst_entry);
	}
}
Example #20
static int
gf100_sw_chan_vblsem_release(struct nvkm_notify *notify)
{
	struct nv50_sw_chan *chan =
		container_of(notify, typeof(*chan), vblank.notify[notify->index]);
	struct nvkm_sw *sw = chan->base.sw;
	struct nvkm_device *device = sw->engine.subdev.device;
	u32 inst = chan->base.fifo->inst->addr >> 12;

	nvkm_wr32(device, 0x001718, 0x80000000 | inst);
	nvkm_bar_flush(device->bar);
	nvkm_wr32(device, 0x06000c, upper_32_bits(chan->vblank.offset));
	nvkm_wr32(device, 0x060010, lower_32_bits(chan->vblank.offset));
	nvkm_wr32(device, 0x060014, chan->vblank.value);

	return NVKM_NOTIFY_DROP;
}
Example #21
static int
nv84_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING  (chan, NvSema);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(chan->id * 16));
		OUT_RING  (chan, lower_32_bits(chan->id * 16));
		OUT_RING  (chan, fence->sequence);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
		FIRE_RING (chan);
	}
	return ret;
}
Example #22
void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
	u32 addr = (u32) erst->erst_dma_addr;
	int i;
	struct xhci_erst_entry *entry;

	for (i = 0; i < erst->num_entries; ++i) {
		entry = &erst->entries[i];
		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
				(unsigned int) addr,
				lower_32_bits(entry->seg_addr),
				upper_32_bits(entry->seg_addr),
				(unsigned int) entry->seg_size,
				(unsigned int) entry->rsvd);
		addr += sizeof(*entry);
	}
}
Example #23
int mgmt_invalidate_icds(struct beiscsi_hba *phba,
			 unsigned int icd, unsigned int cid)
{
	struct be_dma_mem nonemb_cmd;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_sge *sge = nonembedded_sgl(wrb);
	struct invalidate_commands_params_in *req;
	int status = 0;

	nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		SE_DEBUG(DBG_LVL_1,
			 "Failed to allocate memory for"
			 "mgmt_invalidate_icds \n");
		return -1;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
	req = nonemb_cmd.va;
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS,
			sizeof(*req));
	req->ref_handle = 0;
	req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
	req->icd_count = 0;
	req->table[req->icd_count].icd = icd;
	req->table[req->icd_count].cid = cid;
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd.size);

	status = be_mbox_notify(ctrl);
	if (status)
		SE_DEBUG(DBG_LVL_1, "ICDS Invalidation Failed\n");
	spin_unlock(&ctrl->mbox_lock);
	if (nonemb_cmd.va)
		pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
	return status;
}
Example #24
void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
	u64 addr = erst->erst_dma_addr;
	int i;
	struct xhci_erst_entry *entry;

	for (i = 0; i < erst->num_entries; ++i) {
		entry = &erst->entries[i];
		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n",
			 addr,
			 lower_32_bits(le64_to_cpu(entry->seg_addr)),
			 upper_32_bits(le64_to_cpu(entry->seg_addr)),
			 le32_to_cpu(entry->seg_size),
			 le32_to_cpu(entry->rsvd));
		addr += sizeof(*entry);
	}
}
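Examples #6, #7, #19, #22 and #24 are the debug-print variant: a device-owned 64-bit field is converted to CPU order first, then split into two %08x columns (the (u32) casts in #7 and #22 are older 32-bit-only versions of the same functions). A minimal sketch under the same assumptions, with le64toh()/le32toh() from <endian.h> in place of le64_to_cpu()/le32_to_cpu() and a simplified entry layout:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))

/* Simplified ERST entry: fields are little-endian in memory. */
struct erst_entry {
	uint64_t seg_addr;
	uint32_t seg_size;
	uint32_t rsvd;
};

static void dump_erst_entry(const struct erst_entry *e)
{
	/* Convert to CPU order first, then split; splitting the halves
	 * of a still-little-endian value would scramble the output on
	 * big-endian hosts. */
	uint64_t addr = le64toh(e->seg_addr);

	printf("%08x %08x %08x %08x\n",
	       lower_32_bits(addr), upper_32_bits(addr),
	       le32toh(e->seg_size), le32toh(e->rsvd));
}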
Example #25
int q6usm_get_us_stream_param(int dir, struct us_client *usc,
			      uint32_t module_id, uint32_t param_id,
			      uint32_t buf_size)
{
	int rc = 0;
	struct usm_stream_cmd_get_param cmd_get_param;
	struct us_port_data *port = NULL;

	if ((usc == NULL) || (usc->apr == NULL)) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}
	port = &usc->port[dir];

	q6usm_add_hdr(usc, &cmd_get_param.hdr,
		      (sizeof(cmd_get_param) - APR_HDR_SIZE), true);

	cmd_get_param.hdr.opcode = USM_STREAM_CMD_GET_PARAM;
	cmd_get_param.buf_size = buf_size;
	cmd_get_param.buf_addr_msw = upper_32_bits(port->param_phys);
	cmd_get_param.buf_addr_lsw = lower_32_bits(port->param_phys);
	cmd_get_param.mem_map_handle =
		*((uint32_t *)(port->param_buf_mem_handle));
	cmd_get_param.module_id = module_id;
	cmd_get_param.param_id = param_id;
	cmd_get_param.hdr.token = 0;

	rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_get_param);

	if (rc < 0) {
		pr_err("%s:write op[0x%x];rc[%d]\n",
		       __func__, cmd_get_param.hdr.opcode, rc);
	}

	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: CMD_GET_PARAM: timeout=%d\n",
		       __func__, Q6USM_TIMEOUT_JIFFIES);
	} else {
		rc = 0;
	}

	return rc;
}
Example #26
static int update_mqd(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q)
{
	struct cik_mqd *m;

	BUG_ON(!mm || !q || !mqd);

	pr_debug("kfd: In func %s\n", __func__);

	m = get_mqd(mqd);
	m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
				DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;

	/*
	 * Calculate the queue size: the field holds log2 of the queue size
	 * in dwords minus 1; ffs() is 1-based, hence the extra -1.
	 */
	m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
								- 1 - 1;
	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
					DOORBELL_OFFSET(q->doorbell_off);

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL)
		m->cp_hqd_pq_control |= NO_UPDATE_RPTR;

	update_cu_mask(mm, mqd, q);

	m->cp_hqd_active = 0;
	q->is_active = false;
	if (q->queue_size > 0 &&
			q->queue_address != 0 &&
			q->queue_percent > 0) {
		m->cp_hqd_active = 1;
		q->is_active = true;
	}

	return 0;
}
Example #27
unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
				struct invalidate_command_table *inv_tbl,
				unsigned int num_invalidate, unsigned int cid,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_mcc_wrb *wrb;
	struct be_sge *sge;
	struct invalidate_commands_params_in *req;
	unsigned int i, tag = 0;

	spin_lock(&ctrl->mbox_lock);
	tag = alloc_mcc_tag(phba);
	if (!tag) {
		spin_unlock(&ctrl->mbox_lock);
		return tag;
	}

	req = nonemb_cmd->va;
	memset(req, 0, sizeof(*req));
	wrb = wrb_from_mccq(phba);
	sge = nonembedded_sgl(wrb);
	wrb->tag0 |= tag;

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			OPCODE_COMMON_ISCSI_ERROR_RECOVERY_INVALIDATE_COMMANDS,
			sizeof(*req));
	req->ref_handle = 0;
	req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
	for (i = 0; i < num_invalidate; i++) {
		req->table[i].icd = inv_tbl->icd;
		req->table[i].cid = inv_tbl->cid;
		req->icd_count++;
		inv_tbl++;
	}
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(phba);
	spin_unlock(&ctrl->mbox_lock);
	return tag;
}
Example #28
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret = 0;

	if (IS_I965G(dev))
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
		ret = 0;
		goto out;
	}
#endif

	/* Get some space for it */
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0,   pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		goto out;
	}

	if (IS_I965G(dev))
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
out:
	return ret;
}
Example #29
/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value
 * to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
	u32 tmp;

	if (!ptr) {
		DRM_ERROR("invalid vram scratch pointer\n");
		return -EINVAL;
	}

	tmp = 0xCAFEDEAD;
	writel(tmp, ptr);

	r = radeon_ring_lock(rdev, ring, 5);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
	radeon_ring_write(ring, 1); /* number of DWs to follow */
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = readl(ptr);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
Example #30
unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
				   u32 boot_session_handle,
				   struct be_dma_mem *nonemb_cmd)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_mcc_wrb *wrb;
	unsigned int tag = 0;
	struct be_cmd_get_session_req *req;
	struct be_cmd_get_session_resp *resp;
	struct be_sge *sge;

	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BG_%d : In beiscsi_get_session_info\n");

	spin_lock(&ctrl->mbox_lock);
	tag = alloc_mcc_tag(phba);
	if (!tag) {
		spin_unlock(&ctrl->mbox_lock);
		return tag;
	}

	nonemb_cmd->size = sizeof(*resp);
	req = nonemb_cmd->va;
	memset(req, 0, sizeof(*req));
	wrb = wrb_from_mccq(phba);
	sge = nonembedded_sgl(wrb);
	wrb->tag0 |= tag;

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
			   OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
			   sizeof(*resp));
	req->session_handle = boot_session_handle;
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(phba);
	spin_unlock(&ctrl->mbox_lock);
	return tag;
}