Example #1
/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a
 * value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	unsigned index;
	u32 tmp;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

	r = radeon_ring_lock(rdev, ring, 5);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
	radeon_ring_write(ring, lower_32_bits(gpu_addr));
	radeon_ring_write(ring, upper_32_bits(gpu_addr));
	radeon_ring_write(ring, 1); /* number of DWs to follow */
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
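All of the examples below lean on the same pair of helpers from include/linux/kernel.h. For reference, their definitions are essentially the following; the double 16-bit shift in upper_32_bits() avoids an undefined 32-bit shift when the macro is handed a 32-bit value:

/* Split a 64-bit value into its 32-bit halves. */
#define lower_32_bits(n) ((u32)(n))
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))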
Example #2
void ms_xhci_dump_erst(struct xhci_erst *pErst)
{
	u64 addr = pErst->dma_addr;
	int i;
	struct xhci_event_ring_seg_table_entry *pErst_entry;

	for (i = 0; i < pErst->entry_count; ++i) {
		pErst_entry = &pErst->entries[i];
		ms_dbg("%016llx %08x %08x %08x %08x\n",
			 addr,
			 lower_32_bits(pErst_entry->ring_base_addr),
			 upper_32_bits(pErst_entry->ring_base_addr),
			 (unsigned int) pErst_entry->ring_seg_size,
			 (unsigned int) pErst_entry->reserved);
		addr += sizeof(*pErst_entry);
	}
}
Example #3
/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}
Example #4
static int
nv84_fence_sync(struct nouveau_fence *fence,
		struct nouveau_channel *prev, struct nouveau_channel *chan)
{
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING  (chan, NvSema);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(prev->id * 16));
		OUT_RING  (chan, lower_32_bits(prev->id * 16));
		OUT_RING  (chan, fence->sequence);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
		FIRE_RING (chan);
	}
	return ret;
}
Example #5
static int
nv84_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING  (chan, NvSema);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(chan->id * 16));
		OUT_RING  (chan, lower_32_bits(chan->id * 16));
		OUT_RING  (chan, fence->sequence);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
		FIRE_RING (chan);
	}
	return ret;
}
Example #6
static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hclge_dev *hdev = ring->dev;
	struct hclge_hw *hw = &hdev->hw;

	if (ring->ring_type == HCLGE_TYPE_CSQ) {
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
	} else {
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
				lower_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
				upper_32_bits(dma));
		hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
				(ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
				HCLGE_NIC_CMQ_ENABLE);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
		hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
	}
}
Example #7
File: gf100.c Project: 020gzh/linux
static int
gf100_sw_chan_vblsem_release(struct nvkm_notify *notify)
{
	struct nv50_sw_chan *chan =
		container_of(notify, typeof(*chan), vblank.notify[notify->index]);
	struct nvkm_sw *sw = chan->base.sw;
	struct nvkm_device *device = sw->engine.subdev.device;
	u32 inst = chan->base.fifo->inst->addr >> 12;

	nvkm_wr32(device, 0x001718, 0x80000000 | inst);
	nvkm_bar_flush(device->bar);
	nvkm_wr32(device, 0x06000c, upper_32_bits(chan->vblank.offset));
	nvkm_wr32(device, 0x060010, lower_32_bits(chan->vblank.offset));
	nvkm_wr32(device, 0x060014, chan->vblank.value);

	return NVKM_NOTIFY_DROP;
}
Example #8
void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
	u64 addr = erst->erst_dma_addr;
	int i;
	struct xhci_erst_entry *entry;

	for (i = 0; i < erst->num_entries; ++i) {
		entry = &erst->entries[i];
		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n",
			 addr,
			 lower_32_bits(le64_to_cpu(entry->seg_addr)),
			 upper_32_bits(le64_to_cpu(entry->seg_addr)),
			 le32_to_cpu(entry->seg_size),
			 le32_to_cpu(entry->rsvd));
		addr += sizeof(*entry);
	}
}
Example #9
void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
{
	u64 addr = erst->erst_dma_addr;
	int i;
	struct xhci_erst_entry *entry;

	for (i = 0; i < erst->num_entries; ++i) {
		entry = &erst->entries[i];
		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n",
				addr,
				lower_32_bits(entry->seg_addr),
				upper_32_bits(entry->seg_addr),
				(unsigned int) entry->seg_size,
				(unsigned int) entry->rsvd);
		addr += sizeof(*entry);
	}
}
Example #10
static int q6usm_memory_map(phys_addr_t buf_add, int dir, uint32_t bufsz,
		uint32_t bufcnt, uint32_t session, uint32_t *mem_handle)
{
	struct usm_cmd_memory_map_region mem_region_map;
	int rc = 0;

	if (this_mmap.apr == NULL) {
		pr_err("%s: APR handle NULL\n", __func__);
		return -EINVAL;
	}

	q6usm_add_mmaphdr(&mem_region_map.hdr,
			  sizeof(struct usm_cmd_memory_map_region), true,
			  ((session << 8) | dir));

	mem_region_map.hdr.opcode = USM_CMD_SHARED_MEM_MAP_REGION;
	mem_region_map.mempool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;

	mem_region_map.num_regions = 1;
	mem_region_map.flags = 0;

	mem_region_map.shm_addr_lsw = lower_32_bits(buf_add);
	mem_region_map.shm_addr_msw = upper_32_bits(buf_add);
	mem_region_map.mem_size_bytes = bufsz * bufcnt;

	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_region_map);
	if (rc < 0) {
		pr_err("%s: mem_map op[0x%x]rc[%d]\n",
		       __func__, mem_region_map.hdr.opcode, rc);
		rc = -EINVAL;
		goto fail_cmd;
	}

	rc = wait_event_timeout(this_mmap.cmd_wait,
				(atomic_read(&this_mmap.cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout. waited for memory_map\n", __func__);
	} else {
		*mem_handle = this_mmap.mem_handle;
		rc = 0;
	}
fail_cmd:
	return rc;
}
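The DSP-side consumer of this command recombines the two halves into the original physical address. A minimal sketch of the round trip, using a hypothetical helper that is not part of the q6usm driver:

/* Hypothetical helper, illustration only: rebuild the 64-bit address
 * carried as shm_addr_lsw/shm_addr_msw in the mapping command. */
static inline u64 usm_shm_addr(u32 addr_lsw, u32 addr_msw)
{
	return ((u64)addr_msw << 32) | addr_lsw;
}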
Example #11
int q6usm_get_us_stream_param(int dir, struct us_client *usc,
                              uint32_t module_id, uint32_t param_id, uint32_t buf_size)
{
    int rc = 0;
    struct usm_stream_cmd_get_param cmd_get_param;
    struct us_port_data *port = NULL;

    if ((usc == NULL) || (usc->apr == NULL)) {
        pr_err("%s: APR handle NULL\n", __func__);
        return -EINVAL;
    }
    port = &usc->port[dir];

    q6usm_add_hdr(usc, &cmd_get_param.hdr,
                  (sizeof(cmd_get_param) - APR_HDR_SIZE), true);

    cmd_get_param.hdr.opcode = USM_STREAM_CMD_GET_PARAM;
    cmd_get_param.buf_size = buf_size;
    cmd_get_param.buf_addr_msw = upper_32_bits(port->param_phys);
    cmd_get_param.buf_addr_lsw = lower_32_bits(port->param_phys);
    cmd_get_param.mem_map_handle =
        *((uint32_t *)(port->param_buf_mem_handle));
    cmd_get_param.module_id = module_id;
    cmd_get_param.param_id = param_id;
    cmd_get_param.hdr.token = 0;

    rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_get_param);

    if (rc < 0) {
        pr_err("%s:write op[0x%x];rc[%d]\n",
               __func__, cmd_get_param.hdr.opcode, rc);
        return rc;
    }

    rc = wait_event_timeout(usc->cmd_wait,
                            (atomic_read(&usc->cmd_state) == 0),
                            Q6USM_TIMEOUT_JIFFIES);
    if (!rc) {
        rc = -ETIME;
        pr_err("%s: CMD_GET_PARAM: timeout=%d\n",
               __func__, Q6USM_TIMEOUT_JIFFIES);
    } else
        rc = 0;

    return rc;
}
Example #12
/**
 * i40e_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set RSS look up table
 **/
static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
					   u16 vsi_id, bool pf_lut,
					   u8 *lut, u16 lut_size,
					   bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;

	if (set)
		i40evf_fill_default_direct_cmd_desc(&desc,
						    i40e_aqc_opc_set_rss_lut);
	else
		i40evf_fill_default_direct_cmd_desc(&desc,
						    i40e_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
			cpu_to_le16((u16)((vsi_id <<
					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);

	if (pf_lut)
		cmd_resp->flags |= cpu_to_le16((u16)
					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
	else
		cmd_resp->flags |= cpu_to_le16((u16)
					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));

	cmd_resp->addr_high = cpu_to_le32(upper_32_bits((u64)lut));
	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));

	status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}
Example #13
static int update_mqd(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q)
{
	struct cik_mqd *m;

	BUG_ON(!mm || !q || !mqd);

	pr_debug("kfd: In func %s\n", __func__);

	m = get_mqd(mqd);
	m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
				DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;

	/*
	 * Calculate the queue size: log base 2 of the actual queue size in
	 * dwords minus 1, with another -1 to compensate for ffs() being
	 * one-based.
	 */
	m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
								- 1 - 1;
	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
					DOORBELL_OFFSET(q->doorbell_off);

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
	}

	update_cu_mask(mm, mqd, q);

	m->cp_hqd_active = 0;
	q->is_active = false;
	if (q->queue_size > 0 &&
			q->queue_address != 0 &&
			q->queue_percent > 0) {
		m->cp_hqd_active = 1;
		q->is_active = true;
	}

	return 0;
}
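Note that update_mqd() shifts queue_address right by 8 before splitting it, so the register pair holds a 256-byte-aligned base. A sketch of the inverse, with a hypothetical helper that is not part of the kfd driver:

/* Hypothetical helper, illustration only: recover the queue base from
 * the cp_hqd_pq_base_lo/hi pair programmed above. */
static inline uint64_t hqd_pq_base(uint32_t lo, uint32_t hi)
{
	return (((uint64_t)hi << 32) | lo) << 8;
}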
Example #14
static int ccp5_perform_aes(struct ccp_op *op)
{
	struct ccp5_desc desc;
	union ccp_function function;
	u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

	op->cmd_q->total_aes_ops++;

	/* Zero out all the fields of the command desc */
	memset(&desc, 0, Q_DESC_SIZE);

	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES;

	CCP5_CMD_SOC(&desc) = op->soc;
	CCP5_CMD_IOC(&desc) = 1;
	CCP5_CMD_INIT(&desc) = op->init;
	CCP5_CMD_EOM(&desc) = op->eom;
	CCP5_CMD_PROT(&desc) = 0;

	function.raw = 0;
	CCP_AES_ENCRYPT(&function) = op->u.aes.action;
	CCP_AES_MODE(&function) = op->u.aes.mode;
	CCP_AES_TYPE(&function) = op->u.aes.type;
	CCP_AES_SIZE(&function) = op->u.aes.size;

	CCP5_CMD_FUNCTION(&desc) = function.raw;

	CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

	CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
	CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
	CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

	CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
	CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
	CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

	CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
	CCP5_CMD_KEY_HI(&desc) = 0;
	CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
	CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

	return ccp5_do_cmd(&desc, op->cmd_q);
}
Example #15
/* If the hardware is busy, don't restart the async read.
 * If the status register is 0 - meaning initial state - restart the
 * async read, probably for the first time when populating a receive
 * buffer. If the read status indicates not busy and a status, restart
 * the async DMA read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
	struct sgdma_descrip *descbase =
		(struct sgdma_descrip *)priv->rx_dma_desc;

	struct sgdma_descrip *cdesc = &descbase[0];
	struct sgdma_descrip *ndesc = &descbase[1];

	struct tse_buffer *rxbuffer = NULL;

	if (!sgdma_rxbusy(priv)) {
		rxbuffer = queue_rx_peekhead(priv);
		if (rxbuffer == NULL) {
			netdev_err(priv->dev, "no rx buffers available\n");
			return 0;
		}

		sgdma_setup_descrip(cdesc,		/* current descriptor */
				    ndesc,		/* next descriptor */
				    sgdma_rxphysaddr(priv, ndesc),
				    0,			/* read addr 0 for rx dma */
				    rxbuffer->dma_addr, /* write addr for rx dma */
				    0,			/* read 'til EOP */
				    0,			/* EOP: NA for rx dma */
				    0,			/* read fixed: NA for rx dma */
				    0);			/* SOP: NA for rx DMA */

		dma_sync_single_for_device(priv->device,
					   priv->rxdescphys,
					   priv->sgdmadesclen,
					   DMA_TO_DEVICE);

		iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
			  &csr->next_descrip);

		iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
			  &csr->control);

		return 1;
	}

	return 0;
}
Example #16
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret = 0;

	if (IS_I965G(dev))
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
		ret = 0;
		goto out;
	}
#endif

	/* Get some space for it */
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0,   pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		goto out;
	}

	if (IS_I965G(dev))
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
out:
	return ret;
}
Example #17
static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
				       struct psp_gfx_cmd_resp *cmd)
{
	int ret;
	uint64_t fw_mem_mc_addr = ucode->mc_addr;

	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;

	ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
	if (ret)
		DRM_ERROR("Unknown firmware type\n");

	return ret;
}
Example #18
/**
 * sunxi_event_buffers_setup - setup our allocated event buffers
 * @otgc: pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
static int __devinit sunxi_event_buffers_setup(struct sunxi_otgc *otgc)
{
	struct sunxi_otgc_event_buffer	*evt;
	int				n;

	for (n = 0; n < otgc->num_event_buffers; n++) {
		evt = otgc->ev_buffs[n];
		dev_dbg(otgc->dev, "Event buf %p dma %08llx length %d\n",
				evt->buf, (unsigned long long) evt->dma,
				evt->length);

		sunxi_set_gevntadrlo(otgc->regs, n, lower_32_bits(evt->dma));
		sunxi_set_gevntadrhi(otgc->regs, n, upper_32_bits(evt->dma));
		sunxi_set_gevntsiz(otgc->regs, n, evt->length & 0xffff);
		sunxi_set_gevntcount(otgc->regs, n, 0);
	}

	return 0;
}
Example #19
/**
 * dwc3_event_buffers_setup - setup our allocated event buffers
 * @dwc: pointer to our controller context structure
 *
 * Returns 0 on success otherwise negative errno.
 */
int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
	struct dwc3_event_buffer	*evt;

	if (dwc->dr_mode == USB_DR_MODE_HOST)
		return 0;

	evt = dwc->ev_buf;
	evt->lpos = 0;
	dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
			lower_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
			upper_32_bits(evt->dma));
	dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
			DWC3_GEVNTSIZ_SIZE(evt->length));
	dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);

	return 0;
}
Example #20
/**
 * To ensure that updates to comparator value register do not set the
 * Interrupt Status Register proceed as follows:
 * 1. Clear the Comp Enable bit in the Timer Control Register.
 * 2. Write the lower 32-bit Comparator Value Register.
 * 3. Write the upper 32-bit Comparator Value Register.
 * 4. Set the Comp Enable bit and, if necessary, the IRQ enable bit.
 */
static void gt_compare_set(unsigned long delta, int periodic)
{
	u64 counter = gt_counter_read();
	unsigned long ctrl;

	counter += delta;
	ctrl = GT_CONTROL_TIMER_ENABLE;
	writel(ctrl, gt_base + GT_CONTROL);
	writel(lower_32_bits(counter), gt_base + GT_COMP0);
	writel(upper_32_bits(counter), gt_base + GT_COMP1);

	if (periodic) {
		writel(delta, gt_base + GT_AUTO_INC);
		ctrl |= GT_CONTROL_AUTO_INC;
	}

	ctrl |= GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE;
	writel(ctrl, gt_base + GT_CONTROL);
}
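The counter read that gt_compare_set() builds on has to fetch the two 32-bit halves coherently, since the 64-bit counter keeps ticking between the two reads. A sketch of the usual upper/lower/upper re-read loop, assuming GT_COUNTER0 and GT_COUNTER1 are the low and high counter register offsets:

static u64 gt_counter_read(void)
{
	u32 lower, upper, old_upper;

	upper = readl_relaxed(gt_base + GT_COUNTER1);
	do {
		/* retry until the high half is stable across the low read */
		old_upper = upper;
		lower = readl_relaxed(gt_base + GT_COUNTER0);
		upper = readl_relaxed(gt_base + GT_COUNTER1);
	} while (upper != old_upper);

	return ((u64)upper << 32) | lower;
}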
Example #21
File: bgmac.c Project: 7799/linux
static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}
Example #22
/**
 * huc_fw_xfer() - DMA's the firmware
 * @huc_fw: the firmware descriptor
 * @vma: the firmware image (bound into the GGTT)
 *
 * Transfer the firmware image to RAM for execution by the microcontroller.
 *
 * Return: 0 on success, non-zero on failure
 */
static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
{
	struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
	struct drm_i915_private *dev_priv = huc_to_i915(huc);
	unsigned long offset = 0;
	u32 size;
	int ret;

	GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Set the source address for the uCode */
	offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
		 huc_fw->header_offset;
	I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
	I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);

	/* Hardware doesn't look at destination address for HuC. Set it to 0,
	 * but still program the correct address space.
	 */
	I915_WRITE(DMA_ADDR_1_LOW, 0);
	I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);

	size = huc_fw->header_size + huc_fw->ucode_size;
	I915_WRITE(DMA_COPY_SIZE, size);

	/* Start the DMA */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));

	/* Wait for DMA to finish */
	ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100);

	DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);

	/* Disable the bits once DMA is over */
	I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
Example #23
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 temp, temp2;
	union xhci_trb *trb;

	spin_lock(&xhci->lock);
	trb = xhci->event_ring->dequeue;

	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	if (temp == 0xffffffff && temp2 == 0xffffffff)
		goto hw_died;

	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	xhci_dbg(xhci, "op reg status = %08x\n", temp);
	xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
			(unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
			lower_32_bits(trb->link.segment_ptr),
			upper_32_bits(trb->link.segment_ptr),
			(unsigned int) trb->link.intr_target,
			(unsigned int) trb->link.control);

	if (temp & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
		spin_unlock(&xhci->lock);
		return IRQ_HANDLED;
	}

	xhci_work(xhci);
	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}
Example #24
int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd)
{
	int ret;
	uint64_t fw_mem_mc_addr = ucode->mc_addr;
	struct  common_firmware_header *header;

	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
	header = (struct common_firmware_header *)ucode->fw;

	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
	cmd->cmd.cmd_load_ip_fw.fw_size = le32_to_cpu(header->ucode_size_bytes);

	ret = psp_v10_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
	if (ret)
		DRM_ERROR("Unknown firmware type\n");

	return ret;
}
Example #25
static void
bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
		     int i, int len, u32 ctl0)
{
	struct bgmac_slot_info *slot;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl1;

	if (i == BGMAC_TX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;

	ctl1 = len & BGMAC_DESC_CTL1_LEN;

	slot = &ring->slots[i];
	dma_desc = &ring->cpu_base[i];
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}
Example #26
static int data_fifo_config(u8 dep_num, phys_addr_t addr,
		 u32 size, u8 dst_pipe_idx)
{
	u8 dbm_ep = dst_pipe_idx;
	u32 lo = lower_32_bits(addr);
	u32 hi = upper_32_bits(addr);

	dbm_data->ep_num_mapping[dbm_ep] = dep_num;

	msm_dbm_write_reg(dbm_data->base,
				DBM_DATA_FIFO_LSB(dbm_ep), lo);
	msm_dbm_write_reg(dbm_data->base,
				DBM_DATA_FIFO_MSB(dbm_ep), hi);

	msm_dbm_write_reg_field(dbm_data->base, DBM_DATA_FIFO_SIZE(dbm_ep),
		DBM_DATA_FIFO_SIZE_MASK, size);

	return 0;
}
Example #27
static int lsm_lab_buffer_sanity(struct lsm_priv *prtd,
		struct lsm_cmd_read_done *read_done, int *index)
{
	int i = 0, rc = -EINVAL;
	if (!prtd || !read_done || !index) {
		pr_err("%s: Invalid params prtd %p read_done %p index %p\n",
			__func__, prtd, read_done, index);
		return -EINVAL;
	}
	if (!prtd->lsm_client->lab_enable || !prtd->lsm_client->lab_buffer) {
		pr_err("%s: Lab not enabled %d invalid lab buffer %p\n",
			__func__, prtd->lsm_client->lab_enable,
			prtd->lsm_client->lab_buffer);
		return -EINVAL;
	}
	for (i = 0; i < prtd->lsm_client->hw_params.period_count; i++) {
		if ((lower_32_bits(prtd->lsm_client->lab_buffer[i].phys) ==
			read_done->buf_addr_lsw) &&
			(upper_32_bits(prtd->lsm_client->lab_buffer[i].phys) ==
			read_done->buf_addr_msw) &&
			(prtd->lsm_client->lab_buffer[i].mem_map_handle ==
			read_done->mem_map_handle)) {
			pr_debug("%s: Buffer found %pa memmap handle %d\n",
			__func__, &prtd->lsm_client->lab_buffer[i].phys,
			prtd->lsm_client->lab_buffer[i].mem_map_handle);
			if (read_done->total_size >
				prtd->lsm_client->lab_buffer[i].size) {
				pr_err("%s: Size mismatch call back size %d actual size %zd\n",
				__func__, read_done->total_size,
				prtd->lsm_client->lab_buffer[i].size);
				rc = -EINVAL;
				break;
			} else {
				*index = i;
				rc = 0;
				break;
			}
		}
	}
	return rc;
}
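lsm_lab_buffer_sanity() matches the two halves separately; an equivalent formulation, shown here only as a sketch, rebuilds the 64-bit address from the event payload and does a single compare:

/* Sketch only: single-compare variant of the half-by-half match. */
u64 buf_addr = ((u64)read_done->buf_addr_msw << 32) |
	       read_done->buf_addr_lsw;
if (prtd->lsm_client->lab_buffer[i].phys == buf_addr &&
    prtd->lsm_client->lab_buffer[i].mem_map_handle ==
				read_done->mem_map_handle) {
	/* buffer i matches the read-done event */
}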
Example #28
/**
 * Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 *
 * @param ctrl		Host controller data structure
 * @param ptr		Pointer address to write in the first two fields (opt.)
 * @param slot_id	Slot ID to encode in the flags field (opt.)
 * @param ep_index	Endpoint index to encode in the flags field (opt.)
 * @param cmd		Command type to enqueue
 * @return none
 */
void xhci_queue_command(struct xhci_ctrl *ctrl, u8 *ptr, u32 slot_id,
			u32 ep_index, trb_type cmd)
{
	u32 fields[4];
	u64 val_64 = (uintptr_t)ptr;

	BUG_ON(prepare_ring(ctrl, ctrl->cmd_ring, EP_STATE_RUNNING));

	fields[0] = lower_32_bits(val_64);
	fields[1] = upper_32_bits(val_64);
	fields[2] = 0;
	fields[3] = TRB_TYPE(cmd) | EP_ID_FOR_TRB(ep_index) |
		    SLOT_ID_FOR_TRB(slot_id) | ctrl->cmd_ring->cycle_state;

	queue_trb(ctrl, ctrl->cmd_ring, false, fields);

	/* Ring the command ring doorbell */
	xhci_writel(&ctrl->dba->doorbell[0], DB_VALUE_HOST);
	xhci_readl(&ctrl->dba->doorbell[0]);
}
Example #29
static int dwc3_setup_scratch_buffers(struct dwc3 *dwc)
{
	u32 param;
	int ret;

	if (dwc->dr_mode == USB_DR_MODE_HOST)
		return 0;

	if (!dwc->has_hibernation)
		return 0;

	if (!dwc->nr_scratch)
		return 0;

	/* should never get here */
	if (WARN_ON(!dwc->scratchbuf))
		return 0;

	param = lower_32_bits(dwc->scratch_addr);

	ret = dwc3_send_gadget_generic_command(dwc,
			DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO, param);
	if (ret < 0)
		goto err1;

	param = upper_32_bits(dwc->scratch_addr);

	ret = dwc3_send_gadget_generic_command(dwc,
			DWC3_DGCMD_SET_SCRATCHPAD_ADDR_HI, param);
	if (ret < 0)
		goto err1;

	return 0;

err1:
	dma_unmap_single(dwc->sysdev, dwc->scratch_addr, dwc->nr_scratch *
			DWC3_SCRATCHBUF_SIZE, DMA_BIDIRECTIONAL);

	return ret;
}
Example #30
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	s32 value;
	struct pm_qos_request *req;

	if (count == sizeof(s32)) {
		if (copy_from_user(&value, buf, sizeof(s32)))
			return -EFAULT;
	} else if (count <= 11) { /* ASCII perhaps? */
		char ascii_value[11];
		unsigned long int ulval;
		int ret;

		if (copy_from_user(ascii_value, buf, count))
			return -EFAULT;

		if (count > 10) {
			if (ascii_value[10] == '\n')
				ascii_value[10] = '\0';
			else
				return -EINVAL;
		} else {
			ascii_value[count] = '\0';
		}
		ret = kstrtoul(ascii_value, 16, &ulval);
		if (ret) {
			pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
			return -EINVAL;
		}
		value = (s32)lower_32_bits(ulval);
	} else {
		return -EINVAL;
	}

	req = filp->private_data;
	pm_qos_update_request(req, value);

	return count;
}
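Unlike the DMA examples above, this last one applies lower_32_bits() to a plain integer: on a 64-bit kernel it truncates the parsed value to its low 32 bits before the signed cast. A quick illustration of the behavior:

/* On a 64-bit kernel: lower_32_bits(0x100000001ULL) == 0x00000001 */
s32 value = (s32)lower_32_bits(0x100000001ULL);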