Example #1
/****************************************************************************
*
* Invalidate the Data cache for the given address range.
* If the bytes specified by the address (adr) are cached by the Data cache,
* the cacheline containing that byte is invalidated.	If the cacheline
* is modified (dirty), the modified contents are lost and are NOT
* written to system memory before the line is invalidated.
*
* In this function, if the start or end address is not cache-line aligned,
* the cache line containing the unaligned start or end address is flushed
* first, and only then are the remaining lines invalidated, since invalidating
* an unaligned cache line directly may result in loss of data. This issue
* raises a few possibilities:
*
*
* If the address to be invalidated is not cache-line aligned, the
* following choices are available:
* 1) Invalidate the cache line when required and do not worry much about the
* side effects. Though it sounds good, it can result in hard-to-debug issues.
* The problem is, if some other variables are allocated in the
* same cache line and have been recently updated (in cache), the invalidation
* results in loss of that data.
*
* 2) Flush the cache line first. This ensures that any other variables
* present in the same cache line and updated recently are flushed out to memory.
* The line can then safely be invalidated. Again this sounds good, but it can
* result in issues. For example, when the invalidation happens
* in a typical ISR (after a DMA transfer has updated the memory), flushing
* the cache line means losing data that were updated just before the ISR
* was invoked.
*
* Linux prefers the second one. To have a uniform implementation (across standalone
* and Linux), the second option is implemented here.
* This being the case, the following needs to be taken care of:
* 1) Whenever possible, the addresses must be cache-line aligned. Please note that
* not just the start address, but the end address as well must be cache-line aligned.
* If that is taken care of, this will always work.
* 2) Avoid situations where invalidation has to be done after the data is updated by
* a peripheral/DMA directly into memory. This is not tough to achieve (though maybe a bit
* risky). The common use case for invalidation is a DMA transfer. Generally,
* for such use cases, buffers can be allocated first and the DMA started afterwards. The
* practice to follow here is: immediately after buffer allocation
* and before starting the DMA, do the invalidation. With this approach, invalidation
* need not be done after the DMA transfer is over.
*
* This will always work if done carefully.
* However, there is no guarantee that invalidation will never be needed after the
* DMA is complete. For example, if the first cache line or the last cache line
* (assuming the buffer in question spans multiple cache
* lines) is brought back into the cache (between the invalidation and DMA completion)
* because of speculative prefetching, or because a variable sharing the same cache
* line is read, then the cache must be invalidated again after the DMA completes.
*
*
* @param	adr: Start address of the range to be invalidated.
* @param	len: Length of the range to be invalidated, in bytes.
*
* @return	None.
*
* @note		None.
*
****************************************************************************/
void Xil_DCacheInvalidateRange(INTPTR adr, u32 len)
{
	const u32 cacheline = 32U;
	u32 end;
	u32 tempadr = adr;
	u32 tempend;
	u32 currmask;
	volatile u32 *L2CCOffset = (volatile u32 *)(XPS_L2CC_BASEADDR +
				    XPS_L2CC_CACHE_INVLD_PA_OFFSET);

	currmask = mfcpsr();
	mtcpsr(currmask | IRQ_FIQ_MASK);

	if (len != 0U) {
		end = tempadr + len;
		tempend = end;
		/* Select L1 Data cache in CSSR */
		mtcp(XREG_CP15_CACHE_SIZE_SEL, 0U);

		if ((tempadr & (cacheline-1U)) != 0U) {
			tempadr &= (~(cacheline - 1U));

			Xil_L1DCacheFlushLine(tempadr);
			/* Disable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x3U);
			Xil_L2CacheFlushLine(tempadr);
			/* Enable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x0U);
			Xil_L2CacheSync();
			tempadr += cacheline;
		}
		if ((tempend & (cacheline-1U)) != 0U) {
			tempend &= (~(cacheline - 1U));

			Xil_L1DCacheFlushLine(tempend);
			/* Disable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x3U);
			Xil_L2CacheFlushLine(tempend);
			/* Enable Write-back and line fills */
			Xil_L2WriteDebugCtrl(0x0U);
			Xil_L2CacheSync();
		}

		while (tempadr < tempend) {
			/* Invalidate L2 cache line */
			*L2CCOffset = tempadr;
			dsb();
#ifdef __GNUC__
			/* Invalidate L1 Data cache line */
			__asm__ __volatile__("mcr " \
			XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (tempadr));
#elif defined (__ICCARM__)
			__asm volatile ("mcr " \
			XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (tempadr));
#else
			{ volatile register u32 Reg
				__asm(XREG_CP15_INVAL_DC_LINE_MVA_POC);
			  Reg = tempadr; }
#endif
			tempadr += cacheline;
		}
	}

	/* Wait for the invalidate operations to complete, then restore interrupts */
	dsb();
	mtcpsr(currmask);
}
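A minimal usage sketch of the pattern the comment above recommends: invalidate the receive buffer right after allocation and before starting the DMA, not after it completes. The dma_start_rx()/dma_wait_done() helpers and the typedefs are hypothetical placeholders, not part of the driver above.

#include <stdint.h>

typedef uintptr_t INTPTR;	/* assumed to match the Xilinx typedefs */
typedef uint32_t  u32;

extern void Xil_DCacheInvalidateRange(INTPTR adr, u32 len);
extern void dma_start_rx(void *buf, u32 len);	/* hypothetical */
extern void dma_wait_done(void);		/* hypothetical */

#define CACHE_LINE 32u

/* Cache-line aligned buffer, so both the start and end addresses are aligned
 * and no boundary line needs to be flushed first. */
static uint8_t rx_buf[1024] __attribute__((aligned(CACHE_LINE)));

void receive_with_dma(void)
{
	/* Invalidate while the buffer is still untouched: no dirty lines exist */
	Xil_DCacheInvalidateRange((INTPTR)rx_buf, sizeof(rx_buf));

	dma_start_rx(rx_buf, sizeof(rx_buf));
	dma_wait_done();

	/* rx_buf now holds the DMA'd data; no post-DMA invalidate is needed
	 * unless a line was speculatively re-fetched in between */
}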
Example #2
/*
 * static function
 */
static inline void cpu_enter_lowpower(unsigned int cpu)
{
    //HOTPLUG_INFO("cpu_enter_lowpower\n");

    if (((cpu == 4) && (cpu_online(5) == 0) && (cpu_online(6) == 0) && (cpu_online(7) == 0)) ||
        ((cpu == 5) && (cpu_online(4) == 0) && (cpu_online(6) == 0) && (cpu_online(7) == 0)) ||
        ((cpu == 6) && (cpu_online(4) == 0) && (cpu_online(5) == 0) && (cpu_online(7) == 0)) ||
        ((cpu == 7) && (cpu_online(4) == 0) && (cpu_online(5) == 0) && (cpu_online(6) == 0)))
    {
    #if 0
        /* Clear the SCTLR C bit to prevent further data cache allocation */
        __disable_dcache();

        /* Clean and invalidate all data from the L1/L2 data cache */
        inner_dcache_flush_L1();
        //flush_cache_all();

        /* Execute a CLREX instruction */
        __asm__ __volatile__("clrex");

        /* Clean all data from the L2 data cache */
        inner_dcache_flush_L2();
    #else
        __disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2();
    #endif

        /* Switch the processor from SMP mode to AMP mode by clearing the ACTLR SMP bit */
        __switch_to_amp();

        /* Execute an ISB instruction to ensure that all of the CP15 register changes from the previous steps have been committed */
        isb();

        /* Execute a DSB instruction to ensure that all cache, TLB and branch predictor maintenance operations issued by any processor in the multiprocessor device before the SMP bit was cleared have completed */
        dsb();

        /* Disable snoop requests and DVM message requests */
        REG_WRITE(CCI400_SI3_SNOOP_CONTROL, REG_READ(CCI400_SI3_SNOOP_CONTROL) & ~(SNOOP_REQ | DVM_MSG_REQ));
        while (REG_READ(CCI400_STATUS) & CHANGE_PENDING);

        /* Disable CA15L snoop function */
        mcusys_smc_write(MP1_AXI_CONFIG, REG_READ(MP1_AXI_CONFIG) | ACINACTM);
    }
    else
    {
    #if 0
        /* Clear the SCTLR C bit to prevent further data cache allocation */
        __disable_dcache();

        /* Clean and invalidate all data from the L1 data cache */
        inner_dcache_flush_L1();
        //Just flush the cache.
        //flush_cache_all();

        /* Clean all data from the L2 data cache */
        //__inner_clean_dcache_L2();
    #else
        //FIXME: why __disable_dcache__inner_flush_dcache_L1 fail but 2 steps ok?
        //__disable_dcache__inner_flush_dcache_L1();
        __disable_dcache__inner_flush_dcache_L1__inner_clean_dcache_L2();
    #endif

        /* Execute a CLREX instruction */
        __asm__ __volatile__("clrex");

        /* Switch the processor from SMP mode to AMP mode by clearing the ACTLR SMP bit */
        __switch_to_amp();
    }
}
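The condition at the top of cpu_enter_lowpower() enumerates by hand every "this CPU is the last online CPU of the 4-7 cluster" case. A minimal equivalent sketch, assuming the same cpu_online() predicate, expresses the check as a loop:

static int is_last_online_in_cluster(unsigned int cpu)
{
	unsigned int i;

	if (cpu < 4 || cpu > 7)
		return 0;		/* not in the 4-7 cluster at all */
	for (i = 4; i <= 7; i++) {
		if (i != cpu && cpu_online(i))
			return 0;	/* another cluster CPU is still up */
	}
	return 1;			/* cpu is the cluster's last online CPU */
}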
Example #3
/*****************************************************************************
* FUNCTION
*  hal_btif_send_data
* DESCRIPTION
*  send data through btif in FIFO mode
* PARAMETERS
* p_btif   [IN]        pointer to the BTIF module's information structure
* p_buf    [IN]        pointer to tx data buffer
* buf_len  [IN]        tx buffer length
* RETURNS
*   positive means number of bytes sent; 0 means no data put to FIFO; negative means an error happened
*****************************************************************************/
int hal_btif_send_data(P_MTK_BTIF_INFO_STR p_btif,
		       const unsigned char *p_buf, const unsigned int buf_len)
{
/*Chaozhong: To be implemented*/
	int i_ret = -1;

	unsigned int ava_len = 0;
	unsigned int sent_len = 0;

#if !(NEW_TX_HANDLING_SUPPORT)
	unsigned int base = p_btif->base;
	unsigned int lsr = 0;
	unsigned int left_len = 0;
	unsigned char *p_data = (unsigned char *)p_buf;
#endif

/*check parameter valid or not*/
	if ((NULL == p_buf) || (buf_len == 0)) {
		i_ret = ERR_INVALID_PAR;
		return i_ret;
	}
#if NEW_TX_HANDLING_SUPPORT
	ava_len = _get_btif_tx_fifo_room(p_btif);
	sent_len = buf_len <= ava_len ? buf_len : ava_len;
	if (0 < sent_len) {
		int enqueue_len = 0;
		unsigned long flag = 0;
		spin_lock_irqsave(&(p_btif->tx_fifo_spinlock), flag);
		enqueue_len = kfifo_in(p_btif->p_tx_fifo,
				       (unsigned char *)p_buf, sent_len);
		if (sent_len != enqueue_len) {
			BTIF_ERR_FUNC("target tx len:%d, len sent:%d\n",
				      sent_len, enqueue_len);
		}
		i_ret = enqueue_len;
		dsb();
/*enable BTIF Tx IRQ*/
		hal_btif_tx_ier_ctrl(p_btif, true);
		spin_unlock_irqrestore(&(p_btif->tx_fifo_spinlock), flag);
		BTIF_DBG_FUNC("enqueue len:%d\n", enqueue_len);
	} else {
		i_ret = 0;
	}
#else
	while ((_btif_is_tx_allow(p_btif)) && (sent_len < buf_len)) {
/*read LSR and check THRE or TEMT; either one set to 1 means we can accept tx data*/
		lsr = BTIF_READ32(BTIF_LSR(base));

		if (lsr & BTIF_LSR_TEMT_BIT) {
/*Transmitter is completely empty, which means we can write Tx FIFO size bytes to BTIF*/
			ava_len = BTIF_TX_FIFO_SIZE;
		} else if (lsr & BTIF_LSR_THRE_BIT) {
/*Tx holding register is empty, which means we can write (Tx FIFO size - Tx threshold) bytes to BTIF*/
			ava_len = BTIF_TX_FIFO_SIZE - BTIF_TX_FIFO_THRE;
		} else {
/*data in the tx FIFO exceeds the Tx threshold, so we will not write data to THR*/
			ava_len = 0;
			break;
		}

		left_len = buf_len - sent_len;
/*ava_len is the real length that will be written to BTIF THR*/
		ava_len = ava_len > left_len ? left_len : ava_len;
/*update sent length value after this operation*/
		sent_len += ava_len;
/*do we need a memory barrier here?
Answer: no, there is no memory-ordering issue;
the CPU guarantees the logically correct order
*/
		while (ava_len--)
			btif_reg_sync_writeb(*(p_data++), BTIF_THR(base));

	}
/* while ((hal_btif_is_tx_allow()) && (sent_len < buf_len)); */

	i_ret = sent_len;

/*enable BTIF Tx IRQ*/
	hal_btif_tx_ier_ctrl(p_btif, true);
#endif
	return i_ret;
}
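A minimal usage sketch for the function above: since hal_btif_send_data() may accept only part of the buffer (it returns the number of bytes queued), a caller typically loops until everything is sent. The btif_send_all() helper and its bare-bones error handling are illustrative only, not part of the driver.

static int btif_send_all(P_MTK_BTIF_INFO_STR p_btif,
			 const unsigned char *buf, unsigned int len)
{
	unsigned int sent = 0;

	while (sent < len) {
		int ret = hal_btif_send_data(p_btif, buf + sent, len - sent);

		if (ret < 0)
			return ret;	/* hard error, e.g. ERR_INVALID_PAR */
		if (ret == 0)
			continue;	/* FIFO full; real code should wait here */
		sent += ret;
	}
	return (int)sent;
}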
Example #4
void arch_vcpu_switch(struct vmm_vcpu *tvcpu,
		      struct vmm_vcpu *vcpu,
		      arch_regs_t *regs)
{
	u32 ite;
	irq_flags_t flags;

	/* Clear hypervisor context */
	msr(hcr_el2, HCR_DEFAULT_BITS);
	msr(cptr_el2, 0x0);
	msr(hstr_el2, 0x0);

	/* Save user registers & banked registers */
	if (tvcpu) {
		arm_regs(tvcpu)->pc = regs->pc;
		arm_regs(tvcpu)->lr = regs->lr;
		arm_regs(tvcpu)->sp = regs->sp;
		for (ite = 0; ite < CPU_GPR_COUNT; ite++) {
			arm_regs(tvcpu)->gpr[ite] = regs->gpr[ite];
		}
		arm_regs(tvcpu)->pstate = regs->pstate;
		if (tvcpu->is_normal) {
			/* Update last host CPU */
			arm_priv(tvcpu)->last_hcpu = vmm_smp_processor_id();
			/* Save VGIC context */
			arm_vgic_save(tvcpu);
			/* Save sysregs context */
			cpu_vcpu_sysregs_save(tvcpu);
			/* Save VFP and SIMD context */
			cpu_vcpu_vfp_save(tvcpu);
			/* Save generic timer */
			if (arm_feature(tvcpu, ARM_FEATURE_GENERIC_TIMER)) {
				generic_timer_vcpu_context_save(tvcpu,
						arm_gentimer_context(tvcpu));
			}
		}
	}
	/* Restore user registers & special registers */
	regs->pc = arm_regs(vcpu)->pc;
	regs->lr = arm_regs(vcpu)->lr;
	regs->sp = arm_regs(vcpu)->sp;
	for (ite = 0; ite < CPU_GPR_COUNT; ite++) {
		regs->gpr[ite] = arm_regs(vcpu)->gpr[ite];
	}
	regs->pstate = arm_regs(vcpu)->pstate;
	if (vcpu->is_normal) {
		/* Restore generic timer */
		if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
			generic_timer_vcpu_context_restore(vcpu,
						arm_gentimer_context(vcpu));
		}
		/* Restore VFP and SIMD context */
		cpu_vcpu_vfp_restore(vcpu);
		/* Restore sysregs context */
		cpu_vcpu_sysregs_restore(vcpu);
		/* Restore VGIC context */
		arm_vgic_restore(vcpu);
		/* Update hypervisor context */
		vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);
		msr(hcr_el2, arm_priv(vcpu)->hcr);
		vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);
		msr(cptr_el2, arm_priv(vcpu)->cptr);
		msr(hstr_el2, arm_priv(vcpu)->hstr);
		/* Update hypervisor Stage2 MMU context */
		mmu_lpae_stage2_chttbl(vcpu->guest->id,
			       arm_guest_priv(vcpu->guest)->ttbl);
		/* Flush TLB if moved to new host CPU */
		if (arm_priv(vcpu)->last_hcpu != vmm_smp_processor_id()) {
			/* Invalidate all guest TLB entries because
			 * we might have stale guest TLB entries from
			 * our previous run on the new host CPU
			 */
			inv_tlb_guest_allis();
			/* Ensure changes are visible */
			dsb();
			isb();
		}
	}
	/* Clear exclusive monitor */
	clrex();
}
Example #5
/*
 * We define our own outer_disable() to avoid L2 flush upon LP2 entry.
 * Since the Tegra kernel will always be in single core mode when
 * L2 is being disabled, we can omit the locking. Since we are not
 * accessing the spinlock we also avoid the problem of the spinlock
 * storage getting out of sync.
 */
static inline void tegra_l2x0_disable(void)
{
	void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
	writel_relaxed(0, p + L2X0_CTRL);
	dsb();
}
Example #6
static uint32_t
kgsl_ringbuffer_addcmds(struct kgsl_ringbuffer *rb,
				unsigned int flags, unsigned int *cmds,
				int sizedwords)
{
	unsigned int *ringcmds;
	unsigned int timestamp;
	unsigned int total_sizedwords = sizedwords + 6;
	unsigned int i;
	unsigned int rcmd_gpu;

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;

	ringcmds = kgsl_ringbuffer_allocspace(rb, total_sizedwords);
	rcmd_gpu = rb->buffer_desc.gpuaddr
		+ sizeof(uint)*(rb->wptr-total_sizedwords);

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

	for (i = 0; i < sizedwords; i++) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
		cmds++;
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
	}

	rb->timestamp++;
	timestamp = rb->timestamp;

	/* start-of-pipeline and end-of-pipeline timestamps */
	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type0_packet(REG_CP_TIMESTAMP, 1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type3_packet(PM4_EVENT_WRITE, 3));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
	GSL_RB_WRITE(ringcmds, rcmd_gpu,
		     (rb->device->memstore.gpuaddr +
		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);

	/*memory barriers added for the timestamp update*/
	mb();
	dsb();
	outer_sync();

	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
		/* Conditional execution based on memory values */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_COND_EXEC, 4));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
		/* # of conditional command DWORDs */
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_INTERRUPT, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
	}

	kgsl_ringbuffer_submit(rb);

	/* return the timestamp of the commands just issued */
	return timestamp;
}
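For orientation, each GSL_RB_WRITE above amounts to storing one command dword at the CPU-visible ringbuffer position and advancing both the CPU-side pointer and the GPU-side address. rb_write() below is a hypothetical stand-in that sketches that step; it is not the real GSL_RB_WRITE macro.

static inline void rb_write(unsigned int **ringcmds, unsigned int *rcmd_gpu,
			    unsigned int data)
{
	**ringcmds = data;			/* store the command dword */
	(*ringcmds)++;				/* advance CPU-side pointer */
	*rcmd_gpu += sizeof(unsigned int);	/* advance GPU-side address */
}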
Example #7
static void trace_stop(void)
{ 
	int i;	 
	int pwr_down;

	if (tracer.state == TRACE_STATE_STOP) {
		pr_info("ETM trace is already stop!\n");
		return;
	}
  
	get_online_cpus();

	mutex_lock(&tracer.mutex);
  
	etb_unlock(&tracer);
  
	/* "Trace program done" */
	/* "Disable trace components" */
	for (i = 0; i < tracer.nr_etm_regs; i++) {
		if (tracer.etm_info[i].pwr_down == NULL) {
			pwr_down = 0;
		} else {
			pwr_down = *(tracer.etm_info[i].pwr_down);
		}
		if (!pwr_down) {
			cs_cpu_ptm_progbit(tracer.etm_regs[i]);
		}
	}

#if 0
	cs_cpu_flushandstop(tracer.tpiu_regs);
#endif
  
	/* Disable ETB capture (ETB_CTL bit0 = 0x0) */
	cs_cpu_write(tracer.etb_regs, 0x20, 0x0);
	/* Reset ETB RAM Read Data Pointer (ETB_RRP = 0x0) */
	/* no need to reset RRP */
#if 0
	cs_cpu_write (tracer.etb_regs, 0x14, 0x0);
#endif

	/* power down */
	for (i = 0; i < tracer.nr_etm_regs; i++) {
		if (tracer.etm_info[i].pwr_down == NULL) {
			pwr_down = 0;
		} else {
			pwr_down = *(tracer.etm_info[i].pwr_down);
		}
		if (!pwr_down) {
			cs_cpu_write(tracer.etm_regs[i], 0x0, 0x1);
		}
	}

	dsb();

	tracer.state = TRACE_STATE_STOP;

	etb_lock(&tracer);

	mutex_unlock(&tracer.mutex);

	put_online_cpus();
}
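Both register loops in trace_stop() repeat the same "is this ETM powered down?" test. A small helper sketch, assuming the tracer structure used above, factors it out:

static int etm_is_powered_down(int i)
{
	/* a NULL pwr_down pointer is treated as "powered up" above */
	if (tracer.etm_info[i].pwr_down == NULL)
		return 0;
	return *(tracer.etm_info[i].pwr_down);
}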
Example #8
int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
{
	struct device *dev = &pdev->dev;
	VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
	struct resource *res;
	void *slot_mem;
	dma_addr_t slot_phys;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	g_virt_to_bus_offset = virt_to_dma(dev, (void *)0);

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(sizeof(FRAGMENTS_T) * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((int)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (FRAGMENTS_T *)(slot_mem + slot_mem_size);
	slot_mem_size += frag_mem_size;

	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(FRAGMENTS_T **)&g_fragments_base[i] =
			&g_fragments_base[i + 1];
	}
	*(FRAGMENTS_T **)&g_fragments_base[i] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	if (vchiq_init_state(state, vchiq_slot_zero, 0) != VCHIQ_SUCCESS)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	g_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "failed to get IRQ\n");
		return irq;
	}

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */

	dsb(); /* Ensure all writes have completed */

	err = bcm_mailbox_write(MBOX_CHAN_VCHIQ, (unsigned int)slot_phys);
	if (err) {
		dev_err(dev, "mailbox write failed\n");
		return err;
	}

	vchiq_log_info(vchiq_arm_log_level,
		"vchiq_init - done (slots %x, phys %pad)",
		(unsigned int)vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}
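The fragment pool set up above threads a singly linked free list through the fragment storage itself: the first pointer-sized bytes of each free FRAGMENTS_T hold the address of the next free fragment, and NULL terminates the list. A minimal pop/push sketch under that assumption (the g_free_fragments_sema wait and any locking are deliberately omitted):

static FRAGMENTS_T *fragment_pop(void)
{
	FRAGMENTS_T *frag = g_free_fragments;

	if (frag)
		g_free_fragments = *(FRAGMENTS_T **)frag;	/* unlink head */
	return frag;
}

static void fragment_push(FRAGMENTS_T *frag)
{
	*(FRAGMENTS_T **)frag = g_free_fragments;	/* link in front */
	g_free_fragments = frag;
}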
Example #9
extern bool bcm_dma_is_busy(void __iomem *dma_chan_base)
{
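	/* complete any outstanding writes to the DMA registers before
	   sampling the channel status below */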
	dsb();

	return readl(dma_chan_base + BCM2708_DMA_CS) & BCM2708_DMA_ACTIVE;
}
Example #10
/* read device ready pin */
static int em_x270_nand_device_ready(struct mtd_info *mtd)
{
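	/* drain outstanding writes to the NAND controller so the ready/busy
	   GPIO reflects the state after the last command */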
	dsb();

	return gpio_get_value(nand_rb);
}
Example #11
int ipp_blit(const struct rk29_ipp_req *req)
{
	uint32_t rotate;
	uint32_t pre_scale	= 0;
	uint32_t post_scale = 0;
	uint32_t pre_scale_w, pre_scale_h;//pre_scale para
	uint32_t post_scale_w = 0x1000;
	uint32_t post_scale_h = 0x1000;
	uint32_t pre_scale_output_w=0, pre_scale_output_h=0;//pre_scale output width&height
	uint32_t post_scale_input_w, post_scale_input_h;//post_scale input width&height
	uint32_t dst0_YrgbMst=0,dst0_CbrMst=0;
	uint32_t ret = 0;
	uint32_t deinterlace_config = 0;
	uint32_t src0_w = req->src0.w;
	uint32_t src0_h = req->src0.h;
	
	DBG("Omegamoon >>> IPP Blit Start\n");
	if (drvdata == NULL) {			/* [email protected] : check driver is normal or not */
		printk("%s drvdata is NULL, IPP driver probe is fail!!\n", __FUNCTION__);
		return -EPERM;
	}

	drvdata->ipp_result = -1;

	//When ipp_blit_async is called in kernel space req->complete should NOT be NULL, otherwise req->complete should be NULL
	if(req->complete)
	{
		drvdata->ipp_irq_callback = req->complete;
	}
	else
	{
		drvdata->ipp_irq_callback = ipp_blit_complete;
	}

	ret = ipp_check_param(req);
	if(ret ==  -EINVAL)
	{	
		printk("IPP invalid input!\n");
		goto error_input;
	}
	
	rotate = req->flag;
	switch (rotate) {
	case IPP_ROT_90:
		//for rotation 90 degree
		DBG("rotate %d, src0.fmt %d",rotate,req->src0.fmt);
		switch(req->src0.fmt)
		{
		case IPP_XRGB_8888:
			dst0_YrgbMst  =  req->dst0.YrgbMst + req->dst0.w*4;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_RGB_565:
			dst0_YrgbMst  =  req->dst0.YrgbMst +  req->dst0.w*2;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_Y_CBCR_H1V1:
			dst0_YrgbMst  =  req->dst0.YrgbMst + req->dst0.w;
			dst0_CbrMst   =  req->dst0.CbrMst + req->dst0.w*2;
			break;

		case IPP_Y_CBCR_H2V1:
			dst0_YrgbMst =	req->dst0.YrgbMst + req->dst0.w;
			dst0_CbrMst  =	req->dst0.CbrMst + req->dst0.w*2;
			break;

		case IPP_Y_CBCR_H2V2:
			dst0_YrgbMst = req->dst0.YrgbMst+ req->dst0.w;
			dst0_CbrMst  = req->dst0.CbrMst + req->dst0.w;
			break;

		default:

			break;
		}
	break;

	case IPP_ROT_180:
		//for rotation 180 degree
		DBG("rotate %d, src0.fmt %d",rotate,req->src0.fmt);
		switch(req->src0.fmt)
		{
		case IPP_XRGB_8888:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w*4+req->dst0.w*4;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_RGB_565:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w*2+req->dst0.w*2;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_Y_CBCR_H1V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w+req->dst0.w;
			dst0_CbrMst  =	req->dst0.CbrMst+(req->dst0.h-1)*req->dst_vir_w*2+req->dst0.w*2;
			break;

		case IPP_Y_CBCR_H2V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w+req->dst0.w;
			dst0_CbrMst  =	req->dst0.CbrMst+(req->dst0.h-1)*req->dst_vir_w+req->dst0.w;
			break;

		case IPP_Y_CBCR_H2V2:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w+req->dst0.w;
			dst0_CbrMst  =	req->dst0.CbrMst+((req->dst0.h/2)-1)*req->dst_vir_w+req->dst0.w;
			break;

		default:
			break;
		}
			break;

	case IPP_ROT_270:
		DBG("rotate %d, src0.fmt %d \n",rotate,req->src0.fmt);
		switch(req->src0.fmt)
		{
		case IPP_XRGB_8888:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w*4;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_RGB_565:
			dst0_YrgbMst =	req->dst0.YrgbMst +(req->dst0.h-1)*req->dst_vir_w*2;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_Y_CBCR_H1V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w;
			dst0_CbrMst  =	req->dst0.CbrMst+(req->dst0.h-1)*req->dst_vir_w*2;
			break;

		case IPP_Y_CBCR_H2V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w;
			dst0_CbrMst  =	req->dst0.CbrMst+(req->dst0.h-1)*req->dst_vir_w*2;
			break;

		case IPP_Y_CBCR_H2V2:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w;
			dst0_CbrMst  =	req->dst0.CbrMst+((req->dst0.h/2)-1)*req->dst_vir_w;
			break;

		default:
			break;
		}
		break;

	case IPP_ROT_X_FLIP:
		DBG("rotate %d, src0.fmt %d",rotate,req->src0.fmt);
		switch(req->src0.fmt)
		{
		case IPP_XRGB_8888:
			dst0_YrgbMst =	req->dst0.YrgbMst+req->dst0.w*4;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_RGB_565:
			dst0_YrgbMst =	req->dst0.YrgbMst+req->dst0.w*2;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_Y_CBCR_H1V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+req->dst0.w;
			dst0_CbrMst  =	req->dst0.CbrMst+req->dst0.w*2;
			break;

		case IPP_Y_CBCR_H2V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+req->dst0.w;
			dst0_CbrMst  =	req->dst0.CbrMst+req->dst0.w;
			break;

		case IPP_Y_CBCR_H2V2:
			dst0_YrgbMst =	req->dst0.YrgbMst+req->dst0.w;
			dst0_CbrMst  =	req->dst0.CbrMst+req->dst0.w;
			break;

		default:
			break;
		}

	break;

	case IPP_ROT_Y_FLIP:
		DBG("rotate %d, src0.fmt %d",rotate,req->src0.fmt);
		switch(req->src0.fmt)
		{
		case IPP_XRGB_8888:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w*4;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_RGB_565:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w*2;
			dst0_CbrMst  = req->dst0.CbrMst;
			break;

		case IPP_Y_CBCR_H1V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w;
			dst0_CbrMst = req->dst0.CbrMst+(req->dst0.h-1)*req->dst_vir_w*2;
			break;

		case IPP_Y_CBCR_H2V1:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w;
			dst0_CbrMst = req->dst0.CbrMst+(req->dst0.h-1)*req->dst_vir_w;
			break;

		case IPP_Y_CBCR_H2V2:
			dst0_YrgbMst =	req->dst0.YrgbMst+(req->dst0.h-1)*req->dst_vir_w;
			dst0_CbrMst = req->dst0.CbrMst+((req->dst0.h/2)-1)*req->dst_vir_w;
			break;

	default:
		break;
	}

	break;
	case IPP_ROT_0:
		//for  0 degree
		DBG("rotate %d, src0.fmt %d",rotate,req->src0.fmt);
		dst0_YrgbMst =	req->dst0.YrgbMst;
		dst0_CbrMst  = req->dst0.CbrMst;
	break;

	default:
		ERR("ipp is not surpport degree!!\n" );
		break;
	}
	
	//we start to interact with hw now
	wq_condition = 0;

	ipp_power_on();
	
	
	//check if IPP is idle
	if(((ipp_read(IPP_INT)>>6)&0x3) != 0)//not idle
	{
		printk("IPP status is not idle, cannot set registers\n");
		goto error_status;
	}


	/* Configure source image */
	DBG("src YrgbMst 0x%x , CbrMst0x%x,  %dx%d, fmt = %d\n", req->src0.YrgbMst,req->src0.CbrMst,
					req->src0.w, req->src0.h, req->src0.fmt);

	ipp_write(req->src0.YrgbMst, IPP_SRC0_Y_MST);
	if(IS_YCRCB(req->src0.fmt))
	{
		ipp_write(req->src0.CbrMst, IPP_SRC0_CBR_MST);
	}
	//ipp_write(req->src0.h<<16|req->src0.w, IPP_SRC_IMG_INFO);
	ipp_write((ipp_read(IPP_CONFIG)&(~0x7))|req->src0.fmt, IPP_CONFIG);

	/* Configure destination image */
	DBG("dst YrgbMst 0x%x , CbrMst0x%x, %dx%d\n", dst0_YrgbMst,dst0_CbrMst,
					req->dst0.w, req->dst0.h);

	ipp_write(dst0_YrgbMst, IPP_DST0_Y_MST);

	if(IS_YCRCB(req->src0.fmt))
	{
		ipp_write(dst0_CbrMst, IPP_DST0_CBR_MST);
	}

	ipp_write(req->dst0.h<<16|req->dst0.w, IPP_DST_IMG_INFO);

	/*Configure Pre_scale*/
	if((IPP_ROT_90 == rotate) || (IPP_ROT_270 == rotate))
	{
		pre_scale = ((req->dst0.w <= req->src0.h/2) ? 1 : 0)||((req->dst0.h <= req->src0.w/2) ? 1 : 0);
	}
	else //other degree
	{
		pre_scale = ((req->dst0.w <= req->src0.w/2) ? 1 : 0)||((req->dst0.h <= req->src0.h/2) ? 1 : 0);
	}

	if(pre_scale)
	{
		if(((IPP_ROT_90 == rotate) || (IPP_ROT_270 == rotate)))
		{
			if((req->src0.w>req->dst0.h))
			{
				pre_scale_w = (uint32_t)( req->src0.w/req->dst0.h);//floor
			}
			else
			{
			   pre_scale_w = 1;
			}
			if((req->src0.h>req->dst0.w))
			{
				pre_scale_h = (uint32_t)( req->src0.h/req->dst0.w);//floor
			}
			else
			{
				pre_scale_h = 1;
			}

			DBG("!!!!!pre_scale_h %d,pre_scale_w %d \n",pre_scale_h,pre_scale_w);
		}
		else//0 180 x ,y
		{
			if((req->src0.w>req->dst0.w))
			{
				pre_scale_w = (uint32_t)( req->src0.w/req->dst0.w);//floor
			}
			else
			{
			   pre_scale_w = 1;
			}

			if((req->src0.h>req->dst0.h))
			{
				pre_scale_h = (uint32_t)( req->src0.h/req->dst0.h);//floor
			}
			else
			{
				pre_scale_h = 1;
			}
			DBG("!!!!!pre_scale_h %d,pre_scale_w %d \n",pre_scale_h,pre_scale_w);
		}

		//pre_scale only support 1/2 to 1/8 time
		if(pre_scale_w > 8)
		{
			if(pre_scale_w < 16)
			{
				pre_scale_w = 8;
			}
			else
			{
				printk("invalid pre_scale operation! Down scaling ratio should not be more than 16!\n");
				goto error_scale;
			}
		}
		if(pre_scale_h > 8)
		{
			if(pre_scale_h < 16)
			{
				pre_scale_h = 8;
			}
			else
			{
				printk("invalid pre_scale operation! Down scaling ratio should not be more than 16!\n");
				goto error_scale;
			}
		}
			
		if((req->src0.w%pre_scale_w)!=0) //ceil
		{
			pre_scale_output_w = req->src0.w/pre_scale_w+1;
		}
		else
		{
			pre_scale_output_w = req->src0.w/pre_scale_w;
		}

		if((req->src0.h%pre_scale_h)!=0)//ceil
		{
			pre_scale_output_h	= req->src0.h/pre_scale_h +1;
		}
		else
		{
			pre_scale_output_h = req->src0.h/pre_scale_h;
		}

		//when fmt is YUV,make sure pre_scale_output_w and pre_scale_output_h are even
		if(IS_YCRCB(req->src0.fmt))
		{
			if((pre_scale_output_w & 0x1) != 0)
			{
				pre_scale_output_w -=1;
				src0_w = pre_scale_output_w * pre_scale_w;
			}
			if((pre_scale_output_h & 0x1) != 0)
			{
				pre_scale_output_h -=1;
				src0_h = pre_scale_output_h * pre_scale_h;
			}
		}
			
		ipp_write((ipp_read(IPP_CONFIG)&0xffffffef)|PRE_SCALE, IPP_CONFIG); 		//enable pre_scale
		ipp_write((pre_scale_h-1)<<3|(pre_scale_w-1),IPP_PRE_SCL_PARA);
		ipp_write(((pre_scale_output_h)<<16)|(pre_scale_output_w), IPP_PRE_IMG_INFO);

	}
	else//no pre_scale
	{
		ipp_write(ipp_read(IPP_CONFIG)&(~PRE_SCALE), IPP_CONFIG); //disable pre_scale
		ipp_write(0,IPP_PRE_SCL_PARA);
		ipp_write((req->src0.h<<16)|req->src0.w, IPP_PRE_IMG_INFO);
	}

	ipp_write(src0_h<<16|src0_w, IPP_SRC_IMG_INFO);
	
	/*Configure Post_scale*/
	if((IPP_ROT_90 == rotate) || (IPP_ROT_270 == rotate))
	{
		if (( (req->src0.h%req->dst0.w)!=0)||( (req->src0.w%req->dst0.h)!= 0)//non-integer down-scaling
			||((req->src0.h/req->dst0.w)>8)||((req->src0.w/req->dst0.h)>8)	 //down-scaling ratio > 8
			||(req->dst0.w > req->src0.h)  ||(req->dst0.h > req->src0.w))	 //up-scaling
							
		{
			post_scale = 1;
		}
		else
		{
			post_scale = 0;
		}
	}
	else  //0 180 x-flip y-flip
	{
		if (( (req->src0.w%req->dst0.w)!=0)||( (req->src0.h%req->dst0.h)!= 0)//non-integer down-scaling
			||((req->src0.w/req->dst0.w)>8)||((req->src0.h/req->dst0.h)>8)	 //down-scaling ratio > 8
			||(req->dst0.w > req->src0.w)  ||(req->dst0.h > req->src0.h))	 //up-scaling
		{
			post_scale = 1;
		}
		else
		{
			post_scale = 0;
		}
	}

	if(post_scale)
	{
		if(pre_scale)
		{
			post_scale_input_w = pre_scale_output_w;
			post_scale_input_h = pre_scale_output_h;
		}
		else
		{
			post_scale_input_w = req->src0.w;
			post_scale_input_h = req->src0.h;
		}
			
		if((IPP_ROT_90 == rotate) || (IPP_ROT_270 == rotate))
		{
			DBG("post_scale_input_w %d ,post_scale_input_h %d !!!\n",post_scale_input_w,post_scale_input_h);

			switch(req->src0.fmt)
			{
			case IPP_XRGB_8888:
			case IPP_RGB_565:
			case IPP_Y_CBCR_H1V1:
				//In the horizontal scale case, the factor must be decremented by 1 if it comes out to an integer
				 if(((4096*(post_scale_input_w-1))%(req->dst0.h-1))==0)
				 {
					post_scale_w = (uint32_t)(4096*(post_scale_input_w-1)/(req->dst0.h-1))-1;
				 }
				 else
				 {
					 post_scale_w = (uint32_t)(4096*(post_scale_input_w-1)/(req->dst0.h-1));
				 }
				 break;

			case IPP_Y_CBCR_H2V1:
			case IPP_Y_CBCR_H2V2:
				//In the horizontal scale case, the factor must be decremented by 1 if it comes out to an integer
				 if(((4096*(post_scale_input_w/2-1))%(req->dst0.h/2-1))==0)
				 {
					post_scale_w = (uint32_t)(4096*(post_scale_input_w/2-1)/(req->dst0.h/2-1))-1;
				 }
				 else
				 {
					 post_scale_w = (uint32_t)(4096*(post_scale_input_w/2-1)/(req->dst0.h/2-1));
				 }
				 break;

			default:
				break;
			}
			post_scale_h = (uint32_t)(4096*(post_scale_input_h -1)/(req->dst0.w-1));

			DBG("1111 post_scale_w %x,post_scale_h %x!!! \n",post_scale_w,post_scale_h);
		}
		else// 0 180 x-flip y-flip
		{
			switch(req->src0.fmt)
			{
			case IPP_XRGB_8888:
			case IPP_RGB_565:
			case IPP_Y_CBCR_H1V1:
				//In the horizontal scale case, the factor must be decremented by 1 if it comes out to an integer
				 if(((4096*(post_scale_input_w-1))%(req->dst0.w-1))==0)
				 {
					post_scale_w = (uint32_t)(4096*(post_scale_input_w-1)/(req->dst0.w-1))-1;
				 }
				 else
				 {
					 post_scale_w = (uint32_t)(4096*(post_scale_input_w-1)/(req->dst0.w-1));
				 }
				 break;

			case IPP_Y_CBCR_H2V1:
			case IPP_Y_CBCR_H2V2:
				//In the horizontal scale case, the factor must be decremented by 1 if it comes out to an integer
				 if(((4096*(post_scale_input_w/2-1))%(req->dst0.w/2-1))==0)
				 {
					post_scale_w = (uint32_t)(4096*(post_scale_input_w/2-1)/(req->dst0.w/2-1))-1;
				 }
				 else
				 {
					 post_scale_w = (uint32_t)(4096*(post_scale_input_w/2-1)/(req->dst0.w/2-1));
				 }
				 break;

			default:
				break;
			}
			post_scale_h = (uint32_t)(4096*(post_scale_input_h -1)/(req->dst0.h-1));

		}

		/*only support 1/2 to 4 times scaling,but two cases can pass
		1.176*144->480*800, YUV420 
		2.128*128->480*800, YUV420
		*/
		if(!((req->src0.fmt == IPP_Y_CBCR_H2V2)&&
			(((req->src0.w == 176)&&(req->src0.h == 144))||((req->src0.w == 128)&&(req->src0.h == 128)))&&
			((req->dst0.w == 480)&&(req->dst0.h == 800))))	
		{	
			
			if(post_scale_w<0x3ff || post_scale_w>0x1fff || post_scale_h<0x400 || post_scale_h>0x2000 )
			{
				printk("invalid post_scale para!\n");
				goto error_scale;
			}
		}
		ipp_write((ipp_read(IPP_CONFIG)&0xfffffff7)|POST_SCALE, IPP_CONFIG); //enable post_scale
		ipp_write((post_scale_h<<16)|post_scale_w, IPP_POST_SCL_PARA);
	}
	else //no post_scale
	{
		DBG("no post_scale !!!!!! \n");
		ipp_write(ipp_read(IPP_CONFIG)&(~POST_SCALE), IPP_CONFIG); //disable post_scale
		ipp_write((post_scale_h<<16)|post_scale_w, IPP_POST_SCL_PARA);
	}

	/* Configure rotation */

	if(IPP_ROT_0 == req->flag)
	{
		ipp_write(ipp_read(IPP_CONFIG)&(~ROT_ENABLE), IPP_CONFIG);
	}
	else
	{
		ipp_write(ipp_read(IPP_CONFIG)|ROT_ENABLE, IPP_CONFIG);
		ipp_write((ipp_read(IPP_CONFIG)&0xffffff1f)|(rotate<<5), IPP_CONFIG);
	}

	/*Configure deinterlace*/
	if(req->deinterlace_enable == 1)
	{
		//only support YUV format
		if(IS_YCRCB(req->src0.fmt))
		{
			//If pre_scale is enable, Deinterlace is done by scale filter
			if(!pre_scale)
			{
				//check the deinterlace parameters
				if((req->deinterlace_para0 < 32) && (req->deinterlace_para1 < 32) && (req->deinterlace_para2 < 32) 
					&& ((req->deinterlace_para0 + req->deinterlace_para1 + req->deinterlace_para2) == 32))
				{
					deinterlace_config = (req->deinterlace_enable<<24) | (req->deinterlace_para0<<19) | (req->deinterlace_para1<<14) | (req->deinterlace_para2<<9);
					DBG("para0 %d, para1 %d, para2 %d,deinterlace_config  %x\n",req->deinterlace_para0,req->deinterlace_para1,req->deinterlace_para2,deinterlace_config);
					#ifdef CONFIG_DEINTERLACE
						ipp_write((ipp_read(IPP_CONFIG)&0xFE0001FF)|deinterlace_config, IPP_CONFIG);
					#else
						printk("does not support deinterlacing!\n");
						ipp_write(ipp_read(IPP_CONFIG)&(~DEINTERLACE_ENABLE), IPP_CONFIG); //disable deinterlace
					#endif
				}
				else
				{
					ERR("invalid deinterlace parameters!\n");
				}
			}
		}
		else
		{
			ERR("only support YUV format!\n");
		}
	}
	else
	{
		ipp_write(ipp_read(IPP_CONFIG)&(~DEINTERLACE_ENABLE), IPP_CONFIG); //disable deinterlace
	}

	/*Configure other*/
	ipp_write((req->dst_vir_w<<16)|req->src_vir_w, IPP_IMG_VIR);

	//store clip mode 
	if(req->store_clip_mode == 1)
	{
		ipp_write(ipp_read(IPP_CONFIG)|STORE_CLIP_MODE, IPP_CONFIG);
	}
	else
	{
		ipp_write(ipp_read(IPP_CONFIG)&(~STORE_CLIP_MODE), IPP_CONFIG);
	}


	/* Start the operation */
	ipp_write(8, IPP_INT);		

	ipp_write(1, IPP_PROCESS_ST);

	dsb();
	dmac_clean_range(drvdata->ipp_base,drvdata->ipp_base+0x54);
#ifdef IPP_TEST
	hw_start = ktime_get(); 
#endif	

    goto error_noerror;


error_status:
error_scale:
	ret = -EINVAL;
	ipp_soft_reset();
	ipp_power_off(NULL);
error_input:
error_noerror:
	drvdata->ipp_result = ret;
	DBG("Omegamoon >>> IPP Blit Finished\n");
	return ret;
}
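The post-scale factor computed above follows a 4.12 fixed-point convention (0x1000 represents 1.0, matching the 0x1000 defaults at the top of ipp_blit()). A worked example for an RGB source scaled from 640 pixels wide to 320: 4096*(640-1)/(320-1) = 2617344/319 = 8204 with a nonzero remainder, so post_scale_w = 8204 (about 2.003 in 4.12, a roughly 2x down-scale); when the division is exact, the code subtracts 1 per the comments above. A standalone sketch of that rule:

#include <stdint.h>

/* width/height factor as used above; in/out are pixel counts > 1 */
static uint32_t post_scale_factor(uint32_t in, uint32_t out)
{
	uint32_t num = 4096u * (in - 1u);

	if ((num % (out - 1u)) == 0u)
		return num / (out - 1u) - 1u;	/* exact: minus 1, per the rule above */
	return num / (out - 1u);
}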
Example #12
static void nand_cs_off(void)
{
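	/* make sure all NAND writes have completed before deasserting chip select */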
	dsb();

	gpio_set_value(GPIO11_NAND_CS, 1);
}
Example #13
XStatus
emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p)
{
	struct pbuf *q;
	int n_pbufs;
	XEmacPs_Bd *txbdset, *txbd, *last_txbd = NULL;
	XStatus Status;
	XEmacPs_BdRing *txring;
	unsigned int BdIndex;
	unsigned int Index;
	u32 RegVal;
	u32 Cntr;
	unsigned int *Temp;

	Xil_ExceptionDisable();
#ifdef PEEP
    while((XEmacPs_ReadReg((xemacpsif->emacps).Config.BaseAddress, XEMACPS_TXSR_OFFSET)) & 0x08);
#endif
	txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));

	/* first count the number of pbufs */
	for (q = p, n_pbufs = 0; q != NULL; q = q->next)
		n_pbufs++;

	/* obtain that many BDs */
	Status = XEmacPs_BdRingAlloc(txring, n_pbufs, &txbdset);
	if (Status != XST_SUCCESS) {
		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error allocating TxBD\r\n"));
		Xil_ExceptionEnable();
		return ERR_IF;
	}

	for(q = p, txbd = txbdset; q != NULL; q = q->next) {
		BdIndex = XEMACPS_BD_TO_INDEX(txring, txbd);
		if (tx_pbufs_storage[BdIndex] != 0) {
			Xil_ExceptionEnable();
			LWIP_DEBUGF(NETIF_DEBUG, ("PBUFS not available\r\n"));
			return ERR_IF;
		}

		/* Send the data from the pbuf to the interface, one pbuf at a
		   time. The size of the data in each pbuf is kept in the ->len
		   variable. */
		Xil_DCacheFlushRange((unsigned int)q->payload, (unsigned)q->len);
		XEmacPs_BdSetAddressTx(txbd, (u32)q->payload);
		if (q->len > (XEMACPS_MAX_FRAME_SIZE - 18))
			XEmacPs_BdSetLength(txbd, (XEMACPS_MAX_FRAME_SIZE - 18) & 0x3FFF);
		else
			XEmacPs_BdSetLength(txbd, q->len & 0x3FFF);

		tx_pbufs_storage[BdIndex] = (int)q;

		pbuf_ref(q);
		last_txbd = txbd;
		XEmacPs_BdClearLast(txbd);
		dmb();
		dsb();
		txbd = XEmacPs_BdRingNext(txring, txbd);
	}
	XEmacPs_BdSetLast(last_txbd);
	for (txbd = txbdset, Cntr = n_pbufs; Cntr != 0; Cntr--) {
		XEmacPs_BdClearTxUsed(txbd);
		txbd = XEmacPs_BdRingNext(txring, txbd);
	}

	dmb();
	dsb();
	Status = XEmacPs_BdRingToHw(txring, n_pbufs, txbdset);
	if (Status != XST_SUCCESS) {
		Xil_ExceptionEnable();
		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error submitting TxBD\r\n"));
		return ERR_IF;
	}
	
	/* Start transmit */
	XEmacPs_WriteReg((xemacpsif->emacps).Config.BaseAddress,
	XEMACPS_NWCTRL_OFFSET,
	(XEmacPs_ReadReg((xemacpsif->emacps).Config.BaseAddress,
	XEMACPS_NWCTRL_OFFSET) | XEMACPS_NWCTRL_STARTTX_MASK));
	dmb();
	dsb();
	Xil_ExceptionEnable();
	return Status;
}
/*****************************************************************************
* FUNCTION
*  hal_rx_dma_irq_handler
* DESCRIPTION
*  lower level rx interrupt handler
* PARAMETERS
* p_dma_info   [IN]        pointer to BTIF dma channel's information
* p_buf     [IN/OUT] pointer to rx data buffer
* max_len  [IN]        max length of rx buffer
* RETURNS
*  0 or positive means number of bytes processed; negative means fail
*****************************************************************************/
int hal_rx_dma_irq_handler(P_MTK_DMA_INFO_STR p_dma_info,
			   unsigned char *p_buf, const unsigned int max_len)
{
	int i_ret = -1;
	unsigned int valid_len = 0;
	unsigned int wpt_wrap = 0;
	unsigned int rpt_wrap = 0;
	unsigned int wpt = 0;
	unsigned int rpt = 0;
	unsigned int tail_len = 0;
	unsigned int real_len = 0;
	unsigned int base = p_dma_info->base;
	P_DMA_VFIFO p_vfifo = p_dma_info->p_vfifo;
	dma_rx_buf_write rx_cb = p_dma_info->rx_cb;
	unsigned char *p_vff_buf = NULL;
	unsigned char *vff_base = p_vfifo->p_vir_addr;
	unsigned int vff_size = p_vfifo->vfifo_size;
	P_MTK_BTIF_DMA_VFIFO p_mtk_vfifo = container_of(p_vfifo,
							MTK_BTIF_DMA_VFIFO,
							vfifo);
	unsigned long flag = 0;

	spin_lock_irqsave(&(g_clk_cg_spinlock), flag);
#if MTK_BTIF_ENABLE_CLK_CTL
	if (0 == clock_is_on(MTK_BTIF_APDMA_CLK_CG)) {
		spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
		BTIF_ERR_FUNC("%s: clock is off before irq handle done!!!\n",
			      __FILE__);
		return i_ret;
	}
#endif
/*disable DMA Rx IER*/
	hal_btif_dma_ier_ctrl(p_dma_info, false);

/*clear Rx DMA's interrupt status*/
	BTIF_SET_BIT(RX_DMA_INT_FLAG(base), RX_DMA_INT_DONE | RX_DMA_INT_THRE);

	valid_len = BTIF_READ32(RX_DMA_VFF_VALID_SIZE(base));
	rpt = BTIF_READ32(RX_DMA_VFF_RPT(base));
	wpt = BTIF_READ32(RX_DMA_VFF_WPT(base));
	if ((0 == valid_len) && (rpt == wpt)) {
		BTIF_DBG_FUNC
		    ("rx interrupt, no data available in Rx DMA, wpt(0x%08x), rpt(0x%08x)\n",
		     rpt, wpt);
	}

	i_ret = 0;

	while ((0 < valid_len) || (rpt != wpt)) {
		rpt_wrap = rpt & DMA_RPT_WRAP;
		wpt_wrap = wpt & DMA_WPT_WRAP;
		rpt &= DMA_RPT_MASK;
		wpt &= DMA_WPT_MASK;

/*calculate the length of available data in vFIFO*/
		if (wpt_wrap != p_mtk_vfifo->last_wpt_wrap) {
			real_len = wpt + vff_size - rpt;
		} else {
			real_len = wpt - rpt;
		}

		if (NULL != rx_cb) {
			tail_len = vff_size - rpt;
			p_vff_buf = vff_base + rpt;
			if (tail_len >= real_len) {
				(*rx_cb) (p_dma_info, p_vff_buf, real_len);
			} else {
				(*rx_cb) (p_dma_info, p_vff_buf, tail_len);
				p_vff_buf = vff_base;
				(*rx_cb) (p_dma_info, p_vff_buf, real_len -
					  tail_len);
			}
			i_ret += real_len;
		} else {
			BTIF_ERR_FUNC
			    ("no rx_cb found, please check your init process\n");
		}
		dsb();
		rpt += real_len;
		if (rpt >= vff_size) {
/*read-pointer wrap bit must be inverted*/
			rpt_wrap ^= DMA_RPT_WRAP;
			rpt %= vff_size;
		}
		rpt |= rpt_wrap;
/*record wpt, last_wpt_wrap, rpt, last_rpt_wrap*/
		p_mtk_vfifo->wpt = wpt;
		p_mtk_vfifo->last_wpt_wrap = wpt_wrap;

		p_mtk_vfifo->rpt = rpt;
		p_mtk_vfifo->last_rpt_wrap = rpt_wrap;

/*update rpt information to DMA controller*/
		btif_reg_sync_writel(rpt, RX_DMA_VFF_RPT(base));

/*get vff valid size again and check if rx data is processed completely*/
		valid_len = BTIF_READ32(RX_DMA_VFF_VALID_SIZE(base));

		rpt = BTIF_READ32(RX_DMA_VFF_RPT(base));
		wpt = BTIF_READ32(RX_DMA_VFF_WPT(base));
	}

/*enable DMA Rx IER*/
	hal_btif_dma_ier_ctrl(p_dma_info, true);
	spin_unlock_irqrestore(&(g_clk_cg_spinlock), flag);
	return i_ret;
}
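The occupancy rule used by the handler above compares the wrap bits of the two pointers: when the write pointer has wrapped one more time than the read pointer, the readable data crosses the end of the vFIFO. A minimal sketch with names mirroring the handler:

static unsigned int vfifo_data_len(unsigned int wpt, unsigned int rpt,
				   unsigned int wpt_wrap, unsigned int rpt_wrap,
				   unsigned int vff_size)
{
	if (wpt_wrap != rpt_wrap)
		return wpt + vff_size - rpt;	/* wrapped: data crosses the buffer end */
	return wpt - rpt;			/* linear: one contiguous segment */
}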
/*****************************************************************************
* FUNCTION
*  hal_dma_send_data
* DESCRIPTION
*  send data through btif in DMA mode
* PARAMETERS
* p_dma_info   [IN]        pointer to BTIF dma channel's information
* p_buf     [IN]        pointer to tx data buffer
* buf_len  [IN]        tx buffer length
* RETURNS
*  0 means success, negative means fail
*****************************************************************************/
int hal_dma_send_data(P_MTK_DMA_INFO_STR p_dma_info,
		      const unsigned char *p_buf, const unsigned int buf_len)
{
	int i_ret = -1;
	unsigned int base = p_dma_info->base;
	P_DMA_VFIFO p_vfifo = p_dma_info->p_vfifo;
	unsigned int len_to_send = buf_len;
	unsigned int ava_len = 0;
	unsigned int wpt = 0;
	unsigned int last_wpt_wrap = 0;
	unsigned int vff_size = 0;
	unsigned char *p_data = (unsigned char *)p_buf;
	P_MTK_BTIF_DMA_VFIFO p_mtk_vfifo = container_of(p_vfifo,
							MTK_BTIF_DMA_VFIFO,
							vfifo);

	BTIF_TRC_FUNC();
	if ((NULL == p_buf) || (0 == buf_len)) {
		i_ret = ERR_INVALID_PAR;
		BTIF_ERR_FUNC("invalid parameters, p_buf:0x%08x, buf_len:%d\n",
			      p_buf, buf_len);
		return i_ret;
	}
/*check if tx dma in flush operation? if yes, should wait until DMA finish flush operation*/
/*currently uplayer logic will make sure this pre-condition*/
/*disable Tx IER, in case Tx irq happens, flush bit may be set in irq handler*/
	btif_tx_dma_ier_ctrl(p_dma_info, false);

	vff_size = p_mtk_vfifo->vfifo.vfifo_size;
	ava_len = BTIF_READ32(TX_DMA_VFF_LEFT_SIZE(base));
	wpt = BTIF_READ32(TX_DMA_VFF_WPT(base)) & DMA_WPT_MASK;
	last_wpt_wrap = BTIF_READ32(TX_DMA_VFF_WPT(base)) & DMA_WPT_WRAP;

/*copy data to vFIFO. Note: ava_len should always be larger than buf_len, otherwise the common logic layer will not call hal_dma_send_data*/
	if (buf_len > ava_len) {
		BTIF_ERR_FUNC
		    ("length to send (%d) > length available (%d), abnormal!!!---!!!\n",
		     buf_len, ava_len);
		BUG_ON(buf_len > ava_len);	/* this will cause a kernel panic */
	}

	len_to_send = buf_len < ava_len ? buf_len : ava_len;
	if (len_to_send + wpt >= vff_size) {
		unsigned int tail_len = vff_size - wpt;
		memcpy((p_mtk_vfifo->vfifo.p_vir_addr + wpt), p_data, tail_len);
		p_data += tail_len;
		memcpy(p_mtk_vfifo->vfifo.p_vir_addr,
		       p_data, len_to_send - tail_len);
/*make sure all data write to memory area tx vfifo locates*/
		dsb();

/*calculate WPT*/
		wpt = wpt + len_to_send - vff_size;
		last_wpt_wrap ^= DMA_WPT_WRAP;
	} else {
		memcpy((p_mtk_vfifo->vfifo.p_vir_addr + wpt),
		       p_data, len_to_send);
/*make sure all data write to memory area tx vfifo locates*/
		dsb();

/*calculate WPT*/
		wpt += len_to_send;
	}
	p_mtk_vfifo->wpt = wpt;
	p_mtk_vfifo->last_wpt_wrap = last_wpt_wrap;

/*make sure tx dma is allowed(tx flush bit is not set) to use before update WPT*/
	if (hal_dma_is_tx_allow(p_dma_info)) {
/*make sure tx dma enabled*/
		hal_btif_dma_ctrl(p_dma_info, DMA_CTRL_ENABLE);

/*update WTP to Tx DMA controller's control register*/
		btif_reg_sync_writel(wpt | last_wpt_wrap, TX_DMA_VFF_WPT(base));

		if ((8 > BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base))) &&
		    (0 < BTIF_READ32(TX_DMA_VFF_VALID_SIZE(base)))) {
/*0 < valid size in Tx vFIFO < 8 && TX Flush is not in process<should always be done>? if yes, set flush bit to DMA*/
			_tx_dma_flush(p_dma_info);
		}
		i_ret = len_to_send;
	} else {
/*TODO: print error log*/
		BTIF_ERR_FUNC
		    ("Tx DMA flush operation is in process, this case should never happen, please check if tx operation is allowed before call this API\n");
/*if flush operation is in process , we will return 0*/
		i_ret = 0;
	}

/*Enable Tx IER*/
	btif_tx_dma_ier_ctrl(p_dma_info, true);

	BTIF_TRC_FUNC();
	return i_ret;
}
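The wrap-around copy above splits a write of len_to_send bytes at offset wpt into at most two memcpy() calls. A standalone sketch of the same ring-buffer write, returning the new write offset:

#include <string.h>

static unsigned int ring_write(unsigned char *ring, unsigned int vff_size,
			       unsigned int wpt,
			       const unsigned char *src, unsigned int len)
{
	unsigned int tail = vff_size - wpt;

	if (len >= tail) {			/* crosses the end: two copies */
		memcpy(ring + wpt, src, tail);
		memcpy(ring, src + tail, len - tail);
		return len - tail;		/* wrapped write offset */
	}
	memcpy(ring + wpt, src, len);		/* fits in one copy */
	return wpt + len;
}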
Example #16
static void hdlcd_flush(void)
{
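    /* complete any outstanding memory writes before the flush returns */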
    dsb();
}