/*
 * This function must be defined to allow the WB API to
 * register a callback function that is called when a
 * DMA transfer is complete.
 */
void cy_as_hal_dma_register_callback(cy_as_hal_device_tag tag,
					cy_as_hal_dma_complete_callback cb)
{
	DBGPRN("<1>\n%s: WB API has registered a dma_complete callback:%x\n",
			__func__, (uint32_t)cb);
	callback = cb ;
}
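/*
 * Illustrative sketch (not part of the driver) of how the WB API side might
 * use cy_as_hal_dma_register_callback() above. The callback signature is
 * assumed here from the invocation in cy_service_e_p_dma_read_request()
 * below (tag, endpoint, bytes transferred, status):
 *
 *	static void my_dma_done(cy_as_hal_device_tag tag, uint8_t ep,
 *			uint32_t cnt, cy_as_return_status_t status)
 *	{
 *		// consume the 'cnt' bytes completed on endpoint 'ep'
 *	}
 *
 *	cy_as_hal_dma_register_callback(tag, my_dma_done);
 */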
void cy_as_hal_dma_cancel_request(cy_as_hal_device_tag tag, uint8_t ep)
{
	DBGPRN("cy_as_hal_dma_cancel_request on ep:%d", ep);
	if (end_points[ep].pending)
		cy_as_hal_write_register(tag,
				CY_AS_MEM_P0_EP2_DMA_REG + ep - 2, 0);

	end_points[ep].buffer_valid = cy_false ;
	end_points[ep].type = cy_as_hal_none;
}
void verbose_rq_flags(int flags)
{
	int i;
	uint32_t j;

	j = 1;
	for (i = 0; i < 32; i++) {
		if (flags & j)
			DBGPRN("<1>%s", rq_flag_bit_names[i]);
		j = j << 1;
	}
}
/* forward declaration; defined later in this file, used below */
static inline bool prep_for_next_xfer(cy_as_hal_device_tag tag, uint8_t ep);

/*
 * Astoria DMA read request, APP_CPU reads from WB ep buffer
 */
static void cy_service_e_p_dma_read_request(
			cy_as_omap_dev_kernel *dev_p, uint8_t ep)
{
	cy_as_hal_device_tag tag = (cy_as_hal_device_tag)dev_p ;
	uint16_t v, i, size;
	uint16_t *dptr;
	uint16_t ep_dma_reg = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2;
	register void *read_addr ;
	register uint16_t a, b, c, d, e, f, g, h ;
	/*
	 * get the XFER size from the WB EP DMA register
	 */
	v = cy_as_hal_read_register(tag, ep_dma_reg);

	/*
	 * amount of data in the EP buffer, in bytes
	 */
	size = v & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK;

	/*
	 * memory pointer for this DMA packet xfer (sub_segment)
	 */
	dptr = (uint16_t *) end_points[ep].data_p;

	read_addr = dev_p->m_vma_addr_base + CYAS_DEV_CALC_EP_ADDR(ep) ;

	cy_as_hal_assert(size != 0);

	if (size) {
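		/*
		 * read_addr points at a fixed EP buffer window and is
		 * deliberately never incremented: each readw() returns the
		 * next 16-bit word from the endpoint buffer. The loop below
		 * is unrolled to move 8 words (16 bytes) per iteration.
		 */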
		/*
		 * Now, read the data from the device
		 */
		for (i = size / 16; i > 0; i--) {
			a = (unsigned short) readw(read_addr) ;
			b = (unsigned short) readw(read_addr) ;
			c = (unsigned short) readw(read_addr) ;
			d = (unsigned short) readw(read_addr) ;
			e = (unsigned short) readw(read_addr) ;
			f = (unsigned short) readw(read_addr) ;
			g = (unsigned short) readw(read_addr) ;
			h = (unsigned short) readw(read_addr) ;
			*dptr++ = a ;
			*dptr++ = b ;
			*dptr++ = c ;
			*dptr++ = d ;
			*dptr++ = e ;
			*dptr++ = f ;
			*dptr++ = g ;
			*dptr++ = h ;
		}
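		/*
		 * copy the remaining (size & 0xF) / 2 full 16-bit words;
		 * the case labels in the switch below intentionally fall
		 * through so that exactly that many words are read.
		 */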
	
		switch ((size & 0xF) / 2) {
		case 7:
			*dptr = (unsigned short) readw(read_addr) ;
			dptr++ ;
		case 6:
			*dptr = (unsigned short) readw(read_addr) ;
			dptr++ ;
		case 5:
			*dptr = (unsigned short) readw(read_addr) ;
			dptr++ ;
		case 4:
			*dptr = (unsigned short) readw(read_addr) ;
			dptr++ ;
		case 3:
			*dptr = (unsigned short) readw(read_addr) ;
			dptr++ ;
		case 2:
			*dptr = (unsigned short) readw(read_addr) ;
			dptr++ ;
		case 1:
			*dptr = (unsigned short) readw(read_addr) ;
			dptr++ ;
			break ;
		}
	
		if (size & 1) {
			/* odd-sized packet: copy the final single byte */
			uint16_t w = (unsigned short) readw(read_addr) ;
			*((uint8_t *)dptr) = (w & 0xff) ;
		}
	}

	/*
	 * clear DMAVALID bit indicating that the data has been read
	 */
	cy_as_hal_write_register(tag, ep_dma_reg, 0) ;

	end_points[ep].seg_xfer_cnt += size;
	end_points[ep].req_xfer_cnt += size;

	/*
	 *  pre-advance data pointer (if it's outside sg
	 * list it will be reset anyway
	 */
	end_points[ep].data_p += size;

	if (prep_for_next_xfer(tag, ep)) {
		/*
		 * we have more data to read in this request;
		 * set up the next dma packet and tell WB how much
		 * data we are going to xfer next
		 */
		v = end_points[ep].dma_xfer_sz/*HAL_DMA_PKT_SZ*/ |
				CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL ;
		cy_as_hal_write_register(tag, ep_dma_reg, v);
	} else {
		end_points[ep].pending	  = cy_false ;
		end_points[ep].type		 = cy_as_hal_none ;
		end_points[ep].buffer_valid = cy_false ;

		/*
		 * notify the API that we are done with rq on this EP
		 */
		if (callback) {
			DBGPRN("<1>trigg rd_dma completion cb: xfer_sz:%d\n",
				end_points[ep].req_xfer_cnt);
			callback(tag, ep,
				end_points[ep].req_xfer_cnt,
				CY_AS_ERROR_SUCCESS);
		}
	}
}
/*
 * preps EP pointers & data counters for the next packet
 * (fragment of the request) xfer; returns true if
 * there is a next transfer, and false if all bytes in
 * the current request have been xfered
 */
static inline bool prep_for_next_xfer(cy_as_hal_device_tag tag, uint8_t ep)
{

	if (!end_points[ep].sg_list_enabled) {
		/*
		 * no further transfers for non storage EPs
		 * (like EP2 during firmware download, done
		 * in 64 byte chunks)
		 */
		if (end_points[ep].req_xfer_cnt >= end_points[ep].req_length) {
			DBGPRN("<1> %s():RQ sz:%d non-_sg EP:%d completed\n",
				__func__, end_points[ep].req_length, ep);

			/*
			 * no more transfers, we are done with the request
			 */
			return false;
		}

		/*
		 * calculate the size of the next DMA xfer; corner
		 * case for non-storage EPs where the transfer size
		 * is not equal to N * HAL_DMA_PKT_SZ
		 */
		if ((end_points[ep].req_length - end_points[ep].req_xfer_cnt)
		>= HAL_DMA_PKT_SZ) {
			end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;
		} else {
			/*
			 * that would be the last chunk less
			 * than P-port max size
			 */
			end_points[ep].dma_xfer_sz = end_points[ep].req_length -
					end_points[ep].req_xfer_cnt;
		}

		return true;
	}

	/*
	 * for SG_list assisted dma xfers
	 * are we done with current SG ?
	 */
	if (end_points[ep].seg_xfer_cnt == end_points[ep].sg_p->length) {
		/*
		 *  was it the Last SG segment on the list ?
		 */
		if (sg_is_last(end_points[ep].sg_p)) {
			DBGPRN("<1> %s: EP:%d completed,"
					"%d bytes xfered\n",
					__func__,
					ep,
					end_points[ep].req_xfer_cnt
			);

			return false;
		} else {
			/*
			 * There are more SG segments in the current
			 * request's sg list; set up the next segment
			 */

			end_points[ep].seg_xfer_cnt = 0;
			end_points[ep].sg_p = sg_next(end_points[ep].sg_p);
			/* set data pointer for next DMA sg transfer*/
			end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
			DBGPRN("<1> %s new SG:_va:%p\n\n",
					__func__, end_points[ep].data_p);
		}

	}

	/*
	 * for sg list xfers it will always be 512 or 1024
	 */
	end_points[ep].dma_xfer_sz = HAL_DMA_PKT_SZ;

	/*
	 * next transfer is required
	 */

	return true;
}
/*
 * west bridge astoria ISR (Interrupt handler)
 */
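/*
 * Handler flow: sample the IRQ pin (statistics only), read
 * CY_AS_MEM_P0_INTR_REG, save and then clear the interrupt mask, service
 * DRQ interrupts in a loop bounded by MAX_DRQ_LOOPS_IN_ISR, forward
 * MCU/mailbox/PM/PLL-lock interrupts to cy_as_intr_service_interrupt(),
 * and finally restore the saved mask.
 */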
static irqreturn_t cy_astoria_int_handler(int irq,
				void *dev_id, struct pt_regs *regs)
{
	cy_as_omap_dev_kernel *dev_p;
	uint16_t		  read_val = 0 ;
	uint16_t		  mask_val = 0 ;

	/*
	* debug stuff, counts number of loops per one intr trigger
	*/
	uint16_t		  drq_loop_cnt = 0;
	uint8_t		   irq_pin;
	/*
	 * flags to watch
	 */
	const uint16_t	sentinel = (CY_AS_MEM_P0_INTR_REG_MCUINT |
				CY_AS_MEM_P0_INTR_REG_MBINT |
				CY_AS_MEM_P0_INTR_REG_PMINT |
				CY_AS_MEM_P0_INTR_REG_PLLLOCKINT);

	/*
	 * sample IRQ pin level (just for statistics)
	 */
	irq_pin = __gpio_get_value(AST_INT);

	/*
	 * this one just for debugging
	 */
	intr_sequence_num++ ;

	/*
	 * astoria device handle
	 */
	dev_p = dev_id;

	/*
	 * read Astoria intr register
	 */
	read_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
						CY_AS_MEM_P0_INTR_REG) ;

	/*
	 * save current mask value
	 */
	mask_val = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
						CY_AS_MEM_P0_INT_MASK_REG) ;

	DBGPRN("<1>HAL__intr__enter:_seq:%d, P0_INTR_REG:%x\n",
			intr_sequence_num, read_val);

	/*
	 * Disable WB interrupt signal generation while we are in ISR
	 */
	cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
					CY_AS_MEM_P0_INT_MASK_REG, 0x0000) ;

	/*
	* this is a DRQ Interrupt
	*/
	if (read_val & CY_AS_MEM_P0_INTR_REG_DRQINT) {

		do {
			/*
			 * handle DRQ interrupt
			 */
			drq_loop_cnt++;

			cy_handle_d_r_q_interrupt(dev_p) ;

			/*
			 * spending too much time in the ISR may impact
			 * average system performance
			 */
			if (drq_loop_cnt >= MAX_DRQ_LOOPS_IN_ISR)
				break;

		/*
		 * Keep processing if there is another DRQ int flag
		 */
		} while (cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
					CY_AS_MEM_P0_INTR_REG) &
					CY_AS_MEM_P0_INTR_REG_DRQINT);
	}

	if (read_val & sentinel)
		cy_as_intr_service_interrupt((cy_as_hal_device_tag)dev_p) ;

	DBGPRN("<1>_hal:_intr__exit seq:%d, mask=%4.4x,"
			"int_pin:%d DRQ_jobs:%d\n",
			intr_sequence_num,
			mask_val,
			irq_pin,
			drq_loop_cnt);

	/*
	 * re-enable WB hw interrupts
	 */
	cy_as_hal_write_register((cy_as_hal_device_tag)dev_p,
					CY_AS_MEM_P0_INT_MASK_REG, mask_val) ;

	return IRQ_HANDLED ;
}
/*
 * init OMAP h/w resources
 */
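/*
 * Start-up sequence: init the per-EP DMA bookkeeping, allocate the
 * cy_as_omap_dev_kernel structure, init processor h/w and gpio pins, pulse
 * the Astoria reset pin, verify register access via CY_AS_MEM_CM_WB_CFG_ID,
 * hook up interrupts, and enable SG-assisted DMA on the storage endpoints
 * (EP4 and EP8).
 */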
int cy_as_hal_omap_cram_start(const char *pgm,
				cy_as_hal_device_tag *tag, cy_bool debug)
{
	cy_as_omap_dev_kernel *dev_p ;
	int i;
	u16 data16[4];
	uint32_t err = 0;
	/* No debug mode support through argument as of now */
	(void)debug;

	have_irq = false;

	/*
	 * Initialize the HAL level endpoint DMA data.
	 */
	for (i = 0 ; i < sizeof(end_points)/sizeof(end_points[0]) ; i++) {
		end_points[i].data_p = 0 ;
		end_points[i].pending = cy_false ;
		end_points[i].size = 0 ;
		end_points[i].type = cy_as_hal_none ;
		end_points[i].sg_list_enabled = cy_false;

		/*
		 * by default, DMA transfers to/from the EPs don't use an
		 * sg_list; this implies that upper-level devices such as the
		 * block device have to enable it for their EPs in their
		 * initialization code
		 */
	}

	/* allocate memory for OMAP HAL*/
	dev_p = (cy_as_omap_dev_kernel *)cy_as_hal_alloc(
						sizeof(cy_as_omap_dev_kernel)) ;
	if (dev_p == 0) {
		cy_as_hal_print_message("out of memory allocating OMAP"
					"device structure\n") ;
		return 0 ;
	}

	dev_p->m_sig = CY_AS_OMAP_CRAM_HAL_SIG;

	/* initialize OMAP hardware and all gpio pins */
	err = cy_as_hal_processor_hw_init(dev_p);
	if (err)
		goto bus_acc_error;

	/*
	 * Now perform a hard reset of the device to have
	 * the new settings take effect
	 */
	__gpio_set_value(AST_WAKEUP, 1);

	/*
	 * do Astoria  h/w reset
	 */
	DBGPRN(KERN_INFO"-_-_pulse -> westbridge RST pin\n");

	/*
	 * NEGATIVE PULSE on RST pin
	 */
	__gpio_set_value(AST_RESET, 0);
	mdelay(1);
	__gpio_set_value(AST_RESET, 1);
	mdelay(50);


	/*
	 * NOTE: if you want to capture bus activity on the LA,
	 * don't use printks in between the activities you want to capture.
	 * printks may take milliseconds, and the data of interest
	 * will fall outside the LA capture window/buffer
	 */
	cy_as_hal_dump_reg((cy_as_hal_device_tag)dev_p);

	data16[0] = cy_as_hal_read_register((cy_as_hal_device_tag)dev_p,
					CY_AS_MEM_CM_WB_CFG_ID);

	if (((data16[0] & 0xA100) != 0xA100) &&
			((data16[0] & 0xA200) != 0xA200)) {
		/*
		 * astoria device is not found
		 */
		printk(KERN_ERR "ERROR: astoria device is not found, "
			"CY_AS_MEM_CM_WB_CFG_ID %4.4x", data16[0]);
		goto bus_acc_error;
	}

	cy_as_hal_print_message(KERN_INFO" register access test:"
				"\n CY_AS_MEM_CM_WB_CFG_ID:%4.4x\n"
				"after cfg_wr:%4.4x\n\n",
				data16[0], data16[1]);

	dev_p->thread_flag = 1 ;
	spin_lock_init(&int_lock) ;
	dev_p->m_next_p = m_omap_list_p ;

	m_omap_list_p = dev_p ;
	*tag = dev_p;

	cy_as_hal_configure_interrupts((void *)dev_p);

	cy_as_hal_print_message(KERN_INFO"OMAP3430__hal started tag:%p"
				", kernel HZ:%d\n", dev_p, HZ);

	/*
	 * make processor-to-storage endpoints SG assisted by default
	 */
	cy_as_hal_set_ep_dma_mode(4, true);
	cy_as_hal_set_ep_dma_mode(8, true);

	return 1 ;

	/*
	 * there's been a NAND bus access error or
	 * astoria device is not connected
	 */
bus_acc_error:
	/*
	 * at this point hal tag hasn't been set yet
	 * so the device will not call omap_stop
	 */
	cy_as_hal_omap_hardware_deinit(dev_p);
	cy_as_hal_free(dev_p) ;
	return 0;
}
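/*
 * Read-path overview for this HAL: cy_as_hal_dma_setup_read() below arms
 * the endpoint and programs the EP DMA register with the first fragment
 * size; the resulting DRQ interrupt is taken by cy_astoria_int_handler()
 * and cy_handle_d_r_q_interrupt(), which services the endpoint (see
 * cy_service_e_p_dma_read_request() above), copying the data and either
 * arming the next fragment via prep_for_next_xfer() or firing the
 * registered dma_complete callback.
 */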
/*
 * This function must be defined to transfer a block of data from
 * the WestBridge device.  This function can use the burst read
 * (DMA) capabilities of WestBridge to do this, or it can just
 * copy the data using reads.
 */
void cy_as_hal_dma_setup_read(cy_as_hal_device_tag tag,
					uint8_t ep, void *buf,
					uint32_t size, uint16_t maxsize)
{
	uint32_t addr ;
	uint16_t v ;

	/*
	 * Note: "size" is the actual request size
	 * "maxsize" - is the P port fragment size
	 * No EP0 or EP1 traffic should get here
	 */
	cy_as_hal_assert(ep != 0 && ep != 1) ;

	/*
	 * If this asserts, we have an ordering problem.
	 * Another DMA request is coming down before the
	 * previous one has completed. We should not get
	 * new requests while the current one is still in progress.
	 */

	cy_as_hal_assert(end_points[ep].buffer_valid == cy_false);

	end_points[ep].buffer_valid = cy_true ;
	end_points[ep].type = cy_as_hal_read ;
	end_points[ep].pending = cy_true;
	end_points[ep].req_xfer_cnt = 0;
	end_points[ep].req_length = size;

	if (size >= maxsize) {
		/*
		 * set the xfer size for the very first DMA operation to
		 * the P port max packet size (typically 512 or 1024)
		 */
		end_points[ep].dma_xfer_sz = maxsize;
	} else {
		/*
		 * so that we can handle small xfers in the case
		 * of non-storage EPs
		 */
		end_points[ep].dma_xfer_sz = size;
	}

	addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2 ;

	if (end_points[ep].sg_list_enabled) {
		/*
		 * Handle sg-list assisted EPs:
		 * seg_xfer_cnt - keeps track of the N of bytes xferred in
		 *		the current sg_list segment
		 * buf - pointer to the SG list
		 * data_p - data pointer for the 1st DMA segment
		 */
		end_points[ep].seg_xfer_cnt = 0 ;
		end_points[ep].sg_p = buf;
		end_points[ep].data_p = sg_virt(end_points[ep].sg_p);

		#ifdef DBGPRN_DMA_SETUP_RD
		DBGPRN("cyasomaphal:DMA_setup_read sg_list EP:%d, "
			   "buf:%p, buf_va:%p, req_sz:%d, maxsz:%d\n",
				ep,
				buf,
				end_points[ep].data_p,
				size,
				maxsize);
		#endif
		v = (end_points[ep].dma_xfer_sz &
				CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
				CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL ;
		cy_as_hal_write_register(tag, addr, v);
	} else {
		/*
		 * non sg-list EP: passed a void *buf rather than a
		 * scatterlist *sg
		 */
		#ifdef DBGPRN_DMA_SETUP_RD
			DBGPRN("%s:non-sg_list EP:%d,"
					"RQ_sz:%d, maxsz:%d\n",
					__func__, ep, size,  maxsize);
		#endif

		end_points[ep].sg_p = NULL;

		/*
		 * must be a VMA of a membuf in kernel space
		 */
		end_points[ep].data_p = buf;

		/*
		 * Program the EP DMA register for Storage endpoints only.
		 */
		if (is_storage_e_p(ep)) {
			v = (end_points[ep].dma_xfer_sz &
					CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK) |
					CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL ;
			cy_as_hal_write_register(tag, addr, v);
		}
	}
}
/*
 * This function must be defined to transfer a block of data to
 * the WestBridge device.  This function can use the burst write
 * (DMA) capabilities of WestBridge to do this, or it can just copy
 * the data using writes.
 */
void cy_as_hal_dma_setup_write(cy_as_hal_device_tag tag,
						uint8_t ep, void *buf,
						uint32_t size, uint16_t maxsize)
{
	uint32_t addr = 0 ;
	uint16_t v  = 0;

	/*
	 * Note: "size" is the actual request size
	 * "maxsize" - is the P port fragment size
	 * No EP0 or EP1 traffic should get here
	 */
	cy_as_hal_assert(ep != 0 && ep != 1) ;

	/*
	 * If this asserts, we have an ordering problem.  Another DMA request
	 * is coming down before the previous one has completed.
	 */
	cy_as_hal_assert(end_points[ep].buffer_valid == cy_false) ;
	end_points[ep].buffer_valid = cy_true ;
	end_points[ep].type = cy_as_hal_write ;
	end_points[ep].pending = cy_true;

	/*
	 * total length of the request
	 */
	end_points[ep].req_length = size;

	if (size >= maxsize) {
		/*
		 * set the xfer size for the very first DMA operation to
		 * the P port max packet size (typically 512 or 1024)
		 */
		end_points[ep].dma_xfer_sz = maxsize;
	} else {
		/*
		 * smaller xfers for non-storage EPs
		 */
		end_points[ep].dma_xfer_sz = size;
	}

	/*
	 * check whether the EP transfer mode uses an sg_list rather than a
	 * plain memory buffer; block devices pass an sg_list to the HAL, so
	 * the HAL can get to the real physical address of each segment and
	 * set up a DMA controller (if there is one)
	 */
	if (end_points[ep].sg_list_enabled) {
		/*
		 * buf -  pointer to the SG list
		 * data_p - data pointer to the 1st DMA segment
		 * seg_xfer_cnt - keeps track of N of bytes sent in current
		 *		sg_list segment
		 * req_xfer_cnt - keeps track of the total N of bytes
		 *		transferred for the request
		 */
		end_points[ep].sg_p = buf;
		end_points[ep].data_p = sg_virt(end_points[ep].sg_p);
		end_points[ep].seg_xfer_cnt = 0 ;
		end_points[ep].req_xfer_cnt = 0;

#ifdef DBGPRN_DMA_SETUP_WR
		DBGPRN("cyasomaphal:%s: EP:%d, buf:%p, buf_va:%p,"
				"req_sz:%d, maxsz:%d\n",
				__func__,
				ep,
				buf,
				end_points[ep].data_p,
				size,
				maxsize);
#endif

	} else {
		/*
		 * setup XFER for non sg_list assisted EPs
		 */

		#ifdef DBGPRN_DMA_SETUP_WR
			DBGPRN("<1>%s non storage or sz < 512:"
					"EP:%d, sz:%d\n", __func__, ep, size);
		#endif

		end_points[ep].sg_p = NULL;

		/*
		 * must be a VMA of a membuf in kernel space
		 */
		end_points[ep].data_p = buf;

		/*
		 * keeps track of the number of bytes xferred for the request
		 */
		end_points[ep].req_xfer_cnt = 0;
	}

	/*
	 * Tell WB we are ready to send data on the given endpoint
	 */
	v = (end_points[ep].dma_xfer_sz & CY_AS_MEM_P0_E_pn_DMA_REG_COUNT_MASK)
			| CY_AS_MEM_P0_E_pn_DMA_REG_DMAVAL ;

	addr = CY_AS_MEM_P0_EP2_DMA_REG + ep - 2 ;

	cy_as_hal_write_register(tag, addr, v) ;
}
/*
 * enables/disables SG-list assisted DMA xfers for the given EP;
 * sg_list assisted xfers can use physical addresses of mem pages when the
 * xfer is performed by a h/w DMA controller rather than the CPU on the P port
 */
void cy_as_hal_set_ep_dma_mode(uint8_t ep, bool sg_xfer_enabled)
{
	end_points[ep].sg_list_enabled = sg_xfer_enabled;
	DBGPRN("<1> EP:%d sg_list assisted DMA mode set to = %d\n",
			ep, end_points[ep].sg_list_enabled);
}
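/*
 * Illustrative sketch (not part of the driver): an upper layer that queues
 * scatter-gather requests would enable SG-assisted mode for its endpoints
 * before issuing transfers, mirroring the defaults set for the storage EPs
 * in cy_as_hal_omap_cram_start(), e.g.:
 *
 *	cy_as_hal_set_ep_dma_mode(4, true);	// P-to-storage EP, SG assisted
 *	cy_as_hal_set_ep_dma_mode(8, true);
 *	cy_as_hal_set_ep_dma_mode(2, false);	// e.g. EP2 firmware download,
 *						// plain buffer mode
 */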