static void handle_bam_mux_cmd(struct work_struct *work)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;
	struct rx_pkt_info *info;
	struct sk_buff *rx_skb;

	info = container_of(work, struct rx_pkt_info, work);
	rx_skb = info->skb;
	kfree(info);	/* the wrapper is done; the skb now owns the data */

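	/* the mux header sits at the front of the DMA'd rx buffer */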
	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	DBG_INC_READ_CNT(sizeof(struct bam_mux_hdr));
	DBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
	if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
	switch (rx_hdr->cmd) {
	case BAM_MUX_HDR_CMD_DATA:
		DBG_INC_READ_CNT(rx_hdr->pkt_len);
		bam_mux_process_data(rx_skb);
		break;
	case BAM_MUX_HDR_CMD_OPEN:
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	case BAM_MUX_HDR_CMD_CLOSE:
		/* probably should drop pending write */
		spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
		bam_ch[rx_hdr->ch_id].status &= ~BAM_CH_REMOTE_OPEN;
		spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		break;
	default:
		pr_err("%s: dropping invalid hdr. magic %x reserved %d cmd %d"
			" pad %d ch %d len %d\n", __func__,
			rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
			rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
		dev_kfree_skb_any(rx_skb);
		queue_rx();
		return;
	}
}
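All of the command handlers above finish by calling queue_rx() to hand a fresh buffer back to the BAM rx pipe. The helper itself is not shown on this page; the following is a minimal sketch of what it plausibly does in this driver, reusing the rx_pkt_info wrapper and bam_rx_pipe seen in these examples (BUFFER_SIZE and the exact sps_transfer_one() flags are assumptions, not verbatim driver code):

static void queue_rx(void)
{
	struct rx_pkt_info *info;
	void *ptr;

	info = kmalloc(sizeof(struct rx_pkt_info), GFP_KERNEL);
	if (!info)
		return;

	INIT_WORK(&info->work, handle_bam_mux_cmd);

	info->skb = __dev_alloc_skb(BUFFER_SIZE, GFP_KERNEL);
	if (!info->skb) {
		kfree(info);
		return;
	}
	ptr = skb_put(info->skb, BUFFER_SIZE);

	/* map the buffer for device writes and queue it on the rx pipe */
	info->dma_address = dma_map_single(NULL, ptr, BUFFER_SIZE,
						DMA_FROM_DEVICE);
	sps_transfer_one(bam_rx_pipe, info->dma_address, BUFFER_SIZE,
				info, SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_EOT);
}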
Example 2
static void bam_mux_process_data(struct sk_buff *rx_skb)
{
	unsigned long flags;
	struct bam_mux_hdr *rx_hdr;

	rx_hdr = (struct bam_mux_hdr *)rx_skb->data;

	rx_skb->data = (unsigned char *)(rx_hdr + 1);
	rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
	rx_skb->len = rx_hdr->pkt_len;

	spin_lock_irqsave(&bam_ch[rx_hdr->ch_id].lock, flags);
	if (bam_ch[rx_hdr->ch_id].receive_cb)
		bam_ch[rx_hdr->ch_id].receive_cb(bam_ch[rx_hdr->ch_id].priv,
							rx_skb);
	else
		dev_kfree_skb_any(rx_skb);
	spin_unlock_irqrestore(&bam_ch[rx_hdr->ch_id].lock, flags);

	queue_rx();
}
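For well-formed packets, the manual data/tail/len surgery above is equivalent to the standard skb helpers; assuming this kernel builds sk_buff with a pointer-typed tail, the same effect could be written as:

	skb_pull(rx_skb, sizeof(struct bam_mux_hdr));	/* strip the mux header */
	skb_trim(rx_skb, rx_hdr->pkt_len);		/* drop trailing pad bytes */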
Example 3
static void bam_init(struct work_struct *work)
{
	u32 h;
	dma_addr_t dma_addr;
	int ret;
	void *a2_virt_addr;

	/* init BAM */
	a2_virt_addr = ioremap_nocache(A2_PHYS_BASE, A2_PHYS_SIZE);
	if (!a2_virt_addr) {
		pr_err("%s: ioremap failed\n", __func__);
		ret = -ENOMEM;
		goto register_bam_failed;
	}
	a2_props.phys_addr = A2_PHYS_BASE;
	a2_props.virt_addr = a2_virt_addr;
	a2_props.virt_size = A2_PHYS_SIZE;
	a2_props.irq = A2_BAM_IRQ;
	a2_props.num_pipes = A2_NUM_PIPES;
	a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
	/* need to free on tear down */
	ret = sps_register_bam_device(&a2_props, &h);
	if (ret < 0) {
		pr_err("%s: register bam error %d\n", __func__, ret);
		goto register_bam_failed;
	}

	bam_tx_pipe = sps_alloc_endpoint();
	if (bam_tx_pipe == NULL) {
		pr_err("%s: tx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto tx_alloc_failed;
	}
	ret = sps_get_config(bam_tx_pipe, &tx_connection);
	if (ret) {
		pr_err("%s: tx get config failed %d\n", __func__, ret);
		goto tx_get_config_failed;
	}

	tx_connection.source = SPS_DEV_HANDLE_MEM;
	tx_connection.src_pipe_index = 0;
	tx_connection.destination = h;
	tx_connection.dest_pipe_index = 4;
	tx_connection.mode = SPS_MODE_DEST;
	tx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
					SPS_O_ACK_TRANSFERS;
	tx_desc_mem_buf.size = 0x800; /* 2k */
	tx_desc_mem_buf.base = dma_alloc_coherent(NULL, tx_desc_mem_buf.size,
							&dma_addr, 0);
	if (tx_desc_mem_buf.base == NULL) {
		pr_err("%s: tx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto tx_mem_failed;
	}
	tx_desc_mem_buf.phys_base = dma_addr;
	memset(tx_desc_mem_buf.base, 0x0, tx_desc_mem_buf.size);
	tx_connection.desc = tx_desc_mem_buf;
	tx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_tx_pipe, &tx_connection);
	if (ret < 0) {
		pr_err("%s: tx connect error %d\n", __func__, ret);
		goto tx_connect_failed;
	}

	bam_rx_pipe = sps_alloc_endpoint();
	if (bam_rx_pipe == NULL) {
		pr_err("%s: rx alloc endpoint failed\n", __func__);
		ret = -ENOMEM;
		goto rx_alloc_failed;
	}
	ret = sps_get_config(bam_rx_pipe, &rx_connection);
	if (ret) {
		pr_err("%s: rx get config failed %d\n", __func__, ret);
		goto rx_get_config_failed;
	}

	rx_connection.source = h;
	rx_connection.src_pipe_index = 5;
	rx_connection.destination = SPS_DEV_HANDLE_MEM;
	rx_connection.dest_pipe_index = 1;
	rx_connection.mode = SPS_MODE_SRC;
	rx_connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
					SPS_O_ACK_TRANSFERS;
	rx_desc_mem_buf.size = 0x800; /* 2k */
	rx_desc_mem_buf.base = dma_alloc_coherent(NULL, rx_desc_mem_buf.size,
							&dma_addr, 0);
	if (rx_desc_mem_buf.base == NULL) {
		pr_err("%s: rx memory alloc failed\n", __func__);
		ret = -ENOMEM;
		goto rx_mem_failed;
	}
	rx_desc_mem_buf.phys_base = dma_addr;
	memset(rx_desc_mem_buf.base, 0x0, rx_desc_mem_buf.size);
	rx_connection.desc = rx_desc_mem_buf;
	rx_connection.event_thresh = 0x10;

	ret = sps_connect(bam_rx_pipe, &rx_connection);
	if (ret < 0) {
		pr_err("%s: rx connect error %d\n", __func__, ret);
		goto rx_connect_failed;
	}

	tx_register_event.options = SPS_O_EOT;
	tx_register_event.mode = SPS_TRIGGER_CALLBACK;
	tx_register_event.xfer_done = NULL;
	tx_register_event.callback = bam_mux_tx_notify;
	tx_register_event.user = NULL;
	ret = sps_register_event(bam_tx_pipe, &tx_register_event);
	if (ret < 0) {
		pr_err("%s: tx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	rx_register_event.options = SPS_O_EOT;
	rx_register_event.mode = SPS_TRIGGER_CALLBACK;
	rx_register_event.xfer_done = NULL;
	rx_register_event.callback = bam_mux_rx_notify;
	rx_register_event.user = NULL;
	ret = sps_register_event(bam_rx_pipe, &rx_register_event);
	if (ret < 0) {
		pr_err("%s: tx register event error %d\n", __func__, ret);
		goto rx_event_reg_failed;
	}

	bam_mux_initialized = 1;
	queue_rx();
	return;

/* fall-through cleanup: each label undoes one more setup step */
rx_event_reg_failed:
	sps_disconnect(bam_rx_pipe);
rx_connect_failed:
	dma_free_coherent(NULL, rx_desc_mem_buf.size, rx_desc_mem_buf.base,
				rx_desc_mem_buf.phys_base);
rx_mem_failed:
rx_get_config_failed:
	sps_free_endpoint(bam_rx_pipe);
rx_alloc_failed:
	sps_disconnect(bam_tx_pipe);
tx_connect_failed:
	dma_free_coherent(NULL, tx_desc_mem_buf.size, tx_desc_mem_buf.base,
				tx_desc_mem_buf.phys_base);
tx_mem_failed:
tx_get_config_failed:
	sps_free_endpoint(bam_tx_pipe);
tx_alloc_failed:
	sps_deregister_bam_device(h);
register_bam_failed:
	/*destroy_workqueue(bam_mux_workqueue);*/
	/*return ret;*/
	return;
}
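bam_init() is written as a work function, so something must queue it once the A2 side is ready. A plausible hook-up from the platform probe path is sketched below; the work item and probe function names are illustrative, not taken from this page:

static DECLARE_WORK(bam_init_work, bam_init);

static int bam_dmux_probe(struct platform_device *pdev)
{
	/* channel table and workqueue setup elided */
	schedule_work(&bam_init_work);	/* defer BAM/SPS bring-up to process context */
	return 0;
}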
Example 4
void sgdma_add_rx_desc(struct altera_tse_private *priv,
		       struct tse_buffer *rxbuffer)
{
	queue_rx(priv, rxbuffer);
}
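Note that queue_rx() here is a different helper from the bam_dmux one above: it records a software rx descriptor for the Altera SGDMA to service later. A minimal sketch of such a helper, assuming the private struct keeps pending buffers on a list (field names are assumptions, not necessarily the upstream driver's layout):

static void queue_rx(struct altera_tse_private *priv,
		     struct tse_buffer *buffer)
{
	/* assumed fields: tse_buffer.lh list node, priv->rxlist list head */
	list_add_tail(&buffer->lh, &priv->rxlist);
}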
Example 5
static void *rx_thread_entry(void *arg)
{
	device_pthread_setaffinity();

	int rc;

	const struct aiocb *list[1];
	list[0] = &rxaio;

	LOG_PRINTF("entry\n");
	while (usb_online) {

#if LOCAL_TRACE_RW
		LTRACEF("going to read packet\n");
#endif

#if 0
		rc = read(ep2out_fd, rx_buffer, sizeof(rx_buffer));
		LTRACEF("read rc %d\n", rc);
		if (rc < 0) {
			TRACEF("error reading packet\n");
		}
		if (rc > 0) {
			novacom_usbll_process_packet(usbll_handle, rx_buffer, rc);
		}
#else
		rc = queue_rx(ep2out_fd, rx_buffer, novacom_usbll_get_mtu(usbll_handle));
		if (rc < 0) {
			LOG_PRINTF("USB aio_read error, ret=%d, errno=%d\n", rc, errno);
			usleep(1000*GADGETFS_IO_RETRY_DELAY);
			continue;
		}

		struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 }; /* bounded wait; the 1 s value is arbitrary */
		rc = aio_suspend(list, 1, &timeout);
		while (rc < 0 && errno == EAGAIN) {
			LOG_PRINTF("USB aio_suspend (for read) error, ret=%d, errno=%d\n", rc, errno);
			rc = aio_suspend(list, 1, &timeout);
			if (rc >= 0) {
				LOG_PRINTF("USB aio_suspend (for read), ret=%d, errno=%d\n", rc, errno);
			}
			// the gadget_fs event should be the source to notify usb device offline
			///novacom_usbll_drop_offline(usbll_handle);
			// suspend after usbll_drop_offline ?????
			///aio_suspend(list, 1, NULL);
		}

		if (aio_error(&rxaio) != EINPROGRESS) {
			rc = aio_return(&rxaio);
#if LOCAL_TRACE_RW
		LTRACEF("rx successful: nbytes %d\n", rc);
#endif
			if (rc > 0) {
				rc = novacom_usbll_process_packet(usbll_handle, rx_buffer, rc);
				if (rc == PACKET_TYPE_BADPACKET) {
					LOG_PRINTF("Received bad packet, ret=%d, \n", rc);
				}	
			} else {
				/* online->offline transition */
				LOG_PRINTF("USB aio_return (for read) error, ret=%d, \n", rc);
				if( (usb_online != gadgetfs_online) && (true == usb_online) ) {
					int ret = platform_event_wait_timeout(&usb_online_event, TRANSPORT_RECOVERY_TIMEOUT * 1000*2);
					if (!ret) {
						LOG_PRINTF("platform_event_wait_timeout for usb_online_event, ret=%d\n", ret);
					}
					else {
						LOG_PRINTF("platform_event_wait_timeout for usb_online_event, ret=%d, ignored\n", ret);
					}					
				}
			}
		}
		else {
			LOG_PRINTF("we should never enter here (EINPROGRESS=%d), USB aio read seems to have problem!\n", EINPROGRESS);
		}
#endif
	}

	LOG_PRINTF("shutting down\n");
	platform_event_signal(&rx_shutdown_event);

	return NULL;
}
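In this userspace example, queue_rx() arms a POSIX asynchronous read on the endpoint file descriptor, which the loop then waits on with aio_suspend()/aio_error()/aio_return(). A sketch consistent with that usage (the helper body is an assumption, not this project's actual code):

/* requires <aio.h>; rxaio is the global struct aiocb polled above */
static int queue_rx(int fd, void *buf, size_t len)
{
	memset(&rxaio, 0, sizeof(rxaio));
	rxaio.aio_fildes = fd;
	rxaio.aio_buf = buf;
	rxaio.aio_nbytes = len;
	return aio_read(&rxaio);	/* 0 if queued, -1 on error */
}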