static int shm_rb_init(struct shm_rbctl *rbctl)
{
	/* the ring-buffer type is simply this entry's index in the shm_rbctl array */
	rbctl->rb_type = rbctl - shm_rbctl;

	/* map to non-cached virtual addresses */
	rbctl->skctl_va =
	    ioremap_nocache(rbctl->skctl_pa, sizeof(struct shm_skctl));
	if (!rbctl->skctl_va)
		goto exit1;

	rbctl->tx_va = ioremap_nocache(rbctl->tx_pa, rbctl->tx_total_size);
	if (!rbctl->tx_va)
		goto exit2;

	rbctl->rx_va = ioremap_nocache(rbctl->rx_pa, rbctl->rx_total_size);

	if (!rbctl->rx_va)
		goto exit3;

	shm_rb_data_init(rbctl);
	shm_rb_dump(rbctl);

	return 0;

exit3:
	iounmap(rbctl->tx_va);
exit2:
	iounmap(rbctl->skctl_va);
exit1:
	return -1;
}
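A minimal usage sketch (not taken from the driver): assuming shm_rbctl is a
global array whose physical addresses and sizes are filled in beforehand, each
entry can be mapped in turn and already-mapped entries torn down on failure.
The SHM_RB_MAX count and the shm_rb_exit() helper are assumptions for
illustration only.

static int shm_rb_init_all(void)
{
	int i, rc;

	for (i = 0; i < SHM_RB_MAX; i++) {
		rc = shm_rb_init(&shm_rbctl[i]);
		if (rc < 0)
			goto rollback;
	}
	return 0;

rollback:
	/* unmap the regions of every entry that was initialized successfully */
	while (--i >= 0)
		shm_rb_exit(&shm_rbctl[i]);
	return rc;
}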
Example #2
static void data_path_broadcast_msg(int proc)
{
	struct data_path *dp = &data_path;

	if (atomic_read(&dp->state) == dp_state_opened) {
		if (proc == MsocketLinkdownProcId) {
			/* make sure the tx/rx tasklets are stopped */
			tasklet_disable(&dp->tx_tl);
			/*
			 * tx tasklet is completely stopped
			 * purge the skb list
			 */
			tx_q_clean(dp);
			tasklet_enable(&dp->tx_tl);

			/*
			 * disabling and immediately re-enabling the rx tasklet
			 * waits for any run currently in progress to finish
			 */
			tasklet_disable(&dp->rx_tl);
			tasklet_enable(&dp->rx_tl);

			if (dp->cbs && dp->cbs->link_down)
				dp->cbs->link_down();
		} else if (proc == MsocketLinkupProcId) {
			/*
			 * At this point neither AP nor CP will send packets
			 * to, or receive packets from, the ring buffer, so
			 * clean up any packets left in it and reset the key
			 * data structures to their initial state; otherwise
			 * the user-space process and CP may run into errors.
			 */
			shm_rb_data_init(dp->rbctl);
			if (dp->cbs && dp->cbs->link_up)
				dp->cbs->link_up();
		}
	}
}
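For context, a rough sketch of a possible call site (hypothetical, for
illustration only): a link-status handler forwards the msocket broadcast ID so
the data path quiesces its tasklets and purges the tx queue on link-down, and
resets the ring buffer on link-up. The notify_cp_link_status() wrapper is an
assumption, not part of the driver.

static void notify_cp_link_status(bool cp_alive)
{
	if (cp_alive)
		data_path_broadcast_msg(MsocketLinkupProcId);
	else
		data_path_broadcast_msg(MsocketLinkdownProcId);
}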