Example #1
/**
 * rmnet_egress_handler() - Egress handler entry point
 * @skb:        packet to transmit
 * @ep:         logical endpoint configuration of the packet originator
 *              (e.g., an RmNet virtual network device)
 *
 * Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
void rmnet_egress_handler(struct sk_buff *skb,
			  struct rmnet_logical_ep_conf_s *ep)
{
	struct rmnet_phys_ep_conf_s *config;
	struct net_device *orig_dev;
	int rc;

	orig_dev = skb->dev;
	skb->dev = ep->egress_dev;

	config = (struct rmnet_phys_ep_conf_s *)
		rcu_dereference(skb->dev->rx_handler_data);

	if (!config) {
		LOGD("%s is not associated with rmnet_data", skb->dev->name);
		kfree_skb(skb);
		return;
	}

	LOGD("Packet going out on %s with egress format 0x%08X",
	     skb->dev->name, config->egress_data_format);

	if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
		switch (rmnet_map_egress_handler(skb, config, ep, orig_dev)) {
		case RMNET_MAP_CONSUMED:
			LOGD("%s", "MAP process consumed packet");
			return;

		case RMNET_MAP_SUCCESS:
			break;

		default:
			LOGD("MAP egress failed on packet on %s",
			     skb->dev->name);
			rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_EGR_MAPFAIL);
			return;
		}
	}

	if (ep->rmnet_mode == RMNET_EPMODE_VND)
		rmnet_vnd_tx_fixup(skb, orig_dev);

	rmnet_print_packet(skb, skb->dev->name, 't');
	trace_rmnet_egress_handler(skb);
	rc = dev_queue_xmit(skb);
	if (rc != 0) {
		LOGD("Failed to queue packet for transmission on [%s]",
		      skb->dev->name);
	}
	rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_EGRESS);
}
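
For context, the egress handler above is normally reached from the RmNet virtual device's transmit path. The sketch below is illustrative only: the private-data structure name rmnet_vnd_private_s, its local_ep member, and the sketch function name are assumptions about the surrounding driver, not code shown in this section.

/* Minimal sketch of a virtual-device ndo_start_xmit that hands packets to
 * rmnet_egress_handler(). The struct rmnet_vnd_private_s layout and its
 * local_ep field are assumed here for illustration.
 */
static netdev_tx_t rmnet_vnd_start_xmit_sketch(struct sk_buff *skb,
					       struct net_device *dev)
{
	struct rmnet_vnd_private_s *dev_conf = netdev_priv(dev);

	if (dev_conf->local_ep.egress_dev) {
		/* The handler consumes the skb in all cases */
		rmnet_egress_handler(skb, &dev_conf->local_ep);
	} else {
		dev->stats.tx_dropped++;
		kfree_skb(skb);
	}

	return NETDEV_TX_OK;
}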
Example #2
/**
 * rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
 * @work:        struct agg_work containing the delayed work item and the
 *               physical endpoint configuration whose buffer is flushed
 *
 * This function is scheduled to run in a specified number of jiffies after
 * the last frame transmitted by the network stack. When run, the buffer
 * containing aggregated packets is finally transmitted on the underlying link.
 *
 */
static void rmnet_map_flush_packet_queue(struct work_struct *work)
{
	struct agg_work *real_work;
	struct rmnet_phys_ep_conf_s *config;
	unsigned long flags;
	struct sk_buff *skb;
	int rc;

	skb = NULL;
	real_work = (struct agg_work *)work;
	config = real_work->config;
	LOGD("%s", "Entering flush thread");
	spin_lock_irqsave(&config->agg_lock, flags);
	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
		/* Buffer may have already been shipped out */
		if (likely(config->agg_skb)) {
			rmnet_stats_agg_pkts(config->agg_count);
			if (config->agg_count > 1)
				LOGL("Agg count: %d", config->agg_count);
			skb = config->agg_skb;
			config->agg_skb = NULL;
		}
		config->agg_state = RMNET_MAP_AGG_IDLE;
	} else {
		/* How did we get here? */
		LOGE("Ran queued command when state %s",
			"is idle. State machine likely broken");
	}

	spin_unlock_irqrestore(&config->agg_lock, flags);
	if (skb) {
		rc = dev_queue_xmit(skb);
		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
	}
	kfree(work);
}
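
The cast from the work_struct pointer back to struct agg_work in the flush handler only works if the delayed work item is the first member of the aggregation work structure. The definition below is a sketch of that assumed layout (the real field names are not shown in this section); with it, the same pointer can be recovered more explicitly via to_delayed_work() and container_of():

/* Assumed layout: the delayed work item must be the first member for the
 * (struct agg_work *)work cast in rmnet_map_flush_packet_queue() to be valid.
 */
struct agg_work {
	struct delayed_work work;
	struct rmnet_phys_ep_conf_s *config;
};

/* More explicit alternative to the raw cast (field name 'work' assumed) */
static struct agg_work *to_agg_work(struct work_struct *w)
{
	return container_of(to_delayed_work(w), struct agg_work, work);
}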
Example #3
/**
 * rmnet_map_aggregate() - Software aggregation of multiple packets
 * @skb:        current packet being transmitted
 * @config:     physical endpoint configuration of the egress device
 *
 * Aggregates multiple SKBs into a single large SKB for transmission. The MAP
 * protocol is used to separate the packets in the buffer. This function
 * consumes the argument SKB, so the caller must not touch it afterwards.
 */
void rmnet_map_aggregate(struct sk_buff *skb,
			 struct rmnet_phys_ep_conf_s *config)
{
	uint8_t *dest_buff;
	struct agg_work *work;
	unsigned long flags;
	struct sk_buff *agg_skb;
	int size, rc;

	if (!skb || !config)
		BUG();
	size = config->egress_agg_size - skb->len;

	if (size < 2000) {
		LOGL("Invalid length %d", size);
		/* This function consumes the SKB; free it on early return */
		kfree_skb(skb);
		return;
	}

new_packet:
	spin_lock_irqsave(&config->agg_lock, flags);
	if (!config->agg_skb) {
		config->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
		if (!config->agg_skb) {
			config->agg_skb = NULL;
			config->agg_count = 0;
			spin_unlock_irqrestore(&config->agg_lock, flags);
			rmnet_stats_agg_pkts(1);
			rc = dev_queue_xmit(skb);
			rmnet_stats_queue_xmit(rc,
				RMNET_STATS_QUEUE_XMIT_AGG_CPY_EXP_FAIL);
			return;
		}
		config->agg_count = 1;
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_CPY_EXPAND);
		goto schedule;
	}

	if (skb->len > (config->egress_agg_size - config->agg_skb->len)) {
		rmnet_stats_agg_pkts(config->agg_count);
		if (config->agg_count > 1)
			LOGL("Agg count: %d", config->agg_count);
		agg_skb = config->agg_skb;
		config->agg_skb = NULL;
		config->agg_count = 0;
		spin_unlock_irqrestore(&config->agg_lock, flags);
		rc = dev_queue_xmit(agg_skb);
		rmnet_stats_queue_xmit(rc,
					RMNET_STATS_QUEUE_XMIT_AGG_FILL_BUFFER);
		goto new_packet;
	}

	dest_buff = skb_put(config->agg_skb, skb->len);
	memcpy(dest_buff, skb->data, skb->len);
	config->agg_count++;
	rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_INTO_BUFF);

schedule:
	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			LOGE("Failed to allocate work item for packet %s",
			     "transfer. DATA PATH LIKELY BROKEN!");
			config->agg_state = RMNET_MAP_AGG_IDLE;
			spin_unlock_irqrestore(&config->agg_lock, flags);
			return;
		}
		INIT_DELAYED_WORK((struct delayed_work *)work,
				  rmnet_map_flush_packet_queue);
		work->config = config;
		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
		schedule_delayed_work((struct delayed_work *)work, 1);
	}
	spin_unlock_irqrestore(&config->agg_lock, flags);
}
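
To tie the three examples together, the sketch below shows how a MAP egress handler might hand a framed packet to rmnet_map_aggregate() when aggregation is enabled. The RMNET_EGRESS_FORMAT_AGGREGATION flag and the overall handler shape are assumptions for illustration; only the return codes and rmnet_map_aggregate() itself appear in this section.

/* Illustrative only: routing a MAP-framed packet into the aggregation path.
 * RMNET_EGRESS_FORMAT_AGGREGATION and this handler shape are assumptions.
 */
static int rmnet_map_egress_handler_sketch(struct sk_buff *skb,
					   struct rmnet_phys_ep_conf_s *config,
					   struct rmnet_logical_ep_conf_s *ep,
					   struct net_device *orig_dev)
{
	/* ...MAP header is assumed to have been prepended to skb here... */

	if (config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
		/* rmnet_map_aggregate() consumes the skb in every case */
		rmnet_map_aggregate(skb, config);
		return RMNET_MAP_CONSUMED;
	}

	return RMNET_MAP_SUCCESS;
}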