Code example #1
/**
 * _rmnet_map_ingress_handler() - Actual MAP ingress handler
 * @skb:        Packet being received
 * @config:     Physical endpoint configuration for the ingress device
 *
 * Most MAP ingress processing is done here. Packets are processed
 * individually; aggregated packets should use rmnet_map_ingress_handler()
 *
 * Return:
 *      - RX_HANDLER_CONSUMED if packet is dropped
 *      - result of __rmnet_deliver_skb() for all other cases
 */
static rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
					    struct rmnet_phys_ep_conf_s *config)
{
	struct rmnet_logical_ep_conf_s *ep;
	uint8_t mux_id;
	uint16_t len;
	int ckresult;

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	len = RMNET_MAP_GET_LENGTH(skb)
			- RMNET_MAP_GET_PAD(skb)
			- config->tail_spacing;

	if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) {
		LOGD("Got packet on %s with bad mux id %d",
			skb->dev->name, mux_id);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX);
		return RX_HANDLER_CONSUMED;
	}

	ep = &(config->muxed_ep[mux_id]);

	if (!ep->refcount) {
		LOGD("Packet on %s:%d; has no logical endpoint config",
		     skb->dev->name, mux_id);

		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP);
		return RX_HANDLER_CONSUMED;
	}

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
		skb->dev = ep->egress_dev;

	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)) {
		ckresult = rmnet_map_checksum_downlink_packet(skb);
		trace_rmnet_map_checksum_downlink_packet(skb, ckresult);
		rmnet_stats_dl_checksum(ckresult);
		if (likely((ckresult == RMNET_MAP_CHECKSUM_OK)
			    || (ckresult == RMNET_MAP_CHECKSUM_SKIPPED)))
			skb->ip_summed |= CHECKSUM_UNNECESSARY;
		else if (ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION
			&& ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT
			&& ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET
			&& ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) {
			rmnet_kfree_skb(skb,
				RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM);
			return RX_HANDLER_CONSUMED;
		}
	}

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header_s));
	skb_trim(skb, len);
	__rmnet_data_set_skb_proto(skb);

	return __rmnet_deliver_skb(skb, ep);
}
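For reference, the RMNET_MAP_GET_* accessors used above resolve to fields of
the 4-byte MAP header that prefixes every frame. A sketch of that layout,
assuming the little-endian bitfield ordering (the authoritative definition
lives in rmnet_map.h):

struct rmnet_map_header_s {
	uint8_t  pad_len:6;	/* trailing pad bytes counted in pkt_len */
	uint8_t  reserved_bit:1;
	uint8_t  cd_bit:1;	/* 1 = command frame, 0 = data frame */
	uint8_t  mux_id;	/* selects the logical endpoint */
	uint16_t pkt_len;	/* payload + padding, network byte order */
} __aligned(1);

#define RMNET_MAP_GET_MUX_ID(Y) \
	(((struct rmnet_map_header_s *)(Y)->data)->mux_id)
#define RMNET_MAP_GET_PAD(Y) \
	(((struct rmnet_map_header_s *)(Y)->data)->pad_len)
#define RMNET_MAP_GET_LENGTH(Y) \
	(ntohs(((struct rmnet_map_header_s *)(Y)->data)->pkt_len))

This is why the handler computes len as the MAP length minus padding minus
tail_spacing before trimming the skb.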
Code example #2

/**
 * rmnet_map_do_flow_control() - Process MAP flow control command
 * @skb: Socket buffer containing the MAP flow control message
 * @config: Physical end-point configuration of ingress device
 * @enable: boolean for enable/disable
 *
 * Process in-band MAP flow control messages. Assumes the mux ID is mapped to
 * an RmNet Data virtual network device.
 *
 * Return:
 *      - RX_HANDLER_CONSUMED if the packet is dropped (bad mux ID or missing
 *        logical endpoint configuration)
 *      - RMNET_MAP_COMMAND_UNSUPPORTED on any other error
 *      - RMNET_MAP_COMMAND_ACK on success
 */
static uint8_t rmnet_map_do_flow_control(struct sk_buff *skb,
					 struct rmnet_phys_ep_conf_s *config,
					 int enable)
{
	struct rmnet_map_control_command_s *cmd;
	struct net_device *vnd;
	struct rmnet_logical_ep_conf_s *ep;
	uint8_t mux_id;
	uint16_t  ip_family;
	uint16_t  fc_seq;
	uint32_t  qos_id;
	int r;

	if (unlikely(!skb || !config))
		BUG();

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	cmd = RMNET_MAP_GET_CMD_START(skb);

	if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) {
		LOGD("Got packet on %s with bad mux id %d",
		     skb->dev->name, mux_id);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_BAD_MUX);
		return RX_HANDLER_CONSUMED;
	}

	ep = &(config->muxed_ep[mux_id]);

	if (!ep->refcount) {
		LOGD("Packet on %s:%d; has no logical endpoint config",
		     skb->dev->name, mux_id);

		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_MUX_NO_EP);
		return RX_HANDLER_CONSUMED;
	}

	vnd = ep->egress_dev;

	ip_family = cmd->flow_control.ip_family;
	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
	qos_id = ntohl(cmd->flow_control.qos_id);

	/* Ignore the ip family and pass the sequence number for both v4 and v6
	 * sequence. User space does not support creating dedicated flows for
	 * the 2 protocols
	 */
	r = rmnet_vnd_do_flow_control(vnd, qos_id, fc_seq, fc_seq, enable);
	LOGD("dev:%s, qos_id:0x%08X, ip_family:%hd, fc_seq %hd, en:%d",
	     skb->dev->name, qos_id, ip_family & 3, fc_seq, enable);

	if (r)
		return RMNET_MAP_COMMAND_UNSUPPORTED;
	else
		return RMNET_MAP_COMMAND_ACK;
}
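For context, the enable flag above comes from the MAP command dispatcher. A
minimal sketch of how rmnet_map_command() plausibly routes flow-control
commands here; the RMNET_MAP_COMMAND_FLOW_* constants, the
RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED stat, and the rmnet_map_send_ack()
helper are assumptions based on the MAP command set, not verbatim driver
code:

rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
				      struct rmnet_phys_ep_conf_s *config)
{
	struct rmnet_map_control_command_s *cmd;
	uint8_t rc = RMNET_MAP_COMMAND_UNSUPPORTED;

	cmd = RMNET_MAP_GET_CMD_START(skb);
	switch (cmd->command_name) {
	case RMNET_MAP_COMMAND_FLOW_ENABLE:
		rc = rmnet_map_do_flow_control(skb, config, 1);
		break;
	case RMNET_MAP_COMMAND_FLOW_DISABLE:
		rc = rmnet_map_do_flow_control(skb, config, 0);
		break;
	default:
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED);
		break;
	}

	/* Successful commands are acknowledged back to the endpoint */
	if (rc == RMNET_MAP_COMMAND_ACK)
		rmnet_map_send_ack(skb, rc);
	return RX_HANDLER_CONSUMED;
}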
Code example #3
/**
 * __rmnet_deliver_skb() - Deliver skb
 * @skb:        Packet to deliver
 * @ep:         Logical endpoint to deliver the packet on
 *
 * Determines where to deliver skb. Options are: consume by network stack,
 * pass to bridge handler, or pass to virtual network device
 *
 * Return:
 *      - RX_HANDLER_CONSUMED if packet forwarded or dropped
 *      - RX_HANDLER_PASS if packet is to be consumed by network stack as-is
 */
static rx_handler_result_t __rmnet_deliver_skb(struct sk_buff *skb,
					 struct rmnet_logical_ep_conf_s *ep)
{
	trace___rmnet_deliver_skb(skb);
	switch (ep->rmnet_mode) {
	case RMNET_EPMODE_NONE:
		return RX_HANDLER_PASS;

	case RMNET_EPMODE_BRIDGE:
		return rmnet_bridge_handler(skb, ep);

	case RMNET_EPMODE_VND:
		skb_reset_transport_header(skb);
		skb_reset_network_header(skb);
		switch (rmnet_vnd_rx_fixup(skb, skb->dev)) {
		case RX_HANDLER_CONSUMED:
			return RX_HANDLER_CONSUMED;

		case RX_HANDLER_PASS:
			skb->pkt_type = PACKET_HOST;
			netif_receive_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		return RX_HANDLER_PASS;

	default:
		LOGD("Unkown ep mode %d", ep->rmnet_mode);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_DELIVER_NO_EP);
		return RX_HANDLER_CONSUMED;
	}
}
Code example #4
/**
 * rmnet_map_deaggregate() - Deaggregates a single packet
 * @skb:        Source socket buffer containing multiple MAP frames
 * @config:     Physical endpoint configuration of the ingress device
 *
 * Source skb is cloned with skb_clone(). The new skb data and tail pointers are
 * modified to contain a single MAP frame. Clone happens with GFP_ATOMIC flags
 * set. User should keep calling deaggregate() on the source skb until 0 is
 * returned, indicating that there are no more packets to deaggregate.
 *
 * Return:
 *     - Pointer to new skb
 *     - 0 (null) if no more aggregated packets
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_phys_ep_conf_s *config)
{
	struct sk_buff *skbn;
	struct rmnet_map_header_s *maph;
	uint32_t packet_len;
	uint8_t ip_byte;

	if (skb->len == 0)
		return 0;

	maph = (struct rmnet_map_header_s *) skb->data;
	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header_s);

	if (skb->len < packet_len) {
		LOGM("%s", "Got malformed packet. Dropping");
		return 0;
	}

	skbn = skb_clone(skb, GFP_ATOMIC);
	if (!skbn)
		return 0;

	LOGD("Trimming to %d bytes", packet_len);
	LOGD("before skbn->len = %d", skbn->len);
	skb_trim(skbn, packet_len);
	skb_pull(skb, packet_len);
	LOGD("after skbn->len = %d", skbn->len);

	/* Some hardware can send us empty frames. Catch them */
	if (ntohs(maph->pkt_len) == 0) {
		LOGD("Dropping empty MAP frame");
		rmnet_kfree_skb(skbn, RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0);
		return 0;
	}

	/* Sanity check: the first byte past the 4-byte MAP header must carry
	 * an IPv4 or IPv6 version nibble
	 */
	ip_byte = (skbn->data[4]) & 0xF0;
	if (ip_byte != 0x40 && ip_byte != 0x60) {
		LOGM("Unknown IP type: 0x%02X", ip_byte);
		rmnet_kfree_skb(skbn, RMNET_STATS_SKBFREE_DEAGG_UNKOWN_IP_TYP);
		return 0;
	}

	return skbn;
}
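The calling contract described above (keep pulling frames until NULL comes
back, then free the spent aggregate) looks like this in practice; a condensed
sketch of the loop that example #8 below implements for real:

	struct sk_buff *skbn;

	while ((skbn = rmnet_map_deaggregate(skb, config)) != NULL)
		_rmnet_map_ingress_handler(skbn, config); /* one frame each */

	/* The source skb is not consumed; the caller must free it */
	rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF);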
Code example #5
/**
 * rmnet_bridge_handler() - Bridge related functionality
 * @skb:        Packet to bridge to the egress device
 * @ep:         Logical endpoint configured in bridge mode
 *
 * Return:
 *      - RX_HANDLER_CONSUMED in all cases
 */
static rx_handler_result_t rmnet_bridge_handler(struct sk_buff *skb,
					struct rmnet_logical_ep_conf_s *ep)
{
	if (!ep->egress_dev) {
		LOGD("Missing egress device for packet arriving on %s",
		     skb->dev->name);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_BRDG_NO_EGRESS);
	} else {
		rmnet_egress_handler(skb, ep);
	}

	return RX_HANDLER_CONSUMED;
}
Code example #6
/**
 * rmnet_egress_handler() - Egress handler entry point
 * @skb:        packet to transmit
 * @ep:         logical endpoint configuration of the packet originator
 *              (e.g., an RmNet virtual network device)
 *
 * Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
void rmnet_egress_handler(struct sk_buff *skb,
			  struct rmnet_logical_ep_conf_s *ep)
{
	struct rmnet_phys_ep_conf_s *config;
	struct net_device *orig_dev;
	int rc;

	orig_dev = skb->dev;
	skb->dev = ep->egress_dev;

	config = (struct rmnet_phys_ep_conf_s *)
		rcu_dereference(skb->dev->rx_handler_data);

	if (!config) {
		LOGD("%s is not associated with rmnet_data", skb->dev->name);
		kfree_skb(skb);
		return;
	}

	LOGD("Packet going out on %s with egress format 0x%08X",
	     skb->dev->name, config->egress_data_format);

	if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
		switch (rmnet_map_egress_handler(skb, config, ep, orig_dev)) {
		case RMNET_MAP_CONSUMED:
			LOGD("%s", "MAP process consumed packet");
			return;

		case RMNET_MAP_SUCCESS:
			break;

		default:
			LOGD("MAP egress failed on packet on %s",
			     skb->dev->name);
			rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_EGR_MAPFAIL);
			return;
		}
	}

	if (ep->rmnet_mode == RMNET_EPMODE_VND)
		rmnet_vnd_tx_fixup(skb, orig_dev);

	rmnet_print_packet(skb, skb->dev->name, 't');
	trace_rmnet_egress_handler(skb);
	rc = dev_queue_xmit(skb);
	if (rc != 0) {
		LOGD("Failed to queue packet for transmission on [%s]",
		      skb->dev->name);
	}
	rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_EGRESS);
}
Code example #7
/**
 * rmnet_ingress_deliver_packet() - Ingress handler for raw IP and bridged
 *                                  MAP packets.
 * @skb:     Packet needing a destination.
 * @config:  Physical end point configuration that the packet arrived on.
 *
 * Return:
 *      - RX_HANDLER_CONSUMED if packet forwarded/dropped
 *      - RX_HANDLER_PASS if packet should be passed up the stack by caller
 */
static rx_handler_result_t rmnet_ingress_deliver_packet(struct sk_buff *skb,
					    struct rmnet_phys_ep_conf_s *config)
{
	if (!config) {
		LOGD("%s", "NULL physical EP provided");
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	if (!(config->local_ep.refcount)) {
		LOGD("Packet on %s has no local endpoint configuration",
			skb->dev->name);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_IPINGRESS_NO_EP);
		return RX_HANDLER_CONSUMED;
	}

	skb->dev = config->local_ep.egress_dev;

	return __rmnet_deliver_skb(skb, &(config->local_ep));
}
Code example #8
/**
 * rmnet_map_ingress_handler() - MAP ingress handler
 * @skb:        Packet being received
 * @config:     Physical endpoint configuration for the ingress device
 *
 * Called if and only if MAP is configured in the ingress device's ingress data
 * format. Deaggregation is done here, actual MAP processing is done in
 * _rmnet_map_ingress_handler().
 *
 * Return:
 *      - RX_HANDLER_CONSUMED for aggregated packets
 *      - RX_HANDLER_CONSUMED for dropped packets
 *      - result of _rmnet_map_ingress_handler() for all other cases
 */
static rx_handler_result_t rmnet_map_ingress_handler(struct sk_buff *skb,
					    struct rmnet_phys_ep_conf_s *config)
{
	struct sk_buff *skbn;
	int rc, co = 0;

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
		trace_rmnet_start_deaggregation(skb);
		while ((skbn = rmnet_map_deaggregate(skb, config)) != 0) {
			_rmnet_map_ingress_handler(skbn, config);
			co++;
		}
		trace_rmnet_end_deaggregation(skb, co);
		LOGD("De-aggregated %d packets", co);
		rmnet_stats_deagg_pkts(co);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF);
		rc = RX_HANDLER_CONSUMED;
	} else {
		rc = _rmnet_map_ingress_handler(skb, config);
	}

	return rc;
}
Code example #9
/**
 * rmnet_ingress_handler() - Ingress handler entry point
 * @skb: Packet being received
 *
 * Processes packet as per ingress data format for receiving device. Logical
 * endpoint is determined from packet inspection. Packet is then sent to the
 * egress device listed in the logical endpoint configuration.
 *
 * Return:
 *      - RX_HANDLER_PASS if packet is not processed by handler (caller must
 *        deal with the packet)
 *      - RX_HANDLER_CONSUMED if packet is forwarded or processed by MAP
 */
rx_handler_result_t rmnet_ingress_handler(struct sk_buff *skb)
{
	struct rmnet_phys_ep_conf_s *config;
	struct net_device *dev;
	int rc;

	if (!skb)
		BUG();

	dev = skb->dev;
	trace_rmnet_ingress_handler(skb);
	rmnet_print_packet(skb, dev->name, 'r');

	config = (struct rmnet_phys_ep_conf_s *)
		rcu_dereference(skb->dev->rx_handler_data);

	if (!config) {
		LOGD("%s is not associated with rmnet_data", skb->dev->name);
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	/* Sometimes devices operate in ethernet mode even though there is no
	 * ethernet header. This causes the skb->protocol to contain a bogus
	 * value and the skb->data pointer to be off by 14 bytes. Fix it if
	 * configured to do so
	 */
	if (config->ingress_data_format & RMNET_INGRESS_FIX_ETHERNET) {
		skb_push(skb, RMNET_ETHERNET_HEADER_LENGTH);
		__rmnet_data_set_skb_proto(skb);
	}

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) {
		if (RMNET_MAP_GET_CD_BIT(skb)) {
			if (config->ingress_data_format
			    & RMNET_INGRESS_FORMAT_MAP_COMMANDS) {
				rc = rmnet_map_command(skb, config);
			} else {
				LOGM("MAP command packet on %s; %s", dev->name,
				     "Not configured for MAP commands");
				rmnet_kfree_skb(skb,
				   RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPC);
				return RX_HANDLER_CONSUMED;
			}
		} else {
			rc = rmnet_map_ingress_handler(skb, config);
		}
	} else {
		switch (ntohs(skb->protocol)) {
		case ETH_P_MAP:
			if (config->local_ep.rmnet_mode ==
				RMNET_EPMODE_BRIDGE) {
				rc = rmnet_ingress_deliver_packet(skb, config);
			} else {
				LOGD("MAP packet on %s; MAP not set",
					dev->name);
				rmnet_kfree_skb(skb,
				   RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPD);
				rc = RX_HANDLER_CONSUMED;
			}
			break;

		case ETH_P_ARP:
		case ETH_P_IP:
		case ETH_P_IPV6:
			rc = rmnet_ingress_deliver_packet(skb, config);
			break;

		default:
			LOGD("Unknown skb->proto 0x%04X\n",
				ntohs(skb->protocol) & 0xFFFF);
			rc = RX_HANDLER_PASS;
		}
	}

	return rc;
}
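The config pointer fetched via rcu_dereference(skb->dev->rx_handler_data)
above is whatever was registered on the physical device with the kernel's
netdev_rx_handler_register() API. A minimal sketch of that attach step; the
wrapper and function names here are hypothetical, only the registration call
itself is the real kernel API:

/* rx_handler callbacks receive a struct sk_buff ** from the core */
static rx_handler_result_t rmnet_rx_priv_handler(struct sk_buff **pskb)
{
	return rmnet_ingress_handler(*pskb);
}

static int rmnet_attach_phys_dev(struct net_device *dev,
				 struct rmnet_phys_ep_conf_s *config)
{
	int rc;

	/* netdev_rx_handler_register() must be called under RTNL; it stores
	 * config in dev->rx_handler_data for the handlers above to fetch.
	 */
	rtnl_lock();
	rc = netdev_rx_handler_register(dev, rmnet_rx_priv_handler, config);
	rtnl_unlock();
	return rc;
}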
Code example #10
/**
 * rmnet_map_aggregate() - Software aggregation of multiple packets
 * @skb:        current packet being transmitted
 * @config:     Physical endpoint configuration of the egress device
 *
 * Aggregates multiple SKBs into a single large SKB for transmission. MAP
 * protocol is used to separate the packets in the buffer. This function
 * consumes the argument SKB and the caller must not touch it afterwards.
 */
void rmnet_map_aggregate(struct sk_buff *skb,
			 struct rmnet_phys_ep_conf_s *config)
{
	uint8_t *dest_buff;
	struct agg_work *work;
	unsigned long flags;
	struct sk_buff *agg_skb;
	int size, rc;

	if (!skb || !config)
		BUG();

	size = config->egress_agg_size - skb->len;

	if (size < 2000) {
		LOGL("Invalid length %d", size);
		return;
	}

new_packet:
	spin_lock_irqsave(&config->agg_lock, flags);
	if (!config->agg_skb) {
		config->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
		if (!config->agg_skb) {
			config->agg_skb = 0;
			config->agg_count = 0;
			spin_unlock_irqrestore(&config->agg_lock, flags);
			rmnet_stats_agg_pkts(1);
			rc = dev_queue_xmit(skb);
			rmnet_stats_queue_xmit(rc,
				RMNET_STATS_QUEUE_XMIT_AGG_CPY_EXP_FAIL);
			return;
		}
		config->agg_count = 1;
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_CPY_EXPAND);
		goto schedule;
	}

	if (skb->len > (config->egress_agg_size - config->agg_skb->len)) {
		rmnet_stats_agg_pkts(config->agg_count);
		if (config->agg_count > 1)
			LOGL("Agg count: %d", config->agg_count);
		agg_skb = config->agg_skb;
		config->agg_skb = 0;
		config->agg_count = 0;
		spin_unlock_irqrestore(&config->agg_lock, flags);
		rc = dev_queue_xmit(agg_skb);
		rmnet_stats_queue_xmit(rc,
					RMNET_STATS_QUEUE_XMIT_AGG_FILL_BUFFER);
		goto new_packet;
	}

	dest_buff = skb_put(config->agg_skb, skb->len);
	memcpy(dest_buff, skb->data, skb->len);
	config->agg_count++;
	rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_INTO_BUFF);

schedule:
	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
		work = (struct agg_work *)
			kmalloc(sizeof(struct agg_work), GFP_ATOMIC);
		if (!work) {
			LOGE("Failed to allocate work item for packet %s",
			     "transfer. DATA PATH LIKELY BROKEN!");
			config->agg_state = RMNET_MAP_AGG_IDLE;
			spin_unlock_irqrestore(&config->agg_lock, flags);
			return;
		}
		INIT_DELAYED_WORK((struct delayed_work *)work,
				  rmnet_map_flush_packet_queue);
		work->config = config;
		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
		schedule_delayed_work((struct delayed_work *)work, 1);
	}
	spin_unlock_irqrestore(&config->agg_lock, flags);
	return;
}
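The delayed work scheduled above drains a half-filled aggregation buffer if
no more packets arrive to top it up. A sketch of what the flush worker has to
do, reconstructed from the agg_skb/agg_count/agg_state protocol in this
function; the struct agg_work layout is implied by the casts above, while the
body and the RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT stat are assumptions rather
than the verbatim driver source:

struct agg_work {
	struct delayed_work work;	/* must be first; see the casts above */
	struct rmnet_phys_ep_conf_s *config;
};

static void rmnet_map_flush_packet_queue(struct work_struct *work)
{
	struct agg_work *real_work = (struct agg_work *)work;
	struct rmnet_phys_ep_conf_s *config = real_work->config;
	struct sk_buff *skb = NULL;
	unsigned long flags;
	int rc, agg_count = 0;

	spin_lock_irqsave(&config->agg_lock, flags);
	if (config->agg_state == RMNET_MAP_TXFER_SCHEDULED) {
		/* The buffer may already have been shipped out by
		 * rmnet_map_aggregate() filling it first
		 */
		if (config->agg_skb) {
			skb = config->agg_skb;
			agg_count = config->agg_count;
			config->agg_skb = 0;
			config->agg_count = 0;
		}
		config->agg_state = RMNET_MAP_AGG_IDLE;
	}
	spin_unlock_irqrestore(&config->agg_lock, flags);

	if (skb) {
		rmnet_stats_agg_pkts(agg_count);
		rc = dev_queue_xmit(skb);
		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
	}
	kfree(work);
}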