/**
 * _rmnet_map_ingress_handler() - Actual MAP ingress handler
 * @skb:        Packet being received
 * @config:     Physical endpoint configuration for the ingress device
 *
 * Most MAP ingress functions are processed here. Packets are processed
 * individually; aggregated packets should use rmnet_map_ingress_handler()
 *
 * Return:
 *      - RX_HANDLER_CONSUMED if packet is dropped
 *      - result of __rmnet_deliver_skb() for all other cases
 */
static rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
					    struct rmnet_phys_ep_conf_s *config)
{
	struct rmnet_logical_ep_conf_s *ep;
	uint8_t mux_id;
	uint16_t len;
	int ckresult;

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	len = RMNET_MAP_GET_LENGTH(skb)
			- RMNET_MAP_GET_PAD(skb)
			- config->tail_spacing;

	if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) {
		LOGD("Got packet on %s with bad mux id %d",
			skb->dev->name, mux_id);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX);
		return RX_HANDLER_CONSUMED;
	}

	ep = &(config->muxed_ep[mux_id]);

	if (!ep->refcount) {
		LOGD("Packet on %s:%d; has no logical endpoint config",
		     skb->dev->name, mux_id);

		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP);
		return RX_HANDLER_CONSUMED;
	}

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
		skb->dev = ep->egress_dev;

	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)) {
		ckresult = rmnet_map_checksum_downlink_packet(skb);
		trace_rmnet_map_checksum_downlink_packet(skb, ckresult);
		rmnet_stats_dl_checksum(ckresult);
		if (likely((ckresult == RMNET_MAP_CHECKSUM_OK)
			    || (ckresult == RMNET_MAP_CHECKSUM_SKIPPED)))
			/* ip_summed holds an enum value, not a bitmask */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else if (ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION
			&& ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT
			&& ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET
			&& ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) {
			rmnet_kfree_skb(skb,
				RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM);
			return RX_HANDLER_CONSUMED;
		}
	}

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header_s));
	skb_trim(skb, len);
	__rmnet_data_set_skb_proto(skb);

	return __rmnet_deliver_skb(skb, ep);
}
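
For reference, the length arithmetic above parses the 4-byte MAP header that precedes every muxed frame. A minimal little-endian sketch of the header layout and accessor macros, modeled on the upstream rmnet_map.h (the real driver wraps the bitfields in endianness #ifdefs, so treat this as illustrative rather than the exact vendor definition):

/* Simplified little-endian sketch of the 4-byte MAP header */
struct rmnet_map_header {
	u8  pad_len:6;		/* bytes of trailing padding after payload */
	u8  reserved_bit:1;
	u8  cd_bit:1;		/* 1 = command frame, 0 = data frame */
	u8  mux_id;		/* selects the logical endpoint */
	__be16 pkt_len;		/* payload length, including padding */
}  __aligned(1);

/* Accessors used by the handlers in these examples */
#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
				 (Y)->data)->mux_id)
#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
				 (Y)->data)->cd_bit)
#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header *) \
			      (Y)->data)->pad_len)
#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
					(Y)->data)->pkt_len))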
/**
 * rmnet_map_do_flow_control() - Process MAP flow control command
 * @skb: Socket buffer containing the MAP flow control message
 * @config: Physical end-point configuration of ingress device
 * @enable: boolean for enable/disable
 *
 * Process in-band MAP flow control messages. Assumes the mux ID is mapped
 * to an RmNet Data virtual network device.
 *
 * Return:
 *      - RX_HANDLER_CONSUMED if the packet is dropped (bad mux ID or no
 *        logical endpoint configured)
 *      - RMNET_MAP_COMMAND_UNSUPPORTED on any other error
 *      - RMNET_MAP_COMMAND_ACK on success
 */
static uint8_t rmnet_map_do_flow_control(struct sk_buff *skb,
					 struct rmnet_phys_ep_conf_s *config,
					 int enable)
{
	struct rmnet_map_control_command_s *cmd;
	struct net_device *vnd;
	struct rmnet_logical_ep_conf_s *ep;
	uint8_t mux_id;
	uint16_t  ip_family;
	uint16_t  fc_seq;
	uint32_t  qos_id;
	int r;

	if (unlikely(!skb || !config))
		BUG();

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	cmd = RMNET_MAP_GET_CMD_START(skb);

	if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) {
		LOGD("Got packet on %s with bad mux id %d",
		     skb->dev->name, mux_id);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_BAD_MUX);
		return RX_HANDLER_CONSUMED;
	}

	ep = &(config->muxed_ep[mux_id]);

	if (!ep->refcount) {
		LOGD("Packet on %s:%d; has no logical endpoint config",
		     skb->dev->name, mux_id);

		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_MUX_NO_EP);
		return RX_HANDLER_CONSUMED;
	}

	vnd = ep->egress_dev;

	ip_family = cmd->flow_control.ip_family;
	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
	qos_id = ntohl(cmd->flow_control.qos_id);

	/* Ignore the ip family and pass the sequence number for both v4 and v6
	 * sequence. User space does not support creating dedicated flows for
	 * the 2 protocols
	 */
	r = rmnet_vnd_do_flow_control(vnd, qos_id, fc_seq, fc_seq, enable);
	LOGD("dev:%s, qos_id:0x%08X, ip_family:%hd, fc_seq %hd, en:%d",
	     skb->dev->name, qos_id, ip_family & 3, fc_seq, enable);

	if (r)
		return RMNET_MAP_COMMAND_UNSUPPORTED;
	else
		return RMNET_MAP_COMMAND_ACK;
}
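
The flow_control fields read above come from the MAP control command body that immediately follows the MAP header. A rough sketch of the relevant layout and the accessor macro, based on the upstream rmnet_map.h (bitfield order again depends on endianness; payloads for other commands are omitted):

struct rmnet_map_control_command {
	u8  command_name;	/* e.g. flow enable / flow disable */
	u8  cmd_type:2;		/* request, ack, or unsupported */
	u8  reserved:6;
	u16 reserved2;
	u32 transaction_id;
	union {
		struct {
			u8  ip_family:2;
			u8  reserved:6;
			__be16 flow_control_seq_num;
			__be32 qos_id;
		} flow_control;
		u8 data[0];	/* other command payloads */
	};
}  __aligned(1);

/* Command body starts right after the MAP header */
#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command *) \
				    ((Y)->data + \
				     sizeof(struct rmnet_map_header)))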
Example #3
static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
				    struct rmnet_port *port,
				    int enable)
{
	struct rmnet_map_control_command *cmd;
	struct rmnet_endpoint *ep;
	struct net_device *vnd;
	u16 ip_family;
	u16 fc_seq;
	u32 qos_id;
	u8 mux_id;
	int r;

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	cmd = RMNET_MAP_GET_CMD_START(skb);

	if (mux_id >= RMNET_MAX_LOGICAL_EP) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	vnd = ep->egress_dev;

	ip_family = cmd->flow_control.ip_family;
	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
	qos_id = ntohl(cmd->flow_control.qos_id);

	/* Ignore the ip family and pass the sequence number for both v4 and v6
	 * sequence. User space does not support creating dedicated flows for
	 * the 2 protocols
	 */
	r = rmnet_vnd_do_flow_control(vnd, enable);
	if (r) {
		kfree_skb(skb);
		return RMNET_MAP_COMMAND_UNSUPPORTED;
	} else {
		return RMNET_MAP_COMMAND_ACK;
	}
}
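
For context, rmnet_map_do_flow_control() is not invoked directly from the ingress path: a command dispatcher decodes command_name, passes enable as 1 or 0, and on success echoes the skb back to the modem as an ACK. A sketch along the lines of the upstream rmnet_map_command.c (rmnet_map_send_ack() is the upstream helper that rewrites cmd_type and transmits the buffer back):

void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
{
	struct rmnet_map_control_command *cmd;
	unsigned char command_name;
	unsigned char rc = 0;

	cmd = RMNET_MAP_GET_CMD_START(skb);
	command_name = cmd->command_name;

	switch (command_name) {
	case RMNET_MAP_COMMAND_FLOW_ENABLE:
		rc = rmnet_map_do_flow_control(skb, port, 1);
		break;

	case RMNET_MAP_COMMAND_FLOW_DISABLE:
		rc = rmnet_map_do_flow_control(skb, port, 0);
		break;

	default:
		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
		kfree_skb(skb);
		break;
	}

	/* The ACK reuses the command skb, so the success path above
	 * must not free it.
	 */
	if (rc == RMNET_MAP_COMMAND_ACK)
		rmnet_map_send_ack(skb, rc, port);
}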
Example #4
static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
	struct rmnet_endpoint *ep;
	u16 len, pad;
	u8 mux_id;

	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		goto free_skb;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	pad = RMNET_MAP_GET_PAD(skb);
	len = RMNET_MAP_GET_LENGTH(skb) - pad;

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto free_skb;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto free_skb;

	skb->dev = ep->egress_dev;

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header));
	rmnet_set_skb_proto(skb);

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_trim(skb, len);
	rmnet_deliver_skb(skb);
	return;

free_skb:
	kfree_skb(skb);
}
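
rmnet_get_endpoint(), used here in place of the older driver's fixed muxed_ep[] array of logical endpoint configs, resolves the mux ID through a per-port hash list. A sketch in the spirit of the upstream rmnet_config.c (assuming port->muxed_ep is an hlist_head array indexed by mux ID):

struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
{
	struct rmnet_endpoint *ep;

	/* Walk the RCU-protected bucket for this mux ID */
	hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
		if (ep->mux_id == mux_id)
			return ep;
	}

	return NULL;
}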
Example #5
static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
	struct rmnet_endpoint *ep;
	u8 mux_id;
	u16 len;

	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (port->ingress_data_format
		    & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		goto free_skb;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto free_skb;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto free_skb;

	skb->dev = ep->egress_dev;

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header));
	skb_trim(skb, len);
	rmnet_set_skb_proto(skb);
	rmnet_deliver_skb(skb);
	return;

free_skb:
	kfree_skb(skb);
}
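
Both variants finish by calling rmnet_set_skb_proto() before delivery: MAP frames carry raw IP with no Ethernet header, so the helper infers skb->protocol from the IP version nibble of the first payload byte. A minimal sketch, matching the upstream helper in rmnet_handlers.c:

static void rmnet_set_skb_proto(struct sk_buff *skb)
{
	/* High nibble of the first byte is the IP version field */
	switch (skb->data[0] & 0xF0) {
	case 0x40:
		skb->protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}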