/**
 * _rmnet_map_ingress_handler() - Actual MAP ingress handler
 * @skb:        Packet being received
 * @config:     Physical endpoint configuration for the ingress device
 *
 * Most MAP ingress functions are processed here. Packets are processed
 * individually; aggregated packets should use rmnet_map_ingress_handler()
 *
 * Return:
 *      - RX_HANDLER_CONSUMED if packet is dropped
 *      - result of __rmnet_deliver_skb() for all other cases
 */
static rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
					struct rmnet_phys_ep_conf_s *config)
{
	struct rmnet_logical_ep_conf_s *ep;
	uint8_t mux_id;
	uint16_t len;
	int ckresult;

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb)
	      - config->tail_spacing;

	if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) {
		LOGD("Got packet on %s with bad mux id %d",
		     skb->dev->name, mux_id);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX);
		return RX_HANDLER_CONSUMED;
	}

	ep = &(config->muxed_ep[mux_id]);

	if (!ep->refcount) {
		LOGD("Packet on %s:%d; has no logical endpoint config",
		     skb->dev->name, mux_id);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP);
		return RX_HANDLER_CONSUMED;
	}

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
		skb->dev = ep->egress_dev;

	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)) {
		ckresult = rmnet_map_checksum_downlink_packet(skb);
		trace_rmnet_map_checksum_downlink_packet(skb, ckresult);
		rmnet_stats_dl_checksum(ckresult);
		if (likely((ckresult == RMNET_MAP_CHECKSUM_OK) ||
			   (ckresult == RMNET_MAP_CHECKSUM_SKIPPED)))
			skb->ip_summed |= CHECKSUM_UNNECESSARY;
		else if (ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION &&
			 ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT &&
			 ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET &&
			 ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) {
			rmnet_kfree_skb(skb,
				RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM);
			return RX_HANDLER_CONSUMED;
		}
	}

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header_s));
	skb_trim(skb, len);
	__rmnet_data_set_skb_proto(skb);
	return __rmnet_deliver_skb(skb, ep);
}
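For reference, the RMNET_MAP_GET_* accessors above read fields directly out of the 4-byte MAP header at the start of skb->data. Below is a minimal sketch of the layout those accessors assume, using the struct and macro names of the upstream rmnet driver (the older rmnet_data code names the struct rmnet_map_header_s, but the wire format is the same); the bitfield order shown is the little-endian case, which the real header guards with endianness checks.

/* Sketch of the MAP header the RMNET_MAP_GET_* accessors assume.
 * Field names follow the upstream rmnet driver; treat exact bit order
 * as an assumption for illustration only.
 */
struct rmnet_map_header {
	u8  pad_len:6;		/* trailing pad bytes, RMNET_MAP_GET_PAD() */
	u8  reserved_bit:1;
	u8  cd_bit:1;		/* command (1) vs. data (0) frame */
	u8  mux_id;		/* logical endpoint, RMNET_MAP_GET_MUX_ID() */
	__be16 pkt_len;		/* payload + pad, excludes this header */
} __aligned(1);

#define RMNET_MAP_GET_MUX_ID(Y) \
	(((struct rmnet_map_header *)(Y)->data)->mux_id)
#define RMNET_MAP_GET_PAD(Y) \
	(((struct rmnet_map_header *)(Y)->data)->pad_len)
#define RMNET_MAP_GET_LENGTH(Y) \
	(ntohs(((struct rmnet_map_header *)(Y)->data)->pkt_len))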
static void __rmnet_map_ingress_handler(struct sk_buff *skb,
					struct rmnet_port *port)
{
	struct rmnet_endpoint *ep;
	u16 len, pad;
	u8 mux_id;

	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		goto free_skb;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	pad = RMNET_MAP_GET_PAD(skb);
	len = RMNET_MAP_GET_LENGTH(skb) - pad;

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto free_skb;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto free_skb;

	skb->dev = ep->egress_dev;

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header));
	rmnet_set_skb_proto(skb);

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_trim(skb, len);
	rmnet_deliver_skb(skb);
	return;

free_skb:
	kfree_skb(skb);
}
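The per-packet handler above only ever sees a single MAP frame; aggregated frames are split up one level higher, as the kernel-doc on the older handler notes. A sketch of that outer ingress path, assuming the upstream driver's RMNET_FLAGS_INGRESS_DEAGGREGATION flag and rmnet_map_deaggregate() helper:

/* Outer MAP ingress path (sketch): when deaggregation is enabled, each
 * MAP frame in the aggregate is split off into its own skb and handed to
 * the per-packet handler; otherwise the skb is passed through directly.
 */
static void rmnet_map_ingress_handler(struct sk_buff *skb,
				      struct rmnet_port *port)
{
	struct sk_buff *skbn;

	if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) {
		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
			__rmnet_map_ingress_handler(skbn, port);

		/* original aggregate skb is no longer needed */
		consume_skb(skb);
	} else {
		__rmnet_map_ingress_handler(skb, port);
	}
}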
static void __rmnet_map_ingress_handler(struct sk_buff *skb,
					struct rmnet_port *port)
{
	struct rmnet_endpoint *ep;
	u8 mux_id;
	u16 len;

	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (port->ingress_data_format
		    & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		goto free_skb;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto free_skb;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto free_skb;

	skb->dev = ep->egress_dev;

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header));
	skb_trim(skb, len);

	rmnet_set_skb_proto(skb);

	rmnet_deliver_skb(skb);
	return;

free_skb:
	kfree_skb(skb);
}
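Both of the newer variants call rmnet_set_skb_proto() right after stripping the MAP header, so skb->protocol reflects the inner IP packet before delivery. A minimal sketch of that helper, assuming the upstream constants RMNET_IP_VERSION_4 (0x40) and RMNET_IP_VERSION_6 (0x60):

/* Protocol fixup (sketch): the leading IP version nibble of the inner
 * packet selects skb->protocol, falling back to ETH_P_MAP when the
 * payload is not recognizable as IPv4 or IPv6.
 */
static void rmnet_set_skb_proto(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case RMNET_IP_VERSION_4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case RMNET_IP_VERSION_6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}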