/* MAP ingress entry point: when deaggregation is enabled on the port,
 * split the aggregate buffer into individual MAP frames and hand each
 * to the per-frame handler; otherwise process the skb as a single frame.
 */
static void rmnet_map_ingress_handler(struct sk_buff *skb,
				      struct rmnet_port *port)
{
	struct sk_buff *frame;

	if (!(port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION)) {
		/* Single MAP frame: process in place. */
		__rmnet_map_ingress_handler(skb, port);
		return;
	}

	/* Peel MAP frames off the aggregate buffer one at a time. */
	while ((frame = rmnet_map_deaggregate(skb)) != NULL)
		__rmnet_map_ingress_handler(frame, port);

	/* All frames extracted; release the now-empty aggregate buffer. */
	consume_skb(skb);
}
/** * rmnet_map_ingress_handler() - MAP ingress handler * @skb: Packet being received * @config: Physical endpoint configuration for the ingress device * * Called if and only if MAP is configured in the ingress device's ingress data * format. Deaggregation is done here, actual MAP processing is done in * _rmnet_map_ingress_handler(). * * Return: * - RX_HANDLER_CONSUMED for aggregated packets * - RX_HANDLER_CONSUMED for dropped packets * - result of _rmnet_map_ingress_handler() for all other cases */ static rx_handler_result_t rmnet_map_ingress_handler(struct sk_buff *skb, struct rmnet_phys_ep_conf_s *config) { struct sk_buff *skbn; int rc, co = 0; if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) { trace_rmnet_start_deaggregation(skb); while ((skbn = rmnet_map_deaggregate(skb, config)) != 0) { _rmnet_map_ingress_handler(skbn, config); co++; } trace_rmnet_end_deaggregation(skb, co); LOGD("De-aggregated %d packets", co); rmnet_stats_deagg_pkts(co); rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF); rc = RX_HANDLER_CONSUMED; } else { rc = _rmnet_map_ingress_handler(skb, config); } return rc; }
static void rmnet_map_ingress_handler(struct sk_buff *skb, struct rmnet_port *port) { struct sk_buff *skbn; if (skb->dev->type == ARPHRD_ETHER) { if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) { kfree_skb(skb); return; } skb_push(skb, ETH_HLEN); } if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) { while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) __rmnet_map_ingress_handler(skbn, port); consume_skb(skb); } else { __rmnet_map_ingress_handler(skb, port); } }