static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev) { int ret; /* register the packet handler for slave ports */ ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev); if (ret) { netdev_err(port_dev, "Device %s failed to register rx_handler\n", port_dev->name); goto out_fail; } ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL); if (ret < 0) goto out_unregister; port_dev->priv_flags |= IFF_L3MDEV_SLAVE; cycle_netdev(port_dev); return 0; out_unregister: netdev_rx_handler_unregister(port_dev); out_fail: return ret; }
/** * rmnet_unassociate_network_device() - Unassociate network device * @dev: Device to unassociate * * Frees all structures generate for device. Unregisters rx_handler * todo: needs to do some sanity verification first (is device in use, etc...) * * Return: * - RMNET_CONFIG_OK if successful * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null * - RMNET_CONFIG_INVALID_REQUEST if device is not already associated * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null */ int rmnet_unassociate_network_device(struct net_device *dev) { struct rmnet_phys_ep_conf_s *config; ASSERT_RTNL(); LOGL("%s(%s);", __func__, dev->name); if (!dev) return RMNET_CONFIG_NO_SUCH_DEVICE; if (!_rmnet_is_physical_endpoint_associated(dev)) return RMNET_CONFIG_INVALID_REQUEST; config = (struct rmnet_phys_ep_conf_s *) rcu_dereference(dev->rx_handler_data); if (!config) return RMNET_CONFIG_UNKNOWN_ERROR; kfree(config); netdev_rx_handler_unregister(dev); return RMNET_CONFIG_OK; }
/* Remove @port from its HSR device and release its resources.
 * NOTE(review): teardown order (list_del_rcu -> unregister ->
 * synchronize_rcu -> dev_put) looks deliberate; presumably callers hold
 * RTNL — confirm against call sites.
 */
void hsr_del_port(struct hsr_port *port)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;

	hsr = port->hsr;
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	/* Unlink from the hsr port list; RCU readers may still see it
	 * until synchronize_rcu() below. */
	list_del_rcu(&port->port_list);

	if (port != master) {
		if (master != NULL) {
			/* Slave set changed: recompute the master's
			 * offload features and maximum usable MTU. */
			netdev_update_features(master->dev);
			dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
		}
		/* Stop intercepting frames on the slave and drop the
		 * promiscuity reference taken when it was added. */
		netdev_rx_handler_unregister(port->dev);
		dev_set_promiscuity(port->dev, -1);
	}

	/* FIXME?
	 * netdev_upper_dev_unlink(port->dev, port->hsr->dev);
	 */

	/* Wait for RCU readers that may still hold the list entry. */
	synchronize_rcu();

	/* Only slave ports hold an extra device reference here. */
	if (port != master)
		dev_put(port->dev);
}
/* Tear down a macvtap link: stop intercepting frames, drain the tap
 * queues, then undo the underlying macvlan link.
 */
static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	struct macvtap_dev *vtap = netdev_priv(dev);

	/* Handler first so no new frames reach the tap queues. */
	netdev_rx_handler_unregister(dev);
	tap_del_queues(&vtap->tap);
	macvlan_dellink(dev, head);
}
void ovs_netdev_detach_dev(struct vport *vport) { ASSERT_RTNL(); vport->dev->priv_flags &= ~IFF_OVS_DATAPATH; netdev_rx_handler_unregister(vport->dev); netdev_upper_dev_unlink(vport->dev, netdev_master_upper_dev_get(vport->dev)); dev_set_promiscuity(vport->dev, -1); }
static int netdev_detach(struct vport *vport) { struct netdev_vport *netdev_vport = netdev_vport_priv(vport); netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH; netdev_rx_handler_unregister(netdev_vport->dev); dev_set_promiscuity(netdev_vport->dev, -1); return 0; }
static void netdev_destroy(struct vport *vport) { struct netdev_vport *netdev_vport = netdev_vport_priv(vport); netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH; netdev_rx_handler_unregister(netdev_vport->dev); dev_set_promiscuity(netdev_vport->dev, -1); call_rcu(&netdev_vport->rcu, free_port_rcu); }
/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	/* Undo the add steps in reverse: unlink from the master, clear the
	 * slave flag, remove the rx_handler, then bounce the port so its
	 * state is re-evaluated. */
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	netdev_rx_handler_unregister(port_dev);

	cycle_netdev(port_dev);

	return 0;
}
/* Module exit: detach the rx_handler from the parent device (if one was
 * attached) and unregister/free the child net_device.
 * NOTE(review): `child` is a module-global net_device; assumes it is
 * non-NULL at exit — confirm init cannot leave it unset.
 */
void __exit virt_exit( void )
{
	struct priv *priv = netdev_priv( child );

	if( priv->parent ) {
		/* rx_handler (un)registration requires RTNL. */
		rtnl_lock();
		netdev_rx_handler_unregister( priv->parent );
		rtnl_unlock();
		LOG( "unregister rx handler for %s\n", priv->parent->name );
	}

	unregister_netdev( child );
	free_netdev( child );

	LOG( "module %s unloaded", THIS_MODULE->name );
}
/* Remove the vhost tap from physical device @pdev.
 * Only unregisters when the installed handler is ours, so an rx_handler
 * registered by someone else is left untouched.
 * NOTE(review): rcu_dereference() without an explicit rcu_read_lock
 * here — presumably callers hold RTNL, which also protects rx_handler
 * updates; confirm against call sites.
 */
void vhost_del_tap_phys(struct net_device *pdev)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
	/* Kernels with rx_handler support: drop it only if it is ours. */
	if (rcu_dereference(pdev->rx_handler) == vhost_rx_handler)
		netdev_rx_handler_unregister(pdev);
#else
	/* Pre-rx_handler kernels use the vif pointer mechanism instead. */
	vr_set_vif_ptr(pdev, NULL);
#endif
	return;
}
/* Destroy a netdev vport synchronously: detach from the datapath, wait
 * out the RCU grace period, then drop the device reference and free the
 * vport.  Order matters: synchronize_rcu() must precede dev_put()/free
 * so concurrent RCU readers cannot touch freed state.
 */
static void netdev_destroy(struct vport *vport)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);

	netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
	netdev_rx_handler_unregister(netdev_vport->dev);
	dev_set_promiscuity(netdev_vport->dev, -1);

	/* Wait for in-flight RCU readers before releasing the device. */
	synchronize_rcu();

	dev_put(netdev_vport->dev);
	ovs_vport_free(vport);
}
/* Destroy a netdev vport: take RTNL for the detach steps (rx_handler
 * and upper-dev manipulation require it), then defer the final free to
 * an RCU callback.
 * NOTE(review): netdev_exit() before the detach sequence — its
 * semantics are not visible here; confirm it is safe to call first.
 */
static void netdev_destroy(struct vport *vport)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);

	netdev_exit();

	rtnl_lock();
	netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
	netdev_rx_handler_unregister(netdev_vport->dev);
	netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
	dev_set_promiscuity(netdev_vport->dev, -1);
	rtnl_unlock();

	/* Free after the RCU grace period. */
	call_rcu(&netdev_vport->rcu, free_port_rcu);
}
/* Detach rmnet from @real_dev and free its per-device state.
 * @real_dev: underlying physical device
 * @port:     rmnet private state registered as rx_handler_data
 *
 * Returns 0 on success, -EINVAL while rmnet devices still reference the
 * port.
 */
static int rmnet_unregister_real_device(struct net_device *real_dev,
					struct rmnet_port *port)
{
	if (port->nr_rmnet_devs)
		return -EINVAL;

	/* Unregister the rx_handler *before* freeing @port: @port is the
	 * handler's private data, so freeing it first (as the original
	 * did) leaves a use-after-free window for in-flight handlers.
	 */
	netdev_rx_handler_unregister(real_dev);
	kfree(port);

	/* Log before dropping our reference so we never touch @real_dev
	 * after dev_put(). */
	netdev_dbg(real_dev, "Removed from rmnet\n");

	/* release reference on real_dev */
	dev_put(real_dev);

	return 0;
}
/* Free a netpoll_wrapper, undoing whichever of its optional hooks were
 * actually set up (tracepoint, netpoll object, rx_handler).  Safe to
 * call with NULL.
 */
void netpoll_wrapper_free(struct netpoll_wrapper *pWrapper)
{
	if (pWrapper) {
		if (pWrapper->tracepoint_registered)
			unregister_tracepoint_wrapper(netif_receive_skb,
						      hook_receive_skb,
						      pWrapper);

		if (pWrapper->netpoll_initialized)
			netpoll_cleanup(&pWrapper->netpoll_obj);

		if (pWrapper->pDeviceWithHandler) {
			/* rx_handler unregistration requires RTNL. */
			rtnl_lock();
			netdev_rx_handler_unregister(pWrapper->pDeviceWithHandler);
			rtnl_unlock();
		}

		kfree(pWrapper);
	}
}
/* Ask the Linux RX subsystem to intercept (or stop intercepting)
 * the packets incoming from the interface attached to 'na'.
 * Returns 0 on success; on registration failure returns the negated
 * kernel error code.
 */
int netmap_catch_rx(struct netmap_adapter *na, int intercept)
{
#ifndef NETMAP_LINUX_HAVE_RX_REGISTER
	/* No rx_handler support on this kernel: nothing to do. */
	return 0;
#else /* HAVE_RX_REGISTER */
	struct ifnet *ifp = na->ifp;

	if (!intercept) {
		netdev_rx_handler_unregister(ifp);
		return 0;
	}

	return -netdev_rx_handler_register(ifp, &linux_generic_rx_handler, na);
#endif /* HAVE_RX_REGISTER */
}
/* Disable packet dropping on the interface tracked by @pt_netif:
 * restore the netpoll-disable flag if we set it, detach the rx_handler,
 * drop the device reference and free the tracking struct.
 * NOTE(review): the smp_wmb() calls and the dev_put() *before*
 * synchronize_rcu() look suspicious — presumably the ref is not the
 * last one; confirm the intended ordering against the enable path.
 */
static void netif_drop_packets_disable(netif_info *pt_netif)
{
	struct net_device *ndev = (pt_netif->ndev);

	/* Only clear IFF_DISABLE_NETPOLL if it was clear originally. */
	if (!pt_netif->dsb_np_bit_orig) {
		pt_netif->ndev->priv_flags &= (~(IFF_DISABLE_NETPOLL));
		smp_wmb();
	}

	netdev_rx_handler_unregister(ndev);
	smp_wmb();

	dev_put(pt_netif->ndev);

	/* Wait for RCU readers before freeing the tracking state. */
	synchronize_rcu();
	kfree(pt_netif);

	DBG_PRINT("==");
}
/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	struct slave *slave;

	/* Undo the add steps in reverse order. */
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	netdev_rx_handler_unregister(port_dev);

	cycle_netdev(port_dev);

	/* Remove the port's entry from the VRF slave queue, if present;
	 * kfree(NULL) is a no-op so no guard is needed before freeing. */
	slave = __vrf_find_slave_dev(queue, port_dev);
	if (slave)
		__vrf_remove_slave(queue, slave);

	kfree(slave);

	return 0;
}
/* Module exit: detach the rx hook from its device, free the hook state
 * and dump the collected per-slot packet counters.
 */
static void __exit rh_release(void)
{
	unsigned int total = 0;
	int i;

	pr_info("rxhook (v%s) is unloaded\n", RXHOOK_VERSION);

	/* rx_handler unregistration requires RTNL. */
	rtnl_lock();
	netdev_rx_handler_unregister(rh->dev);
	rtnl_unlock();

	kfree(rh);
	rh = NULL;

	for (i = 0; i < 10; i++) {
		pr_info("pps[%d] = %d\n", i, pps[i]);
		total += pps[i];
	}
	pr_info("sum: %u\n", total);
}
/* Create a macvtap link: set up the tap state and rx_handler before
 * calling into macvlan, because macvlan_common_newlink() cannot be
 * undone once it succeeds (see comment below).
 * Returns 0 on success or a negative errno.
 */
static int macvtap_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct macvtap_dev *vlantap = netdev_priv(dev);
	int err;

	INIT_LIST_HEAD(&vlantap->tap.queue_list);

	/* Since macvlan supports all offloads by default, make
	 * tap support all offloads also.
	 */
	vlantap->tap.tap_features = TUN_OFFLOADS;

	/* Register callbacks for rx/tx drops accounting and updating
	 * net_device features
	 */
	vlantap->tap.count_tx_dropped = macvtap_count_tx_dropped;
	vlantap->tap.count_rx_dropped = macvtap_count_rx_dropped;
	vlantap->tap.update_features = macvtap_update_features;

	err = netdev_rx_handler_register(dev, tap_handle_frame, &vlantap->tap);
	if (err)
		return err;

	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	err = macvlan_common_newlink(src_net, dev, tb, data);
	if (err) {
		/* Roll back the only step taken so far. */
		netdev_rx_handler_unregister(dev);
		return err;
	}

	vlantap->tap.dev = vlantap->vlan.dev;

	return 0;
}
/* Enslave @port_dev under the VRF device @dev: allocate a slave entry,
 * install the VRF rx_handler, link the port under the master and add it
 * to the VRF slave queue.  Returns 0 or a negative errno; on failure
 * everything done so far is rolled back via the goto-cleanup chain.
 */
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	int ret = -ENOMEM;

	if (!slave)
		goto out_fail;

	slave->dev = port_dev;

	/* register the packet handler for slave ports */
	ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
	if (ret) {
		netdev_err(port_dev,
			   "Device %s failed to register rx_handler\n",
			   port_dev->name);
		goto out_fail;
	}

	ret = netdev_master_upper_dev_link(port_dev, dev);
	if (ret < 0)
		goto out_unregister;

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	__vrf_insert_slave(queue, slave);
	cycle_netdev(port_dev);

	return 0;

out_unregister:
	netdev_rx_handler_unregister(port_dev);
out_fail:
	/* kfree(NULL) is a no-op, so this covers the alloc-failure path. */
	kfree(slave);
	return ret;
}
/** * rmnet_unassociate_network_device() - Unassociate network device * @dev: Device to unassociate * * Frees all structures generate for device. Unregisters rx_handler * todo: needs to do some sanity verification first (is device in use, etc...) * * Return: * - RMNET_CONFIG_OK if successful * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null * - RMNET_CONFIG_INVALID_REQUEST if device is not already associated * - RMNET_CONFIG_DEVICE_IN_USE if device has logical ep that wasn't unset * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null */ int rmnet_unassociate_network_device(struct net_device *dev) { struct rmnet_phys_ep_conf_s *config; int config_id = RMNET_LOCAL_LOGICAL_ENDPOINT; struct rmnet_logical_ep_conf_s *epconfig_l; ASSERT_RTNL(); LOGL("(%s);", dev->name); if (!dev) return RMNET_CONFIG_NO_SUCH_DEVICE; if (!_rmnet_is_physical_endpoint_associated(dev)) return RMNET_CONFIG_INVALID_REQUEST; for (; config_id < RMNET_DATA_MAX_LOGICAL_EP; config_id++) { epconfig_l = _rmnet_get_logical_ep(dev, config_id); if (epconfig_l && epconfig_l->refcount) return RMNET_CONFIG_DEVICE_IN_USE; } config = (struct rmnet_phys_ep_conf_s *) rcu_dereference(dev->rx_handler_data); if (!config) return RMNET_CONFIG_UNKNOWN_ERROR; kfree(config); netdev_rx_handler_unregister(dev); /* Explicitly release the reference from the device */ dev_put(dev); trace_rmnet_unassociate(dev); return RMNET_CONFIG_OK; }
/*
 * register an rx handler which will bypass all vrouter processing. Helpful
 * at the time of vrouter soft reset. For kernels that does not support rx
 * handler tap, the logic is handled in linux_rx_handler, albeit in an
 * inefficient way (but works :)
 *
 * NOTE(review): rcu_dereference() of pdev->rx_handler without an
 * explicit rcu_read_lock — presumably callers hold RTNL, which also
 * serializes rx_handler updates; confirm against call sites.
 */
void vhost_tap_phys(struct net_device *vdev, struct net_device *pdev)
{
	struct vhost_priv *vp;

	if (!vdev || !pdev)
		return;

	vp = netdev_priv(vdev);
	/*
	 * with vhost_attach_phys, it is possible that everything was OK except
	 * that the physical went out and came back. if that is the case, do not
	 * tap here
	 */
	if (vp->vp_vifp)
		goto exit_tap_phys;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
	/*
	 * it can so happen (unlikely) that vhost is deleted from vrouter
	 * before the physical, in which case we will have to unregister
	 * the rx handler in physical and install the vhost_rx_handler
	 */
	if (rcu_dereference(pdev->rx_handler) == linux_rx_handler)
		netdev_rx_handler_unregister(pdev);

	/* Only install our handler onto a free slot; registering over an
	 * existing handler would fail. */
	if (!rcu_dereference(pdev->rx_handler))
		netdev_rx_handler_register(pdev, vhost_rx_handler, (void *)vdev);
#else
	/* Pre-rx_handler kernels use the vif pointer mechanism instead. */
	vr_set_vif_ptr(pdev, &vr_reset_interface);
#endif

exit_tap_phys:
	return;
}