/* * Actions on received messages from host: * Complete the wait event. * Or, reply with screen and cursor info. */ static void synthvid_recv_sub(struct hv_device *hdev) { struct fb_info *info = hv_get_drvdata(hdev); struct hvfb_par *par; struct synthvid_msg *msg; if (!info) return; par = info->par; msg = (struct synthvid_msg *)par->recv_buf; /* Complete the wait event */ if (msg->vid_hdr.type == SYNTHVID_VERSION_RESPONSE || msg->vid_hdr.type == SYNTHVID_VRAM_LOCATION_ACK) { memcpy(par->init_buf, msg, MAX_VMBUS_PKT_SIZE); complete(&par->wait); return; } /* Reply with screen and cursor info */ if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) { if (par->fb_ready) { synthvid_send_ptr(hdev); synthvid_send_situ(hdev); } par->update = msg->feature_chg.is_dirt_needed; if (par->update) schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY); } }
/* Receive callback for messages from the host */ static void synthvid_receive(void *ctx) { struct hv_device *hdev = ctx; struct fb_info *info = hv_get_drvdata(hdev); struct hvfb_par *par; struct synthvid_msg *recv_buf; u32 bytes_recvd; u64 req_id; int ret; if (!info) return; par = info->par; recv_buf = (struct synthvid_msg *)par->recv_buf; do { ret = vmbus_recvpacket(hdev->channel, recv_buf, MAX_VMBUS_PKT_SIZE, &bytes_recvd, &req_id); if (bytes_recvd > 0 && recv_buf->pipe_hdr.type == PIPE_MSG_DATA) synthvid_recv_sub(hdev); } while (bytes_recvd > 0 && ret == 0); }
/* Send screen resolution info to host */ static int synthvid_send_situ(struct hv_device *hdev) { struct fb_info *info = hv_get_drvdata(hdev); struct synthvid_msg msg; if (!info) return -ENODEV; memset(&msg, 0, sizeof(struct synthvid_msg)); msg.vid_hdr.type = SYNTHVID_SITUATION_UPDATE; msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) + sizeof(struct synthvid_situation_update); msg.situ.user_ctx = 0; msg.situ.video_output_count = 1; msg.situ.video_output[0].active = 1; msg.situ.video_output[0].vram_offset = 0; msg.situ.video_output[0].depth_bits = info->var.bits_per_pixel; msg.situ.video_output[0].width_pixels = info->var.xres; msg.situ.video_output[0].height_pixels = info->var.yres; msg.situ.video_output[0].pitch_bytes = info->fix.line_length; synthvid_send(hdev, &msg); return 0; }
/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 *
 * Copies the received data into a freshly allocated skb and hands it to
 * the network stack.  Always returns 0; failures are reported back to
 * the caller via packet->status = NVSP_STAT_FAIL.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
			 struct hv_netvsc_packet *packet,
			 struct ndis_tcp_ip_checksum_info *csum_info)
{
	struct net_device *net;
	struct sk_buff *skb;

	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
	if (!net || net->reg_state != NETREG_REGISTERED) {
		/* No usable netdev yet (or being torn down): drop */
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	if (csum_info) {
		/* We only look at the IP checksum here.
		 * Should we be dropping the packet if checksum
		 * failed? How do we deal with other checksums - TCP/UDP?
		 */
		if (csum_info->receive.ip_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}

	if (packet->vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       packet->vlan_tci);

	/* Steer the skb to the RX queue matching the arriving subchannel */
	skb_record_rx_queue(skb, packet->channel->
			    offermsg.offer.sub_channel_index);

	net->stats.rx_packets++;
	net->stats.rx_bytes += packet->total_data_buflen;

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}
/* Feed the report descriptor cached from the host into the HID core */
static int mousevsc_hid_parse(struct hid_device *hid)
{
	struct hv_device *hv_dev = hid_get_drvdata(hid);
	struct mousevsc_dev *mouse = hv_get_drvdata(hv_dev);

	return hid_parse_report(hid, mouse->report_desc,
				mouse->report_desc_size);
}
static int netvsc_probe(struct hv_device *dev, const struct hv_vmbus_device_id *dev_id) { struct net_device *net = NULL; struct net_device_context *net_device_ctx; struct netvsc_device_info device_info; struct netvsc_device *nvdev; int ret; net = alloc_etherdev_mq(sizeof(struct net_device_context), num_online_cpus()); if (!net) return -ENOMEM; netif_carrier_off(net); net_device_ctx = netdev_priv(net); net_device_ctx->device_ctx = dev; hv_set_drvdata(dev, net); INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); INIT_WORK(&net_device_ctx->work, do_set_multicast); net->netdev_ops = &device_ops; net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO; net->ethtool_ops = ðtool_ops; SET_NETDEV_DEV(net, &dev->device); /* Notify the netvsc driver of the new device */ device_info.ring_size = ring_size; ret = rndis_filter_device_add(dev, &device_info); if (ret != 0) { netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); free_netdev(net); hv_set_drvdata(dev, NULL); return ret; } memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); nvdev = hv_get_drvdata(dev); netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); ret = register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); rndis_filter_device_remove(dev); free_netdev(net); } else { schedule_delayed_work(&net_device_ctx->dwork, 0); } return ret; }
/*
 * netvsc_close - Stop TX, close the RNDIS filter, and wait until both
 * VMBus ring buffers of every subchannel have drained.
 *
 * Returns 0 on success, the rndis_filter_close() error, or -ETIMEDOUT
 * if the rings still hold data after the retry budget is exhausted.
 */
static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chn_table[i];
			if (!chn)
				continue;

			/* Unread inbound data? */
			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);

			if (aread)
				break;

			/* Outbound data the host has not yet consumed? */
			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);

			if (aread)
				break;
		}

		retry++;
		/* Done when rings are empty or the retry budget runs out */
		if (retry > retry_max || aread == 0)
			break;

		/* Exponential backoff: 10ms doubling, capped at 1s */
		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}
/*
 * mousevsc_connect_to_vsp - Negotiate the synthetic HID protocol with
 * the host.
 *
 * Sends the protocol/version request, then waits on two completions:
 * first for the protocol response, then for the device info (attrs,
 * hid descriptor, report descriptor) delivered by the channel callback.
 *
 * Returns 0 on success, -ETIMEDOUT if either wait exceeds 5 seconds,
 * -ENODEV if the host rejects the protocol version, or the status
 * recorded in dev_info_status by the device-info handler.
 */
static int mousevsc_connect_to_vsp(struct hv_device *device)
{
	int ret = 0;
	int t;
	struct mousevsc_dev *input_dev = hv_get_drvdata(device);
	struct mousevsc_prt_msg *request;
	struct mousevsc_prt_msg *response;

	request = &input_dev->protocol_req;
	memset(request, 0, sizeof(struct mousevsc_prt_msg));

	/* Build the protocol/version request */
	request->type = PIPE_MESSAGE_DATA;
	request->size = sizeof(struct synthhid_protocol_request);
	request->request.header.type = SYNTH_HID_PROTOCOL_REQUEST;
	request->request.header.size = sizeof(unsigned int);
	request->request.version_requested.version = SYNTHHID_INPUT_VERSION;

	ret = vmbus_sendpacket(device->channel, request,
			       sizeof(struct pipe_prt_msg) -
			       sizeof(unsigned char) +
			       sizeof(struct synthhid_protocol_request),
			       (unsigned long)request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		goto cleanup;

	/* First completion: the protocol response has arrived */
	t = wait_for_completion_timeout(&input_dev->wait_event, 5*HZ);
	if (!t) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	response = &input_dev->protocol_resp;
	if (!response->response.approved) {
		pr_err("synthhid protocol request failed (version %d)\n",
		       SYNTHHID_INPUT_VERSION);
		ret = -ENODEV;
		goto cleanup;
	}

	/* Second completion: device attrs / descriptors received */
	t = wait_for_completion_timeout(&input_dev->wait_event, 5*HZ);
	if (!t) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	/*
	 * We should have gotten the device attr, hid desc and report
	 * desc at this point
	 */
	ret = input_dev->dev_info_status;

cleanup:
	return ret;
}
/*
 * Return the netvsc_device bound to @device for the send path, or NULL
 * if none is bound or it is marked for destruction.
 */
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
	struct netvsc_device *nvdev = hv_get_drvdata(device);

	if (!nvdev || nvdev->destroy)
		return NULL;

	return nvdev;
}
/* Tear down the synthetic mouse: stop host traffic, then the HID device */
static int mousevsc_remove(struct hv_device *dev)
{
	struct mousevsc_dev *mouse = hv_get_drvdata(dev);

	/* Close the channel first so no more callbacks arrive */
	vmbus_close(dev->channel);
	hid_hw_stop(mouse->hid_device);
	hid_destroy_device(mouse->hid_device);
	mousevsc_free_device(mouse);

	return 0;
}
/*
 * Callback from vmbus_event when something is in inbound ring.
 *
 * Masks further channel interrupts and wakes any reader blocked on the
 * UIO event file.  virt_mb() orders the interrupt_mask write before the
 * notification so the waker's observer cannot miss it.
 */
static void hv_uio_channel_cb(void *context)
{
	struct vmbus_channel *chan = context;
	struct hv_device *hv_dev = chan->device_obj;
	struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);

	/* Disable further interrupts until the ring has been serviced
	 * (presumably userspace clears the mask again — confirm with the
	 * UIO consumer).
	 */
	chan->inbound.ring_buffer->interrupt_mask = 1;
	virt_mb();

	uio_event_notify(&pdata->info);
}
/* ethtool -l: report maximum and currently configured combined channels */
static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *ndc = netdev_priv(net);
	struct netvsc_device *nvdev = hv_get_drvdata(ndc->device_ctx);

	if (!nvdev)
		return;

	channel->max_combined = nvdev->max_chn;
	channel->combined_count = nvdev->num_chn;
}
/* Unregister the serio port, close the channel, and free the device */
static int hv_kbd_remove(struct hv_device *hv_dev)
{
	struct hv_kbd_dev *kbd = hv_get_drvdata(hv_dev);

	serio_unregister_port(kbd->hv_serio);
	vmbus_close(hv_dev->channel);
	kfree(kbd);
	hv_set_drvdata(hv_dev, NULL);

	return 0;
}
/*
 * Callback from vmbus_event when channel is rescinded.
 *
 * The host has withdrawn the offer; nothing further can be received.
 * Invalidate the UIO irq so readers see -EIO, then wake them.
 */
static void hv_uio_rescind(struct vmbus_channel *channel)
{
	/* Rescind arrives on a subchannel; walk up to the primary's device */
	struct hv_device *hv_dev = channel->primary_channel->device_obj;
	struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);

	/*
	 * Turn off the interrupt file handle
	 * Next read for event will return -EIO
	 */
	pdata->info.irq = 0;

	/* Wake up reader */
	uio_event_notify(&pdata->info);
}
/* Unwind hv_uio probe: unregister, clean up, close channel, free state */
static int hv_uio_remove(struct hv_device *dev)
{
	struct hv_uio_private_data *priv = hv_get_drvdata(dev);

	if (!priv)
		return 0;

	uio_unregister_device(&priv->info);
	hv_uio_cleanup(dev, priv);
	hv_set_drvdata(dev, NULL);

	vmbus_close(dev->channel);
	kfree(priv);
	return 0;
}
/*
 * Negotiate NVSP protocol version.
 *
 * Offers exactly one version (min == max == nvsp_ver) and waits for the
 * host's init-complete; for NVSPv2+ also sends the NDIS configuration.
 * Returns 0 on success, a vmbus_sendpacket() error, or -EINVAL if the
 * host did not accept the offered version.
 */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	/* Blocks until the host's reply completes channel_init_wait
	 * (signalled elsewhere, by the channel callback).
	 */
	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

	/* No completion requested: fire-and-forget config message */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}
/*
 * Return the netvsc_device bound to @device for the receive path.
 * Unlike the outbound variant, a device marked for destruction is still
 * returned while sends remain outstanding, so completions can drain.
 */
static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
	struct netvsc_device *nvdev = hv_get_drvdata(device);

	if (!nvdev)
		return NULL;

	if (nvdev->destroy &&
	    atomic_read(&nvdev->num_outstanding_sends) == 0)
		return NULL;

	return nvdev;
}
/*
 * netvsc_linkstatus_callback - Link up/down notification
 *
 * Records speed changes directly; queues connect/disconnect/network-
 * change indications for the delayed link-change worker to process.
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_reconfig *event;
	unsigned long flags;

	net = hv_get_drvdata(device_obj);

	if (!net)
		return;

	ndev_ctx = netdev_priv(net);

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		/* Divided by 10000 — presumably host units of 100 bps
		 * yielding Mbps; confirm against the RNDIS spec.
		 */
		speed = *(u32 *)((void *)indicate + indicate->
				 status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	/* GFP_ATOMIC: may run in the channel-callback context */
	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}
static struct netvsc_device *alloc_net_device(struct hv_device *device) { struct netvsc_device *net_device; struct net_device *ndev = hv_get_drvdata(device); net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL); if (!net_device) return NULL; init_waitqueue_head(&net_device->wait_drain); net_device->start_remove = false; net_device->destroy = false; net_device->dev = device; net_device->ndev = ndev; hv_set_drvdata(device, net_device); return net_device; }
/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netif_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	/* if changes are happening, comeback later */
	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
		return;
	}

	net_device = rtnl_dereference(ndev_ctx->nvdev);
	if (!net_device)
		goto out_unlock;

	rdev = net_device->extension;

	/* Rate-limit reconfiguration to one per LINKCHANGE_INT */
	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	/* Pop the oldest queued reconfig event under the lock; remember
	 * whether more remain so the worker can reschedule itself.
	 */
	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
/*
 * Send a receive-completion packet back to the host for @transaction_id.
 * On ring-full (-EAGAIN) the send is retried up to 3 times with a short
 * delay; other errors are only logged.
 */
static void netvsc_send_recv_completion(struct hv_device *device,
					struct vmbus_channel *channel,
					struct netvsc_device *net_device,
					u64 transaction_id, u32 status)
{
	struct nvsp_message recvcompMessage;
	int retries = 0;
	int ret;
	struct net_device *ndev = hv_get_drvdata(device);

	recvcompMessage.hdr.msg_type =
		NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;

	recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status;

retry_send_cmplt:
	/* Send the completion */
	ret = vmbus_sendpacket(channel, &recvcompMessage,
			       sizeof(struct nvsp_message), transaction_id,
			       VM_PKT_COMP, 0);
	if (ret == 0) {
		/* success */
		/* no-op */
	} else if (ret == -EAGAIN) {
		/* no more room...wait a bit and attempt to retry 3 times */
		retries++;
		netdev_err(ndev, "unable to send receive completion pkt"
			" (tid %llx)...retrying %d\n", transaction_id, retries);

		if (retries < 4) {
			udelay(100);
			goto retry_send_cmplt;
		} else {
			netdev_err(ndev, "unable to send receive "
				"completion pkt (tid %llx)...give up retrying\n",
				transaction_id);
		}
	} else {
		netdev_err(ndev, "unable to send receive "
			"completion pkt - %llx\n", transaction_id);
	}
}
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb) { struct net_device_context *net_device_ctx = netdev_priv(ndev); struct hv_device *hdev = net_device_ctx->device_ctx; struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev); u32 hash; u16 q_idx = 0; if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1) return 0; if (netvsc_set_hash(&hash, skb)) { q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] % ndev->real_num_tx_queues; skb_set_hash(skb, hash, 0); } return q_idx; }
/*
 * netvsc_change_mtu - Apply a new MTU by tearing down and re-adding the
 * RNDIS device with the previous channel configuration.
 *
 * Returns 0 on success, -ENODEV if the device is gone or being
 * destroyed, -EINVAL if @mtu is out of range, or the netvsc_close()
 * error.  The device is reopened on all exit paths after close.
 */
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;
	int ret = 0;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	/* NVSPv2+ hosts allow MTUs up to NETVSC_MTU minus the header */
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	if (mtu < NETVSC_MTU_MIN || mtu > limit)
		return -EINVAL;

	ret = netvsc_close(ndev);
	if (ret)
		goto out;

	/* Tear down the RNDIS device before changing the MTU */
	nvdev->start_remove = true;
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	/* NOTE(review): drvdata is set to the net_device here, while
	 * alloc_net_device() stores the netvsc_device — confirm which
	 * owner the re-add path expects.
	 */
	hv_set_drvdata(hdev, ndev);

	/* Rebuild with the previous channel configuration */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = nvdev->num_chn;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	/* NOTE(review): rndis_filter_device_add() return value is ignored;
	 * a failure here leaves the device closed but "open"ed below.
	 */
	rndis_filter_device_add(hdev, &device_info);

out:
	netvsc_open(ndev);
	return ret;
}
/*
 * Pick a TX queue from the flow hash via the VRSS send-indirection
 * table.  Queue 0 is used when there is no device, only one queue, or
 * the chosen subchannel is not open.
 */
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv,
			       select_queue_fallback_t fallback)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct netvsc_device *nvdev = hv_get_drvdata(ndc->device_ctx);
	u32 hash;
	u16 q_idx;

	if (!nvdev || ndev->real_num_tx_queues <= 1)
		return 0;

	hash = skb_get_hash(skb);
	q_idx = nvdev->send_table[hash % VRSS_SEND_TAB_SIZE] %
		ndev->real_num_tx_queues;

	/* Fall back to the primary channel if the subchannel is absent */
	if (!nvdev->chn_table[q_idx])
		q_idx = 0;

	return q_idx;
}
/* * netvsc_linkstatus_callback - Link up/down notification */ void netvsc_linkstatus_callback(struct hv_device *device_obj, struct rndis_message *resp) { struct rndis_indicate_status *indicate = &resp->msg.indicate_status; struct net_device *net; struct net_device_context *ndev_ctx; struct netvsc_device *net_device; struct rndis_device *rdev; net_device = hv_get_drvdata(device_obj); rdev = net_device->extension; switch (indicate->status) { case RNDIS_STATUS_MEDIA_CONNECT: rdev->link_state = false; break; case RNDIS_STATUS_MEDIA_DISCONNECT: rdev->link_state = true; break; case RNDIS_STATUS_NETWORK_CHANGE: rdev->link_change = true; break; default: return; } net = net_device->ndev; if (!net || net->reg_state != NETREG_REGISTERED) return; ndev_ctx = netdev_priv(net); if (!rdev->link_state) { schedule_delayed_work(&ndev_ctx->dwork.work, 0); schedule_delayed_work(&ndev_ctx->dwork.work, msecs_to_jiffies(20)); } else { schedule_delayed_work(&ndev_ctx->dwork.work, 0); } }
/*
 * Connect to VSP (Virtual Service Provider) on host.
 *
 * Opens the VMBus channel, negotiates the synthetic video protocol
 * version appropriate for the VMBus protocol generation, and derives
 * the screen depth and framebuffer size from the result.  The channel
 * is closed again on negotiation failure.
 */
static int synthvid_connect_vsp(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;
	int ret;

	ret = vmbus_open(hdev->channel, RING_BUFSIZE, RING_BUFSIZE,
			 NULL, 0, synthvid_receive, hdev);
	if (ret) {
		pr_err("Unable to open vmbus channel\n");
		return ret;
	}

	/* Negotiate the protocol version with host */
	if (vmbus_proto_version == VERSION_WS2008 ||
	    vmbus_proto_version == VERSION_WIN7)
		ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7);
	else
		ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN8);

	if (ret) {
		pr_err("Synthetic video device version not accepted\n");
		goto error;
	}

	/* Color depth depends on the version the host accepted */
	if (par->synthvid_version == SYNTHVID_VERSION_WIN7)
		screen_depth = SYNTHVID_DEPTH_WIN7;
	else
		screen_depth = SYNTHVID_DEPTH_WIN8;

	/* Framebuffer size comes from the channel offer's MMIO budget */
	screen_fb_size = hdev->channel->offermsg.offer.
		mmio_megabytes * 1024 * 1024;

	return 0;

error:
	vmbus_close(hdev->channel);
	return ret;
}
/*
 * Send GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
 * another netif_notify_peers() into a delayed work, otherwise GARP packet
 * will not be sent after quick migration, and cause network disconnection.
 * Also, we update the carrier status here.
 *
 * NOTE(review): the callback signature is void *data, while delayed-work
 * callbacks normally take struct work_struct * — confirm how this is
 * registered with INIT_DELAYED_WORK.
 */
static void netvsc_link_change(void *data)
{
	struct delayed_work *w = (struct delayed_work *)data;
	struct net_device_context *ndev_ctx;
	struct net_device *net;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	bool notify, refresh = false;
	/* On RNDIS_STATUS_NETWORK_CHANGE, restart networking from
	 * userspace via the distro init script.
	 */
	char *argv[] = { "/etc/init.d/network", "restart", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 NULL };

	rtnl_lock();
	ndev_ctx = container_of(w, struct net_device_context, dwork);
	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
	rdev = net_device->extension;
	net = net_device->ndev;

	/* Mirror the RNDIS link state onto the carrier */
	if (rdev->link_state) {
		netif_carrier_off(net);
		notify = false;
	} else {
		netif_carrier_on(net);
		notify = true;
		if (rdev->link_change) {
			rdev->link_change = false;
			refresh = true;
		}
	}

	rtnl_unlock();

	if (refresh)
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);

	/* GARP so peers update their tables after migration */
	if (notify)
		netif_notify_peers(net);
}
/*
 * netvsc_device_remove - Callback when the root bus device is removed
 *
 * Disconnects from the VSP, clears drvdata under the inbound lock so the
 * receive path cannot pick up a stale pointer, closes the channel, and
 * frees all netvsc resources.  Always returns 0.
 */
int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	unsigned long flags;

	net_device = hv_get_drvdata(device);

	netvsc_disconnect_vsp(net_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */

	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	hv_set_drvdata(device, NULL);
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	dev_notice(&device->device, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	if (net_device->sub_cb_buf)
		vfree(net_device->sub_cb_buf);

	kfree(net_device);
	return 0;
}
/*
 * Send VRAM and Situation messages to the host.
 *
 * Tells the host where the guest framebuffer lives (by GPA), waits for
 * the acknowledgement, then pushes the initial pointer and situation
 * updates.  Returns 0 on success, -ETIMEDOUT if the ack does not arrive
 * within VSP_TIMEOUT, or -ENODEV if the host rejected the location.
 */
static int synthvid_send_config(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;
	struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
	int ret = 0;
	unsigned long t;

	/* Send VRAM location */
	memset(msg, 0, sizeof(struct synthvid_msg));
	msg->vid_hdr.type = SYNTHVID_VRAM_LOCATION;
	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_vram_location);
	/* user_ctx doubles as the GPA so the ack can be matched below */
	msg->vram.user_ctx = msg->vram.vram_gpa = info->fix.smem_start;
	msg->vram.is_vram_gpa_specified = 1;
	synthvid_send(hdev, msg);

	/* par->wait is completed by the receive path on VRAM_LOCATION_ACK;
	 * the ack payload is copied into init_buf (aliased by msg).
	 */
	t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
	if (!t) {
		pr_err("Time out on waiting vram location ack\n");
		ret = -ETIMEDOUT;
		goto out;
	}
	if (msg->vram_ack.user_ctx != info->fix.smem_start) {
		pr_err("Unable to set VRAM location\n");
		ret = -ENODEV;
		goto out;
	}

	/* Send pointer and situation update */
	synthvid_send_ptr(hdev);
	synthvid_send_situ(hdev);

out:
	return ret;
}
static void do_set_multicast(struct work_struct *w) { struct net_device_context *ndevctx = container_of(w, struct net_device_context, work); struct netvsc_device *nvdev; struct rndis_device *rdev; nvdev = hv_get_drvdata(ndevctx->device_ctx); if (nvdev == NULL || nvdev->ndev == NULL) return; rdev = nvdev->extension; if (rdev == NULL) return; if (nvdev->ndev->flags & IFF_PROMISC) rndis_filter_set_packet_filter(rdev, NDIS_PACKET_TYPE_PROMISCUOUS); else rndis_filter_set_packet_filter(rdev, NDIS_PACKET_TYPE_BROADCAST | NDIS_PACKET_TYPE_ALL_MULTICAST | NDIS_PACKET_TYPE_DIRECTED); }