static void rmnet_mhi_cb(struct mhi_cb_info *cb_info) { struct rmnet_mhi_private *rmnet_mhi_ptr; struct mhi_result *result; enum MHI_STATUS r = MHI_STATUS_SUCCESS; if (NULL != cb_info && NULL != cb_info->result) { result = cb_info->result; rmnet_mhi_ptr = result->user_data; } else { rmnet_log(MSG_CRITICAL, "Invalid data in MHI callback, quitting\n"); } switch (cb_info->cb_reason) { case MHI_CB_MHI_DISABLED: rmnet_log(MSG_CRITICAL, "Got MHI_DISABLED notification. Stopping stack\n"); if (rmnet_mhi_ptr->mhi_enabled) { rmnet_mhi_disable(rmnet_mhi_ptr); rmnet_mhi_disable_iface(rmnet_mhi_ptr); } break; case MHI_CB_MHI_ENABLED: rmnet_log(MSG_CRITICAL, "Got MHI_ENABLED notification. Starting stack\n"); if (IS_INBOUND(cb_info->chan)) rmnet_mhi_ptr->rx_enabled = 1; else rmnet_mhi_ptr->tx_enabled = 1; if (rmnet_mhi_ptr->tx_enabled && rmnet_mhi_ptr->rx_enabled) { rmnet_log(MSG_INFO, "Both RX/TX are enabled, enabling iface.\n"); r = rmnet_mhi_enable_iface(rmnet_mhi_ptr); if (r) rmnet_log(MSG_CRITICAL, "Failed to enable iface for chan %d\n", cb_info->chan); else rmnet_log(MSG_INFO, "Enabled iface for chan %d\n", cb_info->chan); } break; case MHI_CB_XFER: if (IS_INBOUND(cb_info->chan)) rmnet_mhi_rx_cb(cb_info->result); else rmnet_mhi_tx_cb(cb_info->result); break; default: break; } }
static void rmnet_mhi_cb(struct mhi_cb_info *cb_info) { struct rmnet_mhi_private *rmnet_mhi_ptr; struct mhi_result *result; enum MHI_STATUS r = MHI_STATUS_SUCCESS; if (NULL != cb_info && NULL != cb_info->result) { result = cb_info->result; rmnet_mhi_ptr = result->user_data; } else { rmnet_log(MSG_CRITICAL, "Invalid data in MHI callback, quitting\n"); } switch (cb_info->cb_reason) { case MHI_CB_MHI_DISABLED: rmnet_log(MSG_CRITICAL, "Got MHI_DISABLED notification. Stopping stack\n"); if (rmnet_mhi_ptr->mhi_enabled) { rmnet_mhi_ptr->mhi_enabled = 0; /* Ensure MHI is disabled before other mem ops */ wmb(); while (atomic_read(&rmnet_mhi_ptr->pending_data)) { rmnet_log(MSG_CRITICAL, "Waiting for channels to stop.\n"); msleep(25); } rmnet_mhi_disable(rmnet_mhi_ptr); } break; case MHI_CB_MHI_ENABLED: rmnet_log(MSG_CRITICAL, "Got MHI_ENABLED notification. Starting stack\n"); if (IS_INBOUND(cb_info->chan)) rmnet_mhi_ptr->rx_enabled = 1; else rmnet_mhi_ptr->tx_enabled = 1; if (rmnet_mhi_ptr->tx_enabled && rmnet_mhi_ptr->rx_enabled) { rmnet_log(MSG_INFO, "Both RX/TX are enabled, enabling iface.\n"); r = rmnet_mhi_enable_iface(rmnet_mhi_ptr); if (r) rmnet_log(MSG_CRITICAL, "Failed to enable iface for chan %d\n", cb_info->chan); else rmnet_log(MSG_INFO, "Enabled iface for chan %d\n", cb_info->chan); } break; case MHI_CB_XFER: atomic_inc(&rmnet_mhi_ptr->pending_data); /* Flush pending data is set before any other mem operations */ wmb(); if (rmnet_mhi_ptr->mhi_enabled) { if (IS_INBOUND(cb_info->chan)) rmnet_mhi_rx_cb(cb_info->result); else rmnet_mhi_tx_cb(cb_info->result); } atomic_dec(&rmnet_mhi_ptr->pending_data); break; default: break; } }
/*
 * sendAllListPackets() - reinject every buffered packet via WinDivert and
 * empty the delay list.
 *
 * Walks the list from tail->prev toward head, popping and sending each node.
 * Relies on file-scope state: head/tail (list sentinels), divertHandle, and
 * sendState (updated with InterlockedExchange16 — presumably read by another
 * thread for status display; TODO(review) confirm).
 *
 * Returns the number of packets processed (every popped node is freed and
 * counted, whether or not its send succeeded).
 */
static int sendAllListPackets() {
    // send packet from tail to head and remove sent ones
    int sendCount = 0;
    UINT sendLen;
    PacketNode *pnode;
#ifdef _DEBUG
    // check the list is good
    // might go into dead loop but it's better for debugging
    PacketNode *p = head;
    do {
        p = p->next;
    } while (p->next);
    assert(p == tail);
#endif

    while (!isListEmpty()) {
        // pop from the tail end so packets leave in arrival order
        pnode = popNode(tail->prev);
        sendLen = 0;
        assert(pnode != head);
        // FIXME inbound injection on any kind of packet is failing with a very high percentage
        //       need to contact windivert auther and wait for next release
        if (!WinDivertSend(divertHandle, pnode->packet, pnode->packetLen, &(pnode->addr), &sendLen)) {
            PWINDIVERT_ICMPHDR icmp_header;
            PWINDIVERT_ICMPV6HDR icmpv6_header;
            PWINDIVERT_IPHDR ip_header;
            PWINDIVERT_IPV6HDR ipv6_header;
            LOG("Failed to send a packet. (%lu)", GetLastError());
            dumpPacket(pnode->packet, pnode->packetLen, &(pnode->addr));
            // as noted in windivert help, reinject inbound icmp packets some times would fail
            // workaround this by resend them as outbound
            // TODO not sure is this even working as can't find a way to test
            //      need to document about this
            WinDivertHelperParsePacket(pnode->packet, pnode->packetLen, &ip_header, &ipv6_header,
                &icmp_header, &icmpv6_header, NULL, NULL, NULL, NULL);
            if ((icmp_header || icmpv6_header) && IS_INBOUND(pnode->addr.Direction)) {
                BOOL resent;
                // flip direction and swap src/dst so the outbound reinjection
                // targets the original sender
                pnode->addr.Direction = WINDIVERT_DIRECTION_OUTBOUND;
                if (ip_header) {
                    UINT32 tmp = ip_header->SrcAddr;
                    ip_header->SrcAddr = ip_header->DstAddr;
                    ip_header->DstAddr = tmp;
                } else if (ipv6_header) {
                    // IPv6 addresses are 4 x UINT32; swap via a temp buffer
                    UINT32 tmpArr[4];
                    memcpy(tmpArr, ipv6_header->SrcAddr, sizeof(tmpArr));
                    memcpy(ipv6_header->SrcAddr, ipv6_header->DstAddr, sizeof(tmpArr));
                    memcpy(ipv6_header->DstAddr, tmpArr, sizeof(tmpArr));
                }
                resent = WinDivertSend(divertHandle, pnode->packet, pnode->packetLen, &(pnode->addr), &sendLen);
                LOG("Resend failed inbound ICMP packets as outbound: %s", resent ? "SUCCESS" : "FAIL");
                // NOTE(review): sendState is set to SEND even when the resend
                // attempt itself failed (only the LOG reports the outcome) —
                // confirm this best-effort status is intentional
                InterlockedExchange16(&sendState, SEND_STATUS_SEND);
            } else {
                InterlockedExchange16(&sendState, SEND_STATUS_FAIL);
            }
        } else {
            if (sendLen < pnode->packetLen) {
                // TODO don't know how this can happen, or it needs to be resent like good old UDP packet
                LOG("Internal Error: DivertSend truncated send packet.");
                InterlockedExchange16(&sendState, SEND_STATUS_FAIL);
            } else {
                InterlockedExchange16(&sendState, SEND_STATUS_SEND);
            }
        }
        // node is freed regardless of send outcome; sendCount counts
        // processed packets, not successful ones
        freeNode(pnode);
        ++sendCount;
    }
    assert(isListEmpty()); // all packets should be sent by now
    return sendCount;
}