/*
 * Drain the device's management RX queue (first copy of this routine
 * in the file). Each fragment is parsed as a PIMFOR frame; TRAP
 * frames are deferred to a workqueue, anything else is treated as the
 * response to the single outstanding request and handed to the waiter
 * on priv->mgmt_wqueue. Uses GFP_ATOMIC, so it is safe to call from
 * interrupt context. Always returns 0.
 */
int islpci_mgt_receive(struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb =
	    (isl38xx_control_block *) priv->control_block;
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive \n");
#endif

	/* Snapshot the device's fragment counter once, up front; barrier()
	 * keeps the compiler from re-reading it inside the loop, which
	 * bounds the work done per call even if frames keep arriving. */
	curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]);
	barrier();

	for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) {
		pimfor_header_t *header;
		/* Ring index: the running counter wrapped to queue size. */
		u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE;
		struct islpci_membuf *buf = &priv->mgmt_rx[index];
		u16 frag_len;
		int size;
		struct islpci_mgmtframe *frame;

		/* Non-zero fragment flags are unexpected here; drop the
		 * fragment and let the buffer be reused. */
		if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) {
			printk(KERN_WARNING "%s: unknown flags 0x%04x\n",
			       ndev->name,
			       le16_to_cpu(cb->rx_data_mgmt[index].flags));
			continue;
		}

		frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size);
		/* A length beyond the buffer means the device already wrote
		 * past it; clamp so that at least our parsing stays in
		 * bounds. */
		if (frag_len > MGMT_FRAME_SIZE) {
			printk(KERN_WARNING
			       "%s: Bogus packet size of %d (%#x).\n",
			       ndev->name, frag_len, frag_len);
			frag_len = MGMT_FRAME_SIZE;
		}

		/* Make the device's DMA writes visible to the CPU before
		 * touching the buffer contents. */
		pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
					    buf->size, PCI_DMA_FROMDEVICE);

		header = pimfor_decode_header(buf->mem, frag_len);
		if (!header) {
			printk(KERN_WARNING "%s: no PIMFOR header found\n",
			       ndev->name);
			continue;
		}

		/* Stamp in a meaningful device id for upper layers. */
		header->device_id = priv->ndev->ifindex;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x \n",
		      header->operation, header->oid, header->device_id,
		      header->flags, header->length);
		display_buffer((char *) header, PIMFOR_HEADER_SIZE);
		display_buffer((char *) header + PIMFOR_HEADER_SIZE,
			       header->length);
#endif

		/* Frames originating from an application should never come
		 * back from the device; discard them. */
		if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) {
			printk(KERN_DEBUG
			       "%s: errant PIMFOR application frame\n",
			       ndev->name);
			continue;
		}

		/* Copy header + payload into a freshly allocated frame;
		 * GFP_ATOMIC because this may run in interrupt context. */
		size = PIMFOR_HEADER_SIZE + header->length;
		frame = kmalloc(sizeof (struct islpci_mgmtframe) + size,
				GFP_ATOMIC);
		if (!frame) {
			printk(KERN_WARNING
			       "%s: Out of memory, cannot handle oid 0x%08x\n",
			       ndev->name, header->oid);
			continue;
		}
		frame->ndev = ndev;
		memcpy(&frame->buf, header, size);
		frame->header = (pimfor_header_t *) frame->buf;
		frame->data = frame->buf + PIMFOR_HEADER_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_PIMFOR_FRAMES,
		      "frame: header: %p, data: %p, size: %d\n",
		      frame->header, frame->data, size);
#endif

		if (header->operation == PIMFOR_OP_TRAP) {
#if VERBOSE > SHOW_ERROR_MESSAGES
			printk(KERN_DEBUG
			       "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n",
			       header->oid, header->device_id,
			       header->flags, header->length);
#endif
			/* Traps are handled out of interrupt context via the
			 * workqueue. */
			INIT_WORK(&frame->ws, prism54_process_trap);
			schedule_work(&frame->ws);
		} else {
			/* Publish the response atomically; if a previous one
			 * was never collected by the waiter, free it rather
			 * than leak it. */
			if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
				printk(KERN_WARNING
				       "%s: mgmt response not collected\n",
				       ndev->name);
				kfree(frame);
			}
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n");
#endif
			wake_up(&priv->mgmt_wqueue);
		}
	}
	return 0;
}
/* * Receive a management frame from the device. * This can be an arbitrary number of traps, and at most one response * frame for a previous request sent via islpci_mgt_transmit(). */ int islpci_mgt_receive(struct net_device *ndev) { islpci_private *priv = netdev_priv(ndev); isl38xx_control_block *cb = (isl38xx_control_block *) priv->control_block; u32 curr_frag; #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n"); #endif /* Only once per interrupt, determine fragment range to * process. This avoids an endless loop (i.e. lockup) if * frames come in faster than we can process them. */ curr_frag = le32_to_cpu(cb->device_curr_frag[ISL38XX_CB_RX_MGMTQ]); barrier(); for (; priv->index_mgmt_rx < curr_frag; priv->index_mgmt_rx++) { pimfor_header_t *header; u32 index = priv->index_mgmt_rx % ISL38XX_CB_MGMT_QSIZE; struct islpci_membuf *buf = &priv->mgmt_rx[index]; u16 frag_len; int size; struct islpci_mgmtframe *frame; /* I have no idea (and no documentation) if flags != 0 * is possible. Drop the frame, reuse the buffer. */ if (le16_to_cpu(cb->rx_data_mgmt[index].flags) != 0) { // printk(KERN_WARNING "%s: unknown flags 0x%04x\n", // ndev->name, ; continue; } /* The device only returns the size of the header(s) here. */ frag_len = le16_to_cpu(cb->rx_data_mgmt[index].size); /* * We appear to have no way to tell the device the * size of a receive buffer. Thus, if this check * triggers, we likely have kernel heap corruption. */ if (frag_len > MGMT_FRAME_SIZE) { // printk(KERN_WARNING // "%s: Bogus packet size of %d (%#x).\n", ; frag_len = MGMT_FRAME_SIZE; } /* Ensure the results of device DMA are visible to the CPU. */ pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr, buf->size, PCI_DMA_FROMDEVICE); /* Perform endianess conversion for PIMFOR header in-place. 
*/ header = pimfor_decode_header(buf->mem, frag_len); if (!header) { // printk(KERN_WARNING "%s: no PIMFOR header found\n", ; continue; } /* The device ID from the PIMFOR packet received from * the MVC is always 0. We forward a sensible device_id. * Not that anyone upstream would care... */ header->device_id = priv->ndev->ifindex; #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_PIMFOR_FRAMES, "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n", header->operation, header->oid, header->device_id, header->flags, header->length); /* display the buffer contents for debugging */ display_buffer((char *) header, PIMFOR_HEADER_SIZE); display_buffer((char *) header + PIMFOR_HEADER_SIZE, header->length); #endif /* nobody sends these */ if (header->flags & PIMFOR_FLAG_APPLIC_ORIGIN) { // printk(KERN_DEBUG // "%s: errant PIMFOR application frame\n", ; continue; } /* Determine frame size, skipping OID_INL_TUNNEL headers. */ size = PIMFOR_HEADER_SIZE + header->length; frame = kmalloc(sizeof (struct islpci_mgmtframe) + size, GFP_ATOMIC); if (!frame) { // printk(KERN_WARNING // "%s: Out of memory, cannot handle oid 0x%08x\n", ; continue; } frame->ndev = ndev; memcpy(&frame->buf, header, size); frame->header = (pimfor_header_t *) frame->buf; frame->data = frame->buf + PIMFOR_HEADER_SIZE; #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_PIMFOR_FRAMES, "frame: header: %p, data: %p, size: %d\n", frame->header, frame->data, size); #endif if (header->operation == PIMFOR_OP_TRAP) { #if VERBOSE > SHOW_ERROR_MESSAGES // printk(KERN_DEBUG // "TRAP: oid 0x%x, device %i, flags 0x%x length %i\n", // header->oid, header->device_id, header->flags, ; #endif /* Create work to handle trap out of interrupt * context. */ INIT_WORK(&frame->ws, prism54_process_trap); schedule_work(&frame->ws); } else { /* Signal the one waiting process that a response * has been received. 
*/ if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) { // printk(KERN_WARNING // "%s: mgmt response not collected\n", ; kfree(frame); } #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, "Wake up Mgmt Queue\n"); #endif wake_up(&priv->mgmt_wqueue); } } return 0; }
static void ntl_receive(struct sock *sk, int len) { int err; struct sk_buff *skb; pid_t pid; struct nlmsghdr *nl_header; __u32 seq; struct pimfor_hdr *header; char *data; int version; int operation; unsigned long oid; int dev_id; int flags; unsigned long length; struct net_device *dev; #ifdef DRIVER_DEBUG printk(KERN_INFO "ntl_receive: sock %p, len %d \n", sk, len); #endif do { if (rtnl_shlock_nowait()) return; while ( (skb = skb_dequeue(&sk->receive_queue)) != NULL ) { pid = NETLINK_CB(skb).pid; nl_header = (struct nlmsghdr*) skb->data; seq = nl_header->nlmsg_seq; header = (struct pimfor_hdr*)(skb->data+(sizeof(struct nlmsghdr))); data = PIMFOR_DATA(header); if ( nl_header->nlmsg_type == NETLINK_TYPE_PIMFOR ) { pimfor_decode_header(header, &version, &operation, &oid, &dev_id, &flags, &length); if (version == PIMFOR_VERSION_1) { err = mgt_request( DEV_NETWORK, dev_id, pid, seq, operation, oid, data, length ); if ( err < 0 ) { printk(KERN_INFO "ntl_receive: mgt_request(%d, %d, %d, %x, %d, %x, %d) returned %d\n", DEV_NETWORK, dev_id, pid, seq, operation, oid, length, err ); netlink_ack(skb, nl_header, -EOPNOTSUPP ); } } else { printk(KERN_ERR "ntl_receive: version (%d) != PIMFOR_VERSION_1\n", version ); netlink_ack(skb, nl_header, -EOPNOTSUPP ); } } else { printk(KERN_ERR "nl_header->nlmsg_type (%d) != NETLINK_TYPE_PIMFOR\n", nl_header->nlmsg_type ); netlink_ack(skb, nl_header, -EOPNOTSUPP ); } kfree_skb(skb); } up(&rtnl_sem); } while (nl_sock && nl_sock->receive_queue.qlen); }