/**
 * gelic_net_release_tx_descr - processes a used tx descriptor
 * @card: card structure
 * @descr: descriptor to release
 *
 * releases a used tx descriptor (unmapping, freeing of skb)
 */
static void gelic_net_release_tx_descr(struct gelic_net_card *card,
				       struct gelic_net_descr *descr)
{
	struct sk_buff *skb;

	if (descr->data_status & (1 << GELIC_NET_TXDESC_TAIL)) {
		/* 2nd descriptor */
		skb = descr->skb;
		dma_unmap_single(ctodev(card), descr->buf_addr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dma_unmap_single(ctodev(card), descr->buf_addr,
				 descr->buf_size, DMA_TO_DEVICE);
	}

	descr->buf_addr = 0;
	descr->buf_size = 0;
	descr->next_descr_addr = 0;
	descr->result_size = 0;
	descr->valid_size = 0;
	descr->data_status = 0;
	descr->data_error = 0;
	descr->skb = NULL;

	/* set descr status */
	gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE);
}
/**
 * gelic_net_open_device - open device and map dma region
 * @card: card structure
 */
static int gelic_net_open_device(struct gelic_net_card *card)
{
	int result;

	result = ps3_sb_event_receive_port_setup(card->dev, PS3_BINDING_CPU_ANY,
						 &card->netdev->irq);

	if (result) {
		dev_info(ctodev(card),
			 "%s:%d: gelic_net_open_device failed (%d)\n",
			 __func__, __LINE__, result);
		result = -EPERM;
		goto fail_alloc_irq;
	}

	result = request_irq(card->netdev->irq, gelic_net_interrupt,
			     IRQF_DISABLED, card->netdev->name, card->netdev);

	if (result) {
		dev_info(ctodev(card), "%s:%d: request_irq failed (%d)\n",
			 __func__, __LINE__, result);
		goto fail_request_irq;
	}

	return 0;

fail_request_irq:
	ps3_sb_event_receive_port_destroy(card->dev, card->netdev->irq);
	card->netdev->irq = NO_IRQ;
fail_alloc_irq:
	return result;
}
/**
 * gelic_net_set_multi - sets multicast addresses and promisc flags
 * @netdev: interface device structure
 *
 * gelic_net_set_multi configures multicast addresses as needed for the
 * netdev interface. It also sets up multicast, allmulti and promisc
 * flags appropriately
 */
void gelic_net_set_multi(struct net_device *netdev)
{
	struct gelic_card *card = netdev_card(netdev);
	struct dev_mc_list *mc;
	unsigned int i;
	uint8_t *p;
	u64 addr;
	int status;

	/* clear all multicast addresses */
	status = lv1_net_remove_multicast_address(bus_id(card), dev_id(card),
						  0, 1);
	if (status)
		dev_err(ctodev(card),
			"lv1_net_remove_multicast_address failed %d\n",
			status);

	/* set broadcast address */
	status = lv1_net_add_multicast_address(bus_id(card), dev_id(card),
					       GELIC_NET_BROADCAST_ADDR, 0);
	if (status)
		dev_err(ctodev(card),
			"lv1_net_add_multicast_address failed, %d\n",
			status);

	if ((netdev->flags & IFF_ALLMULTI) ||
	    (netdev->mc_count > GELIC_NET_MC_COUNT_MAX)) {
		status = lv1_net_add_multicast_address(bus_id(card),
						       dev_id(card),
						       0, 1);
		if (status)
			dev_err(ctodev(card),
				"lv1_net_add_multicast_address failed, %d\n",
				status);
		return;
	}

	/* set multicast addresses */
	for (mc = netdev->mc_list; mc; mc = mc->next) {
		addr = 0;
		p = mc->dmi_addr;
		for (i = 0; i < ETH_ALEN; i++) {
			addr <<= 8;
			addr |= *p++;
		}
		status = lv1_net_add_multicast_address(bus_id(card),
						       dev_id(card),
						       addr, 0);
		if (status)
			dev_err(ctodev(card),
				"lv1_net_add_multicast_address failed, %d\n",
				status);
	}
}
/**
 * gelic_card_init_chain - links descriptor chain
 * @card: card structure
 * @chain: address of chain
 * @start_descr: address of descriptor array
 * @no: number of descriptors
 *
 * we manage a circular list that mirrors the hardware structure,
 * except that the hardware uses bus addresses.
 *
 * returns 0 on success, <0 on failure
 */
static int __devinit gelic_card_init_chain(struct gelic_card *card,
					   struct gelic_descr_chain *chain,
					   struct gelic_descr *start_descr,
					   int no)
{
	int i;
	struct gelic_descr *descr;

	descr = start_descr;
	memset(descr, 0, sizeof(*descr) * no);

	/* set up the hardware pointers in each descriptor */
	for (i = 0; i < no; i++, descr++) {
		gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
		descr->bus_addr =
			dma_map_single(ctodev(card), descr,
				       GELIC_DESCR_SIZE,
				       DMA_BIDIRECTIONAL);

		if (!descr->bus_addr)
			goto iommu_error;

		descr->next = descr + 1;
		descr->prev = descr - 1;
	}
	/* make them as ring */
	(descr - 1)->next = start_descr;
	start_descr->prev = (descr - 1);

	/* chain bus addr of hw descriptor */
	descr = start_descr;
	for (i = 0; i < no; i++, descr++) {
		descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
	}

	chain->head = start_descr;
	chain->tail = start_descr;

	/* do not chain last hw descriptor */
	(descr - 1)->next_descr_addr = 0;

	return 0;

iommu_error:
	for (i--, descr--; 0 <= i; i--, descr--)
		if (descr->bus_addr)
			dma_unmap_single(ctodev(card), descr->bus_addr,
					 GELIC_DESCR_SIZE,
					 DMA_BIDIRECTIONAL);
	return -ENOMEM;
}
/**
 * gelic_net_open - called upon ifconfig up
 * @netdev: interface device structure
 *
 * returns 0 on success, <0 on failure
 *
 * gelic_net_open allocates all the descriptors and memory needed for
 * operation, sets up multicast list and enables interrupts
 */
int gelic_net_open(struct net_device *netdev)
{
	struct gelic_card *card = netdev_card(netdev);

	dev_dbg(ctodev(card), " -> %s %p\n", __func__, netdev);

	gelic_card_up(card);

	netif_start_queue(netdev);
	gelic_card_get_ether_port_status(card, 1);

	dev_dbg(ctodev(card), " <- %s\n", __func__);
	return 0;
}
/**
 * gelic_net_pass_skb_up - takes an skb from a descriptor and passes it on
 * @descr: descriptor to process
 * @card: card structure
 * @netdev: net_device structure to be passed packet
 *
 * iommu-unmaps the skb, fills out skb structure and passes the data to the
 * stack. The descriptor state is not changed.
 */
static void gelic_net_pass_skb_up(struct gelic_descr *descr,
				  struct gelic_card *card,
				  struct net_device *netdev)
{
	struct sk_buff *skb = descr->skb;
	u32 data_status, data_error;

	data_status = be32_to_cpu(descr->data_status);
	data_error = be32_to_cpu(descr->data_error);

	/* unmap skb buffer */
	dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
			 GELIC_NET_MAX_MTU,
			 DMA_FROM_DEVICE);

	skb_put(skb, be32_to_cpu(descr->valid_size) ?
		be32_to_cpu(descr->valid_size) :
		be32_to_cpu(descr->result_size));
	if (!descr->valid_size)
		dev_info(ctodev(card), "buffer full %x %x %x\n",
			 be32_to_cpu(descr->result_size),
			 be32_to_cpu(descr->buf_size),
			 be32_to_cpu(descr->dmac_cmd_status));

	descr->skb = NULL;
	/*
	 * the card put 2 bytes vlan tag in front
	 * of the ethernet frame
	 */
	skb_pull(skb, 2);
	skb->protocol = eth_type_trans(skb, netdev);

	/* checksum offload */
	if (card->rx_csum) {
		if ((data_status & GELIC_DESCR_DATA_STATUS_CHK_MASK) &&
		    (!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	/* update netdevice statistics */
	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += skb->len;

	/* pass skb up to stack */
	netif_receive_skb(skb);
}
/**
 * gelic_net_prepare_rx_descr - reinitializes a rx descriptor
 * @card: card structure
 * @descr: descriptor to re-init
 *
 * return 0 on success, <0 on failure
 *
 * allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
 * Activate the descriptor state-wise
 */
static int gelic_net_prepare_rx_descr(struct gelic_net_card *card,
				      struct gelic_net_descr *descr)
{
	int offset;
	unsigned int bufsize;

	if (gelic_net_get_descr_status(descr) != GELIC_NET_DESCR_NOT_IN_USE) {
		dev_info(ctodev(card), "%s: ERROR status\n", __func__);
	}

	/* we need to round up the buffer size to a multiple of 128 */
	bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);

	/* and we need to have it 128 byte aligned, therefore we allocate a
	 * bit more */
	descr->skb = netdev_alloc_skb(card->netdev,
				      bufsize + GELIC_NET_RXBUF_ALIGN - 1);
	if (!descr->skb) {
		descr->buf_addr = 0; /* tell DMAC don't touch memory */
		dev_info(ctodev(card),
			 "%s:allocate skb failed !!\n", __func__);
		return -ENOMEM;
	}
	descr->buf_size = bufsize;
	descr->dmac_cmd_status = 0;
	descr->result_size = 0;
	descr->valid_size = 0;
	descr->data_error = 0;

	offset = ((unsigned long)descr->skb->data) &
		(GELIC_NET_RXBUF_ALIGN - 1);
	if (offset)
		skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);

	/* io-mmu-map the skb */
	descr->buf_addr = dma_map_single(ctodev(card), descr->skb->data,
					 GELIC_NET_MAX_MTU,
					 DMA_FROM_DEVICE);
	if (!descr->buf_addr) {
		dev_kfree_skb_any(descr->skb);
		descr->skb = NULL;
		dev_info(ctodev(card),
			 "%s:Could not iommu-map rx buffer\n", __func__);
		gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE);
		return -ENOMEM;
	} else {
		gelic_net_set_descr_status(descr, GELIC_NET_DESCR_CARDOWNED);
		return 0;
	}
}
/**
 * gelic_net_xmit - transmits a frame over the device
 * @skb: packet to send out
 * @netdev: interface device structure
 *
 * returns 0 on success, <0 on failure
 */
int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct gelic_card *card = netdev_card(netdev);
	struct gelic_descr *descr;
	int result;
	unsigned long flags;

	spin_lock_irqsave(&card->tx_lock, flags);

	gelic_card_release_tx_chain(card, 0);

	descr = gelic_card_get_next_tx_descr(card);
	if (!descr) {
		/*
		 * no more descriptors free
		 */
		gelic_card_stop_queues(card);
		spin_unlock_irqrestore(&card->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	result = gelic_descr_prepare_tx(card, descr, skb);
	if (result) {
		/*
		 * DMA map failed.  As chances are that failure
		 * would continue, just release skb and return
		 */
		netdev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		spin_unlock_irqrestore(&card->tx_lock, flags);
		return NETDEV_TX_OK;
	}
	/*
	 * link this prepared descriptor to previous one
	 * to achieve high performance
	 */
	descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr);
	/*
	 * as hardware descriptor is modified in the above lines,
	 * ensure that the hardware sees it
	 */
	wmb();
	if (gelic_card_kick_txdma(card, descr)) {
		/*
		 * kick failed.
		 * release descriptors which were just prepared
		 */
		netdev->stats.tx_dropped++;
		gelic_descr_release_tx(card, descr);
		gelic_descr_release_tx(card, descr->next);
		card->tx_chain.tail = descr->next->next;
		dev_info(ctodev(card), "%s: kick failure\n", __func__);
	} else {
		/* OK, DMA started/reserved */
		netdev->trans_start = jiffies;
	}

	spin_unlock_irqrestore(&card->tx_lock, flags);
	return NETDEV_TX_OK;
}
/**
 * gelic_net_kick_txdma - enables TX DMA processing
 * @card: card structure
 * @descr: descriptor address to enable TX processing at
 *
 */
static int gelic_net_kick_txdma(struct gelic_net_card *card,
				struct gelic_net_descr *descr)
{
	int status = 0;
	int count = 10;

	if (card->tx_dma_progress)
		return 0;

	if (gelic_net_get_descr_status(descr) == GELIC_NET_DESCR_CARDOWNED) {
		card->tx_dma_progress = 1;
		/* sometimes we need to retry here */
		while (count--) {
			status = lv1_net_start_tx_dma(bus_id(card),
						      dev_id(card),
						      descr->bus_addr, 0);
			if (!status)
				break;
		}
		if (!count)
			dev_info(ctodev(card),
				 "lv1_net_start_txdma failed, status=%d %#lx\n",
				 status, card->irq_status);
	}
	return status;
}
/**
 * gelic_net_open - called upon ifconfig up
 * @netdev: interface device structure
 *
 * returns 0 on success, <0 on failure
 *
 * gelic_net_open allocates all the descriptors and memory needed for
 * operation, sets up multicast list and enables interrupts
 */
static int gelic_net_open(struct net_device *netdev)
{
	struct gelic_net_card *card = netdev_priv(netdev);

	dev_dbg(ctodev(card), " -> %s:%d\n", __func__, __LINE__);

	gelic_net_open_device(card);

	if (gelic_net_init_chain(card, &card->tx_chain,
				 card->descr, GELIC_NET_TX_DESCRIPTORS))
		goto alloc_tx_failed;
	if (gelic_net_init_chain(card, &card->rx_chain,
				 card->descr + GELIC_NET_TX_DESCRIPTORS,
				 GELIC_NET_RX_DESCRIPTORS))
		goto alloc_rx_failed;

	/* head of chain */
	card->tx_top = card->tx_chain.head;
	card->rx_top = card->rx_chain.head;
	dev_dbg(ctodev(card), "descr rx %p, tx %p, size %#lx, num %#x\n",
		card->rx_top, card->tx_top, sizeof(struct gelic_net_descr),
		GELIC_NET_RX_DESCRIPTORS);

	/* allocate rx skbs */
	if (gelic_net_alloc_rx_skbs(card))
		goto alloc_skbs_failed;

	card->tx_dma_progress = 0;
	card->ghiintmask = GELIC_NET_RXINT | GELIC_NET_TXINT;

	gelic_net_set_irq_mask(card, card->ghiintmask);
	gelic_net_enable_rxdmac(card);

	netif_start_queue(netdev);
	netif_carrier_on(netdev);

	return 0;

alloc_skbs_failed:
	gelic_net_free_chain(card, card->rx_top);
alloc_rx_failed:
	gelic_net_free_chain(card, card->tx_top);
alloc_tx_failed:
	return -ENOMEM;
}
/**
 * gelic_card_disable_rxdmac - disables the receive DMA controller
 * @card: card structure
 *
 * gelic_card_disable_rxdmac terminates processing on the DMA controller
 * by turning off DMA and issuing a force end
 */
static void gelic_card_disable_rxdmac(struct gelic_card *card)
{
	int status;

	/* this hvc blocks until the DMA in progress really stopped */
	status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card));
	if (status)
		dev_err(ctodev(card),
			"lv1_net_stop_rx_dma failed, %d\n", status);
}
/**
 * gelic_net_disable_txdmac - disables the transmit DMA controller
 * @card: card structure
 *
 * gelic_net_disable_txdmac terminates processing on the DMA controller
 * by turning off DMA and issuing a force end
 */
static inline void gelic_net_disable_txdmac(struct gelic_net_card *card)
{
	int status;

	/* this hvc blocks until the DMA in progress really stopped */
	status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card), 0);
	if (status)
		dev_err(ctodev(card),
			"lv1_net_stop_tx_dma failed, status=%d\n", status);
}
/**
 * gelic_net_enable_rxdmac - enables the receive DMA controller
 * @card: card structure
 *
 * gelic_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
 * in the GDADMACCNTR register
 */
static inline void gelic_net_enable_rxdmac(struct gelic_net_card *card)
{
	int status;

	status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
				      card->rx_chain.tail->bus_addr, 0);
	if (status)
		dev_info(ctodev(card),
			 "lv1_net_start_rx_dma failed, status=%d\n", status);
}
/**
 * gelic_card_set_irq_mask - sets the card's interrupt mask
 * @card: card structure
 * @mask: interrupt mask to set
 */
int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask)
{
	int status;

	status = lv1_net_set_interrupt_mask(bus_id(card), dev_id(card),
					    mask, 0);
	if (status)
		dev_info(ctodev(card),
			 "%s failed %d\n", __func__, status);
	return status;
}
/**
 * gelic_card_free_chain - free descriptor chain
 * @card: card structure
 * @descr_in: address of desc
 */
static void gelic_card_free_chain(struct gelic_card *card,
				  struct gelic_descr *descr_in)
{
	struct gelic_descr *descr;

	for (descr = descr_in; descr && descr->bus_addr; descr = descr->next) {
		dma_unmap_single(ctodev(card), descr->bus_addr,
				 GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
		descr->bus_addr = 0;
	}
}
/**
 * gelic_net_setup_netdev - initialization of net_device
 * @netdev: net_device structure
 * @card: card structure
 *
 * Returns 0 on success or <0 on failure
 *
 * gelic_net_setup_netdev initializes the net_device structure
 * and registers it.
 **/
int __devinit gelic_net_setup_netdev(struct net_device *netdev,
				     struct gelic_card *card)
{
	int status;
	u64 v1, v2;

	netdev->features = NETIF_F_IP_CSUM;

	status = lv1_net_control(bus_id(card), dev_id(card),
				 GELIC_LV1_GET_MAC_ADDRESS,
				 0, 0, 0, &v1, &v2);
	v1 <<= 16;
	if (status || !is_valid_ether_addr((u8 *)&v1)) {
		dev_info(ctodev(card),
			 "%s:lv1_net_control GET_MAC_ADDR failed %d\n",
			 __func__, status);
		return -EINVAL;
	}
	memcpy(netdev->dev_addr, &v1, ETH_ALEN);

	if (card->vlan_required) {
		netdev->hard_header_len += VLAN_HLEN;
		/*
		 * As vlan is internally used,
		 * we can not receive vlan packets
		 */
		netdev->features |= NETIF_F_VLAN_CHALLENGED;
	}

	status = register_netdev(netdev);
	if (status) {
		dev_err(ctodev(card), "%s:Couldn't register %s %d\n",
			__func__, netdev->name, status);
		return status;
	}
	dev_info(ctodev(card), "%s: MAC addr %pM\n",
		 netdev->name, netdev->dev_addr);

	return 0;
}
/**
 * gelic_descr_prepare_tx - setup a descriptor for sending packets
 * @card: card structure
 * @descr: descriptor structure
 * @skb: packet to use
 *
 * returns 0 on success, <0 on failure.
 *
 */
static int gelic_descr_prepare_tx(struct gelic_card *card,
				  struct gelic_descr *descr,
				  struct sk_buff *skb)
{
	dma_addr_t buf;

	if (card->vlan_required) {
		struct sk_buff *skb_tmp;
		enum gelic_port_type type;

		type = netdev_port(skb->dev)->type;
		skb_tmp = gelic_put_vlan_tag(skb, card->vlan[type].tx);
		if (!skb_tmp)
			return -ENOMEM;
		skb = skb_tmp;
	}

	buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE);

	if (!buf) {
		dev_err(ctodev(card),
			"dma map 2 failed (%p, %i). Dropping packet\n",
			skb->data, skb->len);
		return -ENOMEM;
	}

	descr->buf_addr = cpu_to_be32(buf);
	descr->buf_size = cpu_to_be32(skb->len);
	descr->skb = skb;
	descr->data_status = 0;
	descr->next_descr_addr = 0; /* terminate hw descr */
	gelic_descr_set_tx_cmdstat(descr, skb);

	/* bump free descriptor pointer */
	card->tx_chain.head = descr->next;
	return 0;
}
/**
 * gelic_card_release_tx_chain - processes sent tx descriptors
 * @card: adapter structure
 * @stop: net_stop sequence
 *
 * releases the tx descriptors that gelic has finished with
 */
static void gelic_card_release_tx_chain(struct gelic_card *card, int stop)
{
	struct gelic_descr_chain *tx_chain;
	enum gelic_descr_dma_status status;
	struct net_device *netdev;
	int release = 0;

	for (tx_chain = &card->tx_chain;
	     tx_chain->head != tx_chain->tail && tx_chain->tail;
	     tx_chain->tail = tx_chain->tail->next) {
		status = gelic_descr_get_status(tx_chain->tail);
		netdev = tx_chain->tail->skb->dev;
		switch (status) {
		case GELIC_DESCR_DMA_RESPONSE_ERROR:
		case GELIC_DESCR_DMA_PROTECTION_ERROR:
		case GELIC_DESCR_DMA_FORCE_END:
			if (printk_ratelimit())
				dev_info(ctodev(card),
					 "%s: forcing end of tx descriptor "
					 "with status %x\n",
					 __func__, status);
			netdev->stats.tx_dropped++;
			break;

		case GELIC_DESCR_DMA_COMPLETE:
			if (tx_chain->tail->skb) {
				netdev->stats.tx_packets++;
				netdev->stats.tx_bytes +=
					tx_chain->tail->skb->len;
			}
			break;

		case GELIC_DESCR_DMA_CARDOWNED:
			/* pending tx request */
		default:
			/* any other value (== GELIC_DESCR_DMA_NOT_IN_USE) */
			if (!stop)
				goto out;
		}
		gelic_descr_release_tx(card, tx_chain->tail);
		release++;
	}
out:
	if (!stop && release)
		gelic_card_wake_queues(card);
}
/**
 * gelic_card_kick_txdma - enables TX DMA processing
 * @card: card structure
 * @descr: descriptor address to enable TX processing at
 *
 */
static int gelic_card_kick_txdma(struct gelic_card *card,
				 struct gelic_descr *descr)
{
	int status = 0;

	if (card->tx_dma_progress)
		return 0;

	if (gelic_descr_get_status(descr) == GELIC_DESCR_DMA_CARDOWNED) {
		card->tx_dma_progress = 1;
		status = lv1_net_start_tx_dma(bus_id(card), dev_id(card),
					      descr->bus_addr, 0);
		if (status)
			dev_info(ctodev(card),
				 "lv1_net_start_txdma failed, status=%d\n",
				 status);
	}
	return status;
}
/**
 * gelic_card_release_rx_chain - free all skb of rx descr
 * @card: card structure
 *
 */
static void gelic_card_release_rx_chain(struct gelic_card *card)
{
	struct gelic_descr *descr = card->rx_chain.head;

	do {
		if (descr->skb) {
			dma_unmap_single(ctodev(card),
					 be32_to_cpu(descr->buf_addr),
					 descr->skb->len,
					 DMA_FROM_DEVICE);
			descr->buf_addr = 0;
			dev_kfree_skb_any(descr->skb);
			descr->skb = NULL;
			gelic_descr_set_status(descr,
					       GELIC_DESCR_DMA_NOT_IN_USE);
		}
		descr = descr->next;
	} while (descr != card->rx_chain.head);
}
/**
 * gelic_net_tx_timeout_task - task scheduled by the watchdog timeout
 * function (to be called not under interrupt status)
 * @work: work is context of tx timeout task
 *
 * called as task when tx hangs, resets interface (if interface is up)
 */
static void gelic_net_tx_timeout_task(struct work_struct *work)
{
	struct gelic_card *card =
		container_of(work, struct gelic_card, tx_timeout_task);
	struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET];

	dev_info(ctodev(card), "%s:Timed out. Restarting...\n", __func__);

	if (!(netdev->flags & IFF_UP))
		goto out;

	netif_device_detach(netdev);
	gelic_net_stop(netdev);

	gelic_net_open(netdev);
	netif_device_attach(netdev);

out:
	atomic_dec(&card->tx_timeout_task_counter);
}
/**
 * gelic_card_enable_rxdmac - enables the receive DMA controller
 * @card: card structure
 *
 * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
 * in the GDADMACCNTR register
 */
static void gelic_card_enable_rxdmac(struct gelic_card *card)
{
	int status;

#ifdef DEBUG
	if (gelic_descr_get_status(card->rx_chain.head) !=
	    GELIC_DESCR_DMA_CARDOWNED) {
		printk(KERN_ERR "%s: status=%x\n", __func__,
		       be32_to_cpu(card->rx_chain.head->dmac_cmd_status));
		printk(KERN_ERR "%s: nextphy=%x\n", __func__,
		       be32_to_cpu(card->rx_chain.head->next_descr_addr));
		printk(KERN_ERR "%s: head=%p\n", __func__,
		       card->rx_chain.head);
	}
#endif
	status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
				      card->rx_chain.head->bus_addr, 0);
	if (status)
		dev_info(ctodev(card),
			 "lv1_net_start_rx_dma failed, status=%d\n", status);
}
/**
 * gelic_descr_release_tx - processes a used tx descriptor
 * @card: card structure
 * @descr: descriptor to release
 *
 * releases a used tx descriptor (unmapping, freeing of skb)
 */
static void gelic_descr_release_tx(struct gelic_card *card,
				   struct gelic_descr *descr)
{
	struct sk_buff *skb = descr->skb;

	BUG_ON(!(be32_to_cpu(descr->data_status) & GELIC_DESCR_TX_TAIL));

	dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), skb->len,
			 DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);

	descr->buf_addr = 0;
	descr->buf_size = 0;
	descr->next_descr_addr = 0;
	descr->result_size = 0;
	descr->valid_size = 0;
	descr->data_status = 0;
	descr->data_error = 0;
	descr->skb = NULL;

	/* set descr status */
	gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
}
/**
 * gelic_net_prepare_tx_descr_v - get dma address of skb_data
 * @card: card structure
 * @descr: descriptor structure
 * @skb: packet to use
 *
 * returns 0 on success, <0 on failure.
 *
 */
static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card,
					struct gelic_net_descr *descr,
					struct sk_buff *skb)
{
	dma_addr_t buf[2];
	unsigned int vlan_len;
	struct gelic_net_descr *sec_descr = descr->next;

	if (skb->len < GELIC_NET_VLAN_POS)
		return -EINVAL;

	vlan_len = GELIC_NET_VLAN_POS;
	memcpy(&descr->vlan, skb->data, vlan_len);
	if (card->vlan_index != -1) {
		/* internal vlan tag used */
		descr->vlan.h_vlan_proto = htons(ETH_P_8021Q); /* vlan 0x8100 */
		descr->vlan.h_vlan_TCI = htons(card->vlan_id[card->vlan_index]);
		vlan_len += VLAN_HLEN; /* added for above two lines */
	}

	/* map data area */
	buf[0] = dma_map_single(ctodev(card), &descr->vlan,
				vlan_len, DMA_TO_DEVICE);

	if (!buf[0]) {
		dev_err(ctodev(card),
			"dma map 1 failed (%p, %i). Dropping packet\n",
			skb->data, vlan_len);
		return -ENOMEM;
	}

	buf[1] = dma_map_single(ctodev(card), skb->data + GELIC_NET_VLAN_POS,
				skb->len - GELIC_NET_VLAN_POS,
				DMA_TO_DEVICE);

	if (!buf[1]) {
		dev_err(ctodev(card),
			"dma map 2 failed (%p, %i). Dropping packet\n",
			skb->data + GELIC_NET_VLAN_POS,
			skb->len - GELIC_NET_VLAN_POS);
		dma_unmap_single(ctodev(card), buf[0], vlan_len,
				 DMA_TO_DEVICE);
		return -ENOMEM;
	}

	/* first descr */
	descr->buf_addr = buf[0];
	descr->buf_size = vlan_len;
	descr->skb = NULL; /* not used */
	descr->data_status = 0;
	descr->next_descr_addr = descr->next->bus_addr;
	gelic_net_set_txdescr_cmdstat(descr, skb, 1); /* not the frame end */

	/* second descr */
	sec_descr->buf_addr = buf[1];
	sec_descr->buf_size = skb->len - GELIC_NET_VLAN_POS;
	sec_descr->skb = skb;
	sec_descr->data_status = 0;
	sec_descr->next_descr_addr = 0; /* terminate hw descr */
	gelic_net_set_txdescr_cmdstat(sec_descr, skb, 0);

	/* bump free descriptor pointer */
	card->tx_chain.head = sec_descr->next;

	return 0;
}
/**
 * gelic_net_decode_one_descr - processes an rx descriptor
 * @card: card structure
 *
 * returns 1 if a packet has been sent to the stack, otherwise 0
 *
 * processes an rx descriptor by iommu-unmapping the data buffer and passing
 * the packet up to the stack
 */
static int gelic_net_decode_one_descr(struct gelic_net_card *card)
{
	enum gelic_net_descr_status status;
	struct gelic_net_descr_chain *chain = &card->rx_chain;
	struct gelic_net_descr *descr = chain->tail;
	int dmac_chain_ended;

	status = gelic_net_get_descr_status(descr);
	/* is this descriptor terminated with next_descr == NULL? */
	dmac_chain_ended =
		descr->dmac_cmd_status & GELIC_NET_DMAC_CMDSTAT_RXDCEIS;

	if (status == GELIC_NET_DESCR_CARDOWNED)
		return 0;

	if (status == GELIC_NET_DESCR_NOT_IN_USE) {
		dev_dbg(ctodev(card), "dormant descr? %p\n", descr);
		return 0;
	}

	if ((status == GELIC_NET_DESCR_RESPONSE_ERROR) ||
	    (status == GELIC_NET_DESCR_PROTECTION_ERROR) ||
	    (status == GELIC_NET_DESCR_FORCE_END)) {
		dev_info(ctodev(card), "dropping RX descriptor with state %x\n",
			 status);
		card->netdev->stats.rx_dropped++;
		goto refill;
	}

	if (status == GELIC_NET_DESCR_BUFFER_FULL) {
		/*
		 * Buffer full would occur if and only if
		 * the frame length was longer than the size of this
		 * descriptor's buffer. If the frame length was equal
		 * to or shorter than the buffer's size, the FRAME_END
		 * condition would occur.
		 * Anyway this frame was longer than the MTU,
		 * just drop it.
		 */
		dev_info(ctodev(card), "overlength frame\n");
		goto refill;
	}

	/*
	 * descriptors in any state other than FRAME_END here should
	 * be treated as errors.
	 */
	if (status != GELIC_NET_DESCR_FRAME_END) {
		dev_dbg(ctodev(card), "RX descriptor with state %x\n",
			status);
		goto refill;
	}

	/* ok, we've got a packet in descr */
	gelic_net_pass_skb_up(descr, card);
refill:
	/*
	 * So that the DMAC can always see the end
	 * of the descriptor chain, to avoid an
	 * unwanted DMAC overrun.
	 */
	descr->next_descr_addr = 0;

	/* change the descriptor state: */
	gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE);

	/*
	 * this call can fail, but for now, just leave this
	 * descriptor without skb
	 */
	gelic_net_prepare_rx_descr(card, descr);

	chain->head = descr;
	chain->tail = descr->next;

	/*
	 * Set this descriptor the end of the chain.
	 */
	descr->prev->next_descr_addr = descr->bus_addr;

	/*
	 * If the end of the dmac chain was reached, the DMAC stopped,
	 * thus re-enable it
	 */
	if (dmac_chain_ended) {
		card->rx_dma_restart_required = 1;
		dev_dbg(ctodev(card), "reenable rx dma scheduled\n");
	}

	return 1;
}
/**
 * gelic_card_decode_one_descr - processes an rx descriptor
 * @card: card structure
 *
 * returns 1 if a packet has been sent to the stack, otherwise 0
 *
 * processes an rx descriptor by iommu-unmapping the data buffer and passing
 * the packet up to the stack
 */
static int gelic_card_decode_one_descr(struct gelic_card *card)
{
	enum gelic_descr_dma_status status;
	struct gelic_descr_chain *chain = &card->rx_chain;
	struct gelic_descr *descr = chain->head;
	struct net_device *netdev = NULL;
	int dmac_chain_ended;

	status = gelic_descr_get_status(descr);
	/* is this descriptor terminated with next_descr == NULL? */
	dmac_chain_ended =
		be32_to_cpu(descr->dmac_cmd_status) &
		GELIC_DESCR_RX_DMA_CHAIN_END;

	if (status == GELIC_DESCR_DMA_CARDOWNED)
		return 0;

	if (status == GELIC_DESCR_DMA_NOT_IN_USE) {
		dev_dbg(ctodev(card), "dormant descr? %p\n", descr);
		return 0;
	}

	/* netdevice select */
	if (card->vlan_required) {
		unsigned int i;
		u16 vid;

		vid = *(u16 *)(descr->skb->data) & VLAN_VID_MASK;
		for (i = 0; i < GELIC_PORT_MAX; i++) {
			if (card->vlan[i].rx == vid) {
				netdev = card->netdev[i];
				break;
			}
		}
		if (GELIC_PORT_MAX <= i) {
			pr_info("%s: unknown packet vid=%x\n", __func__, vid);
			goto refill;
		}
	} else
		netdev = card->netdev[GELIC_PORT_ETHERNET];

	if ((status == GELIC_DESCR_DMA_RESPONSE_ERROR) ||
	    (status == GELIC_DESCR_DMA_PROTECTION_ERROR) ||
	    (status == GELIC_DESCR_DMA_FORCE_END)) {
		dev_info(ctodev(card), "dropping RX descriptor with state %x\n",
			 status);
		netdev->stats.rx_dropped++;
		goto refill;
	}

	if (status == GELIC_DESCR_DMA_BUFFER_FULL) {
		/*
		 * Buffer full would occur if and only if
		 * the frame length was longer than the size of this
		 * descriptor's buffer. If the frame length was equal
		 * to or shorter than the buffer's size, the FRAME_END
		 * condition would occur.
		 * Anyway this frame was longer than the MTU,
		 * just drop it.
		 */
		dev_info(ctodev(card), "overlength frame\n");
		goto refill;
	}

	/*
	 * descriptors in any state other than FRAME_END here should
	 * be treated as errors.
	 */
	if (status != GELIC_DESCR_DMA_FRAME_END) {
		dev_dbg(ctodev(card), "RX descriptor with state %x\n",
			status);
		goto refill;
	}

	/* ok, we've got a packet in descr */
	gelic_net_pass_skb_up(descr, card, netdev);
refill:
	/*
	 * So that the DMAC can always see the end
	 * of the descriptor chain, to avoid an
	 * unwanted DMAC overrun.
	 */
	descr->next_descr_addr = 0;

	/* change the descriptor state: */
	gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);

	/*
	 * this call can fail, but for now, just leave this
	 * descriptor without skb
	 */
	gelic_descr_prepare_rx(card, descr);

	chain->tail = descr;
	chain->head = descr->next;

	/*
	 * Set this descriptor the end of the chain.
	 */
	descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr);

	/*
	 * If the end of the dmac chain was reached, the DMAC stopped,
	 * thus re-enable it
	 */
	if (dmac_chain_ended) {
		card->rx_dma_restart_required = 1;
		dev_dbg(ctodev(card), "reenable rx dma scheduled\n");
	}

	return 1;
}
/**
 * gelic_net_setup_netdev - initialization of net_device
 * @card: card structure
 *
 * Returns 0 on success or <0 on failure
 *
 * gelic_net_setup_netdev initializes the net_device structure
 **/
static int gelic_net_setup_netdev(struct gelic_net_card *card)
{
	struct net_device *netdev = card->netdev;
	struct sockaddr addr;
	unsigned int i;
	int status;
	u64 v1, v2;

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &card->dev->core);
	spin_lock_init(&card->tx_dma_lock);

	card->rx_csum = GELIC_NET_RX_CSUM_DEFAULT;

	gelic_net_setup_netdev_ops(netdev);

	netdev->features = NETIF_F_IP_CSUM;

	status = lv1_net_control(bus_id(card), dev_id(card),
				 GELIC_NET_GET_MAC_ADDRESS,
				 0, 0, 0, &v1, &v2);
	if (status || !is_valid_ether_addr((u8 *)&v1)) {
		dev_info(ctodev(card),
			 "%s:lv1_net_control GET_MAC_ADDR failed %d\n",
			 __func__, status);
		return -EINVAL;
	}
	v1 <<= 16;
	memcpy(addr.sa_data, &v1, ETH_ALEN);
	memcpy(netdev->dev_addr, addr.sa_data, ETH_ALEN);
	dev_info(ctodev(card), "MAC addr %02x:%02x:%02x:%02x:%02x:%02x\n",
		 netdev->dev_addr[0], netdev->dev_addr[1],
		 netdev->dev_addr[2], netdev->dev_addr[3],
		 netdev->dev_addr[4], netdev->dev_addr[5]);

	card->vlan_index = -1; /* no vlan */
	for (i = 0; i < GELIC_NET_VLAN_MAX; i++) {
		status = lv1_net_control(bus_id(card), dev_id(card),
					 GELIC_NET_GET_VLAN_ID,
					 i + 1, /* index; one based */
					 0, 0, &v1, &v2);
		if (status == GELIC_NET_VLAN_NO_ENTRY) {
			dev_dbg(ctodev(card),
				"GELIC_VLAN_ID no entry:%d, VLAN disabled\n",
				status);
			card->vlan_id[i] = 0;
		} else if (status) {
			dev_dbg(ctodev(card),
				"%s:GELIC_NET_VLAN_ID failed, status=%d\n",
				__func__, status);
			card->vlan_id[i] = 0;
		} else {
			card->vlan_id[i] = (u32)v1;
			dev_dbg(ctodev(card), "vlan_id:%d, %lx\n", i, v1);
		}
	}

	if (card->vlan_id[GELIC_NET_VLAN_WIRED - 1])
		card->vlan_index = GELIC_NET_VLAN_WIRED - 1;

	status = register_netdev(netdev);
	if (status) {
		dev_err(ctodev(card), "%s:Couldn't register net_device: %d\n",
			__func__, status);
		return status;
	}

	return 0;
}