/*
 * ibmveth_change_mtu - change the device MTU, reconfiguring rx buffer pools.
 * @dev:     net device being reconfigured
 * @new_mtu: requested MTU in bytes
 *
 * Return: 0 on success, -EINVAL if no rx buffer pool can hold the new MTU
 * (plus IBMVETH_BUFF_OH of per-buffer overhead), or the error from
 * reopening the device when it was running.
 */
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	/* rx buffer space a packet of this MTU actually needs */
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	/* Reject outright if not even the largest pool's buffers fit it */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	/* NOTE(review): pool_config=1 around the close appears to flag this
	 * as a temporary reconfigure rather than a real down -- confirm
	 * against ibmveth_close(). */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			/* presumably re-states the device's desired DMA
			 * allotment for the new buffer sizes (CMO) -- see
			 * ibmveth_get_desired_dma() */
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	/* No pool fit after all; bring a previously-running device back up
	 * before reporting failure */
	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}
/*
 * ibmveth_set_csum_offload - toggle IPv4 TCP checksum offload in firmware.
 * @dev:  net device to reconfigure
 * @data: nonzero to enable offload, zero to disable
 * @done: callback invoked with (dev, data) once the firmware accepted
 *        the new setting
 *
 * The illan attribute may only be flipped while the device is closed, so a
 * running device is bounced (closed, reconfigured, reopened) around the
 * hypervisor call.
 *
 * Return: 0 on success, -EIO if the firmware rejected or cannot support the
 * change, otherwise the error from reopening the device.
 */
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, void (*done) (struct net_device *, u32))
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long ret_attr;
	unsigned long set_attr = data ? IBMVETH_ILLAN_IPV4_TCP_CSUM : 0;
	unsigned long clr_attr = data ? 0 : IBMVETH_ILLAN_IPV4_TCP_CSUM;
	long hret;
	int attr_rc = 0;
	int open_rc = 0;
	int was_running = netif_running(dev);

	if (was_running) {
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	/* Query current attributes to see whether csum offload is usable */
	hret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (hret != H_SUCCESS || (ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) ||
	    (ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) ||
	    !(ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		/* trunk adapter or no padded-packet csum support */
		attr_rc = -EIO;
		ibmveth_error_printk("unable to change checksum offload settings."
				     " %d rc=%ld ret_attr=%lx\n", data, hret,
				     ret_attr);
	} else {
		hret = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr, set_attr, &ret_attr);
		if (hret == H_SUCCESS) {
			done(dev, data);
		} else {
			attr_rc = -EIO;
			ibmveth_error_printk("unable to change checksum offload settings."
					     " %d rc=%ld\n", data, hret);
			/* best-effort restore of the previous setting */
			hret = h_illan_attributes(adapter->vdev->unit_address,
						  set_attr, clr_attr,
						  &ret_attr);
		}
	}

	if (was_running)
		open_rc = ibmveth_open(dev);

	return attr_rc ? attr_rc : open_rc;
}
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) { struct ibmveth_adapter *adapter = netdev_priv(dev); struct vio_dev *viodev = adapter->vdev; int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; int i; if (new_mtu < IBMVETH_MAX_MTU) return -EINVAL; for (i = 0; i < IbmVethNumBufferPools; i++) if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) break; if (i == IbmVethNumBufferPools) return -EINVAL; for (i = 0; i < IbmVethNumBufferPools; i++) if (adapter->rx_buff_pool[i].active) { ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]); adapter->rx_buff_pool[i].active = 0; } for(i = 0; i<IbmVethNumBufferPools; i++) { adapter->rx_buff_pool[i].active = 1; if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { if (netif_running(adapter->netdev)) { adapter->pool_config = 1; ibmveth_close(adapter->netdev); adapter->pool_config = 0; dev->mtu = new_mtu; vio_cmo_set_dev_desired(viodev, ibmveth_get_desired_dma (viodev)); return ibmveth_open(adapter->netdev); } dev->mtu = new_mtu; vio_cmo_set_dev_desired(viodev, ibmveth_get_desired_dma (viodev)); return 0; } } return -EINVAL; }
/*
 * ibmveth_change_mtu - change the device MTU, activating rx buffer pools
 * as needed (pre-CMO variant using dev->priv directly).
 * @dev:     net device being reconfigured
 * @new_mtu: requested MTU in bytes
 *
 * Return: 0 on success, -EINVAL if no rx buffer pool's buffers can hold
 * the new MTU plus overhead, or the error from reopening the device.
 */
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = dev->priv;
	/* rx buffer space a packet of this MTU actually needs */
	int padded_mtu = new_mtu + IBMVETH_BUFF_OH;
	int pools_changed = 0;
	int idx, rc;

	if (new_mtu < IBMVETH_MAX_MTU)
		return -EINVAL;

	/* Bail out early unless some pool's buffer size can hold it */
	for (idx = 0; idx < IbmVethNumBufferPools; idx++)
		if (padded_mtu < adapter->rx_buff_pool[idx].buff_size)
			break;

	if (idx == IbmVethNumBufferPools)
		return -EINVAL;

	/* Walk the pools smallest-first, switching on every inactive one,
	 * until we reach a pool large enough for the padded MTU */
	for (idx = 0; idx < IbmVethNumBufferPools; idx++) {
		if (!adapter->rx_buff_pool[idx].active) {
			adapter->rx_buff_pool[idx].active = 1;
			pools_changed = 1;
		}

		if (padded_mtu >= adapter->rx_buff_pool[idx].buff_size)
			continue;

		/* Found a big-enough pool. A running device must be bounced
		 * when pools were newly activated so they get allocated */
		if (pools_changed && netif_running(adapter->netdev)) {
			adapter->pool_config = 1;
			ibmveth_close(adapter->netdev);
			adapter->pool_config = 0;
			dev->mtu = new_mtu;
			rc = ibmveth_open(adapter->netdev);
			if (rc)
				return rc;
		} else {
			dev->mtu = new_mtu;
		}
		return 0;
	}

	return -EINVAL;
}
/*
 * veth_pool_store - sysfs store handler for a per-pool attribute
 * (active / num buffers / buffer size).
 * @kobj:  kobject embedded in the ibmveth_buff_pool being configured
 * @attr:  which pool attribute is being written
 * @buf:   user-supplied value as text
 * @count: length of @buf
 *
 * Any change on a running device bounces it (close, apply, reopen) with
 * adapter->pool_config set around the close -- presumably to mark it as a
 * reconfigure rather than a real down; confirm against ibmveth_close().
 *
 * Return: @count on success or a negative errno.
 * NOTE(review): simple_strtol ignores parse errors, so garbage input is
 * treated as 0 -- consider strict parsing.
 */
static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr, const char * buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj, struct ibmveth_buff_pool, kobj);
	/* kobj->parent is the struct device the netdev was registered on */
	struct net_device *netdev = dev_get_drvdata(
		container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			/* Activating: allocate buffers first if running */
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					ibmveth_error_printk("unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->active = 1;
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Refuse to deactivate unless some OTHER active pool
			 * can still hold a packet of the current MTU */
			for (i = 0; i < IbmVethNumBufferPools; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}
			if (i == IbmVethNumBufferPools) {
				ibmveth_error_printk("no active pool >= MTU\n");
				return -EPERM;
			}
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				/* clear active between close and reopen so
				 * the reopen skips this pool */
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			/* also covers the not-running case (redundant but
			 * harmless after the running path above) */
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		/* number of buffers in the pool */
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
			return -EINVAL;
		else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->size = value;
		}
	} else if (attr == &veth_size_attr) {
		/* size of each buffer; must exceed the per-buffer overhead */
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
			return -EINVAL;
		else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->buff_size = value;
		}
	}

	/* kick the interrupt handler so it (re)allocates pool buffers */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}
/*
 * ibmveth_set_csum_offload - toggle IPv4 and IPv6 TCP checksum offload
 * in firmware.
 * @dev:  net device to reconfigure
 * @data: nonzero to enable offload, zero to disable
 *
 * IPv4 and IPv6 are negotiated independently; a failure on one side rolls
 * that side back and strips the corresponding NETIF_F_*_CSUM feature, but
 * the overall change still succeeds if the other side was accepted.
 * A running device is bounced (close, apply, reopen) around the change.
 *
 * Return: 0 on success, -EIO if firmware rejected both families or cannot
 * support offload, otherwise the error from reopening the device.
 */
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	/* enabling sets the bits, disabling clears them */
	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	/* Query current attributes to see whether csum offload is usable */
	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr, set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret4);
			/* best-effort rollback of the IPv4 attribute */
			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);
			/* best-effort rollback of the IPv6 attribute */
			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else
			adapter->fw_ipv6_csum_support = data;

		/* either family succeeding is good enough for rx_csum */
		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		/* trunk adapter or no padded-packet csum support */
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				" %d rc=%ld ret_attr=%lx\n", data, ret,
				ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}
/*
 * ibmveth_set_tso - toggle large-send (TSO) offload in firmware.
 * @dev:  net device to reconfigure
 * @data: nonzero to enable TSO, zero to disable
 *
 * If the firmware advertises IBMVETH_ILLAN_LRG_SND_SUPPORT (and the
 * old_large_send override is off), the setting is negotiated via
 * h_illan_attributes with rollback on failure.  Otherwise the older
 * large-send mechanism is assumed, which does not support TSO over IPv6,
 * so NETIF_F_TSO6 is stripped when enabling.
 * A running device is bounced (close, apply, reopen) around the change.
 *
 * Return: 0 on success, -EIO if the firmware rejected the change,
 * otherwise the error from reopening the device.
 */
static int ibmveth_set_tso(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	long ret1, ret2;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;

	if (data)
		set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
	else
		clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;

	/* Query current attributes to check for large-send support */
	ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret2 != H_SUCCESS) {
			netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
				   data, ret2);
			/* best-effort rollback to the previous setting */
			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
			rc1 = -EIO;

		} else {
			adapter->fw_large_send_support = data;
			adapter->large_send = data;
		}
	} else {
		/* Older firmware version of large send offload does not
		 * support tcp6/ipv6 */
		if (data == 1) {
			dev->features &= ~NETIF_F_TSO6;
			netdev_info(dev, "TSO feature requires all partitions to have updated driver");
		}
		adapter->large_send = data;
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}
/*
 * veth_pool_store - sysfs store handler for a per-pool attribute
 * (active / num buffers / buffer size).
 * @kobj:  kobject embedded in the ibmveth_buff_pool being configured
 * @attr:  which pool attribute is being written
 * @buf:   user-supplied value as text
 * @count: length of @buf
 *
 * Any change on a running device bounces it (close, apply, reopen) with
 * adapter->pool_config set around the close -- presumably to mark it as a
 * reconfigure rather than a real down; confirm against ibmveth_close().
 *
 * Return: @count on success or a negative errno.
 * NOTE(review): simple_strtol ignores parse errors, so garbage input is
 * treated as 0 -- consider strict parsing.
 */
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	/* kobj->parent is the struct device the netdev was registered on */
	struct net_device *netdev = dev_get_drvdata(
		container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			/* Activating: allocate buffers first if running */
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				/* clear active between close and reopen so
				 * the reopen skips this pool */
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			/* also covers the not-running case (redundant but
			 * harmless after the running path above) */
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		/* number of buffers in the pool */
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		/* size of each buffer; must exceed the per-buffer overhead */
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}