static int hss_hdlc_open(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, err = 0;

	if ((err = hdlc_open(dev)))
		return err;

	if ((err = hss_load_firmware(port)))
		goto err_hdlc_close;

	if ((err = request_hdlc_queues(port)))
		goto err_hdlc_close;

	if ((err = init_hdlc_queues(port)))
		goto err_destroy_queues;

	spin_lock_irqsave(&npe_lock, flags);
	if (port->plat->open)
		if ((err = port->plat->open(port->id, dev,
					    hss_hdlc_set_carrier)))
			goto err_unlock;
	spin_unlock_irqrestore(&npe_lock, flags);

	/* Populate queues with descriptors, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(queue_ids[port->id].rxfree,
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	napi_enable(&port->napi);
	netif_start_queue(dev);

	qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_rx_irq, dev);

	qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_txdone_irq, dev);
	qmgr_enable_irq(queue_ids[port->id].txdone);

	ports_open++;

	hss_set_hdlc_cfg(port);
	hss_config(port);

	hss_start_hdlc(port);

	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;

err_unlock:
	spin_unlock_irqrestore(&npe_lock, flags);
err_destroy_queues:
	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
err_hdlc_close:
	hdlc_close(dev);
	return err;
}
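/*
 * Not part of the function above: a minimal sketch of how an open handler
 * like hss_hdlc_open() is usually wired up, via the net_device_ops of the
 * HDLC net_device registered at probe time.  The structure name and the
 * companion callbacks (hss_hdlc_close, hss_hdlc_ioctl) are assumptions for
 * illustration only; hdlc_start_xmit() is the generic HDLC-layer transmit
 * wrapper provided by the kernel.
 */
static const struct net_device_ops hss_hdlc_ops = {
	.ndo_open	= hss_hdlc_open,
	.ndo_stop	= hss_hdlc_close,	/* assumed close handler */
	.ndo_start_xmit	= hdlc_start_xmit,	/* generic HDLC layer xmit */
	.ndo_do_ioctl	= hss_hdlc_ioctl,	/* assumed ioctl handler */
};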
static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			printk(KERN_ERR "%s: %s not responding\n", dev->name,
			       npe_name(npe));
			return -EIO;
		}
		port->firmware[0] = msg.byte4;
		port->firmware[1] = msg.byte5;
		port->firmware[2] = msg.byte6;
		port->firmware[3] = msg.byte7;
	}

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0;	/* force "link up" message */
	phy_start(port->phydev);

	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;
}
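/*
 * Likewise not in the excerpt: eth_open() is normally reached through the
 * net_device_ops installed on the port's net_device before register_netdev().
 * A minimal sketch under that assumption; the structure name and the
 * close/xmit callbacks are illustrative, not taken from this code, while
 * eth_set_mcast_list() is the helper already called from eth_open() above.
 */
static const struct net_device_ops ixp4xx_netdev_ops = {
	.ndo_open	= eth_open,
	.ndo_stop	= eth_close,		/* assumed close handler */
	.ndo_start_xmit	= eth_xmit,		/* assumed xmit handler */
};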