static void c101_set_iface(port_t *port)
{
	u8 rxs = port->rxs & CLK_BRG_MASK;
	u8 txs = port->txs & CLK_BRG_MASK;

	switch(port->settings.clock_type) {
	case CLOCK_INT:
		rxs |= CLK_BRG_RX; /* BRG output */
		txs |= CLK_RXCLK_TX; /* RX clock */
		break;

	case CLOCK_TXINT:
		rxs |= CLK_LINE_RX; /* RXC input */
		txs |= CLK_BRG_TX; /* BRG output */
		break;

	case CLOCK_TXFROMRX:
		rxs |= CLK_LINE_RX; /* RXC input */
		txs |= CLK_RXCLK_TX; /* RX clock */
		break;

	default:	/* EXTernal clock */
		rxs |= CLK_LINE_RX; /* RXC input */
		txs |= CLK_LINE_TX; /* TXC input */
	}

	port->rxs = rxs;
	port->txs = txs;
	sca_out(rxs, MSCI1_OFFSET + RXS, port);
	sca_out(txs, MSCI1_OFFSET + TXS, port);
	sca_set_port(port);
}
/* Receive DMA service */
static inline int sca_rx_done(port_t *port, int budget)
{
	struct net_device *dev = port->netdev;
	u16 dmac = get_dmac_rx(port);
	card_t *card = port->card;
	u8 stat = sca_in(DSR_RX(port->chan), card); /* read DMA Status */
	int received = 0;

	/* Reset DSR status bits */
	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
		DSR_RX(port->chan), card);

	if (stat & DSR_BOF)
		/* Dropped one or more frames */
		dev->stats.rx_over_errors++;

	while (received < budget) {
		u32 desc_off = hd_desc_offset(port, port->rxin, 0);
		pkt_desc __iomem *desc;
		u32 cda = sca_inl(dmac + CDAL, card);

		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
			break;	/* No frame received */

		desc = desc_address(port, port->rxin, 0);
		stat = readb(&desc->stat);
		if (!(stat & ST_RX_EOM))
			port->rxpart = 1; /* partial frame received */
		else if ((stat & ST_ERROR_MASK) || port->rxpart) {
			dev->stats.rx_errors++;
			if (stat & ST_RX_OVERRUN)
				dev->stats.rx_fifo_errors++;
			else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
					  ST_RX_RESBIT)) || port->rxpart)
				dev->stats.rx_frame_errors++;
			else if (stat & ST_RX_CRC)
				dev->stats.rx_crc_errors++;
			if (stat & ST_RX_EOM)
				port->rxpart = 0; /* received last fragment */
		} else {
			sca_rx(card, port, desc, port->rxin);
			received++;
		}

		/* Set new error descriptor address */
		sca_outl(desc_off, dmac + EDAL, card);
		port->rxin = (port->rxin + 1) % card->rx_ring_buffers;
	}

	/* make sure RX DMA is enabled */
	sca_out(DSR_DE, DSR_RX(port->chan), card);
	return received;
}
/* Receive DMA interrupt service */
static inline void sca_rx_intr(port_t *port)
{
	struct net_device *dev = port_to_dev(port);
	u16 dmac = get_dmac_rx(port);
	card_t *card = port_to_card(port);
	u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */

	/* Reset DSR status bits */
	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
		DSR_RX(phy_node(port)), card);

	if (stat & DSR_BOF)
		/* Dropped one or more frames */
		dev->stats.rx_over_errors++;

	while (1) {
		u32 desc_off = desc_offset(port, port->rxin, 0);
		pkt_desc __iomem *desc;
		u32 cda = sca_inw(dmac + CDAL, card);

		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
			break;	/* No frame received */

		desc = desc_address(port, port->rxin, 0);
		stat = readb(&desc->stat);
		if (!(stat & ST_RX_EOM))
			port->rxpart = 1; /* partial frame received */
		else if ((stat & ST_ERROR_MASK) || port->rxpart) {
			dev->stats.rx_errors++;
			if (stat & ST_RX_OVERRUN)
				dev->stats.rx_fifo_errors++;
			else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
					  ST_RX_RESBIT)) || port->rxpart)
				dev->stats.rx_frame_errors++;
			else if (stat & ST_RX_CRC)
				dev->stats.rx_crc_errors++;
			if (stat & ST_RX_EOM)
				port->rxpart = 0; /* received last fragment */
		} else
			sca_rx(card, port, desc, port->rxin);

		/* Set new error descriptor address */
		sca_outw(desc_off, dmac + EDAL, card);
		port->rxin = next_desc(port, port->rxin, 0);
	}

	/* make sure RX DMA is enabled */
	sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
}
/* Transmit DMA service */
static inline void sca_tx_done(port_t *port)
{
	struct net_device *dev = port->netdev;
	card_t *card = port->card;
	u8 stat;

	spin_lock(&port->lock);

	stat = sca_in(DSR_TX(port->chan), card); /* read DMA Status */

	/* Reset DSR status bits */
	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
		DSR_TX(port->chan), card);

	while (1) {
		pkt_desc __iomem *desc = desc_address(port, port->txlast, 1);
		u8 stat = readb(&desc->stat);

		if (!(stat & ST_TX_OWNRSHP))
			break;	/* not yet transmitted */
		if (stat & ST_TX_UNDRRUN) {
			dev->stats.tx_errors++;
			dev->stats.tx_fifo_errors++;
		} else {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += readw(&desc->len);
		}
		writeb(0, &desc->stat);	/* Free descriptor */
		port->txlast = (port->txlast + 1) % card->tx_ring_buffers;
	}

	netif_wake_queue(dev);
	spin_unlock(&port->lock);
}
/* Transmit DMA interrupt service */
static inline void sca_tx_intr(port_t *port)
{
	struct net_device *dev = port_to_dev(port);
	u16 dmac = get_dmac_tx(port);
	card_t *card = port_to_card(port);
	u8 stat;

	spin_lock(&port->lock);

	stat = sca_in(DSR_TX(phy_node(port)), card); /* read DMA Status */

	/* Reset DSR status bits */
	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
		DSR_TX(phy_node(port)), card);

	while (1) {
		pkt_desc __iomem *desc;
		u32 desc_off = desc_offset(port, port->txlast, 1);
		u32 cda = sca_inw(dmac + CDAL, card);

		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
			break;	/* Transmitter is/will_be sending this frame */

		desc = desc_address(port, port->txlast, 1);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += readw(&desc->len);
		writeb(0, &desc->stat);	/* Free descriptor */
		port->txlast = next_desc(port, port->txlast, 1);
	}

	netif_wake_queue(dev);
	spin_unlock(&port->lock);
}
static void pc300_set_iface(port_t *port)
{
	card_t *card = port->card;
	u32 __iomem *init_ctrl = &card->plxbase->init_ctrl;
	u16 msci = get_msci(port);
	u8 rxs = port->rxs & CLK_BRG_MASK;
	u8 txs = port->txs & CLK_BRG_MASK;

	sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
		port->card);
	switch(port->settings.clock_type) {
	case CLOCK_INT:
		rxs |= CLK_BRG; /* BRG output */
		txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
		break;

	case CLOCK_TXINT:
		rxs |= CLK_LINE; /* RXC input */
		txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */
		break;

	case CLOCK_TXFROMRX:
		rxs |= CLK_LINE; /* RXC input */
		txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
		break;

	default:	/* EXTernal clock */
		rxs |= CLK_LINE; /* RXC input */
		txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */
		break;
	}

	port->rxs = rxs;
	port->txs = txs;
	sca_out(rxs, msci + RXS, card);
	sca_out(txs, msci + TXS, card);
	sca_set_port(port);

	if (port->card->type == PC300_RSV) {
		if (port->iface == IF_IFACE_V35)
			writel(card->init_ctrl_value |
			       PC300_CHMEDIA_MASK(port->chan), init_ctrl);
		else
			writel(card->init_ctrl_value &
			       ~PC300_CHMEDIA_MASK(port->chan), init_ctrl);
	}
}
static int c101_close(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);

	sca_close(dev);
	writeb(0, port->win0base + C101_DTR);
	sca_out(CTL_NORTS, MSCI1_OFFSET + CTL, port);
	hdlc_close(dev);
	return 0;
}
static void sca_msci_intr(port_t *port)
{
	u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */

	/* Reset MSCI TX underrun and CDCD (ignored) status bit */
	sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port);

	if (stat & ST1_UDRN) {
		/* TX Underrun error detected */
		port_to_dev(port)->stats.tx_errors++;
		port_to_dev(port)->stats.tx_fifo_errors++;
	}

	stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */
	/* Reset MSCI CDCD status bit - uses ch#2 DCD input */
	sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port);

	if (stat & ST1_CDCD)
		set_carrier(port);
}
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
	u16 msci = get_msci(port);
	card_t *card = port->card;

	if (sca_in(msci + ST1, card) & ST1_CDCD) {
		/* Reset MSCI CDCD status bit */
		sca_out(ST1_CDCD, msci + ST1, card);
		sca_set_carrier(port);
	}
}
static void n2_set_iface(port_t *port)
{
	card_t *card = port->card;
	int io = card->io;
	u8 mcr = inb(io + N2_MCR);
	u8 msci = get_msci(port);
	u8 rxs = port->rxs & CLK_BRG_MASK;
	u8 txs = port->txs & CLK_BRG_MASK;

	switch(port->settings.clock_type) {
	case CLOCK_INT:
		mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
		rxs |= CLK_BRG_RX; /* BRG output */
		txs |= CLK_RXCLK_TX; /* RX clock */
		break;

	case CLOCK_TXINT:
		mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
		rxs |= CLK_LINE_RX; /* RXC input */
		txs |= CLK_BRG_TX; /* BRG output */
		break;

	case CLOCK_TXFROMRX:
		mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
		rxs |= CLK_LINE_RX; /* RXC input */
		txs |= CLK_RXCLK_TX; /* RX clock */
		break;

	default:	/* Clock EXTernal */
		mcr &= port->phy_node ? ~CLOCK_OUT_PORT1 : ~CLOCK_OUT_PORT0;
		rxs |= CLK_LINE_RX; /* RXC input */
		txs |= CLK_LINE_TX; /* TXC input */
	}

	outb(mcr, io + N2_MCR);
	port->rxs = rxs;
	port->txs = txs;
	sca_out(rxs, msci + RXS, card);
	sca_out(txs, msci + TXS, card);
	sca_set_port(port);
}
static int c101_open(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	int result;

	result = hdlc_open(dev);
	if (result)
		return result;

	writeb(1, port->win0base + C101_DTR);
	sca_out(0, MSCI1_OFFSET + CTL, port); /* RTS uses ch#2 output */
	sca_open(dev);
	/* DCD is connected to port 2 !@#$%^& - disable MSCI0 CDCD interrupt */
	sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port);
	sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port);

	set_carrier(port);

	/* enable MSCI1 CDCD interrupt */
	sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port);
	sca_out(IE0_RXINTA, MSCI1_OFFSET + IE0, port);
	sca_out(0x48, IER0, port); /* TXINT #0 and RXINT #1 */
	c101_set_iface(port);
	return 0;
}
static void pci200_set_iface(port_t *port)
{
	card_t *card = port->card;
	u16 msci = get_msci(port);
	u8 rxs = port->rxs & CLK_BRG_MASK;
	u8 txs = port->txs & CLK_BRG_MASK;

	sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
		port->card);
	switch(port->settings.clock_type) {
	case CLOCK_INT:
		rxs |= CLK_BRG; /* BRG output */
		txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
		break;

	case CLOCK_TXINT:
		rxs |= CLK_LINE; /* RXC input */
		txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */
		break;

	case CLOCK_TXFROMRX:
		rxs |= CLK_LINE; /* RXC input */
		txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
		break;

	default:	/* EXTernal clock */
		rxs |= CLK_LINE; /* RXC input */
		txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */
		break;
	}

	port->rxs = rxs;
	port->txs = txs;
	sca_out(rxs, msci + RXS, card);
	sca_out(txs, msci + TXS, card);
	sca_set_port(port);
}
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
	u16 msci = get_msci(port);
	card_t *card = port_to_card(port);
	u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */

	/* Reset MSCI TX underrun and CDCD status bit */
	sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card);

	if (stat & ST1_UDRN) {
		/* TX Underrun error detected */
		port_to_dev(port)->stats.tx_errors++;
		port_to_dev(port)->stats.tx_fifo_errors++;
	}

	if (stat & ST1_CDCD)
		sca_set_carrier(port);
}
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
	u16 msci = get_msci(port);
	card_t *card = port_to_card(port);
	u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */

	/* Reset MSCI TX underrun and CDCD status bit */
	sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card);

	if (stat & ST1_UDRN) {
		struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
		stats->tx_errors++; /* TX Underrun error detected */
		stats->tx_fifo_errors++;
	}

	if (stat & ST1_CDCD)
		hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD),
				 port_to_dev(port));
}
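/* The newer sca_msci_intr() variants above call sca_set_carrier() instead of
 * passing the inverted ST3_DCD bit to hdlc_set_carrier() directly, as the
 * older variant does. A minimal sketch of what such a helper could look like,
 * inferred only from the hdlc_set_carrier() call shown above; the helper name
 * and the netif_carrier_*() choice are assumptions, not part of this section.
 */
static inline void sca_set_carrier(port_t *port)
{
	/* ST3_DCD is active low: a clear bit means DCD is asserted */
	if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD))
		netif_carrier_on(port_to_dev(port));
	else
		netif_carrier_off(port_to_dev(port));
}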
static __inline__ void close_port(port_t *port)
{
	writeb(0, port->win0base + C101_DTR);
	sca_out(CTL_NORTS, MSCI1_OFFSET + CTL, port);
}
static void sca_init_port(port_t *port)
{
	card_t *card = port_to_card(port);
	int transmit, i;

	port->rxin = 0;
	port->txin = 0;
	port->txlast = 0;

#ifndef PAGE0_ALWAYS_MAPPED
	openwin(card, 0);
#endif

	for (transmit = 0; transmit < 2; transmit++) {
		u16 dmac = transmit ? get_dmac_tx(port) : get_dmac_rx(port);
		u16 buffs = transmit ? card->tx_ring_buffers
			: card->rx_ring_buffers;

		for (i = 0; i < buffs; i++) {
			pkt_desc __iomem *desc = desc_address(port, i, transmit);
			u16 chain_off = desc_offset(port, i + 1, transmit);
			u32 buff_off = buffer_offset(port, i, transmit);

			writew(chain_off, &desc->cp);
			writel(buff_off, &desc->bp);
			writew(0, &desc->len);
			writeb(0, &desc->stat);
		}

		/* DMA disable - to halt state */
		sca_out(0, transmit ? DSR_TX(phy_node(port)) :
			DSR_RX(phy_node(port)), card);
		/* software ABORT - to initial state */
		sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) :
			DCR_RX(phy_node(port)), card);

		/* current desc addr */
		sca_out(0, dmac + CPB, card); /* pointer base */
		sca_outw(desc_offset(port, 0, transmit), dmac + CDAL, card);
		if (!transmit)
			sca_outw(desc_offset(port, buffs - 1, transmit),
				 dmac + EDAL, card);
		else
			sca_outw(desc_offset(port, 0, transmit),
				 dmac + EDAL, card);

		/* clear frame end interrupt counter */
		sca_out(DCR_CLEAR_EOF, transmit ? DCR_TX(phy_node(port)) :
			DCR_RX(phy_node(port)), card);

		if (!transmit) { /* Receive */
			/* set buffer length */
			sca_outw(HDLC_MAX_MRU, dmac + BFLL, card);
			/* Chain mode, Multi-frame */
			sca_out(0x14, DMR_RX(phy_node(port)), card);
			sca_out(DIR_EOME | DIR_BOFE, DIR_RX(phy_node(port)),
				card);
			/* DMA enable */
			sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
		} else { /* Transmit */
			/* Chain mode, Multi-frame */
			sca_out(0x14, DMR_TX(phy_node(port)), card);
			/* enable underflow interrupts */
			sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card);
		}
	}

	sca_set_carrier(port);
}
static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
					const struct pci_device_id *ent)
{
	card_t *card;
	u32 __iomem *p;
	int i;
	u32 ramsize;
	u32 ramphys;		/* buffer memory base */
	u32 scaphys;		/* SCA memory base */
	u32 plxphys;		/* PLX registers memory base */

	i = pci_enable_device(pdev);
	if (i)
		return i;

	i = pci_request_regions(pdev, "PC300");
	if (i) {
		pci_disable_device(pdev);
		return i;
	}

	card = kzalloc(sizeof(card_t), GFP_KERNEL);
	if (card == NULL) {
		printk(KERN_ERR "pc300: unable to allocate memory\n");
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		return -ENOBUFS;
	}
	pci_set_drvdata(pdev, card);

	if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
	    pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
	    pci_resource_len(pdev, 3) < 16384) {
		printk(KERN_ERR "pc300: invalid card EEPROM parameters\n");
		pc300_pci_remove_one(pdev);
		return -EFAULT;
	}

	plxphys = pci_resource_start(pdev, 0) & PCI_BASE_ADDRESS_MEM_MASK;
	card->plxbase = ioremap(plxphys, PC300_PLX_SIZE);

	scaphys = pci_resource_start(pdev, 2) & PCI_BASE_ADDRESS_MEM_MASK;
	card->scabase = ioremap(scaphys, PC300_SCA_SIZE);

	ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK;
	card->rambase = pci_ioremap_bar(pdev, 3);

	if (card->plxbase == NULL ||
	    card->scabase == NULL ||
	    card->rambase == NULL) {
		printk(KERN_ERR "pc300: ioremap() failed\n");
		pc300_pci_remove_one(pdev);
		return -ENOMEM;	/* must not fall through after teardown */
	}

	/* PLX PCI 9050 workaround for local configuration register read bug */
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, scaphys);
	card->init_ctrl_value =
		readl(&((plx9050 __iomem *)card->scabase)->init_ctrl);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys);

	if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
	    pdev->device == PCI_DEVICE_ID_PC300_TE_2)
		card->type = PC300_TE; /* not fully supported */
	else if (card->init_ctrl_value & PC300_CTYPE_MASK)
		card->type = PC300_X21;
	else
		card->type = PC300_RSV;

	if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
	    pdev->device == PCI_DEVICE_ID_PC300_TE_1)
		card->n_ports = 1;
	else
		card->n_ports = 2;

	for (i = 0; i < card->n_ports; i++)
		if (!(card->ports[i].netdev = alloc_hdlcdev(&card->ports[i]))) {
			printk(KERN_ERR "pc300: unable to allocate memory\n");
			pc300_pci_remove_one(pdev);
			return -ENOMEM;
		}

	/* Reset PLX */
	p = &card->plxbase->init_ctrl;
	writel(card->init_ctrl_value | 0x40000000, p);
	readl(p);		/* Flush the write - do not use sca_flush */
	udelay(1);

	writel(card->init_ctrl_value, p);
	readl(p);		/* Flush the write - do not use sca_flush */
	udelay(1);

	/* Reload Config. Registers from EEPROM */
	writel(card->init_ctrl_value | 0x20000000, p);
	readl(p);		/* Flush the write - do not use sca_flush */
	udelay(1);

	writel(card->init_ctrl_value, p);
	readl(p);		/* Flush the write - do not use sca_flush */
	udelay(1);

	ramsize = sca_detect_ram(card, card->rambase,
				 pci_resource_len(pdev, 3));

	if (use_crystal_clock)
		card->init_ctrl_value &= ~PC300_CLKSEL_MASK;
	else
		card->init_ctrl_value |= PC300_CLKSEL_MASK;

	writel(card->init_ctrl_value, &card->plxbase->init_ctrl);

	/* number of TX + RX buffers for one port */
	i = ramsize / (card->n_ports * (sizeof(pkt_desc) + HDLC_MAX_MRU));
	card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
	card->rx_ring_buffers = i - card->tx_ring_buffers;

	card->buff_offset = card->n_ports * sizeof(pkt_desc) *
		(card->tx_ring_buffers + card->rx_ring_buffers);

	printk(KERN_INFO "pc300: PC300/%s, %u KB RAM at 0x%x, IRQ%u, "
	       "using %u TX + %u RX packets rings\n",
	       card->type == PC300_X21 ? "X21" :
	       card->type == PC300_TE ? "TE" : "RSV",
	       ramsize / 1024, ramphys, pdev->irq,
	       card->tx_ring_buffers, card->rx_ring_buffers);

	if (card->tx_ring_buffers < 1) {
		printk(KERN_ERR "pc300: RAM test failed\n");
		pc300_pci_remove_one(pdev);
		return -EFAULT;
	}

	/* Enable interrupts on the PCI bridge, LINTi1 active low */
	writew(0x0041, &card->plxbase->intr_ctrl_stat);

	/* Allocate IRQ */
	if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pc300", card)) {
		printk(KERN_WARNING "pc300: could not allocate IRQ%d.\n",
		       pdev->irq);
		pc300_pci_remove_one(pdev);
		return -EBUSY;
	}
	card->irq = pdev->irq;

	sca_init(card, 0);

	// COTE not set - allows better TX DMA settings
	// sca_out(sca_in(PCR, card) | PCR_COTE, PCR, card);
	sca_out(0x10, BTCR, card);

	for (i = 0; i < card->n_ports; i++) {
		port_t *port = &card->ports[i];
		struct net_device *dev = port->netdev;
		hdlc_device *hdlc = dev_to_hdlc(dev);
		port->chan = i;

		spin_lock_init(&port->lock);
		dev->irq = card->irq;
		dev->mem_start = ramphys;
		dev->mem_end = ramphys + ramsize - 1;
		dev->tx_queue_len = 50;
		dev->netdev_ops = &pc300_ops;
		hdlc->attach = sca_attach;
		hdlc->xmit = sca_xmit;
		port->settings.clock_type = CLOCK_EXT;
		port->card = card;
		if (card->type == PC300_X21)
			port->iface = IF_IFACE_X21;
		else
			port->iface = IF_IFACE_V35;

		sca_init_port(port);
		if (register_hdlc_device(dev)) {
			printk(KERN_ERR "pc300: unable to register hdlc "
			       "device\n");
			port->card = NULL;
			pc300_pci_remove_one(pdev);
			return -ENOBUFS;
		}

		printk(KERN_INFO "%s: PC300 channel %d\n",
		       dev->name, port->chan);
	}
	return 0;
}
static void sca_init_port(port_t *port)
{
	card_t *card = port->card;
	u16 dmac_rx = get_dmac_rx(port), dmac_tx = get_dmac_tx(port);
	int transmit, i;

	port->rxin = 0;
	port->txin = 0;
	port->txlast = 0;

	for (transmit = 0; transmit < 2; transmit++) {
		u16 buffs = transmit ? card->tx_ring_buffers
			: card->rx_ring_buffers;

		for (i = 0; i < buffs; i++) {
			pkt_desc __iomem *desc = desc_address(port, i, transmit);
			u16 chain_off = hd_desc_offset(port, i + 1, transmit);
			u32 buff_off = buffer_offset(port, i, transmit);

			writel(chain_off, &desc->cp);
			writel(buff_off, &desc->bp);
			writew(0, &desc->len);
			writeb(0, &desc->stat);
		}
	}

	/* DMA disable - to halt state */
	sca_out(0, DSR_RX(port->chan), card);
	sca_out(0, DSR_TX(port->chan), card);

	/* software ABORT - to initial state */
	sca_out(DCR_ABORT, DCR_RX(port->chan), card);
	sca_out(DCR_ABORT, DCR_TX(port->chan), card);

	/* current desc addr */
	sca_outl(hd_desc_offset(port, 0, 0), dmac_rx + CDAL, card);
	sca_outl(hd_desc_offset(port, card->tx_ring_buffers - 1, 0),
		 dmac_rx + EDAL, card);
	sca_outl(hd_desc_offset(port, 0, 1), dmac_tx + CDAL, card);
	sca_outl(hd_desc_offset(port, 0, 1), dmac_tx + EDAL, card);

	/* clear frame end interrupt counter */
	sca_out(DCR_CLEAR_EOF, DCR_RX(port->chan), card);
	sca_out(DCR_CLEAR_EOF, DCR_TX(port->chan), card);

	/* Receive */
	sca_outw(HDLC_MAX_MRU, dmac_rx + BFLL, card); /* set buffer length */
	sca_out(0x14, DMR_RX(port->chan), card); /* Chain mode, Multi-frame */
	sca_out(DIR_EOME, DIR_RX(port->chan), card); /* enable interrupts */
	sca_out(DSR_DE, DSR_RX(port->chan), card); /* DMA enable */

	/* Transmit */
	sca_out(0x14, DMR_TX(port->chan), card); /* Chain mode, Multi-frame */
	sca_out(DIR_EOME, DIR_TX(port->chan), card); /* enable interrupts */

	sca_set_carrier(port);
	netif_napi_add(port->netdev, &port->napi, sca_poll, NAPI_WEIGHT);
}
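/* netif_napi_add() above registers sca_poll(), which is not included in this
 * section. The following is only a sketch of how such a NAPI poll handler
 * could dispatch to the budget-aware service routines shown earlier
 * (sca_msci_intr(), sca_tx_done(), sca_rx_done()); the ISR0 bit masks and the
 * enable_intr() helper are assumptions for illustration, not taken from this
 * section.
 */
static int sca_poll(struct napi_struct *napi, int budget)
{
	port_t *port = container_of(napi, port_t, napi);
	u32 isr0 = sca_inl(ISR0, port->card);	/* assumed interrupt status */
	int received = 0;

	if (isr0 & (port->chan ? 0x08000000 : 0x00080000))
		sca_msci_intr(port);		/* line status change */

	if (isr0 & (port->chan ? 0x00002000 : 0x00000020))
		sca_tx_done(port);		/* reclaim TX descriptors */

	if (isr0 & (port->chan ? 0x00000200 : 0x00000002))
		received = sca_rx_done(port, budget);

	if (received < budget) {
		napi_complete(napi);		/* all pending work done */
		enable_intr(port);		/* assumed: unmask chip IRQs */
	}

	return received;
}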
static __inline__ void open_port(port_t *port)
{
	writeb(1, port->win0base + C101_DTR);
	sca_out(0, MSCI1_OFFSET + CTL, port); /* RTS uses ch#2 output */
}