Example #1
static inline void set_carrier(port_t *port)
{
	if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD))
		netif_carrier_on(port_to_dev(port));
	else
		netif_carrier_off(port_to_dev(port));
}
Example #2
static void pci200_pci_remove_one(struct pci_dev *pdev)
{
	int i;
	card_t *card = pci_get_drvdata(pdev);

	for(i = 0; i < 2; i++)
		if (card->ports[i].card) {
			struct net_device *dev = port_to_dev(&card->ports[i]);
			unregister_hdlc_device(dev);
		}

	if (card->irq)
		free_irq(card->irq, card);

	if (card->rambase)
		iounmap(card->rambase);
	if (card->scabase)
		iounmap(card->scabase);
	if (card->plxbase)
		iounmap(card->plxbase);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	if (card->ports[0].dev)
		free_netdev(card->ports[0].dev);
	if (card->ports[1].dev)
		free_netdev(card->ports[1].dev);
	kfree(card);
}
Example #3
/* Transmit DMA interrupt service */
static inline void sca_tx_intr(port_t *port)
{
	struct net_device *dev = port_to_dev(port);
	u16 dmac = get_dmac_tx(port);
	card_t* card = port_to_card(port);
	u8 stat;

	spin_lock(&port->lock);

	stat = sca_in(DSR_TX(phy_node(port)), card); /* read DMA Status */

	/* Reset DSR status bits */
	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
		DSR_TX(phy_node(port)), card);

	while (1) {
		pkt_desc __iomem *desc;

		u32 desc_off = desc_offset(port, port->txlast, 1);
		u32 cda = sca_inw(dmac + CDAL, card);
		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
			break;	/* Transmitter is/will_be sending this frame */

		desc = desc_address(port, port->txlast, 1);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += readw(&desc->len);
		writeb(0, &desc->stat);	/* Free descriptor */
		port->txlast = next_desc(port, port->txlast, 1);
	}

	netif_wake_queue(dev);
	spin_unlock(&port->lock);
}
Example #4
static void n2_destroy_card(card_t *card)
{
    int cnt;

    for (cnt = 0; cnt < 2; cnt++)
        if (card->ports[cnt].card) {
            struct net_device *dev = port_to_dev(&card->ports[cnt]);
            unregister_hdlc_device(dev);
        }

    if (card->irq)
        free_irq(card->irq, card);

    if (card->winbase) {
        iounmap(card->winbase);
        release_mem_region(card->phy_winbase, USE_WINDOWSIZE);
    }

    if (card->io)
        release_region(card->io, N2_IOPORTS);
    if (card->ports[0].dev)
        free_netdev(card->ports[0].dev);
    if (card->ports[1].dev)
        free_netdev(card->ports[1].dev);
    kfree(card);
}
Example #5
static inline void sca_set_carrier(port_t *port)
{
	if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) {
#ifdef DEBUG_LINK
		printk(KERN_DEBUG "%s: sca_set_carrier on\n",
		       port_to_dev(port)->name);
#endif
		netif_carrier_on(port_to_dev(port));
	} else {
#ifdef DEBUG_LINK
		printk(KERN_DEBUG "%s: sca_set_carrier off\n",
		       port_to_dev(port)->name);
#endif
		netif_carrier_off(port_to_dev(port));
	}
}
Example #6
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
	u16 msci = get_msci(port);
	card_t* card = port_to_card(port);
	u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */

	/* Reset MSCI TX underrun and CDCD status bit */
	sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card);

	if (stat & ST1_UDRN) {
		/* TX Underrun error detected */
		port_to_dev(port)->stats.tx_errors++;
		port_to_dev(port)->stats.tx_fifo_errors++;
	}

	if (stat & ST1_CDCD)
		sca_set_carrier(port);
}
Example #7
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
    u16 msci = get_msci(port);
    card_t* card = port_to_card(port);
    u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */

    /* Reset MSCI TX underrun and CDCD status bit */
    sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card);

    if (stat & ST1_UDRN) {
        /* TX Underrun error detected */
        port_to_dev(port)->stats.tx_errors++;
        port_to_dev(port)->stats.tx_fifo_errors++;
    }

    if (stat & ST1_CDCD)
        sca_set_carrier(port);
}
Example #8
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
	u16 msci = get_msci(port);
	card_t* card = port_to_card(port);
	u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */

	/* Reset MSCI TX underrun and CDCD status bit */
	sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card);

	if (stat & ST1_UDRN) {
		struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
		stats->tx_errors++; /* TX Underrun error detected */
		stats->tx_fifo_errors++;
	}

	if (stat & ST1_CDCD)
		hdlc_set_carrier(!(sca_in(msci + ST3, card) & ST3_DCD),
				 port_to_dev(port));
}
Example #9
static void __exit c101_cleanup(void)
{
	card_t *card = first_card;

	while (card) {
		card_t *ptr = card;
		card = card->next_card;
		unregister_hdlc_device(port_to_dev(ptr));
		c101_destroy_card(ptr);
	}
}
Example #10
static void sca_msci_intr(port_t *port)
{
	u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */

	/* Reset MSCI TX underrun and CDCD (ignored) status bit */
	sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port);

	if (stat & ST1_UDRN) {
		/* TX Underrun error detected */
		port_to_dev(port)->stats.tx_errors++;
		port_to_dev(port)->stats.tx_fifo_errors++;
	}

	stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */
	/* Reset MSCI CDCD status bit - uses ch#2 DCD input */
	sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port);

	if (stat & ST1_CDCD)
		set_carrier(port);
}
Example #11
static inline void sca_rx(card_t *card, port_t *port, pkt_desc *desc, u16 rxin)
{
	struct net_device *dev = port_to_dev(port);
	struct net_device_stats *stats = hdlc_stats(dev);
	struct sk_buff *skb;
	u16 len;
	u32 buff;
#ifndef ALL_PAGES_ALWAYS_MAPPED
	u32 maxlen;
	u8 page;
#endif

	len = readw(&desc->len);
	skb = dev_alloc_skb(len);
	if (!skb) {
		stats->rx_dropped++;
		return;
	}

	buff = buffer_offset(port, rxin, 0);
#ifndef ALL_PAGES_ALWAYS_MAPPED
	page = buff / winsize(card);
	buff = buff % winsize(card);
	maxlen = winsize(card) - buff;

	openwin(card, page);

	if (len > maxlen) {
		memcpy_fromio(skb->data, winbase(card) + buff, maxlen);
		openwin(card, page + 1);
		memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
	} else
#endif
	memcpy_fromio(skb->data, winbase(card) + buff, len);

#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
	/* select pkt_desc table page back */
	openwin(card, 0);
#endif
	skb_put(skb, len);
#ifdef DEBUG_PKT
	printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
	debug_frame(skb);
#endif
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	skb->mac.raw = skb->data;
	skb->dev = dev;
	skb->dev->last_rx = jiffies;
	skb->protocol = hdlc_type_trans(skb, dev);
	netif_rx(skb);
}
Example #12
/* Receive DMA interrupt service */
static inline void sca_rx_intr(port_t *port)
{
	struct net_device *dev = port_to_dev(port);
	u16 dmac = get_dmac_rx(port);
	card_t *card = port_to_card(port);
	u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */

	/* Reset DSR status bits */
	sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
		DSR_RX(phy_node(port)), card);

	if (stat & DSR_BOF)
		/* Dropped one or more frames */
		dev->stats.rx_over_errors++;

	while (1) {
		u32 desc_off = desc_offset(port, port->rxin, 0);
		pkt_desc __iomem *desc;
		u32 cda = sca_inw(dmac + CDAL, card);

		if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
			break;	/* No frame received */

		desc = desc_address(port, port->rxin, 0);
		stat = readb(&desc->stat);
		if (!(stat & ST_RX_EOM))
			port->rxpart = 1; /* partial frame received */
		else if ((stat & ST_ERROR_MASK) || port->rxpart) {
			dev->stats.rx_errors++;
			if (stat & ST_RX_OVERRUN)
				dev->stats.rx_fifo_errors++;
			else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
					  ST_RX_RESBIT)) || port->rxpart)
				dev->stats.rx_frame_errors++;
			else if (stat & ST_RX_CRC)
				dev->stats.rx_crc_errors++;
			if (stat & ST_RX_EOM)
				port->rxpart = 0; /* received last fragment */
		} else
			sca_rx(card, port, desc, port->rxin);

		/* Set new error descriptor address */
		sca_outw(desc_off, dmac + EDAL, card);
		port->rxin = next_desc(port, port->rxin, 0);
	}

	/* make sure RX DMA is enabled */
	sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
}
Example #13
/* Receive DMA interrupt service */
static inline void sca_rx_intr(port_t *port)
{
    struct net_device *dev = port_to_dev(port);
    u16 dmac = get_dmac_rx(port);
    card_t *card = port_to_card(port);
    u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */

    /* Reset DSR status bits */
    sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
            DSR_RX(phy_node(port)), card);

    if (stat & DSR_BOF)
        /* Dropped one or more frames */
        dev->stats.rx_over_errors++;

    while (1) {
        u32 desc_off = desc_offset(port, port->rxin, 0);
        pkt_desc __iomem *desc;
        u32 cda = sca_inw(dmac + CDAL, card);

        if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
            break;

        desc = desc_address(port, port->rxin, 0);
        stat = readb(&desc->stat);
        if (!(stat & ST_RX_EOM))
            port->rxpart = 1;
        else if ((stat & ST_ERROR_MASK) || port->rxpart) {
            dev->stats.rx_errors++;
            if (stat & ST_RX_OVERRUN)
                dev->stats.rx_fifo_errors++;
            else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
                              ST_RX_RESBIT)) || port->rxpart)
                dev->stats.rx_frame_errors++;
            else if (stat & ST_RX_CRC)
                dev->stats.rx_crc_errors++;
            if (stat & ST_RX_EOM)
                port->rxpart = 0;
        } else
            sca_rx(card, port, desc, port->rxin);

        /* Set new error descriptor address */
        sca_outw(desc_off, dmac + EDAL, card);
        port->rxin = next_desc(port, port->rxin, 0);
    }

    /* make sure RX DMA is enabled */
    sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
}
Example #14
static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
                          u16 rxin)
{
    struct net_device *dev = port_to_dev(port);
    struct sk_buff *skb;
    u16 len;
    u32 buff;
    u32 maxlen;
    u8 page;

    len = readw(&desc->len);
    skb = dev_alloc_skb(len);
    if (!skb) {
        dev->stats.rx_dropped++;
        return;
    }

    buff = buffer_offset(port, rxin, 0);
    page = buff / winsize(card);
    buff = buff % winsize(card);
    maxlen = winsize(card) - buff;

    openwin(card, page);

    if (len > maxlen) {
        memcpy_fromio(skb->data, winbase(card) + buff, maxlen);
        openwin(card, page + 1);
        memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
    } else
        memcpy_fromio(skb->data, winbase(card) + buff, len);

#ifndef PAGE0_ALWAYS_MAPPED
    /* select pkt_desc table page back */
    openwin(card, 0);
#endif
    skb_put(skb, len);
#ifdef DEBUG_PKT
    printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
    debug_frame(skb);
#endif
    dev->stats.rx_packets++;
    dev->stats.rx_bytes += skb->len;
    skb->protocol = hdlc_type_trans(skb, dev);
    netif_rx(skb);
}
Example #15
static int __init c101_run(unsigned long irq, unsigned long winbase)
{
	struct net_device *dev;
	hdlc_device *hdlc;
	card_t *card;
	int result;

	if (irq<3 || irq>15 || irq == 6) /* FIXME */ {
		printk(KERN_ERR "c101: invalid IRQ value\n");
		return -ENODEV;
	}

	if (winbase < 0xC0000 || winbase > 0xDFFFF || (winbase & 0x3FFF) !=0) {
		printk(KERN_ERR "c101: invalid RAM value\n");
		return -ENODEV;
	}

	card = kzalloc(sizeof(card_t), GFP_KERNEL);
	if (card == NULL) {
		printk(KERN_ERR "c101: unable to allocate memory\n");
		return -ENOBUFS;
	}

	card->dev = alloc_hdlcdev(card);
	if (!card->dev) {
		printk(KERN_ERR "c101: unable to allocate memory\n");
		kfree(card);
		return -ENOBUFS;
	}

	if (request_irq(irq, sca_intr, 0, devname, card)) {
		printk(KERN_ERR "c101: could not allocate IRQ\n");
		c101_destroy_card(card);
		return -EBUSY;
	}
	card->irq = irq;

	if (!request_mem_region(winbase, C101_MAPPED_RAM_SIZE, devname)) {
		printk(KERN_ERR "c101: could not request RAM window\n");
		c101_destroy_card(card);
		return -EBUSY;
	}
	card->phy_winbase = winbase;
	card->win0base = ioremap(winbase, C101_MAPPED_RAM_SIZE);
	if (!card->win0base) {
		printk(KERN_ERR "c101: could not map I/O address\n");
		c101_destroy_card(card);
		return -EFAULT;
	}

	card->tx_ring_buffers = TX_RING_BUFFERS;
	card->rx_ring_buffers = RX_RING_BUFFERS;
	card->buff_offset = C101_WINDOW_SIZE; /* Bytes 1D00-1FFF reserved */

	readb(card->win0base + C101_PAGE); /* Resets SCA? */
	udelay(100);
	writeb(0, card->win0base + C101_PAGE);
	writeb(0, card->win0base + C101_DTR); /* Power-up for RAM? */

	sca_init(card, 0);

	dev = port_to_dev(card);
	hdlc = dev_to_hdlc(dev);

	spin_lock_init(&card->lock);
	dev->irq = irq;
	dev->mem_start = winbase;
	dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1;
	dev->tx_queue_len = 50;
	dev->do_ioctl = c101_ioctl;
	dev->open = c101_open;
	dev->stop = c101_close;
	hdlc->attach = sca_attach;
	hdlc->xmit = sca_xmit;
	card->settings.clock_type = CLOCK_EXT;

	result = register_hdlc_device(dev);
	if (result) {
		printk(KERN_WARNING "c101: unable to register hdlc device\n");
		c101_destroy_card(card);
		return result;
	}

	sca_init_sync_port(card); /* Set up C101 memory */
	set_carrier(card);

	printk(KERN_INFO "%s: Moxa C101 on IRQ%u,"
	       " using %u TX + %u RX packets rings\n",
	       dev->name, card->irq,
	       card->tx_ring_buffers, card->rx_ring_buffers);

	*new_card = card;
	new_card = &card->next_card;
	return 0;
}
Example #16
static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
					 const struct pci_device_id *ent)
{
	card_t *card;
	u8 rev_id;
	u32 *p;
	int i;
	u32 ramsize;
	u32 ramphys;		/* buffer memory base */
	u32 scaphys;		/* SCA memory base */
	u32 plxphys;		/* PLX registers memory base */

#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(KERN_INFO "%s\n", version);
#endif

	i = pci_enable_device(pdev);
	if (i)
		return i;

	i = pci_request_regions(pdev, "PCI200SYN");
	if (i) {
		pci_disable_device(pdev);
		return i;
	}

	card = kmalloc(sizeof(card_t), GFP_KERNEL);
	if (card == NULL) {
		printk(KERN_ERR "pci200syn: unable to allocate memory\n");
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		return -ENOBUFS;
	}
	memset(card, 0, sizeof(card_t));
	pci_set_drvdata(pdev, card);
	card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
	card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
	if (!card->ports[0].dev || !card->ports[1].dev) {
		printk(KERN_ERR "pci200syn: unable to allocate memory\n");
		pci200_pci_remove_one(pdev);
		return -ENOMEM;
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (pci_resource_len(pdev, 0) != PCI200SYN_PLX_SIZE ||
	    pci_resource_len(pdev, 2) != PCI200SYN_SCA_SIZE ||
	    pci_resource_len(pdev, 3) < 16384) {
		printk(KERN_ERR "pci200syn: invalid card EEPROM parameters\n");
		pci200_pci_remove_one(pdev);
		return -EFAULT;
	}

	plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK;
	card->plxbase = ioremap(plxphys, PCI200SYN_PLX_SIZE);

	scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK;
	card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE);

	ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
	card->rambase = ioremap(ramphys, pci_resource_len(pdev,3));

	if (card->plxbase == NULL ||
	    card->scabase == NULL ||
	    card->rambase == NULL) {
		printk(KERN_ERR "pci200syn: ioremap() failed\n");
		pci200_pci_remove_one(pdev);
		return -ENOMEM;
	}

	/* Reset PLX */
	p = &card->plxbase->init_ctrl;
	writel(readl(p) | 0x40000000, p);
	readl(p);		/* Flush the write - do not use sca_flush */
	udelay(1);

	writel(readl(p) & ~0x40000000, p);
	readl(p);		/* Flush the write - do not use sca_flush */
	udelay(1);

	ramsize = sca_detect_ram(card, card->rambase,
				 pci_resource_len(pdev, 3));

	/* number of TX + RX buffers for one port - this is dual port card */
	i = ramsize / (2 * (sizeof(pkt_desc) + HDLC_MAX_MRU));
	card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
	card->rx_ring_buffers = i - card->tx_ring_buffers;

	card->buff_offset = 2 * sizeof(pkt_desc) * (card->tx_ring_buffers +
						    card->rx_ring_buffers);

	printk(KERN_INFO "pci200syn: %u KB RAM at 0x%x, IRQ%u, using %u TX +"
	       " %u RX packets rings\n", ramsize / 1024, ramphys,
	       pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers);

	if (card->tx_ring_buffers < 1) {
		printk(KERN_ERR "pci200syn: RAM test failed\n");
		pci200_pci_remove_one(pdev);
		return -EFAULT;
	}

	/* Enable interrupts on the PCI bridge */
	p = &card->plxbase->intr_ctrl_stat;
	writew(readw(p) | 0x0040, p);

	/* Allocate IRQ */
	if(request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) {
		printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n",
		       pdev->irq);
		pci200_pci_remove_one(pdev);
		return -EBUSY;
	}
	card->irq = pdev->irq;

	sca_init(card, 0);

	for(i = 0; i < 2; i++) {
		port_t *port = &card->ports[i];
		struct net_device *dev = port_to_dev(port);
		hdlc_device *hdlc = dev_to_hdlc(dev);
		port->phy_node = i;

		spin_lock_init(&port->lock);
		SET_MODULE_OWNER(dev);
		dev->irq = card->irq;
		dev->mem_start = ramphys;
		dev->mem_end = ramphys + ramsize - 1;
		dev->tx_queue_len = 50;
		dev->do_ioctl = pci200_ioctl;
		dev->open = pci200_open;
		dev->stop = pci200_close;
		hdlc->attach = sca_attach;
		hdlc->xmit = sca_xmit;
		port->settings.clock_type = CLOCK_EXT;
		port->card = card;
		if(register_hdlc_device(dev)) {
			printk(KERN_ERR "pci200syn: unable to register hdlc "
			       "device\n");
			port->card = NULL;
			pci200_pci_remove_one(pdev);
			return -ENOBUFS;
		}
		sca_init_sync_port(port);	/* Set up SCA memory */

		printk(KERN_INFO "%s: PCI200SYN node %d\n",
		       dev->name, port->phy_node);
	}

	sca_flush(card);
	return 0;
}
Example #17
static int __init n2_run(unsigned long io, unsigned long irq,
                         unsigned long winbase, long valid0, long valid1)
{
    card_t *card;
    u8 cnt, pcr;
    int i;

    if (io < 0x200 || io > 0x3FF || (io % N2_IOPORTS) != 0) {
        printk(KERN_ERR "n2: invalid I/O port value\n");
        return -ENODEV;
    }

    if (irq < 3 || irq > 15 || irq == 6) { /* FIXME */
        printk(KERN_ERR "n2: invalid IRQ value\n");
        return -ENODEV;
    }

    if (winbase < 0xA0000 || winbase > 0xFFFFF || (winbase & 0xFFF) != 0) {
        printk(KERN_ERR "n2: invalid RAM value\n");
        return -ENODEV;
    }

    card = kzalloc(sizeof(card_t), GFP_KERNEL);
    if (card == NULL) {
        printk(KERN_ERR "n2: unable to allocate memory\n");
        return -ENOBUFS;
    }

    card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
    card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
    if (!card->ports[0].dev || !card->ports[1].dev) {
        printk(KERN_ERR "n2: unable to allocate memory\n");
        n2_destroy_card(card);
        return -ENOMEM;
    }

    if (!request_region(io, N2_IOPORTS, devname)) {
        printk(KERN_ERR "n2: I/O port region in use\n");
        n2_destroy_card(card);
        return -EBUSY;
    }
    card->io = io;

    if (request_irq(irq, &sca_intr, 0, devname, card)) {
        printk(KERN_ERR "n2: could not allocate IRQ\n");
        n2_destroy_card(card);
        return(-EBUSY);
    }
    card->irq = irq;

    if (!request_mem_region(winbase, USE_WINDOWSIZE, devname)) {
        printk(KERN_ERR "n2: could not request RAM window\n");
        n2_destroy_card(card);
        return(-EBUSY);
    }
    card->phy_winbase = winbase;
    card->winbase = ioremap(winbase, USE_WINDOWSIZE);
    if (!card->winbase) {
        printk(KERN_ERR "n2: ioremap() failed\n");
        n2_destroy_card(card);
        return -EFAULT;
    }

    outb(0, io + N2_PCR);
    outb(winbase >> 12, io + N2_BAR);

    switch (USE_WINDOWSIZE) {
    case 16384:
        outb(WIN16K, io + N2_PSR);
        break;

    case 32768:
        outb(WIN32K, io + N2_PSR);
        break;

    case 65536:
        outb(WIN64K, io + N2_PSR);
        break;

    default:
        printk(KERN_ERR "n2: invalid window size\n");
        n2_destroy_card(card);
        return -ENODEV;
    }

    pcr = PCR_ENWIN | PCR_VPM | (USE_BUS16BITS ? PCR_BUS16 : 0);
    outb(pcr, io + N2_PCR);

    card->ram_size = sca_detect_ram(card, card->winbase, MAX_RAM_SIZE);

    /* number of TX + RX buffers for one port */
    i = card->ram_size / ((valid0 + valid1) * (sizeof(pkt_desc) +
                          HDLC_MAX_MRU));

    card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
    card->rx_ring_buffers = i - card->tx_ring_buffers;

    card->buff_offset = (valid0 + valid1) * sizeof(pkt_desc) *
                        (card->tx_ring_buffers + card->rx_ring_buffers);

    printk(KERN_INFO "n2: RISCom/N2 %u KB RAM, IRQ%u, "
           "using %u TX + %u RX packets rings\n", card->ram_size / 1024,
           card->irq, card->tx_ring_buffers, card->rx_ring_buffers);

    if (card->tx_ring_buffers < 1) {
        printk(KERN_ERR "n2: RAM test failed\n");
        n2_destroy_card(card);
        return -EIO;
    }

    pcr |= PCR_RUNSCA;		/* run SCA */
    outb(pcr, io + N2_PCR);
    outb(0, io + N2_MCR);

    sca_init(card, 0);
    for (cnt = 0; cnt < 2; cnt++) {
        port_t *port = &card->ports[cnt];
        struct net_device *dev = port_to_dev(port);
        hdlc_device *hdlc = dev_to_hdlc(dev);

        if ((cnt == 0 && !valid0) || (cnt == 1 && !valid1))
            continue;

        port->phy_node = cnt;
        port->valid = 1;

        if ((cnt == 1) && valid0)
            port->log_node = 1;

        spin_lock_init(&port->lock);
        SET_MODULE_OWNER(dev);
        dev->irq = irq;
        dev->mem_start = winbase;
        dev->mem_end = winbase + USE_WINDOWSIZE - 1;
        dev->tx_queue_len = 50;
        dev->do_ioctl = n2_ioctl;
        dev->open = n2_open;
        dev->stop = n2_close;
        hdlc->attach = sca_attach;
        hdlc->xmit = sca_xmit;
        port->settings.clock_type = CLOCK_EXT;
        port->card = card;

        if (register_hdlc_device(dev)) {
            printk(KERN_WARNING "n2: unable to register hdlc "
                   "device\n");
            port->card = NULL;
            n2_destroy_card(card);
            return -ENOBUFS;
        }
        sca_init_sync_port(port); /* Set up SCA memory */

        printk(KERN_INFO "%s: RISCom/N2 node %d\n",
               dev->name, port->phy_node);
    }

    *new_card = card;
    new_card = &card->next_card;

    return 0;
}
Example #18
static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
					const struct pci_device_id *ent)
{
	card_t *card;
	u8 rev_id;
	u32 __iomem *p;
	int i;
	u32 ramsize;
	u32 ramphys;		/* buffer memory base */
	u32 scaphys;		/* SCA memory base */
	u32 plxphys;		/* PLX registers memory base */

#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(KERN_INFO "%s\n", version);
#endif

	i = pci_enable_device(pdev);
	if (i)
		return i;

	i = pci_request_regions(pdev, "PC300");
	if (i) {
		pci_disable_device(pdev);
		return i;
	}

	card = kmalloc(sizeof(card_t), GFP_KERNEL);
	if (card == NULL) {
		printk(KERN_ERR "pc300: unable to allocate memory\n");
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		return -ENOBUFS;
	}
	memset(card, 0, sizeof(card_t));
	pci_set_drvdata(pdev, card);

	if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
	    pdev->device == PCI_DEVICE_ID_PC300_TE_2)
		card->type = PC300_TE; /* not fully supported */
	else if (card->init_ctrl_value & PC300_CTYPE_MASK)
		card->type = PC300_X21;
	else
		card->type = PC300_RSV;

	if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
	    pdev->device == PCI_DEVICE_ID_PC300_TE_1)
		card->n_ports = 1;
	else
		card->n_ports = 2;

	for (i = 0; i < card->n_ports; i++)
		if (!(card->ports[i].dev = alloc_hdlcdev(&card->ports[i]))) {
			printk(KERN_ERR "pc300: unable to allocate memory\n");
			pc300_pci_remove_one(pdev);
			return -ENOMEM;
		}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
	    pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
	    pci_resource_len(pdev, 3) < 16384) {
		printk(KERN_ERR "pc300: invalid card EEPROM parameters\n");
		pc300_pci_remove_one(pdev);
		return -EFAULT;
	}

	plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK;
	card->plxbase = ioremap(plxphys, PC300_PLX_SIZE);

	scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK;
	card->scabase = ioremap(scaphys, PC300_SCA_SIZE);

	ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
	card->rambase = ioremap(ramphys, pci_resource_len(pdev,3));

	if (card->plxbase == NULL ||
	    card->scabase == NULL ||
	    card->rambase == NULL) {
		printk(KERN_ERR "pc300: ioremap() failed\n");
		pc300_pci_remove_one(pdev);
		return -ENOMEM;
	}

	/* PLX PCI 9050 workaround for local configuration register read bug */
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, scaphys);
	card->init_ctrl_value = readl(&((plx9050 __iomem *)card->scabase)->init_ctrl);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys);

	/* Reset PLX */
	p = &card->plxbase->init_ctrl;
	writel(card->init_ctrl_value | 0x40000000, p);
	readl(p);		/* Flush the write - do not use sca_flush */
	udelay(1);

	writel(card->init_ctrl_value, p);
	readl(p);		/* Flush the write - do not use sca_flush */
	udelay(1);

	/* Reload Config. Registers from EEPROM */
	writel(card->init_ctrl_value | 0x20000000, p);
	readl(p);		/* Flush the write - do not use sca_flush */
	udelay(1);

	writel(card->init_ctrl_value, p);
	readl(p);		/* Flush the write - do not use sca_flush */
	udelay(1);

	ramsize = sca_detect_ram(card, card->rambase,
				 pci_resource_len(pdev, 3));

	if (use_crystal_clock)
		card->init_ctrl_value &= ~PC300_CLKSEL_MASK;
	else
		card->init_ctrl_value |= PC300_CLKSEL_MASK;

	writel(card->init_ctrl_value, &card->plxbase->init_ctrl);
	/* number of TX + RX buffers for one port */
	i = ramsize / (card->n_ports * (sizeof(pkt_desc) + HDLC_MAX_MRU));
	card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
	card->rx_ring_buffers = i - card->tx_ring_buffers;

	card->buff_offset = card->n_ports * sizeof(pkt_desc) *
		(card->tx_ring_buffers + card->rx_ring_buffers);

	printk(KERN_INFO "pc300: PC300/%s, %u KB RAM at 0x%x, IRQ%u, "
	       "using %u TX + %u RX packets rings\n",
	       card->type == PC300_X21 ? "X21" :
	       card->type == PC300_TE ? "TE" : "RSV",
	       ramsize / 1024, ramphys, pdev->irq,
	       card->tx_ring_buffers, card->rx_ring_buffers);

	if (card->tx_ring_buffers < 1) {
		printk(KERN_ERR "pc300: RAM test failed\n");
		pc300_pci_remove_one(pdev);
		return -EFAULT;
	}

	/* Enable interrupts on the PCI bridge, LINTi1 active low */
	writew(0x0041, &card->plxbase->intr_ctrl_stat);

	/* Allocate IRQ */
	if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, devname, card)) {
		printk(KERN_WARNING "pc300: could not allocate IRQ%d.\n",
		       pdev->irq);
		pc300_pci_remove_one(pdev);
		return -EBUSY;
	}
	card->irq = pdev->irq;

	sca_init(card, 0);

	// COTE not set - allows better TX DMA settings
	// sca_out(sca_in(PCR, card) | PCR_COTE, PCR, card);

	sca_out(0x10, BTCR, card);

	for (i = 0; i < card->n_ports; i++) {
		port_t *port = &card->ports[i];
		struct net_device *dev = port_to_dev(port);
		hdlc_device *hdlc = dev_to_hdlc(dev);
		port->phy_node = i;

		spin_lock_init(&port->lock);
		SET_MODULE_OWNER(dev);
		dev->irq = card->irq;
		dev->mem_start = ramphys;
		dev->mem_end = ramphys + ramsize - 1;
		dev->tx_queue_len = 50;
		dev->do_ioctl = pc300_ioctl;
		dev->open = pc300_open;
		dev->stop = pc300_close;
		hdlc->attach = sca_attach;
		hdlc->xmit = sca_xmit;
		port->settings.clock_type = CLOCK_EXT;
		port->card = card;
		if (card->type == PC300_X21)
			port->iface = IF_IFACE_X21;
		else
			port->iface = IF_IFACE_V35;

		if (register_hdlc_device(dev)) {
			printk(KERN_ERR "pc300: unable to register hdlc "
			       "device\n");
			port->card = NULL;
			pc300_pci_remove_one(pdev);
			return -ENOBUFS;
		}
		sca_init_sync_port(port);	/* Set up SCA memory */

		printk(KERN_INFO "%s: PC300 node %d\n",
		       dev->name, port->phy_node);
	}
	return 0;
}
Example #19
static void sca_init_sync_port(port_t *port)
{
	card_t *card = port_to_card(port);
	int transmit, i;

	port->rxin = 0;
	port->txin = 0;
	port->txlast = 0;

#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
	openwin(card, 0);
#endif

	for (transmit = 0; transmit < 2; transmit++) {
		u16 dmac = transmit ? get_dmac_tx(port) : get_dmac_rx(port);
		u16 buffs = transmit ? card->tx_ring_buffers
			: card->rx_ring_buffers;

		for (i = 0; i < buffs; i++) {
			pkt_desc __iomem *desc = desc_address(port, i, transmit);
			u16 chain_off = desc_offset(port, i + 1, transmit);
			u32 buff_off = buffer_offset(port, i, transmit);

			writea(chain_off, &desc->cp);
			writel(buff_off, &desc->bp);
			writew(0, &desc->len);
			writeb(0, &desc->stat);
		}

		/* DMA disable - to halt state */
		sca_out(0, transmit ? DSR_TX(phy_node(port)) :
			DSR_RX(phy_node(port)), card);
		/* software ABORT - to initial state */
		sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) :
			DCR_RX(phy_node(port)), card);

#ifdef __HD64570_H
		sca_out(0, dmac + CPB, card); /* pointer base */
#endif
		/* current desc addr */
		sca_outa(desc_offset(port, 0, transmit), dmac + CDAL, card);
		if (!transmit)
			sca_outa(desc_offset(port, buffs - 1, transmit),
				 dmac + EDAL, card);
		else
			sca_outa(desc_offset(port, 0, transmit), dmac + EDAL,
				 card);

		/* clear frame end interrupt counter */
		sca_out(DCR_CLEAR_EOF, transmit ? DCR_TX(phy_node(port)) :
			DCR_RX(phy_node(port)), card);

		if (!transmit) { /* Receive */
			/* set buffer length */
			sca_outw(HDLC_MAX_MRU, dmac + BFLL, card);
			/* Chain mode, Multi-frame */
			sca_out(0x14, DMR_RX(phy_node(port)), card);
			sca_out(DIR_EOME | DIR_BOFE, DIR_RX(phy_node(port)),
				card);
			/* DMA enable */
			sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
		} else {	/* Transmit */
			/* Chain mode, Multi-frame */
			sca_out(0x14, DMR_TX(phy_node(port)), card);
			/* enable underflow interrupts */
			sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card);
		}
	}

	hdlc_set_carrier(!(sca_in(get_msci(port) + ST3, card) & ST3_DCD),
			 port_to_dev(port));
}