Example no. 1
int __init
xpc_init(void)
{
    int ret;
    short partid;
    struct xpc_partition *part;
    struct task_struct *kthread;
    size_t buf_size;

    if (!ia64_platform_is("sn2"))
        return -ENODEV;

    buf_size = max(XPC_RP_VARS_SIZE,
                   XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
    xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
                             GFP_KERNEL,
                             &xpc_remote_copy_buffer_base);
    if (xpc_remote_copy_buffer == NULL)
        return -ENOMEM;

    snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
    snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");

    xpc_sysctl = register_sysctl_table(xpc_sys_dir);

    /*
     * The first few fields of each entry of xpc_partitions[] need to
     * be initialized now so that calls to xpc_connect() and
     * xpc_disconnect() can be made prior to the activation of any remote
     * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
     * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
     * PARTITION HAS BEEN ACTIVATED.
     */
    for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
        part = &xpc_partitions[partid];

        DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

        part->act_IRQ_rcvd = 0;
        spin_lock_init(&part->act_lock);
        part->act_state = XPC_P_INACTIVE;
        XPC_SET_REASON(part, 0, 0);

        init_timer(&part->disengage_request_timer);
        part->disengage_request_timer.function =
            xpc_timeout_partition_disengage_request;
        part->disengage_request_timer.data = (unsigned long)part;

        part->setup_state = XPC_P_UNSET;
        init_waitqueue_head(&part->teardown_wq);
        atomic_set(&part->references, 0);
    }

    /*
     * Open up protections for IPI operations (and AMO operations on
     * Shub 1.1 systems).
     */
    xpc_allow_IPI_ops();

    /*
     * Interrupts being processed will increment this atomic variable and
     * awaken the heartbeat thread which will process the interrupts.
     */
    atomic_set(&xpc_act_IRQ_rcvd, 0);

    /*
     * This is safe to do before the xpc_hb_checker thread has started
     * because the handler releases a wait queue.  If an interrupt is
     * received before the thread is waiting, it will not go to sleep,
     * but rather immediately process the interrupt.
     */
    ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
                      "xpc hb", NULL);
    if (ret != 0) {
        dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
                "errno=%d\n", -ret);

        xpc_restrict_IPI_ops();

        if (xpc_sysctl)
            unregister_sysctl_table(xpc_sysctl);

        kfree(xpc_remote_copy_buffer_base);
        return -EBUSY;
    }

    /*
     * Fill the partition reserved page with the information needed by
     * other partitions to discover we are alive and establish initial
     * communications.
     */
    xpc_rsvd_page = xpc_rsvd_page_init();
    if (xpc_rsvd_page == NULL) {
        dev_err(xpc_part, "could not setup our reserved page\n");

        free_irq(SGI_XPC_ACTIVATE, NULL);
        xpc_restrict_IPI_ops();

        if (xpc_sysctl)
            unregister_sysctl_table(xpc_sysctl);

        kfree(xpc_remote_copy_buffer_base);
        return -EBUSY;
    }

    /* add ourselves to the reboot_notifier_list */
    ret = register_reboot_notifier(&xpc_reboot_notifier);
    if (ret != 0)
        dev_warn(xpc_part, "can't register reboot notifier\n");

    /* add ourselves to the die_notifier list */
    ret = register_die_notifier(&xpc_die_notifier);
    if (ret != 0)
        dev_warn(xpc_part, "can't register die notifier\n");

    init_timer(&xpc_hb_timer);
    xpc_hb_timer.function = xpc_hb_beater;

    /*
     * The real work-horse behind xpc.  This processes incoming
     * interrupts and monitors remote heartbeats.
     */
    kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
    if (IS_ERR(kthread)) {
        dev_err(xpc_part, "failed while forking hb check thread\n");

        /* indicate to others that our reserved page is uninitialized */
        xpc_rsvd_page->vars_pa = 0;

        /* take ourselves off of the reboot_notifier_list */
        (void)unregister_reboot_notifier(&xpc_reboot_notifier);

        /* take ourselves off of the die_notifier list */
        (void)unregister_die_notifier(&xpc_die_notifier);

        del_timer_sync(&xpc_hb_timer);
        free_irq(SGI_XPC_ACTIVATE, NULL);
        xpc_restrict_IPI_ops();

        if (xpc_sysctl)
            unregister_sysctl_table(xpc_sysctl);

        kfree(xpc_remote_copy_buffer_base);
        return -EBUSY;
    }

    /*
     * Startup a thread that will attempt to discover other partitions to
     * activate based on info provided by SAL. This new thread is short
     * lived and will exit once discovery is complete.
     */
    kthread = kthread_run(xpc_initiate_discovery, NULL,
                          XPC_DISCOVERY_THREAD_NAME);
    if (IS_ERR(kthread)) {
        dev_err(xpc_part, "failed while forking discovery thread\n");

        /* mark this new thread as a non-starter */
        complete(&xpc_discovery_exited);

        xpc_do_exit(xpUnloading);
        return -EBUSY;
    }

    /* set the interface to point at XPC's functions */
    xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
                      xpc_initiate_allocate, xpc_initiate_send,
                      xpc_initiate_send_notify, xpc_initiate_received,
                      xpc_initiate_partid_to_nasids);

    return 0;
}
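Each failure path in xpc_init() repeats, in reverse order, the teardown of everything initialized before it, so every new resource forces an edit to every later error branch. A common way to collapse that duplication is a goto-based cleanup ladder. The following is a minimal sketch of that idiom, not code from the XPC driver; example_init(), example_start_threads(), example_irq_handler() and EXAMPLE_IRQ are hypothetical placeholders.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

#define EXAMPLE_IRQ	42	/* hypothetical IRQ number */

static void *example_buf;

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_start_threads(void);	/* hypothetical helper */

static int __init example_init(void)
{
	int ret;

	example_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (example_buf == NULL)
		return -ENOMEM;

	ret = request_irq(EXAMPLE_IRQ, example_irq_handler, 0,
			  "example", NULL);
	if (ret != 0)
		goto out_free_buf;	/* undo only what succeeded */

	ret = example_start_threads();
	if (ret != 0)
		goto out_free_irq;

	return 0;

	/* unwind in strict reverse order of acquisition */
out_free_irq:
	free_irq(EXAMPLE_IRQ, NULL);
out_free_buf:
	kfree(example_buf);
	return ret;
}

With this shape, adding a new setup step means adding one label and one goto rather than touching every existing error branch, which is why most kernel init functions are written this way.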
Example no. 2
/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t __iomem *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;
	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_fifo_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			skbn = skb;

		} else {

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						      skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else {
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

				if (skbn)
					skb_align(skbn, ENET_RX_ALIGN);
			}

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* set frame length */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_rx(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	return 0;
}
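The rx_copybreak branch above trades a memcpy for a buffer allocation: a small frame is copied into a tightly sized skb so the full-size DMA buffer can stay on the ring, while a large frame is handed up whole and its ring slot is refilled with a fresh buffer. A minimal sketch of just that decision follows; COPYBREAK and RX_BUF_SIZE are illustrative constants, not values from the driver.

#include <linux/skbuff.h>

#define COPYBREAK	256	/* illustrative copy-vs-swap threshold */
#define RX_BUF_SIZE	1552	/* illustrative full receive buffer size */

/*
 * Return the skb to hand to the stack; *ring_slot ends up holding the
 * buffer that goes back on the RX ring. Returns NULL on a memory
 * squeeze, in which case the caller drops the frame and recycles the
 * original buffer.
 */
static struct sk_buff *rx_swap_or_copy(struct sk_buff **ring_slot,
				       u16 pkt_len)
{
	struct sk_buff *skb = *ring_slot;
	struct sk_buff *skbn;

	if (pkt_len <= COPYBREAK) {
		/* Small frame: copy it out; +2 keeps the IP header
		 * 4-byte aligned behind the 14-byte Ethernet header. */
		skbn = dev_alloc_skb(pkt_len + 2);
		if (skbn == NULL)
			return NULL;
		skb_reserve(skbn, 2);
		skb_copy_from_linear_data(skb, skbn->data, pkt_len);
		return skbn;	/* the big DMA buffer stays on the ring */
	}

	/* Large frame: pass the buffer itself up, refill the slot. */
	skbn = dev_alloc_skb(RX_BUF_SIZE);
	if (skbn == NULL)
		return NULL;
	*ring_slot = skbn;
	return skb;
}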
Example no. 3
/*
 * Given a nasid, get the physical address of the partition's reserved page
 * for that nasid. This function returns 0 on any error.
 */
static unsigned long
xpc_get_rsvd_page_pa(int nasid)
{
	enum xp_retval ret;
	u64 cookie = 0;
	unsigned long rp_pa = nasid;	/* seed with nasid */
	size_t len = 0;
	size_t buf_len = 0;
	void *buf = NULL;	/* allocated once SAL reports the needed length */
	void *buf_base = NULL;
	enum xp_retval (*get_partition_rsvd_page_pa)
		(void *, u64 *, unsigned long *, size_t *) =
		xpc_arch_ops.get_partition_rsvd_page_pa;

	while (1) {

		/* !!! rp_pa will need to be _gpa on UV.
		 * ??? So do we save it into the architecture specific parts
		 * ??? of the xpc_partition structure? Do we rename this
		 * ??? function or have two versions? Rename rp_pa for UV to
		 * ??? rp_gpa?
		 */
		ret = get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len);

		dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
			"address=0x%016lx, len=0x%016lx\n", ret,
			(unsigned long)cookie, rp_pa, len);

		if (ret != xpNeedMoreInfo)
			break;

		/* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */
		if (is_shub())
			len = L1_CACHE_ALIGN(len);

		if (len > buf_len) {
			kfree(buf_base);	/* kfree(NULL) is a no-op */
			buf_len = L1_CACHE_ALIGN(len);
			buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
							    &buf_base);
			if (buf_base == NULL) {
				dev_err(xpc_part, "unable to kmalloc "
					"len=0x%016lx\n", buf_len);
				ret = xpNoMemory;
				break;
			}
		}

		ret = xp_remote_memcpy(xp_pa(buf), rp_pa, len);
		if (ret != xpSuccess) {
			dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
			break;
		}
	}

	kfree(buf_base);

	if (ret != xpSuccess)
		rp_pa = 0;

	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
	return rp_pa;
}
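The loop above is an instance of a common firmware-query pattern: call the interface, and whenever it answers "need more info" along with the length it actually requires, grow the bounce buffer and retry until it either succeeds or fails hard. A minimal sketch of the bare pattern, with a hypothetical example_query() standing in for the SAL call:

#include <linux/errno.h>
#include <linux/slab.h>

enum example_ret { EX_OK, EX_NEED_MORE, EX_ERROR };

/* Hypothetical firmware query: on EX_NEED_MORE, *len is set to the
 * buffer size the next call requires. */
static enum example_ret example_query(void *buf, size_t *len);

static int query_with_growing_buf(void)
{
	enum example_ret ret;
	void *buf = NULL;
	size_t len = 0, buf_len = 0;

	while (1) {
		ret = example_query(buf, &len);
		if (ret != EX_NEED_MORE)
			break;

		/* Reallocate only when the current buffer is too small. */
		if (len > buf_len) {
			kfree(buf);	/* kfree(NULL) is a no-op */
			buf_len = len;
			buf = kmalloc(buf_len, GFP_KERNEL);
			if (buf == NULL)
				return -ENOMEM;
		}
	}

	kfree(buf);
	return ret == EX_OK ? 0 : -EIO;
}

Keeping buf_len separate from len lets the loop skip the reallocation when the firmware asks for a size the buffer already covers, exactly as the len > buf_len test does in xpc_get_rsvd_page_pa().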