Example 1
static hub_intr_t
do_hub_intr_alloc(devfs_handle_t dev,
		device_desc_t dev_desc,
		devfs_handle_t owner_dev,
		int uncond_nothread)
{
	cpuid_t		cpu = 0;
	int		vector;
	hub_intr_t	intr_hdl;
	cnodeid_t	cnode;
	int		cpuphys, slice;
	int		nasid;
	iopaddr_t	xtalk_addr;
	struct xtalk_intr_s	*xtalk_info;
	xwidget_info_t	xwidget_info;
	ilvl_t		intr_swlevel = 0;

	cpu = intr_heuristic(dev, dev_desc, -1, 0, owner_dev, NULL, &vector);

	if (cpu == CPU_NONE) {
		printk("Unable to allocate interrupt for 0x%p\n", (void *)owner_dev);
		return(0);
	}

	cpuphys = cpu_physical_id(cpu);
	slice = cpu_physical_id_to_slice(cpuphys);
	nasid = cpu_physical_id_to_nasid(cpuphys);
	cnode = cpuid_to_cnodeid(cpu);

	/* Slice 0 CPUs are signalled through SH_II_INT0; any other
	 * slice through SH_II_INT1.
	 */
	if (slice) {
		xtalk_addr = SH_II_INT1 | GLOBAL_MMR_SPACE |
			((unsigned long)nasid << 36) | (1UL << 47);
	} else {
		xtalk_addr = SH_II_INT0 | GLOBAL_MMR_SPACE |
			((unsigned long)nasid << 36) | (1UL << 47);
	}

	intr_hdl = snia_kmem_alloc_node(sizeof(struct hub_intr_s), KM_NOSLEEP, cnode);
	ASSERT_ALWAYS(intr_hdl);

	xtalk_info = &intr_hdl->i_xtalk_info;
	xtalk_info->xi_dev = dev;
	xtalk_info->xi_vector = vector;
	xtalk_info->xi_addr = xtalk_addr;

	xwidget_info = xwidget_info_get(dev);
	if (xwidget_info) {
		xtalk_info->xi_target = xwidget_info_masterid_get(xwidget_info);
	}

	intr_hdl->i_swlevel = intr_swlevel;
	intr_hdl->i_cpuid = cpu;
	intr_hdl->i_bit = vector;
	intr_hdl->i_flags = HUB_INTR_IS_ALLOCED;	/* fresh allocation: assign, don't OR into uninitialized flags */

	hub_device_desc_update(dev_desc, intr_swlevel, cpu);
	return(intr_hdl);
}
Example 2
void
xwidget_error_register(vertex_hdl_t xwidget,
		       error_handler_f *efunc,
		       error_handler_arg_t einfo)
{
    xwidget_info_t          xwidget_info;

    xwidget_info = xwidget_info_get(xwidget);
    ASSERT(xwidget_info != NULL);
    xwidget_info->w_efunc = efunc;
    xwidget_info->w_einfo = einfo;
}
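
A minimal caller sketch for the registration interface above. The handler signature is inferred from how w_efunc is invoked in xtalk_error_handler (Example 9); my_widget_errhandler, my_widget_attach, and the soft-state argument are hypothetical names, not part of the original driver.

/* Hypothetical sketch: a widget driver registers its error handler at
 * attach time, mirroring the xwidget_error_register() call made by
 * pic_attach2() in Example 12.
 */
static int
my_widget_errhandler(error_handler_arg_t einfo,	/* value passed at registration */
		     int error_code,
		     ioerror_mode_t mode,
		     ioerror_t *ioerror)
{
	/* ... inspect einfo and ioerror, attempt recovery ... */
	return IOERROR_HANDLED;
}

static void
my_widget_attach(vertex_hdl_t xconn_vhdl, void *soft)
{
	/* From here on, xtalk_error_handler(xconn_vhdl, ...) dispatches to
	 * my_widget_errhandler(soft, ...) instead of complaining itself.
	 */
	xwidget_error_register(xconn_vhdl, my_widget_errhandler,
			       (error_handler_arg_t)soft);
}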
Example 3
static xtalk_provider_t *
xwidget_to_provider_fns(vertex_hdl_t xconn)
{
    xwidget_info_t          widget_info;
    xtalk_provider_t       *provider_fns;

    widget_info = xwidget_info_get(xconn);
    ASSERT(widget_info != NULL);

    provider_fns = xwidget_info_pops_get(widget_info);
    ASSERT(provider_fns != NULL);

    return (provider_fns);
}
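
For context, a hedged sketch of how the generic xtalk entry points presumably use this helper: fetch the master provider's ops table and dispatch through it. The DEV_FUNC-style macro, the piomap_alloc member, and the xtalk_piomap_t return type are assumptions not shown in these examples.

/* Hypothetical dispatch sketch -- assumes xtalk_provider_t carries a
 * piomap_alloc function pointer with the usual xtalk piomap signature.
 */
#define DEV_FUNC(dev, func)	(xwidget_to_provider_fns(dev)->func)

xtalk_piomap_t
xtalk_piomap_alloc(vertex_hdl_t dev,		/* set up mapping for this device */
		   device_desc_t dev_desc,	/* device descriptor */
		   iopaddr_t xtalk_addr,	/* map for this xtalk_addr range */
		   size_t byte_count,
		   size_t byte_count_max,	/* maximum size of a mapping */
		   unsigned flags)
{
    return (xtalk_piomap_t) DEV_FUNC(dev, piomap_alloc)
	(dev, dev_desc, xtalk_addr, byte_count, byte_count_max, flags);
}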
Example 4
/* Get the canonical hwgraph name of an xtalk widget */
char *
xwidget_name_get(vertex_hdl_t xwidget_vhdl)
{
	xwidget_info_t  info;

	/* If we have a bogus widget handle then return
	 * a default anonymous widget name.
	 */
	if (xwidget_vhdl == GRAPH_VERTEX_NONE)
	    return(ANON_XWIDGET_NAME);
	/* Read the widget name stored in the widget info
	 * for the widget setup during widget initialization.
	 */
	info = xwidget_info_get(xwidget_vhdl);
	ASSERT(info != NULL);
	return(xwidget_info_name_get(info));
}
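
A trivial caller sketch; thanks to the ANON_XWIDGET_NAME fallback the result is always printable. The announce routine below is illustrative only.

/* Hypothetical sketch: log which widget we are attaching to.  Safe even
 * with a bogus handle, since xwidget_name_get() never returns NULL.
 */
static void
my_attach_announce(vertex_hdl_t xwidget_vhdl)
{
	printk(KERN_INFO "attaching to xtalk widget %s\n",
	       xwidget_name_get(xwidget_vhdl));
}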
Example 5
/* ARGSUSED */
hub_dmamap_t
hub_dmamap_alloc(	devfs_handle_t dev,	/* set up mappings for this device */
                    device_desc_t dev_desc,	/* device descriptor */
                    size_t byte_count_max, 	/* max size of a mapping */
                    unsigned flags)		/* defined in dma.h */
{
    hub_dmamap_t dmamap;
    xwidget_info_t widget_info = xwidget_info_get(dev);
    xwidgetnum_t widget = xwidget_info_id_get(widget_info);
    devfs_handle_t hubv = xwidget_info_master_get(widget_info);

    dmamap = kern_malloc(sizeof(struct hub_dmamap_s));
    if (!dmamap)
        return NULL;
    dmamap->hdma_xtalk_info.xd_dev = dev;
    dmamap->hdma_xtalk_info.xd_target = widget;
    dmamap->hdma_hub = hubv;
    dmamap->hdma_flags = HUB_DMAMAP_IS_VALID;
    if (flags & XTALK_FIXED)
        dmamap->hdma_flags |= HUB_DMAMAP_IS_FIXED;

    return(dmamap);
}
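
A hedged caller sketch using only the signature above; the page-sized bound and the fixed mapping are illustrative choices, and a real caller would pair this with the corresponding dmamap free routine (not shown in these examples).

/* Hypothetical sketch: reserve a fixed DMA mapping of at most one page.
 * XTALK_FIXED is the only flag hub_dmamap_alloc() inspects.
 */
static hub_dmamap_t
my_dma_setup(devfs_handle_t dev)
{
	return hub_dmamap_alloc(dev, (device_desc_t)0, PAGE_SIZE, XTALK_FIXED);
}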
Example 6
/* ARGSUSED */
caddr_t
hub_piotrans_addr(	devfs_handle_t dev,	/* translate to this device */
                    device_desc_t dev_desc,	/* device descriptor */
                    iopaddr_t xtalk_addr,	/* Crosstalk address */
                    size_t byte_count,	/* map this many bytes */
                    unsigned flags)		/* (currently unused) */
{
    xwidget_info_t widget_info = xwidget_info_get(dev);
    xwidgetnum_t widget = xwidget_info_id_get(widget_info);
    devfs_handle_t hubv = xwidget_info_master_get(widget_info);
    hub_piomap_t hub_piomap;
    hubinfo_t hubinfo;

    hubinfo_get(hubv, &hubinfo);

    if (xtalk_addr + byte_count <= SWIN_SIZE) {
        hub_piomap = hubinfo_swin_piomap_get(hubinfo, (int)widget);
        return(hub_piomap_addr(hub_piomap, xtalk_addr, byte_count));
    } else
        return(NULL);
}
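
The translation above is the fast path: it succeeds only when the range fits in the widget's small window. A hedged sketch of the natural caller pattern, falling back to hub_piomap_alloc() (Example 14); everything except the three hub_ calls is illustrative.

/* Hypothetical sketch: try the cheap small-window translation first and
 * only allocate a big-window piomap when the address is out of range.
 */
static caddr_t
my_pio_addr(devfs_handle_t dev, iopaddr_t xtalk_addr, size_t byte_count)
{
	caddr_t va;
	hub_piomap_t map;

	va = hub_piotrans_addr(dev, (device_desc_t)0, xtalk_addr,
			       byte_count, 0);
	if (va != NULL)
		return va;

	map = hub_piomap_alloc(dev, (device_desc_t)0, xtalk_addr,
			       byte_count, byte_count, PIOMAP_FIXED);
	if (map == NULL)
		return NULL;
	return hub_piomap_addr(map, xtalk_addr, byte_count);
}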
Example 7
int
hubii_ixtt_get(devfs_handle_t widget_vhdl, ii_ixtt_u_t *ixtt)
{
    xwidget_info_t		widget_info = xwidget_info_get(widget_vhdl);
    devfs_handle_t		hub_vhdl    = xwidget_info_master_get(widget_info);
    hubinfo_t		hub_info = 0;
    nasid_t			nasid;
    int			s;

    /* Use the nasid from the hub info hanging off the hub vertex */
    hubinfo_get(hub_vhdl, &hub_info);
    /* Being over cautious by grabbing a lock */
    s = mutex_spinlock(&hub_info->h_bwlock);
    nasid = hub_info->h_nasid;

    ixtt->ii_ixtt_regval = REMOTE_HUB_L(nasid, IIO_IXTT);

    mutex_spinunlock(&hub_info->h_bwlock, s);
    return 0;
}
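
A short caller sketch for the accessor above; the dump routine is hypothetical and the cast only normalizes the register value for printing.

/* Hypothetical sketch: snapshot and print a widget's IXTT register. */
static void
my_dump_ixtt(devfs_handle_t widget_vhdl)
{
	ii_ixtt_u_t ixtt;

	hubii_ixtt_get(widget_vhdl, &ixtt);
	printk(KERN_DEBUG "IXTT = 0x%lx\n",
	       (unsigned long)ixtt.ii_ixtt_regval);
}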
Example 8
/* Interface to allow special drivers to set hub specific
 * device flags.
 * Returns 0 on failure, 1 on success.
 */
int
hub_device_flags_set(devfs_handle_t	widget_vhdl,
                     hub_widget_flags_t	flags)
{
    xwidget_info_t		widget_info = xwidget_info_get(widget_vhdl);
    xwidgetnum_t		widget_num  = xwidget_info_id_get(widget_info);
    devfs_handle_t		hub_vhdl    = xwidget_info_master_get(widget_info);
    hubinfo_t		hub_info = 0;
    nasid_t			nasid;
    int			s,rv;

    /* Use the nasid from the hub info hanging off the hub vertex
     * and widget number from the widget vertex
     */
    hubinfo_get(hub_vhdl, &hub_info);
    /* Being over cautious by grabbing a lock */
    s = mutex_spinlock(&hub_info->h_bwlock);
    nasid = hub_info->h_nasid;
    rv = hub_widget_flags_set(nasid, widget_num, flags);
    mutex_spinunlock(&hub_info->h_bwlock, s);

    return rv;
}
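
A hedged caller sketch honoring the 0-on-failure convention documented above. The concrete bits in hub_widget_flags_t are not shown in these examples, so the flags argument is left to the caller.

/* Hypothetical sketch: apply hub widget flags and report the outcome. */
static int
my_apply_flags(devfs_handle_t widget_vhdl, hub_widget_flags_t flags)
{
	if (!hub_device_flags_set(widget_vhdl, flags)) {
		printk(KERN_WARNING "failed to set hub widget flags\n");
		return 0;
	}
	return 1;
}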
Example 9
/*
 * xtalk_error_handler:
 * pass this error on to the handler registered
 * at the specified xtalk connection point,
 * or complain about it here if there is no handler.
 *
 * This routine plays two roles during error delivery
 * to most widgets: first, the external agent (heart,
 * hub, or whatever) calls in with the error and the
 * connect point representing the crosstalk switch,
 * or whatever crosstalk device is directly connected
 * to the agent.
 *
 * If there is a switch, it will generally look at the
 * widget number stashed in the ioerror structure; and,
 * if the error came from some widget other than the
 * switch, it will call back into xtalk_error_handler
 * with the connection point of the offending port.
 */
int
xtalk_error_handler(
		       vertex_hdl_t xconn,
		       int error_code,
		       ioerror_mode_t mode,
		       ioerror_t *ioerror)
{
    xwidget_info_t          xwidget_info;

    xwidget_info = xwidget_info_get(xconn);
    /* Make sure that xwidget_info is a valid pointer before dereferencing it.
     * We could come in here during very early initialization.
     */
    if (xwidget_info && xwidget_info->w_efunc)
	return xwidget_info->w_efunc
	    (xwidget_info->w_einfo,
	     error_code, mode, ioerror);
    /*
     * no error handler registered for
     * the offending port. it's not clear
     * what needs to be done, but reporting
     * it would be a good thing, unless it
     * is a mode that requires nothing.
     */
    if ((mode == MODE_DEVPROBE) || (mode == MODE_DEVUSERERROR) ||
	(mode == MODE_DEVREENABLE))
	return IOERROR_HANDLED;

#if defined(SUPPORT_PRINTING_V_FORMAT)
    printk(KERN_WARNING "Xbow at %v encountered Fatal error", xconn);
#else
    printk(KERN_WARNING "Xbow at 0x%p encountered Fatal error", (void *)xconn);
#endif
    snia_ioerror_dump("xtalk", error_code, mode, ioerror);

    return IOERROR_UNHANDLED;
}
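
A hedged sketch of the two-stage delivery described in the comment above: a switch-level handler inspects the widget number stashed in the ioerror and re-enters xtalk_error_handler() at the offending port. Only xtalk_error_handler() itself comes from this example; the soft-state struct and both my_ helpers are assumptions.

/* Hypothetical soft state and lookup helpers for a crosstalk switch. */
struct my_switch_soft {
	xwidgetnum_t	my_widgetnum;	/* the switch's own widget number */
	/* ... */
};
extern xwidgetnum_t my_ioerror_widgetnum_get(ioerror_t *);	/* hypothetical */
extern vertex_hdl_t my_switch_port_vertex(struct my_switch_soft *,
					  xwidgetnum_t);	/* hypothetical */

static int
my_switch_error_handler(error_handler_arg_t einfo,
			int error_code,
			ioerror_mode_t mode,
			ioerror_t *ioerror)
{
	struct my_switch_soft *soft = (struct my_switch_soft *)einfo;
	xwidgetnum_t widgetnum = my_ioerror_widgetnum_get(ioerror);

	if (widgetnum != soft->my_widgetnum) {
		vertex_hdl_t port = my_switch_port_vertex(soft, widgetnum);

		/* second stage: deliver at the offending port's connect point */
		return xtalk_error_handler(port, error_code, mode, ioerror);
	}

	/* the switch itself is at fault; handle or report locally ... */
	return IOERROR_UNHANDLED;
}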
Example 10
/*
 * xwidget_unregister:
 *	Unregister the xtalk device and detach all its hwgraph namespace.
 */
int
xwidget_unregister(vertex_hdl_t widget)
{
    xwidget_info_t	widget_info;

    /* Make sure that we have valid widget information initialized */
    if (!(widget_info = xwidget_info_get(widget)))
	return(1);

    /* Remove the inventory information associated
     * with the widget.
     */
    hwgraph_inventory_remove(widget, -1, -1, -1, -1, -1);
    
    /* Clean out the xwidget information */
    (void)kfree(widget_info->w_name);
    memset((void *)widget_info, 0, sizeof(*widget_info));
    DEL(widget_info);
    
    return(0);
}
Example 11
/*
 * Allocate resources required for an interrupt as specified in dev_desc.
 * Returns a hub interrupt handle on success, or 0 on failure.
 */
static hub_intr_t
do_hub_intr_alloc(devfs_handle_t dev,		/* which crosstalk device */
		  device_desc_t dev_desc,	/* device descriptor */
		  devfs_handle_t owner_dev,	/* owner of this interrupt, if known */
		  int uncond_nothread)		/* unconditionally non-threaded */
{
	cpuid_t cpu = (cpuid_t)0;			/* cpu to receive interrupt */
	int cpupicked = 0;
	int bit;			/* interrupt vector */
	/*REFERENCED*/
	int intr_resflags = 0;
	hub_intr_t intr_hdl;
	cnodeid_t nodeid;		/* node to receive interrupt */
	/*REFERENCED*/
	nasid_t nasid;			/* nasid to receive interrupt */
	struct xtalk_intr_s *xtalk_info;
	iopaddr_t xtalk_addr;		/* xtalk addr on hub to set intr */
	xwidget_info_t xwidget_info;	/* standard crosstalk widget info handle */
	char *intr_name = NULL;
	ilvl_t intr_swlevel = (ilvl_t)0;
	extern int default_intr_pri;
	extern void synergy_intr_alloc(int, int);


	if (dev_desc) {
		if (dev_desc->flags & D_INTR_ISERR) {
			intr_resflags = II_ERRORINT;
		} else if (!uncond_nothread && !(dev_desc->flags & D_INTR_NOTHREAD)) {
			intr_resflags = II_THREADED;
		} else {
			/* Neither an error nor a thread. */
			intr_resflags = 0;
		}
	} else {
		intr_swlevel = default_intr_pri;
		if (!uncond_nothread)
			intr_resflags = II_THREADED;
	}

	/* XXX - Need to determine if the interrupt should be threaded. */

	/* If the cpu has not been picked already then choose a candidate 
	 * interrupt target and reserve the interrupt bit 
	 */
	if (!cpupicked) {
		cpu = intr_heuristic(dev, dev_desc, allocate_my_bit,
				     intr_resflags, owner_dev,
				     intr_name, &bit);
	}

	/* At this point we SHOULD have a valid cpu */
	if (cpu == CPU_NONE) {
#if defined(SUPPORT_PRINTING_V_FORMAT)
		printk(KERN_WARNING "%v hub_intr_alloc could not allocate interrupt\n",
			owner_dev);
#else
		printk(KERN_WARNING "%p hub_intr_alloc could not allocate interrupt\n",
			(void *)owner_dev);
#endif
		return(0);

	}

	/* If the cpu has been picked already (due to the bridge data 
	 * corruption bug) then try to reserve an interrupt bit .
	 */
	if (cpupicked) {
		bit = intr_reserve_level(cpu, allocate_my_bit, 
					 intr_resflags, 
					 owner_dev, intr_name);
		if (bit < 0) {
#if defined(SUPPORT_PRINTING_V_FORMAT)
			printk(KERN_WARNING "Could not reserve an interrupt bit for cpu"
				" %d and dev %v\n",
				cpu, owner_dev);
#else
			printk(KERN_WARNING "Could not reserve an interrupt bit for cpu"
				" %d and dev %p\n",
				(int)cpu, (void *)owner_dev);
#endif
			return(0);
		}
	}

	nodeid = cpuid_to_cnodeid(cpu);
	nasid = cpuid_to_nasid(cpu);
	xtalk_addr = HUBREG_AS_XTALKADDR(nasid, PIREG(PI_INT_PEND_MOD, cpuid_to_subnode(cpu)));

	/*
	 * Allocate an interrupt handle, and fill it in.  There are two
	 * pieces to an interrupt handle: the piece needed by generic
	 * xtalk code which is used by crosstalk device drivers, and
	 * the piece needed by low-level IP27 hardware code.
	 */
	intr_hdl = snia_kmem_alloc_node(sizeof(struct hub_intr_s), KM_NOSLEEP, nodeid);
	ASSERT_ALWAYS(intr_hdl);

	/* 
	 * Fill in xtalk information for generic xtalk interfaces that
	 * operate on xtalk_intr_hdl's.
	 */
	xtalk_info = &intr_hdl->i_xtalk_info;
	xtalk_info->xi_dev = dev;
	xtalk_info->xi_vector = bit;
	xtalk_info->xi_addr = xtalk_addr;

	/*
	 * Regardless of which CPU we ultimately interrupt, a given crosstalk
	 * widget always handles interrupts (and PIO and DMA) through its 
	 * designated "master" crosstalk provider.
	 */
	xwidget_info = xwidget_info_get(dev);
	if (xwidget_info)
		xtalk_info->xi_target = xwidget_info_masterid_get(xwidget_info);

	/* Fill in low level hub information for hub_* interrupt interface */
	intr_hdl->i_swlevel = intr_swlevel;
	intr_hdl->i_cpuid = cpu;
	intr_hdl->i_bit = bit;
	intr_hdl->i_flags = HUB_INTR_IS_ALLOCED;

	/* Store the actual interrupt priority level & interrupt target
	 * cpu back in the device descriptor.
	 */
	hub_device_desc_update(dev_desc, intr_swlevel, cpu);
	synergy_intr_alloc((int)bit, (int)cpu);
	return(intr_hdl);
}
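
do_hub_intr_alloc() is static, so exported entry points presumably select the uncond_nothread behavior; a hedged sketch (the wrapper names are assumptions about the surrounding file):

/* Hypothetical sketch: thin exported wrappers choosing threaded vs.
 * unconditionally non-threaded interrupt allocation.
 */
hub_intr_t
hub_intr_alloc(devfs_handle_t dev, device_desc_t dev_desc,
	       devfs_handle_t owner_dev)
{
	return do_hub_intr_alloc(dev, dev_desc, owner_dev, 0);
}

hub_intr_t
hub_intr_alloc_nothd(devfs_handle_t dev, device_desc_t dev_desc,
		     devfs_handle_t owner_dev)
{
	return do_hub_intr_alloc(dev, dev_desc, owner_dev, 1);
}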
Example 12
/*
 * PIC has two buses under a single widget.  pic_attach() calls pic_attach2()
 * to attach each of those buses.
 */
static int
pic_attach2(vertex_hdl_t xconn_vhdl, void *bridge,
	      vertex_hdl_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
{
    vertex_hdl_t	    ctlr_vhdl;
    pcibr_soft_t	    pcibr_soft;
    pcibr_info_t	    pcibr_info;
    xwidget_info_t	    info;
    xtalk_intr_t	    xtalk_intr;
    pcibr_list_p	    self;
    int			    entry, slot, ibit, i;
    vertex_hdl_t	    noslot_conn;
    char		    devnm[MAXDEVNAME], *s;
    pcibr_hints_t	    pcibr_hints;
    picreg_t		    id;
    picreg_t		    int_enable;
    picreg_t		    pic_ctrl_reg;

    int			    iobrick_type_get_nasid(nasid_t nasid);
    int			    iomoduleid_get(nasid_t nasid);
    int			    irq;
    int			    cpu;

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
		"pic_attach2: bridge=0x%lx, busnum=%d\n", bridge, busnum));

    ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER, 0,
		0, 0, 0,
		S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
		(struct file_operations *)&pcibr_fops, (void *)pcibr_vhdl);
    ASSERT(ctlr_vhdl != NULL);

    id = pcireg_bridge_id_get(bridge);
    hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV,
                         (arbitrary_info_t)XWIDGET_PART_REV_NUM(id));

    /*
     * Get the hint structure; if some NIC callback marked this vertex as
     * "hands-off" then we just return here, before doing anything else.
     */
    pcibr_hints = pcibr_hints_get(xconn_vhdl, 0);

    if (pcibr_hints && pcibr_hints->ph_hands_off)
        return -1;

    /* allocate soft structure to hang off the vertex.  Link the new soft
     * structure to the pcibr_list linked list
     */
    pcibr_soft = kmalloc(sizeof (*(pcibr_soft)), GFP_KERNEL);
    if ( !pcibr_soft )
	return -ENOMEM;

    self = kmalloc(sizeof (*(self)), GFP_KERNEL);
    if ( !self ) {
	kfree(pcibr_soft);
	return -ENOMEM;
    }
    memset(pcibr_soft, 0, sizeof (*(pcibr_soft)));
    memset(self, 0, sizeof (*(self)));

    self->bl_soft = pcibr_soft;
    self->bl_vhdl = pcibr_vhdl;
    self->bl_next = pcibr_list;
    pcibr_list = self;

    if (ret_softp)
        *ret_softp = pcibr_soft;

    pcibr_soft_set(pcibr_vhdl, pcibr_soft);

    s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
    pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
    if (!pcibr_soft->bs_name)
	    return -ENOMEM;

    strcpy(pcibr_soft->bs_name, s);

    pcibr_soft->bs_conn = xconn_vhdl;
    pcibr_soft->bs_vhdl = pcibr_vhdl;
    pcibr_soft->bs_base = (void *)bridge;
    pcibr_soft->bs_rev_num = XWIDGET_PART_REV_NUM(id);
    pcibr_soft->bs_intr_bits = (pcibr_intr_bits_f *)pcibr_intr_bits;
    pcibr_soft->bsi_err_intr = 0;
    pcibr_soft->bs_min_slot = 0;
    pcibr_soft->bs_max_slot = 3;
    pcibr_soft->bs_busnum = busnum;
    pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
    pcibr_soft->bs_int_ate_size = PIC_INTERNAL_ATES;
    /* Make sure this is called after setting the bs_base and bs_bridge_type */
    pcibr_soft->bs_bridge_mode = (pcireg_speed_get(pcibr_soft) << 1) |
                                  pcireg_mode_get(pcibr_soft);

    info = xwidget_info_get(xconn_vhdl);
    pcibr_soft->bs_xid = xwidget_info_id_get(info);
    pcibr_soft->bs_master = xwidget_info_master_get(info);
    pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);

    strcpy(pcibr_soft->bs_asic_name, "PIC");

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
                "pic_attach2: pcibr_soft=0x%lx, mode=0x%x\n",
                pcibr_soft, pcibr_soft->bs_bridge_mode));

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
                "pic_attach2: %s ASIC: rev %s (code=0x%x)\n",
                pcibr_soft->bs_asic_name,
                (IS_PIC_PART_REV_A(pcibr_soft->bs_rev_num)) ? "A" :
                (IS_PIC_PART_REV_B(pcibr_soft->bs_rev_num)) ? "B" :
                (IS_PIC_PART_REV_C(pcibr_soft->bs_rev_num)) ? "C" :
                "unknown", pcibr_soft->bs_rev_num));

    /* PV854845: Must clear write request buffer to avoid parity errors */
    for (i=0; i < PIC_WR_REQ_BUFSIZE; i++) {
        ((pic_t *)bridge)->p_wr_req_lower[i] = 0;
        ((pic_t *)bridge)->p_wr_req_upper[i] = 0;
        ((pic_t *)bridge)->p_wr_req_parity[i] = 0;
    }

    pcibr_soft->bs_nasid = NASID_GET(bridge);

    pcibr_soft->bs_bricktype = iobrick_type_get_nasid(pcibr_soft->bs_nasid);
    if (pcibr_soft->bs_bricktype < 0)
        printk(KERN_WARNING "%s: bricktype was unknown by L1 (ret val = 0x%x)\n",
                pcibr_soft->bs_name, pcibr_soft->bs_bricktype);

    pcibr_soft->bs_moduleid = iomoduleid_get(pcibr_soft->bs_nasid);

    if (pcibr_soft->bs_bricktype > 0) {
        switch (pcibr_soft->bs_bricktype) {
	case MODULE_PXBRICK:
	case MODULE_IXBRICK:
	case MODULE_OPUSBRICK:
            pcibr_soft->bs_first_slot = 0;
            pcibr_soft->bs_last_slot = 1;
            pcibr_soft->bs_last_reset = 1;

            /* Bus 1 of IXBrick has an IO9, so there are 4 devices, not 2 */
	    if ((pcibr_widget_to_bus(pcibr_vhdl) == 1) 
		    && isIO9(pcibr_soft->bs_nasid)) {
                pcibr_soft->bs_last_slot = 3;
                pcibr_soft->bs_last_reset = 3;
            }
            break;

        case MODULE_CGBRICK:
            pcibr_soft->bs_first_slot = 0;
            pcibr_soft->bs_last_slot = 0;
            pcibr_soft->bs_last_reset = 0;
            break;

        default:
	    printk(KERN_WARNING "%s: Unknown bricktype: 0x%x\n",
                    pcibr_soft->bs_name, pcibr_soft->bs_bricktype);
            break;
        }

        PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
                    "pic_attach2: bricktype=%d, brickbus=%d, "
		    "slots %d-%d\n", pcibr_soft->bs_bricktype,
		    pcibr_widget_to_bus(pcibr_vhdl),
                    pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot));
    }

    /*
     * Initialize bridge and bus locks
     */
    spin_lock_init(&pcibr_soft->bs_lock);

    /*
     * If we have one, process the hints structure.
     */
    if (pcibr_hints) {
        unsigned	rrb_fixed;
        PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, pcibr_vhdl,
                    "pic_attach2: pcibr_hints=0x%lx\n", pcibr_hints));

        rrb_fixed = pcibr_hints->ph_rrb_fixed;

        pcibr_soft->bs_rrb_fixed = rrb_fixed;

        if (pcibr_hints->ph_intr_bits)
            pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;


        for (slot = pcibr_soft->bs_min_slot;
                                slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
            int hslot = pcibr_hints->ph_host_slot[slot] - 1;

            if (hslot < 0) {
                pcibr_soft->bs_slot[slot].host_slot = slot;
            } else {
                pcibr_soft->bs_slot[slot].has_host = 1;
                pcibr_soft->bs_slot[slot].host_slot = hslot;
            }
        }
    }

    /*
     * Set-up initial values for state fields
     */
    for (slot = pcibr_soft->bs_min_slot;
                                slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
        pcibr_soft->bs_slot[slot].bss_devio.bssd_space = PCIIO_SPACE_NONE;
        pcibr_soft->bs_slot[slot].bss_devio.bssd_ref_cnt = 0;
        pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
        pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
        pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] = -1;
    }

    for (ibit = 0; ibit < 8; ++ibit) {
        pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_soft = pcibr_soft;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_list = NULL;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_ibit = ibit;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_hdlrcnt = 0;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_shared = 0;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_connected = 0;
    }


    /*
     * connect up our error handler.  PIC has 2 busses (thus resulting in 2
     * pcibr_soft structs under 1 widget), so only register a xwidget error
     * handler for PIC's bus0.  NOTE: for PIC pcibr_error_handler_wrapper()
     * is a wrapper routine we register that will call the real error handler
     * pcibr_error_handler() with the correct pcibr_soft struct.
     */
    if (busnum == 0) {
        xwidget_error_register(xconn_vhdl,
                                pcibr_error_handler_wrapper, pcibr_soft);
    }

    /*
     * Clear all pending interrupts.  Assume all interrupts are from slot 3
     * until otherwise set up.
     */
    pcireg_intr_reset_set(pcibr_soft, PIC_IRR_ALL_CLR);
    pcireg_intr_device_set(pcibr_soft, 0x006db6db);

    /* Setup the mapping register used for direct mapping */
    pcibr_directmap_init(pcibr_soft);

    /*
     * Initialize the PIC's control register.
     */
    pic_ctrl_reg = pcireg_control_get(pcibr_soft);

    /* Bridge's Requester ID: bus = busnum, dev = 0, func = 0 */
    pic_ctrl_reg &= ~PIC_CTRL_BUS_NUM_MASK;
    pic_ctrl_reg |= PIC_CTRL_BUS_NUM(busnum);
    pic_ctrl_reg &= ~PIC_CTRL_DEV_NUM_MASK;
    pic_ctrl_reg &= ~PIC_CTRL_FUN_NUM_MASK;

    pic_ctrl_reg &= ~PIC_CTRL_NO_SNOOP;
    pic_ctrl_reg &= ~PIC_CTRL_RELAX_ORDER;

    /* enable parity checking on PIC's internal RAM */
    pic_ctrl_reg |= PIC_CTRL_PAR_EN_RESP;
    pic_ctrl_reg |= PIC_CTRL_PAR_EN_ATE;

    /* PIC BRINGUP WAR (PV# 862253): don't enable write request parity */
    if (!PCIBR_WAR_ENABLED(PV862253, pcibr_soft)) {
        pic_ctrl_reg |= PIC_CTRL_PAR_EN_REQ;
    }

    pic_ctrl_reg |= PIC_CTRL_PAGE_SIZE;

    pcireg_control_set(pcibr_soft, pic_ctrl_reg);

    /* Initialize internal mapping entries (ie. the ATEs) */
    for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++)
	pcireg_int_ate_set(pcibr_soft, entry, 0);

    pcibr_soft->bs_int_ate_resource.start = 0;
    pcibr_soft->bs_int_ate_resource.end = pcibr_soft->bs_int_ate_size - 1;

    /* Set up the PIC's error interrupt handler. */
    xtalk_intr = xtalk_intr_alloc(xconn_vhdl, (device_desc_t)0, pcibr_vhdl);

    ASSERT(xtalk_intr != NULL);

    irq = ((hub_intr_t)xtalk_intr)->i_bit;
    cpu = ((hub_intr_t)xtalk_intr)->i_cpuid;

    intr_unreserve_level(cpu, irq);
    ((hub_intr_t)xtalk_intr)->i_bit = SGI_PCIBR_ERROR;
    xtalk_intr->xi_vector = SGI_PCIBR_ERROR;

    pcibr_soft->bsi_err_intr = xtalk_intr;

    /*
     * On IP35 with XBridge, we do some extra checks in pcibr_setwidint
     * in order to work around some addressing limitations.  In order
     * for that fire wall to work properly, we need to make sure we
     * start from a known clean state.
     */
    pcibr_clearwidint(pcibr_soft);

    xtalk_intr_connect(xtalk_intr,
		       (intr_func_t) pcibr_error_intr_handler,
		       (intr_arg_t) pcibr_soft,
		       (xtalk_intr_setfunc_t) pcibr_setwidint,
		       (void *) pcibr_soft);

    request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler, SA_SHIRQ, 
			"PCIBR error", (intr_arg_t) pcibr_soft);

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_vhdl,
		"pcibr_setwidint: target_id=0x%lx, int_addr=0x%lx\n",
		pcireg_intr_dst_target_id_get(pcibr_soft),
		pcireg_intr_dst_addr_get(pcibr_soft)));

    /* now we can start handling error interrupts */
    int_enable = pcireg_intr_enable_get(pcibr_soft);
    int_enable |= PIC_ISR_ERRORS;

    /* PIC BRINGUP WAR (PV# 856864 & 856865): allow the tnums that are
     * locked out to be freed up sooner (by timing out) so that the
     * read tnums are never completely used up.
     */
    if (PCIBR_WAR_ENABLED(PV856864, pcibr_soft)) {
	int_enable &= ~PIC_ISR_PCIX_REQ_TOUT;
	int_enable &= ~PIC_ISR_XREAD_REQ_TIMEOUT;

	pcireg_req_timeout_set(pcibr_soft, 0x750);
    }

    pcireg_intr_enable_set(pcibr_soft, int_enable);
    pcireg_intr_mode_set(pcibr_soft, 0); /* don't send 'clear interrupt' pkts */
    pcireg_tflush_get(pcibr_soft);       /* wait until Bridge PIO complete */

    /*
     * PIC BRINGUP WAR (PV# 856866, 859504, 861476, 861478): Don't use
     * RRB0, RRB8, RRB1, and RRB9.  Assign them to DEVICE[2|3]--VCHAN3
     * so they are not used.  This works since there is currently no
     * API to enable VCHAN3.
     */
    if (PCIBR_WAR_ENABLED(PV856866, pcibr_soft)) {
	pcireg_rrb_bit_set(pcibr_soft, 0, 0x000f000f);	/* even rrb reg */
	pcireg_rrb_bit_set(pcibr_soft, 1, 0x000f000f);	/* odd rrb reg */
    }

    /* PIC only supports 64-bit direct mapping in PCI-X mode.  Since
     * all PCI-X devices that initiate memory transactions must be
     * capable of generating 64-bit addresses, we force 64-bit DMAs.
     */
    pcibr_soft->bs_dma_flags = 0;
    if (IS_PCIX(pcibr_soft)) {
	pcibr_soft->bs_dma_flags |= PCIIO_DMA_A64;
    }

    {

    iopaddr_t		    prom_base_addr = pcibr_soft->bs_xid << 24;
    int			    prom_base_size = 0x1000000;
    int			    status;
    struct resource	    *res;

    /* Allocate resource maps based on bus page size; for I/O and memory
     * space, free all pages except those in the base area and in the
     * range set by the PROM.
     *
     * PROM creates BAR addresses in this format: 0x0ws00000 where w is
     * the widget number and s is the device register offset for the slot.
     */

    /* Setup the Bus's PCI IO Root Resource. */
    pcibr_soft->bs_io_win_root_resource.start = PCIBR_BUS_IO_BASE;
    pcibr_soft->bs_io_win_root_resource.end = 0xffffffff;
    res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
    if (!res)
	panic("PCIBR:Unable to allocate resource structure\n");

    /* Block off the range used by PROM. */
    res->start = prom_base_addr;
    res->end = prom_base_addr + (prom_base_size - 1);
    status = request_resource(&pcibr_soft->bs_io_win_root_resource, res);
    if (status)
	panic("PCIBR:Unable to request_resource()\n");

    /* Setup the Small Window Root Resource */
    pcibr_soft->bs_swin_root_resource.start = PAGE_SIZE;
    pcibr_soft->bs_swin_root_resource.end = 0x000FFFFF;

    /* Setup the Bus's PCI Memory Root Resource */
    pcibr_soft->bs_mem_win_root_resource.start = 0x200000;
    pcibr_soft->bs_mem_win_root_resource.end = 0xffffffff;
    res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
    if (!res)
	panic("PCIBR:Unable to allocate resource structure\n");

    /* Block off the range used by PROM. */
    res->start = prom_base_addr;
    res->end = prom_base_addr + (prom_base_size - 1);
    status = request_resource(&pcibr_soft->bs_mem_win_root_resource, res);
    if (status)
	panic("PCIBR:Unable to request_resource()\n");

    }


    /* build "no-slot" connection point */
    pcibr_info = pcibr_device_info_new(pcibr_soft, PCIIO_SLOT_NONE,
		 PCIIO_FUNC_NONE, PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
    noslot_conn = pciio_device_info_register(pcibr_vhdl, &pcibr_info->f_c);

    /* Store no slot connection point info for tearing it down during detach. */
    pcibr_soft->bs_noslot_conn = noslot_conn;
    pcibr_soft->bs_noslot_info = pcibr_info;

    for (slot = pcibr_soft->bs_min_slot;
				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
	/* Find out what is out there */
	(void)pcibr_slot_info_init(pcibr_vhdl, slot);
    }

    for (slot = pcibr_soft->bs_min_slot;
				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
	/* Set up the address space for this slot in the PCI land */
	(void)pcibr_slot_addr_space_init(pcibr_vhdl, slot);
    }

    for (slot = pcibr_soft->bs_min_slot;
				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
	/* Setup the device register */
	(void)pcibr_slot_device_init(pcibr_vhdl, slot);
    }

    if (IS_PCIX(pcibr_soft)) {
	pcibr_soft->bs_pcix_rbar_inuse = 0;
	pcibr_soft->bs_pcix_rbar_avail = NUM_RBAR;
	pcibr_soft->bs_pcix_rbar_percent_allowed =
					pcibr_pcix_rbars_calc(pcibr_soft);

	for (slot = pcibr_soft->bs_min_slot;
				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
	    /* Setup the PCI-X Read Buffer Attribute Registers (RBARs) */
	    (void)pcibr_slot_pcix_rbar_init(pcibr_soft, slot);
	}
    }

    for (slot = pcibr_soft->bs_min_slot;
				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
	/* Setup host/guest relations */
	(void)pcibr_slot_guest_info_init(pcibr_vhdl, slot);
    }

    /* Handle initial RRB management */
    pcibr_initial_rrb(pcibr_vhdl,
		      pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot);

   /* Before any drivers get called that may want to re-allocate RRB's,
    * let's get some special cases pre-allocated. Drivers may override
    * these pre-allocations, but by doing pre-allocations now we're
    * assured not to step all over what the driver intended.
    */
    if (pcibr_soft->bs_bricktype > 0) {
	switch (pcibr_soft->bs_bricktype) {
	case MODULE_PXBRICK:
	case MODULE_IXBRICK:
	case MODULE_OPUSBRICK:
		/*
		 * If IO9 in bus 1, allocate RRBs to all the IO9 devices
		 */
		if ((pcibr_widget_to_bus(pcibr_vhdl) == 1) &&
		    (pcibr_soft->bs_slot[0].bss_vendor_id == 0x10A9) &&
		    (pcibr_soft->bs_slot[0].bss_device_id == 0x100A)) {
			pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 4);
			pcibr_rrb_alloc_init(pcibr_soft, 1, VCHAN0, 4);
			pcibr_rrb_alloc_init(pcibr_soft, 2, VCHAN0, 4);
			pcibr_rrb_alloc_init(pcibr_soft, 3, VCHAN0, 4);
		} else {
			pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 4);
			pcibr_rrb_alloc_init(pcibr_soft, 1, VCHAN0, 4);
		}
		break;

	case MODULE_CGBRICK:
		pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 8);
		break;
	} /* switch */
    }


    for (slot = pcibr_soft->bs_min_slot;
				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
	/* Call the device attach */
	(void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
    }

    pciio_device_attach(noslot_conn, 0);

    return 0;
}
Example 13
/*
 * PIC has two buses under a single widget.  pic_attach() calls pic_attach2()
 * to attach each of those buses.
 */
int
pic_attach(vertex_hdl_t conn_v)
{
	int		rc;
	void	*bridge0, *bridge1 = (void *)0;
	vertex_hdl_t	pcibr_vhdl0, pcibr_vhdl1 = (vertex_hdl_t)0;
	pcibr_soft_t	bus0_soft, bus1_soft = (pcibr_soft_t)0;
	vertex_hdl_t  conn_v0, conn_v1, peer_conn_v;
	int		bricktype;
	int		iobrick_type_get_nasid(nasid_t nasid);

	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v, "pic_attach()\n"));

	bridge0 = pcibr_bridge_ptr_get(conn_v, 0);
	bridge1 = pcibr_bridge_ptr_get(conn_v, 1);

	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
		    "pic_attach: bridge0=0x%lx, bridge1=0x%lx\n", 
		    bridge0, bridge1));

	conn_v0 = conn_v1 = conn_v;

	/* If dual-ported then split the two PIC buses across both Cbricks */
	peer_conn_v = pic_bus1_redist(NASID_GET(bridge0), conn_v);
	if (peer_conn_v)
		conn_v1 = peer_conn_v;

	/*
	 * Create the vertex for the PCI buses, which we
	 * will also use to hold the pcibr_soft and
	 * which will be the "master" vertex for all the
	 * pciio connection points we will hang off it.
	 * This needs to happen before we call nic_bridge_vertex_info,
	 * as some of the *_vmc functions need access to the edges.
	 *
	 * Opening this vertex will provide access to
	 * the Bridge registers themselves.
	 */
	bricktype = iobrick_type_get_nasid(NASID_GET(bridge0));
	if ( bricktype == MODULE_CGBRICK ) {
		rc = hwgraph_path_add(conn_v0, EDGE_LBL_AGP_0, &pcibr_vhdl0);
		ASSERT(rc == GRAPH_SUCCESS);
		rc = hwgraph_path_add(conn_v1, EDGE_LBL_AGP_1, &pcibr_vhdl1);
		ASSERT(rc == GRAPH_SUCCESS);
	} else {
		rc = hwgraph_path_add(conn_v0, EDGE_LBL_PCIX_0, &pcibr_vhdl0);
		ASSERT(rc == GRAPH_SUCCESS);
		rc = hwgraph_path_add(conn_v1, EDGE_LBL_PCIX_1, &pcibr_vhdl1);
		ASSERT(rc == GRAPH_SUCCESS);
	}

	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
		    "pic_attach: pcibr_vhdl0=0x%lx, pcibr_vhdl1=0x%lx\n",
		    pcibr_vhdl0, pcibr_vhdl1));

	/* register pci provider array */
	pciio_provider_register(pcibr_vhdl0, &pci_pic_provider);
	pciio_provider_register(pcibr_vhdl1, &pci_pic_provider);

	pciio_provider_startup(pcibr_vhdl0);
	pciio_provider_startup(pcibr_vhdl1);

	pic_attach2(conn_v0, bridge0, pcibr_vhdl0, 0, &bus0_soft);
	pic_attach2(conn_v1, bridge1, pcibr_vhdl1, 1, &bus1_soft);

	{
	    /* If we're dual-ported, finish duplicating the peer info
	     * structure.  The error handler and arg are done in
	     * pic_attach2().
	     */
	    xwidget_info_t info0, info1;

	    if (conn_v0 != conn_v1) {		/* dual ported */
		info0 = xwidget_info_get(conn_v0);
		info1 = xwidget_info_get(conn_v1);
		if (info1->w_efunc == (error_handler_f *)NULL)
			info1->w_efunc = info0->w_efunc;
		if (info1->w_einfo == (error_handler_arg_t)0)
			info1->w_einfo = bus1_soft;
	    }
	}

	/* save a pointer to the PIC's other bus's soft struct */
	bus0_soft->bs_peers_soft = bus1_soft;
	bus1_soft->bs_peers_soft = bus0_soft;

	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
		    "pic_attach: bus0_soft=0x%lx, bus1_soft=0x%lx\n",
		    bus0_soft, bus1_soft));

	return 0;
}
Example 14
/* ARGSUSED */
hub_piomap_t
hub_piomap_alloc(devfs_handle_t dev,	/* set up mapping for this device */
                 device_desc_t dev_desc,	/* device descriptor */
                 iopaddr_t xtalk_addr,	/* map for this xtalk_addr range */
                 size_t byte_count,
                 size_t byte_count_max, 	/* maximum size of a mapping */
                 unsigned flags)		/* defined in sys/pio.h */
{
    xwidget_info_t widget_info = xwidget_info_get(dev);
    xwidgetnum_t widget = xwidget_info_id_get(widget_info);
    devfs_handle_t hubv = xwidget_info_master_get(widget_info);
    hubinfo_t hubinfo;
    hub_piomap_t bw_piomap;
    int bigwin, free_bw_index;
    nasid_t nasid;
    volatile hubreg_t junk;
    int s;

    /* sanity check */
    if (byte_count_max > byte_count)
        return(NULL);

    hubinfo_get(hubv, &hubinfo);

    /* If xtalk_addr range is mapped by a small window, we don't have
     * to do much
     */
    if (xtalk_addr + byte_count <= SWIN_SIZE)
        return(hubinfo_swin_piomap_get(hubinfo, (int)widget));

    /* We need to use a big window mapping.  */

    /*
     * TBD: Allow requests that would consume multiple big windows --
     * split the request up and use multiple mapping entries.
     * For now, reject requests that span big windows.
     */
    if ((xtalk_addr % BWIN_SIZE) + byte_count > BWIN_SIZE)
        return(NULL);


    /* Round xtalk address down for big window alignment */
    xtalk_addr = xtalk_addr & ~(BWIN_SIZE-1);

    /*
     * Check to see if an existing big window mapping will suffice.
     */
tryagain:
    free_bw_index = -1;
    s = mutex_spinlock(&hubinfo->h_bwlock);
    for (bigwin=0; bigwin < HUB_NUM_BIG_WINDOW; bigwin++) {
        bw_piomap = hubinfo_bwin_piomap_get(hubinfo, bigwin);

        /* If mapping is not valid, skip it */
        if (!(bw_piomap->hpio_flags & HUB_PIOMAP_IS_VALID)) {
            free_bw_index = bigwin;
            continue;
        }

        /*
         * If mapping is UNFIXED, skip it.  We don't allow sharing
         * of UNFIXED mappings, because this would allow starvation.
         */
        if (!(bw_piomap->hpio_flags & HUB_PIOMAP_IS_FIXED))
            continue;

        if ( xtalk_addr == bw_piomap->hpio_xtalk_info.xp_xtalk_addr &&
                widget == bw_piomap->hpio_xtalk_info.xp_target) {
            bw_piomap->hpio_holdcnt++;
            mutex_spinunlock(&hubinfo->h_bwlock, s);
            return(bw_piomap);
        }
    }

    /*
     * None of the existing big window mappings will work for us --
     * we need to establish a new mapping.
     */

    /* Ensure that we don't consume all big windows with FIXED mappings */
    if (flags & PIOMAP_FIXED) {
        if (hubinfo->h_num_big_window_fixed < HUB_NUM_BIG_WINDOW-1) {
            ASSERT(free_bw_index >= 0);
            hubinfo->h_num_big_window_fixed++;
        } else {
            bw_piomap = NULL;
            goto done;
        }
    } else { /* PIOMAP_UNFIXED */
        if (free_bw_index < 0) {
            if (flags & PIOMAP_NOSLEEP) {
                bw_piomap = NULL;
                goto done;
            }

            sv_wait(&hubinfo->h_bwwait, PZERO, &hubinfo->h_bwlock, s);
            goto tryagain;
        }
    }


    /* OK!  Allocate big window free_bw_index for this mapping. */
    /*
     * The code below does a PIO write to setup an ITTE entry.
     * We need to prevent other CPUs from seeing our updated memory
     * shadow of the ITTE (in the piomap) until the ITTE entry is
     * actually set up; otherwise, another CPU might attempt a PIO
     * prematurely.
     *
     * Also, the only way we can know that an entry has been received
     * by the hub and can be used by future PIO reads/writes is by
     * reading back the ITTE entry after writing it.
     *
     * For these two reasons, we PIO read back the ITTE entry after
     * we write it.
     */

    nasid = hubinfo->h_nasid;
    IIO_ITTE_PUT(nasid, free_bw_index, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
    junk = HUB_L(IIO_ITTE_GET(nasid, free_bw_index));

    bw_piomap = hubinfo_bwin_piomap_get(hubinfo, free_bw_index);
    bw_piomap->hpio_xtalk_info.xp_dev = dev;
    bw_piomap->hpio_xtalk_info.xp_target = widget;
    bw_piomap->hpio_xtalk_info.xp_xtalk_addr = xtalk_addr;
    bw_piomap->hpio_xtalk_info.xp_kvaddr = (caddr_t)NODE_BWIN_BASE(nasid, free_bw_index);
    bw_piomap->hpio_holdcnt++;
    bw_piomap->hpio_bigwin_num = free_bw_index;

    if (flags & PIOMAP_FIXED)
        bw_piomap->hpio_flags |= HUB_PIOMAP_IS_VALID | HUB_PIOMAP_IS_FIXED;
    else
        bw_piomap->hpio_flags |= HUB_PIOMAP_IS_VALID;

done:
    mutex_spinunlock(&hubinfo->h_bwlock, s);
    return(bw_piomap);
}
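
Closing the loop on the PIO path, a hedged caller sketch combining this allocator with hub_piomap_addr() as used in Example 6; everything except the two hub_ calls is illustrative. Passing byte_count for byte_count_max keeps the sanity check at the top of the allocator happy.

/* Hypothetical sketch: map a device-register range and obtain a kernel
 * virtual address for PIO to it.
 */
static caddr_t
my_map_registers(devfs_handle_t dev, iopaddr_t xtalk_addr, size_t byte_count)
{
	hub_piomap_t map;

	map = hub_piomap_alloc(dev, (device_desc_t)0, xtalk_addr,
			       byte_count, byte_count,
			       PIOMAP_FIXED | PIOMAP_NOSLEEP);
	if (map == NULL)
		return NULL;

	return hub_piomap_addr(map, xtalk_addr, byte_count);
}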