Example #1
u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus)
{
	long rc;
	u16 uninitialized_var(ioboard);		/* GCC be quiet */
	nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);

	rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard);
	if (rc) {
		printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n",
		       rc);
		return 0;
	}

	return ioboard;
}
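All of these examples funnel a bridge or bus base address through NASID_GET() to recover the node id (NASID) the hardware lives on. As a rough stand-alone sketch of the idiom only (not the kernel's actual definition; the real macro and its shift/mask widths are hardware-specific and live in the ia64 SN address headers), the node id is pulled out of the high-order physical address bits:

/* Illustrative only: SKETCH_NASID_SHIFT/SKETCH_NASID_MASK are made-up
 * values for this sketch, not the kernel's NASID_SHIFT/NASID_BITMASK. */
#define SKETCH_NASID_SHIFT	38
#define SKETCH_NASID_MASK	0x7ffUL
#define SKETCH_NASID_GET(pa) \
	((int)(((unsigned long)(pa) >> SKETCH_NASID_SHIFT) & SKETCH_NASID_MASK))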
Example #2
u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus)
{
	s64 rc;
	u16 ioboard;
	nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);

	rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard);
	if (rc) {
		printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n",
		       rc);
		return 0;
	}

	return ioboard;
}
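Example #3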
int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
{
	struct msi_msg msg;
	int widget;
	int status;
	nasid_t nasid;
	u64 bus_addr;
	struct sn_irq_info *sn_irq_info;
	struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int irq;

	if (!entry->msi_attrib.is_64)
		return -EINVAL;

	if (bussoft == NULL)
		return -EINVAL;

	if (provider == NULL || provider->dma_map_consistent == NULL)
		return -EINVAL;

	irq = create_irq();
	if (irq < 0)
		return irq;

	/*
	 * Set up the vector plumbing.  Let the prom (via sn_intr_alloc)
	 * decide which cpu to direct this msi at by default.
	 */

	nasid = NASID_GET(bussoft->bs_base);
	widget = (nasid & 1) ?
			TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
			SWIN_WIDGETNUM(bussoft->bs_base);

	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (! sn_irq_info) {
		destroy_irq(irq);
		return -ENOMEM;
	}

	status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
	if (status) {
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_irq_info->irq_int_bit = -1;		/* mark this as an MSI irq */
	sn_irq_fixup(pdev, sn_irq_info);

	/* Prom probably should fill these in, but doesn't ... */
	sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
	sn_irq_info->irq_bridge = (void *)bussoft->bs_base;

	/*
	 * Map the xio address into bus space
	 */
	bus_addr = (*provider->dma_map_consistent)(pdev,
					sn_irq_info->irq_xtalkaddr,
					sizeof(sn_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);
	if (! bus_addr) {
		sn_intr_free(nasid, widget, sn_irq_info);
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_msi_info[irq].sn_irq_info = sn_irq_info;
	sn_msi_info[irq].pci_addr = bus_addr;

	msg.address_hi = (u32)(bus_addr >> 32);
	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

	/*
	 * In the SN platform, bit 16 is a "send vector" bit which
	 * must be present in order to move the vector through the system.
	 */
	msg.data = 0x100 + irq;

	irq_set_msi_desc(irq, entry);
	pci_write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);

	return 0;
}
Example #4
/* ARGSUSED */
static void __init
klhwg_connect_one_router(vertex_hdl_t hwgraph_root, lboard_t *brd,
			 cnodeid_t cnode, nasid_t nasid)
{
	klrou_t *router;
	char path_buffer[50];
	char dest_path[50];
	vertex_hdl_t router_hndl;
	vertex_hdl_t dest_hndl;
	int rc;
	int port;
	lboard_t *dest_brd;

	/* Don't add duplicate boards. */
	if (brd->brd_flags & DUPLICATE_BOARD) {
		return;
	}

	/* Generate a hardware graph path for this board. */
	board_to_path(brd, path_buffer);

	rc = hwgraph_traverse(hwgraph_root, path_buffer, &router_hndl);

	if (rc != GRAPH_SUCCESS) {
		printk(KERN_WARNING "Can't find router: %s", path_buffer);
		return;
	}

	/* We don't know what to do with multiple router components */
	if (brd->brd_numcompts != 1) {
		printk("klhwg_connect_one_router: %d cmpts on router\n",
			brd->brd_numcompts);
		return;
	}


	/* Convert component 0 to klrou_t ptr */
	router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd),
					      brd->brd_compts[0]);

	for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
		/* See if the port's active */
		if (router->rou_port[port].port_nasid == INVALID_NASID) {
			GRPRINTF(("klhwg_connect_one_router: port %d inactive.\n",
				 port));
			continue;
		}
		if (nasid_to_cnodeid(router->rou_port[port].port_nasid) 
		    == INVALID_CNODEID) {
			continue;
		}

		dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
				router->rou_port[port].port_nasid,
				router->rou_port[port].port_offset);

		/* Generate a hardware graph path for this board. */
		board_to_path(dest_brd, dest_path);

		rc = hwgraph_traverse(hwgraph_root, dest_path, &dest_hndl);

		if (rc != GRAPH_SUCCESS) {
			if (KL_CONFIG_DUPLICATE_BOARD(dest_brd))
				continue;
			printk("Can't find router: %s", dest_path);
			return;
		}

		sprintf(dest_path, "%d", port);

		rc = hwgraph_edge_add(router_hndl, dest_hndl, dest_path);

		if (rc == GRAPH_DUP) {
			GRPRINTF(("Skipping port %d. nasid %d %s/%s\n",
				  port, router->rou_port[port].port_nasid,
				  path_buffer, dest_path));
			continue;
		}

		if (rc != GRAPH_SUCCESS) {
			printk("Can't create edge: %s/%s to vertex 0x%p error 0x%x\n",
				path_buffer, dest_path, (void *)dest_hndl, rc);
			return;
		}
		HWGRAPH_DEBUG(__FILE__, __FUNCTION__, __LINE__, router_hndl, dest_hndl, "Created edge %s from vhdl1 to vhdl2.\n", dest_path);
	}
}
Example #5
void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
	int nasid, cnode, j;
	struct hubdev_info *hubdev_info;
	struct pcibus_info *soft;
	struct sn_flush_device_list *sn_flush_device_list;

	if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
		return NULL;
	}

	/*
	 * Allocate kernel bus soft and copy from prom.
	 */

	soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
	if (!soft) {
		return NULL;
	}

	memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
	soft->pbi_buscommon.bs_base =
	    (((u64) soft->pbi_buscommon.bs_base << 4) >> 4) |
	    __IA64_UNCACHED_OFFSET;

	spin_lock_init(&soft->pbi_lock);

	/*
	 * register the bridge's error interrupt handler
	 */
	if (request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler,
			SA_SHIRQ, "PCIBR error", (void *)(soft))) {
		printk(KERN_WARNING
		       "pcibr cannot allocate interrupt for error handler\n");
	}

	/* 
	 * Update the Bridge with the "kernel" pagesize 
	 */
	if (PAGE_SIZE < 16384) {
		pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
	} else {
		pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
	}

	nasid = NASID_GET(soft->pbi_buscommon.bs_base);
	cnode = nasid_to_cnodeid(nasid);
	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);

	if (hubdev_info->hdi_flush_nasid_list.widget_p) {
		sn_flush_device_list = hubdev_info->hdi_flush_nasid_list.
		    widget_p[(int)soft->pbi_buscommon.bs_xid];
		if (sn_flush_device_list) {
			for (j = 0; j < DEV_PER_WIDGET;
			     j++, sn_flush_device_list++) {
				if (sn_flush_device_list->sfdl_slot == -1)
					continue;
				if (sn_flush_device_list->
				    sfdl_persistent_busnum ==
				    soft->pbi_buscommon.bs_persist_busnum)
					sn_flush_device_list->sfdl_pcibus_info =
					    soft;
			}
		}
	}

	/* Setup the PMU ATE map */
	soft->pbi_int_ate_resource.lowest_free_index = 0;
	soft->pbi_int_ate_resource.ate =
	    kzalloc(soft->pbi_int_ate_size * sizeof(uint64_t), GFP_KERNEL);
	if (!soft->pbi_int_ate_resource.ate) {
		kfree(soft);
		return NULL;
	}

	if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP)
		/*
		 * TIO PCI Bridge with no closest node information.
		 * FIXME: Find another way to determine the closest node
		 */
		controller->node = -1;
	else
		controller->node = cnode;
	return soft;
}
Example #6
void sn_dma_flush(u64 addr)
{
	nasid_t nasid;
	int is_tio;
	int wid_num;
	int i, j;
	unsigned long flags;
	u64 itte;
	struct hubdev_info *hubinfo;
	struct sn_flush_device_kernel *p;
	struct sn_flush_device_common *common;
	struct sn_flush_nasid_entry *flush_nasid_list;

	if (!sn_ioif_inited)
		return;

	nasid = NASID_GET(addr);
	if (-1 == nasid_to_cnodeid(nasid))
		return;

	hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;

	BUG_ON(!hubinfo);

	flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
	if (flush_nasid_list->widget_p == NULL)
		return;

	is_tio = (nasid & 1);
	if (is_tio) {
		int itte_index;

		if (TIO_HWIN(addr))
			itte_index = 0;
		else if (TIO_BWIN_WINDOWNUM(addr))
			itte_index = TIO_BWIN_WINDOWNUM(addr);
		else
			itte_index = -1;

		if (itte_index >= 0) {
			itte = flush_nasid_list->iio_itte[itte_index];
			if (! TIO_ITTE_VALID(itte))
				return;
			wid_num = TIO_ITTE_WIDGET(itte);
		} else
			wid_num = TIO_SWIN_WIDGETNUM(addr);
	} else {
		if (BWIN_WINDOWNUM(addr)) {
			itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
			wid_num = IIO_ITTE_WIDGET(itte);
		} else
			wid_num = SWIN_WIDGETNUM(addr);
	}
	if (flush_nasid_list->widget_p[wid_num] == NULL)
		return;
	p = &flush_nasid_list->widget_p[wid_num][0];

	/* find a matching BAR */
	for (i = 0; i < DEV_PER_WIDGET; i++,p++) {
		common = p->common;
		for (j = 0; j < PCI_ROM_RESOURCE; j++) {
			if (common->sfdl_bar_list[j].start == 0)
				break;
			if (addr >= common->sfdl_bar_list[j].start
			    && addr <= common->sfdl_bar_list[j].end)
				break;
		}
		if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0)
			break;
	}

	/* if no matching BAR, return without doing anything. */
	if (i == DEV_PER_WIDGET)
		return;

	/*
	 * For TIOCP use the Device(x) Write Request Buffer Flush Bridge
	 * register since it ensures the data has entered the coherence
	 * domain, unlike PIC.
	 */
	if (is_tio) {
		u32 tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
		u32 revnum = XWIDGET_PART_REV_NUM(tio_id);

		/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
		if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
			return;
		} else {
			pcireg_wrb_flush_get(common->sfdl_pcibus_info,
					     (common->sfdl_slot - 1));
		}
	} else {
		spin_lock_irqsave(&p->sfdl_flush_lock, flags);
		*common->sfdl_flush_addr = 0;

		/* force an interrupt. */
		*(volatile u32 *)(common->sfdl_force_int_addr) = 1;

		/* wait for the interrupt to come back. */
		while (*(common->sfdl_flush_addr) != 0x10f)
			cpu_relax();

		/* okay, everything is synched up. */
		spin_unlock_irqrestore(&p->sfdl_flush_lock, flags);
	}
	return;
}
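Example #7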
void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
	int nasid, cnode, j;
	struct hubdev_info *hubdev_info;
	struct pcibus_info *soft;
	struct sn_flush_device_kernel *sn_flush_device_kernel;
	struct sn_flush_device_common *common;

	if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
		return NULL;
	}

	/*
	 * Allocate kernel bus soft and copy from prom.
	 */

	soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
	if (!soft) {
		return NULL;
	}

	memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
	soft->pbi_buscommon.bs_base = (unsigned long)
		ioremap(REGION_OFFSET(soft->pbi_buscommon.bs_base),
			sizeof(struct pic));

	spin_lock_init(&soft->pbi_lock);

	/*
	 * register the bridge's error interrupt handler
	 */
	if (request_irq(SGI_PCIASIC_ERROR, pcibr_error_intr_handler,
			IRQF_SHARED, "PCIBR error", (void *)(soft))) {
		printk(KERN_WARNING
		       "pcibr cannot allocate interrupt for error handler\n");
	}
	sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);

	/* 
	 * Update the Bridge with the "kernel" pagesize 
	 */
	if (PAGE_SIZE < 16384) {
		pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
	} else {
		pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
	}

	nasid = NASID_GET(soft->pbi_buscommon.bs_base);
	cnode = nasid_to_cnodeid(nasid);
	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);

	if (hubdev_info->hdi_flush_nasid_list.widget_p) {
		sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
		    widget_p[(int)soft->pbi_buscommon.bs_xid];
		if (sn_flush_device_kernel) {
			for (j = 0; j < DEV_PER_WIDGET;
			     j++, sn_flush_device_kernel++) {
				common = sn_flush_device_kernel->common;
				if (common->sfdl_slot == -1)
					continue;
				if ((common->sfdl_persistent_segment ==
				     soft->pbi_buscommon.bs_persist_segment) &&
				     (common->sfdl_persistent_busnum ==
				     soft->pbi_buscommon.bs_persist_busnum))
					common->sfdl_pcibus_info =
					    soft;
			}
		}
	}

	/* Setup the PMU ATE map */
	soft->pbi_int_ate_resource.lowest_free_index = 0;
	soft->pbi_int_ate_resource.ate =
	    kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);

	if (!soft->pbi_int_ate_resource.ate) {
		kfree(soft);
		return NULL;
	}

	return soft;
}
Example #8
int
sn_msi_setup(struct pci_dev *pdev, unsigned int vector,
	     u32 *addr_hi, u32 *addr_lo, u32 *data)
{
	int widget;
	int status;
	nasid_t nasid;
	u64 bus_addr;
	struct sn_irq_info *sn_irq_info;
	struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

	if (bussoft == NULL)
		return -EINVAL;

	if (provider == NULL || provider->dma_map_consistent == NULL)
		return -EINVAL;

	/*
	 * Set up the vector plumbing.  Let the prom (via sn_intr_alloc)
	 * decide which cpu to direct this msi at by default.
	 */

	nasid = NASID_GET(bussoft->bs_base);
	widget = (nasid & 1) ?
			TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
			SWIN_WIDGETNUM(bussoft->bs_base);

	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (! sn_irq_info)
		return -ENOMEM;

	status = sn_intr_alloc(nasid, widget, sn_irq_info, vector, -1, -1);
	if (status) {
		kfree(sn_irq_info);
		return -ENOMEM;
	}

	sn_irq_info->irq_int_bit = -1;		/* mark this as an MSI irq */
	sn_irq_fixup(pdev, sn_irq_info);

	/* Prom probably should fill these in, but doesn't ... */
	sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
	sn_irq_info->irq_bridge = (void *)bussoft->bs_base;

	/*
	 * Map the xio address into bus space
	 */
	bus_addr = (*provider->dma_map_consistent)(pdev,
					sn_irq_info->irq_xtalkaddr,
					sizeof(sn_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);
	if (! bus_addr) {
		sn_intr_free(nasid, widget, sn_irq_info);
		kfree(sn_irq_info);
		return -ENOMEM;
	}

	sn_msi_info[vector].sn_irq_info = sn_irq_info;
	sn_msi_info[vector].pci_addr = bus_addr;

	*addr_hi = (u32)(bus_addr >> 32);
	*addr_lo = (u32)(bus_addr & 0x00000000ffffffff);

	/*
	 * In the SN platform, bit 16 is a "send vector" bit which
	 * must be present in order to move the vector through the system.
	 */
	*data = 0x100 + (unsigned int)vector;

#ifdef CONFIG_SMP
	set_irq_affinity_info((vector & 0xff), sn_irq_info->irq_cpuid, 0);
#endif

	return 0;
}
Example #9
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
	int cpuid, cpuphys;

	cpuid = first_cpu(mask);
	cpuphys = cpu_physical_id(cpuid);

	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
				 sn_irq_lh[irq], list) {
		u64 bridge;
		int local_widget, status;
		nasid_t local_nasid;
		struct sn_irq_info *new_irq_info;
		struct sn_pcibus_provider *pci_provider;

		new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
		if (new_irq_info == NULL)
			break;
		memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));

		bridge = (u64) new_irq_info->irq_bridge;
		if (!bridge) {
			kfree(new_irq_info);
			break; /* irq is not a device interrupt */
		}

		local_nasid = NASID_GET(bridge);

		if (local_nasid & 1)
			local_widget = TIO_SWIN_WIDGETNUM(bridge);
		else
			local_widget = SWIN_WIDGETNUM(bridge);

		/* Free the old PROM new_irq_info structure */
		sn_intr_free(local_nasid, local_widget, new_irq_info);
		/* Update kernels new_irq_info with new target info */
		unregister_intr_pda(new_irq_info);

		/* allocate a new PROM new_irq_info struct */
		status = sn_intr_alloc(local_nasid, local_widget,
				       __pa(new_irq_info), irq,
				       cpuid_to_nasid(cpuid),
				       cpuid_to_slice(cpuid));

		/* SAL call failed */
		if (status) {
			kfree(new_irq_info);
			break;
		}

		new_irq_info->irq_cpuid = cpuid;
		register_intr_pda(new_irq_info);

		pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
		if (pci_provider && pci_provider->target_interrupt)
			(pci_provider->target_interrupt)(new_irq_info);

		spin_lock(&sn_irq_info_lock);
		list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
		spin_unlock(&sn_irq_info_lock);
		call_rcu(&sn_irq_info->rcu, sn_irq_info_free);

#ifdef CONFIG_SMP
		set_irq_affinity_info((irq & 0xff), cpuphys, 0);
#endif
	}
}
Example #10
unsigned
ate_freeze(pcibr_dmamap_t pcibr_dmamap,
#if PCIBR_FREEZE_TIME
	   unsigned *freeze_time_ptr,
#endif
	   unsigned *cmd_regs)
{
    pcibr_soft_t            pcibr_soft = pcibr_dmamap->bd_soft;
#ifdef LATER
    int                     dma_slot = pcibr_dmamap->bd_slot;
#endif
    int                     ext_ates = pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM;
    int                     slot;

    unsigned long           s;
    unsigned                cmd_reg;
    volatile unsigned      *cmd_lwa;
    unsigned                cmd_lwd;

    if (!ext_ates)
	return 0;

    /* Bridge Hardware Bug WAR #484930:
     * Bridge can't handle updating External ATEs
     * while DMA is occurring that uses External ATEs,
     * even if the particular ATEs involved are disjoint.
     */

    /* need to prevent anyone else from
     * unfreezing the grant while we
     * are working; also need to prevent
     * this thread from being interrupted
     * to keep PCI grant freeze time
     * at an absolute minimum.
     */
    s = pcibr_lock(pcibr_soft);

#ifdef LATER
    /* just in case pcibr_dmamap_done was not called */
    if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
	pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
	if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
	    atomic_dec(&(pcibr_soft->bs_slot[dma_slot]. bss_ext_ates_active));
	xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
    }
#endif	/* LATER */
#if PCIBR_FREEZE_TIME
    *freeze_time_ptr = get_timestamp();
#endif

    cmd_lwa = 0;
    for (slot = pcibr_soft->bs_min_slot; 
		slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
	if (atomic_read(&pcibr_soft->bs_slot[slot].bss_ext_ates_active)) {
	    cmd_reg = pcibr_soft->bs_slot[slot].bss_cmd_shadow;
	    if (cmd_reg & PCI_CMD_BUS_MASTER) {
		cmd_lwa = pcibr_soft->bs_slot[slot].bss_cmd_pointer;
		cmd_lwd = cmd_reg ^ PCI_CMD_BUS_MASTER;
		cmd_lwa[0] = cmd_lwd;
	    }
	    cmd_regs[slot] = cmd_reg;
	} else
	    cmd_regs[slot] = 0;

    if (cmd_lwa) {
	    bridge_t	*bridge = pcibr_soft->bs_base;

	    /* Read the last master bit that has been cleared. This PIO read
	     * on the PCI bus is to ensure the completion of any DMAs that
	     * are due to bus requests issued by PCI devices before the
	     * clearing of master bits.
	     */
	    cmd_lwa[0];

	    /* Flush all the write buffers in the bridge */
	    for (slot = pcibr_soft->bs_min_slot; 
				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
		    if (atomic_read(&pcibr_soft->bs_slot[slot].bss_ext_ates_active)) {
			    /* Flush the write buffer associated with this
			     * PCI device which might be using dma map RAM.
			     */
			if ( is_pic(bridge) ) {
				bridge->b_wr_req_buf[slot].reg;
			}
			else {
				if (io_get_sh_swapper(NASID_GET(bridge)) ) {
					BRIDGE_REG_GET32((&bridge->b_wr_req_buf[slot].reg));
				}
				else
					bridge->b_wr_req_buf[slot].reg;
			}
		    }
	    }
    }
    return s;
}
Example #11
int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
{
	struct msi_msg msg;
	int widget;
	int status;
	nasid_t nasid;
	u64 bus_addr;
	struct sn_irq_info *sn_irq_info;
	struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(pdev);
	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
	int irq;

	if (!entry->msi_attrib.is_64)
		return -EINVAL;

	if (bussoft == NULL)
		return -EINVAL;

	if (provider == NULL || provider->dma_map_consistent == NULL)
		return -EINVAL;

	irq = create_irq();
	if (irq < 0)
		return irq;

	/*
	 * Set up the vector plumbing.  Let the prom (via sn_intr_alloc)
	 * decide which cpu to direct this msi at by default.
	 */
	nasid = NASID_GET(bussoft->bs_base);
	widget = (nasid & 1) ?
			TIO_SWIN_WIDGETNUM(bussoft->bs_base) :
			SWIN_WIDGETNUM(bussoft->bs_base);

	sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (! sn_irq_info) {
		destroy_irq(irq);
		return -ENOMEM;
	}

	status = sn_intr_alloc(nasid, widget, sn_irq_info, irq, -1, -1);
	if (status) {
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_irq_info->irq_int_bit = -1;		/* mark this as an MSI irq */
	sn_irq_fixup(pdev, sn_irq_info);

	/* Prom probably should fill these in, but doesn't ... */
	sn_irq_info->irq_bridge_type = bussoft->bs_asic_type;
	sn_irq_info->irq_bridge = (void *)bussoft->bs_base;

	/*
	 * Map the xio address into bus space
	 */
	bus_addr = (*provider->dma_map_consistent)(pdev,
					sn_irq_info->irq_xtalkaddr,
					sizeof(sn_irq_info->irq_xtalkaddr),
					SN_DMA_MSI|SN_DMA_ADDR_XIO);
	if (! bus_addr) {
		sn_intr_free(nasid, widget, sn_irq_info);
		kfree(sn_irq_info);
		destroy_irq(irq);
		return -ENOMEM;
	}

	sn_msi_info[irq].sn_irq_info = sn_irq_info;
	sn_msi_info[irq].pci_addr = bus_addr;

	msg.address_hi = (u32)(bus_addr >> 32);
	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

	/*
	 * In the SN platform, bit 16 is a "send vector" bit which
	 * must be present in order to move the vector through the system.
	 */
	msg.data = 0x100 + irq;

	irq_set_msi_desc(irq, entry);
	write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq);

	return 0;
}
Example #12
/*
 * PIC has two buses under a single widget.  pic_attach() calls pic_attach2()
 * to attach each of those buses.
 */
static int
pic_attach2(vertex_hdl_t xconn_vhdl, void *bridge,
	      vertex_hdl_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
{
    vertex_hdl_t	    ctlr_vhdl;
    pcibr_soft_t	    pcibr_soft;
    pcibr_info_t	    pcibr_info;
    xwidget_info_t	    info;
    xtalk_intr_t	    xtalk_intr;
    pcibr_list_p	    self;
    int			    entry, slot, ibit, i;
    vertex_hdl_t	    noslot_conn;
    char		    devnm[MAXDEVNAME], *s;
    pcibr_hints_t	    pcibr_hints;
    picreg_t		    id;
    picreg_t		    int_enable;
    picreg_t		    pic_ctrl_reg;

    int			    iobrick_type_get_nasid(nasid_t nasid);
    int			    iomoduleid_get(nasid_t nasid);
    int			    irq;
    int			    cpu;

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
		"pic_attach2: bridge=0x%lx, busnum=%d\n", bridge, busnum));

    ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER, 0,
		0, 0, 0,
		S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
		(struct file_operations *)&pcibr_fops, (void *)pcibr_vhdl);
    ASSERT(ctlr_vhdl != NULL);

    id = pcireg_bridge_id_get(bridge);
    hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV,
                         (arbitrary_info_t)XWIDGET_PART_REV_NUM(id));

    /*
     * Get the hint structure; if some NIC callback marked this vertex as
     * "hands-off" then we just return here, before doing anything else.
     */
    pcibr_hints = pcibr_hints_get(xconn_vhdl, 0);

    if (pcibr_hints && pcibr_hints->ph_hands_off)
        return -1;

    /* allocate soft structure to hang off the vertex.  Link the new soft
     * structure to the pcibr_list linked list
     */
    pcibr_soft = kmalloc(sizeof (*(pcibr_soft)), GFP_KERNEL);
    if ( !pcibr_soft )
	return -ENOMEM;

    self = kmalloc(sizeof (*(self)), GFP_KERNEL);
    if ( !self ) {
	kfree(pcibr_soft);
	return -ENOMEM;
    }
    memset(pcibr_soft, 0, sizeof (*(pcibr_soft)));
    memset(self, 0, sizeof (*(self)));

    self->bl_soft = pcibr_soft;
    self->bl_vhdl = pcibr_vhdl;
    self->bl_next = pcibr_list;
    pcibr_list = self;

    if (ret_softp)
        *ret_softp = pcibr_soft;

    pcibr_soft_set(pcibr_vhdl, pcibr_soft);

    s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
    pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
    if (!pcibr_soft->bs_name)
	    return -ENOMEM;

    strcpy(pcibr_soft->bs_name, s);

    pcibr_soft->bs_conn = xconn_vhdl;
    pcibr_soft->bs_vhdl = pcibr_vhdl;
    pcibr_soft->bs_base = (void *)bridge;
    pcibr_soft->bs_rev_num = XWIDGET_PART_REV_NUM(id);
    pcibr_soft->bs_intr_bits = (pcibr_intr_bits_f *)pcibr_intr_bits;
    pcibr_soft->bsi_err_intr = 0;
    pcibr_soft->bs_min_slot = 0;
    pcibr_soft->bs_max_slot = 3;
    pcibr_soft->bs_busnum = busnum;
    pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
    pcibr_soft->bs_int_ate_size = PIC_INTERNAL_ATES;
    /* Make sure this is called after setting the bs_base and bs_bridge_type */
    pcibr_soft->bs_bridge_mode = (pcireg_speed_get(pcibr_soft) << 1) |
                                  pcireg_mode_get(pcibr_soft);

    info = xwidget_info_get(xconn_vhdl);
    pcibr_soft->bs_xid = xwidget_info_id_get(info);
    pcibr_soft->bs_master = xwidget_info_master_get(info);
    pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);

    strcpy(pcibr_soft->bs_asic_name, "PIC");

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
                "pic_attach2: pcibr_soft=0x%lx, mode=0x%x\n",
                pcibr_soft, pcibr_soft->bs_bridge_mode));

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
                "pic_attach2: %s ASIC: rev %s (code=0x%x)\n",
                pcibr_soft->bs_asic_name,
                (IS_PIC_PART_REV_A(pcibr_soft->bs_rev_num)) ? "A" :
                (IS_PIC_PART_REV_B(pcibr_soft->bs_rev_num)) ? "B" :
                (IS_PIC_PART_REV_C(pcibr_soft->bs_rev_num)) ? "C" :
                "unknown", pcibr_soft->bs_rev_num));

    /* PV854845: Must clear write request buffer to avoid parity errors */
    for (i=0; i < PIC_WR_REQ_BUFSIZE; i++) {
        ((pic_t *)bridge)->p_wr_req_lower[i] = 0;
        ((pic_t *)bridge)->p_wr_req_upper[i] = 0;
        ((pic_t *)bridge)->p_wr_req_parity[i] = 0;
    }

    pcibr_soft->bs_nasid = NASID_GET(bridge);

    pcibr_soft->bs_bricktype = iobrick_type_get_nasid(pcibr_soft->bs_nasid);
    if (pcibr_soft->bs_bricktype < 0)
        printk(KERN_WARNING "%s: bricktype was unknown by L1 (ret val = 0x%x)\n",
                pcibr_soft->bs_name, pcibr_soft->bs_bricktype);

    pcibr_soft->bs_moduleid = iomoduleid_get(pcibr_soft->bs_nasid);

    if (pcibr_soft->bs_bricktype > 0) {
        switch (pcibr_soft->bs_bricktype) {
	case MODULE_PXBRICK:
	case MODULE_IXBRICK:
	case MODULE_OPUSBRICK:
            pcibr_soft->bs_first_slot = 0;
            pcibr_soft->bs_last_slot = 1;
            pcibr_soft->bs_last_reset = 1;

            /* Bus 1 of IXBrick has a IO9, so there are 4 devices, not 2 */
	    if ((pcibr_widget_to_bus(pcibr_vhdl) == 1) 
		    && isIO9(pcibr_soft->bs_nasid)) {
                pcibr_soft->bs_last_slot = 3;
                pcibr_soft->bs_last_reset = 3;
            }
            break;

        case MODULE_CGBRICK:
            pcibr_soft->bs_first_slot = 0;
            pcibr_soft->bs_last_slot = 0;
            pcibr_soft->bs_last_reset = 0;
            break;

        default:
	    printk(KERN_WARNING "%s: Unknown bricktype: 0x%x\n",
                    pcibr_soft->bs_name, pcibr_soft->bs_bricktype);
            break;
        }

        PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
                    "pic_attach2: bricktype=%d, brickbus=%d, "
		    "slots %d-%d\n", pcibr_soft->bs_bricktype,
		    pcibr_widget_to_bus(pcibr_vhdl),
                    pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot));
    }

    /*
     * Initialize bridge and bus locks
     */
    spin_lock_init(&pcibr_soft->bs_lock);

    /*
     * If we have one, process the hints structure.
     */
    if (pcibr_hints) {
        unsigned	rrb_fixed;
        PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, pcibr_vhdl,
                    "pic_attach2: pcibr_hints=0x%lx\n", pcibr_hints));

        rrb_fixed = pcibr_hints->ph_rrb_fixed;

        pcibr_soft->bs_rrb_fixed = rrb_fixed;

        if (pcibr_hints->ph_intr_bits)
            pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;


        for (slot = pcibr_soft->bs_min_slot;
                                slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
            int hslot = pcibr_hints->ph_host_slot[slot] - 1;

            if (hslot < 0) {
                pcibr_soft->bs_slot[slot].host_slot = slot;
            } else {
                pcibr_soft->bs_slot[slot].has_host = 1;
                pcibr_soft->bs_slot[slot].host_slot = hslot;
            }
        }
    }

    /*
     * Set-up initial values for state fields
     */
    for (slot = pcibr_soft->bs_min_slot;
                                slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
        pcibr_soft->bs_slot[slot].bss_devio.bssd_space = PCIIO_SPACE_NONE;
        pcibr_soft->bs_slot[slot].bss_devio.bssd_ref_cnt = 0;
        pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
        pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
        pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] = -1;
    }

    for (ibit = 0; ibit < 8; ++ibit) {
        pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_soft = pcibr_soft;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_list = NULL;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_ibit = ibit;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_hdlrcnt = 0;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_shared = 0;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_connected = 0;
    }


    /*
     * connect up our error handler.  PIC has 2 busses (thus resulting in 2
     * pcibr_soft structs under 1 widget), so only register a xwidget error
     * handler for PIC's bus0.  NOTE: for PIC pcibr_error_handler_wrapper()
     * is a wrapper routine we register that will call the real error handler
     * pcibr_error_handler() with the correct pcibr_soft struct.
     */
    if (busnum == 0) {
        xwidget_error_register(xconn_vhdl,
                                pcibr_error_handler_wrapper, pcibr_soft);
    }

    /*
     * Clear all pending interrupts.  Assume all interrupts are from slot 3
     * until otherwise set up.
     */
    pcireg_intr_reset_set(pcibr_soft, PIC_IRR_ALL_CLR);
    pcireg_intr_device_set(pcibr_soft, 0x006db6db);

    /* Setup the mapping register used for direct mapping */
    pcibr_directmap_init(pcibr_soft);

    /*
     * Initialize the PICs control register.
     */
    pic_ctrl_reg = pcireg_control_get(pcibr_soft);

    /* Bridges Requester ID: bus = busnum, dev = 0, func = 0 */
    pic_ctrl_reg &= ~PIC_CTRL_BUS_NUM_MASK;
    pic_ctrl_reg |= PIC_CTRL_BUS_NUM(busnum);
    pic_ctrl_reg &= ~PIC_CTRL_DEV_NUM_MASK;
    pic_ctrl_reg &= ~PIC_CTRL_FUN_NUM_MASK;

    pic_ctrl_reg &= ~PIC_CTRL_NO_SNOOP;
    pic_ctrl_reg &= ~PIC_CTRL_RELAX_ORDER;

    /* enable parity checking on PICs internal RAM */
    pic_ctrl_reg |= PIC_CTRL_PAR_EN_RESP;
    pic_ctrl_reg |= PIC_CTRL_PAR_EN_ATE;

    /* PIC BRINGUP WAR (PV# 862253): don't enable write request parity */
    if (!PCIBR_WAR_ENABLED(PV862253, pcibr_soft)) {
        pic_ctrl_reg |= PIC_CTRL_PAR_EN_REQ;
    }

    pic_ctrl_reg |= PIC_CTRL_PAGE_SIZE;

    pcireg_control_set(pcibr_soft, pic_ctrl_reg);

    /* Initialize internal mapping entries (ie. the ATEs) */
    for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++)
	pcireg_int_ate_set(pcibr_soft, entry, 0);

    pcibr_soft->bs_int_ate_resource.start = 0;
    pcibr_soft->bs_int_ate_resource.end = pcibr_soft->bs_int_ate_size - 1;

    /* Setup the PICs error interrupt handler. */
    xtalk_intr = xtalk_intr_alloc(xconn_vhdl, (device_desc_t)0, pcibr_vhdl);

    ASSERT(xtalk_intr != NULL);

    irq = ((hub_intr_t)xtalk_intr)->i_bit;
    cpu = ((hub_intr_t)xtalk_intr)->i_cpuid;

    intr_unreserve_level(cpu, irq);
    ((hub_intr_t)xtalk_intr)->i_bit = SGI_PCIBR_ERROR;
    xtalk_intr->xi_vector = SGI_PCIBR_ERROR;

    pcibr_soft->bsi_err_intr = xtalk_intr;

    /*
     * On IP35 with XBridge, we do some extra checks in pcibr_setwidint
     * in order to work around some addressing limitations.  In order
     * for that fire wall to work properly, we need to make sure we
     * start from a known clean state.
     */
    pcibr_clearwidint(pcibr_soft);

    xtalk_intr_connect(xtalk_intr,
		       (intr_func_t) pcibr_error_intr_handler,
		       (intr_arg_t) pcibr_soft,
		       (xtalk_intr_setfunc_t) pcibr_setwidint,
		       (void *) pcibr_soft);

    request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler, SA_SHIRQ, 
			"PCIBR error", (intr_arg_t) pcibr_soft);

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_vhdl,
		"pcibr_setwidint: target_id=0x%lx, int_addr=0x%lx\n",
		pcireg_intr_dst_target_id_get(pcibr_soft),
		pcireg_intr_dst_addr_get(pcibr_soft)));

    /* now we can start handling error interrupts */
    int_enable = pcireg_intr_enable_get(pcibr_soft);
    int_enable |= PIC_ISR_ERRORS;

    /* PIC BRINGUP WAR (PV# 856864 & 856865): allow the tnums that are
     * locked out to be freed up sooner (by timing out) so that the
     * read tnums are never completely used up.
     */
    if (PCIBR_WAR_ENABLED(PV856864, pcibr_soft)) {
	int_enable &= ~PIC_ISR_PCIX_REQ_TOUT;
	int_enable &= ~PIC_ISR_XREAD_REQ_TIMEOUT;

	pcireg_req_timeout_set(pcibr_soft, 0x750);
    }

    pcireg_intr_enable_set(pcibr_soft, int_enable);
    pcireg_intr_mode_set(pcibr_soft, 0); /* don't send 'clear interrupt' pkts */
    pcireg_tflush_get(pcibr_soft);       /* wait until Bridge PIO complete */

    /*
     * PIC BRINGUP WAR (PV# 856866, 859504, 861476, 861478): Don't use
     * RRB0, RRB8, RRB1, and RRB9.  Assign them to DEVICE[2|3]--VCHAN3
     * so they are not used.  This works since there is currently no
     * API to enable VCHAN3.
     */
    if (PCIBR_WAR_ENABLED(PV856866, pcibr_soft)) {
	pcireg_rrb_bit_set(pcibr_soft, 0, 0x000f000f);	/* even rrb reg */
	pcireg_rrb_bit_set(pcibr_soft, 1, 0x000f000f);	/* odd rrb reg */
    }

    /* PIC only supports 64-bit direct mapping in PCI-X mode.  Since
     * all PCI-X devices that initiate memory transactions must be
     * capable of generating 64-bit addresses, we force 64-bit DMAs.
     */
    pcibr_soft->bs_dma_flags = 0;
    if (IS_PCIX(pcibr_soft)) {
	pcibr_soft->bs_dma_flags |= PCIIO_DMA_A64;
    }

    {

    iopaddr_t		    prom_base_addr = pcibr_soft->bs_xid << 24;
    int			    prom_base_size = 0x1000000;
    int			    status;
    struct resource	    *res;

    /* Allocate resource maps based on bus page size; for I/O and memory
     * space, free all pages except those in the base area and in the
     * range set by the PROM.
     *
     * PROM creates BAR addresses in this format: 0x0ws00000 where w is
     * the widget number and s is the device register offset for the slot.
     */

    /* Setup the Bus's PCI IO Root Resource. */
    pcibr_soft->bs_io_win_root_resource.start = PCIBR_BUS_IO_BASE;
    pcibr_soft->bs_io_win_root_resource.end = 0xffffffff;
    res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
    if (!res)
	panic("PCIBR:Unable to allocate resource structure\n");

    /* Block off the range used by PROM. */
    res->start = prom_base_addr;
    res->end = prom_base_addr + (prom_base_size - 1);
    status = request_resource(&pcibr_soft->bs_io_win_root_resource, res);
    if (status)
	panic("PCIBR:Unable to request_resource()\n");

    /* Setup the Small Window Root Resource */
    pcibr_soft->bs_swin_root_resource.start = PAGE_SIZE;
    pcibr_soft->bs_swin_root_resource.end = 0x000FFFFF;

    /* Setup the Bus's PCI Memory Root Resource */
    pcibr_soft->bs_mem_win_root_resource.start = 0x200000;
    pcibr_soft->bs_mem_win_root_resource.end = 0xffffffff;
    res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
    if (!res)
	panic("PCIBR:Unable to allocate resource structure\n");

    /* Block off the range used by PROM. */
    res->start = prom_base_addr;
    res->end = prom_base_addr + (prom_base_size - 1);
    status = request_resource(&pcibr_soft->bs_mem_win_root_resource, res);
    if (status)
	panic("PCIBR:Unable to request_resource()\n");

    }


    /* build "no-slot" connection point */
    pcibr_info = pcibr_device_info_new(pcibr_soft, PCIIO_SLOT_NONE,
		 PCIIO_FUNC_NONE, PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
    noslot_conn = pciio_device_info_register(pcibr_vhdl, &pcibr_info->f_c);

    /* Store no slot connection point info for tearing it down during detach. */
    pcibr_soft->bs_noslot_conn = noslot_conn;
    pcibr_soft->bs_noslot_info = pcibr_info;

    for (slot = pcibr_soft->bs_min_slot;
				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
	/* Find out what is out there */
	(void)pcibr_slot_info_init(pcibr_vhdl, slot);
    }

    for (slot = pcibr_soft->bs_min_slot;
				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
	/* Set up the address space for this slot in the PCI land */
	(void)pcibr_slot_addr_space_init(pcibr_vhdl, slot);
    }

    for (slot = pcibr_soft->bs_min_slot;
				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
	/* Setup the device register */
	(void)pcibr_slot_device_init(pcibr_vhdl, slot);
    }

    if (IS_PCIX(pcibr_soft)) {
	pcibr_soft->bs_pcix_rbar_inuse = 0;
	pcibr_soft->bs_pcix_rbar_avail = NUM_RBAR;
	pcibr_soft->bs_pcix_rbar_percent_allowed =
					pcibr_pcix_rbars_calc(pcibr_soft);

	for (slot = pcibr_soft->bs_min_slot;
				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
	    /* Setup the PCI-X Read Buffer Attribute Registers (RBARs) */
	    (void)pcibr_slot_pcix_rbar_init(pcibr_soft, slot);
	}
    }

    for (slot = pcibr_soft->bs_min_slot;
				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
	/* Setup host/guest relations */
	(void)pcibr_slot_guest_info_init(pcibr_vhdl, slot);
    }

    /* Handle initial RRB management */
    pcibr_initial_rrb(pcibr_vhdl,
		      pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot);

   /* Before any drivers get called that may want to re-allocate RRB's,
    * let's get some special cases pre-allocated. Drivers may override
    * these pre-allocations, but by doing pre-allocations now we're
    * assured not to step all over what the driver intended.
    */
    if (pcibr_soft->bs_bricktype > 0) {
	switch (pcibr_soft->bs_bricktype) {
	case MODULE_PXBRICK:
	case MODULE_IXBRICK:
	case MODULE_OPUSBRICK:
		/*
		 * If IO9 in bus 1, allocate RRBs to all the IO9 devices
		 */
		if ((pcibr_widget_to_bus(pcibr_vhdl) == 1) &&
		    (pcibr_soft->bs_slot[0].bss_vendor_id == 0x10A9) &&
		    (pcibr_soft->bs_slot[0].bss_device_id == 0x100A)) {
			pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 4);
			pcibr_rrb_alloc_init(pcibr_soft, 1, VCHAN0, 4);
			pcibr_rrb_alloc_init(pcibr_soft, 2, VCHAN0, 4);
			pcibr_rrb_alloc_init(pcibr_soft, 3, VCHAN0, 4);
		} else {
			pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 4);
			pcibr_rrb_alloc_init(pcibr_soft, 1, VCHAN0, 4);
		}
		break;

	case MODULE_CGBRICK:
		pcibr_rrb_alloc_init(pcibr_soft, 0, VCHAN0, 8);
		break;
	} /* switch */
    }


    for (slot = pcibr_soft->bs_min_slot;
				slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
	/* Call the device attach */
	(void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);
    }

    pciio_device_attach(noslot_conn, 0);

    return 0;
}
Example #13
/*
 * PIC has two buses under a single widget.  pic_attach() calls pic_attach2()
 * to attach each of those buses.
 */
int
pic_attach(vertex_hdl_t conn_v)
{
	int		rc;
	void	*bridge0, *bridge1 = (void *)0;
	vertex_hdl_t	pcibr_vhdl0, pcibr_vhdl1 = (vertex_hdl_t)0;
	pcibr_soft_t	bus0_soft, bus1_soft = (pcibr_soft_t)0;
	vertex_hdl_t  conn_v0, conn_v1, peer_conn_v;
	int		bricktype;
	int		iobrick_type_get_nasid(nasid_t nasid);

	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v, "pic_attach()\n"));

	bridge0 = pcibr_bridge_ptr_get(conn_v, 0);
	bridge1 = pcibr_bridge_ptr_get(conn_v, 1);

	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
		    "pic_attach: bridge0=0x%lx, bridge1=0x%lx\n", 
		    bridge0, bridge1));

	conn_v0 = conn_v1 = conn_v;

	/* If dual-ported then split the two PIC buses across both Cbricks */
	peer_conn_v = pic_bus1_redist(NASID_GET(bridge0), conn_v);
	if (peer_conn_v)
		conn_v1 = peer_conn_v;

	/*
	 * Create the vertex for the PCI buses, which we
	 * will also use to hold the pcibr_soft and
	 * which will be the "master" vertex for all the
	 * pciio connection points we will hang off it.
	 * This needs to happen before we call nic_bridge_vertex_info
	 * as some of the *_vmc functions need access to the edges.
	 *
	 * Opening this vertex will provide access to
	 * the Bridge registers themselves.
	 */
	bricktype = iobrick_type_get_nasid(NASID_GET(bridge0));
	if ( bricktype == MODULE_CGBRICK ) {
		rc = hwgraph_path_add(conn_v0, EDGE_LBL_AGP_0, &pcibr_vhdl0);
		ASSERT(rc == GRAPH_SUCCESS);
		rc = hwgraph_path_add(conn_v1, EDGE_LBL_AGP_1, &pcibr_vhdl1);
		ASSERT(rc == GRAPH_SUCCESS);
	} else {
		rc = hwgraph_path_add(conn_v0, EDGE_LBL_PCIX_0, &pcibr_vhdl0);
		ASSERT(rc == GRAPH_SUCCESS);
		rc = hwgraph_path_add(conn_v1, EDGE_LBL_PCIX_1, &pcibr_vhdl1);
		ASSERT(rc == GRAPH_SUCCESS);
	}

	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
		    "pic_attach: pcibr_vhdl0=0x%lx, pcibr_vhdl1=0x%lx\n",
		    pcibr_vhdl0, pcibr_vhdl1));

	/* register pci provider array */
	pciio_provider_register(pcibr_vhdl0, &pci_pic_provider);
	pciio_provider_register(pcibr_vhdl1, &pci_pic_provider);

	pciio_provider_startup(pcibr_vhdl0);
	pciio_provider_startup(pcibr_vhdl1);

	pic_attach2(conn_v0, bridge0, pcibr_vhdl0, 0, &bus0_soft);
	pic_attach2(conn_v1, bridge1, pcibr_vhdl1, 1, &bus1_soft);

	{
	    /* If we're dual-ported finish duplicating the peer info structure.
	     * The error handler and arg are done in pic_attach2().
	     */
	    xwidget_info_t info0, info1;
		if (conn_v0 != conn_v1) {	/* dual ported */
			info0 = xwidget_info_get(conn_v0);
			info1 = xwidget_info_get(conn_v1);
			if (info1->w_efunc == (error_handler_f *)NULL)
				info1->w_efunc = info0->w_efunc;
			if (info1->w_einfo == (error_handler_arg_t)0)
				info1->w_einfo = bus1_soft;
		}
	}

	/* save a pointer to the PIC's other bus's soft struct */
	bus0_soft->bs_peers_soft = bus1_soft;
	bus1_soft->bs_peers_soft = bus0_soft;

	PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
		    "pic_attach: bus0_soft=0x%lx, bus1_soft=0x%lx\n",
		    bus0_soft, bus1_soft));

	return 0;
}
Example #14
struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
				       nasid_t nasid, int slice)
{
	int vector;
	int cpuid;
#ifdef CONFIG_SMP
	int cpuphys;
#endif
	int64_t bridge;
	int local_widget, status;
	nasid_t local_nasid;
	struct sn_irq_info *new_irq_info;
	struct sn_pcibus_provider *pci_provider;

	new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
	if (new_irq_info == NULL)
		return NULL;

	memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));

	bridge = (u64) new_irq_info->irq_bridge;
	if (!bridge) {
		kfree(new_irq_info);
		return NULL; /* irq is not a device interrupt */
	}

	local_nasid = NASID_GET(bridge);

	if (local_nasid & 1)
		local_widget = TIO_SWIN_WIDGETNUM(bridge);
	else
		local_widget = SWIN_WIDGETNUM(bridge);

	vector = sn_irq_info->irq_irq;
	/* Free the old PROM new_irq_info structure */
	sn_intr_free(local_nasid, local_widget, new_irq_info);
	unregister_intr_pda(new_irq_info);

	/* allocate a new PROM new_irq_info struct */
	status = sn_intr_alloc(local_nasid, local_widget,
			       new_irq_info, vector,
			       nasid, slice);

	/* SAL call failed */
	if (status) {
		kfree(new_irq_info);
		return NULL;
	}

	/* Update kernels new_irq_info with new target info */
	cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
				     new_irq_info->irq_slice);
	new_irq_info->irq_cpuid = cpuid;
	register_intr_pda(new_irq_info);

	pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];

	/*
	 * If this represents a line interrupt, target it.  If it's
	 * an msi (irq_int_bit < 0), it's already targeted.
	 */
	if (new_irq_info->irq_int_bit >= 0 &&
	    pci_provider && pci_provider->target_interrupt)
		(pci_provider->target_interrupt)(new_irq_info);

	spin_lock(&sn_irq_info_lock);
	list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
	spin_unlock(&sn_irq_info_lock);
	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);

#ifdef CONFIG_SMP
	cpuphys = cpu_physical_id(cpuid);
	set_irq_affinity_info((vector & 0xff), cpuphys, 0);
#endif

	return new_irq_info;
}
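Both this function and sn_set_affinity_irq() in Example #9 follow the standard RCU replacement pattern: build a complete copy of the entry off to the side, splice it in with list_replace_rcu() under the update-side spinlock, and hand the old entry to call_rcu() so concurrent readers can finish with it before it is freed. Reduced to a minimal sketch (sn_irq_replace is a made-up helper name for illustration; the lock, list, and callback names are the ones used above):

/* Update-side skeleton only; readers walk the list under
 * rcu_read_lock() using list_for_each_entry_rcu(). */
static void sn_irq_replace(struct sn_irq_info *old, struct sn_irq_info *new)
{
	spin_lock(&sn_irq_info_lock);		/* serialize updaters */
	list_replace_rcu(&old->list, &new->list);
	spin_unlock(&sn_irq_info_lock);
	call_rcu(&old->rcu, sn_irq_info_free);	/* free after a grace period */
}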
Example #15
void
ate_thaw(pcibr_dmamap_t pcibr_dmamap,
	 int ate_index,
#if PCIBR_FREEZE_TIME
	 bridge_ate_t ate,
	 int ate_total,
	 unsigned freeze_time_start,
#endif
	 unsigned *cmd_regs,
	 unsigned s)
{
    pcibr_soft_t            pcibr_soft = pcibr_dmamap->bd_soft;
    int                     dma_slot = pcibr_dmamap->bd_slot;
    int                     slot;
    bridge_t               *bridge = pcibr_soft->bs_base;
    int                     ext_ates = pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM;

    unsigned                cmd_reg;

#if PCIBR_FREEZE_TIME
    unsigned                freeze_time;
    static unsigned         max_freeze_time = 0;
    static unsigned         max_ate_total;
#endif

    if (!ext_ates)
	return;

    /* restore cmd regs */
    for (slot = pcibr_soft->bs_min_slot; 
		slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
	if ((cmd_reg = cmd_regs[slot]) & PCI_CMD_BUS_MASTER) {
		if ( IS_PIC_SOFT(pcibr_soft) ) {
			pcibr_slot_config_set(bridge, slot, PCI_CFG_COMMAND/4, cmd_reg);
		}
		else {
			if (io_get_sh_swapper(NASID_GET(bridge))) {
				bridge->b_type0_cfg_dev[slot].l[PCI_CFG_COMMAND / 4] = __swab32(cmd_reg);
			}
			else {
//				BUG(); /* Does this really work if called when io_get_sh_swapper = 0? */
//				bridge->b_type0_cfg_dev[slot].l[PCI_CFG_COMMAND / 4] = cmd_reg;
				pcibr_slot_config_set(bridge, slot, PCI_CFG_COMMAND/4, cmd_reg);
			}
		}
	}
    }
    pcibr_dmamap->bd_flags |= PCIBR_DMAMAP_BUSY;
    atomic_inc(&(pcibr_soft->bs_slot[dma_slot]. bss_ext_ates_active));

#if PCIBR_FREEZE_TIME
    freeze_time = get_timestamp() - freeze_time_start;

    if ((max_freeze_time < freeze_time) ||
	(max_ate_total < ate_total)) {
	if (max_freeze_time < freeze_time)
	    max_freeze_time = freeze_time;
	if (max_ate_total < ate_total)
	    max_ate_total = ate_total;
	pcibr_unlock(pcibr_soft, s);
	printk( "%s: pci freeze time %d usec for %d ATEs\n"
		"\tfirst ate: %R\n",
		pcibr_soft->bs_name,
		freeze_time * 1000 / 1250,
		ate_total,
		ate, ate_bits);
    } else
#endif
	pcibr_unlock(pcibr_soft, s);
}
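Example #16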
void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
	int nasid, cnode, j;
	cnodeid_t near_cnode;
	struct hubdev_info *hubdev_info;
	struct pcibus_info *soft;
	struct sn_flush_device_kernel *sn_flush_device_kernel;
	struct sn_flush_device_common *common;

	if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
		return NULL;
	}

	/*
	 * Allocate kernel bus soft and copy from prom.
	 */

	soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
	if (!soft) {
		return NULL;
	}

	memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
	soft->pbi_buscommon.bs_base =
	    (((u64) soft->pbi_buscommon.bs_base << 4) >> 4) |
	    __IA64_UNCACHED_OFFSET;

	spin_lock_init(&soft->pbi_lock);

	/*
	 * register the bridge's error interrupt handler
	 */
	if (request_irq(SGI_PCIASIC_ERROR, (void *)pcibr_error_intr_handler,
			IRQF_SHARED, "PCIBR error", (void *)(soft))) {
		printk(KERN_WARNING
		       "pcibr cannot allocate interrupt for error handler\n");
	}

	/* 
	 * Update the Bridge with the "kernel" pagesize 
	 */
	if (PAGE_SIZE < 16384) {
		pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
	} else {
		pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
	}

	nasid = NASID_GET(soft->pbi_buscommon.bs_base);
	cnode = nasid_to_cnodeid(nasid);
	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);

	if (hubdev_info->hdi_flush_nasid_list.widget_p) {
		sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
		    widget_p[(int)soft->pbi_buscommon.bs_xid];
		if (sn_flush_device_kernel) {
			for (j = 0; j < DEV_PER_WIDGET;
			     j++, sn_flush_device_kernel++) {
				common = sn_flush_device_kernel->common;
				if (common->sfdl_slot == -1)
					continue;
				if ((common->sfdl_persistent_segment ==
				     soft->pbi_buscommon.bs_persist_segment) &&
				     (common->sfdl_persistent_busnum ==
				     soft->pbi_buscommon.bs_persist_busnum))
					common->sfdl_pcibus_info =
					    soft;
			}
		}
	}

	/* Setup the PMU ATE map */
	soft->pbi_int_ate_resource.lowest_free_index = 0;
	soft->pbi_int_ate_resource.ate =
	    kzalloc(soft->pbi_int_ate_size * sizeof(u64), GFP_KERNEL);

	if (!soft->pbi_int_ate_resource.ate) {
		kfree(soft);
		return NULL;
	}

	if (prom_bussoft->bs_asic_type == PCIIO_ASIC_TYPE_TIOCP) {
		/* TIO PCI Bridge: find nearest node with CPUs */
		int e = sn_hwperf_get_nearest_node(cnode, NULL, &near_cnode);

		if (e < 0) {
			near_cnode = (cnodeid_t)-1; /* use any node */
			printk(KERN_WARNING "pcibr_bus_fixup: failed to find "
				"near node with CPUs to TIO node %d, err=%d\n",
				cnode, e);
		}
		controller->node = near_cnode;
	}
	else
		controller->node = cnode;
	return soft;
}
Example #17
/*
 * Determine the size of this bridge's external mapping SSRAM, and set
 * the control register appropriately to reflect this size, and initialize
 * the external SSRAM.
 */
int
pcibr_init_ext_ate_ram(bridge_t *bridge)
{
    int                     largest_working_size = 0;
    int                     num_entries, entry;
    int                     i, j;
    bridgereg_t             old_enable, new_enable;
    int                     s;
    int			    this_is_pic = is_pic(bridge);

    /* Probe SSRAM to determine its size. */
    if ( this_is_pic ) {
	old_enable = bridge->b_int_enable;
	new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
	bridge->b_int_enable = new_enable;
    }
    else {
	if (io_get_sh_swapper(NASID_GET(bridge))) {
		old_enable = BRIDGE_REG_GET32((&bridge->b_int_enable));
		new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
		BRIDGE_REG_SET32((&bridge->b_int_enable)) = new_enable;
	}
	else {
		old_enable = bridge->b_int_enable;
		new_enable = old_enable & ~BRIDGE_IMR_PCI_MST_TIMEOUT;
		bridge->b_int_enable = new_enable;
	}
    }

    for (i = 1; i < ATE_NUM_SIZES; i++) {
	/* Try writing a value */
	if ( this_is_pic ) {
		bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = ATE_PROBE_VALUE;
	}
	else {
		if (io_get_sh_swapper(NASID_GET(bridge)))
			bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = __swab64(ATE_PROBE_VALUE);
		else
			bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] = ATE_PROBE_VALUE;
	}

	/* Guard against wrap */
	for (j = 1; j < i; j++)
	    bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(j) - 1] = 0;

	/* See if value was written */
	if ( this_is_pic ) {
		if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == ATE_PROBE_VALUE)
				largest_working_size = i;
	}
	else {
		if (io_get_sh_swapper(NASID_GET(bridge))) {
			if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == __swab64(ATE_PROBE_VALUE))
				largest_working_size = i;
		}
		else {
			if (bridge->b_ext_ate_ram[ATE_NUM_ENTRIES(i) - 1] == ATE_PROBE_VALUE)
				largest_working_size = i;
		}
	}
    }
    if ( this_is_pic ) {
	bridge->b_int_enable = old_enable;
	bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
    }
    else {
	if (io_get_sh_swapper(NASID_GET(bridge))) {
		BRIDGE_REG_SET32((&bridge->b_int_enable)) = old_enable;
		BRIDGE_REG_GET32((&bridge->b_wid_tflush));   /* wait until Bridge PIO complete */
	}
	else {
		bridge->b_int_enable = old_enable;
		bridge->b_wid_tflush;               /* wait until Bridge PIO complete */
	}
    }

    /*
     * ensure that we write and read without any interruption.
     * The read following the write is required for the Bridge war
     */

    s = splhi();
    if ( this_is_pic ) {
	bridge->b_wid_control = (bridge->b_wid_control
			& ~BRIDGE_CTRL_SSRAM_SIZE_MASK)
			| BRIDGE_CTRL_SSRAM_SIZE(largest_working_size);
	bridge->b_wid_control;		/* inval addr bug war */
    }
    else {
	if (io_get_sh_swapper(NASID_GET(bridge))) {
		BRIDGE_REG_SET32((&(bridge->b_wid_control))) = 
				__swab32((BRIDGE_REG_GET32((&bridge->b_wid_control))
					& ~BRIDGE_CTRL_SSRAM_SIZE_MASK)
					| BRIDGE_CTRL_SSRAM_SIZE(largest_working_size));
		BRIDGE_REG_GET32((&bridge->b_wid_control));/* inval addr bug war */
	}
	else {
		bridge->b_wid_control = (bridge->b_wid_control & ~BRIDGE_CTRL_SSRAM_SIZE_MASK)
				| BRIDGE_CTRL_SSRAM_SIZE(largest_working_size);
		bridge->b_wid_control;              /* inval addr bug war */
	}
    }
    splx(s);

    num_entries = ATE_NUM_ENTRIES(largest_working_size);

    if (pcibr_debug_mask & PCIBR_DEBUG_ATE) {
	if (num_entries) {
	    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, NULL,
			"bridge at 0x%x: clearing %d external ATEs\n",
			bridge, num_entries));
	} else {
	    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, NULL,
			"bridge at 0x%x: no external ATE RAM found\n",
			bridge));
	}
    }

    /* Initialize external mapping entries */
    for (entry = 0; entry < num_entries; entry++)
	bridge->b_ext_ate_ram[entry] = 0;

    return (num_entries);
}
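The probe loop above determines the SSRAM size by writing a marker at the last entry of each candidate size, zeroing the last entry of every smaller size so that a wrapped (aliased) write is caught, and keeping the largest size whose marker reads back intact. The same technique in a minimal stand-alone sketch (generic C; ram, NUM_ENTRIES() and the probe value are made up for illustration):

#include <stdint.h>

#define PROBE_VALUE	0x5a5a5a5a5a5a5a5aULL
#define NUM_ENTRIES(sz)	(0x100UL << (sz))	/* made-up size table */

/* Returns the largest size index whose last entry holds our write,
 * i.e. the largest size actually backed by RAM. */
static int probe_ram_size(volatile uint64_t *ram, int num_sizes)
{
	int largest = 0, i, j;

	for (i = 1; i < num_sizes; i++) {
		ram[NUM_ENTRIES(i) - 1] = PROBE_VALUE;
		/* If the RAM is smaller than size i, the write above wrapped
		 * into some smaller size's last entry; clobber those entries
		 * so the read-back below fails for size i. */
		for (j = 1; j < i; j++)
			ram[NUM_ENTRIES(j) - 1] = 0;
		if (ram[NUM_ENTRIES(i) - 1] == PROBE_VALUE)
			largest = i;
	}
	return largest;
}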
Example #18
void sn_dma_flush(uint64_t addr)
{
	nasid_t nasid;
	int is_tio;
	int wid_num;
	int i, j;
	uint64_t flags;
	uint64_t itte;
	struct hubdev_info *hubinfo;
	volatile struct sn_flush_device_list *p;
	struct sn_flush_nasid_entry *flush_nasid_list;

	if (!sn_ioif_inited)
		return;

	nasid = NASID_GET(addr);
	if (-1 == nasid_to_cnodeid(nasid))
		return;

	hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;

	if (!hubinfo) {
		BUG();
	}

	flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
	if (flush_nasid_list->widget_p == NULL)
		return;

	is_tio = (nasid & 1);
	if (is_tio) {
		int itte_index;

		if (TIO_HWIN(addr))
			itte_index = 0;
		else if (TIO_BWIN_WINDOWNUM(addr))
			itte_index = TIO_BWIN_WINDOWNUM(addr);
		else
			itte_index = -1;

		if (itte_index >= 0) {
			itte = flush_nasid_list->iio_itte[itte_index];
			if (! TIO_ITTE_VALID(itte))
				return;
			wid_num = TIO_ITTE_WIDGET(itte);
		} else
			wid_num = TIO_SWIN_WIDGETNUM(addr);
	} else {
		if (BWIN_WINDOWNUM(addr)) {
			itte = flush_nasid_list->iio_itte[BWIN_WINDOWNUM(addr)];
			wid_num = IIO_ITTE_WIDGET(itte);
		} else
			wid_num = SWIN_WIDGETNUM(addr);
	}
	if (flush_nasid_list->widget_p[wid_num] == NULL)
		return;
	p = &flush_nasid_list->widget_p[wid_num][0];

	/* find a matching BAR */
	for (i = 0; i < DEV_PER_WIDGET; i++) {
		for (j = 0; j < PCI_ROM_RESOURCE; j++) {
			if (p->sfdl_bar_list[j].start == 0)
				break;
			if (addr >= p->sfdl_bar_list[j].start
			    && addr <= p->sfdl_bar_list[j].end)
				break;
		}
		if (j < PCI_ROM_RESOURCE && p->sfdl_bar_list[j].start != 0)
			break;
		p++;
	}

	/* if no matching BAR, return without doing anything. */
	if (i == DEV_PER_WIDGET)
		return;

	/*
	 * For TIOCP use the Device(x) Write Request Buffer Flush Bridge
	 * register since it ensures the data has entered the coherence
	 * domain, unlike PIC.
	 */
	if (is_tio) {
		/*
	 	 * Note:  devices behind TIOCE should never be matched in the
		 * above code, and so the following code is PIC/CP centric.
		 * If CE ever needs the sn_dma_flush mechanism, we will have
		 * to account for that here and in tioce_bus_fixup().
	 	 */
		uint32_t tio_id = HUB_L(TIO_IOSPACE_ADDR(nasid, TIO_NODE_ID));
		uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id);

		/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
		if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
			return;
		} else {
			pcireg_wrb_flush_get(p->sfdl_pcibus_info,
					     (p->sfdl_slot - 1));
		}
	} else {
		spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
				  sfdl_flush_lock, flags);

		*p->sfdl_flush_addr = 0;

		/* force an interrupt. */
		*(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;

		/* wait for the interrupt to come back. */
		while (*(p->sfdl_flush_addr) != 0x10f)
			cpu_relax();

		/* okay, everything is synched up. */
		spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags);
	}
	return;
}
Example #19
int
pic_attach(vertex_hdl_t conn_v)
{
    int		rc;
    bridge_t	*bridge0, *bridge1 = (bridge_t *)0;
    vertex_hdl_t	pcibr_vhdl0, pcibr_vhdl1 = (vertex_hdl_t)0;
    pcibr_soft_t	bus0_soft, bus1_soft = (pcibr_soft_t)0;
    vertex_hdl_t  conn_v0, conn_v1, peer_conn_v;

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v, "pic_attach()\n"));

    bridge0 = (bridge_t *) xtalk_piotrans_addr(conn_v, NULL,
              0, sizeof(bridge_t), 0);
    bridge1 = (bridge_t *)((char *)bridge0 + PIC_BUS1_OFFSET);

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
                        "pic_attach: bridge0=0x%x, bridge1=0x%x\n",
                        bridge0, bridge1));

    conn_v0 = conn_v1 = conn_v;

    /* If dual-ported then split the two PIC buses across both Cbricks */
    if ((peer_conn_v = (pic_bus1_redist(NASID_GET(bridge0), conn_v))))
        conn_v1 = peer_conn_v;

    /*
     * Create the vertex for the PCI buses, which we
     * will also use to hold the pcibr_soft and
     * which will be the "master" vertex for all the
     * pciio connection points we will hang off it.
     * This needs to happen before we call nic_bridge_vertex_info
     * as some of the *_vmc functions need access to the edges.
     *
     * Opening this vertex will provide access to
     * the Bridge registers themselves.
     */
    /* FIXME: what should the hwgraph path look like ? */
    rc = hwgraph_path_add(conn_v0, EDGE_LBL_PCIX_0, &pcibr_vhdl0);
    ASSERT(rc == GRAPH_SUCCESS);
    rc = hwgraph_path_add(conn_v1, EDGE_LBL_PCIX_1, &pcibr_vhdl1);
    ASSERT(rc == GRAPH_SUCCESS);

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
                        "pic_attach: pcibr_vhdl0=%v, pcibr_vhdl1=%v\n",
                        pcibr_vhdl0, pcibr_vhdl1));

    /* register pci provider array */
    pciio_provider_register(pcibr_vhdl0, &pci_pic_provider);
    pciio_provider_register(pcibr_vhdl1, &pci_pic_provider);

    pciio_provider_startup(pcibr_vhdl0);
    pciio_provider_startup(pcibr_vhdl1);

    pcibr_attach2(conn_v0, bridge0, pcibr_vhdl0, 0, &bus0_soft);
    pcibr_attach2(conn_v1, bridge1, pcibr_vhdl1, 1, &bus1_soft);

    /* save a pointer to the PIC's other bus's soft struct */
    bus0_soft->bs_peers_soft = bus1_soft;
    bus1_soft->bs_peers_soft = bus0_soft;

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, conn_v,
                        "pic_attach: bus0_soft=0x%x, bus1_soft=0x%x\n",
                        bus0_soft, bus1_soft));

    return 0;
}