Example #1
static void unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct device *s = &c->dev;
	struct device_attribute *attrs, *pmc_attrs;
	int i, nattrs;

	BUG_ON(!c->hotpluggable);

#ifdef CONFIG_PPC64
	if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
			cpu_has_feature(CPU_FTR_SMT))
		device_remove_file(s, &dev_attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	switch (cur_cpu_spec->pmc_type) {
#ifdef HAS_PPC_PMC_IBM
	case PPC_PMC_IBM:
		attrs = ibm_common_attrs;
		nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
	case PPC_PMC_G4:
		attrs = g4_common_attrs;
		nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
		pmc_attrs = classic_pmc_attrs;
		break;
#endif /* HAS_PPC_PMC_G4 */
#ifdef HAS_PPC_PMC_PA6T
	case PPC_PMC_PA6T:
		/* PA Semi starts counting at PMC0 */
		attrs = pa6t_attrs;
		nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
		pmc_attrs = NULL;
		break;
#endif /* HAS_PPC_PMC_PA6T */
	default:
		attrs = NULL;
		nattrs = 0;
		pmc_attrs = NULL;
	}

	for (i = 0; i < nattrs; i++)
		device_remove_file(s, &attrs[i]);

	if (pmc_attrs)
		for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
			device_remove_file(s, &pmc_attrs[i]);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_MMCRA))
		device_remove_file(s, &dev_attr_mmcra);

	if (cpu_has_feature(CPU_FTR_PURR))
		device_remove_file(s, &dev_attr_purr);

	if (cpu_has_feature(CPU_FTR_SPURR))
		device_remove_file(s, &dev_attr_spurr);

	if (cpu_has_feature(CPU_FTR_DSCR))
		device_remove_file(s, &dev_attr_dscr);

	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
		device_remove_file(s, &dev_attr_pir);
#endif /* CONFIG_PPC64 */

	cacheinfo_cpu_offline(cpu);
}
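The sizeof(x) / sizeof(struct device_attribute) divisions above compute the attribute-array lengths by hand; the kernel's ARRAY_SIZE() macro from <linux/kernel.h> expresses the same thing and cannot silently go stale if the element type changes. A minimal sketch of a helper built on that idea (the helper name is hypothetical; the attribute arrays are the ones used above):

/* Sketch only: remove a whole attribute array, with its length taken
 * from ARRAY_SIZE() at the call site instead of an open-coded sizeof
 * division, e.g. example_remove_attrs(s, ibm_common_attrs,
 * ARRAY_SIZE(ibm_common_attrs)).
 */
static void example_remove_attrs(struct device *s,
				 struct device_attribute *attrs,
				 size_t nattrs)
{
	size_t i;

	for (i = 0; i < nattrs; i++)
		device_remove_file(s, &attrs[i]);
}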
Example #2
int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
{
	int written = 0;
	__be64 olen;
	s64 len, rc;
	unsigned long flags;
	__be64 evt;

	if (!opal.entry)
		return -ENODEV;

	/* We want put_chars to be atomic to avoid mangling of hvsi
	 * packets. To do that, we first test for room and return
	 * -EAGAIN if there isn't enough.
	 *
	 * Unfortunately, opal_console_write_buffer_space() doesn't
	 * appear to work on opal v1, so we just assume there is
	 * enough room and be done with it
	 */
	spin_lock_irqsave(&opal_write_lock, flags);
	if (firmware_has_feature(FW_FEATURE_OPALv2)) {
		rc = opal_console_write_buffer_space(vtermno, &olen);
		len = be64_to_cpu(olen);
		if (rc || len < total_len) {
			spin_unlock_irqrestore(&opal_write_lock, flags);
			/* Closed -> drop characters */
			if (rc)
				return total_len;
			opal_poll_events(NULL);
			return -EAGAIN;
		}
	}

	/* We still try to handle partial completions, though they
	 * should no longer happen.
	 */
	rc = OPAL_BUSY;
	while(total_len > 0 && (rc == OPAL_BUSY ||
				rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
		olen = cpu_to_be64(total_len);
		rc = opal_console_write(vtermno, &olen, data);
		len = be64_to_cpu(olen);

		/* Closed or other error drop */
		if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
		    rc != OPAL_BUSY_EVENT) {
			written = total_len;
			break;
		}
		if (rc == OPAL_SUCCESS) {
			total_len -= len;
			data += len;
			written += len;
		}
		/* This is a bit nasty but we need that for the console to
	 * flush when there aren't any interrupts. We will clean
	 * things up a bit later to limit that to synchronous paths
	 * such as the kernel console and xmon/udbg.
	 */
		do
			opal_poll_events(&evt);
		while(rc == OPAL_SUCCESS &&
			(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
	}
	spin_unlock_irqrestore(&opal_write_lock, flags);
	return written;
}
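opal_put_chars() returns the number of bytes the firmware accepted, -EAGAIN when the console buffer has no room yet, or -ENODEV when OPAL is absent; the real consumers (the hvc console layer, udbg) do their own retrying. A hedged sketch of a caller that flushes a whole string (the helper name is hypothetical):

/* Sketch only: keep feeding opal_put_chars() until the whole buffer
 * has been accepted, spinning politely while the firmware reports
 * that there is no room yet.
 */
static void example_flush_string(uint32_t vtermno, const char *s, int count)
{
	while (count > 0) {
		int written = opal_put_chars(vtermno, s, count);

		if (written == -EAGAIN) {
			cpu_relax();		/* no room yet, try again */
			continue;
		}
		if (written <= 0)
			break;			/* closed or error: give up */
		s += written;
		count -= written;
	}
}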
Example #3
static int __init opal_init(void)
{
	struct device_node *np, *consoles;
	const __be32 *irqs;
	int rc, i, irqlen;

	opal_node = of_find_node_by_path("/ibm,opal");
	if (!opal_node) {
		pr_warn("opal: Node not found\n");
		return -ENODEV;
	}

	/* Register OPAL consoles if there are any serial ports */
	if (firmware_has_feature(FW_FEATURE_OPALv2))
		consoles = of_find_node_by_path("/ibm,opal/consoles");
	else
		consoles = of_node_get(opal_node);
	if (consoles) {
		for_each_child_of_node(consoles, np) {
			if (strcmp(np->name, "serial"))
				continue;
			of_platform_device_create(np, NULL, NULL);
		}
		of_node_put(consoles);
	}

	/* Find all OPAL interrupts and request them */
	irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
	pr_debug("opal: Found %d interrupts reserved for OPAL\n",
		 irqs ? (irqlen / 4) : 0);
	opal_irq_count = irqs ? (irqlen / 4) : 0;
	opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
	for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
		unsigned int hwirq = be32_to_cpup(irqs);
		unsigned int irq = irq_create_mapping(NULL, hwirq);
		if (irq == NO_IRQ) {
			pr_warning("opal: Failed to map irq 0x%x\n", hwirq);
			continue;
		}
		rc = request_irq(irq, opal_interrupt, 0, "opal", NULL);
		if (rc)
			pr_warning("opal: Error %d requesting irq %d"
				   " (0x%x)\n", rc, irq, hwirq);
		opal_irqs[i] = irq;
	}

	/* Create "opal" kobject under /sys/firmware */
	rc = opal_sysfs_init();
	if (rc == 0) {
		/* Setup error log interface */
		rc = opal_elog_init();
		/* Setup code update interface */
		opal_flash_init();
		/* Setup platform dump extract interface */
		opal_platform_dump_init();
		/* Setup system parameters interface */
		opal_sys_param_init();
		/* Setup message log interface. */
		opal_msglog_init();
	}

	return 0;
}
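Note that the kzalloc() result above is written to without a NULL check. A defensive sketch of the same allocation (not the upstream code) would bail out early and let kcalloc() carry the element count:

	/* Sketch only: same allocation with an explicit failure path;
	 * kcalloc() also avoids the open-coded multiplication in the
	 * size argument.
	 */
	opal_irqs = kcalloc(opal_irq_count, sizeof(unsigned int), GFP_KERNEL);
	if (!opal_irqs)
		return -ENOMEM;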
Example #4
static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		return PCI_PROBE_DEVTREE;
	return PCI_PROBE_NORMAL;
}
static int opal_xscom_init(void)
{
	if (firmware_has_feature(FW_FEATURE_OPAL))
		scom_init(&opal_scom_controller);
	return 0;
}
static void ps3_ehci_driver_unregister(struct ps3_system_bus_driver *drv)
{
	if (firmware_has_feature(FW_FEATURE_PS3_LV1))
		ps3_system_bus_driver_unregister(drv);
}
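All three wrappers follow the same shape: the underlying operation is skipped, or degrades to a harmless no-op, when the relevant firmware feature is absent, so callers need no platform checks of their own. A hedged sketch of a module exit path using the PS3 wrapper (the driver structure name is assumed for illustration):

/* Hypothetical module exit: on non-PS3 hardware the wrapper above
 * simply does nothing, so this is safe to call unconditionally.
 */
static void __exit example_ps3_ehci_exit(void)
{
	ps3_ehci_driver_unregister(&ps3_ehci_driver);
}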
Example #7
void __init early_setup(unsigned long dt_ptr)
{
	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Fill in any uninitialised pacas */
	initialise_pacas();

	/* Identify CPU type */
	identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	setup_paca(0);

	/* Initialize lockdep early or else spinlocks will blow */
	lockdep_init();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

 	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	setup_paca(boot_cpuid);

	/* Fix up paca fields required for the boot cpu */
	get_paca()->cpu_start = 1;
	get_paca()->stab_real = __pa((u64)&initial_stab);
	get_paca()->stab_addr = (u64)&initial_stab;

	/* Probe the machine type */
	probe_machine();

	setup_kdump_trampoline();

	DBG("Found, Initializing memory management...\n");

	/*
	 * Initialize the MMU Hash table and create the linear mapping
	 * of memory. Has to be done before stab/slb initialization as
	 * this is currently where the page size encoding is obtained
	 */
	htab_initialize();

	/*
	 * Initialize stab / SLB management except on iSeries
	 */
	if (cpu_has_feature(CPU_FTR_SLB))
		slb_initialize();
	else if (!firmware_has_feature(FW_FEATURE_ISERIES))
		stab_initialize(get_paca()->stab_real);

	DBG(" <- early_setup()\n");
}
static int ps3_ehci_driver_register(struct ps3_system_bus_driver *drv)
{
	return firmware_has_feature(FW_FEATURE_PS3_LV1)
		? ps3_system_bus_driver_register(drv)
		: 0;
}
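DBG() in early_setup() is the file-local early-debug helper of setup_64.c; it is conventionally a thin wrapper around udbg_printf() that compiles away unless DEBUG is defined, roughly:

/* Rough sketch of the conventional definition: with DEBUG unset the
 * calls vanish entirely; udbg_printf() is usable long before printk,
 * which is why early_setup() relies on it.
 */
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif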
Example #9
void vpa_init(int cpu)
{
    int hwcpu = get_hard_smp_processor_id(cpu);
    unsigned long addr;
    long ret;
    struct paca_struct *pp;
    struct dtl_entry *dtl;

    /*
     * The spec says it "may be problematic" if CPU x registers the VPA of
     * CPU y. We should never do that, but wail if we ever do.
     */
    WARN_ON(cpu != smp_processor_id());

    if (cpu_has_feature(CPU_FTR_ALTIVEC))
        lppaca_of(cpu).vmxregs_in_use = 1;

    if (cpu_has_feature(CPU_FTR_ARCH_207S))
        lppaca_of(cpu).ebb_regs_in_use = 1;

    addr = __pa(&lppaca_of(cpu));
    ret = register_vpa(hwcpu, addr);

    if (ret) {
        pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
               "%lx failed with %ld\n", cpu, hwcpu, addr, ret);
        return;
    }
    /*
     * PAPR says this feature is SLB-Buffer but firmware never
     * reports that.  All SPLPARs support the SLB shadow buffer.
     */
    addr = __pa(&slb_shadow[cpu]);
    if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
        ret = register_slb_shadow(hwcpu, addr);
        if (ret)
            pr_err("WARNING: SLB shadow buffer registration for "
                   "cpu %d (hw %d) of area %lx failed with %ld\n",
                   cpu, hwcpu, addr, ret);
    }

    /*
     * Register dispatch trace log, if one has been allocated.
     */
    pp = &paca[cpu];
    dtl = pp->dispatch_log;
    if (dtl) {
        pp->dtl_ridx = 0;
        pp->dtl_curr = dtl;
        lppaca_of(cpu).dtl_idx = 0;

        /* hypervisor reads buffer length from this field */
        dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
        ret = register_dtl(hwcpu, __pa(dtl));
        if (ret)
            pr_err("WARNING: DTL registration of cpu %d (hw %d) "
                   "failed with %ld\n", smp_processor_id(),
                   hwcpu, ret);
        lppaca_of(cpu).dtl_enable_mask = 2;
    }
}
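vpa_init() is only meaningful on shared-processor LPARs, so its callers in the pseries SMP bringup path guard it with a firmware feature test, roughly as follows (the surrounding function name is illustrative):

/* Sketch of the call-site pattern: register the VPA for a CPU coming
 * online only when running under a shared-processor LPAR.
 */
static void example_smp_setup_cpu(int cpu)
{
	if (firmware_has_feature(FW_FEATURE_SPLPAR))
		vpa_init(cpu);
}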
Example #10
void htab_initialize_secondary(void)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		mtspr(SPRN_SDR1, _SDR1);
}
Example #11
void __init htab_initialize(void)
{
	unsigned long table;
	unsigned long pteg_count;
	unsigned long mode_rw;
	unsigned long base = 0, size = 0;
	int i;

	extern unsigned long tce_alloc_start, tce_alloc_end;

	DBG(" -> htab_initialize()\n");

	/* Initialize page sizes */
	htab_init_page_sizes();

	/*
	 * Calculate the required size of the htab.  We want the number of
	 * PTEGs to equal one half the number of real pages.
	 */ 
	htab_size_bytes = htab_get_table_size();
	pteg_count = htab_size_bytes >> 7;

	htab_hash_mask = pteg_count - 1;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		/* Using a hypervisor which owns the htab */
		htab_address = NULL;
		_SDR1 = 0; 
	} else {
		/* Find storage for the HPT.  Must be contiguous in
		 * the absolute address space.
		 */
		table = lmb_alloc(htab_size_bytes, htab_size_bytes);

		DBG("Hash table allocated at %lx, size: %lx\n", table,
		    htab_size_bytes);

		htab_address = abs_to_virt(table);

		/* htab absolute addr + encoded htabsize */
		_SDR1 = table + __ilog2(pteg_count) - 11;

		/* Initialize the HPT with no entries */
		memset((void *)table, 0, htab_size_bytes);

		/* Set SDR1 */
		mtspr(SPRN_SDR1, _SDR1);
	}

	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;

	/* On U3 based machines, we need to reserve the DART area and
	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
	 * cacheable later on
	 */

	/* Create the bolted linear mapping in the hash table */
	for (i=0; i < lmb.memory.cnt; i++) {
		base = (unsigned long)__va(lmb.memory.region[i].base);
		size = lmb.memory.region[i].size;

		DBG("creating mapping for region: %lx : %lx\n", base, size);

#ifdef CONFIG_U3_DART
		/* Do not map the DART space. Fortunately, it will be aligned
		 * in such a way that it will not cross two lmb regions and
		 * will fit within a single 16Mb page.
		 * The DART space is assumed to be a full 16Mb region even if
		 * we only use 2Mb of that space. We will use more of it later
		 * for AGP GART. We have to use a full 16Mb large page.
		 */
		DBG("DART base: %lx\n", dart_tablebase);

		if (dart_tablebase != 0 && dart_tablebase >= base
		    && dart_tablebase < (base + size)) {
			unsigned long dart_table_end = dart_tablebase + 16 * MB;
			if (base != dart_tablebase)
				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
							__pa(base), mode_rw,
							mmu_linear_psize));
			if ((base + size) > dart_table_end)
				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
							base + size,
							__pa(dart_table_end),
							 mode_rw,
							 mmu_linear_psize));
			continue;
		}
#endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
					mode_rw, mmu_linear_psize));
	}

	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
	 * explicitly map the TCE area at the top of RAM. We also cope with the
	 * case that the TCEs start below memory_limit.
	 * tce_alloc_start/end are 16MB aligned so the mapping should work
	 * for either 4K or 16MB pages.
	 */
	if (tce_alloc_start) {
		tce_alloc_start = (unsigned long)__va(tce_alloc_start);
		tce_alloc_end = (unsigned long)__va(tce_alloc_end);

		if (base + size >= tce_alloc_start)
			tce_alloc_start = base + size + 1;

		BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
					 __pa(tce_alloc_start), mode_rw,
					 mmu_linear_psize));
	}

	htab_finish_init();

	DBG(" <- htab_initialize()\n");
}
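_SDR1 packs the real address of the hash table together with the architecture's HTABSIZE encoding (the number of PTEGs expressed as a power of two above 2^11). A worked example with illustrative numbers:

/*
 * Worked example (illustrative size): for a 16 MB hash table,
 *   htab_size_bytes = 1UL << 24
 *   pteg_count      = htab_size_bytes >> 7 = 1UL << 17  (128-byte PTEGs)
 *   __ilog2(pteg_count) - 11 = 17 - 11 = 6
 * so _SDR1 = table | 6 (the table is size-aligned, so the addition
 * only sets low bits), telling the MMU the table holds
 * 2^(11 + 6) = 2^17 PTEGs starting at that real address.
 */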
Example #12
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
						 skb_headlen(skb),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long dma_addr;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
					frag->page_offset, frag->size,
					DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | frag->size;
		descs[i+1].fields.address = dma_addr;
	}

	if (ibmveth_send(adapter, descs)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 0; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}
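The copybreak test above is the heart of the bounce-buffer path: small, linear skbs are copied into a preallocated buffer instead of being DMA-mapped, saving a TCE insert and remove per packet. Expressed as a standalone predicate (sketch only, the helper name is hypothetical):

/* Sketch: decide whether the preallocated bounce buffer should be
 * used; tx_copybreak is the driver's tunable size threshold.
 */
static bool example_use_bounce_buffer(const struct sk_buff *skb, bool force)
{
	return force || (!skb_is_nonlinear(skb) && skb->len < tx_copybreak);
}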