int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
#endif
	case KVM_CAP_PPC_HTM:
		/* kvm may be NULL here (device-level query); don't deref it */
		r = hv_enabled && cpu_has_feature(CPU_FTR_TM_COMP);
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
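For context, userspace reaches this handler through the KVM_CHECK_EXTENSION ioctl. A minimal sketch, assuming a Linux host with /dev/kvm available (the capability queried is just an example, and VM-level queries also require KVM_CAP_CHECK_EXTENSION_VM support):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0)
		return 1;

	/* VM-level capability queries go through a VM fd */
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
	if (vm_fd < 0)
		return 1;

	/* On powerpc this lands in kvm_vm_ioctl_check_extension() */
	int r = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
	printf("KVM_CAP_NR_VCPUS -> %d recommended vcpus\n", r);
	return 0;
}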
Example #2
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;
	/* FIXME!!
	 * Should some of this be a vm ioctl? And is that possible now?
	 */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_core;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = hv_enabled;
		/* PPC970 requires an RMA */
		if (r && cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		if (hv_enabled)
			r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
		else
			r = 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}
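This older, device-level variant answers the same query on the /dev/kvm fd itself, before any VM exists; a sketch (error handling omitted):

/* Sketch: legacy device-level capability query (no VM fd needed). */
int kvm_fd = open("/dev/kvm", O_RDWR);
int r = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
/* 0 => unsupported; for -HV this build returned threads_per_core */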
int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
{
	struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
	u32 rings_per_vf;
	u64 reg_val;

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	/* INPUT_CONTROL[RPVF] gives the VF IOq count */
	reg_val = octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(0));

	oct->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
		      CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
	oct->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
		      CN23XX_PKT_INPUT_CTL_VF_NUM_MASK;

	reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;

	rings_per_vf = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;

	cn23xx->conf = oct_get_config_info(oct, LIO_23XX);
	if (!cn23xx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN23XX\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	if (oct->sriov_info.rings_per_vf > rings_per_vf) {
		dev_warn(&oct->pci_dev->dev,
			 "num_queues:%d greater than PF configured rings_per_vf:%d. Reducing to %d.\n",
			 oct->sriov_info.rings_per_vf, rings_per_vf,
			 rings_per_vf);
		oct->sriov_info.rings_per_vf = rings_per_vf;
	} else if (rings_per_vf > num_present_cpus()) {
		dev_warn(&oct->pci_dev->dev,
			 "PF configured rings_per_vf:%d greater than num_cpus:%d. Using rings_per_vf:%d equal to num cpus\n",
			 rings_per_vf,
			 num_present_cpus(),
			 num_present_cpus());
		oct->sriov_info.rings_per_vf = num_present_cpus();
	} else {
		oct->sriov_info.rings_per_vf = rings_per_vf;
	}

	oct->fn_list.setup_iq_regs = cn23xx_setup_vf_iq_regs;
	oct->fn_list.setup_oq_regs = cn23xx_setup_vf_oq_regs;
	oct->fn_list.setup_mbox = cn23xx_setup_vf_mbox;
	oct->fn_list.free_mbox = cn23xx_free_vf_mbox;

	oct->fn_list.msix_interrupt_handler = cn23xx_vf_msix_interrupt_handler;

	oct->fn_list.setup_device_regs = cn23xx_setup_vf_device_regs;
	oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;

	oct->fn_list.enable_interrupt = cn23xx_enable_vf_interrupt;
	oct->fn_list.disable_interrupt = cn23xx_disable_vf_interrupt;

	oct->fn_list.enable_io_queues = cn23xx_enable_vf_io_queues;
	oct->fn_list.disable_io_queues = cn23xx_disable_vf_io_queues;

	return 0;
}
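The RPVF read above is a plain shift-and-mask field extraction; a standalone sketch with placeholder field positions (the real CN23XX POS/MASK constants are not reproduced here):

#include <stdint.h>
#include <stdio.h>

/* Placeholder field layout, not the real CN23XX register definition. */
#define RPVF_POS  48
#define RPVF_MASK 0x3f

int main(void)
{
	uint64_t reg_val = (uint64_t)8 << RPVF_POS;   /* PF granted 8 rings */
	uint32_t rings_per_vf = (reg_val >> RPVF_POS) & RPVF_MASK;

	printf("rings_per_vf = %u\n", rings_per_vf);
	return 0;
}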
Example #4
/* Put the CAIF packet on the virtio ring and kick the receiver */
static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct cfv_info *cfv = netdev_priv(netdev);
	struct buf_info *buf_info;
	struct scatterlist sg;
	unsigned long flags;
	bool flow_off = false;
	int ret;

	/* garbage collect released buffers */
	cfv_release_used_buf(cfv->vq_tx);
	spin_lock_irqsave(&cfv->tx_lock, flags);

	/* The flow-off check takes the number of CPUs into account to make
	 * sure the virtqueue will not be overfilled under any possible SMP
	 * condition.
	 *
	 * Flow-on is triggered when sufficient buffers are freed.
	 */
	if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) {
		flow_off = true;
		cfv->stats.tx_full_ring++;
	}

	/* If we run out of memory, we release the memory reserve and retry
	 * allocation.
	 */
	buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
	if (unlikely(!buf_info)) {
		cfv->stats.tx_no_mem++;
		flow_off = true;

		if (cfv->reserved_mem && cfv->genpool) {
			gen_pool_free(cfv->genpool,  cfv->reserved_mem,
				      cfv->reserved_size);
			cfv->reserved_mem = 0;
			buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
		}
	}

	if (unlikely(flow_off)) {
		/* Turn flow on when a 1/4 of the descriptors are released */
		cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4;
		/* Enable notifications of recycled TX buffers */
		virtqueue_enable_cb(cfv->vq_tx);
		netif_tx_stop_all_queues(netdev);
	}

	if (unlikely(!buf_info)) {
		/* If the memory reserve does its job, this shouldn't happen */
		netdev_warn(cfv->ndev, "Out of gen_pool memory\n");
		goto err;
	}

	ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC);
	if (unlikely(ret < 0)) {
		/* If flow control works, this shouldn't happen */
		netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n",
			    ret);
		goto err;
	}

	/* update netdev statistics */
	cfv->ndev->stats.tx_packets++;
	cfv->ndev->stats.tx_bytes += skb->len;
	spin_unlock_irqrestore(&cfv->tx_lock, flags);

	/* tell the remote processor it has a pending message to read */
	virtqueue_kick(cfv->vq_tx);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
err:
	spin_unlock_irqrestore(&cfv->tx_lock, flags);
	cfv->ndev->stats.tx_dropped++;
	free_buf_info(cfv, buf_info);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
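The thresholds above pair a CPU-count head-room check (flow off) with a quarter-ring watermark (flow back on); a worked sketch with assumed sizes:

/* Assumed values for illustration only. */
unsigned int num_free   = 3;     /* cfv->vq_tx->num_free             */
unsigned int ncpus      = 4;     /* num_present_cpus()               */
unsigned int vring_size = 256;   /* virtqueue_get_vring_size(vq_tx)  */

int flow_off = num_free <= ncpus;            /* 1: stop the net queues  */
unsigned int watermark_tx = vring_size / 4;  /* reopen at 64 free slots */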
int
CommOS_StartIO(const char *dispatchTaskName,
               CommOSDispatchFunc dispatchFunc,
               unsigned int intervalMillis,
               unsigned int maxCycles,
               const char *aioTaskName)
{
   int rc;
   int cpu;

   if (running) {
      CommOS_Debug(("%s: I/O tasks already running.\n", __FUNCTION__));
      return 0;
   }

   if (!dispatchFunc) {
      CommOS_Log(("%s: a NULL Dispatch handler was passed.\n", __FUNCTION__));
      return -1;
   }
   dispatch = dispatchFunc;

   if (intervalMillis == 0) {
      intervalMillis = 4;
   }
   if ((dispatchInterval = msecs_to_jiffies(intervalMillis)) < 1) {
      dispatchInterval = 1;
   }
   if (maxCycles > DISPATCH_MAX_CYCLES) {
      dispatchMaxCycles = DISPATCH_MAX_CYCLES;
   } else if (maxCycles > 0) {
      dispatchMaxCycles = maxCycles;
   }
   CommOS_Debug(("%s: Interval millis %u (jif:%u).\n", __FUNCTION__,
                 intervalMillis, dispatchInterval));
   CommOS_Debug(("%s: Max cycles %u.\n", __FUNCTION__, dispatchMaxCycles));

   numCpus = num_present_cpus();
   dispatchWQ = CreateWorkqueue(dispatchTaskName);
   if (!dispatchWQ) {
      CommOS_Log(("%s: Couldn't create %s task(s).\n", __FUNCTION__,
                  dispatchTaskName));
      return -1;
   }

   if (aioTaskName) {
      aioWQ = CreateWorkqueue(aioTaskName);
      if (!aioWQ) {
         CommOS_Log(("%s: Couldn't create %s task(s).\n", __FUNCTION__,
                     aioTaskName));
         DestroyWorkqueue(dispatchWQ);
         return -1;
      }
   } else {
      aioWQ = NULL;
   }

   running = 1;
   for (cpu = 0; cpu < numCpus; cpu++) {
      CommOS_InitWork(&dispatchWorksNow[cpu], DispatchWrapper);
      CommOS_InitWork(&dispatchWorks[cpu], DispatchWrapper);
      rc = QueueDelayedWorkOn(cpu, dispatchWQ,
                              &dispatchWorks[cpu],
                              dispatchInterval);
      if (rc != 0) {
         CommOS_StopIO();
         return -1;
      }
   }
   CommOS_Log(("%s: Created I/O task(s) successfully.\n", __FUNCTION__));
   return 0;
}
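A hypothetical caller, for illustration only: the CommOSDispatchFunc signature is not shown above, so the one assumed here (returning the amount of work done per cycle) is a guess.

/* Hypothetical usage sketch; MyDispatch and its signature are assumed. */
static unsigned int
MyDispatch(void)
{
   return 0;   /* report how much work was done this cycle (assumed) */
}

static int
StartExample(void)
{
   /* 4 ms dispatch interval, default max cycles, with an AIO task */
   return CommOS_StartIO("comm-dispatch", MyDispatch, 4, 0, "comm-aio");
}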
Example #6
static int __init parisc_init(void)
{
	u32 osid = (OS_ID_LINUX << 16);

	parisc_proc_mkdir();
	parisc_init_resources();
	do_device_inventory();                  /* probe for hardware */

	parisc_pdc_chassis_init();
	
	/* set up a new LED state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BSTART);

	/* tell PDC we're Linux. Never mind failure. */
	pdc_stable_write(0x40, &osid, sizeof(osid));
	
	/* start with known state */
	flush_cache_all_local();
	flush_tlb_all_local(NULL);

	processor_init();
#ifdef CONFIG_SMP
	pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n",
		num_online_cpus(), num_present_cpus(),
#else
	pr_info("CPU(s): 1 x %s at %d.%06d MHz\n",
#endif
			boot_cpu_data.cpu_name,
			boot_cpu_data.cpu_hz / 1000000,
			boot_cpu_data.cpu_hz % 1000000);

	parisc_setup_cache_timing();

	/* These are in a non-obvious order, will fix when we have an iotree */
#if defined(CONFIG_IOSAPIC)
	iosapic_init();
#endif
#if defined(CONFIG_IOMMU_SBA)
	sba_init();
#endif
#if defined(CONFIG_PCI_LBA)
	lba_init();
#endif

	/* CCIO before any potential subdevices */
#if defined(CONFIG_IOMMU_CCIO)
	ccio_init();
#endif

	/*
	 * Need to register Asp & Wax before the EISA adapters for the IRQ
	 * regions.  EISA must come before PCI to be sure it gets IRQ region
	 * 0.
	 */
#if defined(CONFIG_GSC_LASI) || defined(CONFIG_GSC_WAX)
	gsc_init();
#endif
#ifdef CONFIG_EISA
	eisa_init();
#endif

#if defined(CONFIG_HPPB)
	hppb_init();
#endif

#if defined(CONFIG_GSC_DINO)
	dino_init();
#endif

#ifdef CONFIG_CHASSIS_LCD_LED
	register_led_regions();	/* register LED port info in procfs */
#endif

	return 0;
}
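For reference, the counters behind the pr_info above come from the standard cpumask API and can be queried from any kernel context; a minimal sketch:

/* Minimal sketch: the standard cpumask counters used in the pr_info above. */
#include <linux/cpumask.h>
#include <linux/printk.h>

static void cpu_count_example(void)
{
	pr_info("CPUs: %d online, %d present, %d possible\n",
		num_online_cpus(), num_present_cpus(), num_possible_cpus());
}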
Example #7
int init_mmdc_settings(struct platform_device *busfreq_pdev)
{
	struct device *dev = &busfreq_pdev->dev;
	struct platform_device *ocram_dev;
	unsigned int iram_paddr;
	int i, err;
	u32 cpu;
	struct device_node *node;
	struct gen_pool *iram_pool;

	node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-mmdc-combine");
	if (!node) {
		printk(KERN_ERR "failed to find imx6q-mmdc device tree data!\n");
		return -EINVAL;
	}
	mmdc_base = of_iomap(node, 0);
	WARN(!mmdc_base, "unable to map mmdc registers\n");

	node = NULL;
	if (cpu_is_imx6q())
		node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-iomuxc");
	if (cpu_is_imx6dl())
		node = of_find_compatible_node(NULL, NULL,
			"fsl,imx6dl-iomuxc");
	if (!node) {
		printk(KERN_ERR "failed to find imx6q-iomux device tree data!\n");
		return -EINVAL;
	}
	iomux_base = of_iomap(node, 0);
	WARN(!iomux_base, "unable to map iomux registers\n");

	node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ccm");
	if (!node) {
		printk(KERN_ERR "failed to find imx6q-ccm device tree data!\n");
		return -EINVAL;
	}
	ccm_base = of_iomap(node, 0);
	WARN(!ccm_base, "unable to map ccm registers\n");

	node = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
	if (!node) {
		printk(KERN_ERR "failed to find imx6q-pl310-cache device tree data!\n");
		return -EINVAL;
	}
	l2_base = of_iomap(node, 0);
	WARN(!l2_base, "unable to map pl310 registers\n");

	node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic");
	if (!node) {
		printk(KERN_ERR "failed to find imx6q-a9-gic device tree data!\n");
		return -EINVAL;
	}
	gic_dist_base = of_iomap(node, 0);
	WARN(!gic_dist_base, "unable to map gic dist registers\n");

	if (cpu_is_imx6q())
		ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6q) +
			ARRAY_SIZE(ddr3_calibration);
	if (cpu_is_imx6dl())
		ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6dl) +
			ARRAY_SIZE(ddr3_calibration);

	normal_mmdc_settings = kmalloc(ddr_settings_size * 8, GFP_KERNEL);
	if (!normal_mmdc_settings)
		return -ENOMEM;
	if (cpu_is_imx6q()) {
		memcpy(normal_mmdc_settings, ddr3_dll_mx6q,
			sizeof(ddr3_dll_mx6q));
		memcpy(((char *)normal_mmdc_settings + sizeof(ddr3_dll_mx6q)),
			ddr3_calibration, sizeof(ddr3_calibration));
	}
	if (cpu_is_imx6dl()) {
		memcpy(normal_mmdc_settings, ddr3_dll_mx6dl,
			sizeof(ddr3_dll_mx6dl));
		memcpy(((char *)normal_mmdc_settings + sizeof(ddr3_dll_mx6dl)),
			ddr3_calibration, sizeof(ddr3_calibration));
	}
	/* store the original DDR settings at boot. */
	for (i = 0; i < ddr_settings_size; i++) {
		/*
		 * writes via command mode register cannot be read back.
		 * hence hardcode them in the initial static array.
		 * this may require modification on a per customer basis.
		 */
		if (normal_mmdc_settings[i][0] != 0x1C)
			normal_mmdc_settings[i][1] =
				readl_relaxed(mmdc_base
				+ normal_mmdc_settings[i][0]);
	}

	irqs_used = devm_kzalloc(dev, sizeof(u32) * num_present_cpus(),
				 GFP_KERNEL);
	if (!irqs_used)
		return -ENOMEM;

	for_each_present_cpu(cpu) {
		int irq;

		/*
		 * set up a reserved interrupt to get all
		 * the active cores into a WFE state
		 * before changing the DDR frequency.
		 */
		irq = platform_get_irq(busfreq_pdev, cpu);
		err = request_irq(irq, wait_in_wfe_irq,
			IRQF_PERCPU, "mmdc_1", NULL);
		if (err) {
			dev_err(dev,
				"Busfreq:request_irq failed %d, err = %d\n",
				irq, err);
			return err;
		}
		err = irq_set_affinity(irq, cpumask_of(cpu));
		if (err) {
			dev_err(dev,
				"Busfreq: Cannot set irq affinity irq=%d,\n",
				irq);
			return err;
		}
		irqs_used[cpu] = irq;
	}

	node = of_find_compatible_node(NULL, NULL, "mmio-sram");
	if (!node) {
		dev_err(dev, "%s: failed to find ocram node\n",
			__func__);
		return -EINVAL;
	}

	ocram_dev = of_find_device_by_node(node);
	if (!ocram_dev) {
		dev_err(dev, "failed to find ocram device!\n");
		return -EINVAL;
	}

	iram_pool = dev_get_gen_pool(&ocram_dev->dev);
	if (!iram_pool) {
		dev_err(dev, "iram pool unavailable!\n");
		return -EINVAL;
	}

	iomux_settings_size = ARRAY_SIZE(iomux_offsets_mx6q);
	iram_iomux_settings = gen_pool_alloc(iram_pool,
						(iomux_settings_size * 8) + 8);
	if (!iram_iomux_settings) {
		dev_err(dev, "unable to alloc iram for IOMUX settings!\n");
		return -ENOMEM;
	}

	/*
	 * Allocate extra space to store the number of entries in the
	 * ddr_settings plus 4 extra register entries that need
	 * to be passed to the frequency change code.
	 * sizeof(iram_ddr_settings) = sizeof(ddr_settings) +
	 *				entries in ddr_settings + 16.
	 * The last 4 entries store the addresses of the registers:
	 * CCM_BASE_ADDR
	 * MMDC_BASE_ADDR
	 * IOMUX_BASE_ADDR
	 * L2X0_BASE_ADDR
	 */
	iram_ddr_settings = gen_pool_alloc(iram_pool,
					(ddr_settings_size * 8) + 8 + 32);
	if (!iram_ddr_settings) {
		dev_err(dev, "unable to alloc iram for ddr settings!\n");
		return -ENOMEM;
	}
	i = ddr_settings_size + 1;
	iram_ddr_settings[i][0] = (unsigned long)mmdc_base;
	iram_ddr_settings[i+1][0] = (unsigned long)ccm_base;
	iram_ddr_settings[i+2][0] = (unsigned long)iomux_base;
	iram_ddr_settings[i+3][0] = (unsigned long)l2_base;

	if (cpu_is_imx6q()) {
		/* store the IOMUX settings at boot. */
		for (i = 0; i < iomux_settings_size; i++) {
			iomux_offsets_mx6q[i][1] =
				readl_relaxed(iomux_base +
					iomux_offsets_mx6q[i][0]);
			iram_iomux_settings[i+1][0] = iomux_offsets_mx6q[i][0];
			iram_iomux_settings[i+1][1] = iomux_offsets_mx6q[i][1];
		}
	}

	if (cpu_is_imx6dl()) {
		for (i = 0; i < iomux_settings_size; i++) {
			iomux_offsets_mx6dl[i][1] =
				readl_relaxed(iomux_base +
					iomux_offsets_mx6dl[i][0]);
			iram_iomux_settings[i+1][0] = iomux_offsets_mx6dl[i][0];
			iram_iomux_settings[i+1][1] = iomux_offsets_mx6dl[i][1];
		}
	}

	ddr_freq_change_iram_base = gen_pool_alloc(iram_pool,
						DDR_FREQ_CHANGE_SIZE);
	if (!ddr_freq_change_iram_base) {
		dev_err(dev, "Cannot alloc iram for ddr freq change code!\n");
		return -ENOMEM;
	}

	iram_paddr = gen_pool_virt_to_phys(iram_pool,
				(unsigned long)ddr_freq_change_iram_base);
	/*
	 * need to remap the area here since we want
	 * the memory region to be executable.
	 */
	ddr_freq_change_iram_base = __arm_ioremap(iram_paddr,
						DDR_FREQ_CHANGE_SIZE,
						MT_MEMORY_RWX_NONCACHED);
	mx6_change_ddr_freq = (void *)fncpy(ddr_freq_change_iram_base,
		&mx6_ddr3_freq_change, DDR_FREQ_CHANGE_SIZE);

	curr_ddr_rate = ddr_normal_rate;

	return 0;
}
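The gen_pool_alloc sizes above encode a small layout; a sketch of the arithmetic (the 8-byte entry width follows from the two 32-bit words per {offset, value} pair):

/* Sketch of the IRAM sizing used above; names mirror the function's own. */
size_t iomux_bytes = iomux_settings_size * 8 + 8;    /* pairs + count slot */
size_t ddr_bytes   = ddr_settings_size * 8 + 8 + 32; /* pairs + count slot */
                                                     /* + 4 base addresses */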
Example #8
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		/* fallthrough */
	case KVM_CAP_SPAPR_TCE_VFIO:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_PPC_HTM:
		r = hv_enabled &&
		    (cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM_COMP);
		break;
	default:
		r = 0;
		break;
	}
	return r;
}
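For KVM_CAP_PPC_SMT_POSSIBLE above, the return value reads as a bitmask of allowed SMT modes; a worked decode of the P9 value from the code (interpretation assumed: set bit b permits a mode of 1 << b threads):

#include <stdio.h>

int main(void)
{
	/* The P9 value returned above: modes 1, 2, 4 and 8 all allowed. */
	int modes = 8 | 4 | 2 | 1;

	for (int b = 0; b < 8; b++)
		if (modes & (1 << b))
			printf("SMT-%d allowed\n", 1 << b);
	return 0;
}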
Example #9
static void crash_kexec_prepare_cpus(int cpu)
{
	unsigned int msecs;
	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
	int tries = 0;
	int (*old_handler)(struct pt_regs *regs);

	printk(KERN_EMERG "Sending IPI to other CPUs\n");

	if (crash_wake_offline)
		ncpus = num_present_cpus() - 1;

	crash_send_ipi(crash_ipi_callback);
	smp_wmb();

again:
	/*
	 * FIXME: Until we will have the way to stop other CPUs reliably,
	 * the crash CPU will send an IPI and wait for other CPUs to
	 * respond.
	 */
	msecs = IPI_TIMEOUT;
	while ((atomic_read(&cpus_in_crash) < ncpus) && (--msecs > 0))
		mdelay(1);

	/* Would it be better to replace the trap vector here? */

	if (atomic_read(&cpus_in_crash) >= ncpus) {
		printk(KERN_EMERG "IPI complete\n");
		return;
	}

	printk(KERN_EMERG "ERROR: %d cpu(s) not responding\n",
		ncpus - atomic_read(&cpus_in_crash));

	/*
	 * If we have a panic timeout set then we can't wait indefinitely
	 * for someone to activate system reset. We also give up on the
	 * second time through if system reset fails to work.
	 */
	if ((panic_timeout > 0) || (tries > 0))
		return;

	/*
	 * A system reset will cause all CPUs to take an 0x100 exception.
	 * The primary CPU returns here via setjmp, and the secondary
	 * CPUs reexecute the crash_kexec_secondary path.
	 */
	old_handler = __debugger;
	__debugger = handle_fault;
	crash_shutdown_cpu = smp_processor_id();

	if (setjmp(crash_shutdown_buf) == 0) {
		printk(KERN_EMERG "Activate system reset (dumprestart) "
				  "to stop other cpu(s)\n");

		/*
		 * A system reset will force all CPUs to execute the
		 * crash code again. We need to reset cpus_in_crash so we
		 * wait for everyone to do this.
		 */
		atomic_set(&cpus_in_crash, 0);
		smp_mb();

		while (atomic_read(&cpus_in_crash) < ncpus)
			cpu_relax();
	}

	crash_shutdown_cpu = -1;
	__debugger = old_handler;

	tries++;
	goto again;
}
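A worked example of the two responder counts above, with illustrative figures for a 16-way host that has 4 CPUs offlined:

/* Illustrative numbers only. */
ncpus = num_online_cpus() - 1;           /* 12 - 1 = 11 CPUs must check in */
if (crash_wake_offline)
	ncpus = num_present_cpus() - 1;  /* 16 - 1 = 15, offline CPUs too  */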
Example #10
/**
 * setup_cpu_maps - initialize the following cpu maps:
 *                  cpu_possible_map
 *                  cpu_present_map
 *                  cpu_sibling_map
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_map as they come up.
 *
 * This function is valid only for Open Firmware systems.  finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 */
static void __init setup_cpu_maps(void)
{
    struct device_node *dn = NULL;
    int cpu = 0;

    check_smt_enabled();

    while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
        u32 *intserv;
        int j, len = sizeof(u32), nthreads;

        intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
                                      &len);
        if (!intserv)
            intserv = (u32 *)get_property(dn, "reg", NULL);

        nthreads = len / sizeof(u32);

        for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
            /*
             * Only spin up secondary threads if SMT is enabled.
             * We must leave space in the logical map for the
             * threads.  cpu_present_map is fixed up later
             * after smp init.
             */
            if (j == 0 || smt_enabled_at_boot) {
                cpu_set(cpu, cpu_present_map);
            }
            set_hard_smp_processor_id(cpu, intserv[j]);
            cpu_set(cpu, cpu_possible_map);
            cpu++;
        }
    }

    /*
     * On pSeries LPAR, we need to know how many cpus
     * could possibly be added to this partition.
     */
    if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
            (dn = of_find_node_by_path("/rtas"))) {
        int num_addr_cell, num_size_cell, maxcpus;
        unsigned int *ireg;

        num_addr_cell = prom_n_addr_cells(dn);
        num_size_cell = prom_n_size_cells(dn);

        ireg = (unsigned int *)
               get_property(dn, "ibm,lrdr-capacity", NULL);

        if (!ireg)
            goto out;

        maxcpus = ireg[num_addr_cell + num_size_cell];

        /* Double maxcpus for processors which have SMT capability */
        if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
            maxcpus *= 2;

        if (maxcpus > NR_CPUS) {
            printk(KERN_WARNING
                   "Partition configured for %d cpus, "
                   "operating system maximum is %d.\n",
                   maxcpus, NR_CPUS);
            maxcpus = NR_CPUS;
        } else
            printk(KERN_INFO "Partition configured for %d cpus.\n",
                   maxcpus);

        for (cpu = 0; cpu < maxcpus; cpu++)
            cpu_set(cpu, cpu_possible_map);
out:
        of_node_put(dn);
    }

    /*
     * Do the sibling map; assume only two threads per processor.
     */
    for_each_cpu(cpu) {
        cpu_set(cpu, cpu_sibling_map[cpu]);
        if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
            cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
    }

    systemcfg->processorCount = num_present_cpus();
}
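The sibling pairing at the end relies on the thread ids of a two-thread core differing only in bit 0; a quick worked example:

/* With two threads per core, cpu ^ 0x1 names the partner thread. */
int cpu = 5;
int sibling = cpu ^ 0x1;   /* 4: logical CPUs {4, 5} share a core */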