/* Move the IPA IN/OUT event ring MSI vectors to the given CPU */
static void mhi_move_interrupts(mhi_device_ctxt *mhi_dev_ctxt, u32 cpu)
{
	u32 irq_to_affin = 0;

	MHI_GET_EVENT_RING_INFO(EVENT_RING_MSI_VEC,
				mhi_dev_ctxt->ev_ring_props[IPA_IN_EV_RING],
				irq_to_affin);
	irq_to_affin += mhi_dev_ctxt->dev_props->irq_base;
	irq_set_affinity(irq_to_affin, get_cpu_mask(cpu));

	MHI_GET_EVENT_RING_INFO(EVENT_RING_MSI_VEC,
				mhi_dev_ctxt->ev_ring_props[IPA_OUT_EV_RING],
				irq_to_affin);
	irq_to_affin += mhi_dev_ctxt->dev_props->irq_base;
	irq_set_affinity(irq_to_affin, get_cpu_mask(cpu));
}
static int boost_notify(struct notifier_block *nb, unsigned long action,
			void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting. On
	 * the CPU_UP path we simply keep the boost-disable flag in
	 * sync with the current global state.
	 */
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
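/*
 * Context for the snippet above: boost_notify() only runs once it is
 * hooked into the CPU hotplug notifier chain. A minimal registration
 * sketch for kernels of this era (before the hotplug state machine)
 * follows; the boost_nb name and the init wrapper are assumptions for
 * illustration, not code taken from the driver above.
 */
static struct notifier_block boost_nb = {
	.notifier_call = boost_notify,
};

static void __init boost_notifier_init_sketch(void)
{
	/* Deliver CPU_UP_PREPARE/CPU_DOWN_PREPARE events to boost_notify() */
	register_cpu_notifier(&boost_nb);
}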
int __init arch_smp_start_cpu(u32 cpu)
{
	const struct vmm_cpumask *mask = get_cpu_mask(cpu);

	/* Wakeup target cpu from wfe/wfi by sending an IPI */
	gic_raise_softirq(mask, 0);

	return VMM_OK;
}
static int __init scu_cpu_boot(unsigned int cpu)
{
	const struct vmm_cpumask *mask = get_cpu_mask(cpu);

	/* Wakeup target cpu from wfe/wfi by sending an IPI */
	gic_raise_softirq(mask, 0);

	return VMM_OK;
}
static int __init scu_cpu_boot(unsigned int cpu)
{
	const struct vmm_cpumask *mask = get_cpu_mask(cpu);

	/* Wakeup target cpu from wfe/wfi by sending an IPI */
	vmm_host_irq_raise(0, mask);

	return VMM_OK;
}
static int nitrox_request_irqs(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct msix_entry *msix_ent = ndev->msix.entries;
	int nr_ring_vectors, i = 0, ring, cpu, ret;
	char *name;

	/*
	 * PF MSI-X vectors
	 *
	 * Entry 0: NPS PKT ring 0
	 * Entry 1: AQMQ ring 0
	 * Entry 2: ZQM ring 0
	 * Entry 3: NPS PKT ring 1
	 * ....
	 * Entry 192: NPS_CORE_INT_ACTIVE
	 */
	nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS;

	/* request irq for pkt ring/ports only */
	while (i < nr_ring_vectors) {
		name = *(ndev->msix.names + i);
		ring = (i / NR_RING_VECTORS);
		snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
			 ndev->idx, ring);

		ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0,
				  name, &ndev->bh.slc[ring]);
		if (ret) {
			dev_err(&pdev->dev, "failed to get irq %d for %s\n",
				msix_ent[i].vector, name);
			return ret;
		}
		cpu = ring % num_online_cpus();
		irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));

		set_bit(i, ndev->msix.irqs);
		i += NR_RING_VECTORS;
	}

	/* Request IRQ for NPS_CORE_INT_ACTIVE */
	name = *(ndev->msix.names + i);
	snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
	ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to get irq %d for %s\n",
			msix_ent[i].vector, name);
		return ret;
	}
	set_bit(i, ndev->msix.irqs);

	return 0;
}
int caam_qi_shutdown(struct device *qidev)
{
	struct caam_qi_priv *priv = dev_get_drvdata(qidev);
	int i, ret;
	const cpumask_t *cpus = qman_affine_cpus();
	struct cpumask old_cpumask = *tsk_cpus_allowed(current);

	for_each_cpu(i, cpus) {
		struct napi_struct *irqtask;

		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
		napi_disable(irqtask);
		netif_napi_del(irqtask);

		if (kill_fq(qidev, &per_cpu(pcpu_qipriv.rsp_fq, i)))
			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
	}

	/*
	 * The QMAN driver requires CGRs to be deleted from the same CPU
	 * on which they were instantiated. Hence we make module removal
	 * execute on the same CPU on which it was originally inserted.
	 */
	set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));

	ret = qman_delete_cgr(&priv->rsp_cgr);
	if (ret)
		dev_err(qidev, "Delete response CGR failed: %d\n", ret);
	else
		qman_release_cgrid(priv->rsp_cgr.cgrid);

	if (qi_cache)
		kmem_cache_destroy(qi_cache);

	/* Now that we're done with the CGRs, restore the cpus allowed mask */
	set_cpus_allowed_ptr(current, &old_cpumask);

	platform_device_unregister(priv->qi_pdev);
	return ret;
}
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int ret, i;
	char *name;

	/* Request msix irq for all banks */
	for (i = 0; i < hw_data->num_banks; i++) {
		struct adf_etr_bank_data *bank = &etr_data->banks[i];
		unsigned int cpu, cpus = num_online_cpus();

		name = *(pci_dev_info->msix_entries.names + i);
		snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
			 "qat%d-bundle%d", accel_dev->accel_id, i);
		ret = request_irq(msixe[i].vector,
				  adf_msix_isr_bundle, 0, name, bank);
		if (ret) {
			pr_err("QAT: failed to enable irq %d for %s\n",
			       msixe[i].vector, name);
			return ret;
		}

		/* Spread bank interrupts across the online CPUs */
		cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus;
		irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu));
	}

	/* Request msix irq for AE */
	name = *(pci_dev_info->msix_entries.names + i);
	snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat%d-ae-cluster", accel_dev->accel_id);
	ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
	if (ret) {
		pr_err("QAT: failed to enable irq %d for %s\n",
		       msixe[i].vector, name);
		return ret;
	}
	return ret;
}
void active_cpu(uint32_t active_cpu)
{
	cpu_set_t cpu_mask;
	int cpu_count = sysconf(_SC_NPROCESSORS_ONLN);

	if (cpu_count < 1) {
		return;
	}

	active_cpu = active_cpu % cpu_count;

	get_cpu_mask(0, &cpu_mask);
	//print_cpu_mask(cpu_mask);

	/* Pin the calling process to the selected CPU only */
	CPU_ZERO(&cpu_mask);
	CPU_SET(active_cpu, &cpu_mask);
	set_cpu_mask(0, &cpu_mask);

	//get_cpu_mask(0, &cpu_mask);
	//print_cpu_mask(cpu_mask);
}
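/*
 * The get_cpu_mask()/set_cpu_mask() helpers called above are not part
 * of this snippet. A minimal sketch, assuming they are thin wrappers
 * around the glibc sched_getaffinity()/sched_setaffinity() calls
 * (where a pid of 0 means the calling thread):
 */
#define _GNU_SOURCE
#include <sched.h>
#include <sys/types.h>

static int get_cpu_mask(pid_t pid, cpu_set_t *mask)
{
	/* Fetch the task's current CPU affinity mask */
	return sched_getaffinity(pid, sizeof(*mask), mask);
}

static int set_cpu_mask(pid_t pid, cpu_set_t *mask)
{
	/* Restrict the task to the CPUs set in the mask */
	return sched_setaffinity(pid, sizeof(*mask), mask);
}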
static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	unsigned int cpu;
	int ret;

	snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
		 PCI_FUNC(pdev->devfn));
	ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
			  (void *)accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
			accel_dev->vf.irq_name);
		return ret;
	}
	cpu = accel_dev->accel_id % num_online_cpus();
	irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));

	return ret;
}
int __init arch_smp_start_cpu(u32 cpu)
{
	const struct vmm_cpumask *mask;
	int rc;
	struct vmm_devtree_node *node;
	virtual_addr_t ca9_pmu_base;

	if (cpu == 0) {
		/* Nothing to do for first CPU */
		return VMM_OK;
	}

	/* Get the PMU node in the dev tree */
	node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
				   VMM_DEVTREE_HOSTINFO_NODE_NAME
				   VMM_DEVTREE_PATH_SEPARATOR_STRING "pmu");
	if (!node) {
		return VMM_EFAIL;
	}

	/* map the PMU physical address to virtual address */
	rc = vmm_devtree_regmap(node, &ca9_pmu_base, 0);
	if (rc) {
		return rc;
	}

	mask = get_cpu_mask(cpu);

	/* Write the entry address for the secondary cpus */
	vmm_writel((u32)_load_start, (void *)ca9_pmu_base + 0x814);

	/* unmap the PMU node */
	rc = vmm_devtree_regunmap(node, ca9_pmu_base, 0);

	/* Wakeup target cpu from wfe/wfi by sending an IPI */
	gic_raise_softirq(mask, 0);

	return rc;
}
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct net_device *netdev = priv->netdev;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	int err;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv    = priv;
	c->ix      = ix;
	c->cpu     = cpu;
	c->pdev    = &priv->mdev->pdev->dev;
	c->netdev  = priv->netdev;
	c->mkey_be = cpu_to_be32(priv->mr.key);
	c->num_tc  = priv->num_tc;

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
			    priv->params.rx_cq_moderation_usec,
			    priv->params.rx_cq_moderation_pkts);
	if (err)
		goto err_close_tx_cqs;
	c->rq.cq.sqrq = &c->rq;

	napi_enable(&c->napi);

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);

	*cp = c;

	return 0;

err_close_sqs:
	mlx5e_close_sqs(c);

err_disable_napi:
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_napi_del:
	netif_napi_del(&c->napi);
	kfree(c);

	return err;
}
}

extern void start_kernel(void);

static int __init start_kernel_proc(void *unused)
{
	int pid;

	block_signals();
	pid = os_getpid();

	cpu_tasks[0].pid = pid;
	cpu_tasks[0].task = current;
#ifdef CONFIG_SMP
	init_cpu_online(get_cpu_mask(0));
#endif
	start_kernel();
	return 0;
}

extern int userspace_pid[];

extern char cpu0_irqstack[];