Code example #1
File: nitrox_isr.c Project: EMFPGA/linux_media
static void nitrox_disable_msix(struct nitrox_device *ndev)
{
	struct msix_entry *msix_ent = ndev->msix.entries;
	char **names = ndev->msix.names;
	int i = 0, ring, nr_ring_vectors;

	nr_ring_vectors = ndev->msix.nr_entries - 1;

	/* clear pkt ring irqs */
	while (i < nr_ring_vectors) {
		if (test_and_clear_bit(i, ndev->msix.irqs)) {
			ring = (i / NR_RING_VECTORS);
			irq_set_affinity_hint(msix_ent[i].vector, NULL);
			free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
		}
		i += NR_RING_VECTORS;
	}
	irq_set_affinity_hint(msix_ent[i].vector, NULL);
	free_irq(msix_ent[i].vector, ndev);
	clear_bit(i, ndev->msix.irqs);

	kfree(ndev->msix.entries);
	for (i = 0; i < ndev->msix.nr_entries; i++)
		kfree(*(names + i));

	kfree(names);
	pci_disable_msix(ndev->pdev);
}
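
Note on example #1 (the same pairing recurs in the examples below): every irq_set_affinity_hint() call made at request time is mirrored by irq_set_affinity_hint(irq, NULL) before free_irq(), so the kernel never keeps a hint pointing at state that is about to be freed. A minimal sketch of that pairing (my_dev, my_dev_isr and the single-vector layout are hypothetical, not taken from the example above):

static int my_dev_setup_irq(struct my_dev *dev, int cpu)
{
	int ret;

	ret = request_irq(dev->irq, my_dev_isr, 0, "my_dev", dev);
	if (ret)
		return ret;

	/* A hint only: irqbalance may honor it, the kernel won't enforce it. */
	irq_set_affinity_hint(dev->irq, get_cpu_mask(cpu));
	return 0;
}

static void my_dev_teardown_irq(struct my_dev *dev)
{
	/* Clear the hint before the irq (and its mask) goes away. */
	irq_set_affinity_hint(dev->irq, NULL);
	free_irq(dev->irq, dev);
}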
Code example #2
File: adf_isr.c Project: ravi-vid/linux-xlnx
static void adf_free_irqs(struct adf_accel_dev *accel_dev)
{
    struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
    struct adf_hw_device_data *hw_data = accel_dev->hw_device;
    struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
    struct adf_etr_data *etr_data = accel_dev->transport;
    int i;

    for (i = 0; i < hw_data->num_banks; i++) {
        irq_set_affinity_hint(msixe[i].vector, NULL);
        free_irq(msixe[i].vector, &etr_data->banks[i]);
    }
    irq_set_affinity_hint(msixe[i].vector, NULL);
    free_irq(msixe[i].vector, accel_dev);
}
Code example #3
void __init x3_bpc_mgmt_init(void)
{
#ifdef CONFIG_SMP
#if defined(CONFIG_MACH_VU10)
	int tegra_bpc_gpio;
	int int_gpio;

	if (x3_get_hw_rev_pcb_version() == hw_rev_pcb_type_A)
		tegra_bpc_gpio = TEGRA_GPIO_PJ6;
	else
		tegra_bpc_gpio = TEGRA_GPIO_PX6;

	bpc_mgmt_platform_data.gpio_trigger = tegra_bpc_gpio;

	int_gpio = TEGRA_GPIO_TO_IRQ(tegra_bpc_gpio);
	tegra_gpio_enable(tegra_bpc_gpio);
#else
	int int_gpio = TEGRA_GPIO_TO_IRQ(TEGRA_BPC_TRIGGER);

	tegra_gpio_enable(TEGRA_BPC_TRIGGER);

#endif
	cpumask_setall(&(bpc_mgmt_platform_data.affinity_mask));
	irq_set_affinity_hint(int_gpio,
				&(bpc_mgmt_platform_data.affinity_mask));
	irq_set_affinity(int_gpio, &(bpc_mgmt_platform_data.affinity_mask));
#endif
	platform_device_register(&x3_bpc_mgmt_device);

	return;
}
Code example #4
File: knav_qmss_queue.c Project: Announcement/linux
static int knav_queue_setup_irq(struct knav_range_info *range,
			  struct knav_queue_inst *inst)
{
	unsigned queue = inst->id - range->queue_base;
	unsigned long cpu_map;
	int ret = 0, irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		cpu_map = range->irqs[queue].cpu_map;
		ret = request_irq(irq, knav_queue_int_handler, 0,
					inst->irq_name, inst);
		if (ret)
			return ret;
		disable_irq(irq);
		if (cpu_map) {
			ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}
	return ret;
}
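
Note on example #4: to_cpumask() simply reinterprets an unsigned long bitmap as a struct cpumask *, and irq_set_affinity_hint() stores that pointer rather than copying the bits, so the bitmap must stay valid for as long as the hint is installed. A minimal sketch using static storage (my_cpu_map and my_hint_irq are hypothetical names):

/* CPUs 0 and 2; static so the pointer handed to the kernel stays valid. */
static unsigned long my_cpu_map = BIT(0) | BIT(2);

static int my_hint_irq(int irq)
{
	return irq_set_affinity_hint(irq, to_cpumask(&my_cpu_map));
}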
Code example #5
File: adf_vf_isr.c Project: AK101111/linux
/**
 * adf_vf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function frees interrupts for acceleration device virtual function.
 */
void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

	irq_set_affinity_hint(pdev->irq, NULL);
	free_irq(pdev->irq, (void *)accel_dev);
	adf_cleanup_bh(accel_dev);
	adf_cleanup_pf2vf_bh(accel_dev);
	adf_disable_msi(accel_dev);
}
Code example #6
File: knav_qmss_queue.c Project: Announcement/linux
static void knav_queue_free_irq(struct knav_queue_inst *inst)
{
	struct knav_range_info *range = inst->range;
	unsigned queue = inst->id - inst->range->queue_base;
	int irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, inst);
	}
}
Code example #7
File: en_cq.c Project: arunmdavid/linux
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	napi_disable(&cq->napi);
	if (!cq->is_tx) {
		napi_hash_del(&cq->napi);
		synchronize_rcu();
		irq_set_affinity_hint(cq->mcq.irq, NULL);
	}
	netif_napi_del(&cq->napi);

	mlx4_cq_free(priv->mdev->dev, &cq->mcq);
}
Code example #8
File: nitrox_isr.c Project: EMFPGA/linux_media
static int nitrox_request_irqs(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct msix_entry *msix_ent = ndev->msix.entries;
	int nr_ring_vectors, i = 0, ring, cpu, ret;
	char *name;

	/*
	 * PF MSI-X vectors
	 *
	 * Entry 0: NPS PKT ring 0
	 * Entry 1: AQMQ ring 0
	 * Entry 2: ZQM ring 0
	 * Entry 3: NPS PKT ring 1
	 * ....
	 * Entry 192: NPS_CORE_INT_ACTIVE
	 */
	nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS;

	/* request irq for pkt ring/ports only */
	while (i < nr_ring_vectors) {
		name = *(ndev->msix.names + i);
		ring = (i / NR_RING_VECTORS);
		snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
			 ndev->idx, ring);

		ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0,
				  name, &ndev->bh.slc[ring]);
		if (ret) {
			dev_err(&pdev->dev, "failed to get irq %d for %s\n",
				msix_ent[i].vector, name);
			return ret;
		}
		cpu = ring % num_online_cpus();
		irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));

		set_bit(i, ndev->msix.irqs);
		i += NR_RING_VECTORS;
	}

	/* Request IRQ for NPS_CORE_INT_ACTIVE */
	name = *(ndev->msix.names + i);
	snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
	ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to get irq %d for %s\n",
			msix_ent[i].vector, name);
		return ret;
	}
	set_bit(i, ndev->msix.irqs);

	return 0;
}
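
Note on example #8: the spread policy is just cpu = ring % num_online_cpus(), i.e. ring vectors are hinted round-robin across the online CPUs (examples #11 and #12 below do the same per bank/device). Distilled into a standalone helper (hypothetical names):

/* Hint each ring's vector at a different online CPU, round-robin. */
static void my_spread_ring_irqs(const unsigned int *irqs, int nr_rings)
{
	int ring;

	for (ring = 0; ring < nr_rings; ring++)
		irq_set_affinity_hint(irqs[ring],
				      get_cpu_mask(ring % num_online_cpus()));
}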
Code example #9
File: aq_pci_func.c Project: AlexShiLucky/linux
void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
	struct pci_dev *pdev = self->pdev;
	unsigned int i;

	for (i = 32U; i--;) {
		if (!((1U << i) & self->msix_entry_mask))
			continue;

		if (pdev->msix_enabled)
			irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
		free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
		self->msix_entry_mask &= ~(1U << i);
	}
}
Code example #10
void __init enterprise_bpc_mgmt_init(void)
{
	int int_gpio = TEGRA_GPIO_TO_IRQ(TEGRA_BPC_TRIGGER);

	tegra_gpio_enable(TEGRA_BPC_TRIGGER);

#ifdef CONFIG_SMP
	cpumask_setall(&(bpc_mgmt_platform_data.affinity_mask));
	irq_set_affinity_hint(int_gpio,
				&(bpc_mgmt_platform_data.affinity_mask));
	irq_set_affinity(int_gpio, &(bpc_mgmt_platform_data.affinity_mask));
#endif
	platform_device_register(&enterprise_bpc_mgmt_device);

	return;
}
Code example #11
File: adf_isr.c Project: ravi-vid/linux-xlnx
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
{
    struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
    struct adf_hw_device_data *hw_data = accel_dev->hw_device;
    struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
    struct adf_etr_data *etr_data = accel_dev->transport;
    int ret, i;
    char *name;

    /* Request msix irq for all banks */
    for (i = 0; i < hw_data->num_banks; i++) {
        struct adf_etr_bank_data *bank = &etr_data->banks[i];
        unsigned int cpu, cpus = num_online_cpus();

        name = *(pci_dev_info->msix_entries.names + i);
        snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
                 "qat%d-bundle%d", accel_dev->accel_id, i);
        ret = request_irq(msixe[i].vector,
                          adf_msix_isr_bundle, 0, name, bank);
        if (ret) {
            pr_err("QAT: failed to enable irq %d for %s\n",
                   msixe[i].vector, name);
            return ret;
        }

        cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus;
        irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu));
    }

    /* Request msix irq for AE */
    name = *(pci_dev_info->msix_entries.names + i);
    snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
             "qat%d-ae-cluster", accel_dev->accel_id);
    ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
    if (ret) {
        pr_err("QAT: failed to enable irq %d, for %s\n",
               msixe[i].vector, name);
        return ret;
    }
    return ret;
}
Code example #12
File: adf_vf_isr.c Project: AK101111/linux
static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
	unsigned int cpu;
	int ret;

	snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
		 PCI_FUNC(pdev->devfn));
	ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
			  (void *)accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
			accel_dev->vf.irq_name);
		return ret;
	}
	cpu = accel_dev->accel_id % num_online_cpus();
	irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));

	return ret;
}
Code example #13
File: aq_pci_func.c Project: Anjali05/linux
int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
			  char *name, void *aq_vec, cpumask_t *affinity_mask)
{
	struct pci_dev *pdev = self->pdev;
	int err;

	if (pdev->msix_enabled || pdev->msi_enabled)
		err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
				  name, aq_vec);
	else
		err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
				  IRQF_SHARED, name, aq_vec);

	if (err >= 0) {
		self->msix_entry_mask |= (1 << i);
		self->aq_vec[i] = aq_vec;

		if (pdev->msix_enabled)
			irq_set_affinity_hint(pci_irq_vector(pdev, i),
					      affinity_mask);
	}
	return err;
}
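
Note on examples #9 and #13: they address vectors through pci_irq_vector() rather than a driver-held msix_entry array, i.e. the pci_alloc_irq_vectors() style of MSI/MSI-X setup. A minimal end-to-end sketch under that API (my_isr and the single shared ctx are hypothetical; real drivers usually pass a per-vector context, as example #13 does):

static int my_setup_vectors(struct pci_dev *pdev, void *ctx, int nvec)
{
	int i, got, ret;

	/* Let the PCI core pick MSI-X if available, else fall back to MSI. */
	got = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (got < 0)
		return got;

	for (i = 0; i < got; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), my_isr, 0,
				  "my-dev", ctx);
		if (ret)
			goto err_unwind;
		irq_set_affinity_hint(pci_irq_vector(pdev, i),
				      get_cpu_mask(i % num_online_cpus()));
	}
	return 0;

err_unwind:
	while (i--) {
		irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
		free_irq(pci_irq_vector(pdev, i), ctx);
	}
	pci_free_irq_vectors(pdev);
	return ret;
}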
Code example #14
File: en_cq.c Project: arunmdavid/linux
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	char name[25];
	int timestamp_en = 0;
	struct cpu_rmap *rmap =
#ifdef CONFIG_RFS_ACCEL
		priv->dev->rx_cpu_rmap;
#else
		NULL;
#endif

	cq->dev = mdev->pndev[priv->port];
	cq->mcq.set_ci_db  = cq->wqres.db.db;
	cq->mcq.arm_db     = cq->wqres.db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db    = 0;
	memset(cq->buf, 0, cq->buf_size);

	if (cq->is_tx == RX) {
		if (mdev->dev->caps.comp_pool) {
			if (!cq->vector) {
				sprintf(name, "%s-%d", priv->dev->name,
					cq->ring);
				/* Set IRQ for specific name (per ring) */
				if (mlx4_assign_eq(mdev->dev, name, rmap,
						   &cq->vector)) {
					cq->vector = (cq->ring + 1 + priv->port)
					    % mdev->dev->caps.num_comp_vectors;
					mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
						  name);
				}

				cq->irq_desc =
					irq_to_desc(mlx4_eq_get_irq(mdev->dev,
								    cq->vector));
			}
		} else {
			cq->vector = (cq->ring + 1 + priv->port) %
				mdev->dev->caps.num_comp_vectors;
		}
	} else {
		/* For TX we use the same irq per ring
		 * we assigned for the RX.
		 */
		struct mlx4_en_cq *rx_cq;

		cq_idx = cq_idx % priv->rx_ring_num;
		rx_cq = priv->rx_cq[cq_idx];
		cq->vector = rx_cq->vector;
	}

	if (!cq->is_tx)
		cq->size = priv->rx_ring[cq->ring]->actual_size;

	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
		timestamp_en = 1;

	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
			    cq->vector, 0, timestamp_en);
	if (err)
		return err;

	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
	cq->mcq.event = mlx4_en_cq_event;

	if (cq->is_tx) {
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
			       NAPI_POLL_WEIGHT);
	} else {
		struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];

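		/* Best-effort: a failed hint below only warns and does
		 * not abort CQ activation.
		 */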
		err = irq_set_affinity_hint(cq->mcq.irq,
					    ring->affinity_mask);
		if (err)
			mlx4_warn(mdev, "Failed setting affinity hint\n");

		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
		napi_hash_add(&cq->napi);
	}

	napi_enable(&cq->napi);

	return 0;
}
Code example #15
File: lio_core.c Project: AlexShiLucky/linux
/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 *
 *  Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
{
	struct msix_entry *msix_entries;
	char *queue_irq_names = NULL;
	int i, num_interrupts = 0;
	int num_alloc_ioq_vectors;
	char *aux_irq_name = NULL;
	int num_ioq_vectors;
	int irqret, err;

	if (oct->msix_on) {
		oct->num_msix_irqs = num_ioqs;
		if (OCTEON_CN23XX_PF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;

			/* one non ioq interrupt for handling
			 * sli_mac_pf_int_sum
			 */
			oct->num_msix_irqs += 1;
		} else if (OCTEON_CN23XX_VF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
		}

		/* allocate storage for the names assigned to each irq */
		oct->irq_name_storage =
			kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage) {
			dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
			return -ENOMEM;
		}

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			aux_irq_name = &queue_irq_names
				[IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];

		oct->msix_entries = kcalloc(oct->num_msix_irqs,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!oct->msix_entries) {
			dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return -ENOMEM;
		}

		msix_entries = (struct msix_entry *)oct->msix_entries;

		/* Assumption: PF MSI-X vector numbers start at pf_srn, with
		 * the last entry at trs, rather than starting from 0. If
		 * that changes, update this code.
		 */
		if (OCTEON_CN23XX_PF(oct)) {
			for (i = 0; i < oct->num_msix_irqs - 1; i++)
				msix_entries[i].entry =
					oct->sriov_info.pf_srn + i;

			msix_entries[oct->num_msix_irqs - 1].entry =
				oct->sriov_info.trs;
		} else if (OCTEON_CN23XX_VF(oct)) {
			for (i = 0; i < oct->num_msix_irqs; i++)
				msix_entries[i].entry = i;
		}
		num_alloc_ioq_vectors = pci_enable_msix_range(
						oct->pci_dev, msix_entries,
						oct->num_msix_irqs,
						oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return num_alloc_ioq_vectors;
		}

		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;
		/** For PF, there is one non-ioq interrupt handler */
		if (OCTEON_CN23XX_PF(oct)) {
			num_ioq_vectors -= 1;

			snprintf(aux_irq_name, INTRNAMSIZ,
				 "LiquidIO%u-pf%u-aux", oct->octeon_id,
				 oct->pf_num);
			irqret = request_irq(
					msix_entries[num_ioq_vectors].vector,
					liquidio_legacy_intr_handler, 0,
					aux_irq_name, oct);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
		}
		for (i = 0; i < num_ioq_vectors; i++) {
			if (OCTEON_CN23XX_PF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
					 oct->octeon_id, oct->pf_num, i);

			if (OCTEON_CN23XX_VF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
					 oct->octeon_id, oct->vf_num, i);

			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     &queue_irq_names[IRQ_NAME_OFF(i)],
					     &oct->ioq_vector[i]);

			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				/* Freeing the non-ioq irq vector here. */
				free_irq(msix_entries[num_ioq_vectors].vector,
					 oct);

				while (i) {
					i--;
					/* clearing affinity mask. */
					irq_set_affinity_hint(
						      msix_entries[i].vector,
						      NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(msix_entries[i].vector,
					      &oct->ioq_vector[i].affinity_mask
					      );
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		/* allocate storage for the names assigned to the irq */
		oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage)
			return -ENOMEM;

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-pf%u-rxtx-%u",
				 oct->octeon_id, oct->pf_num, 0);

		if (OCTEON_CN23XX_VF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-vf%u-rxtx-%u",
				 oct->octeon_id, oct->vf_num, 0);

		irqret = request_irq(oct->pci_dev->irq,
				     liquidio_legacy_intr_handler,
				     IRQF_SHARED,
				     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
		if (irqret) {
			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
				irqret);
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return irqret;
		}
	}
	return 0;
}
Code example #16
File: bcm-flexrm-mailbox.c Project: Lyude/linux
static int flexrm_startup(struct mbox_chan *chan)
{
	u64 d;
	u32 val, off;
	int ret = 0;
	dma_addr_t next_addr;
	struct flexrm_ring *ring = chan->con_priv;

	/* Allocate BD memory */
	ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
				       GFP_KERNEL, &ring->bd_dma_base);
	if (!ring->bd_base) {
		dev_err(ring->mbox->dev,
			"can't allocate BD memory for ring%d\n",
			ring->num);
		ret = -ENOMEM;
		goto fail;
	}

	/* Configure next table pointer entries in BD memory */
	for (off = 0; off < RING_BD_SIZE; off += RING_DESC_SIZE) {
		next_addr = off + RING_DESC_SIZE;
		if (next_addr == RING_BD_SIZE)
			next_addr = 0;
		next_addr += ring->bd_dma_base;
		if (RING_BD_ALIGN_CHECK(next_addr))
			d = flexrm_next_table_desc(RING_BD_TOGGLE_VALID(off),
						    next_addr);
		else
			d = flexrm_null_desc(RING_BD_TOGGLE_INVALID(off));
		flexrm_write_desc(ring->bd_base + off, d);
	}

	/* Allocate completion memory */
	ring->cmpl_base = dma_pool_zalloc(ring->mbox->cmpl_pool,
					 GFP_KERNEL, &ring->cmpl_dma_base);
	if (!ring->cmpl_base) {
		dev_err(ring->mbox->dev,
			"can't allocate completion memory for ring%d\n",
			ring->num);
		ret = -ENOMEM;
		goto fail_free_bd_memory;
	}

	/* Request IRQ */
	if (ring->irq == UINT_MAX) {
		dev_err(ring->mbox->dev,
			"ring%d IRQ not available\n", ring->num);
		ret = -ENODEV;
		goto fail_free_cmpl_memory;
	}
	ret = request_threaded_irq(ring->irq,
				   flexrm_irq_event,
				   flexrm_irq_thread,
				   0, dev_name(ring->mbox->dev), ring);
	if (ret) {
		dev_err(ring->mbox->dev,
			"failed to request ring%d IRQ\n", ring->num);
		goto fail_free_cmpl_memory;
	}
	ring->irq_requested = true;

	/* Set IRQ affinity hint */
	ring->irq_aff_hint = CPU_MASK_NONE;
	val = ring->mbox->num_rings;
	val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
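	/* val is rings-per-CPU; e.g. 8 rings on 2 online CPUs gives val = 4,
	 * so rings 0-3 hint CPU 0 and rings 4-7 hint CPU 1.
	 */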
	cpumask_set_cpu((ring->num / val) % num_online_cpus(),
			&ring->irq_aff_hint);
	ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
	if (ret) {
		dev_err(ring->mbox->dev,
			"failed to set IRQ affinity hint for ring%d\n",
			ring->num);
		goto fail_free_irq;
	}

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Program BD start address */
	val = BD_START_ADDR_VALUE(ring->bd_dma_base);
	writel_relaxed(val, ring->regs + RING_BD_START_ADDR);

	/* BD write pointer will be same as HW write pointer */
	ring->bd_write_offset =
			readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
	ring->bd_write_offset *= RING_DESC_SIZE;

	/* Program completion start address */
	val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
	writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);

	/* Completion read pointer will be same as HW write pointer */
	ring->cmpl_read_offset =
			readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	ring->cmpl_read_offset *= RING_DESC_SIZE;

	/* Read ring Tx, Rx, and Outstanding counts to clear */
	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
	readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
	readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
	readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);

	/* Configure RING_MSI_CONTROL */
	val = 0;
	val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
	val |= BIT(MSI_ENABLE_SHIFT);
	val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
	writel_relaxed(val, ring->regs + RING_MSI_CONTROL);

	/* Enable/activate ring */
	val = BIT(CONTROL_ACTIVE_SHIFT);
	writel_relaxed(val, ring->regs + RING_CONTROL);

	/* Reset stats to zero */
	atomic_set(&ring->msg_send_count, 0);
	atomic_set(&ring->msg_cmpl_count, 0);

	return 0;

fail_free_irq:
	free_irq(ring->irq, ring);
	ring->irq_requested = false;
fail_free_cmpl_memory:
	dma_pool_free(ring->mbox->cmpl_pool,
		      ring->cmpl_base, ring->cmpl_dma_base);
	ring->cmpl_base = NULL;
fail_free_bd_memory:
	dma_pool_free(ring->mbox->bd_pool,
		      ring->bd_base, ring->bd_dma_base);
	ring->bd_base = NULL;
fail:
	return ret;
}
Code example #17
File: bcm-flexrm-mailbox.c Project: Lyude/linux
static void flexrm_shutdown(struct mbox_chan *chan)
{
	u32 reqid;
	unsigned int timeout;
	struct brcm_message *msg;
	struct flexrm_ring *ring = chan->con_priv;

	/* Disable/inactivate ring */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);

	/* Set ring flush state */
	timeout = 1000; /* timeout of 1s */
	writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
			ring->regs + RING_CONTROL);
	do {
		if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
		    FLUSH_DONE_MASK)
			break;
		mdelay(1);
	} while (--timeout);
	if (!timeout)
		dev_err(ring->mbox->dev,
			"setting ring%d flush state timedout\n", ring->num);

	/* Clear ring flush state */
	timeout = 1000; /* timeout of 1s */
	writel_relaxed(0x0, ring->regs + RING_CONTROL);
	do {
		if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) &
		      FLUSH_DONE_MASK))
			break;
		mdelay(1);
	} while (--timeout);
	if (!timeout)
		dev_err(ring->mbox->dev,
			"clearing ring%d flush state timedout\n", ring->num);

	/* Abort all in-flight requests */
	for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
		msg = ring->requests[reqid];
		if (!msg)
			continue;

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = -EIO;
		mbox_chan_received_data(chan, msg);
	}

	/* Clear requests bitmap */
	bitmap_zero(ring->requests_bmap, RING_MAX_REQ_COUNT);

	/* Release IRQ */
	if (ring->irq_requested) {
		irq_set_affinity_hint(ring->irq, NULL);
		free_irq(ring->irq, ring);
		ring->irq_requested = false;
	}

	/* Free-up completion descriptor ring */
	if (ring->cmpl_base) {
		dma_pool_free(ring->mbox->cmpl_pool,
			      ring->cmpl_base, ring->cmpl_dma_base);
		ring->cmpl_base = NULL;
	}

	/* Free-up BD descriptor ring */
	if (ring->bd_base) {
		dma_pool_free(ring->mbox->bd_pool,
			      ring->bd_base, ring->bd_dma_base);
		ring->bd_base = NULL;
	}
}