Example no. 1
0
int cxgb4_get_free_ftid(struct net_device *dev, int family)
{
	struct adapter *adap = netdev2adap(dev);
	struct tid_info *t = &adap->tids;
	int ftid;

	spin_lock_bh(&t->ftid_lock);
	if (family == PF_INET) {
		ftid = find_first_zero_bit(t->ftid_bmap, t->nftids);
		if (ftid >= t->nftids)
			ftid = -1;
	} else {
		if (is_t6(adap->params.chip)) {
			ftid = bitmap_find_free_region(t->ftid_bmap,
						       t->nftids, 1);
			if (ftid < 0)
				goto out_unlock;

			/* this is only a lookup, keep the found region
			 * unallocated
			 */
			bitmap_release_region(t->ftid_bmap, ftid, 1);
		} else {
			ftid = bitmap_find_free_region(t->ftid_bmap,
						       t->nftids, 2);
			if (ftid < 0)
				goto out_unlock;

			bitmap_release_region(t->ftid_bmap, ftid, 2);
		}
	}
out_unlock:
	spin_unlock_bh(&t->ftid_lock);
	return ftid;
}
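The branch guarded by the "only a lookup" comment above probes for a free, naturally aligned run of 2 (T6, order 1) or 4 (pre-T6, order 2) filter IDs without claiming it: the region is found and then immediately released under the same lock. A minimal sketch of that probe pattern, using hypothetical names that are not from the cxgb4 driver:

/*
 * Hypothetical helper illustrating the find-then-release probe used in
 * cxgb4_get_free_ftid(): report where a free 2^order-aligned region starts
 * without keeping it allocated. The bitmap is unchanged on return.
 */
static int peek_free_region(unsigned long *bmap, unsigned int nbits,
			    int order, spinlock_t *lock)
{
	int pos;

	spin_lock_bh(lock);
	pos = bitmap_find_free_region(bmap, nbits, order);
	if (pos >= 0)
		bitmap_release_region(bmap, pos, order);
	spin_unlock_bh(lock);

	return pos;	/* starting bit, or a negative errno if none is free */
}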
Example no. 2
0
static void __init test_basics(void)
{
	struct msi_bitmap bmp;
	int rc, i, size = 512;

	/* Can't allocate a bitmap of 0 irqs */
	WARN_ON(msi_bitmap_alloc(&bmp, 0, NULL) == 0);

	/* of_node may be NULL */
	WARN_ON(msi_bitmap_alloc(&bmp, size, NULL));

	/* Should all be free by default */
	WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size)));
	bitmap_release_region(bmp.bitmap, 0, get_count_order(size));

	/* With no node, there's no msi-available-ranges, so expect > 0 */
	WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0);

	/* Should all still be free */
	WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size)));
	bitmap_release_region(bmp.bitmap, 0, get_count_order(size));

	/* Check we can fill it up and then no more */
	for (i = 0; i < size; i++)
		WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0);

	WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0);

	/* Should all be allocated */
	WARN_ON(bitmap_find_free_region(bmp.bitmap, size, 0) >= 0);

	/* And if we free one we can then allocate another */
	msi_bitmap_free_hwirqs(&bmp, size / 2, 1);
	WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) != size / 2);

	/* Free most of them for the alignment tests */
	msi_bitmap_free_hwirqs(&bmp, 3, size - 3);

	/* Check we get a naturally aligned offset */
	rc = msi_bitmap_alloc_hwirqs(&bmp, 2);
	WARN_ON(rc < 0 && rc % 2 != 0);
	rc = msi_bitmap_alloc_hwirqs(&bmp, 4);
	WARN_ON(rc < 0 && rc % 4 != 0);
	rc = msi_bitmap_alloc_hwirqs(&bmp, 8);
	WARN_ON(rc < 0 && rc % 8 != 0);
	rc = msi_bitmap_alloc_hwirqs(&bmp, 9);
	WARN_ON(rc < 0 && rc % 16 != 0);
	rc = msi_bitmap_alloc_hwirqs(&bmp, 3);
	WARN_ON(rc < 0 && rc % 4 != 0);
	rc = msi_bitmap_alloc_hwirqs(&bmp, 7);
	WARN_ON(rc < 0 && rc % 8 != 0);
	rc = msi_bitmap_alloc_hwirqs(&bmp, 121);
	WARN_ON(rc < 0 && rc % 128 != 0);

	msi_bitmap_free(&bmp);

	/* Clients may WARN_ON bitmap == NULL for "not-allocated" */
	WARN_ON(bmp.bitmap != NULL);
}
Example no. 3
0
void __init test_basics(void)
{
	struct msi_bitmap bmp;
	int i, size = 512;

	/* Can't allocate a bitmap of 0 irqs */
	check(msi_bitmap_alloc(&bmp, 0, NULL) != 0);

	/* of_node may be NULL */
	check(0 == msi_bitmap_alloc(&bmp, size, NULL));

	/* Should all be free by default */
	check(0 == bitmap_find_free_region(bmp.bitmap, size,
					   get_count_order(size)));
	bitmap_release_region(bmp.bitmap, 0, get_count_order(size));

	/* With no node, there's no msi-available-ranges, so expect > 0 */
	check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0);

	/* Should all still be free */
	check(0 == bitmap_find_free_region(bmp.bitmap, size,
					   get_count_order(size)));
	bitmap_release_region(bmp.bitmap, 0, get_count_order(size));

	/* Check we can fill it up and then no more */
	for (i = 0; i < size; i++)
		check(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0);

	check(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0);

	/* Should all be allocated */
	check(bitmap_find_free_region(bmp.bitmap, size, 0) < 0);

	/* And if we free one we can then allocate another */
	msi_bitmap_free_hwirqs(&bmp, size / 2, 1);
	check(msi_bitmap_alloc_hwirqs(&bmp, 1) == size / 2);

	/* Check we get a naturally aligned offset */
	check(msi_bitmap_alloc_hwirqs(&bmp, 2) % 2 == 0);
	check(msi_bitmap_alloc_hwirqs(&bmp, 4) % 4 == 0);
	check(msi_bitmap_alloc_hwirqs(&bmp, 8) % 8 == 0);
	check(msi_bitmap_alloc_hwirqs(&bmp, 9) % 16 == 0);
	check(msi_bitmap_alloc_hwirqs(&bmp, 3) % 4 == 0);
	check(msi_bitmap_alloc_hwirqs(&bmp, 7) % 8 == 0);
	check(msi_bitmap_alloc_hwirqs(&bmp, 121) % 128 == 0);

	msi_bitmap_free(&bmp);

	/* Clients may check bitmap == NULL for "not-allocated" */
	check(bmp.bitmap == NULL);

	kfree(bmp.bitmap);
}
Example no. 4
0
static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
			     unsigned int chip_ver)
{
	spin_lock_bh(&t->ftid_lock);
	if (family == PF_INET) {
		__clear_bit(fidx, t->ftid_bmap);
	} else {
		if (chip_ver < CHELSIO_T6)
			bitmap_release_region(t->ftid_bmap, fidx, 2);
		else
			bitmap_release_region(t->ftid_bmap, fidx, 1);
	}
	spin_unlock_bh(&t->ftid_lock);
}
Example no. 5
0
void iscsi_deallocate_thread_sets(void)
{
	u32 released_count = 0;
	struct iscsi_thread_set *ts = NULL;

	while ((ts = iscsi_get_ts_from_inactive_list())) {

		spin_lock_bh(&ts->ts_state_lock);
		ts->status = ISCSI_THREAD_SET_DIE;
		spin_unlock_bh(&ts->ts_state_lock);

		if (ts->rx_thread) {
			send_sig(SIGINT, ts->rx_thread, 1);
			kthread_stop(ts->rx_thread);
		}
		if (ts->tx_thread) {
			send_sig(SIGINT, ts->tx_thread, 1);
			kthread_stop(ts->tx_thread);
		}
		/*
		 * Release this thread_id in the thread_set_bitmap
		 */
		spin_lock(&ts_bitmap_lock);
		bitmap_release_region(iscsit_global->ts_bitmap,
				ts->thread_id, get_order(1));
		spin_unlock(&ts_bitmap_lock);

		released_count++;
		kfree(ts);
	}

	if (released_count)
		pr_debug("Stopped %d thread set(s) (%d total threads)."
			"\n", released_count, released_count * 2);
}
Example no. 6
0
static void te_put_free_params(struct te_device *dev,
		struct te_oper_param *params, uint32_t nparams)
{
	int idx, nbits;
	idx = (params - dev->param_addr);
	nbits = get_count_order(nparams);
	bitmap_release_region(dev->param_bitmap, idx, nbits);
}
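te_put_free_params() only releases the right number of bits because the allocation side used the same get_count_order(nparams) order: bitmap_allocate_region()/bitmap_release_region() always operate on 2^order bits, and get_count_order() rounds a count up to the next power of two. A minimal sketch of such a matched pair, with hypothetical helper names:

/*
 * Hypothetical matched pair: reserve and later release "count" slots.
 * Both calls must pass the same order, otherwise the release clears a
 * different number of bits than were allocated.
 */
static int slots_get(unsigned long *bmap, unsigned int nbits, unsigned int count)
{
	/* find and claim a naturally aligned run of 2^order bits; returns its start */
	return bitmap_find_free_region(bmap, nbits, get_count_order(count));
}

static void slots_put(unsigned long *bmap, int start, unsigned int count)
{
	bitmap_release_region(bmap, start, get_count_order(count));
}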
Example no. 7
0
void dma_free_coherent(struct device *dev, size_t size,
             void *vaddr, dma_addr_t dma_handle)
{
    struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
    int order = get_order(size);

    if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
        int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

        bitmap_release_region(mem->bitmap, page, order);
    } else {
        /* Not from the device's coherent area: return the pages to the page allocator. */
        free_pages((unsigned long)vaddr, order);
    }
}
Example no. 8
0
static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
    struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

    if (mem && vaddr >= mem->virt_base && vaddr <
           (mem->virt_base + (mem->size << PAGE_SHIFT))) {
        int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

        bitmap_release_region(mem->bitmap, page, order);
        return 1;
    }

    return 0;
}
Example no. 9
0
void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset,
			    unsigned int num)
{
	unsigned long flags;
	int order = get_count_order(num);

	pr_debug("msi_bitmap: freeing 0x%x (2^%d) at offset 0x%x\n",
		 num, order, offset);

	spin_lock_irqsave(&bmp->lock, flags);
	bitmap_release_region(bmp->bitmap, offset, order);
	spin_unlock_irqrestore(&bmp->lock, flags);
}
Example no. 10
0
static void __free_dma_pages(u32 addr, int order)
{
	unsigned long flags;
	u32 pos = (addr - dma_base) >> PAGE_SHIFT;

	if (addr < dma_base || (pos + (1 << order)) >= dma_pages) {
		printk(KERN_ERR "%s: freeing outside range.\n", __func__);
		BUG();
	}

	spin_lock_irqsave(&dma_lock, flags);
	bitmap_release_region(dma_bitmap, pos, order);
	spin_unlock_irqrestore(&dma_lock, flags);
}
Example no. 11
0
static void __init test_of_node(void)
{
	u32 prop_data[] = { 10, 10, 25, 3, 40, 1, 100, 100, 200, 20 };
	const char *expected_str = "0-9,20-24,28-39,41-99,220-255";
	char *prop_name = "msi-available-ranges";
	char *node_name = "/fakenode";
	struct device_node of_node;
	struct property prop;
	struct msi_bitmap bmp;
#define SIZE_EXPECTED 256
	DECLARE_BITMAP(expected, SIZE_EXPECTED);

	/* There should really be a struct device_node allocator */
	memset(&of_node, 0, sizeof(of_node));
	of_node_init(&of_node);
	of_node.full_name = node_name;

	WARN_ON(msi_bitmap_alloc(&bmp, SIZE_EXPECTED, &of_node));

	/* No msi-available-ranges, so expect > 0 */
	WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0);

	/* Should all still be free */
	WARN_ON(bitmap_find_free_region(bmp.bitmap, SIZE_EXPECTED,
					get_count_order(SIZE_EXPECTED)));
	bitmap_release_region(bmp.bitmap, 0, get_count_order(SIZE_EXPECTED));

	/* Now create a fake msi-available-ranges property */

	/* There should really .. oh whatever */
	memset(&prop, 0, sizeof(prop));
	prop.name = prop_name;
	prop.value = &prop_data;
	prop.length = sizeof(prop_data);

	of_node.properties = &prop;

	/* msi-available-ranges, so expect == 0 */
	WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp));

	/* Check we got the expected result */
	WARN_ON(bitmap_parselist(expected_str, expected, SIZE_EXPECTED));
	WARN_ON(!bitmap_equal(expected, bmp.bitmap, SIZE_EXPECTED));

	msi_bitmap_free(&bmp);
	kfree(bmp.bitmap);
}
Example no. 12
0
static void iscsi_deallocate_extra_thread_sets(void)
{
	u32 orig_count, released_count = 0;
	struct iscsi_thread_set *ts = NULL;

	orig_count = TARGET_THREAD_SET_COUNT;

	while ((iscsit_global->inactive_ts + 1) > orig_count) {
		ts = iscsi_get_ts_from_inactive_list();
		if (!ts)
			break;

		spin_lock_bh(&ts->ts_state_lock);
		ts->status = ISCSI_THREAD_SET_DIE;
		spin_unlock_bh(&ts->ts_state_lock);

		if (ts->rx_thread) {
			send_sig(SIGINT, ts->rx_thread, 1);
			kthread_stop(ts->rx_thread);
		}
		if (ts->tx_thread) {
			send_sig(SIGINT, ts->tx_thread, 1);
			kthread_stop(ts->tx_thread);
		}
		/*
		 * Release this thread_id in the thread_set_bitmap
		 */
		spin_lock(&ts_bitmap_lock);
		bitmap_release_region(iscsit_global->ts_bitmap,
				ts->thread_id, get_order(1));
		spin_unlock(&ts_bitmap_lock);

		released_count++;
		kfree(ts);
	}

	if (released_count) {
		pr_debug("Stopped %d thread set(s) (%d total threads)."
			"\n", released_count, released_count * 2);
	}
}
Example no. 13
0
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
Example no. 14
0
/**
 * msi_bitmap_reserve_dt_hwirqs - Reserve irqs specified in the device tree.
 * @bmp: pointer to the MSI bitmap.
 *
 * Looks in the device tree to see if there is a property specifying which
 * irqs can be used for MSI. If found those irqs reserved in the device tree
 * are reserved in the bitmap.
 *
 * Returns 0 for success, < 0 if there was an error, and > 0 if no property
 * was found in the device tree.
 **/
int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp)
{
    int i, j, len;
    const u32 *p;

    if (!bmp->of_node)
        return 1;

    p = of_get_property(bmp->of_node, "msi-available-ranges", &len);
    if (!p) {
        pr_debug("msi_bitmap: no msi-available-ranges property " \
                 "found on %s\n", bmp->of_node->full_name);
        return 1;
    }

    if (len % (2 * sizeof(u32)) != 0) {
        printk(KERN_WARNING "msi_bitmap: Malformed msi-available-ranges"
               " property on %s\n", bmp->of_node->full_name);
        return -EINVAL;
    }

    bitmap_allocate_region(bmp->bitmap, 0, get_count_order(bmp->irq_count));

    spin_lock(&bmp->lock);

    /* Format is: (<u32 start> <u32 count>)+ */
    len /= 2 * sizeof(u32);
    for (i = 0; i < len; i++, p += 2) {
        for (j = 0; j < *(p + 1); j++)
            bitmap_release_region(bmp->bitmap, *p + j, 0);
    }

    spin_unlock(&bmp->lock);

    return 0;
}
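Given the return convention in the kerneldoc above (0 on success, < 0 on error, > 0 when the property is absent), a caller typically treats the > 0 case as "nothing reserved" rather than a failure, as the test code in the earlier examples does. A minimal illustrative caller, assuming a bitmap already set up with msi_bitmap_alloc(); the helper name is hypothetical:

static int reserve_from_dt(struct msi_bitmap *bmp)
{
    int rc = msi_bitmap_reserve_dt_hwirqs(bmp);

    if (rc < 0)
        return rc;  /* malformed property or other real error */

    /* rc > 0: no msi-available-ranges property, nothing was reserved */
    return 0;
}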
Example no. 15
0
static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		       struct ieee80211_key_conf *key)
{
	struct p54_common *priv = dev->priv;
	int slot, ret = 0;
	u8 algo = 0;
	u8 *addr = NULL;

	if (modparam_nohwcrypt)
		return -EOPNOTSUPP;

	mutex_lock(&priv->conf_mutex);
	if (cmd == SET_KEY) {
		switch (key->alg) {
		case ALG_TKIP:
			if (!(priv->privacy_caps & (BR_DESC_PRIV_CAP_MICHAEL |
			      BR_DESC_PRIV_CAP_TKIP))) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_TKIPMICHAEL;
			break;
		case ALG_WEP:
			if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP)) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_WEP;
			break;
		case ALG_CCMP:
			if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_AESCCMP;
			break;
		default:
			ret = -EOPNOTSUPP;
			goto out_unlock;
		}
		slot = bitmap_find_free_region(priv->used_rxkeys,
					       priv->rx_keycache_size, 0);

		if (slot < 0) {
			/*
			 * The device supports the chosen algorithm, but the
			 * firmware does not provide enough key slots to store
			 * all of them.
			 * But encryption offload for outgoing frames is always
			 * possible, so we just pretend that the upload was
			 * successful and do the decryption in software.
			 */

			/* mark the key as invalid. */
			key->hw_key_idx = 0xff;
			goto out_unlock;
		}
	} else {
		slot = key->hw_key_idx;

		if (slot == 0xff) {
			/* This key was not uploaded into the rx key cache. */

			goto out_unlock;
		}

		bitmap_release_region(priv->used_rxkeys, slot, 0);
		algo = 0;
	}

	if (sta)
		addr = sta->addr;

	ret = p54_upload_key(priv, algo, slot, key->keyidx,
			     key->keylen, addr, key->key);
	if (ret) {
		bitmap_release_region(priv->used_rxkeys, slot, 0);
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	key->hw_key_idx = slot;

out_unlock:
	mutex_unlock(&priv->conf_mutex);
	return ret;
}
Example no. 16
0
static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		       struct ieee80211_key_conf *key)
{
	struct p54_common *priv = dev->priv;
	int slot, ret = 0;
	u8 algo = 0;
	u8 *addr = NULL;

	if (modparam_nohwcrypt)
		return -EOPNOTSUPP;

	if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
		/*
		 * Unfortunately most/all firmwares are trying to decrypt
		 * incoming management frames if a suitable key can be found.
		 * However, in doing so the data in these frames gets
		 * corrupted. So, we can't have firmware supported crypto
		 * offload in this case.
		 */
		return -EOPNOTSUPP;
	}

	mutex_lock(&priv->conf_mutex);
	if (cmd == SET_KEY) {
		switch (key->cipher) {
		case WLAN_CIPHER_SUITE_TKIP:
			if (!(priv->privacy_caps & (BR_DESC_PRIV_CAP_MICHAEL |
			      BR_DESC_PRIV_CAP_TKIP))) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_TKIPMICHAEL;
			break;
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
			if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP)) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_WEP;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_AESCCMP;
			break;
		default:
			ret = -EOPNOTSUPP;
			goto out_unlock;
		}
		slot = bitmap_find_free_region(priv->used_rxkeys,
					       priv->rx_keycache_size, 0);

		if (slot < 0) {
			/*
			 * The device supports the chosen algorithm, but the
			 * firmware does not provide enough key slots to store
			 * all of them.
			 * But encryption offload for outgoing frames is always
			 * possible, so we just pretend that the upload was
			 * successful and do the decryption in software.
			 */

			/* mark the key as invalid. */
			key->hw_key_idx = 0xff;
			goto out_unlock;
		}
	} else {
		slot = key->hw_key_idx;

		if (slot == 0xff) {
			/* This key was not uploaded into the rx key cache. */

			goto out_unlock;
		}

		bitmap_release_region(priv->used_rxkeys, slot, 0);
		algo = 0;
	}

	if (sta)
		addr = sta->addr;

	ret = p54_upload_key(priv, algo, slot, key->keyidx,
			     key->keylen, addr, key->key);
	if (ret) {
		bitmap_release_region(priv->used_rxkeys, slot, 0);
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	key->hw_key_idx = slot;

out_unlock:
	mutex_unlock(&priv->conf_mutex);
	return ret;
}
Example no. 17
0
static int flexrm_new_request(struct flexrm_ring *ring,
				struct brcm_message *batch_msg,
				struct brcm_message *msg)
{
	void *next;
	unsigned long flags;
	u32 val, count, nhcnt;
	u32 read_offset, write_offset;
	bool exit_cleanup = false;
	int ret = 0, reqid;

	/* Do sanity check on message */
	if (!flexrm_sanity_check(msg))
		return -EIO;
	msg->error = 0;

	/* Allocate a request id; if none is free, fail with -ENOSPC. */
	spin_lock_irqsave(&ring->lock, flags);
	reqid = bitmap_find_free_region(ring->requests_bmap,
					RING_MAX_REQ_COUNT, 0);
	spin_unlock_irqrestore(&ring->lock, flags);
	if (reqid < 0)
		return -ENOSPC;
	ring->requests[reqid] = msg;

	/* Do DMA mappings for the message */
	ret = flexrm_dma_map(ring->mbox->dev, msg);
	if (ret < 0) {
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);
		return ret;
	}

	/* Determine current HW BD read offset */
	read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
	val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
	read_offset *= RING_DESC_SIZE;
	read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);

	/*
	 * Number of required descriptors = number of non-header descriptors +
	 *				 number of header descriptors +
	 *				 1x null descriptor
	 */
	nhcnt = flexrm_estimate_nonheader_desc_count(msg);
	count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;

	/* Check for available descriptor space. */
	write_offset = ring->bd_write_offset;
	while (count) {
		if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
			count--;
		write_offset += RING_DESC_SIZE;
		if (write_offset == RING_BD_SIZE)
			write_offset = 0x0;
		if (write_offset == read_offset)
			break;
	}
	if (count) {
		ret = -ENOSPC;
		exit_cleanup = true;
		goto exit;
	}

	/* Write descriptors to ring */
	next = flexrm_write_descs(msg, nhcnt, reqid,
			ring->bd_base + ring->bd_write_offset,
			RING_BD_TOGGLE_VALID(ring->bd_write_offset),
			ring->bd_base, ring->bd_base + RING_BD_SIZE);
	if (IS_ERR(next)) {
		ret = PTR_ERR(next);
		exit_cleanup = true;
		goto exit;
	}

	/* Save ring BD write offset */
	ring->bd_write_offset = (unsigned long)(next - ring->bd_base);

	/* Increment number of messages sent */
	atomic_inc_return(&ring->msg_send_count);

exit:
	/* Update error status in message */
	msg->error = ret;

	/* Cleanup if we failed */
	if (exit_cleanup) {
		flexrm_dma_unmap(ring->mbox->dev, msg);
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);
	}

	return ret;
}
Example no. 18
0
static int flexrm_process_completions(struct flexrm_ring *ring)
{
	u64 desc;
	int err, count = 0;
	unsigned long flags;
	struct brcm_message *msg = NULL;
	u32 reqid, cmpl_read_offset, cmpl_write_offset;
	struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];

	spin_lock_irqsave(&ring->lock, flags);

	/*
	 * Get current completion read and write offset
	 *
	 * Note: We should read completion write pointer at least once
	 * after we get a MSI interrupt because HW maintains internal
	 * MSI status which will allow next MSI interrupt only after
	 * completion write pointer is read.
	 */
	cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	cmpl_write_offset *= RING_DESC_SIZE;
	cmpl_read_offset = ring->cmpl_read_offset;
	ring->cmpl_read_offset = cmpl_write_offset;

	spin_unlock_irqrestore(&ring->lock, flags);

	/* For each completed request notify mailbox clients */
	reqid = 0;
	while (cmpl_read_offset != cmpl_write_offset) {
		/* Dequeue next completion descriptor */
		desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));

		/* Next read offset */
		cmpl_read_offset += RING_DESC_SIZE;
		if (cmpl_read_offset == RING_CMPL_SIZE)
			cmpl_read_offset = 0;

		/* Decode error from completion descriptor */
		err = flexrm_cmpl_desc_to_error(desc);
		if (err < 0) {
			dev_warn(ring->mbox->dev,
			"ring%d got completion desc=0x%lx with error %d\n",
			ring->num, (unsigned long)desc, err);
		}

		/* Determine request id from completion descriptor */
		reqid = flexrm_cmpl_desc_to_reqid(desc);

		/* Determine message pointer based on reqid */
		msg = ring->requests[reqid];
		if (!msg) {
			dev_warn(ring->mbox->dev,
			"ring%d null msg pointer for completion desc=0x%lx\n",
			ring->num, (unsigned long)desc);
			continue;
		}

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = err;
		mbox_chan_received_data(chan, msg);

		/* Increment number of completions processed */
		atomic_inc_return(&ring->msg_cmpl_count);
		count++;
	}

	return count;
}