int cxgb4_get_free_ftid(struct net_device *dev, int family)
{
	struct adapter *adap = netdev2adap(dev);
	struct tid_info *t = &adap->tids;
	int ftid;

	spin_lock_bh(&t->ftid_lock);
	if (family == PF_INET) {
		ftid = find_first_zero_bit(t->ftid_bmap, t->nftids);
		if (ftid >= t->nftids)
			ftid = -1;
	} else {
		if (is_t6(adap->params.chip)) {
			ftid = bitmap_find_free_region(t->ftid_bmap,
						       t->nftids, 1);
			if (ftid < 0)
				goto out_unlock;

			/* this is only a lookup, keep the found region
			 * unallocated
			 */
			bitmap_release_region(t->ftid_bmap, ftid, 1);
		} else {
			ftid = bitmap_find_free_region(t->ftid_bmap,
						       t->nftids, 2);
			if (ftid < 0)
				goto out_unlock;

			bitmap_release_region(t->ftid_bmap, ftid, 2);
		}
	}
out_unlock:
	spin_unlock_bh(&t->ftid_lock);
	return ftid;
}
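
A note on the lookup-only idiom above: taking a region with bitmap_find_free_region() and immediately handing it back with bitmap_release_region() under the same lock answers "is an aligned block of 2^order consecutive ftids free?" without actually reserving it (per the code, an IPv6 filter needs two consecutive ftids on T6, order 1, and four on earlier chips, order 2). A minimal sketch of the same probe pattern, with hypothetical my_bmap/my_nbits/my_lock names:

/* Probe for a free, naturally aligned 2^order region without keeping it
 * allocated. The lock must cover both calls, otherwise another CPU
 * could take the region between the find and the release.
 */
static int probe_free_region(unsigned long *my_bmap, unsigned int my_nbits,
			     int order, spinlock_t *my_lock)
{
	int pos;

	spin_lock_bh(my_lock);
	pos = bitmap_find_free_region(my_bmap, my_nbits, order);
	if (pos >= 0)
		bitmap_release_region(my_bmap, pos, order);
	spin_unlock_bh(my_lock);

	return pos;	/* >= 0: region available at pos; < 0: none free */
}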
Example #2
static void __init test_basics(void)
{
	struct msi_bitmap bmp;
	int rc, i, size = 512;

	/* Can't allocate a bitmap of 0 irqs */
	WARN_ON(msi_bitmap_alloc(&bmp, 0, NULL) == 0);

	/* of_node may be NULL */
	WARN_ON(msi_bitmap_alloc(&bmp, size, NULL));

	/* Should all be free by default */
	WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size)));
	bitmap_release_region(bmp.bitmap, 0, get_count_order(size));

	/* With no node, there's no msi-available-ranges, so expect > 0 */
	WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0);

	/* Should all still be free */
	WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size)));
	bitmap_release_region(bmp.bitmap, 0, get_count_order(size));

	/* Check we can fill it up and then no more */
	for (i = 0; i < size; i++)
		WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0);

	WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0);

	/* Should all be allocated */
	WARN_ON(bitmap_find_free_region(bmp.bitmap, size, 0) >= 0);

	/* And if we free one we can then allocate another */
	msi_bitmap_free_hwirqs(&bmp, size / 2, 1);
	WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) != size / 2);

	/* Free most of them for the alignment tests */
	msi_bitmap_free_hwirqs(&bmp, 3, size - 3);

	/* Check we get a naturally aligned offset */
	rc = msi_bitmap_alloc_hwirqs(&bmp, 2);
	WARN_ON(rc < 0 || rc % 2 != 0);
	rc = msi_bitmap_alloc_hwirqs(&bmp, 4);
	WARN_ON(rc < 0 || rc % 4 != 0);
	rc = msi_bitmap_alloc_hwirqs(&bmp, 8);
	WARN_ON(rc < 0 || rc % 8 != 0);
	rc = msi_bitmap_alloc_hwirqs(&bmp, 9);
	WARN_ON(rc < 0 || rc % 16 != 0);
	rc = msi_bitmap_alloc_hwirqs(&bmp, 3);
	WARN_ON(rc < 0 || rc % 4 != 0);
	rc = msi_bitmap_alloc_hwirqs(&bmp, 7);
	WARN_ON(rc < 0 || rc % 8 != 0);
	rc = msi_bitmap_alloc_hwirqs(&bmp, 121);
	WARN_ON(rc < 0 || rc % 128 != 0);

	msi_bitmap_free(&bmp);

	/* Clients may WARN_ON bitmap == NULL for "not-allocated" */
	WARN_ON(bmp.bitmap != NULL);
}
Example #3
void __init test_basics(void)
{
	struct msi_bitmap bmp;
	int i, size = 512;

	/* Can't allocate a bitmap of 0 irqs */
	check(msi_bitmap_alloc(&bmp, 0, NULL) != 0);

	/* of_node may be NULL */
	check(0 == msi_bitmap_alloc(&bmp, size, NULL));

	/* Should all be free by default */
	check(0 == bitmap_find_free_region(bmp.bitmap, size,
					   get_count_order(size)));
	bitmap_release_region(bmp.bitmap, 0, get_count_order(size));

	/* With no node, there's no msi-available-ranges, so expect > 0 */
	check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0);

	/* Should all still be free */
	check(0 == bitmap_find_free_region(bmp.bitmap, size,
					   get_count_order(size)));
	bitmap_release_region(bmp.bitmap, 0, get_count_order(size));

	/* Check we can fill it up and then no more */
	for (i = 0; i < size; i++)
		check(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0);

	check(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0);

	/* Should all be allocated */
	check(bitmap_find_free_region(bmp.bitmap, size, 0) < 0);

	/* And if we free one we can then allocate another */
	msi_bitmap_free_hwirqs(&bmp, size / 2, 1);
	check(msi_bitmap_alloc_hwirqs(&bmp, 1) == size / 2);

	/* Check we get a naturally aligned offset */
	check(msi_bitmap_alloc_hwirqs(&bmp, 2) % 2 == 0);
	check(msi_bitmap_alloc_hwirqs(&bmp, 4) % 4 == 0);
	check(msi_bitmap_alloc_hwirqs(&bmp, 8) % 8 == 0);
	check(msi_bitmap_alloc_hwirqs(&bmp, 9) % 16 == 0);
	check(msi_bitmap_alloc_hwirqs(&bmp, 3) % 4 == 0);
	check(msi_bitmap_alloc_hwirqs(&bmp, 7) % 8 == 0);
	check(msi_bitmap_alloc_hwirqs(&bmp, 121) % 128 == 0);

	msi_bitmap_free(&bmp);

	/* Clients may check bitmap == NULL for "not-allocated" */
	check(bmp.bitmap == NULL);

	kfree(bmp.bitmap);
}
Example #4
void *dma_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						     order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, order);

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}
Example #5
/*
 * Return a DMA-coherent and contiguous memory chunk from the reserved
 * DMA memory area
 */
static inline u32 __alloc_dma_pages(int order)
{
	unsigned long flags;
	u32 pos;

	spin_lock_irqsave(&dma_lock, flags);
	pos = bitmap_find_free_region(dma_bitmap, dma_pages, order);
	spin_unlock_irqrestore(&dma_lock, flags);

	/* NB: bitmap_find_free_region() returns a negative value when no
	 * free 2^order-page region exists; that error case is not checked
	 * before the address computation below.
	 */
	return dma_base + (pos << PAGE_SHIFT);
}
Example #6
int vmm_host_irqext_alloc_region(unsigned int size)
{
	int tries = 3, pos = -1;
	int size_log = 0;
	int idx = 0;
	irq_flags_t flags;

	while ((1 << size_log) < size) {
		++size_log;
	}

	if (!size_log || size_log > BITS_PER_LONG)
		return VMM_ENOTAVAIL;

	vmm_write_lock_irqsave_lite(&iectrl.lock, flags);

try_again:
	for (idx = 0; idx < BITS_TO_LONGS(iectrl.count); ++idx) {
		pos = bitmap_find_free_region(&iectrl.bitmap[idx],
					      BITS_PER_LONG, size_log);
		if (pos >= 0) {
			/* NB: bitmap_find_free_region() has already marked
			 * the region as allocated, so this bitmap_set()
			 * only re-sets a subset of those bits.
			 */
			bitmap_set(&iectrl.bitmap[idx], pos, size_log);
			pos += idx * (BITS_PER_LONG);
			break;
		}
	}

	if (pos < 0) {
		/*
		 * Give a second try, reallocate some memory for extended
		 * IRQs
		 */
		if (VMM_OK == _irqext_expand()) {
			if (tries) {
				tries--;
				goto try_again;
			}
		}
	}

	vmm_write_unlock_irqrestore_lite(&iectrl.lock, flags);

	if (pos < 0) {
		vmm_printf("%s: Failed to find an extended IRQ region\n",
			   __func__);
		return pos;
	}

	return pos + CONFIG_HOST_IRQ_COUNT;
}
Example #7
static struct te_oper_param *te_get_free_params(struct te_device *dev,
		unsigned int nparams)
{
	struct te_oper_param *params = NULL;
	int idx, nbits;

	if (nparams) {
		nbits = get_count_order(nparams);
		idx = bitmap_find_free_region(dev->param_bitmap, TE_PARAM_MAX, nbits);
		if (idx >= 0) {
			params = dev->param_addr + idx;
		}
	}
	return params;
}
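
Since the region above is reserved with order get_count_order(nparams), the matching free must release the same order at the same offset. A minimal sketch of the release side (te_put_free_params is a hypothetical name, assuming the same dev->param_bitmap and dev->param_addr fields):

static void te_put_free_params(struct te_device *dev,
			       struct te_oper_param *params,
			       unsigned int nparams)
{
	int idx, nbits;

	if (!params || !nparams)
		return;

	idx = params - dev->param_addr;		/* offset handed out above */
	nbits = get_count_order(nparams);
	bitmap_release_region(dev->param_bitmap, idx, nbits);
}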
Example #8
static void __init test_of_node(void)
{
	u32 prop_data[] = { 10, 10, 25, 3, 40, 1, 100, 100, 200, 20 };
	const char *expected_str = "0-9,20-24,28-39,41-99,220-255";
	char *prop_name = "msi-available-ranges";
	char *node_name = "/fakenode";
	struct device_node of_node;
	struct property prop;
	struct msi_bitmap bmp;
#define SIZE_EXPECTED 256
	DECLARE_BITMAP(expected, SIZE_EXPECTED);

	/* There should really be a struct device_node allocator */
	memset(&of_node, 0, sizeof(of_node));
	of_node_init(&of_node);
	of_node.full_name = node_name;

	WARN_ON(msi_bitmap_alloc(&bmp, SIZE_EXPECTED, &of_node));

	/* No msi-available-ranges, so expect > 0 */
	WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0);

	/* Should all still be free */
	WARN_ON(bitmap_find_free_region(bmp.bitmap, SIZE_EXPECTED,
					get_count_order(SIZE_EXPECTED)));
	bitmap_release_region(bmp.bitmap, 0, get_count_order(SIZE_EXPECTED));

	/* Now create a fake msi-available-ranges property */

	/* There should really .. oh whatever */
	memset(&prop, 0, sizeof(prop));
	prop.name = prop_name;
	prop.value = &prop_data;
	prop.length = sizeof(prop_data);

	of_node.properties = &prop;

	/* msi-available-ranges, so expect == 0 */
	WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp));

	/* Check we got the expected result */
	WARN_ON(bitmap_parselist(expected_str, expected, SIZE_EXPECTED));
	WARN_ON(!bitmap_equal(expected, bmp.bitmap, SIZE_EXPECTED));

	msi_bitmap_free(&bmp);
	kfree(bmp.bitmap);
}
Example #9
int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num)
{
	unsigned long flags;
	int offset, order = get_count_order(num);

	spin_lock_irqsave(&bmp->lock, flags);
	/*
	 * This is fast, but stricter than we need. We might want to add
	 * a fallback routine which does a linear search with no alignment.
	 */
	offset = bitmap_find_free_region(bmp->bitmap, bmp->irq_count, order);
	spin_unlock_irqrestore(&bmp->lock, flags);

	pr_debug("msi_bitmap: allocated 0x%x (2^%d) at offset 0x%x\n",
		 num, order, offset);

	return offset;
}
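
Because the order is get_count_order(num), the request is rounded up to the next power of two and bitmap_find_free_region() returns an offset naturally aligned to that size, which is exactly what the alignment tests in the earlier examples exercise. A worked illustration of the rounding:

/* get_count_order(n) == ceil(log2(n)), so for example:
 *
 *   num = 2    ->  order 1  ->  offset is a multiple of   2
 *   num = 9    ->  order 4  ->  offset is a multiple of  16
 *   num = 121  ->  order 7  ->  offset is a multiple of 128
 *
 * The cost is fragmentation: allocating 9 hwirqs reserves a full
 * 16-entry region of the bitmap, which is why the comment above calls
 * this "stricter than we need".
 */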
Example #10
File: pci-dma.c  Project: 274914765/C
static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
                       dma_addr_t *dma_handle, void **ret)
{
    struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
    int order = get_order(size);

    if (mem) {
        int page = bitmap_find_free_region(mem->bitmap, mem->size,
                             order);
        if (page >= 0) {
            *dma_handle = mem->device_base + (page << PAGE_SHIFT);
            *ret = mem->virt_base + (page << PAGE_SHIFT);
            memset(*ret, 0, size);
        } else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
            *ret = NULL;
    }
    return (mem != NULL);
}
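
A hedged usage sketch for the wrapper above (alloc_coherent_sketch is a hypothetical name), following the same shape as the open-coded dma_alloc_coherent() variants in the other examples: a nonzero return only says the device has a dedicated coherent area, so *ret must still be checked before use.

static void *alloc_coherent_sketch(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t gfp)
{
	void *vaddr = NULL;

	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &vaddr))
		return vaddr;	/* may be NULL: area exhausted/exclusive */

	/* No per-device area: fall back to the generic page allocator. */
	vaddr = (void *)__get_free_pages(gfp, get_order(size));
	if (vaddr) {
		memset(vaddr, 0, size);
		*dma_handle = virt_to_phys(vaddr);
	}
	return vaddr;
}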
Example #11
static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
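	/* e.g. count = 3 was rounded up to 4 above, giving mask = 2:
	 * reserve four consecutive IRTEs aligned on a multiple of four.
	 */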
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		cfg->remapped = 1;
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
Example #12
File: consistent.c  Project: 274914765/C
void *dma_alloc_coherent(struct device *dev, size_t size,
               dma_addr_t *dma_handle, gfp_t gfp)
{
    void *ret, *ret_nocache;
    struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
    int order = get_order(size);

    if (mem) {
        int page = bitmap_find_free_region(mem->bitmap, mem->size,
                             order);
        if (page >= 0) {
            *dma_handle = mem->device_base + (page << PAGE_SHIFT);
            ret = mem->virt_base + (page << PAGE_SHIFT);
            memset(ret, 0, size);
            return ret;
        }
        if (mem->flags & DMA_MEMORY_EXCLUSIVE)
            return NULL;
    }

    ret = (void *)__get_free_pages(gfp, order);
    if (!ret)
        return NULL;

    memset(ret, 0, size);
    /*
     * Pages from the page allocator may have data present in
     * cache. So flush the cache before using uncached memory.
     */
    dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);

    ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
    if (!ret_nocache) {
        free_pages((unsigned long)ret, order);
        return NULL;
    }

    *dma_handle = virt_to_phys(ret);
    return ret_nocache;
}
Example #13
void *dma_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	unsigned int order = get_order(size);
	unsigned long vstart;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						     order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(ret, 0, size);
			return ret;
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			return NULL;
	}

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;

	vstart = __get_free_pages(gfp, order);
	ret = (void *)vstart;

	if (ret != NULL) {
		/* NB. Hardcode 31 address bits for now: aacraid limitation. */
		if (xen_create_contiguous_region(vstart, order, 31) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}
	return ret;
}
Example #14
static int flexrm_new_request(struct flexrm_ring *ring,
				struct brcm_message *batch_msg,
				struct brcm_message *msg)
{
	void *next;
	unsigned long flags;
	u32 val, count, nhcnt;
	u32 read_offset, write_offset;
	bool exit_cleanup = false;
	int ret = 0, reqid;

	/* Do sanity check on message */
	if (!flexrm_sanity_check(msg))
		return -EIO;
	msg->error = 0;

	/* Allocate a request id (an order-0 region is a single bit). */
	spin_lock_irqsave(&ring->lock, flags);
	reqid = bitmap_find_free_region(ring->requests_bmap,
					RING_MAX_REQ_COUNT, 0);
	spin_unlock_irqrestore(&ring->lock, flags);
	if (reqid < 0)
		return -ENOSPC;
	ring->requests[reqid] = msg;

	/* Do DMA mappings for the message */
	ret = flexrm_dma_map(ring->mbox->dev, msg);
	if (ret < 0) {
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);
		return ret;
	}

	/* Determine current HW BD read offset */
	read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
	val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
	read_offset *= RING_DESC_SIZE;
	read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);

	/*
	 * Number of required descriptors = number of non-header descriptors +
	 *				    number of header descriptors +
	 *				    1x null descriptor
	 */
	nhcnt = flexrm_estimate_nonheader_desc_count(msg);
	count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;

	/* Check for available descriptor space. */
	write_offset = ring->bd_write_offset;
	while (count) {
		if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
			count--;
		write_offset += RING_DESC_SIZE;
		if (write_offset == RING_BD_SIZE)
			write_offset = 0x0;
		if (write_offset == read_offset)
			break;
	}
	if (count) {
		ret = -ENOSPC;
		exit_cleanup = true;
		goto exit;
	}

	/* Write descriptors to ring */
	next = flexrm_write_descs(msg, nhcnt, reqid,
			ring->bd_base + ring->bd_write_offset,
			RING_BD_TOGGLE_VALID(ring->bd_write_offset),
			ring->bd_base, ring->bd_base + RING_BD_SIZE);
	if (IS_ERR(next)) {
		ret = PTR_ERR(next);
		exit_cleanup = true;
		goto exit;
	}

	/* Save ring BD write offset */
	ring->bd_write_offset = (unsigned long)(next - ring->bd_base);

	/* Increment number of messages sent */
	atomic_inc_return(&ring->msg_send_count);

exit:
	/* Update error status in message */
	msg->error = ret;

	/* Cleanup if we failed */
	if (exit_cleanup) {
		flexrm_dma_unmap(ring->mbox->dev, msg);
		ring->requests[reqid] = NULL;
		spin_lock_irqsave(&ring->lock, flags);
		bitmap_release_region(ring->requests_bmap, reqid, 0);
		spin_unlock_irqrestore(&ring->lock, flags);
	}

	return ret;
}
Example #15
static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		       struct ieee80211_key_conf *key)
{
	struct p54_common *priv = dev->priv;
	int slot, ret = 0;
	u8 algo = 0;
	u8 *addr = NULL;

	if (modparam_nohwcrypt)
		return -EOPNOTSUPP;

	if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) {
		/*
		 * Unfortunately most/all firmwares are trying to decrypt
		 * incoming management frames if a suitable key can be found.
		 * However, in doing so the data in these frames gets
		 * corrupted. So, we can't have firmware supported crypto
		 * offload in this case.
		 */
		return -EOPNOTSUPP;
	}

	mutex_lock(&priv->conf_mutex);
	if (cmd == SET_KEY) {
		switch (key->cipher) {
		case WLAN_CIPHER_SUITE_TKIP:
			if (!(priv->privacy_caps & (BR_DESC_PRIV_CAP_MICHAEL |
			      BR_DESC_PRIV_CAP_TKIP))) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_TKIPMICHAEL;
			break;
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
			if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP)) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_WEP;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_AESCCMP;
			break;
		default:
			ret = -EOPNOTSUPP;
			goto out_unlock;
		}
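		/* An order-0 region is a single bit, so this grabs one
		 * free slot in the rx key cache.
		 */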
		slot = bitmap_find_free_region(priv->used_rxkeys,
					       priv->rx_keycache_size, 0);

		if (slot < 0) {
			/*
			 * The device supports the chosen algorithm, but the
			 * firmware does not provide enough key slots to store
			 * all of them.
			 * But encryption offload for outgoing frames is always
			 * possible, so we just pretend that the upload was
			 * successful and do the decryption in software.
			 */

			/* mark the key as invalid. */
			key->hw_key_idx = 0xff;
			goto out_unlock;
		}
	} else {
		slot = key->hw_key_idx;

		if (slot == 0xff) {
			/* This key was not uploaded into the rx key cache. */

			goto out_unlock;
		}

		bitmap_release_region(priv->used_rxkeys, slot, 0);
		algo = 0;
	}

	if (sta)
		addr = sta->addr;

	ret = p54_upload_key(priv, algo, slot, key->keyidx,
			     key->keylen, addr, key->key);
	if (ret) {
		bitmap_release_region(priv->used_rxkeys, slot, 0);
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	key->hw_key_idx = slot;

out_unlock:
	mutex_unlock(&priv->conf_mutex);
	return ret;
}
Example #16
extern int iscsi_allocate_thread_sets(u32 thread_pair_count)
{
	int allocated_thread_pair_count = 0, i, thread_id;
	struct iscsi_thread_set *ts = NULL;

	for (i = 0; i < thread_pair_count; i++) {
		ts = kzalloc(sizeof(struct iscsi_thread_set), GFP_KERNEL);
		if (!ts) {
			pr_err("Unable to allocate memory for"
					" thread set.\n");
			return allocated_thread_pair_count;
		}
		/*
		 * Locate the next available region in the thread_set_bitmap
		 * (get_order(1) == 0, i.e. a single-bit region)
		 */
		spin_lock(&ts_bitmap_lock);
		thread_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
				iscsit_global->ts_bitmap_count, get_order(1));
		spin_unlock(&ts_bitmap_lock);
		if (thread_id < 0) {
			pr_err("bitmap_find_free_region() failed for"
				" thread_set_bitmap\n");
			kfree(ts);
			return allocated_thread_pair_count;
		}

		ts->thread_id = thread_id;
		ts->status = ISCSI_THREAD_SET_FREE;
		INIT_LIST_HEAD(&ts->ts_list);
		spin_lock_init(&ts->ts_state_lock);
		init_completion(&ts->rx_post_start_comp);
		init_completion(&ts->tx_post_start_comp);
		init_completion(&ts->rx_restart_comp);
		init_completion(&ts->tx_restart_comp);
		init_completion(&ts->deferred_restart_comp);
		init_completion(&ts->rx_start_comp);
		init_completion(&ts->tx_start_comp);
		init_completion(&ts->deferred_start_comp);

		ts->create_threads = 1;
		ts->tx_thread = kthread_run(iscsi_target_tx_thread, ts, "%s-%d",
					ISCSI_TX_THREAD_NAME, thread_id);
		if (IS_ERR(ts->tx_thread)) {
			dump_stack();
			pr_err("Unable to start iscsi_target_tx_thread\n");
			break;
		}

		ts->rx_thread = kthread_run(iscsi_target_rx_thread, ts, "%s-%d",
					ISCSI_RX_THREAD_NAME, thread_id);
		if (IS_ERR(ts->rx_thread)) {
			kthread_stop(ts->tx_thread);
			pr_err("Unable to start iscsi_target_rx_thread\n");
			break;
		}

		ts->deferred_thread = kthread_run(iscsi_target_deferred_thread, ts, "%s-%d",
					ISCSI_DEFERRED_THREAD_NAME, thread_id);
		if (IS_ERR(ts->deferred_thread)) {
			kthread_stop(ts->rx_thread);
			kthread_stop(ts->tx_thread);
			pr_err("Unable to start iscsi_target_deferred_thread\n");
			break;
		}
		ts->create_threads = 0;

		iscsi_add_ts_to_inactive_list(ts);
		allocated_thread_pair_count++;
	}

	pr_debug("Spawned %d thread set(s) (%d total threads).\n",
		allocated_thread_pair_count, allocated_thread_pair_count * 3);
	return allocated_thread_pair_count;
}
Example #17
static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		       struct ieee80211_key_conf *key)
{
	struct p54_common *priv = dev->priv;
	int slot, ret = 0;
	u8 algo = 0;
	u8 *addr = NULL;

	if (modparam_nohwcrypt)
		return -EOPNOTSUPP;

	mutex_lock(&priv->conf_mutex);
	if (cmd == SET_KEY) {
		switch (key->alg) {
		case ALG_TKIP:
			if (!(priv->privacy_caps & (BR_DESC_PRIV_CAP_MICHAEL |
			      BR_DESC_PRIV_CAP_TKIP))) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_TKIPMICHAEL;
			break;
		case ALG_WEP:
			if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP)) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_WEP;
			break;
		case ALG_CCMP:
			if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_AESCCMP;
			break;
		default:
			ret = -EOPNOTSUPP;
			goto out_unlock;
		}
		slot = bitmap_find_free_region(priv->used_rxkeys,
					       priv->rx_keycache_size, 0);

		if (slot < 0) {
			/*
			 * The device supports the chosen algorithm, but the
			 * firmware does not provide enough key slots to store
			 * all of them.
			 * But encryption offload for outgoing frames is always
			 * possible, so we just pretend that the upload was
			 * successful and do the decryption in software.
			 */

			/* mark the key as invalid. */
			key->hw_key_idx = 0xff;
			goto out_unlock;
		}
	} else {
		slot = key->hw_key_idx;

		if (slot == 0xff) {
			/* This key was not uploaded into the rx key cache. */

			goto out_unlock;
		}

		bitmap_release_region(priv->used_rxkeys, slot, 0);
		algo = 0;
	}

	if (sta)
		addr = sta->addr;

	ret = p54_upload_key(priv, algo, slot, key->keyidx,
			     key->keylen, addr, key->key);
	if (ret) {
		bitmap_release_region(priv->used_rxkeys, slot, 0);
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	key->hw_key_idx = slot;

out_unlock:
	mutex_unlock(&priv->conf_mutex);
	return ret;
}