Example #1
/**
 * batadv_hash_new() - Allocates and clears the hashtable
 * @size: number of hash buckets to allocate
 *
 * Return: newly allocated hashtable, NULL on errors
 */
struct batadv_hashtable *batadv_hash_new(u32 size)
{
	struct batadv_hashtable *hash;

	hash = kmalloc(sizeof(*hash), GFP_ATOMIC);
	if (!hash)
		return NULL;

	hash->table = kmalloc_array(size, sizeof(*hash->table), GFP_ATOMIC);
	if (!hash->table)
		goto free_hash;

	hash->list_locks = kmalloc_array(size, sizeof(*hash->list_locks),
					 GFP_ATOMIC);
	if (!hash->list_locks)
		goto free_table;

	hash->size = size;
	batadv_hash_init(hash);
	return hash;

free_table:
	kfree(hash->table);
free_hash:
	kfree(hash);
	return NULL;
}
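Example #1 allocates two arrays hanging off the struct, but the matching teardown is not shown. As a minimal sketch (the name batadv_hash_destroy is an assumption here; only the field layout above is given), the arrays must be freed before the struct that holds their pointers:

void batadv_hash_destroy(struct batadv_hashtable *hash)
{
	/* free the arrays before the struct that owns the pointers */
	kfree(hash->list_locks);
	kfree(hash->table);
	kfree(hash);
}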
Example #2
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
			     unsigned int host_num_mems)
{
	struct nfp_flower_priv *priv = app->priv;
	int err, stats_size;

	hash_init(priv->mask_table);

	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
	if (err)
		return err;

	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

	/* Init ring buffer and unallocated mask_ids. */
	priv->mask_ids.mask_id_free_list.buf =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
	if (!priv->mask_ids.mask_id_free_list.buf)
		goto err_free_flow_table;

	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

	/* Init timestamps for mask id */
	priv->mask_ids.last_used =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
	if (!priv->mask_ids.last_used)
		goto err_free_mask_id;

	/* Init ring buffer and unallocated stats_ids. */
	priv->stats_ids.free_list.buf =
		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
				   priv->stats_ring_size));
	if (!priv->stats_ids.free_list.buf)
		goto err_free_last_used;

	priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);

	stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
		     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
	priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
				     GFP_KERNEL);
	if (!priv->stats)
		goto err_free_ring_buf;

	spin_lock_init(&priv->stats_lock);

	return 0;

err_free_ring_buf:
	vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
	kfree(priv->mask_ids.last_used);
err_free_mask_id:
	kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_flow_table:
	rhashtable_destroy(&priv->flow_table);
	return -ENOMEM;
}
Example #3
int kobj_map(struct kobj_map *domain, dev_t dev, unsigned long range,
	     struct module *module, kobj_probe_t *probe,
	     int (*lock)(dev_t, void *), void *data)
{
	unsigned n = MAJOR(dev + range - 1) - MAJOR(dev) + 1;
	unsigned index = MAJOR(dev);
	unsigned i;
	struct probe *p;

	if (n > 255)
		n = 255;

	p = kmalloc_array(n, sizeof(struct probe), GFP_KERNEL);
	if (p == NULL)
		return -ENOMEM;

	for (i = 0; i < n; i++, p++) {
		p->owner = module;
		p->get = probe;
		p->lock = lock;
		p->dev = dev;
		p->range = range;
		p->data = data;
	}
	mutex_lock(domain->lock);
	for (i = 0, p -= n; i < n; i++, p++, index++) {
		struct probe **s = &domain->probes[index % 255];
		while (*s && (*s)->range < range)
			s = &(*s)->next;
		p->next = *s;
		*s = p;
	}
	mutex_unlock(domain->lock);
	return 0;
}
Example #4
int
radeon_kfd_interrupt_init(struct kfd_dev *kfd)
{
	void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
					kfd->device_info->ih_ring_entry_size,
					GFP_KERNEL);
	if (!interrupt_ring)
		return -ENOMEM;

	kfd->interrupt_ring = interrupt_ring;
	kfd->interrupt_ring_size =
		KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
	atomic_set(&kfd->interrupt_ring_wptr, 0);
	atomic_set(&kfd->interrupt_ring_rptr, 0);

	spin_lock_init(&kfd->interrupt_lock);

	INIT_WORK(&kfd->interrupt_work, interrupt_wq);

	kfd->interrupts_active = true;

	/*
	 * After this function returns, the interrupt will be enabled. This
	 * barrier ensures that the interrupt running on a different processor
	 * sees all the above writes.
	 */
	smp_wmb();

	return 0;
}
Example #5
/**
 * amdgpu_ring_backup - Back up the content of a ring
 *
 * @ring: the ring we want to back up
 * @data: pointer used to return the newly allocated buffer holding the
 * saved dwords (NULL when nothing was saved)
 *
 * Saves all unprocessed commits from a ring; returns the number of dwords saved.
 */
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data)
{
	unsigned size, ptr, i;

	*data = NULL;

	if (ring->ring_obj == NULL)
		return 0;

	/* it doesn't make sense to save anything if all fences are signaled */
	if (!amdgpu_fence_count_emitted(ring))
		return 0;

	ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);

	size = ring->wptr + (ring->ring_size / 4);
	size -= ptr;
	size &= ring->ptr_mask;
	if (size == 0)
		return 0;

	/* and then save the content of the ring */
	*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
	if (!*data)
		return 0;
	for (i = 0; i < size; ++i) {
		(*data)[i] = ring->ring[ptr++];
		ptr &= ring->ptr_mask;
	}

	return size;
}
Example #6
static int loop_set_wakeup_filter(struct rc_dev *dev,
				  struct rc_scancode_filter *sc)
{
	static const unsigned int max = 512;
	struct ir_raw_event *raw;
	int ret;
	int i;

	/* fine to disable filter */
	if (!sc->mask)
		return 0;

	/* encode the specified filter and loop it back */
	raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
	if (!raw)
		return -ENOMEM;

	ret = ir_raw_encode_scancode(dev->wakeup_protocol, sc->data, raw, max);
	/* still loop back the partial raw IR even if it's incomplete */
	if (ret == -ENOBUFS)
		ret = max;
	if (ret >= 0) {
		/* do the loopback */
		for (i = 0; i < ret; ++i)
			ir_raw_event_store(dev, &raw[i]);
		ir_raw_event_handle(dev);

		ret = 0;
	}

	kfree(raw);

	return ret;
}
Example #7
static int ath9k_rng_kthread(void *data)
{
	int bytes_read;
	struct ath_softc *sc = data;
	u32 *rng_buf;
	u32 delay, fail_stats = 0;

	rng_buf = kmalloc_array(ATH9K_RNG_BUF_SIZE, sizeof(u32), GFP_KERNEL);
	if (!rng_buf)
		goto out;

	while (!kthread_should_stop()) {
		bytes_read = ath9k_rng_data_read(sc, rng_buf,
						 ATH9K_RNG_BUF_SIZE);
		if (unlikely(!bytes_read)) {
			delay = ath9k_rng_delay_get(++fail_stats);
			msleep_interruptible(delay);
			continue;
		}

		fail_stats = 0;

		/* sleep until entropy bits under write_wakeup_threshold */
		add_hwgenerator_randomness((void *)rng_buf, bytes_read,
					   ATH9K_RNG_ENTROPY(bytes_read));
	}

	kfree(rng_buf);
out:
	sc->rng_task = NULL;

	return 0;
}
Example #8
/**
 * mic_smpt_init - Initialize MIC System Memory Page Tables.
 *
 * @mdev: pointer to mic_device instance.
 *
 * returns 0 for success and -errno for error.
 */
int mic_smpt_init(struct mic_device *mdev)
{
	int i, err = 0;
	dma_addr_t dma_addr;
	struct mic_smpt_info *smpt_info;

	mdev->smpt = kmalloc(sizeof(*mdev->smpt), GFP_KERNEL);
	if (!mdev->smpt)
		return -ENOMEM;

	smpt_info = mdev->smpt;
	mdev->smpt_ops->init(mdev);
	smpt_info->entry = kmalloc_array(smpt_info->info.num_reg,
					 sizeof(*smpt_info->entry), GFP_KERNEL);
	if (!smpt_info->entry) {
		err = -ENOMEM;
		goto free_smpt;
	}
	spin_lock_init(&smpt_info->smpt_lock);
	for (i = 0; i < smpt_info->info.num_reg; i++) {
		dma_addr = i * smpt_info->info.page_size;
		smpt_info->entry[i].dma_addr = dma_addr;
		smpt_info->entry[i].ref_count = 0;
		mdev->smpt_ops->set(mdev, dma_addr, i);
	}
	smpt_info->ref_count = 0;
	smpt_info->map_count = 0;
	smpt_info->unmap_count = 0;
	return 0;
free_smpt:
	kfree(smpt_info);
	return err;
}
Example #9
/**
 * irq_sim_init - Initialize the interrupt simulator: allocate a range of
 *                dummy interrupts.
 *
 * @sim:        The interrupt simulator object to initialize.
 * @num_irqs:   Number of interrupts to allocate
 *
 * Returns 0 on success and a negative error number on failure.
 */
int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs)
{
	int i;

	sim->irqs = kmalloc_array(num_irqs, sizeof(*sim->irqs), GFP_KERNEL);
	if (!sim->irqs)
		return -ENOMEM;

	sim->irq_base = irq_alloc_descs(-1, 0, num_irqs, 0);
	if (sim->irq_base < 0) {
		kfree(sim->irqs);
		return sim->irq_base;
	}

	for (i = 0; i < num_irqs; i++) {
		sim->irqs[i].irqnum = sim->irq_base + i;
		sim->irqs[i].enabled = false;
		irq_set_chip(sim->irq_base + i, &irq_sim_irqchip);
		irq_set_chip_data(sim->irq_base + i, &sim->irqs[i]);
		irq_set_handler(sim->irq_base + i, &handle_simple_irq);
		irq_modify_status(sim->irq_base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	init_irq_work(&sim->work_ctx.work, irq_sim_handle_irq);
	sim->irq_count = num_irqs;

	return 0;
}
Example #10
struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key),
			       int (*keycmp)(struct hashtab *h, const void *key1, const void *key2),
			       u32 size)
{
	struct hashtab *p;
	u32 i;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return p;

	p->size = size;
	p->nel = 0;
	p->hash_value = hash_value;
	p->keycmp = keycmp;
	p->htable = kmalloc_array(size, sizeof(*p->htable), GFP_KERNEL);
	if (!p->htable) {
		kfree(p);
		return NULL;
	}

	for (i = 0; i < size; i++)
		p->htable[i] = NULL;

	return p;
}
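Every bucket in Example #10 is immediately reset to NULL, so the kmalloc_array() call plus the initialization loop can be collapsed into one zeroing allocation; a minimal alternative sketch, assuming the same struct layout:

	p->htable = kcalloc(size, sizeof(*p->htable), GFP_KERNEL);
	if (!p->htable) {
		kfree(p);
		return NULL;
	}
	/* no initialization loop needed: kcalloc() returns zeroed memory */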
Example #11
/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
Example #12
static int genwqe_ffdc_buffs_alloc(struct genwqe_dev *cd)
{
	unsigned int type, e = 0;

	for (type = 0; type < GENWQE_DBG_UNITS; type++) {
		switch (type) {
		case GENWQE_DBG_UNIT0:
			e = genwqe_ffdc_buff_size(cd, 0);
			break;
		case GENWQE_DBG_UNIT1:
			e = genwqe_ffdc_buff_size(cd, 1);
			break;
		case GENWQE_DBG_UNIT2:
			e = genwqe_ffdc_buff_size(cd, 2);
			break;
		case GENWQE_DBG_REGS:
			e = GENWQE_FFDC_REGS;
			break;
		}

		/* currently support only the debug units mentioned here */
		cd->ffdc[type].entries = e;
		cd->ffdc[type].regs =
			kmalloc_array(e, sizeof(struct genwqe_reg),
				      GFP_KERNEL);
		/*
		 * regs == NULL is ok; the code using it treats NULL as
		 * "no regs", and printing a warning is fine in that case.
		 */
	}
	return 0;
}
Example #13
static int __must_check of_clk_bulk_get_all(struct device_node *np,
					    struct clk_bulk_data **clks)
{
	struct clk_bulk_data *clk_bulk;
	int num_clks;
	int ret;

	num_clks = of_clk_get_parent_count(np);
	if (!num_clks)
		return 0;

	clk_bulk = kmalloc_array(num_clks, sizeof(*clk_bulk), GFP_KERNEL);
	if (!clk_bulk)
		return -ENOMEM;

	ret = of_clk_bulk_get(np, num_clks, clk_bulk);
	if (ret) {
		kfree(clk_bulk);
		return ret;
	}

	*clks = clk_bulk;

	return num_clks;
}
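Example #13 overloads its return value: a positive clock count, 0 when the node has no clocks, or a negative errno. A hedged caller sketch (variable names are illustrative, not from the clk core):

	struct clk_bulk_data *clks;
	int num = of_clk_bulk_get_all(np, &clks);

	if (num < 0)
		return num;	/* lookup or allocation failure */
	if (num == 0)
		return 0;	/* no clocks; *clks was never written */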
Example #14
static int s32v_dt_node_to_map(struct pinctrl_dev *pctldev,
			struct device_node *np,
			struct pinctrl_map **map, unsigned *num_maps)
{
	struct s32v_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
	const struct s32v_pinctrl_soc_info *info = ipctl->info;
	const struct s32v_pin_group *grp;
	struct pinctrl_map *new_map;
	struct device_node *parent;
	int map_num = 1;
	int i, j;

	/*
	 * first find the group of this node and check if we need to create
	 * config maps for pins
	 */
	grp = s32v_pinctrl_find_group_by_name(info, np->name);
	if (!grp) {
		dev_err(info->dev, "unable to find group for node %s\n",
			np->name);
		return -EINVAL;
	}

	for (i = 0; i < grp->npins; i++)
		map_num++;

	new_map = kmalloc_array(map_num, sizeof(struct pinctrl_map), GFP_KERNEL);
	if (!new_map)
		return -ENOMEM;

	*map = new_map;
	*num_maps = map_num;

	/* create mux map */
	parent = of_get_parent(np);
	if (!parent) {
		kfree(new_map);
		return -EINVAL;
	}
	new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
	new_map[0].data.mux.function = parent->name;
	new_map[0].data.mux.group = np->name;
	of_node_put(parent);

	/* create config map */
	new_map++;
	for (i = j = 0; i < grp->npins; i++) {
		new_map[j].type = PIN_MAP_TYPE_CONFIGS_PIN;
		new_map[j].data.configs.group_or_pin =
			pin_get_name(pctldev, grp->pins[i].pin_id);
		new_map[j].data.configs.configs = &grp->pins[i].config;
		new_map[j].data.configs.num_configs = 1;
		j++;
	}

	dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
		(*map)->data.mux.function, (*map)->data.mux.group, map_num);

	return 0;
}
Example #15
static int
wbcir_tx(struct rc_dev *dev, unsigned *b, unsigned count)
{
	struct wbcir_data *data = dev->priv;
	unsigned *buf;
	unsigned i;
	unsigned long flags;

	buf = kmalloc_array(count, sizeof(*b), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Convert values to multiples of 10us */
	for (i = 0; i < count; i++)
		buf[i] = DIV_ROUND_CLOSEST(b[i], 10);

	/* Not sure if this is possible, but better safe than sorry */
	spin_lock_irqsave(&data->spinlock, flags);
	if (data->txstate != WBCIR_TXSTATE_INACTIVE) {
		spin_unlock_irqrestore(&data->spinlock, flags);
		kfree(buf);
		return -EBUSY;
	}

	/* Fill the TX fifo once, the irq handler will do the rest */
	data->txbuf = buf;
	data->txlen = count;
	data->txoff = 0;
	wbcir_irq_tx(data);

	/* We're done */
	spin_unlock_irqrestore(&data->spinlock, flags);
	return count;
}
Example #16
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(array_size(count, sizeof(pid_t)));
	else
		return kmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
}
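Because Example #16 draws from either vmalloc() or kmalloc_array() depending on the count, the release side must tolerate both origins; a minimal counterpart sketch (the name pidlist_free is an assumption), using kvfree(), which dispatches to vfree() or kfree() as appropriate:

static void pidlist_free(void *p)
{
	kvfree(p);	/* correct for both vmalloc()ed and kmalloc()ed buffers */
}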
Example #17
pgd_t * __init efi_call_phys_prolog(void)
{
	unsigned long vaddress;
	pgd_t *save_pgd;

	int pgd;
	int n_pgds;

	if (!efi_enabled(EFI_OLD_MEMMAP)) {
		save_pgd = (pgd_t *)read_cr3();
		write_cr3((unsigned long)efi_scratch.efi_pgt);
		goto out;
	}

	early_code_mapping_set_exec(1);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
	if (!save_pgd)
		return NULL;

	for (pgd = 0; pgd < n_pgds; pgd++) {
		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
	}
out:
	__flush_tlb_all();

	return save_pgd;
}
Example #18
static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
{
	int shift = 0;
	int tmp = hash_size;
	struct jbd2_revoke_table_s *table;

	table = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
	if (!table)
		goto out;

	while ((tmp >>= 1UL) != 0UL)
		shift++;

	table->hash_size = hash_size;
	table->hash_shift = shift;
	table->hash_table =
		kmalloc_array(hash_size, sizeof(struct list_head), GFP_KERNEL);
	if (!table->hash_table) {
		kmem_cache_free(jbd2_revoke_table_cache, table);
		table = NULL;
		goto out;
	}

	for (tmp = 0; tmp < hash_size; tmp++)
		INIT_LIST_HEAD(&table->hash_table[tmp]);

out:
	return table;
}
Example #19
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
Example #20
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
		queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4+2+3+7)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
	     reg++)
		DUMP_REG(sdma_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
Example #21
static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS (54+4)
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
Example #22
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (tbl->nest)
		size = min(size, 1U << tbl->nest);

	if (sizeof(spinlock_t) != 0) {
		if (gfpflags_allow_blocking(gfp))
			tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
		else
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
Example #23
static int __init test_sort_init(void)
{
	int *a, i, r = 1, err = -ENOMEM;

	a = kmalloc_array(TEST_LEN, sizeof(*a), GFP_KERNEL);
	if (!a)
		return err;

	for (i = 0; i < TEST_LEN; i++) {
		r = (r * 725861) % 6599;
		a[i] = r;
	}

	sort(a, TEST_LEN, sizeof(*a), cmpint, NULL);

	err = -EINVAL;
	for (i = 0; i < TEST_LEN-1; i++)
		if (a[i] > a[i+1]) {
			pr_err("test has failed\n");
			goto exit;
		}
	err = 0;
	pr_info("test passed\n");
exit:
	kfree(a);
	return err;
}
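Example #23 passes a comparator named cmpint to sort() without defining it; a plausible sketch for ascending integer order (not necessarily the definition from the original file):

static int cmpint(const void *a, const void *b)
{
	/* values in this test stay below 6599, so subtraction cannot overflow */
	return *(const int *)a - *(const int *)b;
}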
Example #24
/*
 * trusted_read - copy the sealed blob data to userspace in hex.
 * On success, returns the trusted key datablob size to userspace.
 */
static long trusted_read(const struct key *key, char __user *buffer,
			 size_t buflen)
{
	const struct trusted_key_payload *p;
	char *ascii_buf;
	char *bufp;
	int i;

	p = dereference_key_locked(key);
	if (!p)
		return -EINVAL;

	if (buffer && buflen >= 2 * p->blob_len) {
		ascii_buf = kmalloc_array(2, p->blob_len, GFP_KERNEL);
		if (!ascii_buf)
			return -ENOMEM;

		bufp = ascii_buf;
		for (i = 0; i < p->blob_len; i++)
			bufp = hex_byte_pack(bufp, p->blob[i]);
		if (copy_to_user(buffer, ascii_buf, 2 * p->blob_len) != 0) {
			kzfree(ascii_buf);
			return -EFAULT;
		}
		kzfree(ascii_buf);
	}
	return 2 * p->blob_len;
}
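The factor of two in kmalloc_array(2, p->blob_len, GFP_KERNEL) in Example #24 exists because hex_byte_pack() emits two ASCII characters per input byte. A sketch of that observable behavior (a stand-in for illustration, not the kernel's implementation):

static char *hex_byte_pack_sketch(char *buf, u8 byte)
{
	static const char hex[] = "0123456789abcdef";

	*buf++ = hex[byte >> 4];	/* high nibble first */
	*buf++ = hex[byte & 0x0f];	/* then low nibble */
	return buf;
}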
Example #25
static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
					struct xdr_stream *xdr,
					struct cb_sequenceargs *args)
{
	__be32 *p;
	int i;
	__be32 status;

	status = decode_sessionid(xdr, &args->csa_sessionid);
	if (status)
		goto out;

	status = htonl(NFS4ERR_RESOURCE);
	p = read_buf(xdr, 5 * sizeof(uint32_t));
	if (unlikely(p == NULL))
		goto out;

	args->csa_addr = svc_addr(rqstp);
	args->csa_sequenceid = ntohl(*p++);
	args->csa_slotid = ntohl(*p++);
	args->csa_highestslotid = ntohl(*p++);
	args->csa_cachethis = ntohl(*p++);
	args->csa_nrclists = ntohl(*p++);
	args->csa_rclists = NULL;
	if (args->csa_nrclists) {
		args->csa_rclists = kmalloc_array(args->csa_nrclists,
						  sizeof(*args->csa_rclists),
						  GFP_KERNEL);
		if (unlikely(args->csa_rclists == NULL))
			goto out;

		for (i = 0; i < args->csa_nrclists; i++) {
			status = decode_rc_list(xdr, &args->csa_rclists[i]);
			if (status)
				goto out_free;
		}
	}
	status = 0;

	dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u slotid %u "
		"highestslotid %u cachethis %d nrclists %u\n",
		__func__,
		((u32 *)&args->csa_sessionid)[0],
		((u32 *)&args->csa_sessionid)[1],
		((u32 *)&args->csa_sessionid)[2],
		((u32 *)&args->csa_sessionid)[3],
		args->csa_sequenceid, args->csa_slotid,
		args->csa_highestslotid, args->csa_cachethis,
		args->csa_nrclists);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;

out_free:
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);
	goto out;
}
Example #26
static int bgmac_mii_register(struct bgmac *bgmac)
{
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	char bus_id[MII_BUS_ID_SIZE + 3];
	int i, err = 0;

	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "bgmac mii bus";
	sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
		bgmac->core->core_unit);
	mii_bus->priv = bgmac;
	mii_bus->read = bgmac_mii_read;
	mii_bus->write = bgmac_mii_write;
	mii_bus->parent = &bgmac->core->dev;
	mii_bus->phy_mask = ~(1 << bgmac->phyaddr);

	mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
	if (!mii_bus->irq) {
		err = -ENOMEM;
		goto err_free_bus;
	}
	for (i = 0; i < PHY_MAX_ADDR; i++)
		mii_bus->irq[i] = PHY_POLL;

	err = mdiobus_register(mii_bus);
	if (err) {
		bgmac_err(bgmac, "Registration of mii bus failed\n");
		goto err_free_irq;
	}

	bgmac->mii_bus = mii_bus;

	/* Connect to the PHY */
	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
		 bgmac->phyaddr);
	phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
			      PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phy_dev)) {
		bgmac_err(bgmac, "PHY connecton failed\n");
		err = PTR_ERR(phy_dev);
		goto err_unregister_bus;
	}
	bgmac->phy_dev = phy_dev;

	return err;

err_unregister_bus:
	mdiobus_unregister(mii_bus);
err_free_irq:
	kfree(mii_bus->irq);
err_free_bus:
	mdiobus_free(mii_bus);
	return err;
}
Example #27
static int isert_alloc_for_rdma(struct isert_cmnd *pdu, int sge_cnt,
				struct isert_connection *isert_conn)
{
	struct isert_wr *wr;
	struct ib_sge *sg_pool;
	int i, ret = 0;
	int wr_cnt;

	sg_pool = kmalloc_array(sge_cnt, sizeof(*sg_pool), GFP_KERNEL);
	if (unlikely(sg_pool == NULL)) {
		ret = -ENOMEM;
		goto out;
	}

	wr_cnt = DIV_ROUND_UP(sge_cnt, isert_conn->max_sge);
	wr = kmalloc_array(wr_cnt, sizeof(*wr), GFP_KERNEL);
	if (unlikely(wr == NULL)) {
		ret = -ENOMEM;
		goto out_free_sg_pool;
	}

	kfree(pdu->wr);
	pdu->wr = wr;

	kfree(pdu->sg_pool);
	pdu->sg_pool = sg_pool;

	pdu->n_wr = wr_cnt;
	pdu->n_sge = sge_cnt;

	for (i = 0; i < wr_cnt; ++i)
		isert_wr_set_fields(&pdu->wr[i], isert_conn, pdu);

	for (i = 0; i < sge_cnt; ++i)
		pdu->sg_pool[i].lkey = isert_conn->isert_dev->mr->lkey;

	goto out;

out_free_sg_pool:
	kfree(sg_pool);
out:
	return ret;
}
Example #28
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}
Example #29
static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl)
{
	unsigned int block_num, log_num, phymax;
	loff_t block_adr;
	u8 *oob;
	int i, ret;

	oob = kzalloc(mtd->oobsize, GFP_KERNEL);
	if (!oob)
		return -ENOMEM;

	phymax = mtd_div_by_eb(SHARPSL_FTL_PART_SIZE, mtd);

	/* FTL reserves 5% of the blocks + 1 spare */
	ftl->logmax = ((phymax * 95) / 100) - 1;

	ftl->log2phy = kmalloc_array(ftl->logmax, sizeof(*ftl->log2phy),
				     GFP_KERNEL);
	if (!ftl->log2phy) {
		ret = -ENOMEM;
		goto exit;
	}

	/* initialize ftl->log2phy */
	for (i = 0; i < ftl->logmax; i++)
		ftl->log2phy[i] = UINT_MAX;

	/* create physical-logical table */
	for (block_num = 0; block_num < phymax; block_num++) {
		block_adr = block_num * mtd->erasesize;

		if (mtd_block_isbad(mtd, block_adr))
			continue;

		if (sharpsl_nand_read_oob(mtd, block_adr, oob))
			continue;

		/* get logical block */
		log_num = sharpsl_nand_get_logical_num(oob);

		/* cut-off errors and skip the out-of-range values */
		if (log_num > 0 && log_num < ftl->logmax) {
			if (ftl->log2phy[log_num] == UINT_MAX)
				ftl->log2phy[log_num] = block_num;
		}
	}

	pr_info("Sharp SL FTL: %d blocks used (%d logical, %d reserved)\n",
		phymax, ftl->logmax, phymax - ftl->logmax);

	ret = 0;
exit:
	kfree(oob);
	return ret;
}
Example #30
static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
				 struct tipc_nl_compat_msg *msg)
{
	int err;
	struct sk_buff *doit_buf;
	struct sk_buff *trans_buf;
	struct nlattr **attrbuf;
	struct genl_info info;

	trans_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!trans_buf)
		return -ENOMEM;

	attrbuf = kmalloc_array(tipc_genl_family.maxattr + 1,
				sizeof(struct nlattr *),
				GFP_KERNEL);
	if (!attrbuf) {
		err = -ENOMEM;
		goto trans_out;
	}

	doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!doit_buf) {
		err = -ENOMEM;
		goto attrbuf_out;
	}

	memset(&info, 0, sizeof(info));
	info.attrs = attrbuf;

	rtnl_lock();
	err = (*cmd->transcode)(cmd, trans_buf, msg);
	if (err)
		goto doit_out;

	err = nla_parse(attrbuf, tipc_genl_family.maxattr,
			(const struct nlattr *)trans_buf->data,
			trans_buf->len, NULL, NULL);
	if (err)
		goto doit_out;

	doit_buf->sk = msg->dst_sk;

	err = (*cmd->doit)(doit_buf, &info);
doit_out:
	rtnl_unlock();

	kfree_skb(doit_buf);
attrbuf_out:
	kfree(attrbuf);
trans_out:
	kfree_skb(trans_buf);

	return err;
}