Example #1
/*
 * row_init_queue() - Init scheduler data structures
 * @q:	requests queue
 *
 * Return pointer to struct row_data to be saved in elevator for
 * this dispatch queue
 *
 */
static void *row_init_queue(struct request_queue *q)
{
	struct row_data *rdata;
	int i;

	rdata = kmalloc_node(sizeof(*rdata),
			     GFP_KERNEL | __GFP_ZERO, q->node);
	if (!rdata)
		return NULL;

	for (i = 0; i < ROWQ_MAX_PRIO; i++) {
		INIT_LIST_HEAD(&rdata->row_queues[i].fifo);
		rdata->row_queues[i].disp_quantum = row_queues_def[i].quantum;
		rdata->row_queues[i].rdata = rdata;
		rdata->row_queues[i].prio = i;
		rdata->row_queues[i].idle_data.begin_idling = false;
		rdata->row_queues[i].idle_data.last_insert_time =
			ktime_set(0, 0);
	}

	/*
	 * Currently idling is enabled only for READ queues. If we want to
	 * enable it for write queues also, note that idling frequency will
	 * be the same in both cases
	 */
	rdata->read_idle.idle_time = msecs_to_jiffies(ROW_IDLE_TIME_MSEC);
	/* Maybe 0 on some platforms */
	if (!rdata->read_idle.idle_time)
		rdata->read_idle.idle_time = 1;
	rdata->read_idle.freq = ROW_READ_FREQ_MSEC;
	rdata->read_idle.idle_workqueue = alloc_workqueue("row_idle_work",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!rdata->read_idle.idle_workqueue)
		panic("Failed to create idle workqueue\n");
	INIT_DELAYED_WORK(&rdata->read_idle.idle_work, kick_queue);

	rdata->curr_queue = ROWQ_PRIO_HIGH_READ;
	rdata->dispatch_queue = q;

	return rdata;
}
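row_init_queue() hands the elevator core a row_data that owns a workqueue and pending delayed work, so the matching exit hook must undo both in reverse order. A minimal sketch of that counterpart, assuming the core passes back an elevator_queue whose elevator_data is the pointer returned above (the name row_exit_queue and the exact signature are assumptions, not part of this snippet):

static void row_exit_queue(struct elevator_queue *e)
{
	struct row_data *rdata = e->elevator_data;

	/* Flush any pending idle work before destroying its workqueue. */
	cancel_delayed_work_sync(&rdata->read_idle.idle_work);
	destroy_workqueue(rdata->read_idle.idle_workqueue);
	kfree(rdata);
}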
Example #2
static struct ib_ucontext *ntrdma_alloc_ucontext(struct ib_device *ibdev,
						 struct ib_udata *ibudata)
{
	struct ntrdma_dev *dev = ntrdma_ib_dev(ibdev);
	struct ib_ucontext *ibuctx;
	int rc;

	ibuctx = kmalloc_node(sizeof(*ibuctx), GFP_KERNEL, dev->node);
	if (!ibuctx) {
		rc = -ENOMEM;
		goto err_ctx;
	}

	return ibuctx;

	// kfree(ibuctx);
err_ctx:
	return ERR_PTR(rc);
}
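The ucontext here is a bare allocation with no additional state, so the matching teardown is a single kfree(). A plausible sketch of that counterpart, following the ib_device verb convention of this era (the name ntrdma_dealloc_ucontext is assumed, not shown in the source):

static int ntrdma_dealloc_ucontext(struct ib_ucontext *ibuctx)
{
	/* Nothing to undo beyond the allocation itself. */
	kfree(ibuctx);

	return 0;
}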
/*
 * initialize elevator private data (deadline_data).
 */
static void *deadline_init_queue(struct request_queue *q)
{
	struct deadline_data *dd;

	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!dd)
		return NULL;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 0;
	dd->fifo_batch = fifo_batch;
	return dd;
}
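The pointer returned here becomes the elevator's private data and comes back at teardown time. A sketch of the matching exit hook under that assumption (deadline_exit_queue is modeled on the classic deadline scheduler but is not part of this snippet):

static void deadline_exit_queue(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;

	/* Both FIFOs must be drained before the elevator is torn down. */
	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	kfree(dd);
}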
Example #4
static int fb_bpf_init_filter(struct fb_bpf_priv __percpu *fb_priv_cpu,
			      struct sock_fprog_kern *fprog, unsigned int cpu)
{
	int err;
	struct sk_filter *sf, *sfold;
	unsigned int fsize;
	unsigned long flags;

	if (fprog->filter == NULL)
		return -EINVAL;

	fsize = sizeof(struct sock_filter) * fprog->len;

	sf = kmalloc_node(fsize + sizeof(*sf), GFP_KERNEL, cpu_to_node(cpu));
	if (!sf)
		return -ENOMEM;

	memcpy(sf->insns, fprog->filter, fsize);
	atomic_set(&sf->refcnt, 1);
	sf->len = fprog->len;
	sf->bpf_func = sk_run_filter;

	err = sk_chk_filter(sf->insns, sf->len);
	if (err) {
		kfree(sf);
		return err;
	}

	fb_bpf_jit_compile(sf);

	spin_lock_irqsave(&fb_priv_cpu->flock, flags);
	sfold = fb_priv_cpu->filter;
	fb_priv_cpu->filter = sf;
	spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);

	if (sfold) {
		fb_bpf_jit_free(sfold);
		kfree(sfold);
	}

	return 0;
}
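Because the old filter is swapped out under the spinlock and only freed once the lock is dropped, a full teardown can reuse the same pattern with a NULL replacement. A hedged sketch of such a cleanup helper (fb_bpf_cleanup_filter is an assumption, not shown in the source):

static void fb_bpf_cleanup_filter(struct fb_bpf_priv __percpu *fb_priv_cpu)
{
	struct sk_filter *sfold;
	unsigned long flags;

	/* Swap in NULL under the lock; free the old filter outside it. */
	spin_lock_irqsave(&fb_priv_cpu->flock, flags);
	sfold = fb_priv_cpu->filter;
	fb_priv_cpu->filter = NULL;
	spin_unlock_irqrestore(&fb_priv_cpu->flock, flags);

	if (sfold) {
		fb_bpf_jit_free(sfold);
		kfree(sfold);
	}
}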
Example #5
static struct ib_cq *ntrdma_create_cq(struct ib_device *ibdev,
				      const struct ib_cq_init_attr *ibattr,
				      struct ib_ucontext *ibuctx,
				      struct ib_udata *ibudata)
{
	struct ntrdma_dev *dev = ntrdma_ib_dev(ibdev);
	struct ntrdma_cq *cq;
	u32 vbell_idx;
	int rc;

	cq = kmalloc_node(sizeof(*cq), GFP_KERNEL, dev->node);
	if (!cq) {
		rc = -ENOMEM;
		goto err_cq;
	}

	if (ibattr->comp_vector)
		vbell_idx = ibattr->comp_vector;
	else
		vbell_idx = ntrdma_dev_vbell_next(dev);

	rc = ntrdma_cq_init(cq, dev, vbell_idx);
	if (rc)
		goto err_init;

	rc = ntrdma_cq_add(cq);
	if (rc)
		goto err_add;

	ntrdma_dbg(dev, "added cq %p\n", cq);

	return &cq->ibcq;

	// ntrdma_cq_del(cq);
err_add:
	ntrdma_cq_deinit(cq);
err_init:
	kfree(cq);
err_cq:
	ntrdma_dbg(dev, "failed, returning err %d\n", rc);
	return ERR_PTR(rc);
}
Example #6
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node to allocate the cpumask on
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif
	/* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
	if (*mask) {
		unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
		unsigned int tail;
		tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
		memset(ptr + cpumask_size() - tail, 0, tail);
	}

	return *mask != NULL;
}
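A typical call site checks the boolean result and pairs the allocation with free_cpumask_var(). A short usage sketch (the surrounding function and the use of cpumask_of_node() are illustrative):

static int setup_node_mask(int node)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var_node(&mask, GFP_KERNEL, node))
		return -ENOMEM;

	cpumask_copy(mask, cpumask_of_node(node));
	/* ... use mask ... */

	free_cpumask_var(mask);
	return 0;
}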
/*
 * initialize elevator private data (deadline_data).
 */
static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
    struct deadline_data *dd;

    dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
    if (!dd)
        return NULL;
    memset(dd, 0, sizeof(*dd));

    INIT_LIST_HEAD(&dd->fifo_list[READ]);
    INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
    dd->sort_list[READ] = RB_ROOT;
    dd->sort_list[WRITE] = RB_ROOT;
    dd->fifo_expire[READ] = read_expire;
    dd->fifo_expire[WRITE] = write_expire;
    dd->writes_starved = writes_starved;
    dd->front_merges = 1;
    dd->fifo_batch = fifo_batch;
    return dd;
}
/*
 * initialize elevator private data (vr_data).
 */
static void *vr_init_queue(struct request_queue *q)
{
	struct vr_data *vd;

	vd = kmalloc_node(sizeof(*vd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!vd)
		return NULL;

	INIT_LIST_HEAD(&vd->fifo_list[SYNC]);
	INIT_LIST_HEAD(&vd->fifo_list[ASYNC]);
	vd->sort_list = RB_ROOT;

	vd->fifo_expire[SYNC] = sync_expire;
	vd->fifo_expire[ASYNC] = async_expire;
	vd->fifo_batch = fifo_batch;
	vd->rev_penalty = rev_penalty;

	return vd;
}
/*
 * row_init_queue() - Init scheduler data structures
 * @q:	requests queue
 *
 * Return pointer to struct row_data to be saved in elevator for
 * this dispatch queue
 *
 */
static void *row_init_queue(struct request_queue *q)
{
	struct row_data *rdata;
	int i;

	rdata = kmalloc_node(sizeof(*rdata),
			     GFP_KERNEL | __GFP_ZERO, q->node);
	if (!rdata)
		return NULL;

	for (i = 0; i < ROWQ_MAX_PRIO; i++) {
		INIT_LIST_HEAD(&rdata->row_queues[i].fifo);
		rdata->row_queues[i].disp_quantum = row_queues_def[i].quantum;
		rdata->row_queues[i].rdata = rdata;
		rdata->row_queues[i].prio = i;
		rdata->row_queues[i].idle_data.begin_idling = false;
		rdata->row_queues[i].idle_data.last_insert_time =
			ktime_set(0, 0);
	}

	/*
	 * Currently idling is enabled only for READ queues. If we want to
	 * enable it for write queues also, note that idling frequency will
	 * be the same in both cases
	 */
	rdata->rd_idle_data.idle_time_ms = ROW_IDLE_TIME_MSEC;
	rdata->rd_idle_data.freq_ms = ROW_READ_FREQ_MSEC;
	hrtimer_init(&rdata->rd_idle_data.hr_timer,
		CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rdata->rd_idle_data.hr_timer.function = &row_idle_hrtimer_fn;

	INIT_WORK(&rdata->rd_idle_data.idle_work, kick_queue);

	rdata->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
	rdata->dispatch_queue = q;

	rdata->nr_reqs[READ] = rdata->nr_reqs[WRITE] = 0;

	return rdata;
}
Example #10
static struct ib_pd *ntrdma_alloc_pd(struct ib_device *ibdev,
				     struct ib_ucontext *ibuctx,
				     struct ib_udata *ibudata)
{
	struct ntrdma_dev *dev = ntrdma_ib_dev(ibdev);
	struct ntrdma_pd *pd;
	int rc;

	ntrdma_vdbg(dev, "called\n");

	pd = kmalloc_node(sizeof(*pd), GFP_KERNEL, dev->node);
	if (!pd) {
		rc = -ENOMEM;
		goto err_pd;
	}

	ntrdma_vdbg(dev, "allocated pd %p\n", pd);

	rc = ntrdma_pd_init(pd, dev, dev->pd_next_key++);
	if (rc)
		goto err_init;

	ntrdma_vdbg(dev, "initialized pd %p\n", pd);

	rc = ntrdma_pd_add(pd);
	if (rc)
		goto err_add;

	ntrdma_dbg(dev, "added pd%d\n", pd->key);

	return &pd->ibpd;

	// ntrdma_pd_del(pd);
err_add:
	ntrdma_pd_deinit(pd);
err_init:
	kfree(pd);
err_pd:
	ntrdma_dbg(dev, "failed, returning err %d\n", rc);
	return ERR_PTR(rc);
}
Example #11
static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}
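amd_alloc_nb() is meant to run from CPU-hotplug preparation, where an allocation failure maps to NOTIFY_BAD. A sketch of such a caller, modeled on the x86 perf hotplug callback (amd_pmu_cpu_prepare and the cpu_hw_events layout are assumptions based on that pattern):

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	/* NB constraints only matter on multi-core parts. */
	if (boot_cpu_data.x86_max_cores < 2)
		return NOTIFY_OK;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}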
static void *
sio_init_queue(struct request_queue *q)
{
	struct sio_data *sd;
	sd = kmalloc_node(sizeof(*sd), GFP_KERNEL, q->node);
	if (!sd)
		return NULL;

	INIT_LIST_HEAD(&sd->fifo_list[SYNC][READ]);
	INIT_LIST_HEAD(&sd->fifo_list[SYNC][WRITE]);
	INIT_LIST_HEAD(&sd->fifo_list[ASYNC][READ]);
	INIT_LIST_HEAD(&sd->fifo_list[ASYNC][WRITE]);
	sd->batched = 0;
	sd->fifo_expire[SYNC][READ] = sync_read_expire;
	sd->fifo_expire[SYNC][WRITE] = sync_write_expire;
	sd->fifo_expire[ASYNC][READ] = async_read_expire;
	sd->fifo_expire[ASYNC][WRITE] = async_write_expire;
	sd->fifo_batch = fifo_batch;
	sd->writes_starved = writes_starved;
	return sd;
}
//set head position to 0
//get max size of request_queue q->nr_requests
//allocate space of arrays based on max size
//set counts to 0
//allocate space for table
static void *optimal_init_queue(struct request_queue *q)
{
	struct optimal_data *nd;
	int i, j, k;

	nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
	if (!nd)
		return NULL;
	/*
	for(i=0; i<2; i++){
		nd->counts[i] = 0;
	}
	*/
	nd->headpos.__sector = 0;
	nd->max_requests = q->nr_requests;
	nd->shouldBuild = 1;
	nd->currentNdx = 0;
	nd->dispatchSize = 0;
	/* Note: none of these nested allocations are checked for failure. */
	nd->C = kmalloc(nd->max_requests * sizeof(struct pathMember **), GFP_KERNEL);
	for (i = 0; i < nd->max_requests; i++) {
		nd->C[i] = kmalloc(nd->max_requests * sizeof(struct pathMember *), GFP_KERNEL);
		for (j = 0; j < nd->max_requests; j++)
			nd->C[i][j] = kmalloc(2 * sizeof(struct pathMember), GFP_KERNEL);
	}
	nd->mylist = kmalloc(nd->max_requests * sizeof(struct request *), GFP_KERNEL);
	nd->dispatch_head = kmalloc(nd->max_requests * sizeof(struct request *), GFP_KERNEL);
	for (i = 0; i < nd->max_requests; i++) {
		nd->C[i][i][0].head = kmalloc(sizeof(struct requestList), GFP_KERNEL);
		nd->C[i][i][1].head = kmalloc(sizeof(struct requestList), GFP_KERNEL);
	}
	for (k = 0; k < nd->max_requests - 1; k++) {
		for (i = 0, j = k + 1; j < nd->max_requests; i++, j++) {
			nd->C[i][j][0].head = kmalloc(sizeof(struct requestList), GFP_KERNEL);
			nd->C[i][j][1].head = kmalloc(sizeof(struct requestList), GFP_KERNEL);
		}
	}

	INIT_LIST_HEAD(&nd->arrival_queue);
	return nd;
}
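Every kmalloc() above needs a mirror-image kfree() at exit: the per-cell head lists cover the diagonal plus the upper triangle of C, while the C table itself is allocated as a full square. A sketch of the matching exit hook under the same struct layout (optimal_exit_queue is not in the snippet):

static void optimal_exit_queue(struct elevator_queue *e)
{
	struct optimal_data *nd = e->elevator_data;
	int i, j;

	/* Free per-cell request lists: diagonal and upper triangle. */
	for (i = 0; i < nd->max_requests; i++) {
		for (j = i; j < nd->max_requests; j++) {
			kfree(nd->C[i][j][0].head);
			kfree(nd->C[i][j][1].head);
		}
	}
	/* Then the full square of cells and the row pointers. */
	for (i = 0; i < nd->max_requests; i++) {
		for (j = 0; j < nd->max_requests; j++)
			kfree(nd->C[i][j]);
		kfree(nd->C[i]);
	}
	kfree(nd->C);
	kfree(nd->mylist);
	kfree(nd->dispatch_head);
	kfree(nd);
}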
static void *row_init_queue(struct request_queue *q)
{
	struct row_data *rdata;
	int i;

	rdata = kmalloc_node(sizeof(*rdata),
			     GFP_KERNEL | __GFP_ZERO, q->node);
	if (!rdata)
		return NULL;

	for (i = 0; i < ROWQ_MAX_PRIO; i++) {
		INIT_LIST_HEAD(&rdata->row_queues[i].fifo);
		rdata->row_queues[i].disp_quantum = row_queues_def[i].quantum;
		rdata->row_queues[i].rdata = rdata;
		rdata->row_queues[i].prio = i;
		rdata->row_queues[i].idle_data.begin_idling = false;
		rdata->row_queues[i].idle_data.last_insert_time =
			ktime_set(0, 0);
	}

	rdata->reg_prio_starvation.starvation_limit =
			ROW_REG_STARVATION_TOLLERANCE;
	rdata->low_prio_starvation.starvation_limit =
			ROW_LOW_STARVATION_TOLLERANCE;
	rdata->rd_idle_data.idle_time_ms = ROW_IDLE_TIME_MSEC;
	rdata->rd_idle_data.freq_ms = ROW_READ_FREQ_MSEC;
	hrtimer_init(&rdata->rd_idle_data.hr_timer,
		CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rdata->rd_idle_data.hr_timer.function = &row_idle_hrtimer_fn;

	INIT_WORK(&rdata->rd_idle_data.idle_work, kick_queue);
	rdata->last_served_ioprio_class = IOPRIO_CLASS_NONE;
	rdata->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
	rdata->dispatch_queue = q;

	return rdata;
}
Example #15
static int
sio_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct sio_data *sd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	/* Allocate structure */
	sd = kmalloc_node(sizeof(*sd), GFP_KERNEL, q->node);
	if (!sd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = sd;

	/* Initialize fifo lists */
	INIT_LIST_HEAD(&sd->fifo_list[SYNC][READ]);
	INIT_LIST_HEAD(&sd->fifo_list[SYNC][WRITE]);
	INIT_LIST_HEAD(&sd->fifo_list[ASYNC][READ]);
	INIT_LIST_HEAD(&sd->fifo_list[ASYNC][WRITE]);

	/* Initialize data */
	sd->batched = 0;
	sd->fifo_expire[SYNC][READ] = sync_read_expire;
	sd->fifo_expire[SYNC][WRITE] = sync_write_expire;
	sd->fifo_expire[ASYNC][READ] = async_read_expire;
	sd->fifo_expire[ASYNC][WRITE] = async_write_expire;
	sd->writes_starved = writes_starved;
	sd->fifo_batch = fifo_batch;
	
	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);

	return 0;
}
static void *
sio_init_queue(struct request_queue *q)
{
	struct sio_data *sd;

	/* Allocate structure */
	sd = kmalloc_node(sizeof(*sd), GFP_KERNEL, q->node);
	if (!sd)
		return NULL;

	/* Initialize fifo lists */
	INIT_LIST_HEAD(&sd->fifo_list[SYNC]);
	INIT_LIST_HEAD(&sd->fifo_list[ASYNC]);

	/* Initialize data */
	sd->batched = 0;
	sd->fifo_expire[SYNC] = sync_expire;
	sd->fifo_expire[ASYNC] = async_expire;
	sd->fifo_batch = fifo_batch;

	return sd;
}
Example #17
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       uint8_t ivlen,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	if (unlikely(!n))
		return -EINVAL;

	bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		bufl->bufers[bufs].addr = dma_map_single(dev,
							 sg_virt(sg),
							 sg->length,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}
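	/* ... remainder of function (sgl/sglout mapping and err unwind) elided ... */
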
static int __zswap_cpu_notifier(unsigned long action, unsigned long cpu)
{
	struct crypto_comp *tfm;
	u8 *dst;

	switch (action) {
	case CPU_UP_PREPARE:
		tfm = crypto_alloc_comp(zswap_compressor, 0, 0);
		if (IS_ERR(tfm)) {
			pr_err("can't allocate compressor transform\n");
			return NOTIFY_BAD;
		}
		*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = tfm;
		dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
		if (!dst) {
			pr_err("can't allocate compressor buffer\n");
			crypto_free_comp(tfm);
			*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
			return NOTIFY_BAD;
		}
		per_cpu(zswap_dstmem, cpu) = dst;
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu);
		if (tfm) {
			crypto_free_comp(tfm);
			*per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL;
		}
		dst = per_cpu(zswap_dstmem, cpu);
		kfree(dst);
		per_cpu(zswap_dstmem, cpu) = NULL;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
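__zswap_cpu_notifier() takes (action, cpu) rather than the notifier_block signature, so registration goes through a thin wrapper. A sketch of how it would be wired up via the standard hotcpu notifier API (the wrapper and block names are assumptions):

static int zswap_cpu_notifier(struct notifier_block *nb,
			      unsigned long action, void *pcpu)
{
	unsigned long cpu = (unsigned long)pcpu;

	return __zswap_cpu_notifier(action, cpu);
}

static struct notifier_block zswap_cpu_notifier_block = {
	.notifier_call = zswap_cpu_notifier,
};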
Example #19
static void pnv_alloc_idle_core_states(void)
{
	int i, j;
	int nr_cores = cpu_nr_cores();
	u32 *core_idle_state;

	/*
	 * core_idle_state - First 8 bits track the idle state of each thread
	 * of the core. The 8th bit is the lock bit. Initially all thread bits
	 * are set. They are cleared when the thread enters deep idle state
	 * like sleep and winkle. Initially the lock bit is cleared.
	 * The lock bit has 2 purposes
	 * a. While the first thread is restoring core state, it prevents
	 * other threads in the core from switching to process context.
	 * b. While the last thread in the core is saving the core state, it
	 * prevents a different thread from waking up.
	 */
	for (i = 0; i < nr_cores; i++) {
		int first_cpu = i * threads_per_core;
		int node = cpu_to_node(first_cpu);

		core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
		*core_idle_state = PNV_CORE_IDLE_THREAD_BITS;

		for (j = 0; j < threads_per_core; j++) {
			int cpu = first_cpu + j;

			paca[cpu].core_idle_state_ptr = core_idle_state;
			paca[cpu].thread_idle_state = PNV_THREAD_RUNNING;
			paca[cpu].thread_mask = 1 << j;
		}
	}

	update_subcore_sibling_mask();

	if (supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED)
		pnv_save_sprs_for_winkle();
}
static int greedy_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct greedy_data *nd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
	if (!nd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = nd;

	INIT_LIST_HEAD(&nd->lower_queue);
	INIT_LIST_HEAD(&nd->upper_queue);
	nd->disk_head = 0ul;

	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);
	return 0;
}
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}
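As the comment notes, these buffers can be read from NMI context, so they may only be freed after an RCU grace period. A sketch of the matching release path under that assumption (it presumes an rcu_head field inside callchain_cpus_entries, which is not shown here):

static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries = callchain_cpus_entries;

	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}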
Example #22
/*
 * initialize elevator private data (vr_data).
 */
static void *vr_init_queue(struct request_queue *q)
{
	struct vr_data *vd;

	vd = kmalloc_node(sizeof(*vd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!vd)
		return NULL;

	INIT_LIST_HEAD(&vd->fifo_list[SYNC]);
	INIT_LIST_HEAD(&vd->fifo_list[ASYNC]);
	vd->sort_list = RB_ROOT;

	load_prev_screen_on = isload_prev_screen_on();
	if (load_prev_screen_on == 2)
	{
		vd->fifo_expire[SYNC] = gsched_vars[0] / 5;
		vd->fifo_expire[ASYNC] = gsched_vars[1] / 5;
		vd->fifo_batch = gsched_vars[2];
		vd->rev_penalty = gsched_vars[3];
	}
	else
	{
		vd->fifo_expire[SYNC] = sync_expire;
		vd->fifo_expire[ASYNC] = async_expire;
		vd->fifo_batch = fifo_batch;
		vd->rev_penalty = rev_penalty;
		if (load_prev_screen_on == 0)
		{
			gsched_vars[0] = vd->fifo_expire[SYNC] * 5;
			gsched_vars[1] = vd->fifo_expire[ASYNC] * 5;
			gsched_vars[2] = vd->fifo_batch;
			gsched_vars[3] = vd->rev_penalty;
		}
	}
	return vd;
}
Example #23
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;
	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
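Usage mirrors mempool_create(), with the extra NUMA node hint. For example, an 8-element pool backed by an existing slab cache via the stock adapters mempool_alloc_slab/mempool_free_slab (my_cachep and nid are placeholders):

/* Hypothetical caller: an 8-element pool backed by my_cachep on node nid. */
static mempool_t *create_node_pool(struct kmem_cache *my_cachep, int nid)
{
	return mempool_create_node(8, mempool_alloc_slab, mempool_free_slab,
				   my_cachep, GFP_KERNEL, nid);
}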
Example #24
File: dtl.c Project: 710leo/LVS
static int dtl_enable(struct dtl *dtl)
{
	long int n_entries;
	long int rc;
	struct dtl_entry *buf = NULL;

	/* only allow one reader */
	if (dtl->buf)
		return -EBUSY;

	n_entries = dtl_buf_entries;
	buf = kmalloc_node(n_entries * sizeof(struct dtl_entry),
			GFP_KERNEL, cpu_to_node(dtl->cpu));
	if (!buf) {
		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
				__func__, dtl->cpu);
		return -ENOMEM;
	}

	spin_lock(&dtl->lock);
	rc = -EBUSY;
	if (!dtl->buf) {
		/* store the original allocation size for use during read */
		dtl->buf_entries = n_entries;
		dtl->buf = buf;
		dtl->last_idx = 0;
		rc = dtl_start(dtl);
		if (rc)
			dtl->buf = NULL;
	}
	spin_unlock(&dtl->lock);

	if (rc)
		kfree(buf);
	return rc;
}
Example #25
/**
 * init_tti_timers_array() - initialize a timer array
 * @base_tti: the initial TTI for the timer base
 *
 * init_tti_timers_array() must be called on a timer base prior to calling
 * *any* of the other timer array functions.
 */
int init_tti_timers_array(unsigned long base_tti)
{
	int j;
	long cpu = (long)smp_processor_id();
	struct tti_tvec_base *base;
	//BUG_ON(_tti_tvec_base != NULL);

	base = kmalloc_node(sizeof(*base),
			    GFP_KERNEL | __GFP_ZERO,
			    cpu_to_node(cpu));
	if (!base)
		return -ENOMEM;

	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv.vec + j);

	base->timer_jiffies = base_tti;
	base->next_timer = base->timer_jiffies;

	_tti_tvec_base = base;
	CDBG("_tti_tvec_base %p\n", _tti_tvec_base);

	return 0;
}
Example #26
static void pnv_alloc_idle_core_states(void)
{
	int i, j;
	int nr_cores = cpu_nr_cores();
	u32 *core_idle_state;

	/*
	 * core_idle_state - The lower 8 bits track the idle state of
	 * each thread of the core.
	 *
	 * The most significant bit is the lock bit.
	 *
	 * Initially all the bits corresponding to threads_per_core
	 * are set. They are cleared when the thread enters deep idle
	 * state like sleep and winkle/stop.
	 *
	 * Initially the lock bit is cleared.  The lock bit has 2
	 * purposes:
	 * 	a. While the first thread in the core waking up from
	 * 	   idle is restoring core state, it prevents other
	 * 	   threads in the core from switching to process
	 * 	   context.
	 * 	b. While the last thread in the core is saving the
	 *	   core state, it prevents a different thread from
	 *	   waking up.
	 */
	for (i = 0; i < nr_cores; i++) {
		int first_cpu = i * threads_per_core;
		int node = cpu_to_node(first_cpu);
		size_t paca_ptr_array_size;

		core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
		*core_idle_state = (1 << threads_per_core) - 1;
		paca_ptr_array_size = (threads_per_core *
				       sizeof(struct paca_struct *));

		for (j = 0; j < threads_per_core; j++) {
			int cpu = first_cpu + j;

			paca_ptrs[cpu]->core_idle_state_ptr = core_idle_state;
			paca_ptrs[cpu]->thread_idle_state = PNV_THREAD_RUNNING;
			paca_ptrs[cpu]->thread_mask = 1 << j;
			if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
				continue;
			paca_ptrs[cpu]->thread_sibling_pacas =
				kmalloc_node(paca_ptr_array_size,
					     GFP_KERNEL, node);
		}
	}

	update_subcore_sibling_mask();

	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
		int rc = pnv_save_sprs_for_deep_states();

		if (likely(!rc))
			return;

		/*
		 * The stop-api is unable to restore hypervisor
		 * resources on wakeup from platform idle states which
		 * lose full context. So disable such states.
		 */
		supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT;
		pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n");
		pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n");

		if (cpu_has_feature(CPU_FTR_ARCH_300) &&
		    (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) {
			/*
			 * Use the default stop state for CPU-Hotplug
			 * if available.
			 */
			if (default_stop_found) {
				pnv_deepest_stop_psscr_val =
					pnv_default_stop_val;
				pnv_deepest_stop_psscr_mask =
					pnv_default_stop_mask;
				pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n",
					pnv_deepest_stop_psscr_val);
			} else { /* Fallback to snooze loop for CPU-Hotplug */
				deepest_stop_found = false;
				pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n");
			}
		}
	}
}
Example #27
int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
		   struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_fmr *fmr = &ibmr->u.fmr;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > ibmr->pool->fmr_attr.max_pages)
		return -EINVAL;

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping.
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}
Example #28
static int ntb_perf_thread(void *data)
{
	struct pthr_ctx *pctx = data;
	struct perf_ctx *perf = pctx->perf;
	struct pci_dev *pdev = perf->ntb->pdev;
	struct perf_mw *mw = &perf->mw;
	char __iomem *dst;
	u64 win_size, buf_size, total;
	void *src;
	int rc, node, i;
	struct dma_chan *dma_chan = NULL;

	pr_debug("kthread %s starting...\n", current->comm);

	node = dev_to_node(&pdev->dev);

	if (use_dma && !pctx->dma_chan) {
		dma_cap_mask_t dma_mask;

		dma_cap_zero(dma_mask);
		dma_cap_set(DMA_MEMCPY, dma_mask);
		dma_chan = dma_request_channel(dma_mask, perf_dma_filter_fn,
					       (void *)(unsigned long)node);
		if (!dma_chan) {
			pr_warn("%s: cannot acquire DMA channel, quitting\n",
				current->comm);
			return -ENODEV;
		}
		pctx->dma_chan = dma_chan;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		pctx->srcs[i] = kmalloc_node(MAX_TEST_SIZE, GFP_KERNEL, node);
		if (!pctx->srcs[i]) {
			rc = -ENOMEM;
			goto err;
		}
	}

	win_size = mw->phys_size;
	buf_size = 1ULL << seg_order;
	total = 1ULL << run_order;

	if (buf_size > MAX_TEST_SIZE)
		buf_size = MAX_TEST_SIZE;

	dst = (char __iomem *)mw->vbase;

	atomic_inc(&perf->tsync);
	while (atomic_read(&perf->tsync) != perf->perf_threads)
		schedule();

	src = pctx->srcs[pctx->src_idx];
	pctx->src_idx = (pctx->src_idx + 1) & (MAX_SRCS - 1);

	rc = perf_move_data(pctx, dst, src, buf_size, win_size, total);

	atomic_dec(&perf->tsync);

	if (rc < 0) {
		pr_err("%s: failed\n", current->comm);
		rc = -ENXIO;
		goto err;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	atomic_inc(&perf->tdone);
	wake_up(pctx->wq);
	rc = 0;
	goto done;

err:
	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	if (dma_chan) {
		dma_release_channel(dma_chan);
		pctx->dma_chan = NULL;
	}

done:
	/* Wait until we are told to stop */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return rc;
}
Example #29
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring, u32 size,
			   u16 stride, int node, int queue_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	int tmp;
	int err;

	ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
		if (!ring) {
			en_err(priv, "Failed allocating TX ring\n");
			return -ENOMEM;
		}
	}

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
#ifdef CONFIG_RATELIMIT
	ring->rl_data.rate_index = 0;
	/* User_valid should be false in a rate_limit ring until the
	 * creation process of the ring is done, after the activation. */
	if (queue_idx < priv->native_tx_ring_num)
		ring->rl_data.user_valid = true;
	else
		ring->rl_data.user_valid = false;
#endif
	ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
	ring->inline_thold = min(inline_thold, MAX_INLINE);
	mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
	mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);

	/* Allocate the buf ring */
#ifdef CONFIG_RATELIMIT
	if (queue_idx < priv->native_tx_ring_num)
		ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
					  M_WAITOK, &ring->tx_lock.m);
	else
		ring->br = buf_ring_alloc(size / 4, M_DEVBUF, M_WAITOK,
					  &ring->tx_lock.m);
#else
	ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
				  M_WAITOK, &ring->tx_lock.m);
#endif
	if (ring->br == NULL) {
		en_err(priv, "Failed allocating tx_info ring\n");
		return -ENOMEM;
	}

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc_node(tmp, node);
	if (!ring->tx_info) {
		ring->tx_info = vmalloc(tmp);
		if (!ring->tx_info) {
			err = -ENOMEM;
			goto err_ring;
		}
	}

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
		 ring->tx_info, tmp);

	ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
	if (!ring->bounce_buf) {
		ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
		if (!ring->bounce_buf) {
			err = -ENOMEM;
			goto err_info;
		}
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	/* Allocate HW buffers on provided NUMA node */
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
				    MLX4_RESERVE_BF_QP);
	if (err) {
		en_err(priv, "failed reserving qp for TX ring\n");
		goto err_map;
	}

	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_reserve;
	}
	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;
	ring->queue_index = queue_idx;
	if (queue_idx < priv->num_tx_rings_p_up)
		CPU_SET(queue_idx, &ring->affinity_mask);

	*pring = ring;
	return 0;

err_reserve:
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
err_info:
	vfree(ring->tx_info);
err_ring:
	buf_ring_free(ring->br, M_DEVBUF);
	kfree(ring);
	return err;
}
Example #30
File: pci.c Project: E-LLP/n900
struct pci_bus * __devinit
pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
{
	struct pci_controller *controller;
	unsigned int windows = 0;
	struct pci_bus *pbus;
	char *name;
	int pxm;

	controller = alloc_pci_controller(domain);
	if (!controller)
		goto out1;

	controller->acpi_handle = device->handle;

	pxm = acpi_get_pxm(controller->acpi_handle);
#ifdef CONFIG_NUMA
	if (pxm >= 0)
		controller->node = pxm_to_node(pxm);
#endif

	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
			&windows);
	if (windows) {
		struct pci_root_info info;

		controller->window =
			kmalloc_node(sizeof(*controller->window) * windows,
				     GFP_KERNEL, controller->node);
		if (!controller->window)
			goto out2;

		name = kmalloc(16, GFP_KERNEL);
		if (!name)
			goto out3;

		sprintf(name, "PCI Bus %04x:%02x", domain, bus);
		info.controller = controller;
		info.name = name;
		acpi_walk_resources(device->handle, METHOD_NAME__CRS,
			add_window, &info);
	}
	/*
	 * See arch/x86/pci/acpi.c.
	 * The desired pci bus might already be scanned in a quirk. We
	 * should handle the case here, but it appears that IA64 doesn't
	 * have such a quirk, so we just ignore the case for now.
	 */
	pbus = pci_scan_bus_parented(NULL, bus, &pci_root_ops, controller);
	if (pbus)
		pcibios_setup_root_windows(pbus, controller);

	return pbus;

out3:
	kfree(controller->window);
out2:
	kfree(controller);
out1:
	return NULL;
}