Example #1
File: smp.c Project: Lyude/linux
int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}
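The kernel pairs this prepare hook with a teardown hook on the CPU hotplug path. A minimal sketch of what that counterpart looks like, assuming the same cfd_data fields as above:

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	/* Release the node-local cpumasks, then the per-cpu csd area. */
	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}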
Example #2
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
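For context, a hedged sketch of how a caller might use this API with the two-argument signature shown above; the watched variable and the handler name are illustrative, not part of the original:

static int watched_var;				/* hypothetical variable to watch */
static struct perf_event * __percpu *wide_bp;

static void bp_triggered(struct perf_event *bp,
			 struct perf_sample_data *data,
			 struct pt_regs *regs)
{
	pr_info("write to watched_var detected\n");
}

static int __init wide_bp_example_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);	/* sane defaults for a HW breakpoint */
	attr.bp_addr = (unsigned long)&watched_var;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	wide_bp = register_wide_hw_breakpoint(&attr, bp_triggered);
	if (IS_ERR((void __force *)wide_bp))
		return PTR_ERR((void __force *)wide_bp);
	return 0;
}

Note the error check: since the return value is a __percpu pointer, the caller casts it with __force before IS_ERR, mirroring the ERR_PTR casts inside the function itself.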
Example #3
File: arm.c Project: mdamt/linux
/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret, cpu;

	if (type)
		return -EINVAL;

	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
	if (!kvm->arch.last_vcpu_ran)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;

	ret = kvm_alloc_stage2_pgd(kvm);
	if (ret)
		goto out_fail_alloc;

	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
	if (ret)
		goto out_free_stage2_pgd;

	kvm_vgic_early_init(kvm);

	/* Mark the initial VMID generation invalid */
	kvm->arch.vmid_gen = 0;

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->arch.max_vcpus = vgic_present ?
				kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(kvm);
out_fail_alloc:
	free_percpu(kvm->arch.last_vcpu_ran);
	kvm->arch.last_vcpu_ran = NULL;
	return ret;
}
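The destroy path must mirror the out_fail_alloc unwind above. A partial sketch of the matching kvm_arch_destroy_vm, with the vCPU and vgic teardown omitted for brevity:

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	/* ... vCPU and vgic teardown omitted ... */

	/* Release the per-cpu tracking area allocated in kvm_arch_init_vm. */
	free_percpu(kvm->arch.last_vcpu_ran);
	kvm->arch.last_vcpu_ran = NULL;
}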
Example #4
/**
 * irq_alloc_matrix - Allocate an irq_matrix structure and initialize it
 * @matrix_bits:	Number of matrix bits; must be <= IRQ_MATRIX_BITS
 * @alloc_start:	From which bit the allocation search starts
 * @alloc_end:		At which bit the allocation search ends, i.e. the first
 *			invalid bit
 */
__init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
					   unsigned int alloc_start,
					   unsigned int alloc_end)
{
	struct irq_matrix *m;

	if (matrix_bits > IRQ_MATRIX_BITS)
		return NULL;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m)
		return NULL;

	m->matrix_bits = matrix_bits;
	m->alloc_start = alloc_start;
	m->alloc_end = alloc_end;
	m->alloc_size = alloc_end - alloc_start;
	m->maps = alloc_percpu(*m->maps);
	if (!m->maps) {
		kfree(m);
		return NULL;
	}
	return m;
}
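A hedged sketch of a boot-time caller; the start/end constants here are illustrative (on x86, for instance, the matrix is sized by the interrupt vector space):

static struct irq_matrix *vector_matrix;

static int __init example_matrix_init(void)
{
	/* Track an IRQ_MATRIX_BITS-wide per-cpu map, searching bits [32, 224). */
	vector_matrix = irq_alloc_matrix(IRQ_MATRIX_BITS, 32, 224);
	if (!vector_matrix)
		return -ENOMEM;
	return 0;
}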
Example #5
File: sock.c Project: pfq/PFQ
int pfq_sock_init(struct pfq_sock *so, pfq_id_t id, size_t caplen, size_t xmitlen)
{
	int i;

	/* setup stats */

	so->stats = alloc_percpu(pfq_sock_stats_t);
	if (!so->stats)
		return -ENOMEM;

	for_each_present_cpu(i)
	{
		pfq_sock_stats_t * stat = per_cpu_ptr(so->stats, i);

		local_set(&stat->recv, 0);
		local_set(&stat->lost, 0);
		local_set(&stat->drop, 0);
		local_set(&stat->sent, 0);
		local_set(&stat->disc, 0);
		local_set(&stat->fail, 0);
		local_set(&stat->frwd, 0);
		local_set(&stat->kern, 0);
	}

	/* setup id */

	so->id = id;

	/* memory mapped queues are allocated later, when the socket is enabled */

	so->egress_type = Q_ENDPOINT_SOCKET;
	so->egress_index = 0;
	so->egress_queue = 0;

	/* default weight */

	so->weight = 1;

	so->shmem.addr = NULL;
	so->shmem.size = 0;
	so->shmem.kind = 0;
	so->shmem.hugepages_descr = NULL;

	atomic_long_set(&so->shmem_addr, 0);

	/* disable timestamping by default */

	so->tstamp = false;

	/* initialize waitqueue */

	pfq_sock_init_waitqueue_head(&so->waitqueue);

	/* Rx queue setup */

	pfq_queue_info_init(&so->rx);

	so->rx_len = caplen;
	so->rx_queue_len = 0;
	so->rx_slot_size  = PFQ_SHARED_QUEUE_SLOT_SIZE(caplen);

	/* Tx queues setup */

	pfq_queue_info_init(&so->tx);

	so->tx_len = xmitlen;
	so->tx_queue_len  = 0;
	so->tx_slot_size  = PFQ_SHARED_QUEUE_SLOT_SIZE(xmitlen);
	so->txq_num_async = 0;

	/* Tx async queues setup */

	for(i = 0; i < Q_MAX_TX_QUEUES; ++i)
	{
		pfq_queue_info_init(&so->tx_async[i]);
	}
	return 0;
}
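Since the only failure-prone allocation here is so->stats, the matching destructor just has to release the per-cpu area. A minimal sketch, with pfq_sock_fini as a hypothetical name for that counterpart:

void pfq_sock_fini(struct pfq_sock *so)
{
	/* Release the per-cpu stats allocated in pfq_sock_init. */
	free_percpu(so->stats);
	so->stats = NULL;
}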