Example #1
static struct vport *internal_dev_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct internal_dev *internal_dev;
	int err;

	vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		goto error;
	}

	vport->dev = alloc_netdev(sizeof(struct internal_dev),
				  parms->name, NET_NAME_UNKNOWN, do_setup);
	if (!vport->dev) {
		err = -ENOMEM;
		goto error_free_vport;
	}
	vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!vport->dev->tstats) {
		err = -ENOMEM;
		goto error_free_netdev;
	}

	dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
	internal_dev = internal_dev_priv(vport->dev);
	internal_dev->vport = vport;

	/* Restrict bridge port to current netns. */
	if (vport->port_no == OVSP_LOCAL)
		vport->dev->features |= NETIF_F_NETNS_LOCAL;

	rtnl_lock();
	err = register_netdevice(vport->dev);
	if (err)
		goto error_unlock;

	dev_set_promiscuity(vport->dev, 1);
	rtnl_unlock();
	netif_start_queue(vport->dev);

	return vport;

error_unlock:
	rtnl_unlock();
	free_percpu(vport->dev->tstats);
error_free_netdev:
	free_netdev(vport->dev);
error_free_vport:
	ovs_vport_free(vport);
error:
	return ERR_PTR(err);
}
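A note on the pattern above: each successfully acquired resource gets its own error label, and a failure jumps to the label that releases everything acquired so far, in reverse order of acquisition. A minimal generic sketch of the idiom (alloc_a, alloc_b, combine and friends are hypothetical helpers, not from any listed project):

static void *create_object(void)
{
	void *a, *b;
	int err;

	a = alloc_a();			/* hypothetical first resource */
	if (!a) {
		err = -ENOMEM;
		goto error;
	}

	b = alloc_b();			/* hypothetical second resource */
	if (!b) {
		err = -ENOMEM;
		goto error_free_a;	/* undo only what succeeded */
	}

	return combine(a, b);

error_free_a:
	free_a(a);
error:
	return ERR_PTR(err);
}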
Example #2
void zcomp_destroy(struct zcomp *comp)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		__zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu);
	__unregister_cpu_notifier(&comp->notifier);
	cpu_notifier_register_done();

	free_percpu(comp->stream);
	kfree(comp);
}
Example #3
/* pseries_idle_devices_uninit(void)
 * unregister cpuidle devices and de-allocate memory
 */
static void pseries_idle_devices_uninit(void)
{
	int i;
	struct cpuidle_device *dev;

	for_each_possible_cpu(i) {
		dev = per_cpu_ptr(pseries_cpuidle_devices, i);
		cpuidle_unregister_device(dev);
	}

	free_percpu(pseries_cpuidle_devices);
}
Example #4
int ip_tunnel_init(struct net_device *dev)
{
    struct ip_tunnel *tunnel = netdev_priv(dev);
    struct iphdr *iph = &tunnel->parms.iph;
    int i, err;

    dev->destructor	= ip_tunnel_dev_free;
    dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
    if (!dev->tstats)
        return -ENOMEM;

    for_each_possible_cpu(i) {
        struct pcpu_sw_netstats *ipt_stats;
        ipt_stats = per_cpu_ptr(dev->tstats, i);
        u64_stats_init(&ipt_stats->syncp);
    }

    tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
    if (!tunnel->dst_cache) {
        free_percpu(dev->tstats);
        return -ENOMEM;
    }

    err = gro_cells_init(&tunnel->gro_cells, dev);
    if (err) {
        free_percpu(tunnel->dst_cache);
        free_percpu(dev->tstats);
        return err;
    }

    tunnel->dev = dev;
    tunnel->net = dev_net(dev);
    strcpy(tunnel->parms.name, dev->name);
    iph->version		= 4;
    iph->ihl		= 5;

    return 0;
}
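The for_each_possible_cpu()/u64_stats_init() loop above is exactly what the netdev_alloc_pcpu_stats() macro used in Examples #1 and #11 folds into a single step. From include/linux/netdevice.h, roughly (whitespace and alignment may differ by kernel version):

#define netdev_alloc_pcpu_stats(type)				\
({								\
	typeof(type) __percpu *pcpu_stats = alloc_percpu(type);\
	if (pcpu_stats) {					\
		int __cpu;					\
		for_each_possible_cpu(__cpu) {			\
			typeof(type) *stat;			\
			stat = per_cpu_ptr(pcpu_stats, __cpu);	\
			u64_stats_init(&stat->syncp);		\
		}						\
	}							\
	pcpu_stats;						\
})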
Example #5
File: sock.c Project: pfq/PFQ
void pfq_sock_destruct(struct sock *sk)
{
	struct pfq_sock *so = pfq_sk(sk);

	free_percpu(so->stats);
	so->stats = NULL;

	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	sk_refcnt_debug_dec(sk);
}
Example #6
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev;

	if (!dev_queue) {
		NL_SET_ERR_MSG(extack, "No device queue given");
		err = -EINVAL;
		goto errout;
	}

	dev = dev_queue->dev;
	p = kzalloc_node(size, GFP_KERNEL,
			 netdev_queue_numa_node_read(dev_queue));

	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	/* if we got non-aligned memory, ask for more and do the alignment ourselves */
	if (sch != p) {
		kfree(p);
		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
				 netdev_queue_numa_node_read(dev_queue));
		if (!p)
			goto errout;
		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
		sch->padded = (char *) sch - (char *) p;
	}
	__skb_queue_head_init(&sch->gso_skb);
	__skb_queue_head_init(&sch->skb_bad_txq);
	qdisc_skb_head_init(&sch->q);
	spin_lock_init(&sch->q.lock);

	if (ops->static_flags & TCQ_F_CPUSTATS) {
		sch->cpu_bstats =
			netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!sch->cpu_bstats)
			goto errout1;

		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!sch->cpu_qstats) {
			free_percpu(sch->cpu_bstats);
			goto errout1;
		}
	}

	/* tail truncated in the listing; upstream continues roughly as
	 * follows (busylock/seqcount initialisation elided) */
	sch->ops = ops;
	sch->flags = ops->static_flags;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(dev);
	refcount_set(&sch->refcnt, 1);

	return sch;

errout1:
	kfree(p);
errout:
	return ERR_PTR(err);
}
Example #7
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}
Example #8
static void vlan_dev_uninit(struct net_device *dev)
{
	struct vlan_priority_tci_mapping *pm;
	struct vlan_dev_info *vlan = vlan_dev_info(dev);
	int i;

	free_percpu(vlan->vlan_rx_stats);
	vlan->vlan_rx_stats = NULL;
	for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
		while ((pm = vlan->egress_priority_map[i]) != NULL) {
			vlan->egress_priority_map[i] = pm->next;
			kfree(pm);
		}
	}
}
Example #9
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net_device *port_dev;
	struct list_head *iter;

	vrf_rtable_release(vrf);
	vrf_rt6_release(vrf);

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}
Example #10
void ehca_destroy_comp_pool(void)
{
	int i;

	if (!ehca_scaling_code)
		return;

	unregister_hotcpu_notifier(&comp_pool_callback_nb);

	for_each_online_cpu(i)
		destroy_comp_task(pool, i);

	free_percpu(pool->cpu_comp_tasks);
	kfree(pool);
}
Example #11
int ip_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;
	int err;

	dev->destructor	= ip_tunnel_dev_free;
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
	if (!tunnel->dst_cache) {
		free_percpu(dev->tstats);
		return -ENOMEM;
	}

	err = gro_cells_init(&tunnel->gro_cells, dev);
	if (err) {
		free_percpu(tunnel->dst_cache);
		free_percpu(dev->tstats);
		return err;
	}

	tunnel->dev = dev;
	tunnel->net = dev_net(dev);
	strcpy(tunnel->parms.name, dev->name);
	iph->version		= 4;
	iph->ihl		= 5;

	if (tunnel->collect_md) {
		dev->features |= NETIF_F_NETNS_LOCAL;
		netif_keep_dst(dev);
	}
	return 0;
}
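The dev->destructor wired up above undoes the init in reverse; in the ip_tunnel.c of this era it looks roughly like this (a sketch from memory of the upstream code, not taken from the listing):

static void ip_tunnel_dev_free(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	gro_cells_destroy(&tunnel->gro_cells);
	free_percpu(tunnel->dst_cache);
	free_percpu(dev->tstats);
	free_netdev(dev);
}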
Example #12
static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct slave_queue *queue = &vrf->queue;
	struct list_head *head = &queue->all_slaves;
	struct slave *slave, *next;

	vrf_rtable_destroy(vrf);
	vrf_rt6_destroy(vrf);

	list_for_each_entry_safe(slave, next, head, list)
		vrf_del_slave(dev, slave->dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}
Example #13
static void free_iova_flush_queue(struct iova_domain *iovad)
{
	if (!iovad->fq)
		return;

	if (timer_pending(&iovad->fq_timer))
		del_timer(&iovad->fq_timer);

	fq_destroy_all_entries(iovad);

	free_percpu(iovad->fq);

	iovad->fq         = NULL;
	iovad->flush_cb   = NULL;
	iovad->entry_dtor = NULL;
}
Example #14
static void trace_freeup(int level)
{
	int i;

	if (level & 0x1)
		dma_free_coherent(NULL, trace_log_data.sz,
			trace_log_data.start_vaddr,
			trace_log_data.start_paddr);

	if (level & 0x2)
		for (i = 0; i < MAX_TRACES; i++)
			free_percpu(trace_log_data.index[i]);

	if (level & 0x4)
		unregister_tracepoints();
}
Example #15
File: arm.c Project: mdamt/linux
/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	free_percpu(kvm->arch.last_vcpu_ran);
	kvm->arch.last_vcpu_ran = NULL;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}

	kvm_vgic_destroy(kvm);
}
Example #16
void ehca_destroy_comp_pool(void)
{
	int i;

	if (!ehca_scaling_code)
		return;

	unregister_hotcpu_notifier(&comp_pool_callback_nb);

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			destroy_comp_task(pool, i);
	}
	free_percpu(pool->cpu_comp_tasks);
	kfree(pool);
}
Example #17
static void ipcomp_free_scratches(void)
{
	int i;
	void * __percpu *scratches;

	if (--ipcomp_scratch_users)
		return;

	scratches = ipcomp_scratches;
	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}
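The allocation side that this teardown undoes pairs an alloc_percpu() of a pointer type with one vmalloc'ed buffer per CPU; alloc_percpu() zeroes the slots, so a partially filled array can still be freed safely. A hedged sketch close to the upstream ipcomp_alloc_scratches():

static void * __percpu *ipcomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	int i;

	if (ipcomp_scratch_users++)
		return ipcomp_scratches;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	ipcomp_scratches = scratches;

	for_each_possible_cpu(i) {
		void *scratch;

		scratch = vmalloc_node(IPCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			return NULL;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;
}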
Example #18
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node)
{
#if defined(CONFIG_KSTAT_IRQS_ONDEMAND)
	struct irq_desc *desc;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		desc = irq_to_desc(start + i);
		if (desc && !desc->kstat_irqs) {
			unsigned int __percpu *stats = alloc_percpu(unsigned int);

			if (!stats)
				return -1;
			if (cmpxchg(&desc->kstat_irqs, NULL, stats) != NULL)
				free_percpu(stats);
		}
	}
#endif
	return start;
}
Example #19
static void free_plum(struct plum *plum, u8 flags)
{
	int i;

	if (flags & PLUM_DATA) {
		for (i = 0; i < PLUM_MAX_PORTS; i++)
			free_percpu(plum->stats[i]);

		kfree(plum->replicators);
	}

	if (flags & PLUM_TABLES)
		free_plum_tables(plum);

	bpf_free(plum->bpf_prog);
	kfree(plum);
}
Example #20
static int __init twd_local_timer_common_register(struct device_node *np)
{
	int err;

	twd_evt = alloc_percpu(struct clock_event_device);
	if (!twd_evt) {
		err = -ENOMEM;
		goto out_free;
	}

	err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
	if (err) {
		pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
		goto out_free;
	}

	err = register_cpu_notifier(&twd_timer_cpu_nb);
	if (err)
		goto out_irq;

#ifndef CONFIG_ARCH_CNS3XXX
	twd_get_clock(np);
#endif

	/*
	 * Immediately configure the timer on the boot CPU, unless we need
	 * jiffies to be incrementing to calibrate the rate in which case
	 * setup the timer in late_time_init.
	 */
	if (twd_timer_rate)
		twd_timer_setup();
	else
		late_time_init = twd_timer_setup;

	return 0;

out_irq:
	free_percpu_irq(twd_ppi, twd_evt);
out_free:
	iounmap(twd_base);
	twd_base = NULL;
	free_percpu(twd_evt);

	return err;
}
Example #21
void ehca_destroy_comp_pool(void)
{
    int i;

    if (!ehca_scaling_code)
        return;

#ifdef CONFIG_HOTPLUG_CPU
    unregister_cpu_notifier(&comp_pool_callback_nb);
#endif

    for (i = 0; i < NR_CPUS; i++) {
        if (cpu_online(i))
            destroy_comp_task(pool, i);
    }
    free_percpu(pool->cpu_comp_tasks);
    kfree(pool);
}
Example #22
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}
Example #23
File: raw.c Project: 020gzh/linux
static int raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro;

	if (!sk)
		return 0;

	ro = raw_sk(sk);

	unregister_netdevice_notifier(&ro->notifier);

	lock_sock(sk);

	/* remove current filters & unregister */
	if (ro->bound) {
		if (ro->ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ro->ifindex);
			if (dev) {
				raw_disable_allfilters(dev, sk);
				dev_put(dev);
			}
		} else
			raw_disable_allfilters(NULL, sk);
	}

	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound   = 0;
	ro->count   = 0;
	free_percpu(ro->uniq);

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}
Example #24
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}
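Note that the init above frees the per-CPU state itself only when perf_pmu_register() fails; on success the pointer stays in perf_ibs->pcpu. A hedged sketch of the matching teardown (the function name is hypothetical, mirroring the init):

static void perf_ibs_pmu_exit(struct perf_ibs *perf_ibs)
{
	perf_pmu_unregister(&perf_ibs->pmu);	/* stop new events first */
	free_percpu(perf_ibs->pcpu);
	perf_ibs->pcpu = NULL;
}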
Example #25
/** 20140920
 * Register twd as a percpu irq and register it as the local timer.
 **/
static int __init twd_local_timer_common_register(void)
{
	int err;

	/** 20140913
	 * Allocate a percpu variable for clock_event_device pointers.
	 **/
	twd_evt = alloc_percpu(struct clock_event_device *);
	if (!twd_evt) {
		err = -ENOMEM;
		goto out_free;
	}

	/** 20140920
	 * Register twd_ppi (IRQ_LOCALTIMER, 29) as a percpu irq.
	 * The handler is twd_handler.
	 * The dev_id is twd_evt.
	 **/
	err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
	if (err) {
		pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
		goto out_free;
	}

	/** 20140920
	 * Install twd_lt_ops as the local timer operations (lt_ops).
	 **/
	err = local_timer_register(&twd_lt_ops);
	if (err)
		goto out_irq;

	return 0;

out_irq:
	free_percpu_irq(twd_ppi, twd_evt);
out_free:
	iounmap(twd_base);
	twd_base = NULL;
	free_percpu(twd_evt);

	return err;
}
Example #26
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock protects also show_interrupts() and
	 * kstat_irq_usr(). Once we deleted the descriptor from the
	 * sparse tree we can free it. Access in proc will fail to
	 * lookup the descriptor.
	 */
	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}
Example #27
static void mexit(void)
{
	int cpu;
	unsigned long *this;

	if (percpu_ptr) {
		for_each_online_cpu(cpu) {
			this = *per_cpu_ptr(percpu_ptr, cpu);

			if (this)
				vfree(this);
		}

		free_percpu(percpu_ptr);
	}

	printk("Exit %s.\n", THIS_MODULE->name);
}
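A hedged sketch of the module init this cleanup reverses: one vmalloc'ed buffer per CPU hanging off a per-CPU pointer (minit and the buffer size are hypothetical; alloc_percpu() zeroes the slots, so mexit() can safely unwind a partial allocation):

static unsigned long * __percpu *percpu_ptr;

static int __init minit(void)
{
	int cpu;

	percpu_ptr = alloc_percpu(unsigned long *);
	if (!percpu_ptr)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		unsigned long *buf = vmalloc(PAGE_SIZE);

		if (!buf) {
			mexit();	/* frees buffers allocated so far */
			return -ENOMEM;
		}
		*per_cpu_ptr(percpu_ptr, cpu) = buf;
	}

	return 0;
}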
Example #28
static int __init init_dccp_v4_mibs(void)
{
	int rc = -ENOMEM;

	dccp_statistics[0] = alloc_percpu(struct dccp_mib);
	if (dccp_statistics[0] == NULL)
		goto out;

	dccp_statistics[1] = alloc_percpu(struct dccp_mib);
	if (dccp_statistics[1] == NULL)
		goto out_free_one;

	rc = 0;
out:
	return rc;
out_free_one:
	free_percpu(dccp_statistics[0]);
	dccp_statistics[0] = NULL;
	goto out;
}
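Since both allocations are identical, the same init can also be written as a loop that unwinds backwards on failure; a hedged equivalent sketch:

static int __init init_dccp_v4_mibs(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		dccp_statistics[i] = alloc_percpu(struct dccp_mib);
		if (dccp_statistics[i] == NULL)
			goto out_free;
	}
	return 0;

out_free:
	while (--i >= 0) {
		free_percpu(dccp_statistics[i]);
		dccp_statistics[i] = NULL;
	}
	return -ENOMEM;
}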
Example #29
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
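Callers must test the per-CPU return value with IS_ERR() via a __force cast (the pointer carries the __percpu address space) and release it with unregister_wide_hw_breakpoint(). A hedged usage sketch against the two-argument signature above (attr and my_trigger_handler are assumed to be set up by the caller):

struct perf_event * __percpu *events;

events = register_wide_hw_breakpoint(&attr, my_trigger_handler);
if (IS_ERR((void __force *)events))
	return PTR_ERR((void __force *)events);

/* ... breakpoints are now live on all online CPUs ... */

unregister_wide_hw_breakpoint(events);	/* unregisters each event and frees the per-CPU array */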
Example #30
static int __net_init synproxy_net_init(struct net *net)
{
	struct synproxy_net *snet = synproxy_pernet(net);
	struct nf_conntrack_tuple t;
	struct nf_conn *ct;
	int err = -ENOMEM;

	memset(&t, 0, sizeof(t));
	ct = nf_conntrack_alloc(net, 0, &t, &t, GFP_KERNEL);
	if (IS_ERR(ct)) {
		err = PTR_ERR(ct);
		goto err1;
	}

	if (!nfct_seqadj_ext_add(ct))
		goto err2;
	if (!nfct_synproxy_ext_add(ct))
		goto err2;
	__set_bit(IPS_TEMPLATE_BIT, &ct->status);
	__set_bit(IPS_CONFIRMED_BIT, &ct->status);

	snet->tmpl = ct;

	snet->stats = alloc_percpu(struct synproxy_stats);
	if (snet->stats == NULL)
		goto err2;

	err = synproxy_proc_init(net);
	if (err < 0)
		goto err3;

	return 0;

err3:
	free_percpu(snet->stats);
err2:
	nf_conntrack_free(ct);
err1:
	return err;
}
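The matching pernet exit undoes this init in reverse; roughly, from the same nf_synproxy_core.c (a sketch from memory of the upstream code):

static void __net_exit synproxy_net_exit(struct net *net)
{
	struct synproxy_net *snet = synproxy_pernet(net);

	nf_ct_put(snet->tmpl);
	synproxy_proc_exit(net);
	free_percpu(snet->stats);
}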