Example No. 1
void nf_log_unregister(struct nf_logger *logger)
{
	int i;

	mutex_lock(&nf_log_mutex);
	for (i = 0; i < NPROTO; i++) {
		if (nf_loggers[i] == logger)
			rcu_assign_pointer(nf_loggers[i], NULL);
	}
	mutex_unlock(&nf_log_mutex);

	synchronize_rcu();
}
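All of these unregister paths share one writer-side shape: unpublish the pointer under a lock, then call synchronize_rcu() so that every reader which might still hold the old pointer has left its read-side critical section before the object is freed or reused. A minimal sketch of that retract/wait/reclaim pattern, using hypothetical names (my_lock, my_ptr, struct my_obj are placeholders, not taken from the examples):

static DEFINE_MUTEX(my_lock);
static struct my_obj __rcu *my_ptr;

void my_obj_unregister(struct my_obj *obj)
{
	mutex_lock(&my_lock);
	if (rcu_dereference_protected(my_ptr,
				      lockdep_is_held(&my_lock)) == obj)
		RCU_INIT_POINTER(my_ptr, NULL);	/* unpublish */
	mutex_unlock(&my_lock);

	/* Wait for all pre-existing readers to finish. */
	synchronize_rcu();

	/* No reader can still see obj; it is now safe to free. */
	kfree(obj);
}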
Example No. 2
void nf_log_unregister(const struct nf_logger *logger)
{
	int i;

	mutex_lock(&nf_log_mutex);
	for (i = 0; i < ARRAY_SIZE(nf_loggers); i++) {
		if (nf_loggers[i] == logger)
			RCU_INIT_POINTER(nf_loggers[i], NULL);
	}
	mutex_unlock(&nf_log_mutex);

	synchronize_rcu();
}
Example No. 3
void ieee80211_key_removed(struct ieee80211_key_conf *key_conf)
{
	struct ieee80211_key *key;

	key = container_of(key_conf, struct ieee80211_key, conf);

	might_sleep();
	assert_key_lock(key->local);

	key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;

	synchronize_rcu();
}
Example No. 4
File: input.c Project: adnfx2/qemu
static void __input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	if (dev->grab == handle) {
		rcu_assign_pointer(dev->grab, NULL);
		/* Make sure input_pass_event() notices that grab is gone */
		synchronize_rcu();

		list_for_each_entry(handle, &dev->h_list, d_node)
			if (handle->open && handle->handler->start)
				handle->handler->start(handle);
	}
}
Example No. 5
void stp_proto_unregister(const struct stp_proto *proto)
{
	mutex_lock(&stp_proto_mutex);
	if (is_zero_ether_addr(proto->group_address))
		rcu_assign_pointer(stp_proto, NULL);
	else
		rcu_assign_pointer(garp_protos[proto->group_address[5] -
					       GARP_ADDR_MIN], NULL);
	synchronize_rcu();

	if (--sap_registered == 0)
		llc_sap_put(sap);
	mutex_unlock(&stp_proto_mutex);
}
Example No. 6
static void do_test_bulk_ioremapping(void)
{
	void __iomem *p;
	int i;

	for (i = 0; i < 10; ++i) {
		p = ioremap_nocache(mmio_address, PAGE_SIZE);
		if (p)
			iounmap(p);
	}

	/* Force freeing. If it will crash we will know why. */
	synchronize_rcu();
}
Example No. 7
void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto)
{
	BUG_ON(proto->l3proto >= AF_MAX);

	mutex_lock(&nf_ct_proto_mutex);
	BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
					 lockdep_is_held(&nf_ct_proto_mutex)
					 ) != proto);
	rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
			   &nf_conntrack_l3proto_generic);
	mutex_unlock(&nf_ct_proto_mutex);

	synchronize_rcu();
}
Example No. 8
void nf_log_unregister(struct nf_logger *logger)
{
	const struct nf_logger *log;
	int i;

	mutex_lock(&nf_log_mutex);
	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		log = nft_log_dereference(loggers[i][logger->type]);
		if (log == logger)
			RCU_INIT_POINTER(loggers[i][logger->type], NULL);
	}
	mutex_unlock(&nf_log_mutex);
	synchronize_rcu();
}
Example No. 9
void nf_log_unset(struct net *net, const struct nf_logger *logger)
{
	int i;
	const struct nf_logger *log;

	mutex_lock(&nf_log_mutex);
	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		log = nft_log_dereference(net->nf.nf_loggers[i]);
		if (log == logger)
			RCU_INIT_POINTER(net->nf.nf_loggers[i], NULL);
	}
	mutex_unlock(&nf_log_mutex);
	synchronize_rcu();
}
Example No. 10
void dev_deactivate(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
	dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);

	dev_watchdog_down(dev);

	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	while (some_qdisc_is_busy(dev))
		yield();
}
Example No. 11
static inline void
micvcons_del_timer_entry(micvcons_port_t *port)
{
	spin_lock(&timer_list_lock);
	list_del_rcu(&port->list_member);
	if (list_empty(&timer_list_head)) {
		restart_timer_flag = MICVCONS_TIMER_SHUTDOWN;
		spin_unlock(&timer_list_lock);
		del_timer_sync(&vcons_timer);
	} else {
		spin_unlock(&timer_list_lock);
	}
	synchronize_rcu();
}
Example No. 12
Boolean
ssh_interceptor_stop(SshInterceptor interceptor)
{
  SSH_DEBUG(2, ("interceptor stopping"));

  /* 'interceptor_lock protects the 'interfaces_callback'
     and 'num_interface_callbacks'. */
  ssh_kernel_mutex_lock(interceptor->interceptor_lock);

  if (interceptor->num_interface_callbacks)
    {
      ssh_kernel_mutex_unlock(interceptor->interceptor_lock);
      SSH_DEBUG(SSH_D_ERROR,
                ("%d interface callbacks pending, can't stop",
                 interceptor->num_interface_callbacks));
      return FALSE;
    }

  /* No more interfaces are delivered to the engine after this. */
  interceptor->interfaces_callback = ssh_interceptor_dummy_interface_cb;

  /* Route callback is currently not used. */
  interceptor->route_callback = NULL_FNPTR;

  ssh_kernel_mutex_unlock(interceptor->interceptor_lock);

  /* After this the engine will receive no more packets from
     the interceptor, although the netfilter hooks are still
     installed. */

  /* Set packet_callback to point to our dummy_cb */
  rcu_assign_pointer(interceptor->packet_callback,
		     ssh_interceptor_dummy_packet_cb);
  
  /* Wait for state synchronization. Bottom halves must be enabled
     here, since synchronize_rcu() can sleep. */
  local_bh_enable();
  synchronize_rcu();
  local_bh_disable();

  /* Callback context can now be safely zeroed, as both
     the interface_callback and the packet_callback point to
     our dummy_cb, and all kernel threads have returned from
     the engine. */
  interceptor->callback_context = NULL;

  SSH_DEBUG(2, ("interceptor stopped"));

  return TRUE;
}
Example No. 13
void js_input_gpio_clear( void )
{
    unsigned int i;
    for (i = 0; i < JS_MAX_INPUT_GPIO_COUNT; ++i)
    {
        gpio_config_t *config = g_gpio_configs + i;
        if (!config->gpio) {
            break;
        }
        free_gpio_irq(config);
    }

    synchronize_rcu();
    memset(g_gpio_configs, 0, sizeof(g_gpio_configs));
}
Example No. 14
/* Replace the subnet list, and free the old one when readers have
 * abandoned it.
 */
static void update_subnet_list(const struct ipv4_subnet_list **subnets_ptr,
			       const struct ipv4_subnet_list *new_subnets)
{
	const struct ipv4_subnet_list *old_subnets;

	/* Perform RCU update */
	spin_lock(&param_write_lock);
	old_subnets = *subnets_ptr;
	rcu_assign_pointer(*subnets_ptr, new_subnets);
	spin_unlock(&param_write_lock);

	/* Free the old subnets when ready. */
	synchronize_rcu();
	kfree(old_subnets);
}
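Examples like this one block the writer in synchronize_rcu() only to free a single object. When that wait is undesirable, the kernel also offers kfree_rcu(), which frees the object after a grace period without blocking. A hedged sketch, assuming struct ipv4_subnet_list embeds a struct rcu_head and the pointers are non-const (both assumptions, not shown above):

struct ipv4_subnet_list {
	struct rcu_head rcu;		/* required by kfree_rcu() */
	/* ... subnet entries ... */
};

static void update_subnet_list_nowait(struct ipv4_subnet_list **subnets_ptr,
				      struct ipv4_subnet_list *new_subnets)
{
	struct ipv4_subnet_list *old_subnets;

	spin_lock(&param_write_lock);
	old_subnets = *subnets_ptr;
	rcu_assign_pointer(*subnets_ptr, new_subnets);
	spin_unlock(&param_write_lock);

	/* Deferred free: the grace period elapses asynchronously,
	 * so the writer never blocks. */
	kfree_rcu(old_subnets, rcu);
}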
Example No. 15
/* Replace the cipher instance, and free the old one when readers have
 * abandoned it.
 */
static void update_cipher(struct crypto_cipher **cipher_ptr,
			  struct crypto_cipher *new_cipher)
{
	struct crypto_cipher *old_cipher;

	/* Perform RCU update */
	spin_lock(&param_write_lock);
	old_cipher = *cipher_ptr;
	rcu_assign_pointer(*cipher_ptr, new_cipher);
	spin_unlock(&param_write_lock);

	/* Free the old cipher when ready. */
	synchronize_rcu();
	crypto_free_cipher(old_cipher);
}
Example No. 16
/* Replace the IPv6 prefix, and free the old one when readers have
 * abandoned it.
 */
static void update_ipv6_prefix(const __be16 **prefix_ptr,
			       const __be16 *new_prefix)
{
	const __be16 *old_prefix;

	/* Perform RCU update */
	spin_lock(&param_write_lock);
	old_prefix = *prefix_ptr;
	rcu_assign_pointer(*prefix_ptr, new_prefix);
	spin_unlock(&param_write_lock);

	/* Free the old prefix when ready. */
	synchronize_rcu();
	kfree(old_prefix);
}
Example No. 17
static ssize_t rcugp_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	long oldgp = rcu_batches_completed();
	ssize_t bcount;

	mutex_lock(&rcupreempt_trace_mutex);
	synchronize_rcu();
	snprintf(rcupreempt_trace_buf, RCUPREEMPT_TRACE_BUF_SIZE,
		"oldggp=%ld  newggp=%ld\n", oldgp, rcu_batches_completed());
	bcount = simple_read_from_buffer(buffer, count, ppos,
			rcupreempt_trace_buf, strlen(rcupreempt_trace_buf));
	mutex_unlock(&rcupreempt_trace_mutex);
	return bcount;
}
Example No. 18
static void *rcu_read_stress_test(void *arg)
{
    int i;
    int itercnt = 0;
    struct rcu_stress *p;
    int pc;
    long long n_reads_local = 0;
    long long rcu_stress_local[RCU_STRESS_PIPE_LEN + 1] = { 0 };
    volatile int garbage = 0;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        rcu_read_lock();
        p = atomic_rcu_read(&rcu_stress_current);
        if (p->mbtest == 0) {
            n_mberror++;
        }
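        /* Deliberately nested: RCU read-side critical sections may nest. */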
        rcu_read_lock();
        for (i = 0; i < 100; i++) {
            garbage++;
        }
        rcu_read_unlock();
        pc = p->pipe_count;
        rcu_read_unlock();
        if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0)) {
            pc = RCU_STRESS_PIPE_LEN;
        }
        rcu_stress_local[pc]++;
        n_reads_local++;
        if ((++itercnt % 0x1000) == 0) {
            synchronize_rcu();
        }
    }
    qemu_mutex_lock(&counts_mutex);
    n_reads += n_reads_local;
    for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
        rcu_stress_count[i] += rcu_stress_local[i];
    }
    qemu_mutex_unlock(&counts_mutex);

    rcu_unregister_thread();
    return NULL;
}
Example No. 19
static void __input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grabber;

	grabber = rcu_dereference_protected(dev->grab,
					    lockdep_is_held(&dev->mutex));
	if (grabber == handle) {
		rcu_assign_pointer(dev->grab, NULL);
		/* Make sure input_pass_event() notices that grab is gone */
		synchronize_rcu();

		list_for_each_entry(handle, &dev->h_list, d_node)
			if (handle->open && handle->handler->start)
				handle->handler->start(handle);
	}
}
Example No. 20
int gre_del_protocol(const struct gre_protocol *proto, u8 version)
{
	int ret;

	if (version >= GREPROTO_MAX)
		return -EINVAL;

	ret = (cmpxchg((const struct gre_protocol **)&gre_proto[version], proto, NULL) == proto) ?
		0 : -EBUSY;

	if (ret)
		return ret;

	synchronize_rcu();
	return 0;
}
Example No. 21
void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
{
    BUG_ON(l4proto->l3proto >= PF_MAX);

    mutex_lock(&nf_ct_proto_mutex);
    BUG_ON(nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != l4proto);
    RCU_INIT_POINTER(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
                     &nf_conntrack_l4proto_generic);
    nf_ct_l4proto_unregister_sysctl(l4proto);
    mutex_unlock(&nf_ct_proto_mutex);

    synchronize_rcu();

    /* Remove all contrack entries for this protocol */
    nf_ct_iterate_cleanup(kill_l4proto, l4proto);
}
Example No. 22
void nf_ct_l3proto_unregister(const struct nf_conntrack_l3proto *proto)
{
	BUG_ON(proto->l3proto >= NFPROTO_NUMPROTO);

	mutex_lock(&nf_ct_proto_mutex);
	BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
					 lockdep_is_held(&nf_ct_proto_mutex)
					 ) != proto);
	rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
			   &nf_conntrack_l3proto_generic);
	mutex_unlock(&nf_ct_proto_mutex);

	synchronize_rcu();
	/* Remove all contrack entries for this protocol */
	nf_ct_iterate_destroy(kill_l3proto, (void*)proto);
}
Example No. 23
void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
{
	BUG_ON(proto->l3proto >= AF_MAX);

	write_lock_bh(&nf_conntrack_lock);
	BUG_ON(nf_ct_l3protos[proto->l3proto] != proto);
	rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
			   &nf_conntrack_l3proto_generic);
	write_unlock_bh(&nf_conntrack_lock);
	synchronize_rcu();

	nf_ct_l3proto_unregister_sysctl(proto);

	/* Remove all contrack entries for this protocol */
	nf_ct_iterate_cleanup(kill_l3proto, proto);
}
Example No. 24
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
    struct bpf_array *array = container_of(map, struct bpf_array, map);

    /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
     * so the programs (can be more than one that used this map) were
     * disconnected from events. Wait for outstanding programs to complete
     * and free the array
     */
    synchronize_rcu();

    if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
        bpf_array_free_percpu(array);

    kvfree(array);
}
Example No. 25
static void *rcu_fake_update_stress_test(void *arg)
{
    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        synchronize_rcu();
        g_usleep(1000);
    }

    rcu_unregister_thread();
    return NULL;
}
Example No. 26
void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int running;

	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;

	qdisc_reset(qdisc);

	skb = dev->gso_skb;
	dev->gso_skb = NULL;
	spin_unlock_bh(&dev->queue_lock);

	kfree_skb(skb);

	dev_watchdog_down(dev);

	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	do {
		while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
			yield();

		/*
		 * Double-check inside queue lock to ensure that all effects
		 * of the queue run are visible when we return.
		 */
		spin_lock_bh(&dev->queue_lock);
		running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
		spin_unlock_bh(&dev->queue_lock);

		/*
		 * The running flag should never be set at this point because
		 * we've already set dev->qdisc to noop_qdisc *inside* the same
		 * pair of spin locks.  That is, if any qdisc_run starts after
		 * our initial test it should see the noop_qdisc and then
		 * clear the RUNNING bit before dropping the queue lock.  So
		 * if it is set here then we've found a bug.
		 */
	} while (WARN_ON_ONCE(running));
}
Example No. 27
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	rcu_assign_pointer(dev->wcid[idx], NULL);
	synchronize_rcu();

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	mt76_tx_status_check(dev, wcid, true);
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
		mt76_txq_remove(dev, sta->txq[i]);
	mt76_wcid_free(dev->wcid_mask, idx);
}
Example No. 28
static void __exit nx842_exit(void)
{
	struct nx842_devdata *old_devdata;
	unsigned long flags;

	pr_info("Exiting IBM Power 842 compression driver\n");
	spin_lock_irqsave(&devdata_mutex, flags);
	old_devdata = rcu_dereference_check(devdata,
			lockdep_is_held(&devdata_mutex));
	RCU_INIT_POINTER(devdata, NULL);
	spin_unlock_irqrestore(&devdata_mutex, flags);
	synchronize_rcu();
	if (old_devdata)
		dev_set_drvdata(old_devdata->dev, NULL);
	kfree(old_devdata);
	vio_unregister_driver(&nx842_driver);
}
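nx842_exit() may sleep, so blocking in synchronize_rcu() is acceptable there. From a context that cannot sleep, the usual alternative is call_rcu(), which invokes a reclaim callback after the grace period instead of waiting for it. A minimal sketch with hypothetical names (struct my_data and my_data_reclaim are illustrations, not part of the driver above):

struct my_data {
	struct rcu_head rcu;	/* required by call_rcu() */
	/* ... payload ... */
};

static void my_data_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct my_data, rcu));
}

	/* After unpublishing the pointer, from any context: */
	call_rcu(&old->rcu, my_data_reclaim);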
Example No. 29
/* According to comments in the declaration of struct net_device, this function
 * is "Called from unregister, can be used to call free_netdev". Ok then...
 */
static void hsr_dev_destroy(struct net_device *hsr_dev)
{
	struct hsr_priv *hsr;
	struct hsr_port *port;

	hsr = netdev_priv(hsr_dev);

	rtnl_lock();
	hsr_for_each_port(hsr, port)
		hsr_del_port(port);
	rtnl_unlock();

	del_timer_sync(&hsr->prune_timer);
	del_timer_sync(&hsr->announce_timer);

	synchronize_rcu();
}
Example No. 30
static int write_hello(struct file *file, const char *buf,
		unsigned long count, void *data)
{
	int length = count;
	struct proc_hello_data *old, *t;	/* not static: writers must not share these */
	char *tmpbuf;

	tmpbuf = kmalloc(PROC_HELLO_BUFLEN, GFP_KERNEL);
	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!tmpbuf || !t) {
		kfree(tmpbuf);
		kfree(t);
		return -ENOMEM;
	}

	length = (length < PROC_HELLO_BUFLEN) ? length : PROC_HELLO_BUFLEN;

	if (copy_from_user(tmpbuf, buf, length)) {
		kfree(tmpbuf);
		kfree(t);
		return -EFAULT;
	}
	printk(KERN_ALERT "2470:5.8: after copy_from_user!\n");

	/* replace the trailing newline with a NUL terminator */
	tmpbuf[length - 1] = 0;

	t->proc_hello_flag = 1;
	t->proc_hello_counter = length;
	t->proc_hello_value = tmpbuf;
	spin_lock_init(&t->proc_hello_sp);

	printk(KERN_ALERT "2470:5.8: '%d' before rcu spinlock\n", length);
	spin_lock(&hello_data->proc_hello_sp);
	printk(KERN_ALERT "2470:5.8: got rcu\n");

	old = hello_data;
	rcu_assign_pointer(hello_data, t);
	/* unlock the lock actually taken above: it lives in the old
	 * struct, and hello_data now points at the new one */
	spin_unlock(&old->proc_hello_sp);

	/* wait for readers that may still reference the old struct */
	synchronize_rcu();
	printk(KERN_ALERT "2470:5.8: synchronize rcu\n");

	kfree(old->proc_hello_value);
	kfree(old);

	return length;
}
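All of the examples above show the update side of the contract. The read side that synchronize_rcu() waits for pairs rcu_read_lock()/rcu_read_unlock() with rcu_dereference(). A minimal sketch of a reader matching Example No. 30 (read_hello and its page buffer are hypothetical):

static int read_hello(char *page)
{
	struct proc_hello_data *p;
	int n = 0;

	rcu_read_lock();			/* begin read-side critical section */
	p = rcu_dereference(hello_data);	/* fetch the published pointer */
	if (p)
		n = snprintf(page, PROC_HELLO_BUFLEN, "%s\n",
			     p->proc_hello_value);
	rcu_read_unlock();			/* a grace period may now end */

	return n;
}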