void dev_activate(struct net_device *dev)
{
	/* No queueing discipline is attached to the device;
	   create a default one, i.e. pfifo_fast for devices
	   that need queueing and noqueue_qdisc for virtual
	   interfaces.
	 */

	if (dev->qdisc_sleeping == &noop_qdisc) {
		struct Qdisc *qdisc;
		if (dev->tx_queue_len) {
			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops,
						  TC_H_ROOT);
			if (qdisc == NULL) {
				printk(KERN_INFO "%s: activation failed\n", dev->name);
				return;
			}
			write_lock(&qdisc_tree_lock);
			list_add_tail(&qdisc->list, &dev->qdisc_list);
			write_unlock(&qdisc_tree_lock);
		} else {
			qdisc = &noqueue_qdisc;
		}
		write_lock(&qdisc_tree_lock);
		dev->qdisc_sleeping = qdisc;
		write_unlock(&qdisc_tree_lock);
	}

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	spin_lock_bh(&dev->queue_lock);
	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
	if (dev->qdisc != &noqueue_qdisc) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
	spin_unlock_bh(&dev->queue_lock);
}
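
For contrast with the publish side above, a minimal sketch of an RCU reader of dev->qdisc; the function name is hypothetical, and the real transmit path (dev_queue_xmit) does considerably more:

static void example_qdisc_reader(struct net_device *dev)
{
	struct Qdisc *q;

	rcu_read_lock();
	/* Paired with the rcu_assign_pointer() in dev_activate() above */
	q = rcu_dereference(dev->qdisc);
	/* ... enqueue packets to q only inside this read-side section ... */
	rcu_read_unlock();
}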
Example #2
/**
 * disk_map_sector_rcu - map sector to partition
 * @disk: gendisk of interest
 * @sector: sector to map
 *
 * Find out which partition @sector maps to on @disk.  This is
 * primarily used for stats accounting.
 *
 * CONTEXT:
 * RCU read locked.  The returned partition pointer is valid only
 * while preemption is disabled.
 *
 * RETURNS:
 * Found partition on success; part0 if no partition matches.
 */
struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
{
	struct disk_part_tbl *ptbl;
	struct hd_struct *part;
	int i;

	ptbl = rcu_dereference(disk->part_tbl);

	part = rcu_dereference(ptbl->last_lookup);
	if (part && sector_in_part(part, sector))
		return part;

	for (i = 1; i < ptbl->len; i++) {
		part = rcu_dereference(ptbl->part[i]);

		if (part && sector_in_part(part, sector)) {
			rcu_assign_pointer(ptbl->last_lookup, part);
			return part;
		}
	}
	return &disk->part0;
}
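
The CONTEXT note above implies callers bracket the lookup with the RCU read lock for as long as they use the returned partition. A hedged sketch of such a caller (the function name and the stats work are illustrative only):

static void example_account_io(struct gendisk *disk, sector_t sector)
{
	struct hd_struct *part;

	rcu_read_lock();
	part = disk_map_sector_rcu(disk, sector);
	/* part is valid only inside this read-side critical section */
	/* ... update per-partition statistics on part here ... */
	rcu_read_unlock();
}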
Example #3
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter, a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure the new filter is present and of a sane size. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;
	fp->bpf_func = sk_run_filter;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	bpf_jit_compile(fp);

	rcu_read_lock_bh();
	old_fp = rcu_dereference_bh(sk->sk_filter);
	rcu_assign_pointer(sk->sk_filter, fp);
	rcu_read_unlock_bh();

	if (old_fp)
		sk_filter_delayed_uncharge(sk, old_fp);
	return 0;
}
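
The reader side of this publish is the packet path, which runs the filter under rcu_read_lock_bh(). A minimal sketch, loosely modeled on the kernel's sk_filter(); the wrapper name and return convention here are assumptions:

static unsigned int example_run_filter(struct sock *sk,
				       const struct sk_buff *skb)
{
	struct sk_filter *fp;
	unsigned int pkt_len = 0;

	rcu_read_lock_bh();
	fp = rcu_dereference_bh(sk->sk_filter);
	if (fp)
		/* bpf_func was set to sk_run_filter (or a JIT image) above */
		pkt_len = fp->bpf_func(skb, fp->insns);
	rcu_read_unlock_bh();

	return pkt_len;
}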
Example #4
static int
mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt7603_vif *mvif = (struct mt7603_vif *) vif->drv_priv;
	struct mt7603_dev *dev = hw->priv;
	u8 bc_addr[ETH_ALEN];
	int idx;
	int ret = 0;

	mutex_lock(&dev->mutex);

	mvif->idx = ffs(~dev->vif_mask) - 1;
	if (mvif->idx >= MT7603_MAX_INTERFACES) {
		ret = -ENOSPC;
		goto out;
	}

	mt76_wr(dev, MT_MAC_ADDR0(mvif->idx),
		get_unaligned_le32(vif->addr));
	mt76_wr(dev, MT_MAC_ADDR1(mvif->idx),
		(get_unaligned_le16(vif->addr + 4) |
		 MT_MAC_ADDR1_VALID));

	idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
	dev->vif_mask |= BIT(mvif->idx);
	mvif->sta.wcid.idx = idx;
	mvif->sta.wcid.hw_key_idx = -1;

	eth_broadcast_addr(bc_addr);
	mt7603_wtbl_init(dev, idx, bc_addr);

	rcu_assign_pointer(dev->wcid[idx], &mvif->sta.wcid);
	mt7603_txq_init(dev, vif->txq);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
Example #5
static int
mt7603_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	struct mt7603_dev *dev = hw->priv;
	struct mt7603_sta *msta = (struct mt7603_sta *) sta->drv_priv;
	int idx = msta->wcid.idx;
	int i;

	mutex_lock(&dev->mutex);
	rcu_assign_pointer(dev->wcid[idx], NULL);
	mt7603_wtbl_clear(dev, idx);

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
		mt76_txq_remove(&dev->mt76, sta->txq[i]);

	mt76_wcid_free(dev->wcid_mask, idx);

	mutex_unlock(&dev->mutex);

	return 0;
}
Example #6
int omx_xen_destroy_user_region(omx_xenif_t * omx_xenif, uint32_t id,
				uint32_t seqnum, uint8_t eid)
{
	struct backend_info *be = omx_xenif->be;
	struct omxback_dev *dev = be->omxdev;
	struct omx_endpoint *endpoint;
	struct omx_xen_user_region *region;
	int ret = 0;

	dprintk_in();

	TIMER_START(&t_destroy_reg);
	if (eid < 255) {	/* eid is a u8, so eid >= 0 always holds */
		endpoint = dev->endpoints[eid];
	} else {
		printk_err
		    ("Wrong endpoint number (%u) check your frontend/backend communication!\n",
		     eid);
		ret = -EINVAL;
		goto out;
	}

	region = rcu_dereference_protected(endpoint->xen_regions[id], 1);
	if (unlikely(!region)) {
		printk_err(
		       "%s: Cannot access non-existing region %d\n", __func__, id);
		//ret = -EINVAL;
		goto out;
	}

	rcu_assign_pointer(endpoint->xen_regions[region->id], NULL);
	//omx_xen_user_region_release(region);
	kfree(region);
out:
	TIMER_STOP(&t_destroy_reg);
	dprintk_out();
	return ret;
}
Example #7
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
					  struct inetpeer_addr *addr,
					  unsigned int hash,
					  bool reclaim)
{
	struct tcp_metrics_block *tm;
	struct net *net;

	spin_lock_bh(&tcp_metrics_lock);
	net = dev_net(dst->dev);
	if (unlikely(reclaim)) {
		struct tcp_metrics_block *oldest;

		oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
		for (tm = rcu_dereference(oldest->tcpm_next); tm;
		     tm = rcu_dereference(tm->tcpm_next)) {
			if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
				oldest = tm;
		}
		tm = oldest;
	} else {
		tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
		if (!tm)
			goto out_unlock;
	}
	tm->tcpm_addr = *addr;

	tcpm_suck_dst(tm, dst, true);

	if (likely(!reclaim)) {
		tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
		rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
	}

out_unlock:
	spin_unlock_bh(&tcp_metrics_lock);
	return tm;
}
Example #8
File: bnxt_ulp.c Project: Lyude/linux
static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
			     struct bnxt_ulp_ops *ulp_ops, void *handle)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	ASSERT_RTNL();
	if (ulp_id >= BNXT_MAX_ULP)
		return -EINVAL;

	ulp = &edev->ulp_tbl[ulp_id];
	if (rcu_access_pointer(ulp->ulp_ops)) {
		netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
		return -EBUSY;
	}
	if (ulp_id == BNXT_ROCE_ULP) {
		unsigned int max_stat_ctxs;

		max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
		if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
		    bp->num_stat_ctxs == max_stat_ctxs)
			return -ENOMEM;
		bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs -
					    BNXT_MIN_ROCE_STAT_CTXS);
	}

	atomic_set(&ulp->ref_count, 0);
	ulp->handle = handle;
	rcu_assign_pointer(ulp->ulp_ops, ulp_ops);

	if (ulp_id == BNXT_ROCE_ULP) {
		if (test_bit(BNXT_STATE_OPEN, &bp->state))
			bnxt_hwrm_vnic_cfg(bp, 0);
	}

	return 0;
}
Example #9
File: mt76x2_main.c Project: krzk/linux
static int
mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       struct ieee80211_sta *sta)
{
	struct mt76x2_dev *dev = hw->priv;
	struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
	struct mt76x2_vif *mvif = (struct mt76x2_vif *) vif->drv_priv;
	int ret = 0;
	int idx = 0;
	int i;

	mutex_lock(&dev->mutex);

	idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
	if (idx < 0) {
		ret = -ENOSPC;
		goto out;
	}

	msta->vif = mvif;
	msta->wcid.sta = 1;
	msta->wcid.idx = idx;
	msta->wcid.hw_key_idx = -1;
	mt76x2_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
	mt76x2_mac_wcid_set_drop(dev, idx, false);
	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
		mt76x2_txq_init(dev, sta->txq[i]);

	if (vif->type == NL80211_IFTYPE_AP)
		set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);

	rcu_assign_pointer(dev->wcid[idx], &msta->wcid);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
Example #10
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
SYSCALL_DEFINE1(close, unsigned int, fd)
{
	struct file * filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int retval;

#ifdef CONFIG_SEC_DEBUG_ZERO_FD_CLOSE
	if (fd == 0 && strcmp(current->group_leader->comm,"mediaserver") == 0)
		panic("trying to close fd=0");
#endif

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
Example #11
/* Return -EBUSY when somebody else is registered, -EEXIST if the
 * same handler is registered, and 0 in case of success. */
int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
	int ret;
	const struct nf_queue_handler *old;

	if (pf >= ARRAY_SIZE(queue_handler))
		return -EINVAL;

	mutex_lock(&queue_handler_mutex);
	old = rcu_dereference_protected(queue_handler[pf],
					lockdep_is_held(&queue_handler_mutex));
	if (old == qh)
		ret = -EEXIST;
	else if (old)
		ret = -EBUSY;
	else {
		rcu_assign_pointer(queue_handler[pf], qh);
		ret = 0;
	}
	mutex_unlock(&queue_handler_mutex);

	return ret;
}
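
The natural counterpart clears the slot under the same mutex, then waits out readers before the handler may be freed. A sketch along the lines of nf_unregister_queue_handler(), not a verbatim copy:

int example_unregister_queue_handler(u_int8_t pf,
				     const struct nf_queue_handler *qh)
{
	const struct nf_queue_handler *old;

	if (pf >= ARRAY_SIZE(queue_handler))
		return -EINVAL;

	mutex_lock(&queue_handler_mutex);
	old = rcu_dereference_protected(queue_handler[pf],
					lockdep_is_held(&queue_handler_mutex));
	if (old && old != qh) {
		mutex_unlock(&queue_handler_mutex);
		return -EINVAL;
	}
	rcu_assign_pointer(queue_handler[pf], NULL);
	mutex_unlock(&queue_handler_mutex);

	/* Ensure no packet in flight still sees qh before it is freed */
	synchronize_rcu();
	return 0;
}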
Example #12
/* Return -EBUSY if somebody else is registered, -EEXIST if the same
 * logger is registered, and 0 on success. */
int nf_log_register(int pf, struct nf_logger *logger)
{
	int ret;

	if (pf >= NPROTO)
		return -EINVAL;

	/* Any setup of logging members must be done before
	 * substituting pointer. */
	ret = mutex_lock_interruptible(&nf_log_mutex);
	if (ret < 0)
		return ret;

	if (!nf_loggers[pf])
		rcu_assign_pointer(nf_loggers[pf], logger);
	else if (nf_loggers[pf] == logger)
		ret = -EEXIST;
	else
		ret = -EBUSY;

	mutex_unlock(&nf_log_mutex);
	return ret;
}
Example #13
void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
{
	struct net *net;

	BUG_ON(proto->l3proto >= AF_MAX);

	mutex_lock(&nf_ct_proto_mutex);
	BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
					 lockdep_is_held(&nf_ct_proto_mutex)
					 ) != proto);
	rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
			   &nf_conntrack_l3proto_generic);
	nf_ct_l3proto_unregister_sysctl(proto);
	mutex_unlock(&nf_ct_proto_mutex);

	synchronize_rcu();

	/* Remove all contrack entries for this protocol */
	rtnl_lock();
	for_each_net(net)
		nf_ct_iterate_cleanup(net, kill_l3proto, proto);
	rtnl_unlock();
}
Example #14
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
asmlinkage long sys_close(unsigned int fd)
{
	struct file * filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	FD_CLR(fd, fdt->close_on_exec);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(filp, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
Example #15
File: swap_state.c Project: mdamt/linux
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
		spin_lock_init(&space->tree_lock);
	}
	nr_swapper_spaces[type] = nr;
	rcu_assign_pointer(swapper_spaces[type], spaces);

	return 0;
}
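
A plausible teardown counterpart, sketched under the assumption that swapper_spaces[] is __rcu-annotated as the rcu_assign_pointer() above suggests; the kernel's actual exit_swap_address_space() may differ:

void example_exit_swap_address_space(unsigned int type)
{
	struct address_space *spaces;

	spaces = rcu_dereference_protected(swapper_spaces[type], 1);
	rcu_assign_pointer(swapper_spaces[type], NULL);
	nr_swapper_spaces[type] = 0;

	/* Wait for lookups that may still hold the old pointer */
	synchronize_rcu();
	kvfree(spaces);
}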
Example #16
/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* Prune old scheduler */
	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
Example #17
File: conntrack.c Project: 18SUN/ovs
static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
			     const struct sw_flow_key *key, bool log)
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help;

	helper = nf_conntrack_helper_try_module_get(name, info->family,
						    key->ip.proto);
	if (!helper) {
		OVS_NLERR(log, "Unknown helper \"%s\"", name);
		return -EINVAL;
	}

	help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL);
	if (!help) {
		module_put(helper->me);
		return -ENOMEM;
	}

	rcu_assign_pointer(help->helper, helper);
	info->helper = helper;
	return 0;
}
Example #18
int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
{
	int ret = 0;

	if (proto->l3proto >= AF_MAX)
		return -EBUSY;

	mutex_lock(&nf_ct_proto_mutex);
	if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_l3proto_generic) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = nf_ct_l3proto_register_sysctl(proto);
	if (ret < 0)
		goto out_unlock;

	rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], proto);

out_unlock:
	mutex_unlock(&nf_ct_proto_mutex);
	return ret;
}
Example #19
static struct in_device *inetdev_init(struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
	if (!in_dev)
		goto out;
	memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
			sizeof(in_dev->cnf));
	in_dev->cnf.sysctl = NULL;
	in_dev->dev = dev;
	in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
	if (!in_dev->arp_parms)
		goto out_kfree;
	if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
		dev_disable_lro(dev);
	/* Reference in_dev->dev */
	dev_hold(dev);
	/* Account for reference dev->ip_ptr (below) */
	in_dev_hold(in_dev);

	devinet_sysctl_register(in_dev);
	ip_mc_init_dev(in_dev);
	if (dev->flags & IFF_UP)
		ip_mc_up(in_dev);

	/* we can receive as soon as ip_ptr is set -- do this last */
	rcu_assign_pointer(dev->ip_ptr, in_dev);
out:
	return in_dev;
out_kfree:
	kfree(in_dev);
	in_dev = NULL;
	goto out;
}
Example #20
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
asmlinkage long sys_close(unsigned int fd)
{
	struct file * filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int retval;

	if (need_files_checkpoint())
		checkpoint_files();

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;

	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	tx_cache_get_file(filp); /* get tx refcount on file */

	rcu_assign_pointer(fdt->fd[fd], NULL);
	FD_CLR(fd, fdt->close_on_exec); /* ?? check later */
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
Example #21
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}
Example #22
/*
 * update a user defined key
 * - the key's semaphore is write-locked
 */
int user_update(struct key *key, const void *data, size_t datalen)
{
	struct user_key_payload *upayload, *zap;
	int ret;

	ret = -EINVAL;
	if (datalen <= 0 || datalen > 32767 || !data)
		goto error;

	/* construct a replacement payload */
	ret = -ENOMEM;
	upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
	if (!upayload)
		goto error;

	upayload->datalen = datalen;
	memcpy(upayload->data, data, datalen);

	/* check the quota and attach the new data */
	zap = upayload;

	ret = key_payload_reserve(key, datalen);

	if (ret == 0) {
		/* attach the new data, displacing the old */
		zap = key->payload.data;
		rcu_assign_pointer(key->payload.data, upayload);
		key->expiry = 0;
	}

	call_rcu(&zap->rcu, user_update_rcu_disposal);

error:
	return ret;

} /* end user_update() */
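
The RCU disposal callback passed to call_rcu() above is not shown in this snippet; it presumably just frees the displaced payload once all readers are done, along these lines:

/* Sketch of user_update_rcu_disposal(), inferred from its use above */
static void user_update_rcu_disposal(struct rcu_head *rcu)
{
	struct user_key_payload *upayload;

	upayload = container_of(rcu, struct user_key_payload, rcu);
	kfree(upayload);
}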
Example #23
void chroot_to_physical_root(struct prev_root *prev_root)
{
	struct krg_namespace *krg_ns = find_get_krg_ns();
	struct fs_struct *fs = current->fs;
	struct path root, prev_pwd;

	BUG_ON(!krg_ns);
	put_krg_ns(krg_ns);
//	BUG_ON(fs->users != 1);

	get_physical_root(&root);
	write_lock(&fs->lock);
	prev_root->path = fs->root;
	fs->root = root;
	path_get(&root);
	prev_pwd = fs->pwd;
	fs->pwd = root;
	write_unlock(&fs->lock);
	path_put(&prev_pwd);

	BUG_ON(prev_root->path.mnt->mnt_ns != current->nsproxy->mnt_ns);
	prev_root->nsproxy = current->nsproxy;
	rcu_assign_pointer(current->nsproxy, &krg_ns->root_nsproxy);
}
Example #24
static struct files_struct *alloc_files(void)
{
	struct files_struct *newf;
	struct fdtable *fdt;

	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	fdt = &newf->fdtab;
	fdt->max_fds = NR_OPEN_DEFAULT;
	fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
	fdt->open_fds = (fd_set *)&newf->open_fds_init;
	fdt->fd = &newf->fd_array[0];
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->next = NULL;
	rcu_assign_pointer(newf->fdt, fdt);
out:
	return newf;
}
Example #25
File: main.c Project: chunkeey/rtl8192su
static void r92su_bss_free_connected(struct r92su *r92su,
				     bool locally_generated)
{
	struct cfg80211_bss *old_bss;

	if (r92su_is_connected(r92su))
		r92su_set_state(r92su, R92SU_OPEN);

	rcu_read_lock();
	old_bss = rcu_dereference(r92su->connect_bss);
	rcu_assign_pointer(r92su->connect_bss, NULL);
	if (old_bss) {
		switch (r92su->wdev.iftype) {
		case NL80211_IFTYPE_STATION:
			/* cfg80211 doesn't like it when cfg80211_disconnected
			 * is called without reason. So check if we were really
			 * connected.
			 */
			cfg80211_disconnected(r92su->wdev.netdev,
				      WLAN_STATUS_UNSPECIFIED_FAILURE, NULL, 0,
				      locally_generated, GFP_ATOMIC);
			break;

		case NL80211_IFTYPE_ADHOC:
			cfg80211_unlink_bss(r92su->wdev.wiphy, old_bss);
			break;

		default:
			WARN(1, "unsupported network type %d\n",
			     r92su->wdev.iftype);
			break;
		}
		r92su_bss_free(r92su, old_bss);
	}
	rcu_read_unlock();
}
Example #26
int tcp_fastopen_reset_cipher(void *key, unsigned int len)
{
	int err;
	struct tcp_fastopen_context *ctx, *octx;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);

	if (IS_ERR(ctx->tfm)) {
		err = PTR_ERR(ctx->tfm);
error:
		kfree(ctx);
		pr_err("TCP: TFO aes cipher alloc error: %d\n", err);
		return err;
	}
	err = crypto_cipher_setkey(ctx->tfm, key, len);
	if (err) {
		pr_err("TCP: TFO cipher key error: %d\n", err);
		crypto_free_cipher(ctx->tfm);
		goto error;
	}
	memcpy(ctx->key, key, len);

	spin_lock(&tcp_fastopen_ctx_lock);

	octx = rcu_dereference_protected(tcp_fastopen_ctx,
					 lockdep_is_held(&tcp_fastopen_ctx_lock));
	rcu_assign_pointer(tcp_fastopen_ctx, ctx);
	spin_unlock(&tcp_fastopen_ctx_lock);

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
	return err;
}
Example #27
/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  May return -ENOMEM and not add the socket to the group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2)
{
	struct sock_reuseport *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
					    lockdep_is_held(&reuseport_lock)),
		  "socket already in reuseport group");

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	return 0;
}
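
The smp_wmb() above guarantees that a reader who observes the incremented num_socks also sees the just-stored socket pointer. A sketch of that reader, loosely based on reuseport_select_sock() with scoring and error handling omitted:

static struct sock *example_select(struct sock *sk, u32 hash)
{
	struct sock_reuseport *reuse;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);
	if (reuse) {
		socks = READ_ONCE(reuse->num_socks);
		if (likely(socks)) {
			/* Paired with smp_wmb() in reuseport_add_sock() */
			smp_rmb();
			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
		}
	}
	rcu_read_unlock();

	return sk2;
}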
Example #28
File: vport.c Project: JunoZhu/ovs
/**
 *	ovs_vport_set_upcall_portids - set upcall portids of @vport.
 *
 * @vport: vport to modify.
 * @ids: new configuration, an array of port ids.
 *
 * Sets the vport's upcall_portids to @ids.
 *
 * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
 * as an array of U32, and -ENOMEM on allocation failure.
 *
 * Must be called with ovs_mutex.
 */
int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
	struct vport_portids *old, *vport_portids;

	if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
		return -EINVAL;

	old = ovsl_dereference(vport->upcall_portids);

	vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
				GFP_KERNEL);
	if (!vport_portids)
		return -ENOMEM;

	vport_portids->n_ids = nla_len(ids) / sizeof(u32);
	vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
	nla_memcpy(vport_portids->ids, ids, nla_len(ids));

	rcu_assign_pointer(vport->upcall_portids, vport_portids);

	if (old)
		kfree_rcu(old, rcu);
	return 0;
}
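
The matching reader picks one portid under RCU using the precomputed reciprocal divisor; a sketch along the lines of ovs_vport_find_upcall_portid(), with the hash source left to the caller:

u32 example_find_upcall_portid(const struct vport *vport, u32 hash)
{
	struct vport_portids *ids;
	u32 portid = 0;

	rcu_read_lock();
	ids = rcu_dereference(vport->upcall_portids);
	if (ids && ids->n_ids)
		portid = ids->ids[reciprocal_divide(hash, ids->rn_ids)];
	rcu_read_unlock();

	return portid;
}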
Example #29
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	fence_init(&fence->base, &amdgpu_fence_ops,
		   &ring->fence_drv.lock,
		   adev->fence_context + ring->idx,
		   seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
Example #30
static int rps_sock_flow_sysctl(ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int orig_size, size;
	int ret, i;
	ctl_table tmp = {
		.data = &size,
		.maxlen = sizeof(size),
		.mode = table->mode
	};
	struct rps_sock_flow_table *orig_sock_table, *sock_table;
	static DEFINE_MUTEX(sock_flow_mutex);

	mutex_lock(&sock_flow_mutex);

	orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
					lockdep_is_held(&sock_flow_mutex));
	size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;

	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);

	if (write) {
		if (size) {
			if (size > 1<<30) {
				/* Enforce limit to prevent overflow */
				mutex_unlock(&sock_flow_mutex);
				return -EINVAL;
			}
			size = roundup_pow_of_two(size);
			if (size != orig_size) {
				sock_table =
				    vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size));
				if (!sock_table) {
					mutex_unlock(&sock_flow_mutex);
					return -ENOMEM;
				}

				sock_table->mask = size - 1;
			} else
				sock_table = orig_sock_table;

			for (i = 0; i < size; i++)
				sock_table->ents[i] = RPS_NO_CPU;
		} else
			sock_table = NULL;

		if (sock_table != orig_sock_table) {
			rcu_assign_pointer(rps_sock_flow_table, sock_table);
			if (sock_table)
				static_key_slow_inc(&rps_needed);
			if (orig_sock_table) {
				static_key_slow_dec(&rps_needed);
				synchronize_rcu();
				vfree(orig_sock_table);
			}
		}
	}

	mutex_unlock(&sock_flow_mutex);

	return ret;
}
#endif /* CONFIG_RPS */

static struct ctl_table net_core_table[] = {
#ifdef CONFIG_NET
	{
		.procname	= "wmem_max",
		.data		= &sysctl_wmem_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "rmem_max",