Code example #1
File: dvb_ringbuffer.c Project: AshishNamdev/linux
ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, size_t len)
{
	size_t todo = len;
	size_t split;

	split = (rbuf->pread + len > rbuf->size) ? rbuf->size - rbuf->pread : 0;
	if (split > 0) {
		if (copy_to_user(buf, rbuf->data+rbuf->pread, split))
			return -EFAULT;
		buf += split;
		todo -= split;
		/* smp_store_release() for read pointer update to ensure
		 * that buf is not overwritten until read is complete,
		 * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
		 */
		smp_store_release(&rbuf->pread, 0);
	}
	if (copy_to_user(buf, rbuf->data+rbuf->pread, todo))
		return -EFAULT;

	/* smp_store_release() to update read pointer, see above */
	smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);

	return len;
}
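
As a supplement: a minimal userspace sketch of the consumer side of this pattern, with C11 atomics standing in for the kernel primitives (a release store for smp_store_release(); an acquire load as the portable counterpart of the read-pointer check that dvb_ringbuffer_free() does with ACCESS_ONCE()). RING_SIZE and the function name below are illustrative, not taken from the driver.

#include <stdatomic.h>
#include <stddef.h>

#define RING_SIZE 256	/* illustrative */
static unsigned char ring_data[RING_SIZE];
static _Atomic size_t ring_pread;

/* Consumer: copy the payload out first, then publish the new read
 * pointer with release semantics so the producer cannot observe the
 * advanced pointer (and reuse the slots) before the copy completes.
 */
static void ring_consume(unsigned char *dst, size_t n)
{
	size_t pread = atomic_load_explicit(&ring_pread,
					    memory_order_relaxed);

	for (size_t i = 0; i < n; i++)
		dst[i] = ring_data[(pread + i) % RING_SIZE];
	atomic_store_explicit(&ring_pread, (pread + n) % RING_SIZE,
			      memory_order_release); /* ~smp_store_release() */
}
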
Code example #2
File: dvb_ringbuffer.c Project: AshishNamdev/linux
ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
				  const u8 __user *buf, size_t len)
{
	int status;
	size_t todo = len;
	size_t split;

	split = (rbuf->pwrite + len > rbuf->size) ? rbuf->size - rbuf->pwrite : 0;

	if (split > 0) {
		status = copy_from_user(rbuf->data+rbuf->pwrite, buf, split);
		if (status)
			return len - todo;
		buf += split;
		todo -= split;
		/* smp_store_release() for write pointer update to ensure that
		 * written data is visible on other cpu cores before the pointer
		 * update, this pairs with smp_load_acquire() in
		 * dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
		 */
		smp_store_release(&rbuf->pwrite, 0);
	}
	status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo);
	if (status)
		return len - todo;
	/* smp_store_release() for write pointer update, see above */
	smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);

	return len;
}
Code example #3
File: dvb_ringbuffer.c Project: AshishNamdev/linux
void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf)
{
	/* dvb_ringbuffer_reset() counts as read and write operation
	 * smp_store_release() to update read pointer
	 */
	smp_store_release(&rbuf->pread, 0);
	/* smp_store_release() to update write pointer */
	smp_store_release(&rbuf->pwrite, 0);
	rbuf->error = 0;
}
Code example #4
File: test.c Project: msullivan/rmc-compiler
void run_test(int threads, int n)
{
    reset();
    // Let my people go
    smp_store_release(&go, n+1);
    // Wait for everyone
    for (int i = 0; i < threads; i++) {
        while (!smp_load_acquire(&done[i]))
            ;

        smp_store_release(&done[i], false);
    }

    // Everyone done, process results.
    process_results(results);
}
Code example #5
File: ldt.c Project: magarto/linux-rpi-grsecurity
/* context.lock is held */
static void install_ldt(struct mm_struct *current_mm,
			struct ldt_struct *ldt)
{
	/* Synchronizes with lockless_dereference in load_mm_ldt. */
	smp_store_release(&current_mm->context.ldt, ldt);

	/* Activate the LDT for all CPUs using current_mm. */
	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
}
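
The pattern here is pointer publication: finish initializing the object, then release-store the pointer so any reader that acquire-loads it (lockless_dereference() in load_mm_ldt()) also observes the initialized fields. A hedged C11 sketch; the struct and function names are invented for illustration.

#include <stdatomic.h>
#include <stdlib.h>

struct ldt_like { int nr_entries; int *entries; };
static struct ldt_like *_Atomic cur_ldt;

/* Writer: build the object completely before publishing it. */
static int publish_ldt(int n)
{
	struct ldt_like *l = malloc(sizeof(*l));

	if (!l)
		return -1;
	l->nr_entries = n;
	l->entries = calloc(n, sizeof(int));
	atomic_store_explicit(&cur_ldt, l, memory_order_release);
	return 0;
}

/* Reader: the acquire load pairs with the release store above. */
static int ldt_entry_count(void)
{
	struct ldt_like *l = atomic_load_explicit(&cur_ldt,
						  memory_order_acquire);
	return l ? l->nr_entries : 0;
}
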
Code example #6
File: smp.c Project: Lyude/linux
static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}
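
csd_unlock() is the release half of a spin flag: everything written while the flag was held must be visible to the next acquirer before the flag reads as clear. A minimal C11 analogue, with the flag name and bit value invented for illustration.

#include <stdatomic.h>

#define FLAG_LOCK 0x01u
static _Atomic unsigned int csd_like_flags;

/* Acquire side: spin until we win the flag. */
static void csd_like_lock(void)
{
	unsigned int expected;

	do {
		expected = 0;
	} while (!atomic_compare_exchange_weak_explicit(
			&csd_like_flags, &expected, FLAG_LOCK,
			memory_order_acquire, memory_order_relaxed));
}

/* Release side: stores made under the flag are ordered before it clears. */
static void csd_like_unlock(void)
{
	atomic_store_explicit(&csd_like_flags, 0, memory_order_release);
}
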
Code example #7
File: test.c Project: msullivan/rmc-compiler
void iteration(int thread, int *count) {
    test_fn *f = test_fns[thread];
    while (smp_load_acquire(&go) == *count)
        ;
    int r = f();
    results[thread] = r;
    smp_store_release(&done[thread], true);

    (*count)++;
}
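
Code examples #4 and #7 are the two halves of one handshake: the coordinator release-stores go, each worker acquire-loads it, runs its test, and release-stores its done slot; the coordinator acquire-loads done before reading results. A compilable single-worker sketch of that protocol (the names and the trivial test body are illustrative).

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int go, done;
static int result;	/* protected by the go/done handshake */

static void *worker(void *arg)
{
	(void)arg;
	while (atomic_load_explicit(&go, memory_order_acquire) == 0)
		;	/* wait for the coordinator */
	result = 42;	/* the "test" */
	atomic_store_explicit(&done, 1, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	atomic_store_explicit(&go, 1, memory_order_release);
	while (atomic_load_explicit(&done, memory_order_acquire) == 0)
		;	/* once seen, result is guaranteed visible */
	printf("result = %d\n", result);
	pthread_join(t, NULL);
	return 0;
}
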
Code example #8
File: dvb_ringbuffer.c Project: AshishNamdev/linux
void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf)
{
	/* dvb_ringbuffer_flush() counts as read operation
	 * smp_load_acquire() to load write pointer
	 * smp_store_release() to update read pointer, this ensures that the
	 * correct pointer is visible for subsequent dvb_ringbuffer_free()
	 * calls on other cpu cores
	 */
	smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite));
	rbuf->error = 0;
}
Code example #9
File: dvb_ringbuffer.c Project: AshishNamdev/linux
void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
{
	size_t todo = len;
	size_t split;

	split = (rbuf->pread + len > rbuf->size) ? rbuf->size - rbuf->pread : 0;
	if (split > 0) {
		memcpy(buf, rbuf->data+rbuf->pread, split);
		buf += split;
		todo -= split;
		/* smp_store_release() for read pointer update to ensure
		 * that buf is not overwritten until read is complete,
		 * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
		 */
		smp_store_release(&rbuf->pread, 0);
	}
	memcpy(buf, rbuf->data+rbuf->pread, todo);

	/* smp_store_release() to update read pointer, see above */
	smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);
}
Code example #10
/*
 * Discard a packet we've used up and advance the Rx window by one.
 */
static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top;
	u8 flags;
	int ix;

	_enter("%d", call->debug_id);

	hard_ack = call->rx_hard_ack;
	top = smp_load_acquire(&call->rx_top);
	ASSERT(before(hard_ack, top));

	hard_ack++;
	ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
	skb = call->rxtx_buffer[ix];
	rxrpc_see_skb(skb, rxrpc_skb_rx_rotated);
	sp = rxrpc_skb(skb);
	flags = sp->hdr.flags;
	serial = sp->hdr.serial;
	if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO)
		serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1;

	call->rxtx_buffer[ix] = NULL;
	call->rxtx_annotations[ix] = 0;
	/* Barrier against rxrpc_input_data(). */
	smp_store_release(&call->rx_hard_ack, hard_ack);

	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);

	_debug("%u,%u,%02x", hard_ack, top, flags);
	trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
	if (flags & RXRPC_LAST_PACKET) {
		rxrpc_end_rx_phase(call, serial);
	} else {
		/* Check to see if there's an ACK that needs sending. */
		if (after_eq(hard_ack, call->ackr_consumed + 2) ||
		    after_eq(top, call->ackr_seen + 2) ||
		    (hard_ack == top && after(hard_ack, call->ackr_consumed)))
			rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
					  true, false,
					  rxrpc_propose_ack_rotate_rx);
		if (call->ackr_reason)
			rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
	}
}
Code example #11
File: sch_fq.c Project: ReneNyffenegger/linux
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch, to_free);
	}

	f->qlen++;
	if (skb_is_retransmit(skb))
		q->stat_tcp_retrans++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		struct sock *sk = skb->sk;

		fq_flow_add_tail(&q->new_flows, f);
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);
		if (sk && q->rate_enable) {
			if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
				     SK_PACING_FQ))
				smp_store_release(&sk->sk_pacing_status,
						  SK_PACING_FQ);
		}
		q->inactive_flows--;
	}

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;
	}
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
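
The sk_pacing_status update is a set-once idiom: acquire-load first and only release-store when the value actually changes, so the hot path does not dirty a shared cache line on every enqueue. A minimal C11 analogue with invented names.

#include <stdatomic.h>

enum { PACING_NONE, PACING_FQ };
static _Atomic int pacing_status;

static void mark_fq_pacing(void)
{
	/* Cheap read on every call; write only on the first transition. */
	if (atomic_load_explicit(&pacing_status, memory_order_acquire)
	    != PACING_FQ)
		atomic_store_explicit(&pacing_status, PACING_FQ,
				      memory_order_release);
}
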
Code example #12
void quarantine_reduce(void)
{
	size_t new_quarantine_size;
	unsigned long flags;
	struct qlist to_free = QLIST_INIT;
	size_t size_to_free = 0;
	void **last;

	/* smp_load_acquire() here pairs with smp_store_release() below. */
	if (likely(ACCESS_ONCE(global_quarantine.bytes) <=
		   smp_load_acquire(&quarantine_size)))
		return;

	spin_lock_irqsave(&quarantine_lock, flags);

	/* Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	new_quarantine_size = (ACCESS_ONCE(totalram_pages) << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	new_quarantine_size -= QUARANTINE_PERCPU_SIZE * num_online_cpus();
	/* Pairs with smp_load_acquire() above and in QUARANTINE_LOW_SIZE. */
	smp_store_release(&quarantine_size, new_quarantine_size);

	last = global_quarantine.head;
	while (last) {
		struct kmem_cache *cache = qlink_to_cache(last);

		size_to_free += cache->size;
		if (!*last || size_to_free >
		    global_quarantine.bytes - QUARANTINE_LOW_SIZE)
			break;
		last = (void **) *last;
	}
	qlist_move(&global_quarantine, last, &to_free, size_to_free);

	spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
}
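
Here quarantine_size is a resizable watermark: the writer release-stores the new limit and readers acquire-load it before comparing it against the current byte count. A tiny C11 analogue; the names and the divisor are invented (the kernel derives the limit from totalram_pages and QUARANTINE_FRACTION).

#include <stdatomic.h>
#include <stddef.h>

static _Atomic size_t quarantine_limit;
static _Atomic size_t quarantine_bytes;

static void resize_limit(size_t total_ram)
{
	/* Pairs with the acquire load in over_limit(). */
	atomic_store_explicit(&quarantine_limit, total_ram / 32,
			      memory_order_release);
}

static int over_limit(void)
{
	return atomic_load_explicit(&quarantine_bytes,
				    memory_order_relaxed) >
	       atomic_load_explicit(&quarantine_limit,
				    memory_order_acquire);
}
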
Code example #13
File: process_keys.c Project: avagin/linux
/*
 * Install the user and user session keyrings for the current process's UID.
 */
int install_user_keyrings(void)
{
	struct user_struct *user;
	const struct cred *cred;
	struct key *uid_keyring, *session_keyring;
	key_perm_t user_keyring_perm;
	char buf[20];
	int ret;
	uid_t uid;

	user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL;
	cred = current_cred();
	user = cred->user;
	uid = from_kuid(cred->user_ns, user->uid);

	kenter("%p{%u}", user, uid);

	if (READ_ONCE(user->uid_keyring) && READ_ONCE(user->session_keyring)) {
		kleave(" = 0 [exist]");
		return 0;
	}

	mutex_lock(&key_user_keyring_mutex);
	ret = 0;

	if (!user->uid_keyring) {
		/* get the UID-specific keyring
		 * - there may be one in existence already as it may have been
		 *   pinned by a session, but the user_struct pointing to it
		 *   may have been destroyed by setuid */
		sprintf(buf, "_uid.%u", uid);

		uid_keyring = find_keyring_by_name(buf, true);
		if (IS_ERR(uid_keyring)) {
			uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
						    cred, user_keyring_perm,
						    KEY_ALLOC_UID_KEYRING |
							KEY_ALLOC_IN_QUOTA,
						    NULL, NULL);
			if (IS_ERR(uid_keyring)) {
				ret = PTR_ERR(uid_keyring);
				goto error;
			}
		}

		/* get a default session keyring (which might also exist
		 * already) */
		sprintf(buf, "_uid_ses.%u", uid);

		session_keyring = find_keyring_by_name(buf, true);
		if (IS_ERR(session_keyring)) {
			session_keyring =
				keyring_alloc(buf, user->uid, INVALID_GID,
					      cred, user_keyring_perm,
					      KEY_ALLOC_UID_KEYRING |
						  KEY_ALLOC_IN_QUOTA,
					      NULL, NULL);
			if (IS_ERR(session_keyring)) {
				ret = PTR_ERR(session_keyring);
				goto error_release;
			}

			/* we install a link from the user session keyring to
			 * the user keyring */
			ret = key_link(session_keyring, uid_keyring);
			if (ret < 0)
				goto error_release_both;
		}

		/* install the keyrings */
		/* paired with READ_ONCE() */
		smp_store_release(&user->uid_keyring, uid_keyring);
		/* paired with READ_ONCE() */
		smp_store_release(&user->session_keyring, session_keyring);
	}

	mutex_unlock(&key_user_keyring_mutex);
	kleave(" = 0");
	return 0;

error_release_both:
	key_put(session_keyring);
error_release:
	key_put(uid_keyring);
error:
	mutex_unlock(&key_user_keyring_mutex);
	kleave(" = %d", ret);
	return ret;
}
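
install_user_keyrings() follows the classic lazy-initialization shape: a lockless READ_ONCE() fast path, a mutex, a re-check under the lock, then a release store to publish the finished object. A hedged userspace sketch of just that shape, with object construction reduced to a malloc().

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

static pthread_mutex_t keyring_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *_Atomic uid_keyring_ptr;

static void *get_uid_keyring(void)
{
	/* Fast path: already published, no lock taken. */
	void *k = atomic_load_explicit(&uid_keyring_ptr,
				       memory_order_acquire);
	if (k)
		return k;

	pthread_mutex_lock(&keyring_mutex);
	k = atomic_load_explicit(&uid_keyring_ptr, memory_order_relaxed);
	if (!k) {		/* re-check under the lock */
		k = malloc(64);	/* stand-in for keyring_alloc() */
		atomic_store_explicit(&uid_keyring_ptr, k,
				      memory_order_release);
	}
	pthread_mutex_unlock(&keyring_mutex);
	return k;
}
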
Code example #14
File: cell.c Project: avagin/linux
/*
 * Set up a cell record and fill in its name, VL server address list and
 * allocate an anonymous key
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *addresses)
{
	struct afs_vlserver_list *vllist;
	struct afs_cell *cell;
	int i, ret;

	ASSERT(name);
	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}
	if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
		return ERR_PTR(-EINVAL);

	_enter("%*.*s,%s", namelen, namelen, name, addresses);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	cell->net = net;
	cell->name_len = namelen;
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);

	atomic_set(&cell->usage, 2);
	INIT_WORK(&cell->manager, afs_manage_cell);
	INIT_LIST_HEAD(&cell->proc_volumes);
	rwlock_init(&cell->proc_lock);
	rwlock_init(&cell->vl_servers_lock);

	/* Provide a VL server list, filling it in if we were given a list of
	 * addresses to use.
	 */
	if (addresses) {
		vllist = afs_parse_text_addrs(net,
					      addresses, strlen(addresses), ':',
					      VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(vllist)) {
			ret = PTR_ERR(vllist);
			goto parse_failed;
		}

		vllist->source = DNS_RECORD_FROM_CONFIG;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = TIME64_MAX;
	} else {
		ret = -ENOMEM;
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist)
			goto error;
		vllist->source = DNS_RECORD_UNAVAILABLE;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = ktime_get_real_seconds();
	}

	rcu_assign_pointer(cell->vl_servers, vllist);

	cell->dns_source = vllist->source;
	cell->dns_status = vllist->status;
	smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
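
The final smp_store_release() orders the plain writes to dns_source and dns_status before dns_lookup_count becomes visible, so a reader that acquire-loads the counter and sees it set can read both fields safely. A C11 rendering of that publication step; the reader function is invented for illustration.

#include <stdatomic.h>

static int dns_source, dns_status;	/* plain fields */
static _Atomic unsigned int dns_lookup_count;

static void publish_dns_state(int source, int status)
{
	dns_source = source;
	dns_status = status;
	/* Orders the field writes before the counter update. */
	atomic_store_explicit(&dns_lookup_count, 1, memory_order_release);
}

static int read_dns_source(void)
{
	if (atomic_load_explicit(&dns_lookup_count,
				 memory_order_acquire) == 0)
		return -1;	/* not published yet */
	return dns_source;	/* safe: ordered after the acquire */
}
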
Code example #15
/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(gfp);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}
Code example #16
/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_peer *peer, *xpeer;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		/* No connection.  We're going to need a peer to start off
		 * with.  If one doesn't yet exist, use a spare from the
		 * preallocation set.  We dump the address into the spare in
		 * anticipation - and to save on stack space.
		 */
		xpeer = b->peer_backlog[peer_tail];
		if (rxrpc_extract_addr_from_skb(&xpeer->srx, skb) < 0)
			return NULL;

		peer = rxrpc_lookup_incoming_peer(local, xpeer);
		if (peer == xpeer) {
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		rxrpc_get_local(local);
		conn->params.local = local;
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}