Example #1
int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf)
{
	/* smp_load_acquire() to load write pointer on reader side
	 * this pairs with smp_store_release() in dvb_ringbuffer_write(),
	 * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
	 *
	 * for memory barriers also see Documentation/circular-buffers.txt
	 */
	return (rbuf->pread == smp_load_acquire(&rbuf->pwrite));
}
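The pairing partner named in the comment is the writer side. A minimal sketch of that side, assuming a simple byte-wise writer (the in-tree dvb_ringbuffer_write() also handles wrap-around, batched copies and the user-space variant):

/* Writer-side sketch, not the full drivers/media implementation:
 * store the payload into the buffer first, then publish the new write
 * pointer with a release store, so that a reader whose
 * smp_load_acquire(&rbuf->pwrite) observes the new pointer also observes
 * the bytes written before it.
 */
static void example_ringbuffer_put_byte(struct dvb_ringbuffer *rbuf, u8 byte)
{
	ssize_t pwrite = rbuf->pwrite;

	rbuf->data[pwrite] = byte;			/* 1: write the data */
	pwrite = (pwrite + 1) % rbuf->size;		/* 2: advance locally */
	smp_store_release(&rbuf->pwrite, pwrite);	/* 3: publish to readers */
}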
Example #2
void iteration(int thread, int *count) {
    test_fn *f = test_fns[thread];
    while (smp_load_acquire(&go) == *count)
        ;
    int r = f();
    results[thread] = r;
    smp_store_release(&done[thread], true);

    (*count)++;
}
Example #3
/*
 * Fill out an ACK packet.
 */
static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
				 struct rxrpc_ack_buffer *pkt,
				 rxrpc_seq_t *_hard_ack,
				 rxrpc_seq_t *_top,
				 u8 reason)
{
	rxrpc_serial_t serial;
	rxrpc_seq_t hard_ack, top, seq;
	int ix;
	u32 mtu, jmax;
	u8 *ackp = pkt->acks;

	/* Barrier against rxrpc_input_data(). */
	serial = call->ackr_serial;
	hard_ack = READ_ONCE(call->rx_hard_ack);
	top = smp_load_acquire(&call->rx_top);
	*_hard_ack = hard_ack;
	*_top = top;

	pkt->ack.bufferSpace	= htons(8);
	pkt->ack.maxSkew	= htons(call->ackr_skew);
	pkt->ack.firstPacket	= htonl(hard_ack + 1);
	pkt->ack.previousPacket	= htonl(call->ackr_prev_seq);
	pkt->ack.serial		= htonl(serial);
	pkt->ack.reason		= reason;
	pkt->ack.nAcks		= top - hard_ack;

	if (reason == RXRPC_ACK_PING)
		pkt->whdr.flags |= RXRPC_REQUEST_ACK;

	if (after(top, hard_ack)) {
		seq = hard_ack + 1;
		do {
			ix = seq & RXRPC_RXTX_BUFF_MASK;
			if (call->rxtx_buffer[ix])
				*ackp++ = RXRPC_ACK_TYPE_ACK;
			else
				*ackp++ = RXRPC_ACK_TYPE_NACK;
			seq++;
		} while (before_eq(seq, top));
	}

	mtu = call->conn->params.peer->if_mtu;
	mtu -= call->conn->params.peer->hdrsize;
	jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
	pkt->ackinfo.rxMTU	= htonl(rxrpc_rx_mtu);
	pkt->ackinfo.maxMTU	= htonl(mtu);
	pkt->ackinfo.rwind	= htonl(call->rx_winsize);
	pkt->ackinfo.jumbo_max	= htonl(jmax);

	*ackp++ = 0;
	*ackp++ = 0;
	*ackp++ = 0;
	return top - hard_ack + 3;
}
Example #4
void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf)
{
	/* dvb_ringbuffer_flush() counts as read operation
	 * smp_load_acquire() to load write pointer
	 * smp_store_release() to update read pointer; this ensures that the
	 * correct pointer is visible for subsequent dvb_ringbuffer_free()
	 * calls on other cpu cores
	 */
	smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite));
	rbuf->error = 0;
}
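The free-space check mentioned in the comment is the other half of this handshake. A rough sketch of such a check on the writer side (per Documentation/circular-buffers.txt the writer only needs READ_ONCE() for the read index, since it never dereferences data behind it; the in-tree dvb_ringbuffer_free() is the authoritative version):

/* Writer-side sketch: how much room is left once readers advanced pread?
 * The read pointer published by the smp_store_release() above (or by the
 * read paths) is picked up here; one byte is kept free so that a full
 * buffer can be distinguished from an empty one.
 */
static ssize_t example_ringbuffer_free(struct dvb_ringbuffer *rbuf)
{
	ssize_t free = READ_ONCE(rbuf->pread) - rbuf->pwrite;

	if (free <= 0)
		free += rbuf->size;
	return free - 1;
}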
Example #5
/*
 * Begin iteration through a server list, starting with the last used server if
 * possible, or the last recorded good server if not.
 */
static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
{
	struct afs_cell *cell = vc->cell;
	unsigned int dns_lookup_count;

	if (cell->dns_source == DNS_RECORD_UNAVAILABLE ||
	    cell->dns_expiry <= ktime_get_real_seconds()) {
		dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count);
		set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
		queue_work(afs_wq, &cell->manager);

		if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
			if (wait_var_event_interruptible(
				    &cell->dns_lookup_count,
				    smp_load_acquire(&cell->dns_lookup_count)
				    != dns_lookup_count) < 0) {
				vc->error = -ERESTARTSYS;
				return false;
			}
		}

		/* Status load is ordered after lookup counter load */
		if (cell->dns_source == DNS_RECORD_UNAVAILABLE) {
			vc->error = -EDESTADDRREQ;
			return false;
		}
	}

	read_lock(&cell->vl_servers_lock);
	vc->server_list = afs_get_vlserverlist(
		rcu_dereference_protected(cell->vl_servers,
					  lockdep_is_held(&cell->vl_servers_lock)));
	read_unlock(&cell->vl_servers_lock);
	if (!vc->server_list->nr_servers)
		return false;

	vc->untried = (1UL << vc->server_list->nr_servers) - 1;
	vc->index = -1;
	return true;
}
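The wait above only completes when something bumps dns_lookup_count and wakes the waiters. A hedged sketch of that waker side (the names mirror the fields used above; the real update happens in the AFS cell manager work item):

/* Hypothetical waker-side sketch: record the DNS outcome first, then bump
 * the counter with a release store so that a waiter whose acquire load
 * sees the new count also sees the updated dns_source, then wake anyone
 * sleeping in wait_var_event_interruptible() above.
 */
static void example_dns_lookup_done(struct afs_cell *cell, unsigned int source)
{
	cell->dns_source = source;			/* publish the result */
	smp_store_release(&cell->dns_lookup_count,
			  cell->dns_lookup_count + 1);	/* pairs with the acquire above */
	wake_up_var(&cell->dns_lookup_count);
}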
Example #6
ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
{
	ssize_t avail;

	/* smp_load_acquire() to load write pointer on reader side
	 * this pairs with smp_store_release() in dvb_ringbuffer_write(),
	 * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
	 */
	avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread;
	if (avail < 0)
		avail += rbuf->size;
	return avail;
}
Example #7
/*
 * Discard a packet we've used up and advance the Rx window by one.
 */
static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
{
    struct rxrpc_skb_priv *sp;
    struct sk_buff *skb;
    rxrpc_serial_t serial;
    rxrpc_seq_t hard_ack, top;
    u8 flags;
    int ix;

    _enter("%d", call->debug_id);

    hard_ack = call->rx_hard_ack;
    top = smp_load_acquire(&call->rx_top);
    ASSERT(before(hard_ack, top));

    hard_ack++;
    ix = hard_ack & RXRPC_RXTX_BUFF_MASK;
    skb = call->rxtx_buffer[ix];
    rxrpc_see_skb(skb, rxrpc_skb_rx_rotated);
    sp = rxrpc_skb(skb);
    flags = sp->hdr.flags;
    serial = sp->hdr.serial;
    if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO)
        serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1;

    call->rxtx_buffer[ix] = NULL;
    call->rxtx_annotations[ix] = 0;
    /* Barrier against rxrpc_input_data(). */
    smp_store_release(&call->rx_hard_ack, hard_ack);

    rxrpc_free_skb(skb, rxrpc_skb_rx_freed);

    _debug("%u,%u,%02x", hard_ack, top, flags);
    trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
    if (flags & RXRPC_LAST_PACKET) {
        rxrpc_end_rx_phase(call, serial);
    } else {
        /* Check to see if there's an ACK that needs sending. */
        if (after_eq(hard_ack, call->ackr_consumed + 2) ||
                after_eq(top, call->ackr_seen + 2) ||
                (hard_ack == top && after(hard_ack, call->ackr_consumed)))
            rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
                              true, false,
                              rxrpc_propose_ack_rotate_rx);
        if (call->ackr_reason)
            rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
    }
}
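Both barriers above pair with the producer in rxrpc_input_data(). A much simplified sketch of that side (the real function also validates sequence numbers, handles jumbo packets and drives ACK generation):

/* Producer-side sketch: the skb is stored into its ring slot before
 * rx_top is published with a release store, so a consumer doing
 * smp_load_acquire(&call->rx_top) is guaranteed to find the slot filled.
 * The consumer's smp_store_release(&call->rx_hard_ack, ...) above is the
 * reverse channel, telling the producer which slots may be reused.
 */
static void example_input_one(struct rxrpc_call *call, struct sk_buff *skb,
			      rxrpc_seq_t seq)
{
	int ix = seq & RXRPC_RXTX_BUFF_MASK;

	call->rxtx_buffer[ix] = skb;		/* 1: fill the slot */
	smp_store_release(&call->rx_top, seq);	/* 2: then publish it */
}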
Example #8
void run_test(int threads, int n)
{
    reset();
    // Let my people go
    smp_store_release(&go, n+1);
    // Wait for everyone
    for (int i = 0; i < threads; i++) {
        while (!smp_load_acquire(&done[i]))
            ;

        smp_store_release(&done[i], false);
    }

    // Everyone done, process results.
    process_results(results);
}
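iteration() (Example #2) and run_test() above form a go/done handshake: the release store to go lets the workers start, and each release store to done[i] publishes a worker's result before the main thread's acquire load reads it. In a user-space harness the two macros are not the kernel's; one plausible shim, assuming the shared flags can be declared _Atomic, maps them onto C11 acquire/release operations:

/* Hypothetical user-space shim for the harness above, not the kernel's
 * definition: with go and done[] declared _Atomic, the macros below give
 * the same acquire/release ordering via <stdatomic.h>.
 */
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int go;
static _Atomic bool done[64];		/* one flag per worker; size is arbitrary */

#define smp_load_acquire(p)	atomic_load_explicit((p), memory_order_acquire)
#define smp_store_release(p, v)	atomic_store_explicit((p), (v), memory_order_release)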
Example #9
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch, to_free);
	}

	f->qlen++;
	if (skb_is_retransmit(skb))
		q->stat_tcp_retrans++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		struct sock *sk = skb->sk;

		fq_flow_add_tail(&q->new_flows, f);
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);
		if (sk && q->rate_enable) {
			if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
				     SK_PACING_FQ))
				smp_store_release(&sk->sk_pacing_status,
						  SK_PACING_FQ);
		}
		q->inactive_flows--;
	}

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;
	}
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
Example #10
void quarantine_reduce(void)
{
	size_t new_quarantine_size;
	unsigned long flags;
	struct qlist to_free = QLIST_INIT;
	size_t size_to_free = 0;
	void **last;

	/* smp_load_acquire() here pairs with smp_store_release() below. */
	if (likely(ACCESS_ONCE(global_quarantine.bytes) <=
		   smp_load_acquire(&quarantine_size)))
		return;

	spin_lock_irqsave(&quarantine_lock, flags);

	/* Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	new_quarantine_size = (ACCESS_ONCE(totalram_pages) << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	new_quarantine_size -= QUARANTINE_PERCPU_SIZE * num_online_cpus();
	/* Pairs with smp_load_acquire() above and in QUARANTINE_LOW_SIZE. */
	smp_store_release(&quarantine_size, new_quarantine_size);

	last = global_quarantine.head;
	while (last) {
		struct kmem_cache *cache = qlink_to_cache(last);

		size_to_free += cache->size;
		if (!*last || size_to_free >
		    global_quarantine.bytes - QUARANTINE_LOW_SIZE)
			break;
		last = (void **) *last;
	}
	qlist_move(&global_quarantine, last, &to_free, size_to_free);

	spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
}
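The release comment points at QUARANTINE_LOW_SIZE as the other acquire site. A sketch of what such a macro can look like (the fraction is illustrative, not the kernel's actual constant; the real definition lives in mm/kasan/quarantine.c):

/* Sketch only: every reader of quarantine_size uses an acquire load,
 * pairing with the smp_store_release() in quarantine_reduce() above.
 */
#define QUARANTINE_LOW_SIZE	(smp_load_acquire(&quarantine_size) * 3 / 4)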
Example #11
bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;

	/*
	 * We need both ACQUIRE (pairs with corresponding RELEASE in
	 * unlock() uncontended, or fastpath) and RELEASE (to publish
	 * the node fields we just initialised) semantics when updating
	 * the lock tail.
	 */
	old = atomic_xchg(&lock->tail, curr);
	if (old == OSQ_UNLOCKED_VAL)
		return true;

	prev = decode_cpu(old);
	node->prev = prev;
	WRITE_ONCE(prev->next, node);

	/*
	 * Normally @prev is untouchable after the above store, because at that
	 * moment unlock can proceed and wipe the node element from stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg in an attempt to undo our queueing.
	 */

	while (!READ_ONCE(node->locked)) {
		/*
		 * If we need to reschedule, bail... so we can block.
		 */
		if (need_resched())
			goto unqueue;

		cpu_relax_lowlatency();
	}
	return true;

unqueue:
	/*
	 * Step - A  -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this will make @prev's
	 * unlock()/unqueue() wait for a next pointer since @lock points to us
	 * (or later).
	 */

	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an unlock(),
		 * in which case we should observe @node->locked becoming
		 * true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		cpu_relax_lowlatency();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
		 * case its step-C will write us a new @node->prev pointer.
		 */
		prev = READ_ONCE(node->prev);
	}

	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(), wait for @node->next or move @lock from @node
	 * back to @prev.
	 */

	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;

	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it's still waiting for a new @prev->next
	 * pointer, @next is stable because our @node->next pointer is NULL and
	 * it will wait in Step-A.
	 */

	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);

	return false;
}
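The ACQUIRE/RELEASE comment in osq_lock() is the general publish pattern in miniature: the node is fully initialised before the xchg publishes it, and the acquire half synchronises with the previous owner's release. A stripped-down illustration of the same pattern using the plain primitives (a generic sketch, not the osq code itself):

/* Generic publish/consume sketch: the producer initialises the object
 * completely, then publishes the pointer with a release store; a consumer
 * whose acquire load observes the pointer also observes the initialised
 * fields.
 */
struct example_node {
	int data;
};

static struct example_node *example_published;

static void example_publish(struct example_node *n)
{
	n->data = 42;					/* 1: initialise */
	smp_store_release(&example_published, n);	/* 2: publish */
}

static int example_consume(void)
{
	struct example_node *n = smp_load_acquire(&example_published);

	return n ? n->data : -1;	/* if n is visible, n->data == 42 is too */
}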
Example #12
/**
 * dump_common_audit_data - helper to dump common audit data
 * @ab : the audit buffer
 * @a : common audit data
 *
 */
static void dump_common_audit_data(struct audit_buffer *ab,
				   struct common_audit_data *a)
{
	char comm[sizeof(current->comm)];

	/*
	 * To keep stack sizes in check force programmers to notice if they
	 * start making this union too large!  See struct lsm_network_audit
	 * as an example of how to deal with large data.
	 */
	BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);

	audit_log_format(ab, " pid=%d comm=", task_tgid_nr(current));
	audit_log_untrustedstring(ab, memcpy(comm, current->comm, sizeof(comm)));

	switch (a->type) {
	case LSM_AUDIT_DATA_NONE:
		return;
	case LSM_AUDIT_DATA_IPC:
		audit_log_format(ab, " key=%d ", a->u.ipc_id);
		break;
	case LSM_AUDIT_DATA_CAP:
		audit_log_format(ab, " capability=%d ", a->u.cap);
		break;
	case LSM_AUDIT_DATA_PATH: {
		struct inode *inode;

		audit_log_d_path(ab, " path=", &a->u.path);

		inode = d_backing_inode(a->u.path.dentry);
		if (inode) {
			audit_log_format(ab, " dev=");
			audit_log_untrustedstring(ab, inode->i_sb->s_id);
			audit_log_format(ab, " ino=%lu", inode->i_ino);
		}
		break;
	}
	case LSM_AUDIT_DATA_FILE: {
		struct inode *inode;

		audit_log_d_path(ab, " path=", &a->u.file->f_path);

		inode = file_inode(a->u.file);
		if (inode) {
			audit_log_format(ab, " dev=");
			audit_log_untrustedstring(ab, inode->i_sb->s_id);
			audit_log_format(ab, " ino=%lu", inode->i_ino);
		}
		break;
	}
	case LSM_AUDIT_DATA_IOCTL_OP: {
		struct inode *inode;

		audit_log_d_path(ab, " path=", &a->u.op->path);

		inode = a->u.op->path.dentry->d_inode;
		if (inode) {
			audit_log_format(ab, " dev=");
			audit_log_untrustedstring(ab, inode->i_sb->s_id);
			audit_log_format(ab, " ino=%lu", inode->i_ino);
		}

		audit_log_format(ab, " ioctlcmd=0x%hx", a->u.op->cmd);
		break;
	}
	case LSM_AUDIT_DATA_DENTRY: {
		struct inode *inode;

		audit_log_format(ab, " name=");
		audit_log_untrustedstring(ab, a->u.dentry->d_name.name);

		inode = d_backing_inode(a->u.dentry);
		if (inode) {
			audit_log_format(ab, " dev=");
			audit_log_untrustedstring(ab, inode->i_sb->s_id);
			audit_log_format(ab, " ino=%lu", inode->i_ino);
		}
		break;
	}
	case LSM_AUDIT_DATA_INODE: {
		struct dentry *dentry;
		struct inode *inode;

		inode = a->u.inode;
		dentry = d_find_alias(inode);
		if (dentry) {
			audit_log_format(ab, " name=");
			audit_log_untrustedstring(ab,
					 dentry->d_name.name);
			dput(dentry);
		}
		audit_log_format(ab, " dev=");
		audit_log_untrustedstring(ab, inode->i_sb->s_id);
		audit_log_format(ab, " ino=%lu", inode->i_ino);
		break;
	}
	case LSM_AUDIT_DATA_TASK: {
		struct task_struct *tsk = a->u.tsk;
		if (tsk) {
			pid_t pid = task_tgid_nr(tsk);
			if (pid) {
				char comm[sizeof(tsk->comm)];
				audit_log_format(ab, " opid=%d ocomm=", pid);
				audit_log_untrustedstring(ab,
				    memcpy(comm, tsk->comm, sizeof(comm)));
			}
		}
		break;
	}
	case LSM_AUDIT_DATA_NET:
		if (a->u.net->sk) {
			struct sock *sk = a->u.net->sk;
			struct unix_sock *u;
			struct unix_address *addr;
			int len = 0;
			char *p = NULL;

			switch (sk->sk_family) {
			case AF_INET: {
				struct inet_sock *inet = inet_sk(sk);

				print_ipv4_addr(ab, inet->inet_rcv_saddr,
						inet->inet_sport,
						"laddr", "lport");
				print_ipv4_addr(ab, inet->inet_daddr,
						inet->inet_dport,
						"faddr", "fport");
				break;
			}
#if IS_ENABLED(CONFIG_IPV6)
			case AF_INET6: {
				struct inet_sock *inet = inet_sk(sk);

				print_ipv6_addr(ab, &sk->sk_v6_rcv_saddr,
						inet->inet_sport,
						"laddr", "lport");
				print_ipv6_addr(ab, &sk->sk_v6_daddr,
						inet->inet_dport,
						"faddr", "fport");
				break;
			}
#endif
			case AF_UNIX:
				u = unix_sk(sk);
				addr = smp_load_acquire(&u->addr);
				if (!addr)
					break;
				if (u->path.dentry) {
					audit_log_d_path(ab, " path=", &u->path);
					break;
				}
				len = addr->len-sizeof(short);
				p = &addr->name->sun_path[0];
				audit_log_format(ab, " path=");
				if (*p)
					audit_log_untrustedstring(ab, p);
				else
					audit_log_n_hex(ab, p, len);
				break;
			}
		}

		switch (a->u.net->family) {
		case AF_INET:
			print_ipv4_addr(ab, a->u.net->v4info.saddr,
					a->u.net->sport,
					"saddr", "src");
			print_ipv4_addr(ab, a->u.net->v4info.daddr,
					a->u.net->dport,
					"daddr", "dest");
			break;
		case AF_INET6:
			print_ipv6_addr(ab, &a->u.net->v6info.saddr,
					a->u.net->sport,
					"saddr", "src");
			print_ipv6_addr(ab, &a->u.net->v6info.daddr,
					a->u.net->dport,
					"daddr", "dest");
			break;
		}
		if (a->u.net->netif > 0) {
			struct net_device *dev;

			/* NOTE: we always use init's namespace */
			dev = dev_get_by_index(&init_net, a->u.net->netif);
			if (dev) {
				audit_log_format(ab, " netif=%s", dev->name);
				dev_put(dev);
			}
		}
		break;
#ifdef CONFIG_KEYS
	case LSM_AUDIT_DATA_KEY:
		audit_log_format(ab, " key_serial=%u", a->u.key_struct.key);
		if (a->u.key_struct.key_desc) {
			audit_log_format(ab, " key_desc=");
			audit_log_untrustedstring(ab, a->u.key_struct.key_desc);
		}
		break;
#endif
	case LSM_AUDIT_DATA_KMOD:
		audit_log_format(ab, " kmod=");
		audit_log_untrustedstring(ab, a->u.kmod_name);
		break;
	case LSM_AUDIT_DATA_IBPKEY: {
		struct in6_addr sbn_pfx;

		memset(&sbn_pfx.s6_addr, 0,
		       sizeof(sbn_pfx.s6_addr));
		memcpy(&sbn_pfx.s6_addr, &a->u.ibpkey->subnet_prefix,
		       sizeof(a->u.ibpkey->subnet_prefix));
		audit_log_format(ab, " pkey=0x%x subnet_prefix=%pI6c",
				 a->u.ibpkey->pkey, &sbn_pfx);
		break;
	}
	case LSM_AUDIT_DATA_IBENDPORT:
		audit_log_format(ab, " device=%s port_num=%u",
				 a->u.ibendport->dev_name,
				 a->u.ibendport->port);
		break;
	} /* switch (a->type) */
}
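Only the AF_UNIX branch above needs an explicit barrier: smp_load_acquire(&u->addr) pairs with the release store that publishes a fully built address on the bind/autobind side. A hedged sketch of that publisher (simplified; the real code in net/unix/af_unix.c runs under the appropriate locks and reference counting):

/* Publisher-side sketch: build the unix_address completely before the
 * release store, so the acquire load in the audit code above never sees
 * a half-initialised addr->len or addr->name.
 */
static void example_unix_publish_addr(struct unix_sock *u,
				      struct unix_address *addr,
				      const char *sun_path, int path_len)
{
	addr->name->sun_family = AF_UNIX;			/* 1: build */
	memcpy(addr->name->sun_path, sun_path, path_len);
	addr->len = path_len + sizeof(short);			/* include sun_family */
	smp_store_release(&u->addr, addr);			/* 2: publish */
}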
Example #13
/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net:	The network namespace
 * @name:	The name of the cell.
 * @namesz:	The strlen of the cell name.
 * @vllist:	A colon/comma separated list of numeric IP addresses or NULL.
 * @excl:	T if an error should be given if the cell name already exists.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	enum afs_cell_state state;
	int ret, n;

	_enter("%s,%s", name, vllist);

	if (!excl) {
		rcu_read_lock();
		cell = afs_lookup_cell_rcu(net, name, namesz);
		rcu_read_unlock();
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record.  We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	write_seqlock(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	cell = candidate;
	candidate = NULL;
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	atomic_inc(&net->cells_outstanding);
	write_sequnlock(&net->cells_lock);

	queue_work(afs_wq, &cell->manager);

wait_for_cell:
	_debug("wait_for_cell");
	wait_var_event(&cell->state,
		       ({
			       state = smp_load_acquire(&cell->state); /* vs error */
			       state == AFS_CELL_ACTIVE || state == AFS_CELL_FAILED;
		       }));
Example #14
/*
 * Deliver messages to a call.  This keeps processing packets until the buffer
 * is filled and we find either more DATA (returns 0) or the end of the DATA
 * (returns 1).  If more packets are required, it returns -EAGAIN.
 */
static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
                              struct msghdr *msg, struct iov_iter *iter,
                              size_t len, int flags, size_t *_offset)
{
    struct rxrpc_skb_priv *sp;
    struct sk_buff *skb;
    rxrpc_seq_t hard_ack, top, seq;
    size_t remain;
    bool last;
    unsigned int rx_pkt_offset, rx_pkt_len;
    int ix, copy, ret = -EAGAIN, ret2;

    rx_pkt_offset = call->rx_pkt_offset;
    rx_pkt_len = call->rx_pkt_len;

    if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
        seq = call->rx_hard_ack;
        ret = 1;
        goto done;
    }

    /* Barriers against rxrpc_input_data(). */
    hard_ack = call->rx_hard_ack;
    top = smp_load_acquire(&call->rx_top);
    for (seq = hard_ack + 1; before_eq(seq, top); seq++) {
        ix = seq & RXRPC_RXTX_BUFF_MASK;
        skb = call->rxtx_buffer[ix];
        if (!skb) {
            trace_rxrpc_recvmsg(call, rxrpc_recvmsg_hole, seq,
                                rx_pkt_offset, rx_pkt_len, 0);
            break;
        }
        smp_rmb();
        rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
        sp = rxrpc_skb(skb);

        if (!(flags & MSG_PEEK))
            trace_rxrpc_receive(call, rxrpc_receive_front,
                                sp->hdr.serial, seq);

        if (msg)
            sock_recv_timestamp(msg, sock->sk, skb);

        if (rx_pkt_offset == 0) {
            ret2 = rxrpc_locate_data(call, skb,
                                     &call->rxtx_annotations[ix],
                                     &rx_pkt_offset, &rx_pkt_len);
            trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
                                rx_pkt_offset, rx_pkt_len, ret2);
            if (ret2 < 0) {
                ret = ret2;
                goto out;
            }
        } else {
            trace_rxrpc_recvmsg(call, rxrpc_recvmsg_cont, seq,
                                rx_pkt_offset, rx_pkt_len, 0);
        }

        /* We have to handle short, empty and used-up DATA packets. */
        remain = len - *_offset;
        copy = rx_pkt_len;
        if (copy > remain)
            copy = remain;
        if (copy > 0) {
            ret2 = skb_copy_datagram_iter(skb, rx_pkt_offset, iter,
                                          copy);
            if (ret2 < 0) {
                ret = ret2;
                goto out;
            }

            /* handle piecemeal consumption of data packets */
            rx_pkt_offset += copy;
            rx_pkt_len -= copy;
            *_offset += copy;
        }

        if (rx_pkt_len > 0) {
            trace_rxrpc_recvmsg(call, rxrpc_recvmsg_full, seq,
                                rx_pkt_offset, rx_pkt_len, 0);
            ASSERTCMP(*_offset, ==, len);
            ret = 0;
            break;
        }

        /* The whole packet has been transferred. */
        last = sp->hdr.flags & RXRPC_LAST_PACKET;
        if (!(flags & MSG_PEEK))
            rxrpc_rotate_rx_window(call);
        rx_pkt_offset = 0;
        rx_pkt_len = 0;

        if (last) {
            ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
            ret = 1;
            goto out;
        }
    }
Example #15
/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_peer *peer, *xpeer;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		/* No connection.  We're going to need a peer to start off
		 * with.  If one doesn't yet exist, use a spare from the
		 * preallocation set.  We dump the address into the spare in
		 * anticipation - and to save on stack space.
		 */
		xpeer = b->peer_backlog[peer_tail];
		if (rxrpc_extract_addr_from_skb(&xpeer->srx, skb) < 0)
			return NULL;

		peer = rxrpc_lookup_incoming_peer(local, xpeer);
		if (peer == xpeer) {
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		rxrpc_get_local(local);
		conn->params.local = local;
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}
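Each consume above (acquire on the head, release on the tail) follows the classic circular-buffer protocol; the preallocation side does the opposite. A simplified producer sketch for the call backlog ring (an assumption-laden stand-in for what rxrpc's preallocation code does, not a copy of it):

/* Producer-side sketch for one backlog ring: the object is written into
 * the slot before the head is published with a release store, pairing
 * with the smp_load_acquire(&b->call_backlog_head) in the consumer above.
 * The consumer's smp_store_release() on the tail, in turn, tells the
 * producer when a slot may be refilled.
 */
static bool example_prealloc_call(struct rxrpc_backlog *b, struct rxrpc_call *call)
{
	unsigned int head = b->call_backlog_head;
	unsigned int tail = READ_ONCE(b->call_backlog_tail);

	if (CIRC_CNT(head, tail, RXRPC_BACKLOG_MAX) == RXRPC_BACKLOG_MAX - 1)
		return false;				/* ring is full */

	b->call_backlog[head] = call;			/* 1: fill the slot */
	smp_store_release(&b->call_backlog_head,	/* 2: then publish it */
			  (head + 1) & (RXRPC_BACKLOG_MAX - 1));
	return true;
}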