Example no. 1
void rose_kick(struct sock *sk)
{
    struct rose_sock *rose = rose_sk(sk);
    struct sk_buff *skb, *skbn;
    unsigned short start, end;

    if (rose->state != ROSE_STATE_3)
        return;

    if (rose->condition & ROSE_COND_PEER_RX_BUSY)
        return;

    if (!skb_peek(&sk->sk_write_queue))
        return;

    start = (skb_peek(&rose->ack_queue) == NULL) ? rose->va : rose->vs;
    end   = (rose->va + sysctl_rose_window_size) % ROSE_MODULUS;

    if (start == end)
        return;

    rose->vs = start;

    /*
     * Transmit data until either we're out of data to send or
     * the window is full.
     */

    skb  = skb_dequeue(&sk->sk_write_queue);

    do {
        if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
            skb_queue_head(&sk->sk_write_queue, skb);
            break;
        }

        skb_set_owner_w(skbn, sk);

        /*
         * Transmit the frame copy.
         */
        rose_send_iframe(sk, skbn);

        rose->vs = (rose->vs + 1) % ROSE_MODULUS;

        /*
         * Requeue the original data frame.
         */
        skb_queue_tail(&rose->ack_queue, skb);

    } while (rose->vs != end &&
             (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);

    rose->vl         = rose->vr;
    rose->condition &= ~ROSE_COND_ACK_PENDING;

    rose_stop_timer(sk);
}
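A note on the window arithmetic above: rose_kick() may transmit from vs up to,
but not including, end = (va + sysctl_rose_window_size) % ROSE_MODULUS, so the
sequence space wraps around. The user-space sketch below is not part of
net/rose; demo_window, demo_window_room and the modulus value 8 are
illustrative assumptions only.

#include <stdio.h>

#define DEMO_MODULUS 8                  /* ROSE-style modulo-8 sequence numbers (assumed) */

/* Stand-ins for the per-socket counters used by rose_kick(). */
struct demo_window {
	unsigned short va;              /* oldest unacknowledged sequence number */
	unsigned short vs;              /* next sequence number to be sent       */
};

/* How many more I-frames may be sent before the window closes. */
static unsigned short demo_window_room(const struct demo_window *w,
				       unsigned short window_size)
{
	unsigned short end  = (w->va + window_size) % DEMO_MODULUS;
	unsigned short vs   = w->vs;
	unsigned short room = 0;

	while (vs != end) {
		room++;
		vs = (vs + 1) % DEMO_MODULUS;
	}
	return room;
}

int main(void)
{
	struct demo_window w = { .va = 6, .vs = 7 };

	/* end = (6 + 3) % 8 = 1, so frames 7 and 0 may still go out. */
	printf("frames still allowed: %u\n", demo_window_room(&w, 3));
	return 0;
}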
Example no. 2
void rose_enquiry_response(struct sock *sk)
{
	struct rose_sock *rose = rose_sk(sk);

	if (rose->condition & ROSE_COND_OWN_RX_BUSY)
		rose_write_internal(sk, ROSE_RNR);
	else
		rose_write_internal(sk, ROSE_RR);

	rose->vl         = rose->vr;
	rose->condition &= ~ROSE_COND_ACK_PENDING;

	rose_stop_timer(sk);
}
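rose_enquiry_response() answers a peer's enquiry with RNR while our own
receiver is busy and with RR otherwise; setting vl = vr acknowledges every
frame received in sequence so far. Below is a toy calculation with made-up
flag and counter values (DEMO_COND_OWN_RX_BUSY is not the kernel's constant).

#include <stdio.h>

#define DEMO_COND_OWN_RX_BUSY 0x01      /* illustrative flag value only */

int main(void)
{
	unsigned int   condition = 0;   /* receiver currently not busy       */
	unsigned short vr = 5, vl = 2;  /* received up to 5, last reported 2 */

	/* The same choice rose_enquiry_response() makes. */
	printf("respond with %s\n",
	       (condition & DEMO_COND_OWN_RX_BUSY) ? "RNR" : "RR");

	vl = vr;                        /* the response acknowledges frames 3, 4 and 5 */
	printf("new lower window edge vl = %u\n", vl);
	return 0;
}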
Example no. 3
void rose_kick(struct sock *sk)
{
	struct rose_sock *rose = rose_sk(sk);
	struct sk_buff *skb, *skbn;
	unsigned short start, end;

	if (rose->state != ROSE_STATE_3)
		return;

	if (rose->condition & ROSE_COND_PEER_RX_BUSY)
		return;

	if (!skb_peek(&sk->sk_write_queue))
		return;

	start = (skb_peek(&rose->ack_queue) == NULL) ? rose->va : rose->vs;
	end   = (rose->va + sysctl_rose_window_size) % ROSE_MODULUS;

	if (start == end)
		return;

	rose->vs = start;

	skb  = skb_dequeue(&sk->sk_write_queue);

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&sk->sk_write_queue, skb);
			break;
		}

		skb_set_owner_w(skbn, sk);

		rose_send_iframe(sk, skbn);

		rose->vs = (rose->vs + 1) % ROSE_MODULUS;

		skb_queue_tail(&rose->ack_queue, skb);

	} while (rose->vs != end &&
		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);

	rose->vl         = rose->vr;
	rose->condition &= ~ROSE_COND_ACK_PENDING;

	rose_stop_timer(sk);
}
Example no. 4
static void rose_heartbeat_expiry(unsigned long param)
{
	struct sock *sk = (struct sock *)param;
	struct rose_sock *rose = rose_sk(sk);

	bh_lock_sock(sk);
	switch (rose->state) {
	case ROSE_STATE_0:
		/* Magic here: If we listen() and a new link dies before it
		   is accepted() it isn't 'dead' so doesn't get removed. */
		if (sock_flag(sk, SOCK_DESTROY) ||
		    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
			bh_unlock_sock(sk);
			rose_destroy_socket(sk);
			return;
		}
		break;

	case ROSE_STATE_3:
		/*
		 * Check for the state of the receive buffer.
		 */
		if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
		    (rose->condition & ROSE_COND_OWN_RX_BUSY)) {
			rose->condition &= ~ROSE_COND_OWN_RX_BUSY;
			rose->condition &= ~ROSE_COND_ACK_PENDING;
			rose->vl         = rose->vr;
			rose_write_internal(sk, ROSE_RR);
			rose_stop_timer(sk);	/* HB */
			break;
		}
		break;
	}

	rose_start_heartbeat(sk);
	bh_unlock_sock(sk);
}
Example no. 5
static void rose_heartbeat_expiry(unsigned long param)
{
	struct sock *sk = (struct sock *)param;
	struct rose_sock *rose = rose_sk(sk);

	bh_lock_sock(sk);
	switch (rose->state) {
	case ROSE_STATE_0:
		/* Magic here: If we listen() and a new link dies before it
		   is accepted() it isn't 'dead' so doesn't get removed. */
		if (sock_flag(sk, SOCK_DESTROY) ||
		    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
			bh_unlock_sock(sk);
			rose_destroy_socket(sk);
			return;
		}
		break;

	case ROSE_STATE_3:
		/*
		 * Check for the state of the receive buffer.
		 */
		if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
		    (rose->condition & ROSE_COND_OWN_RX_BUSY)) {
			rose->condition &= ~ROSE_COND_OWN_RX_BUSY;
			rose->condition &= ~ROSE_COND_ACK_PENDING;
			rose->vl         = rose->vr;
			rose_write_internal(sk, ROSE_RR);
			rose_stop_timer(sk);	/* HB */
			break;
		}
		break;
	}

	rose_start_heartbeat(sk);
	bh_unlock_sock(sk);
}
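rose_heartbeat_expiry() has the old "unsigned long param" timer-callback
signature, so it is armed through the pre-4.15 struct timer_list fields and
re-arms itself via rose_start_heartbeat() as shown above. The fragment below
is only a sketch of that arming pattern, not the actual rose_start_heartbeat();
the helper name and the 5 * HZ period are assumptions.

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <net/sock.h>

/* Sketch of arming a heartbeat with the pre-4.15 timer API (assumed period). */
static void demo_start_heartbeat(struct sock *sk, void (*expiry)(unsigned long))
{
	del_timer(&sk->sk_timer);                  /* drop any previous instance       */

	sk->sk_timer.data     = (unsigned long)sk; /* handed back as 'param' above     */
	sk->sk_timer.function = expiry;            /* e.g. rose_heartbeat_expiry       */
	sk->sk_timer.expires  = jiffies + 5 * HZ;  /* fire again in about five seconds */

	add_timer(&sk->sk_timer);
}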