/*
 * Prepend the NET/ROM network header (source/destination callsigns plus
 * TTL byte) to an outgoing frame and hand it to the routing layer.  On
 * routing failure the skb is freed and the connection is torn down.
 */
void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
{
        struct nr_sock *nr = nr_sk(sk);
        unsigned char *dptr;

        /*
         * Add the protocol byte and network header.
         */
        dptr = skb_push(skb, NR_NETWORK_LEN);

        /* Source callsign: clear C and E bits, set the spare SSID bits. */
        memcpy(dptr, &nr->source_addr, AX25_ADDR_LEN);
        dptr[6] &= ~AX25_CBIT;
        dptr[6] &= ~AX25_EBIT;
        dptr[6] |= AX25_SSSID_SPARE;
        dptr += AX25_ADDR_LEN;

        /* Destination callsign: E bit set marks the end of the address field. */
        memcpy(dptr, &nr->dest_addr, AX25_ADDR_LEN);
        dptr[6] &= ~AX25_CBIT;
        dptr[6] |= AX25_EBIT;
        dptr[6] |= AX25_SSSID_SPARE;
        dptr += AX25_ADDR_LEN;

        /* Initial time-to-live for the network layer. */
        *dptr++ = sysctl_netrom_network_ttl_initialiser;

        if (!nr_route_frame(skb, NULL)) {
                /* No route: the frame is ours to free, and the link is dead. */
                kfree_skb(skb);
                nr_disconnect(sk, ENETUNREACH);
        }
}
/*
 * Idle timer expiry: the connection has been idle too long, so start an
 * orderly disconnect.  Sends a DISCREQ, moves to state 2 (awaiting
 * disconnect acknowledgement) and arms T1 to retry the DISCREQ.
 */
static void nr_idletimer_expiry(unsigned long param)
{
        struct sock *sk = (struct sock *)param;
        struct nr_sock *nr = nr_sk(sk);

        bh_lock_sock(sk);

        /* Drop anything still queued in either direction. */
        nr_clear_queues(sk);

        nr->n2count = 0;
        nr_write_internal(sk, NR_DISCREQ);
        nr->state = NR_STATE_2;

        /* T1 now guards the DISCREQ; T2/T4 are meaningless in state 2. */
        nr_start_t1timer(sk);
        nr_stop_t2timer(sk);
        nr_stop_t4timer(sk);

        sk->sk_state = TCP_CLOSE;
        sk->sk_err = 0;
        sk->sk_shutdown |= SEND_SHUTDOWN;

        if (!sock_flag(sk, SOCK_DEAD)) {
                /* Wake anyone sleeping on this socket, then mark it dead. */
                sk->sk_state_change(sk);
                sock_set_flag(sk, SOCK_DEAD);
        }

        bh_unlock_sock(sk);
}
/*
 * (Re)arm the idle-disconnect timer; an idle value of zero leaves the
 * timer untouched (idle timeout disabled).
 */
void nr_start_idletimer(struct sock *sk)
{
        struct nr_sock *nr_s = nr_sk(sk);

        if (nr_s->idle > 0)
                mod_timer(&nr_s->idletimer, jiffies + nr_s->idle);
}
/* Higher level upcall for a LAPB frame - called with sk locked */ int nr_process_rx_frame(struct sock *sk, struct sk_buff *skb) { struct nr_sock *nr = nr_sk(sk); int queued = 0, frametype; if (nr->state == NR_STATE_0) return 0; frametype = skb->data[19]; switch (nr->state) { case NR_STATE_1: queued = nr_state1_machine(sk, skb, frametype); break; case NR_STATE_2: queued = nr_state2_machine(sk, skb, frametype); break; case NR_STATE_3: queued = nr_state3_machine(sk, skb, frametype); break; } nr_kick(sk); return queued; }
static void nr_t4timer_expiry(unsigned long param) { struct sock *sk = (struct sock *)param; bh_lock_sock(sk); nr_sk(sk)->condition &= ~NR_COND_PEER_RX_BUSY; bh_unlock_sock(sk); }
/*
 * Purge every frame queue associated with this socket: the pending
 * write queue plus the protocol's ack, resequencing and fragment queues.
 */
void nr_clear_queues(struct sock *sk)
{
        struct nr_sock *nr = nr_sk(sk);
        struct sk_buff_head *queues[] = {
                &sk->sk_write_queue,
                &nr->ack_queue,
                &nr->reseq_queue,
                &nr->frag_queue,
        };
        unsigned int i;

        for (i = 0; i < sizeof(queues) / sizeof(queues[0]); i++)
                skb_queue_purge(queues[i]);
}
/* * Kill all bound sockets on a dropped device. */ static void nr_kill_by_device(struct net_device *dev) { struct sock *s; spin_lock_bh(&nr_list_lock); sk_for_each(s, &nr_list) if (nr_sk(s)->device == dev) nr_disconnect(s, ENETUNREACH); spin_unlock_bh(&nr_list_lock); }
/*
 * T2 (ack delay) timer expiry: if an acknowledgement is still pending,
 * send it now via an enquiry response.
 */
static void nr_t2timer_expiry(unsigned long param)
{
        struct sock *sk = (struct sock *)param;
        struct nr_sock *nrom = nr_sk(sk);

        bh_lock_sock(sk);
        if (nrom->condition & NR_COND_ACK_PENDING) {
                nrom->condition &= ~NR_COND_ACK_PENDING;
                nr_enquiry_response(sk);
        }
        bh_unlock_sock(sk);
}
/*
 * Validate that the value of nr is between va and vs (inclusive of vs),
 * walking modulo NR_MODULUS.  Returns 1 if the sequence number is in
 * range, 0 otherwise.
 */
int nr_validate_nr(struct sock *sk, unsigned short nr)
{
        struct nr_sock *nrom = nr_sk(sk);
        unsigned short seq;

        for (seq = nrom->va; seq != nrom->vs; seq = (seq + 1) % NR_MODULUS) {
                if (seq == nr)
                        return 1;
        }

        return nr == nrom->vs;
}
/*
 * Requeue all the un-ack-ed frames on the output queue to be picked
 * up by nr_kick called from the timer. This arrangement handles the
 * possibility of an empty output queue.
 *
 * The first dequeued frame goes to the head of the write queue; each
 * later one is appended after the previously moved frame, so the
 * original transmission order is preserved ahead of any frames already
 * waiting in the write queue.
 */
void nr_requeue_frames(struct sock *sk)
{
        struct sk_buff *skb, *skb_prev = NULL;

        while ((skb = skb_dequeue(&nr_sk(sk)->ack_queue)) != NULL) {
                if (skb_prev == NULL)
                        skb_queue_head(&sk->sk_write_queue, skb);
                else
                        skb_append(skb_prev, skb, &sk->sk_write_queue);
                skb_prev = skb;
        }
}
/*
 * Check that ns lies within the receive window, i.e. in the modulo
 * range [vr, vl + window).  Returns 1 if so, 0 otherwise.
 */
int nr_in_rx_window(struct sock *sk, unsigned short ns)
{
        struct nr_sock *nr = nr_sk(sk);
        unsigned short limit = (nr->vl + nr->window) % NR_MODULUS;
        unsigned short seq;

        for (seq = nr->vr; seq != limit; seq = (seq + 1) % NR_MODULUS) {
                if (seq == ns)
                        return 1;
        }

        return 0;
}
/*
 * Set up all the protocol timers for a freshly created socket.  Each
 * timer's callback receives the socket pointer cast to unsigned long.
 * The generic sk_timer (already initialized by sock_init_data) is
 * repurposed as the heartbeat timer.
 */
void nr_init_timers(struct sock *sk)
{
        struct nr_sock *nr = nr_sk(sk);

        setup_timer(&nr->t1timer, nr_t1timer_expiry, (unsigned long)sk);
        setup_timer(&nr->t2timer, nr_t2timer_expiry, (unsigned long)sk);
        setup_timer(&nr->t4timer, nr_t4timer_expiry, (unsigned long)sk);
        setup_timer(&nr->idletimer, nr_idletimer_expiry, (unsigned long)sk);

        /* initialized by sock_init_data */
        sk->sk_timer.data = (unsigned long)sk;
        sk->sk_timer.function = &nr_heartbeat_expiry;
}
/*
 * Start connection establishment: reset the retry state, send a
 * CONNREQ, and leave only T1 running to time out / retransmit the
 * request.
 */
void nr_establish_data_link(struct sock *sk)
{
        struct nr_sock *nr = nr_sk(sk);

        nr->condition = 0x00;
        nr->n2count = 0;

        nr_write_internal(sk, NR_CONNREQ);

        /* Only T1 is relevant while awaiting the connection ack. */
        nr_stop_t2timer(sk);
        nr_stop_t4timer(sk);
        nr_stop_idletimer(sk);
        nr_start_t1timer(sk);
}
void nr_frames_acked(struct sock *sk, unsigned short nr) { struct nr_sock *nrom = nr_sk(sk); struct sk_buff *skb; if (nrom->va != nr) { while (skb_peek(&nrom->ack_queue) != NULL && nrom->va != nr) { skb = skb_dequeue(&nrom->ack_queue); kfree_skb(skb); nrom->va = (nrom->va + 1) % NR_MODULUS; } } }
/*
 * Process an acknowledgement of our outstanding I frames up to nr.
 */
void nr_check_iframes_acked(struct sock *sk, unsigned short nr)
{
        struct nr_sock *nrom = nr_sk(sk);

        if (nrom->vs == nr) {
                /* Everything we sent has been acknowledged. */
                nr_frames_acked(sk, nr);
                nr_stop_t1timer(sk);
                nrom->n2count = 0;
                return;
        }

        if (nrom->va != nr) {
                /* Partial acknowledgement: restart T1 for what remains. */
                nr_frames_acked(sk, nr);
                nr_start_t1timer(sk);
        }
}
/*
 * Stamp an outgoing I frame with the current send/receive sequence
 * numbers (and the choke flag if our receiver is busy), then transmit
 * it and restart the idle timer.
 */
static void nr_send_iframe(struct sock *sk, struct sk_buff *skb)
{
        struct nr_sock *nr = nr_sk(sk);

        if (!skb)
                return;

        skb->data[2] = nr->vs;
        skb->data[3] = nr->vr;

        if (nr->condition & NR_COND_OWN_RX_BUSY)
                skb->data[4] |= NR_CHOKE_FLAG;

        nr_start_idletimer(sk);

        nr_transmit_buffer(sk, skb);
}
void nr_enquiry_response(struct sock *sk) { struct nr_sock *nr = nr_sk(sk); int frametype = NR_INFOACK; if (nr->condition & NR_COND_OWN_RX_BUSY) { frametype |= NR_CHOKE_FLAG; } else { if (skb_peek(&nr->reseq_queue) != NULL) frametype |= NR_NAK_FLAG; } nr_write_internal(sk, frametype); nr->vl = nr->vr; nr->condition &= ~NR_COND_ACK_PENDING; }
/*
 * T1 (retry) timer expiry.  In each connected state, give up with
 * ETIMEDOUT once n2 retries have been used; otherwise retransmit the
 * frame appropriate to the state (CONNREQ, DISCREQ, or the unacked
 * I frames) and restart T1.
 */
static void nr_t1timer_expiry(unsigned long param)
{
        struct sock *sk = (struct sock *)param;
        struct nr_sock *nr = nr_sk(sk);

        bh_lock_sock(sk);
        switch (nr->state) {
        case NR_STATE_1:
                /* Awaiting connection: retry the CONNREQ. */
                if (nr->n2count == nr->n2) {
                        nr_disconnect(sk, ETIMEDOUT);
                        bh_unlock_sock(sk);
                        return;
                } else {
                        nr->n2count++;
                        nr_write_internal(sk, NR_CONNREQ);
                }
                break;

        case NR_STATE_2:
                /* Awaiting disconnect ack: retry the DISCREQ. */
                if (nr->n2count == nr->n2) {
                        nr_disconnect(sk, ETIMEDOUT);
                        bh_unlock_sock(sk);
                        return;
                } else {
                        nr->n2count++;
                        nr_write_internal(sk, NR_DISCREQ);
                }
                break;

        case NR_STATE_3:
                /* Connected: requeue unacked frames for retransmission. */
                if (nr->n2count == nr->n2) {
                        nr_disconnect(sk, ETIMEDOUT);
                        bh_unlock_sock(sk);
                        return;
                } else {
                        nr->n2count++;
                        nr_requeue_frames(sk);
                }
                break;
        }

        nr_start_t1timer(sk);
        bh_unlock_sock(sk);
}
/*
 * Tear the connection down immediately: stop every protocol timer,
 * purge all queues, drop to state 0, and report `reason' as the socket
 * error to the waiting user.
 */
void nr_disconnect(struct sock *sk, int reason)
{
        nr_stop_t1timer(sk);
        nr_stop_t2timer(sk);
        nr_stop_t4timer(sk);
        nr_stop_idletimer(sk);

        nr_clear_queues(sk);

        nr_sk(sk)->state = NR_STATE_0;

        sk->sk_state = TCP_CLOSE;
        sk->sk_err = reason;
        sk->sk_shutdown |= SEND_SHUTDOWN;

        if (!sock_flag(sk, SOCK_DEAD)) {
                /* Wake any sleeper once, then mark the socket dead. */
                sk->sk_state_change(sk);
                sock_set_flag(sk, SOCK_DEAD);
        }
}
/*
 * State machine for state 1, Awaiting Connection State.
 * The handling of the timer(s) is in file nr_timer.c.
 * Handling of state 0 and connection release is in netrom.c.
 *
 * A plain CONNACK completes the connection: record the peer's circuit
 * index/id and window (bytes 17, 18 and 20 of the frame - presumably
 * the NET/ROM transport header layout), reset all sequence variables
 * and enter state 3.  A choked CONNACK is a refusal.
 */
static int nr_state1_machine(struct sock *sk, struct sk_buff *skb,
        int frametype)
{
        switch (frametype) {
        case NR_CONNACK: {
                struct nr_sock *nr = nr_sk(sk);

                nr_stop_t1timer(sk);
                nr_start_idletimer(sk);
                nr->your_index = skb->data[17];
                nr->your_id = skb->data[18];
                nr->vs = 0;
                nr->va = 0;
                nr->vr = 0;
                nr->vl = 0;
                nr->state = NR_STATE_3;
                nr->n2count = 0;
                nr->window = skb->data[20];
                sk->sk_state = TCP_ESTABLISHED;
                /* For WAIT_* calls etc */
                if (!sock_flag(sk, SOCK_DEAD))
                        sk->sk_state_change(sk);
                break;
        }

        case NR_CONNACK | NR_CHOKE_FLAG:
                /* Connection refused by the peer. */
                nr_disconnect(sk, ECONNREFUSED);
                break;

        case NR_RESET:
                /* Only honour resets if the sysctl allows it. */
                if (sysctl_netrom_reset_circuit)
                        nr_disconnect(sk, ECONNRESET);
                break;

        default:
                break;
        }
        return 0;
}
/*
 * Heartbeat timer: periodic housekeeping for every NET/ROM socket.
 * In state 0 it reaps sockets that will never be accepted; in state 3
 * it lifts our own receive-busy condition once the receive buffer has
 * drained below half of rcvbuf.
 */
static void nr_heartbeat_expiry(unsigned long param)
{
        struct sock *sk = (struct sock *)param;
        struct nr_sock *nr = nr_sk(sk);

        bh_lock_sock(sk);
        switch (nr->state) {
        case NR_STATE_0:
                /* Magic here: If we listen() and a new link dies before it
                   is accepted() it isn't 'dead' so doesn't get removed. */
                if (sock_flag(sk, SOCK_DESTROY) ||
                    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
                        /* Hold a reference across the unlock so the socket
                           cannot vanish before nr_destroy_socket runs. */
                        sock_hold(sk);
                        bh_unlock_sock(sk);
                        nr_destroy_socket(sk);
                        sock_put(sk);
                        return;
                }
                break;

        case NR_STATE_3:
                /*
                 * Check for the state of the receive buffer.
                 */
                if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
                    (nr->condition & NR_COND_OWN_RX_BUSY)) {
                        nr->condition &= ~NR_COND_OWN_RX_BUSY;
                        nr->condition &= ~NR_COND_ACK_PENDING;
                        nr->vl = nr->vr;
                        /* Tell the peer we are no longer choked. */
                        nr_write_internal(sk, NR_INFOACK);
                        break;
                }
                break;
        }

        nr_start_heartbeat(sk);
        bh_unlock_sock(sk);
}
void nr_send_nak_frame(struct sock *sk) { struct sk_buff *skb, *skbn; struct nr_sock *nr = nr_sk(sk); if ((skb = skb_peek(&nr->ack_queue)) == NULL) return; if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) return; skbn->data[2] = nr->va; skbn->data[3] = nr->vr; if (nr->condition & NR_COND_OWN_RX_BUSY) skbn->data[4] |= NR_CHOKE_FLAG; nr_transmit_buffer(sk, skbn); nr->condition &= ~NR_COND_ACK_PENDING; nr->vl = nr->vr; nr_stop_t1timer(sk); }
/*
 * Queue an in-sequence received frame to the user, reassembling
 * fragments when necessary.  Frames flagged `more' accumulate on the
 * fragment queue; the final fragment triggers reassembly into a single
 * skb which is then delivered.  Returns 0 on success, non-zero when
 * the frame could not be delivered (allocation failure or a full
 * receive buffer - sock_queue_rcv_skb's result is passed through).
 */
static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
{
        struct sk_buff *skbo, *skbn = skb;
        struct nr_sock *nr = nr_sk(sk);

        /* Strip the network and transport headers; only payload remains. */
        skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);

        nr_start_idletimer(sk);

        if (more) {
                /* Middle fragment: stash it and wait for the rest. */
                nr->fraglen += skb->len;
                skb_queue_tail(&nr->frag_queue, skb);
                return 0;
        }

        if (!more && nr->fraglen > 0) { /* End of fragment */
                nr->fraglen += skb->len;
                skb_queue_tail(&nr->frag_queue, skb);

                /* Reassemble all queued fragments into one new skb. */
                if ((skbn = alloc_skb(nr->fraglen, GFP_ATOMIC)) == NULL)
                        return 1;

                skb_reset_transport_header(skbn);

                while ((skbo = skb_dequeue(&nr->frag_queue)) != NULL) {
                        skb_copy_from_linear_data(skbo,
                                                  skb_put(skbn, skbo->len),
                                                  skbo->len);
                        kfree_skb(skbo);
                }

                nr->fraglen = 0;
        }

        return sock_queue_rcv_skb(sk, skbn);
}
/*
 * State machine for state 3, Connected State.
 * The handling of the timer(s) is in file nr_timer.c
 * Handling of state 0 and connection release is in netrom.c.
 *
 * nr/ns are the acknowledged and sent sequence numbers taken from
 * bytes 18 and 17 of the frame.  INFO frames are pushed through the
 * resequence queue so out-of-order arrivals are delivered in order.
 */
static int nr_state3_machine(struct sock *sk, struct sk_buff *skb,
        int frametype)
{
        struct nr_sock *nrom = nr_sk(sk);
        struct sk_buff_head temp_queue;
        struct sk_buff *skbn;
        unsigned short save_vr;
        unsigned short nr, ns;
        int queued = 0;

        nr = skb->data[18];
        ns = skb->data[17];

        switch (frametype) {
        case NR_CONNREQ:
                /* Duplicate connect request: just re-acknowledge it. */
                nr_write_internal(sk, NR_CONNACK);
                break;

        case NR_DISCREQ:
                nr_write_internal(sk, NR_DISCACK);
                nr_disconnect(sk, 0);
                break;

        case NR_CONNACK | NR_CHOKE_FLAG:
        case NR_DISCACK:
                nr_disconnect(sk, ECONNRESET);
                break;

        case NR_INFOACK:
        case NR_INFOACK | NR_CHOKE_FLAG:
        case NR_INFOACK | NR_NAK_FLAG:
        case NR_INFOACK | NR_NAK_FLAG | NR_CHOKE_FLAG:
                /* Track the peer's receive-busy state via T4. */
                if (frametype & NR_CHOKE_FLAG) {
                        nrom->condition |= NR_COND_PEER_RX_BUSY;
                        nr_start_t4timer(sk);
                } else {
                        nrom->condition &= ~NR_COND_PEER_RX_BUSY;
                        nr_stop_t4timer(sk);
                }
                if (!nr_validate_nr(sk, nr)) {
                        break;
                }
                if (frametype & NR_NAK_FLAG) {
                        nr_frames_acked(sk, nr);
                        nr_send_nak_frame(sk);
                } else {
                        if (nrom->condition & NR_COND_PEER_RX_BUSY) {
                                nr_frames_acked(sk, nr);
                        } else {
                                nr_check_iframes_acked(sk, nr);
                        }
                }
                break;

        case NR_INFO:
        case NR_INFO | NR_NAK_FLAG:
        case NR_INFO | NR_CHOKE_FLAG:
        case NR_INFO | NR_MORE_FLAG:
        case NR_INFO | NR_NAK_FLAG | NR_CHOKE_FLAG:
        case NR_INFO | NR_CHOKE_FLAG | NR_MORE_FLAG:
        case NR_INFO | NR_NAK_FLAG | NR_MORE_FLAG:
        case NR_INFO | NR_NAK_FLAG | NR_CHOKE_FLAG | NR_MORE_FLAG:
                if (frametype & NR_CHOKE_FLAG) {
                        nrom->condition |= NR_COND_PEER_RX_BUSY;
                        nr_start_t4timer(sk);
                } else {
                        nrom->condition &= ~NR_COND_PEER_RX_BUSY;
                        nr_stop_t4timer(sk);
                }
                /* The piggy-backed ack is processed exactly as an INFOACK. */
                if (nr_validate_nr(sk, nr)) {
                        if (frametype & NR_NAK_FLAG) {
                                nr_frames_acked(sk, nr);
                                nr_send_nak_frame(sk);
                        } else {
                                if (nrom->condition & NR_COND_PEER_RX_BUSY) {
                                        nr_frames_acked(sk, nr);
                                } else {
                                        nr_check_iframes_acked(sk, nr);
                                }
                        }
                }
                queued = 1;
                /* Park the new frame on the resequence queue first. */
                skb_queue_head(&nrom->reseq_queue, skb);
                if (nrom->condition & NR_COND_OWN_RX_BUSY)
                        break;
                skb_queue_head_init(&temp_queue);
                /*
                 * Deliver every frame from the resequence queue that is
                 * now in order; keep cycling until vr stops advancing.
                 * In-window but out-of-order frames are retained via
                 * temp_queue; anything outside the window is dropped.
                 */
                do {
                        save_vr = nrom->vr;
                        while ((skbn = skb_dequeue(&nrom->reseq_queue)) != NULL) {
                                ns = skbn->data[17];
                                if (ns == nrom->vr) {
                                        if (nr_queue_rx_frame(sk, skbn, frametype & NR_MORE_FLAG) == 0) {
                                                nrom->vr = (nrom->vr + 1) % NR_MODULUS;
                                        } else {
                                                /* Receive buffer full: choke. */
                                                nrom->condition |= NR_COND_OWN_RX_BUSY;
                                                skb_queue_tail(&temp_queue, skbn);
                                        }
                                } else if (nr_in_rx_window(sk, ns)) {
                                        skb_queue_tail(&temp_queue, skbn);
                                } else {
                                        kfree_skb(skbn);
                                }
                        }
                        while ((skbn = skb_dequeue(&temp_queue)) != NULL) {
                                skb_queue_tail(&nrom->reseq_queue, skbn);
                        }
                } while (save_vr != nrom->vr);
                /*
                 * Window is full, ack it immediately.
                 */
                if (((nrom->vl + nrom->window) % NR_MODULUS) == nrom->vr) {
                        nr_enquiry_response(sk);
                } else {
                        /* Otherwise let T2 batch the acknowledgement. */
                        if (!(nrom->condition & NR_COND_ACK_PENDING)) {
                                nrom->condition |= NR_COND_ACK_PENDING;
                                nr_start_t2timer(sk);
                        }
                }
                break;

        case NR_RESET:
                if (sysctl_netrom_reset_circuit)
                        nr_disconnect(sk, ECONNRESET);
                break;

        default:
                break;
        }
        return queued;
}
/*
 * This routine is called when the HDLC layer internally generates a
 * control frame.
 *
 * Builds and transmits an internally generated control frame
 * (CONNREQ, CONNACK, DISCREQ, DISCACK or INFOACK).  The transport
 * header is laid out byte by byte below; CONNREQ additionally carries
 * the user and source callsigns plus the T1 timeout, and CONNACK may
 * carry the BPQ TTL extension byte.
 */
void nr_write_internal(struct sock *sk, int frametype)
{
        struct nr_sock *nr = nr_sk(sk);
        struct sk_buff *skb;
        unsigned char *dptr;
        int len, timeout;

        len = NR_NETWORK_LEN + NR_TRANSPORT_LEN;

        /* Extra space needed beyond the basic transport header. */
        switch (frametype & 0x0F) {
        case NR_CONNREQ:
                len += 17;
                break;
        case NR_CONNACK:
                len += (nr->bpqext) ? 2 : 1;
                break;
        case NR_DISCREQ:
        case NR_DISCACK:
        case NR_INFOACK:
                break;
        default:
                ;
                return;
        }

        if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
                return;

        /*
         * Space for AX.25 and NET/ROM network header
         */
        skb_reserve(skb, NR_NETWORK_LEN);

        dptr = skb_put(skb, skb_tailroom(skb));

        switch (frametype & 0x0F) {
        case NR_CONNREQ:
                timeout = nr->t1 / HZ;
                *dptr++ = nr->my_index;
                *dptr++ = nr->my_id;
                *dptr++ = 0;
                *dptr++ = 0;
                *dptr++ = frametype;
                *dptr++ = nr->window;
                /* Requesting user's callsign. */
                memcpy(dptr, &nr->user_addr, AX25_ADDR_LEN);
                dptr[6] &= ~AX25_CBIT;
                dptr[6] &= ~AX25_EBIT;
                dptr[6] |= AX25_SSSID_SPARE;
                dptr += AX25_ADDR_LEN;
                /* Our own callsign. */
                memcpy(dptr, &nr->source_addr, AX25_ADDR_LEN);
                dptr[6] &= ~AX25_CBIT;
                dptr[6] &= ~AX25_EBIT;
                dptr[6] |= AX25_SSSID_SPARE;
                dptr += AX25_ADDR_LEN;
                /* T1 timeout in seconds, little-endian 16 bit. */
                *dptr++ = timeout % 256;
                *dptr++ = timeout / 256;
                break;

        case NR_CONNACK:
                *dptr++ = nr->your_index;
                *dptr++ = nr->your_id;
                *dptr++ = nr->my_index;
                *dptr++ = nr->my_id;
                *dptr++ = frametype;
                *dptr++ = nr->window;
                /* BPQ extension: echo the initial TTL. */
                if (nr->bpqext)
                        *dptr++ = sysctl_netrom_network_ttl_initialiser;
                break;

        case NR_DISCREQ:
        case NR_DISCACK:
                *dptr++ = nr->your_index;
                *dptr++ = nr->your_id;
                *dptr++ = 0;
                *dptr++ = 0;
                *dptr++ = frametype;
                break;

        case NR_INFOACK:
                *dptr++ = nr->your_index;
                *dptr++ = nr->your_id;
                *dptr++ = 0;
                *dptr++ = nr->vr;
                *dptr++ = frametype;
                break;
        }

        nr_transmit_buffer(sk, skb);
}
int nr_t1timer_running(struct sock *sk) { return timer_pending(&nr_sk(sk)->t1timer); }
void nr_stop_idletimer(struct sock *sk) { del_timer(&nr_sk(sk)->idletimer); }
void nr_stop_t4timer(struct sock *sk) { del_timer(&nr_sk(sk)->t4timer); }
void nr_start_t4timer(struct sock *sk) { struct nr_sock *nr = nr_sk(sk); mod_timer(&nr->t4timer, jiffies + nr->t4); }
/*
 * Transmit as many queued frames as the send window allows.  Clones of
 * the frames are sent; the originals move to the ack queue until the
 * peer acknowledges them.  Does nothing unless connected, and backs
 * off while the peer is receive-busy.
 */
void nr_kick(struct sock *sk)
{
        struct nr_sock *nr = nr_sk(sk);
        struct sk_buff *skb, *skbn;
        unsigned short start, end;

        if (nr->state != NR_STATE_3)
                return;

        if (nr->condition & NR_COND_PEER_RX_BUSY)
                return;

        if (!skb_peek(&sk->sk_write_queue))
                return;

        /* Resume from vs unless nothing is outstanding, then from va. */
        start = (skb_peek(&nr->ack_queue) == NULL) ? nr->va : nr->vs;
        end = (nr->va + nr->window) % NR_MODULUS;

        if (start == end)
                return;

        nr->vs = start;

        /*
         * Transmit data until either we're out of data to send or
         * the window is full.
         */

        /*
         * Dequeue the frame and copy it.
         */
        skb = skb_dequeue(&sk->sk_write_queue);

        do {
                if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
                        /* Clone failed: put the frame back and try later. */
                        skb_queue_head(&sk->sk_write_queue, skb);
                        break;
                }

                skb_set_owner_w(skbn, sk);

                /*
                 * Transmit the frame copy.
                 */
                nr_send_iframe(sk, skbn);

                nr->vs = (nr->vs + 1) % NR_MODULUS;

                /*
                 * Requeue the original data frame.
                 */
                skb_queue_tail(&nr->ack_queue, skb);

        } while (nr->vs != end &&
                 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);

        nr->vl = nr->vr;
        nr->condition &= ~NR_COND_ACK_PENDING;

        if (!nr_t1timer_running(sk))
                nr_start_t1timer(sk);
}