Example #1
/*
 *	Higher level upcall for a LAPB frame
 */
int ax25_std_frame_in(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	int queued = 0, frametype, ns, nr, pf;

	frametype = ax25_decode(ax25, skb, &ns, &nr, &pf);

	switch (ax25->state) {
	case AX25_STATE_1:
		queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type);
		break;
	case AX25_STATE_2:
		queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type);
		break;
	case AX25_STATE_3:
		queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type);
		break;
	case AX25_STATE_4:
		queued = ax25_std_state4_machine(ax25, skb, frametype, ns, nr, pf, type);
		break;
	}

	ax25_kick(ax25);

	return queued;
}
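For context, the state machine above is reached through a per-device protocol dispatch much like the AX25_VALUES_PROTOCOL switch visible in Example #2. A minimal sketch of such a caller, under those assumptions (ax25_dispatch_rx is an illustrative name, and the real dispatcher also handles state 0 and DAMA-detection details omitted here; ax25_ds_frame_in is the DAMA-slave counterpart):

/*
 * Hedged sketch: route an incoming frame either to the standard
 * state machine above or to the DAMA-slave one, mirroring the
 * AX25_VALUES_PROTOCOL switch used in Example #2.
 */
static int ax25_dispatch_rx(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	int queued = 0;

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		queued = ax25_std_frame_in(ax25, skb, type);
		break;
#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		queued = ax25_ds_frame_in(ax25, skb, type);
		break;
#endif
	}

	return queued;
}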
Example #2
/*
 *	All outgoing AX.25 I frames pass via this routine. Therefore this is
 *	where the fragmentation of frames takes place: frames longer than
 *	paclen are split into segments before being queued.
 */
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
    struct sk_buff *skbn;
    unsigned char *p;
    int frontlen, len, fragno, ka9qfrag, first = 1;

    if (paclen < 16) {
        WARN_ON_ONCE(1);
        kfree_skb(skb);
        return;
    }

    if ((skb->len - 1) > paclen) {
        if (*skb->data == AX25_P_TEXT) {
            skb_pull(skb, 1); /* skip PID */
            ka9qfrag = 0;
        } else {
            paclen -= 2;	/* Allow for fragment control info */
            ka9qfrag = 1;
        }

        fragno = skb->len / paclen;
        if (skb->len % paclen == 0) fragno--;

        frontlen = skb_headroom(skb);	/* Address space + CTRL */

        while (skb->len > 0) {
            spin_lock_bh(&ax25_frag_lock);
            if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
                spin_unlock_bh(&ax25_frag_lock);
                printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
                return;
            }

            if (skb->sk != NULL)
                skb_set_owner_w(skbn, skb->sk);

            spin_unlock_bh(&ax25_frag_lock);

            len = (paclen > skb->len) ? skb->len : paclen;

            if (ka9qfrag == 1) {
                skb_reserve(skbn, frontlen + 2);
                skb_set_network_header(skbn,
                                       skb_network_offset(skb));
                skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                p = skb_push(skbn, 2);

                *p++ = AX25_P_SEGMENT;

                *p = fragno--;
                if (first) {
                    *p |= AX25_SEG_FIRST;
                    first = 0;
                }
            } else {
                skb_reserve(skbn, frontlen + 1);
                skb_set_network_header(skbn,
                                       skb_network_offset(skb));
                skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
                p = skb_push(skbn, 1);
                *p = AX25_P_TEXT;
            }

            skb_pull(skb, len);
            skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
        }

        kfree_skb(skb);
    } else {
        skb_queue_tail(&ax25->write_queue, skb);	  /* Throw it on the queue */
    }

    switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
    case AX25_PROTO_STD_SIMPLEX:
    case AX25_PROTO_STD_DUPLEX:
        ax25_kick(ax25);
        break;

#ifdef CONFIG_AX25_DAMA_SLAVE
    /*
     * A DAMA slave is _required_ to work as normal AX.25L2V2
     * if no DAMA master is available.
     */
    case AX25_PROTO_DAMA_SLAVE:
        if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
        break;
#endif
    }
}
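The fragment counter set up before the while loop is easy to misread: the byte written after AX25_P_SEGMENT counts the fragments still to follow, so it starts at ceil(len / paclen) - 1 on the first fragment (which also carries AX25_SEG_FIRST) and reaches 0 on the last. A standalone restatement, purely illustrative (ax25_first_seg_byte is a made-up helper, not kernel code; AX25_P_SEGMENT and AX25_SEG_FIRST come from <net/ax25.h>):

/*
 * Illustrative only: value of the counter byte that follows
 * AX25_P_SEGMENT on the *first* fragment of a payload of `len`
 * bytes split into `paclen`-byte pieces. Equivalent to the
 * fragno computation in ax25_output above.
 */
static unsigned char ax25_first_seg_byte(unsigned int len, unsigned int paclen)
{
	unsigned int total = (len + paclen - 1) / paclen;	/* ceil(len / paclen) */

	return (unsigned char)((total - 1) | AX25_SEG_FIRST);
}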
Example #3
/*
 * All outgoing AX.25 I frames pass via this routine. Therefore this is
 * where the fragmentation of frames takes place.
 */
void ax25_output(ax25_cb *ax25, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char *p;
	int frontlen, mtu, len, fragno, ka9qfrag, first = 1;
	long flags;
	
	/*
	 * dl1bke 960301: We use the new PACLEN parameter as MTU of the AX.25 layer.
	 *                This will (hopefully) allow user programs to write() data
	 *                w/o having to think of the maximal amount of data we can
	 *		  send with one call. It's called PACLEN to (1) avoid confusion
	 *		  with (IP) MTU and (2) TAPR calls this PACLEN, too ;-)
	 */

	mtu = ax25->paclen;
	
	if ((skb->len - 1) > mtu) {
		if (*skb->data == AX25_P_TEXT) {
			skb_pull(skb, 1); /* skip PID */
			ka9qfrag = 0;
		} else {
			mtu -= 2;	/* Allow for fragment control info */
			ka9qfrag = 1;
		}
		
		fragno = skb->len / mtu;
		if (skb->len % mtu == 0) fragno--;

		frontlen = skb_headroom(skb);	/* Address space + CTRL */

		while (skb->len > 0) {
			save_flags(flags); 
			cli();
			/* 
			 * do _not_ use sock_alloc_send_skb, our socket may have
			 * sk->shutdown set...
			 */
			if ((skbn = alloc_skb(mtu + 2 + frontlen, GFP_ATOMIC)) == NULL) {
				restore_flags(flags);
				printk(KERN_DEBUG "ax25_output: alloc_skb returned NULL\n");
				if (skb_device_locked(skb))
					skb_device_unlock(skb);
				return;
			}

			skbn->sk   = skb->sk;
			
			if (skbn->sk)
				atomic_add(skbn->truesize, &skbn->sk->wmem_alloc);
			
			restore_flags(flags);
			
			skbn->free = 1;
			skbn->arp  = 1;

			len = (mtu > skb->len) ? skb->len : mtu;
			
			if (ka9qfrag == 1) {
				skb_reserve(skbn, frontlen + 2);

				memcpy(skb_put(skbn, len), skb->data, len);
				p = skb_push(skbn, 2);

				*p++ = AX25_P_SEGMENT;

				*p = fragno--;
				if (first) {
					*p |= SEG_FIRST;
					first = 0;
				}
			} else {
				skb_reserve(skbn, frontlen + 1);
				memcpy(skb_put(skbn, len), skb->data, len);
				p = skb_push(skbn, 1);
				*p = AX25_P_TEXT;
			}

			skb_pull(skb, len);
			skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
		}
		
		skb->free = 1;
		kfree_skb(skb, FREE_WRITE);
	} else {
		skb_queue_tail(&ax25->write_queue, skb);	  /* Throw it on the queue */
	}

	if (ax25->state == AX25_STATE_3 || ax25->state == AX25_STATE_4) {
		if (!ax25->dama_slave)		/* bke 960114: we aren't allowed to transmit */
			ax25_kick(ax25);	/* in DAMA mode unless we received a Poll */
	}
}
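Both versions of ax25_output write the same two-byte fragment header, which the receive side has to undo. A hedged, receive-side illustration of that layout (struct ax25_seg_hdr and parse_ax25_seg_hdr are hypothetical names, not taken from the kernel; the constants are the ones used above, spelled SEG_FIRST in the older tree and AX25_SEG_FIRST in the newer one):

/*
 * Illustrative decode of the 2-byte header written by the ka9q
 * fragmentation path above: PID byte AX25_P_SEGMENT, then a counter
 * whose top bit marks the first fragment and whose low bits give the
 * number of fragments still to come.
 */
struct ax25_seg_hdr {
	int first;		/* non-zero on the first fragment     */
	int remaining;		/* fragments that follow this one     */
};

static int parse_ax25_seg_hdr(const unsigned char *data, struct ax25_seg_hdr *hdr)
{
	if (data[0] != AX25_P_SEGMENT)
		return -1;	/* not a segmented frame */

	hdr->first     = data[1] & AX25_SEG_FIRST;
	hdr->remaining = data[1] & ~AX25_SEG_FIRST;
	return 0;
}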
Example #4
/*
 *	AX.25 TIMER 
 *
 *	This routine is called every 500ms. Decrement timer by this
 *	amount - if expired then process the event.
 */
static void ax25_timer(unsigned long param)
{
	ax25_cb *ax25 = (ax25_cb *)param;

	switch (ax25->state) {
		case AX25_STATE_0:
			/* Magic here: If we listen() and a new link dies before it
			   is accepted() it isn't 'dead' so doesn't get removed. */
			if (ax25->sk == NULL || ax25->sk->destroy || (ax25->sk->state == TCP_LISTEN && ax25->sk->dead)) {
				del_timer(&ax25->timer);
				ax25_destroy_socket(ax25);
				return;
			}
			break;

		case AX25_STATE_3:
		case AX25_STATE_4:
			/*
			 * Check the state of the receive buffer.
			 */
			if (ax25->sk != NULL) {
				if (ax25->sk->rmem_alloc < (ax25->sk->rcvbuf / 2) && (ax25->condition & OWN_RX_BUSY_CONDITION)) {
					ax25->condition &= ~OWN_RX_BUSY_CONDITION;
					if (!ax25->dama_slave)
						ax25_send_control(ax25, RR, POLLOFF, C_RESPONSE);
					ax25->condition &= ~ACK_PENDING_CONDITION;
					break;
				}
			}
			/*
			 * Check for frames to transmit.
			 */
			if (!ax25->dama_slave)
				ax25_kick(ax25);
			break;

		default:
			break;
	}

	if (ax25->t2timer > 0 && --ax25->t2timer == 0) {
		if (ax25->state == AX25_STATE_3 || ax25->state == AX25_STATE_4) {
			if (ax25->condition & ACK_PENDING_CONDITION) {
				ax25->condition &= ~ACK_PENDING_CONDITION;
				if (!ax25->dama_slave)
					ax25_timeout_response(ax25);
			}
		}
	}

	if (ax25->t3timer > 0 && --ax25->t3timer == 0) {
		/* dl1bke 960114: T3 expires and we are in DAMA mode:  */
		/*                send a DISC and abort the connection */
		if (ax25->dama_slave) {
#ifdef CONFIG_NETROM
			nr_link_failed(&ax25->dest_addr, ax25->device);
#endif
			ax25_clear_queues(ax25);
			ax25_send_control(ax25, DISC, POLLON, C_COMMAND);
				
			ax25->state = AX25_STATE_0;
			if (ax25->sk != NULL) {
				if (ax25->sk->debug)
					printk("T3 Timeout\n");
				ax25->sk->state = TCP_CLOSE;
				ax25->sk->err   = ETIMEDOUT;
				if (!ax25->sk->dead)
					ax25->sk->state_change(ax25->sk);
				ax25->sk->dead  = 1;
			}

			ax25_reset_timer(ax25);
			return;
		}
		
		if (ax25->state == AX25_STATE_3) {
			ax25->n2count = 0;
			ax25_transmit_enquiry(ax25);
			ax25->state   = AX25_STATE_4;
		}
		ax25->t3timer = ax25->t3;
	}
	
	if (ax25->idletimer > 0 && --ax25->idletimer == 0) {
		/* dl1bke 960228: close the connection when IDLE expires */
		/* 		  similar to DAMA T3 timeout but with    */
		/* 		  a "clean" disconnect of the connection */

		ax25_clear_queues(ax25);

		ax25->n2count = 0;
		if (!ax25->dama_slave) {
			ax25->t3timer = 0;
			ax25_send_control(ax25, DISC, POLLON, C_COMMAND);
		} else {
			ax25->t3timer = ax25->t3;
		}
		
		/* state 1 or 2 should not happen, but... */
		
		if (ax25->state == AX25_STATE_1 || ax25->state == AX25_STATE_2)
			ax25->state = AX25_STATE_0;
		else
			ax25->state = AX25_STATE_2;

		ax25->t1timer = ax25->t1 = ax25_calculate_t1(ax25);

		if (ax25->sk != NULL) {
			ax25->sk->state = TCP_CLOSE;
			ax25->sk->err = 0;
			if (!ax25->sk->dead)
				ax25->sk->state_change(ax25->sk);
			ax25->sk->dead = 1;
			ax25->sk->destroy = 1;
		}
	}

	/* dl1bke 960114: DAMA T1 timeouts are handled in ax25_dama_slave_transmit */
	/* 		  nevertheless we have to re-enqueue the timer struct...   */
	
	if (ax25->t1timer == 0 || --ax25->t1timer > 0) {
		ax25_reset_timer(ax25);
		return;
	}

	if (!ax25_dev_is_dama_slave(ax25->device)) {
		if (ax25->dama_slave)
			ax25->dama_slave = 0;
		ax25_t1_timeout(ax25);
	}
}
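The handler decrements each timer and then has to re-queue itself so it keeps firing on its 500ms tick; that is what the ax25_reset_timer calls above are for. A minimal sketch of that re-arm step, assuming the save_flags/cli-era timer API seen in Example #3 and HZ / 2 for the 500ms interval (the real kernel routine may differ in detail):

/*
 * Hedged sketch of the re-arm step: drop any pending instance of the
 * timer and queue ax25_timer again half a second from now. Constants
 * and locking are assumptions drawn from the "called every 500ms"
 * comment, not copied from the kernel source.
 */
static void ax25_reset_timer(ax25_cb *ax25)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	del_timer(&ax25->timer);
	restore_flags(flags);

	ax25->timer.data     = (unsigned long)ax25;
	ax25->timer.function = &ax25_timer;
	ax25->timer.expires  = jiffies + HZ / 2;	/* 500ms */
	add_timer(&ax25->timer);
}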