Example #1
int rose_rx_ip(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = netdev_priv(dev);

#ifdef CONFIG_INET
	if (!netif_running(dev)) {
		stats->rx_errors++;
		return 0;
	}

	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	skb->protocol = htons(ETH_P_IP);

	/* Spoof incoming device */
	skb->dev      = dev;
	skb->h.raw    = skb->data;
	skb->nh.raw   = skb->data;
	skb->pkt_type = PACKET_HOST;

	ip_rcv(skb, skb->dev, NULL);
#else
	kfree_skb(skb);
#endif
	return 1;
}
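
The pattern above re-labels the buffer (spoofed device, header pointers, PACKET_HOST, ETH_P_IP) and hands it straight to ip_rcv(), returning 1 when the packet was passed up and 0 when the interface was down. Below is a minimal sketch of that consume-or-drop contract; pseudo_packet, pseudo_stats, deliver_to_ip() and link_rx() are hypothetical stand-ins for the kernel's sk_buff, net_device_stats and ip_rcv() machinery, not real kernel APIs.

/* Hedged sketch: pseudo_packet, pseudo_stats, deliver_to_ip() and link_rx()
 * are hypothetical stand-ins for sk_buff, net_device_stats and ip_rcv().
 */
#include <stdio.h>
#include <stdlib.h>

struct pseudo_packet {
	unsigned char *data;      /* start of the IP header       */
	size_t         len;       /* payload length in bytes      */
	int            if_index;  /* "spoofed" incoming interface */
};

struct pseudo_stats {
	unsigned long rx_packets, rx_bytes, rx_errors;
};

/* Stand-in for ip_rcv(): only reports the hand-off here. */
static void deliver_to_ip(struct pseudo_packet *pkt)
{
	printf("IP layer got %zu bytes from interface %d\n", pkt->len, pkt->if_index);
}

/* Mirrors the rose_rx_ip() contract: return 0 = dropped, 1 = consumed. */
static int link_rx(struct pseudo_packet *pkt, struct pseudo_stats *stats,
		   int if_running, int if_index)
{
	if (!if_running) {
		stats->rx_errors++;   /* interface down: count an error, drop */
		return 0;
	}

	stats->rx_packets++;
	stats->rx_bytes += pkt->len;

	pkt->if_index = if_index;     /* "spoof" the incoming device  */
	deliver_to_ip(pkt);           /* hand the packet up the stack */
	return 1;
}

int main(void)
{
	struct pseudo_stats stats = {0};
	struct pseudo_packet pkt = { NULL, 40, 0 };

	printf("consumed: %d\n", link_rx(&pkt, &stats, 1, 3));
	printf("rx_packets=%lu rx_bytes=%lu\n", stats.rx_packets, stats.rx_bytes);
	return 0;
}
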
Example #2
void konlite_call(void)
{
	unsigned short tmp;
	k_tmr++;
	for(tmp=0;tmp<ETH_RXBUFNB;tmp++)
	{
		buf = get_mac_data(tmp); // buffer of received data
		if(buf->len)
		{
			m_pkt=get_mac_pkt(buf); // build the MAC packet structure
			if(check_mac(m_pkt)) // check the destination MAC address
			{

				switch(get_eth_type(m_pkt))
				{
					case IP_TYPE:
						ip_p=ip_rcv(m_pkt);
						if(check_ip_req(ip_p))
						{
							if(check_mask(ip_p->ip_s)==0) {set_gate_cmd();} // request falls outside the subnet
							else{clr_gate_cmd();add_arp_tab(ip_p->ip_s,m_pkt->mac_s);} // request stays within the subnet
							switch(get_ip_type(ip_p))
							{
								case ICMP_TYPE:
									icmp_p=icmp_rcv(ip_p);
									if(check_ping(icmp_p)) {ping_echo(icmp_p,ip_p);}
									break;
								case UDP_TYPE:
									udp_p=udp_rcv(ip_p);
									portudp_scan(udp_p,ip_p);
									break;
								case TCP_TYPE:
									tcp_p=tcp_rcv(ip_p);
									porttcp_scan(tcp_p,ip_p);
									break;
							}
						}
						break;
					case ARP_TYPE:
						arp_p=arp_rcv(m_pkt); // build the ARP packet structure
						if(check_arp_req(arp_p))	// ARP request
						{
							add_arp_tab(arp_p->ip_s,arp_p->mac_s);
							arp_answer(arp_p);
						}else
						if(check_arp_answer_gate(arp_p))	// ARP reply from the gateway
						{
							set_gate_mac(arp_p->mac_s);
						}
						break;
					case UNDEF_TYPE:
						break;
				}
			}
			unlock_mac_rx(tmp);
		}
	}
	if(k_tmr % GATE_PER == 0) get_gate_mac();	// periodic request for the gateway MAC
}
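
Example #2 polls the MAC receive ring and demultiplexes twice: first on the EtherType (IP vs. ARP), then on the IP protocol field (ICMP/UDP/TCP); the buffer and packet pointers it uses are file-scope objects that are not shown. A self-contained sketch of that two-level dispatch follows; the frame struct and the handle_*() functions are hypothetical, while the EtherType and IP protocol numbers are the standard ones.

/* Hedged sketch of the two-level demux in konlite_call(); the frame
 * struct and handle_*() functions are hypothetical stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define ETH_TYPE_IP   0x0800  /* IPv4 EtherType */
#define ETH_TYPE_ARP  0x0806  /* ARP EtherType  */
#define IP_PROTO_ICMP 1
#define IP_PROTO_TCP  6
#define IP_PROTO_UDP  17

struct frame {
	uint16_t eth_type;  /* EtherType from the MAC header     */
	uint8_t  ip_proto;  /* protocol field from the IP header */
};

static void handle_icmp(void) { puts("ICMP: maybe answer a ping");  }
static void handle_udp(void)  { puts("UDP: scan the open ports");   }
static void handle_tcp(void)  { puts("TCP: scan the open ports");   }
static void handle_arp(void)  { puts("ARP: update table / answer"); }

static void demux(const struct frame *f)
{
	switch (f->eth_type) {
	case ETH_TYPE_IP:
		switch (f->ip_proto) {
		case IP_PROTO_ICMP: handle_icmp(); break;
		case IP_PROTO_UDP:  handle_udp();  break;
		case IP_PROTO_TCP:  handle_tcp();  break;
		default: break;  /* unknown IP protocol: ignore */
		}
		break;
	case ETH_TYPE_ARP:
		handle_arp();
		break;
	default:
		break;           /* unknown EtherType: ignore */
	}
}

int main(void)
{
	struct frame ping = { ETH_TYPE_IP, IP_PROTO_ICMP };
	struct frame arp  = { ETH_TYPE_ARP, 0 };

	demux(&ping);
	demux(&arp);
	return 0;
}
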
Example #3
/*
 *	This is where all valid I frames are sent to, to be dispatched to
 *	whichever protocol requires them.
 */
int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
	int (*func)(struct sk_buff *, ax25_cb *);
	volatile int queued = 0;
	unsigned char pid;

	if (skb == NULL) return 0;

	ax25_start_idletimer(ax25);

	pid = *skb->data;

#ifdef CONFIG_INET
	if (pid == AX25_P_IP) {
		/* working around a TCP bug to keep additional listeners
		 * happy. TCP re-uses the buffer and destroys the original
		 * content.
		 */
		struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
		if (skbn != NULL) {
			kfree_skb(skb);
			skb = skbn;
		}

		skb_pull(skb, 1);	/* Remove PID */
		skb->h.raw    = skb->data;
		skb->nh.raw   = skb->data;
		skb->dev      = ax25->ax25_dev->dev;
		skb->pkt_type = PACKET_HOST;
		skb->protocol = htons(ETH_P_IP);
		ip_rcv(skb, skb->dev, NULL);	/* Wrong ptype */
		return 1;
	}
#endif
	if (pid == AX25_P_SEGMENT) {
		skb_pull(skb, 1);	/* Remove PID */
		return ax25_rx_fragment(ax25, skb);
	}

	if ((func = ax25_protocol_function(pid)) != NULL) {
		skb_pull(skb, 1);	/* Remove PID */
		return (*func)(skb, ax25);
	}

	if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
		if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
		    ax25->pidincl) {
			if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
				queued = 1;
			else
				ax25->condition |= AX25_COND_OWN_RX_BUSY;
		}
	}

	return queued;
}
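
After the special cases for IP and fragmented frames, ax25_rx_iframe() looks the PID byte up in a registry and hands the payload to whichever layer registered it. A small, self-contained sketch of that lookup-and-dispatch shape follows; pid_table, lookup_pid() and rx_iframe() are hypothetical, though the 0xCC value does match the kernel's AX25_P_IP.

/* Hedged sketch of the PID -> handler dispatch used above; pid_table,
 * lookup_pid() and rx_iframe() are hypothetical, not the kernel's tables.
 */
#include <stddef.h>
#include <stdio.h>

typedef int (*pid_handler)(const unsigned char *payload, size_t len);

struct pid_entry {
	unsigned char pid;
	pid_handler   handler;
};

static int handle_ip(const unsigned char *payload, size_t len)
{
	(void)payload;
	printf("IP payload, %zu bytes\n", len);
	return 1;                        /* queued / consumed */
}

static const struct pid_entry pid_table[] = {
	{ 0xCC, handle_ip },             /* 0xCC matches the kernel's AX25_P_IP */
};

/* Look up a handler for the PID byte; NULL means no layer 3 registered. */
static pid_handler lookup_pid(unsigned char pid)
{
	for (size_t i = 0; i < sizeof(pid_table) / sizeof(pid_table[0]); i++)
		if (pid_table[i].pid == pid)
			return pid_table[i].handler;
	return NULL;
}

/* Mirrors the shape of ax25_rx_iframe(): peek the PID, strip it, dispatch. */
static int rx_iframe(const unsigned char *frame, size_t len)
{
	pid_handler func;

	if (len == 0)
		return 0;
	func = lookup_pid(frame[0]);
	if (func != NULL)
		return func(frame + 1, len - 1);  /* "skb_pull" the PID byte */
	return 0;                                 /* nobody wanted it        */
}

int main(void)
{
	unsigned char frame[] = { 0xCC, 0x45, 0x00 };  /* PID + start of a fake IP header */

	printf("queued: %d\n", rx_iframe(frame, sizeof(frame)));
	return 0;
}
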
Example #4
/*
 *	This is where all valid I frames are sent to, to be dispatched to
 *	whichever protocol requires them.
 */
static int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
	int (*func)(struct sk_buff *, ax25_cb *);
	struct sk_buff *skbn;
	volatile int queued = 0;
	unsigned char pid;

	if (skb == NULL) return 0;

	ax25->idletimer = ax25->idle;

	pid = *skb->data;

#ifdef CONFIG_INET
	if (pid == AX25_P_IP) {
		if ((skbn = skb_copy(skb, GFP_ATOMIC)) != NULL) {
			kfree_skb(skb, FREE_READ);
			skb = skbn;
		}
		skb_pull(skb, 1);	/* Remove PID */
		skb->h.raw = skb->data;
		ip_rcv(skb, ax25->device, NULL);	/* Wrong ptype */
		return 1;
	}
#endif
	if (pid == AX25_P_SEGMENT) {
		skb_pull(skb, 1);	/* Remove PID */
		return ax25_rx_fragment(ax25, skb);
	}

	if ((func = ax25_protocol_function(pid)) != NULL) {
		skb_pull(skb, 1);	/* Remove PID */
		return (*func)(skb, ax25);
	}

	if (ax25->sk != NULL && ax25_dev_get_value(ax25->device, AX25_VALUES_CONMODE) == 2) {
		if ((!ax25->pidincl && ax25->sk->protocol == pid) || ax25->pidincl) {
			if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
				queued = 1;
			else
				ax25->condition |= AX25_COND_OWN_RX_BUSY;
		}
	}

	return queued;
}
Example #5
static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
	ax25_address *dev_addr, struct packet_type *ptype)
{
	ax25_address src, dest, *next_digi = NULL;
	int type = 0, mine = 0, dama;
	struct sock *make, *sk;
	ax25_digi dp, reverse_dp;
	ax25_cb *ax25;
	ax25_dev *ax25_dev;

	/*
	 *	Process the AX.25/LAPB frame.
	 */

	skb->h.raw = skb->data;

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
		kfree_skb(skb);
		return 0;
	}

	/*
	 *	Parse the address header.
	 */

	if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL) {
		kfree_skb(skb);
		return 0;
	}

	/*
	 *	Ours perhaps ?
	 */
	if (dp.lastrepeat + 1 < dp.ndigi)		/* Not yet digipeated completely */
		next_digi = &dp.calls[dp.lastrepeat + 1];

	/*
	 *	Pull off the AX.25 headers, leaving the CTRL/PID bytes
	 */
	skb_pull(skb, ax25_addr_size(&dp));

	/* For our port addresses ? */
	if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* Also match on any registered callsign from L3/4 */
	if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* UI frame - bypass LAPB processing */
	if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
		skb->h.raw = skb->data + 2;		/* skip control and pid */

		ax25_send_to_raw(&dest, skb, skb->data[1]);

		if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0) {
			kfree_skb(skb);
			return 0;
		}

		/* Now we are pointing at the pid byte */
		switch (skb->data[1]) {
#ifdef CONFIG_INET
		case AX25_P_IP:
			skb_pull(skb,2);		/* drop PID/CTRL */
			skb->h.raw    = skb->data;
			skb->nh.raw   = skb->data;
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_IP);
			ip_rcv(skb, dev, ptype);	/* Note ptype here is the wrong one, fix me later */
			break;

		case AX25_P_ARP:
			skb_pull(skb,2);
			skb->h.raw    = skb->data;
			skb->nh.raw   = skb->data;
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_ARP);
			arp_rcv(skb, dev, ptype);	/* Note ptype here is wrong... */
			break;
#endif
		case AX25_P_TEXT:
			/* Now find a suitable dgram socket */
			sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
			if (sk != NULL) {
				bh_lock_sock(sk);
				if (atomic_read(&sk->sk_rmem_alloc) >=
				    sk->sk_rcvbuf) {
					kfree_skb(skb);
				} else {
					/*
					 *	Remove the control and PID.
					 */
					skb_pull(skb, 2);
					if (sock_queue_rcv_skb(sk, skb) != 0)
						kfree_skb(skb);
				}
				bh_unlock_sock(sk);
				sock_put(sk);
			} else {
				kfree_skb(skb);
			}
			break;

		default:
			kfree_skb(skb);	/* Will scan SOCK_AX25 RAW sockets */
			break;
		}

		return 0;
	}

	/*
	 *	Is connected mode supported on this device ?
	 *	If not, should we DM the incoming frame (except DMs) or
	 *	silently ignore them? For now we stay quiet.
	 */
	if (ax25_dev->values[AX25_VALUES_CONMODE] == 0) {
		kfree_skb(skb);
		return 0;
	}

	/* LAPB */

	/* AX.25 state 1-4 */

	ax25_digi_invert(&dp, &reverse_dp);

	if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
		/*
		 *	Process the frame. If it is queued up internally it
		 *	returns one, otherwise we free it immediately. This
		 *	routine itself wakes the user context layers so we do
		 *	no further work.
		 */
		if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
			kfree_skb(skb);

		ax25_cb_put(ax25);
		return 0;
	}

	/* AX.25 state 0 (disconnected) */

	/* a) received not a SABM(E) */

	if ((*skb->data & ~AX25_PF) != AX25_SABM &&
	    (*skb->data & ~AX25_PF) != AX25_SABME) {
		/*
		 *	Never reply to a DM. Also ignore any connects for
		 *	addresses that are not our interfaces and not a socket.
		 */
		if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
			ax25_return_dm(dev, &src, &dest, &dp);

		kfree_skb(skb);
		return 0;
	}

	/* b) received SABM(E) */

	if (dp.lastrepeat + 1 == dp.ndigi)
		sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
	else
		sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);

	if (sk != NULL) {
		bh_lock_sock(sk);
		if (sk_acceptq_is_full(sk) ||
		    (make = ax25_make_new(sk, ax25_dev)) == NULL) {
			if (mine)
				ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			bh_unlock_sock(sk);
			sock_put(sk);

			return 0;
		}

		ax25 = ax25_sk(make);
		skb_set_owner_r(skb, make);
		skb_queue_head(&sk->sk_receive_queue, skb);

		make->sk_state = TCP_ESTABLISHED;

		sk->sk_ack_backlog++;
		bh_unlock_sock(sk);
	} else {
		if (!mine) {
			kfree_skb(skb);
			return 0;
		}

		if ((ax25 = ax25_create_cb()) == NULL) {
			ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			return 0;
		}

		ax25_fillin_cb(ax25, ax25_dev);
	}

	ax25->source_addr = dest;
	ax25->dest_addr   = src;

	/*
	 *	Sort out any digipeated paths.
	 */
	if (dp.ndigi && !ax25->digipeat &&
	    (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
		kfree_skb(skb);
		ax25_destroy_socket(ax25);
		if (sk)
			sock_put(sk);
		return 0;
	}

	if (dp.ndigi == 0) {
		if (ax25->digipeat != NULL) {
			kfree(ax25->digipeat);
			ax25->digipeat = NULL;
		}
	} else {
		/* Reverse the source SABM's path */
		memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
	}

	if ((*skb->data & ~AX25_PF) == AX25_SABME) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_EWINDOW];
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_WINDOW];
	}

	ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);

#ifdef CONFIG_AX25_DAMA_SLAVE
	if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
		ax25_dama_on(ax25);
#endif

	ax25->state = AX25_STATE_3;

	ax25_cb_add(ax25);

	ax25_start_heartbeat(ax25);
	ax25_start_t3timer(ax25);
	ax25_start_idletimer(ax25);

	if (sk) {
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk, skb->len);
		sock_put(sk);
	} else
		kfree_skb(skb);

	return 0;
}
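
A detail that is easy to miss in ax25_rcv() is that every control-byte test masks off the poll/final bit first (& ~AX25_PF, or & ~0x10 for the UI check), so a frame classifies the same way whether or not P/F is set. The sketch below shows just that masking step; classify_control() is illustrative, while the numeric constants match the kernel's ax25.h definitions.

/* Hedged sketch of the control-byte tests in ax25_rcv(); classify_control()
 * is illustrative, but the constants match the kernel's ax25.h values.
 */
#include <stdio.h>

#define PF    0x10   /* poll/final bit (AX25_PF)              */
#define UI    0x03   /* unnumbered information (AX25_UI)      */
#define SABM  0x2f   /* connect request (AX25_SABM)           */
#define SABME 0x6f   /* extended connect request (AX25_SABME) */
#define DM    0x0f   /* disconnected mode (AX25_DM)           */

static const char *classify_control(unsigned char ctrl)
{
	unsigned char c = (unsigned char)(ctrl & ~PF);  /* ignore the P/F bit */

	if (c == UI)    return "UI frame: bypass LAPB, demux on the PID byte";
	if (c == SABM)  return "SABM: connection request";
	if (c == SABME) return "SABME: connection request, extended modulus";
	if (c == DM)    return "DM: never reply to this";
	return "something else: leave it to the LAPB state machine";
}

int main(void)
{
	/* The same frame type classifies alike with or without poll/final set. */
	printf("%s\n", classify_control(SABM));
	printf("%s\n", classify_control(SABM | PF));
	printf("%s\n", classify_control(UI | PF));
	return 0;
}
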
Example #6
void
pktdemux()
{
   PACKET   pkt;
   NET      ifc;                /* interface packet came from */
   IFMIB    mib;
   int      pkts;
   char *   eth;

   pkts = 0;   /* packets per loop */

   while (rcvdq.q_len)
   {
      /* If we are low on free packets, don't hog CPU cycles */
      if (pkts++ > bigfreeq.q_len)
      {
#ifdef SUPERLOOP
         return;        /* don't hog stack on superloop */
#else    /* SUPERLOOP */
         tk_yield(); /* let application tasks process received packets */
         pkts = 0;   /* reset counter */
#endif   /* SUPERLOOP else */
      }

      /* If we get a receive interrupt from the net during this
      lock, the MAC driver needs to wait or reschedule */
      LOCK_NET_RESOURCE(RXQ_RESID);
      pkt = (PACKET)q_deq(&rcvdq);
      UNLOCK_NET_RESOURCE(RXQ_RESID);
      if (!pkt) panic("pktdemux: got null pkt");
      ifc = pkt->net;

      mib = ifc->n_mib;
      /* maintain mib stats for unicast and broadcast */
      if (isbcast(ifc, (u_char*)pkt->nb_buff + ETHHDR_BIAS))
         mib->ifInNUcastPkts++;
      else
         mib->ifInUcastPkts++;

      if(mib->ifAdminStatus == NI_DOWN)
      {
         LOCK_NET_RESOURCE(FREEQ_RESID);
         pk_free(pkt);  /* dump packet from downed interface */
         UNLOCK_NET_RESOURCE(FREEQ_RESID);
         mib->ifInDiscards++;
         continue;      /* next packet */
      }

#ifdef NPDEBUG
      if (*(pkt->nb_buff - ALIGN_TYPE) != 'M' ||
          *(pkt->nb_buff + pkt->nb_blen) != 'M')
      {
         dtrap();
         panic("pktdemux: corrupt pkt");
      }
#endif   /* NPDEBUG */

#ifdef  LOSSY_IO
      if(NDEBUG & LOSSY_RX)
      {
         if(myloss())  
         {
            LOCK_NET_RESOURCE(FREEQ_RESID);
            pk_free(pkt);        /* punt packet */
            UNLOCK_NET_RESOURCE(FREEQ_RESID);
            in_lastloss = (int)(cticks & 0x07) - 4;  /* pseudo random reset */
            continue;            /* act as if we sent OK */
         }
      }
#endif   /* LOSSY_IO */

      /* see if driver set pkt->nb_prot and pkt->type */
      if((ifc->n_flags & NF_NBPROT) == 0)
      {
         /* Set pkt->type and pkt->nb_prot based on media type.
          * Some device drivers pass nb_plen as the total length of the
          * packet, while others subtract the MAC header. The latter is
          * probably the right thing to do, but because of this historic
          * inconsistency we don't try to fix it here - the longer size
          * turns out to be harmless since the IP layer fixes the size
          * based on the IP header length field.
          */
         switch(ifc->n_mib->ifType)
         {
         case ETHERNET:
            /* get pointer to ethernet header */
            eth = (pkt->nb_buff + ETHHDR_BIAS);
#ifdef IEEE_802_3
            /* see if it's got snap baggage */
            if (ET_TYPE_GET(eth) <= 0x0600)
            {
               struct snap_hdr *snap;
               snap = (struct snap_hdr *)(pkt->nb_buff + ETHHDR_SIZE);
               pkt->type = (unshort)(snap->type);
               pkt->nb_prot = pkt->nb_buff + pkt->net->n_lnh;
            }
            else
            {
               pkt->type = htons((unshort)ET_TYPE_GET(eth));
               pkt->nb_prot = pkt->nb_buff + ETHHDR_SIZE;
            }
#else
            pkt->type = htons((unshort)ET_TYPE_GET(eth));
            pkt->nb_prot = pkt->nb_buff + pkt->net->n_lnh;
#endif   /* IEEE_802_3 */
            break;
#if defined(USE_PPP) || defined(USE_SLIP) || defined(PROTOCOL_LIBS)
         case PPP:   /* PPP or SLIP over a UART */
         case SLIP:
            pkt->nb_prot = pkt->nb_buff + MaxLnh;
            pkt->type = IPTP;    /* only type our PPP supports */
            break;
#endif  /* USE_PPP || USE_SLIP */
#ifdef USE_PPPOE
         case PPPOE:
            /* do not change type yet, for PPPoE */
            break;
#endif   /* USE_PPPOE */
         default:    /* driver bug? */
            dprintf("pktdemux: bad Iface type %ld\n",ifc->n_mib->ifType);
            LOCK_NET_RESOURCE(FREEQ_RESID);
            pk_free(pkt);
            UNLOCK_NET_RESOURCE(FREEQ_RESID);
            continue;
         }
      }

      /* pkt->nb_prot and pkt->type are now set. pass pkt to upper layer */
      switch(pkt->type)
      {
      case IPTP:     /* IP type */
         LOCK_NET_RESOURCE(NET_RESID);
#ifdef SHARED_IPADDRS
         add_share_route(pkt);
#endif /* SHARED_IPADDRS */
#ifdef IP_V4
         ip_rcv(pkt);
#else
         /* don't care, it's IPv4 */
         LOCK_NET_RESOURCE(FREEQ_RESID);
         pk_free(pkt);
         UNLOCK_NET_RESOURCE(FREEQ_RESID);
#endif
         UNLOCK_NET_RESOURCE(NET_RESID);
         break;
#ifdef INCLUDE_ARP
      case ARPTP:       /* ARP type */
         LOCK_NET_RESOURCE(NET_RESID);
         arprcv(pkt);
         UNLOCK_NET_RESOURCE(NET_RESID);
         break;
#endif   /* INCLUDE_ARP */
#ifdef USE_PPPOE
      case  htons(0x8863):
      case  htons(0x8864):
         LOCK_NET_RESOURCE(NET_RESID);
         poe_rcv(pkt);
         UNLOCK_NET_RESOURCE(NET_RESID);
         break;
#endif   /* USE_PPPOE */
#ifdef IP_V6
      case  htons(0x86DD):
         /* Each received v6 pkt goes thru here exactly once, so set the
          * outer (first, and usually only) ipv6 header pointer. Tunneled headers
          * may exist further into the packet.
          */
         pkt->ip6_hdr = (struct ipv6 *)pkt->nb_prot;
         LOCK_NET_RESOURCE(NET_RESID);
         ip6_rcv(pkt);
         UNLOCK_NET_RESOURCE(NET_RESID);
         break;
#endif
      default:
#ifdef NPDEBUG
         if (NDEBUG & UPCTRACE)
            dprintf("pktdemux: bad pkt type 0x%04x\n", ntohs(pkt->type));
#endif   /* NPDEBUG */
         ifc->n_mib->ifInUnknownProtos++;
         LOCK_NET_RESOURCE(FREEQ_RESID);
         pk_free(pkt);           /* return to free buffer */
         UNLOCK_NET_RESOURCE(FREEQ_RESID);
         break;
      }
      continue;
   }
}
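
pktdemux() decides between Ethernet II and IEEE 802.3/SNAP framing by looking at the 16-bit type/length field: a value of 0x0600 (1536) or less is a frame length, anything larger is an EtherType. A self-contained sketch of that test is below; read_type_field() and the zero-filled headers are simplified stand-ins, but the 0x0600 threshold is the standard one.

/* Hedged sketch of the Ethernet II vs. IEEE 802.3/SNAP test in pktdemux();
 * read_type_field() and the zero-filled headers are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

/* Bytes 12-13 of an Ethernet frame hold the type/length field (big-endian). */
static uint16_t read_type_field(const uint8_t *frame)
{
	return (uint16_t)((frame[12] << 8) | frame[13]);
}

int main(void)
{
	uint8_t ethernet_ii[14] = {0};   /* 14-byte MAC header, addresses zeroed */
	uint8_t ieee_802_3[14]  = {0};

	ethernet_ii[12] = 0x08; ethernet_ii[13] = 0x00;  /* 0x0800 = IPv4 EtherType */
	ieee_802_3[12]  = 0x00; ieee_802_3[13]  = 0x40;  /* 0x0040 = 64-byte length */

	const uint8_t *frames[] = { ethernet_ii, ieee_802_3 };

	for (int i = 0; i < 2; i++) {
		uint16_t tl = read_type_field(frames[i]);

		if (tl <= 0x0600)
			/* a length: 802.3 framing, the real type is in the SNAP header */
			printf("802.3 frame, length %u, check the SNAP header\n", tl);
		else
			printf("Ethernet II frame, EtherType 0x%04x\n", tl);
	}
	return 0;
}
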