/*
 * Builds and queues a Cisco HDLC control packet (keepalive, address
 * request or address reply).  par1/par2 carry type-dependent payload
 * (e.g. tx/rx sequence numbers, or IP address and mask).  The request
 * is silently dropped if no skb can be allocated.
 */
static void cisco_keepalive_send(hdlc_device *hdlc, u32 type, u32 par1, u32 par2)
{
	struct sk_buff *skb;
	cisco_packet *data;

	skb = dev_alloc_skb(sizeof(hdlc_header) + sizeof(cisco_packet));
	if (!skb) {
		printk(KERN_WARNING "%s: Memory squeeze on cisco_keepalive_send()\n", hdlc_to_name(hdlc));
		return;
	}
	/* NOTE(review): reserves 4 bytes for the header written by
	   cisco_hard_header() -- presumably sizeof(hdlc_header) == 4, verify */
	skb_reserve(skb, 4);
	cisco_hard_header(skb, hdlc_to_dev(hdlc), CISCO_KEEPALIVE, NULL, NULL, 0);

	/* Fill the payload at the current tail, then account for it below */
	data = (cisco_packet*)skb->tail;
	data->type = htonl(type);
	data->par1 = htonl(par1);
	data->par2 = htonl(par2);
	data->rel = 0xFFFF;	/* always 0xFFFF in this driver */
	/* we will need do_div here if 1000 % HZ != 0 */
	data->time = htonl(jiffies * (1000 / HZ));

	skb_put(skb, sizeof(cisco_packet));
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = hdlc_to_dev(hdlc);
	skb->nh.raw = skb->data;

	dev_queue_xmit(skb);
}
/*
 * Unregisters an HDLC device.  A NULL device name means the device was
 * never (successfully) registered -- register_hdlc_device() uses the
 * name field as the "registered" marker -- so there is nothing to undo.
 */
void unregister_hdlc_device(hdlc_device *hdlc)
{
	if (!hdlc_to_dev(hdlc)->name)
		return;	/* device not registered */

	destroy_pvc_list(hdlc);
	unregister_netdevice(hdlc_to_dev(hdlc));
	MOD_DEC_USE_COUNT;
}
/*
 * Registers an HDLC device: initializes net_device defaults (raw HDLC,
 * point-to-point, no ARP), clears the protocol state, allocates an
 * "hdlc%d" name and registers the net device.
 * Returns 0 or a negative errno.
 *
 * FIX: the register_netdev() error was previously masked as -EIO;
 * propagate the real errno so callers can report the actual cause.
 */
int register_hdlc_device(hdlc_device *hdlc)
{
	int result;
	struct net_device *dev = hdlc_to_dev(hdlc);

	dev->get_stats = hdlc_get_stats;
	dev->change_mtu = hdlc_change_mtu;
	dev->mtu = HDLC_MAX_MTU;

	dev->type = ARPHRD_RAWHDLC;
	dev->hard_header_len = 16;

	dev->flags = IFF_POINTOPOINT | IFF_NOARP;

	hdlc->proto = -1;		/* no protocol attached yet */
	hdlc->proto_detach = NULL;

	result = dev_alloc_name(dev, "hdlc%d");
	if (result < 0)
		return result;

	result = register_netdev(dev);
	if (result != 0)
		return result;

	MOD_INC_USE_COUNT;
	return 0;
}
/*
 * Binds the syncppp (sppp) layer to this HDLC device and opens it.
 * The device's do_ioctl and change_mtu handlers are saved before
 * sppp_attach() overwrites them; do_ioctl is restored immediately,
 * change_mtu is restored later by ppp_close().
 * Returns 0 or the error from sppp_open().
 */
static int ppp_open(hdlc_device *hdlc)
{
	struct net_device *dev = hdlc_to_dev(hdlc);
	void *old_ioctl;
	int result;

	/* syncppp locates its per-device state through dev->priv */
	dev->priv = &hdlc->state.ppp.syncppp_ptr;
	hdlc->state.ppp.syncppp_ptr = &hdlc->state.ppp.pppdev;
	hdlc->state.ppp.pppdev.dev = dev;

	/* save handlers before sppp_attach() replaces them */
	old_ioctl = dev->do_ioctl;
	hdlc->state.ppp.old_change_mtu = dev->change_mtu;
	sppp_attach(&hdlc->state.ppp.pppdev);
	/* sppp_attach nukes them. We don't need syncppp's ioctl */
	dev->do_ioctl = old_ioctl;
	/* run plain PPP, not syncppp's Cisco-HDLC emulation */
	hdlc->state.ppp.pppdev.sppp.pp_flags &= ~PP_CISCO;
	dev->type = ARPHRD_PPP;
	result = sppp_open(dev);
	if (result) {
		sppp_detach(dev);
		return result;
	}
	return 0;
}
/*
 * Registers an HDLC device (syncppp-aware variant): wires syncppp's
 * back-pointer chain, sets Frame Relay LMI defaults, allocates an
 * "hdlc%d" name and registers the net device.
 * Returns 0 or a negative errno.
 *
 * FIX: the register_netdev() error was previously masked as -EIO;
 * propagate the real errno so callers can report the actual cause.
 */
int register_hdlc_device(hdlc_device *hdlc)
{
	int result;
	struct net_device *dev = hdlc_to_dev(hdlc);

	dev->init = hdlc_init;
	dev->priv = &hdlc->syncppp_ptr;
	hdlc->syncppp_ptr = &hdlc->pppdev;
	hdlc->pppdev.dev = dev;

	hdlc->mode = MODE_NONE;
	hdlc->lmi.T391 = 10;	/* polling verification timer */
	hdlc->lmi.T392 = 15;	/* link integrity verification polling timer */
	hdlc->lmi.N391 = 6;	/* full status polling counter */
	hdlc->lmi.N392 = 3;	/* error threshold */
	hdlc->lmi.N393 = 4;	/* monitored events count */

	result = dev_alloc_name(dev, "hdlc%d");
	if (result < 0)
		return result;

	result = register_netdev(dev);
	if (result != 0)
		return result;

	MOD_INC_USE_COUNT;
	return 0;
}
/*
 * Unregisters an HDLC device: detach whatever protocol is currently
 * bound (via hdlc_proto_detach), remove the net device from the stack,
 * and drop the module reference taken at registration time.
 */
void unregister_hdlc_device(hdlc_device *hdlc)
{
	hdlc_proto_detach(hdlc);
	unregister_netdev(hdlc_to_dev(hdlc));
	MOD_DEC_USE_COUNT;
}
/*
 * Receive entry point: dispatches an incoming frame to the protocol
 * handler selected by hdlc->mode.  In soft modes the per-protocol
 * handlers (fr_netif/cisco_netif) do their own accounting; in hardware
 * modes the frame is accounted here and delivered directly.  dlci is
 * only meaningful for hardware Frame Relay mode.
 *
 * FIX: the "packet from nonexistent PVC" branch freed the skb but then
 * fell through, dereferencing the NULL pvc pointer and the freed skb.
 * It now returns immediately after dropping the frame.
 */
void hdlc_netif_rx(hdlc_device *hdlc, struct sk_buff *skb, int dlci)
{
	skb->mac.raw = skb->data;

	if (mode_is(hdlc, MODE_SOFT)) {
		if (mode_is(hdlc, MODE_FR)) {
			fr_netif(hdlc, skb);
			return;
		} else if (mode_is(hdlc, MODE_CISCO)) {
			cisco_netif(hdlc, skb);
			return;
		} else if (mode_is(hdlc, MODE_PPP)) {
			hdlc->stats.rx_bytes += skb->len;
			hdlc->stats.rx_packets++;
			skb->protocol = htons(ETH_P_WAN_PPP);
			skb->dev = hdlc_to_dev(hdlc);
			netif_rx(skb);
			return;
		}
	} else {		/* protocol support in hardware/firmware */
		hdlc->stats.rx_bytes += skb->len;
		hdlc->stats.rx_packets++;

		if (mode_is(hdlc, MODE_HDLC))
			skb->protocol = htons(ETH_P_IP);
		/* otherwise protocol set by hw driver */

		if (mode_is(hdlc, MODE_FR)) {
			pvc_device *pvc = find_pvc(hdlc, dlci);

			if (!pvc) {	/* packet from nonexistent PVC */
				hdlc->stats.rx_errors++;
				dev_kfree_skb(skb);
				return;
			}

			pvc->stats.rx_bytes += skb->len;
			pvc->stats.rx_packets++;
			skb->dev = &pvc->netdev;
		} else
			skb->dev = hdlc_to_dev(hdlc);

		netif_rx(skb);
		return;
	}

	hdlc->stats.rx_errors++;	/* unsupported mode */
	dev_kfree_skb(skb);
}
/*
 * Shared open path for the software Frame Relay and Cisco HDLC modes:
 * resets the LMI/keepalive state, installs the matching link-layer
 * header builder and starts the periodic protocol timer.
 */
static void fr_cisco_open(hdlc_device *hdlc)
{
	/* Fresh link: reset sequence numbers and error bookkeeping */
	hdlc->lmi.state = LINK_STATE_CHANGED;
	hdlc->lmi.txseq = hdlc->lmi.rxseq = 0;
	hdlc->lmi.last_errors = 0xFFFFFFFF;
	hdlc->lmi.N391cnt = 0;

	hdlc_to_dev(hdlc)->hard_header = mode_is(hdlc, MODE_CISCO) ?
		cisco_hard_header : fr_hard_header;

	init_timer(&hdlc->timer);
	hdlc->timer.expires = jiffies + HZ;	/* First poll after 1 second */
	if (mode_is(hdlc, MODE_FR))
		hdlc->timer.function = fr_timer;
	else
		hdlc->timer.function = cisco_timer;
	hdlc->timer.data = (unsigned long)hdlc;
	add_timer(&hdlc->timer);
}
/*
 * Receive entry point: hands a raw HDLC frame to the protocol layer
 * selected by hdlc->mode.  Frames in unsupported modes (or rejected by
 * LAPB) are counted as rx errors and dropped.
 */
void hdlc_netif_rx(hdlc_device *hdlc, struct sk_buff *skb)
{
	/* skb contains raw HDLC frame, in both hard- and software modes */
	skb->mac.raw = skb->data;

	switch(hdlc->mode & MODE_MASK) {
	case MODE_HDLC:
		/* raw HDLC: deliver directly, assuming an IPv4 payload */
		skb->protocol = htons(ETH_P_IP);
		skb->dev = hdlc_to_dev(hdlc);
		netif_rx(skb);
		return;

	case MODE_FR:
		fr_netif(hdlc, skb);
		return;

	case MODE_CISCO:
		cisco_netif(hdlc, skb);
		return;

#ifdef CONFIG_HDLC_PPP
	case MODE_PPP:
#if 0
		sppp_input(hdlc_to_dev(hdlc), skb);
#else
		/* deliver to syncppp via the protocol field rather than
		   calling sppp_input() directly */
		skb->protocol = htons(ETH_P_WAN_PPP);
		skb->dev = hdlc_to_dev(hdlc);
		netif_rx(skb);
#endif
		return;
#endif

#ifdef CONFIG_HDLC_X25
	case MODE_X25:
		skb->dev = hdlc_to_dev(hdlc);
		if (lapb_data_received(hdlc, skb) == LAPB_OK)
			return;
		/* LAPB rejected the frame -- fall out and count an error */
		break;
#endif
	}

	hdlc->stats.rx_errors++;
	dev_kfree_skb_any(skb);
}
/*
 * ioctl handler for the Cisco HDLC protocol.
 * IF_GET_PROTO: copy the current cisco settings to user space (or
 * report the required buffer size via -ENOBUFS).
 * IF_PROTO_CISCO: validate the new settings, attach the hardware with
 * NRZ/CRC16 framing, then switch the device over to Cisco HDLC.
 * Returns 0 or a negative errno.
 */
int hdlc_cisco_ioctl(hdlc_device *hdlc, struct ifreq *ifr)
{
	cisco_proto *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco;
	const size_t size = sizeof(cisco_proto);
	cisco_proto new_settings;
	struct net_device *dev = hdlc_to_dev(hdlc);
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		ifr->ifr_settings.type = IF_PROTO_CISCO;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(cisco_s, &hdlc->state.cisco.settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_CISCO:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		/* protocol may only be changed while the interface is down */
		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, cisco_s, size))
			return -EFAULT;

		/* sanity limits on the keepalive parameters --
		   presumably in seconds, verify against userspace */
		if (new_settings.interval < 1 ||
		    new_settings.timeout < 2)
			return -EINVAL;

		result=hdlc->attach(hdlc, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		/* detach the old protocol only after attach succeeded */
		hdlc_proto_detach(hdlc);
		memcpy(&hdlc->state.cisco.settings, &new_settings, size);

		hdlc->open = cisco_open;
		hdlc->stop = cisco_close;
		hdlc->netif_rx = cisco_rx;
		hdlc->type_trans = cisco_type_trans;
		hdlc->proto = IF_PROTO_CISCO;
		dev->hard_start_xmit = hdlc->xmit;
		dev->hard_header = cisco_hard_header;
		dev->type = ARPHRD_CISCO;
		dev->addr_len = 0;
		return 0;
	}

	return -EINVAL;
}
/*
 * Shuts down the syncppp layer on this device and restores the
 * net_device fields that were replaced while PPP was attached.
 */
static void ppp_close(hdlc_device *hdlc)
{
	struct net_device *ndev = hdlc_to_dev(hdlc);

	/* stop and detach syncppp */
	sppp_close(ndev);
	sppp_detach(ndev);

	/* undo the handler/MTU changes made when PPP was attached */
	ndev->rebuild_header = NULL;
	ndev->change_mtu = hdlc->state.ppp.old_change_mtu;
	ndev->mtu = HDLC_MAX_MTU;
	ndev->hard_header_len = 16;
}
int x25_data_indication(void *token, struct sk_buff *skb) { hdlc_device *hdlc = token; unsigned char *ptr; ptr = skb_push(skb, 1); *ptr = 0; skb->dev = hdlc_to_dev(hdlc); skb->protocol = htons(ETH_P_X25); skb->mac.raw = skb->data; skb->pkt_type = PACKET_HOST; return netif_rx(skb); }
/*
 * Builds and queues a Cisco HDLC control packet (keepalive, address
 * request or address reply).  par1/par2 carry type-dependent payload
 * (e.g. sequence numbers, or IP address and mask).
 *
 * FIX: dev_alloc_skb() may return NULL under memory pressure; the
 * result was used unchecked, crashing in skb_reserve().  The request
 * is now logged and dropped instead.
 */
static void cisco_keepalive_send(hdlc_device *hdlc, u32 type, u32 par1, u32 par2)
{
	struct sk_buff *skb;
	cisco_packet *data;

	skb = dev_alloc_skb(sizeof(hdlc_header) + sizeof(cisco_packet));
	if (!skb) {
		printk(KERN_WARNING "%s: Memory squeeze on cisco_keepalive_send()\n",
		       hdlc->name);
		return;
	}
	skb_reserve(skb, 4);	/* room for the header built below */
	cisco_hard_header(skb, hdlc_to_dev(hdlc), CISCO_KEEPALIVE, NULL, NULL, 0);

	/* fill the payload at the tail, then account for it with skb_put */
	data = (cisco_packet*)skb->tail;
	data->type = htonl(type);
	data->par1 = htonl(par1);
	data->par2 = htonl(par2);
	data->rel = 0xFFFF;	/* always 0xFFFF in this driver */
	/* NOTE(review): loses precision when 1000 % HZ != 0 */
	data->time = htonl(jiffies * 1000/HZ);

	skb_put(skb, sizeof(cisco_packet));
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = hdlc_to_dev(hdlc);

	dev_queue_xmit(skb);
}
/*
 * Transmit handler for a Frame Relay PVC sub-device: if the PVC is
 * active, account the frame and forward it to the master (physical)
 * HDLC device; otherwise count it as dropped and free it.
 */
static int pvc_xmit(struct sk_buff *skb, struct device *dev)
{
	pvc_device *pvc = dev_to_pvc(dev);

	if (!(pvc->state & PVC_STATE_ACTIVE)) {
		/* PVC is down -- drop the frame */
		pvc->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	skb->dev = hdlc_to_dev(pvc->master);
	pvc->stats.tx_bytes += skb->len;
	pvc->stats.tx_packets++;
	dev_queue_xmit(skb);
	return 0;
}
static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) { pvc_device *pvc = dev_to_pvc(dev); if (pvc->state & PVC_STATE_ACTIVE) { skb->dev = hdlc_to_dev(pvc->master); pvc->stats.tx_bytes += skb->len; pvc->stats.tx_packets++; if (pvc->state & PVC_STATE_FECN) pvc->stats.tx_compressed++; /* TX Congestion counter */ dev_queue_xmit(skb); } else { pvc->stats.tx_dropped++; dev_kfree_skb(skb); } return 0; }
static int pvc_open(struct device *dev) { pvc_device *pvc=dev_to_pvc(dev); int result=0; if ((hdlc_to_dev(pvc->master)->flags & IFF_UP) == 0) return -EIO; /* Master must be UP in order to activate PVC */ memset(&(pvc->stats), 0, sizeof(struct net_device_stats)); pvc->state=0; if (!mode_is(pvc->master, MODE_SOFT) && pvc->master->open_pvc) result=pvc->master->open_pvc(pvc); if (result) return result; pvc->master->lmi.state |= LINK_STATE_CHANGED; return 0; }
void x25_connect_disconnect(void *token, int reason, int code) { hdlc_device *hdlc = token; struct sk_buff *skb; unsigned char *ptr; if ((skb = dev_alloc_skb(1)) == NULL) { printk(KERN_ERR "%s: out of memory\n", hdlc_to_name(hdlc)); return; } ptr = skb_put(skb, 1); *ptr = code; skb->dev = hdlc_to_dev(hdlc); skb->protocol = htons(ETH_P_X25); skb->mac.raw = skb->data; skb->pkt_type = PACKET_HOST; netif_rx(skb); }
/*
 * ioctl handler for the (sync)PPP protocol.  PPP has no settable
 * parameters: IF_GET_PROTO merely reports the protocol id, and
 * IF_PROTO_PPP attaches the hardware with NRZ/CRC16 framing and
 * installs the PPP protocol hooks.  Returns 0 or a negative errno.
 */
int hdlc_ppp_ioctl(hdlc_device *hdlc, struct ifreq *ifr)
{
	struct net_device *dev = hdlc_to_dev(hdlc);
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		ifr->ifr_settings.type = IF_PROTO_PPP;
		return 0; /* return protocol only, no settable parameters */

	case IF_PROTO_PPP:
		if(!capable(CAP_NET_ADMIN))
			return -EPERM;

		/* protocol may only be changed while the interface is down */
		if(dev->flags & IFF_UP)
			return -EBUSY;

		/* no settable parameters */

		result=hdlc->attach(hdlc, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		/* detach the old protocol only after attach succeeded */
		hdlc_proto_detach(hdlc);
		memset(&hdlc->proto, 0, sizeof(hdlc->proto));

		hdlc->proto.open = ppp_open;
		hdlc->proto.close = ppp_close;
		hdlc->proto.type_trans = ppp_type_trans;
		hdlc->proto.id = IF_PROTO_PPP;
		dev->hard_start_xmit = hdlc->xmit;
		dev->hard_header = NULL;
		dev->type = ARPHRD_PPP;
		dev->addr_len = 0;
		return 0;
	}

	return -EINVAL;
}
/*
 * Registers an HDLC device (2.2-era variant).  dev->name doubles as
 * the "registered" marker: it is set here and cleared again on
 * registration failure; unregister_hdlc_device() relies on this.
 * Returns 0 or a negative errno.
 */
int register_hdlc_device(hdlc_device *hdlc)
{
	int result;

#ifndef MODULE
	/* print the driver banner once when built into the kernel */
	if (!version_printed) {
		printk(KERN_INFO "%s\n", version);
		version_printed = 1;
	}
#endif

	hdlc_to_dev(hdlc)->name = hdlc->name;
	hdlc_to_dev(hdlc)->init = hdlc_init;
	hdlc_to_dev(hdlc)->priv = &hdlc->syncppp_ptr; /* remove in 2.3 */
	hdlc->syncppp_ptr = &hdlc->pppdev;
	hdlc->pppdev.dev=hdlc_to_dev(hdlc);
	hdlc->mode = MODE_NONE;
	/* Frame Relay LMI protocol defaults */
	hdlc->lmi.T391 = 10;	/* polling verification timer */
	hdlc->lmi.T392 = 15;	/* link integrity verification polling timer */
	hdlc->lmi.N391 = 6;	/* full status polling counter */
	hdlc->lmi.N392 = 3;	/* error threshold */
	hdlc->lmi.N393 = 4;	/* monitored events count */

	result=dev_alloc_name(hdlc_to_dev(hdlc), "hdlc%d");
	if (result<0)
		return result;

	if (register_netdevice(hdlc_to_dev(hdlc))!=0) {
		hdlc_to_dev(hdlc)->name=NULL;	/* non-NULL means registered */
		return -EIO;
	}

	dev_init_buffers(hdlc_to_dev(hdlc));
	MOD_INC_USE_COUNT;
	return 0;
}
/*
 * Builds and queues a Frame Relay LMI status / status-enquiry frame.
 * As DCE with fullrep set, a per-PVC status element is appended for
 * every configured PVC (and newly usable PVCs are marked NEW/logged).
 *
 * FIX: the dev_alloc_skb() result was used unchecked -- memset() on a
 * NULL skb crashed under memory pressure.  The frame is now logged
 * and skipped instead (the next timer tick will retry).
 */
static void fr_lmi_send(hdlc_device *hdlc, int fullrep)
{
	struct sk_buff *skb;
	pvc_device *pvc = hdlc->first_pvc;
	int len = mode_is(hdlc, MODE_FR_ANSI) ? LMI_ANSI_LENGTH : LMI_LENGTH;
	int stat_len = 3;
	u8 *data;
	int i = 0;

	if (mode_is(hdlc, MODE_DCE) && fullrep) {
		len += hdlc->pvc_count * (2 + stat_len);
		if (len > HDLC_MAX_MTU) {
			printk(KERN_WARNING "%s: Too many PVCs while sending "
			       "LMI full report\n", hdlc->name);
			return;
		}
	}

	skb = dev_alloc_skb(len);
	if (!skb) {
		printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n",
		       hdlc->name);
		return;
	}
	memset(skb->data, 0, len);
	skb_reserve(skb, 4);	/* room for the Q.922 header built below */
	fr_hard_header(skb, hdlc_to_dev(hdlc), LMI_PROTO, NULL, NULL, 0);
	data = skb->tail;

	/* fixed part of the LMI message */
	data[i++] = LMI_CALLREF;
	data[i++] = mode_is(hdlc, MODE_DCE) ? LMI_STATUS : LMI_STATUS_ENQUIRY;
	if (mode_is(hdlc, MODE_FR_ANSI))
		data[i++] = LMI_ANSI_LOCKSHIFT;
	data[i++] = mode_is(hdlc, MODE_FR_CCITT) ? LMI_CCITT_REPTYPE :
		LMI_REPTYPE;
	data[i++] = LMI_REPT_LEN;
	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
	data[i++] = mode_is(hdlc, MODE_FR_CCITT) ? LMI_CCITT_ALIVE : LMI_ALIVE;
	data[i++] = LMI_INTEG_LEN;
	data[i++] = hdlc->lmi.txseq = fr_lmi_nextseq(hdlc->lmi.txseq);
	data[i++] = hdlc->lmi.rxseq;

	/* DCE full report: one status element per PVC */
	if (mode_is(hdlc, MODE_DCE) && fullrep) {
		while (pvc) {
			data[i++] = mode_is(hdlc, MODE_FR_CCITT) ?
				LMI_CCITT_PVCSTAT : LMI_PVCSTAT;
			data[i++] = stat_len;

			/* mark (and log) PVCs that just became usable */
			if ((hdlc->lmi.state & LINK_STATE_RELIABLE) &&
			    (pvc->netdev.flags & IFF_UP) &&
			    !(pvc->state & (PVC_STATE_ACTIVE | PVC_STATE_NEW))) {
				pvc->state |= PVC_STATE_NEW;
				fr_log_dlci_active(pvc);
			}

			dlci_to_status(hdlc, netdev_dlci(&pvc->netdev),
				       data + i, pvc->state);
			i += stat_len;
			pvc = pvc->next;
		}
	}

	skb_put(skb, i);
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = hdlc_to_dev(hdlc);

	dev_queue_xmit(skb);
}
/*
 * ioctl handler for Ethernet-over-raw-HDLC.
 * IF_GET_PROTO: copy the current raw-HDLC settings to user space (or
 * report the required buffer size via -ENOBUFS).
 * IF_PROTO_HDLC_ETH: attach with the requested (or default NRZ/CRC16)
 * framing and reconfigure the interface as Ethernet-like via
 * ether_setup(), preserving the driver's change_mtu handler and queue
 * length, and assigning a 00:01-prefixed random MAC address.
 * Returns 0 or a negative errno.
 */
int hdlc_raw_eth_ioctl(hdlc_device *hdlc, struct ifreq *ifr)
{
	raw_hdlc_proto *raw_s = ifr->ifr_settings.ifs_ifsu.raw_hdlc;
	const size_t size = sizeof(raw_hdlc_proto);
	raw_hdlc_proto new_settings;
	struct net_device *dev = hdlc_to_dev(hdlc);
	int result;
	void *old_ch_mtu;
	int old_qlen;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		ifr->ifr_settings.type = IF_PROTO_HDLC_ETH;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(raw_s, &hdlc->state.raw_hdlc.settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_HDLC_ETH:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		/* protocol may only be changed while the interface is down */
		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, raw_s, size))
			return -EFAULT;

		/* substitute driver defaults for "don't care" values */
		if (new_settings.encoding == ENCODING_DEFAULT)
			new_settings.encoding = ENCODING_NRZ;
		if (new_settings.parity == PARITY_DEFAULT)
			new_settings.parity = PARITY_CRC16_PR1_CCITT;

		result = hdlc->attach(hdlc, new_settings.encoding,
				      new_settings.parity);
		if (result)
			return result;

		/* detach the old protocol only after attach succeeded */
		hdlc_proto_detach(hdlc);
		memcpy(&hdlc->state.raw_hdlc.settings, &new_settings, size);

		hdlc->open = NULL;
		hdlc->stop = NULL;
		hdlc->netif_rx = NULL;
		hdlc->type_trans = eth_type_trans;
		hdlc->proto = IF_PROTO_HDLC_ETH;
		dev->hard_start_xmit = eth_tx;

		/* ether_setup() overwrites these -- save and restore the
		   driver-provided values */
		old_ch_mtu = dev->change_mtu;
		old_qlen = dev->tx_queue_len;
		ether_setup(dev);
		dev->change_mtu = old_ch_mtu;
		dev->tx_queue_len = old_qlen;

		/* fixed 00:01 prefix, random remainder */
		memcpy(dev->dev_addr, "\x00\x01", 2);
		get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
		return 0;
	}

	return -EINVAL;
}
/*
 * Probes and brings up one Moxa C101 card at the given IRQ and shared
 * RAM window address: validates the parameters, allocates and zeroes
 * the card structure, claims the IRQ, initializes the SCA chip and
 * registers the resulting HDLC device.  On success the card is linked
 * onto the driver's card list (via the file-scope new_card pointer).
 * Returns 0 or a negative errno.
 */
static int c101_run(unsigned long irq, unsigned long winbase)
{
	card_t *card;
	int result;

	if (irq<3 || irq>15 || irq == 6) /* FIXME */ {
		printk(KERN_ERR "c101: invalid IRQ value\n");
		return -ENODEV;
	}

	/* window must lie in C0000-DFFFF and be 16 KB aligned */
	if (winbase<0xC0000 || winbase>0xDFFFF || (winbase&0x3FFF)!=0) {
		printk(KERN_ERR "c101: invalid RAM value\n");
		return -ENODEV;
	}

	card=kmalloc(sizeof(card_t), GFP_KERNEL);
	if (card==NULL) {
		printk(KERN_ERR "c101: unable to allocate memory\n");
		return -ENOBUFS;
	}
	memset(card, 0, sizeof(card_t));

	if (request_irq(irq, sca_intr, 0, devname, card)) {
		printk(KERN_ERR "c101: could not allocate IRQ\n");
		c101_destroy_card(card);
		return(-EBUSY);
	}
	card->irq=irq;

	card->win0base=(u8*)winbase;

	/* 2 rings required for 1 port */
	card->ring_buffers = (RAM_SIZE-C101_WINDOW_SIZE) / (2 * HDLC_MAX_MTU);
	printk(KERN_DEBUG "c101: using %u packets rings\n",card->ring_buffers);

	card->buff_offset = C101_WINDOW_SIZE; /* Bytes 1D00-1FFF reserved */

	readb(card->win0base+C101_PAGE); /* Resets SCA? */
	udelay(100);
	writeb(0, card->win0base+C101_PAGE);
	writeb(0, card->win0base+C101_DTR); /* Power-up for RAM? */

	sca_init(card, 0);

	/* hook the generic SCA handlers onto the HDLC device */
	card->hdlc.ioctl=sca_ioctl;
	card->hdlc.open=sca_open;
	card->hdlc.close=sca_close;
	hdlc_to_dev(&card->hdlc)->hard_start_xmit=sca_xmit;

	hdlc_to_dev(&card->hdlc)->irq=irq;
	hdlc_to_dev(&card->hdlc)->tx_queue_len=50;

	result=register_hdlc_device(&card->hdlc);
	if (result) {
		printk(KERN_WARNING "c101: unable to register hdlc device\n");
		c101_destroy_card(card);
		return result;
	}

	sca_init_sync_port(card); /* Set up C101 memory */

	/* append to the driver's card list (new_card defined at file scope) */
	*new_card=card;
	new_card=&card->next_card;
	return 0;
}
/*
 * Switches the device to a new protocol mode.  First offers the mode
 * to the card driver (hardware/firmware protocol support); if that is
 * unavailable or fails, falls back to the "host software" (MODE_SOFT)
 * implementation, preparing the Frame Relay LMI address when needed.
 * Finally sets the matching ARP hardware type, clears statistics and
 * drops any existing PVC list.  Returns 0 or a negative errno.
 *
 * FIX: the ARPHRD selection used an unchained `if` for MODE_PPP, so
 * the trailing `else` immediately clobbered ARPHRD_PPP with
 * ARPHRD_HDLC.  The checks are now a single if/else-if chain.
 */
static int hdlc_set_mode(hdlc_device *hdlc, int mode)
{
	int result = -1;	/* Default to soft modes */

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;
	if (hdlc_to_dev(hdlc)->flags & IFF_UP)
		return -EBUSY;	/* must be down to switch modes */

	hdlc_to_dev(hdlc)->addr_len = 0;
	hdlc->mode = MODE_NONE;

	/* try hardware/firmware protocol support first */
	if (!(mode & MODE_SOFT))
		switch(mode) {
		case MODE_HDLC:
			result = hdlc->set_mode ?
				hdlc->set_mode(hdlc, MODE_HDLC) : 0;
			break;

		case MODE_X25:	/* By card */
		case MODE_CISCO:
		case MODE_PPP:
		case MODE_FR_ANSI:
		case MODE_FR_CCITT:
		case MODE_FR_ANSI | MODE_DCE:
		case MODE_FR_CCITT | MODE_DCE:
			result = hdlc->set_mode ?
				hdlc->set_mode(hdlc, mode) : -ENOSYS;
			break;

		default:
			return -EINVAL;
		}

	if (result) {
		mode |= MODE_SOFT;	/* Try "host software" protocol */
		switch(mode & ~MODE_SOFT) {
		case MODE_CISCO:
		case MODE_PPP:
			break;

		case MODE_FR_ANSI:
		case MODE_FR_CCITT:
		case MODE_FR_ANSI | MODE_DCE:
		case MODE_FR_CCITT | MODE_DCE:
			/* LMI talks on its own reserved DLCI */
			hdlc_to_dev(hdlc)->addr_len = 2;
			*(u16*)hdlc_to_dev(hdlc)->dev_addr = htons(LMI_DLCI);
			dlci_to_q922(hdlc_to_dev(hdlc)->broadcast, LMI_DLCI);
			break;

		default:
			return -EINVAL;
		}

		/* soft modes still need the card in raw HDLC mode */
		result = hdlc->set_mode ? hdlc->set_mode(hdlc, MODE_HDLC) : 0;
	}

	if (result)
		return result;

	hdlc->mode = mode;
	if (mode_is(hdlc, MODE_PPP))
		hdlc_to_dev(hdlc)->type = ARPHRD_PPP;
	else if (mode_is(hdlc, MODE_X25))
		hdlc_to_dev(hdlc)->type = ARPHRD_X25;
	else if (mode_is(hdlc, MODE_FR))
		hdlc_to_dev(hdlc)->type = ARPHRD_FRAD;
	else	/* Conflict - raw HDLC and Cisco */
		hdlc_to_dev(hdlc)->type = ARPHRD_HDLC;

	memset(&(hdlc->stats), 0, sizeof(struct net_device_stats));
	destroy_pvc_list(hdlc);
	return 0;
}
/*
 * Unregisters an HDLC device: drop all Frame Relay PVC sub-devices
 * first, then remove the master net device and release the module
 * reference taken at registration time.
 */
void unregister_hdlc_device(hdlc_device *hdlc)
{
	destroy_pvc_list(hdlc);
	unregister_netdev(hdlc_to_dev(hdlc));
	MOD_DEC_USE_COUNT;
}
/*
 * Receive handler for Cisco HDLC control frames: validates the HDLC
 * header, answers address requests with our IP address/mask, tracks
 * keepalive sequence numbers and link state, and drops everything it
 * does not understand.  Consumes the skb on all paths.
 */
static void cisco_rx(struct sk_buff *skb)
{
	hdlc_device *hdlc = dev_to_hdlc(skb->dev);
	hdlc_header *data = (hdlc_header*)skb->data;
	cisco_packet *cisco_data;
	struct in_device *in_dev;
	u32 addr, mask;

	if (skb->len < sizeof(hdlc_header))
		goto rx_error;

	if (data->address != CISCO_MULTICAST &&
	    data->address != CISCO_UNICAST)
		goto rx_error;

	skb_pull(skb, sizeof(hdlc_header));

	switch(ntohs(data->protocol)) {
	case CISCO_SYS_INFO:
		/* Packet is not needed, drop it. */
		dev_kfree_skb_any(skb);
		return;

	case CISCO_KEEPALIVE:
		if (skb->len != CISCO_PACKET_LEN &&
		    skb->len != CISCO_BIG_PACKET_LEN) {
			printk(KERN_INFO "%s: Invalid length of Cisco "
			       "control packet (%d bytes)\n",
			       hdlc_to_name(hdlc), skb->len);
			goto rx_error;
		}

		cisco_data = (cisco_packet*)skb->data;

		switch(ntohl (cisco_data->type)) {
		case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
			in_dev = hdlc_to_dev(hdlc)->ip_ptr;
			addr = 0;
			mask = ~0; /* is the mask correct? */

			if (in_dev != NULL) {
				struct in_ifaddr **ifap = &in_dev->ifa_list;

				/* find the address labelled with our name */
				while (*ifap != NULL) {
					if (strcmp(hdlc_to_name(hdlc),
						   (*ifap)->ifa_label) == 0) {
						addr = (*ifap)->ifa_local;
						mask = (*ifap)->ifa_mask;
						break;
					}
					ifap = &(*ifap)->ifa_next;
				}

				/* NOTE(review): a reply is only sent when the
				   device has IP state; requests on an
				   address-less interface are silently eaten */
				cisco_keepalive_send(hdlc, CISCO_ADDR_REPLY,
						     addr, mask);
			}
			dev_kfree_skb_any(skb);
			return;

		case CISCO_ADDR_REPLY:
			printk(KERN_INFO "%s: Unexpected Cisco IP address "
			       "reply\n", hdlc_to_name(hdlc));
			goto rx_error;

		case CISCO_KEEPALIVE_REQ:
			hdlc->state.cisco.rxseq = ntohl(cisco_data->par1);
			/* peer echoed our sequence number -> link alive */
			if (ntohl(cisco_data->par2)==hdlc->state.cisco.txseq) {
				hdlc->state.cisco.last_poll = jiffies;
				if (!hdlc->state.cisco.up) {
					u32 sec, min, hrs, days;
					/* peer uptime arrives in ms */
					sec = ntohl(cisco_data->time) / 1000;
					min = sec / 60;
					sec -= min * 60;
					hrs = min / 60;
					min -= hrs * 60;
					days = hrs / 24;
					hrs -= days * 24;
					printk(KERN_INFO "%s: Link up (peer "
					       "uptime %ud%uh%um%us)\n",
					       hdlc_to_name(hdlc),
					       days, hrs, min, sec);
				}
				hdlc->state.cisco.up = 1;
			}
			dev_kfree_skb_any(skb);
			return;
		} /* switch(keepalive type) */
	} /* switch(protocol) */

	printk(KERN_INFO "%s: Unsupported protocol %x\n",
	       hdlc_to_name(hdlc), data->protocol);
	dev_kfree_skb_any(skb);
	return;

rx_error:
	hdlc->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
}
/*
 * Receive handler for the software Cisco HDLC mode: delivers data
 * traffic (IP/IPX/IPv6) to the stack, answers address requests with
 * our IP address/mask, and tracks keepalive sequence numbers and
 * link state.  Consumes the skb on all paths.
 *
 * FIXES in the CISCO_ADDR_REQ case:
 *  - the skb was never freed after sending the address reply (leak);
 *  - with no IP state attached (in_dev == NULL) control fell through
 *    into CISCO_ADDR_REPLY, logging a bogus "Unexpected ... reply"
 *    and counting a valid request as an rx error.
 * The request is now always accounted, freed, and returned from.
 */
static void cisco_netif(hdlc_device *hdlc, struct sk_buff *skb)
{
	hdlc_header *data = (hdlc_header*)skb->data;
	cisco_packet *cisco_data;

	if (skb->len < sizeof(hdlc_header))
		goto rx_error;

	if (data->address != CISCO_MULTICAST &&
	    data->address != CISCO_UNICAST)
		goto rx_error;

	skb_pull(skb, sizeof(hdlc_header));

	switch(ntohs(data->protocol)) {
#ifdef CONFIG_INET
	case ETH_P_IP:
#endif
#ifdef CONFIG_IPX
	case ETH_P_IPX:
#endif
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case ETH_P_IPV6:
#endif
#if defined(CONFIG_INET) || defined(CONFIG_IPX) || \
	defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		hdlc->stats.rx_packets++;	/* data traffic */
		hdlc->stats.rx_bytes += skb->len;
		skb->protocol = data->protocol;
		skb->dev = hdlc_to_dev(hdlc);
		netif_rx(skb);
		return;
#endif

	case CISCO_KEEPALIVE:
		if (skb->len != CISCO_PACKET_LEN &&
		    skb->len != CISCO_BIG_PACKET_LEN) {
			printk(KERN_INFO "%s: Invalid length of Cisco "
			       "control packet (%d bytes)\n",
			       hdlc->name, skb->len);
			goto rx_error;
		}

		cisco_data = (cisco_packet*)skb->data;

		switch(ntohl (cisco_data->type)) {
		case CISCO_ADDR_REQ: { /* Stolen from syncppp.c :-) */
			struct in_device *in_dev = hdlc_to_dev(hdlc)->ip_ptr;
			u32 addr = 0, mask = ~0; /* is the mask correct? */

			if (in_dev != NULL) {
				struct in_ifaddr **ifap = &in_dev->ifa_list;

				/* find the address labelled with our name */
				while (*ifap != NULL) {
					if (strcmp(hdlc_to_dev(hdlc)->name,
						   (*ifap)->ifa_label) == 0) {
						addr = (*ifap)->ifa_local;
						mask = (*ifap)->ifa_mask;
						break;
					}
					ifap = &(*ifap)->ifa_next;
				}
				cisco_keepalive_send(hdlc, CISCO_ADDR_REPLY,
						     addr, mask);
			}
			hdlc->stats.rx_bytes += skb->len;
			hdlc->stats.rx_packets++;
			dev_kfree_skb(skb);
			return;
		}

		case CISCO_ADDR_REPLY:
			printk(KERN_INFO "%s: Unexpected Cisco IP address "
			       "reply\n", hdlc->name);
			goto rx_error;

		case CISCO_KEEPALIVE_REQ:
			hdlc->lmi.rxseq = ntohl(cisco_data->par1);
			/* peer echoed our sequence number -> link alive */
			if (ntohl(cisco_data->par2) == hdlc->lmi.txseq) {
				hdlc->lmi.last_poll = jiffies;
				if (!(hdlc->lmi.state & LINK_STATE_RELIABLE)) {
					u32 sec, min, hrs, days;
					/* peer uptime arrives in ms */
					sec = ntohl(cisco_data->time)/1000;
					min = sec / 60;
					sec -= min * 60;
					hrs = min / 60;
					min -= hrs * 60;
					days = hrs / 24;
					hrs -= days * 24;
					printk(KERN_INFO "%s: Link up (peer uptime %ud%uh%um%us)\n",
					       hdlc->name, days, hrs, min, sec);
				}
				hdlc->lmi.state |= LINK_STATE_RELIABLE;
			}
			hdlc->stats.rx_bytes += skb->len;
			hdlc->stats.rx_packets++;
			dev_kfree_skb(skb);
			return;
		} /* switch(keepalive type) */
	} /* switch(protocol) */

	printk(KERN_INFO "%s: Unusupported protocol %x\n",
	       hdlc->name, data->protocol);
	hdlc->stats.rx_bytes += skb->len;
	hdlc->stats.rx_packets++;
	dev_kfree_skb(skb);
	return;

rx_error:
	hdlc->stats.rx_errors++; /* Mark error */
	dev_kfree_skb(skb);
}
/*
 * Creates (dlci > 0) or deletes (dlci < 0, negated) a Frame Relay PVC
 * sub-device on this HDLC device.  Creation builds and registers a
 * "pvc%d" net device linked to the master; deletion requires the PVC
 * interface to be down.  Returns 0 or a negative errno.
 *
 * FIX: the kmalloc() result was used unchecked -- memset() on a NULL
 * pointer crashed under memory pressure; it now returns -ENOBUFS.
 */
static int hdlc_fr_pvc(hdlc_device *hdlc, int dlci)
{
	pvc_device **pvc_p = &hdlc->first_pvc;
	pvc_device *pvc;
	int result, create = 1;	/* Create or delete PVC */

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (dlci < 0) {		/* negative DLCI requests deletion */
		dlci = -dlci;
		create = 0;
	}

	if (dlci <= 0 || dlci >= 1024)
		return -EINVAL;	/* Only 10 bits for DLCI, DLCI=0 is reserved */

	if (!mode_is(hdlc, MODE_FR))
		return -EINVAL;	/* Only meaningfull on FR */

	/* locate an existing PVC with this DLCI, if any */
	while (*pvc_p) {
		if (netdev_dlci(&(*pvc_p)->netdev) == dlci)
			break;
		pvc_p = &(*pvc_p)->next;
	}

	if (create) {		/* Create PVC */
		if (*pvc_p != NULL)
			return -EEXIST;

		*pvc_p = kmalloc(sizeof(pvc_device), GFP_KERNEL);
		pvc = *pvc_p;
		if (pvc == NULL)
			return -ENOBUFS;
		memset(pvc, 0, sizeof(pvc_device));

		pvc->netdev.name = pvc->name;
		pvc->netdev.hard_start_xmit = pvc_xmit;
		pvc->netdev.get_stats = pvc_get_stats;
		pvc->netdev.open = pvc_open;
		pvc->netdev.stop = pvc_close;
		pvc->netdev.change_mtu = pvc_change_mtu;
		pvc->netdev.mtu = PVC_MAX_MTU;
		pvc->netdev.type = ARPHRD_DLCI;
		pvc->netdev.hard_header_len = 16;
		pvc->netdev.hard_header = fr_hard_header;
		pvc->netdev.tx_queue_len = 0;
		pvc->netdev.flags = IFF_POINTOPOINT;
		dev_init_buffers(&pvc->netdev);

		pvc->master = hdlc;
		*(u16*)pvc->netdev.dev_addr = htons(dlci);
		dlci_to_q922(pvc->netdev.broadcast, dlci);
		pvc->netdev.addr_len = 2;	/* 16 bits is enough */
		pvc->netdev.irq = hdlc_to_dev(hdlc)->irq;

		result = dev_alloc_name(&pvc->netdev, "pvc%d");
		if (result < 0) {
			kfree(pvc);
			*pvc_p = NULL;
			return result;
		}

		if (register_netdevice(&pvc->netdev) != 0) {
			kfree(pvc);
			*pvc_p = NULL;
			return -EIO;
		}

		/* hardware modes let the card driver set up the PVC too */
		if (!mode_is(hdlc, MODE_SOFT) && hdlc->create_pvc) {
			result = hdlc->create_pvc(pvc);
			if (result) {
				unregister_netdevice(&pvc->netdev);
				kfree(pvc);
				*pvc_p = NULL;
				return result;
			}
		}

		hdlc->lmi.state |= LINK_STATE_CHANGED;
		hdlc->pvc_count++;
		return 0;
	}

	if (*pvc_p == NULL)	/* Delete PVC */
		return -ENOENT;

	pvc = *pvc_p;

	if (pvc->netdev.flags & IFF_UP)
		return -EBUSY;	/* PVC in use */

	if (!mode_is(hdlc, MODE_SOFT) && hdlc->destroy_pvc)
		hdlc->destroy_pvc(pvc);

	hdlc->lmi.state |= LINK_STATE_CHANGED;
	hdlc->pvc_count--;
	*pvc_p = pvc->next;	/* unlink before unregistering */
	unregister_netdevice(&pvc->netdev);
	kfree(pvc);
	return 0;
}
/*
 * Switches the device to a new protocol mode.  First offers the mode
 * to the card driver (hardware/firmware protocol support); if that is
 * unavailable or fails, falls back to the "host software" (MODE_SOFT)
 * implementation, installing the matching hard_header builder and,
 * for Frame Relay, the LMI DLCI address.  Finally sets the matching
 * ARP hardware type, clears statistics and drops any existing PVC
 * list.  Returns 0 or a negative errno.
 */
static int hdlc_set_mode(hdlc_device *hdlc, int mode)
{
	int result = -1;	/* Default to soft modes */
	struct net_device *dev = hdlc_to_dev(hdlc);

	if(!capable(CAP_NET_ADMIN))
		return -EPERM;
	if(dev->flags & IFF_UP)
		return -EBUSY;	/* must be down to switch modes */

	dev->addr_len = 0;
	dev->hard_header = NULL;
	hdlc->mode = MODE_NONE;

	/* try hardware/firmware protocol support first */
	if (!(mode & MODE_SOFT))
		switch(mode & MODE_MASK) {
		case MODE_HDLC:
			result = hdlc->set_mode ?
				hdlc->set_mode(hdlc, MODE_HDLC) : 0;
			break;

		case MODE_CISCO: /* By card */
#ifdef CONFIG_HDLC_PPP
		case MODE_PPP:
#endif
#ifdef CONFIG_HDLC_X25
		case MODE_X25:
#endif
		case MODE_FR:
			result = hdlc->set_mode ?
				hdlc->set_mode(hdlc, mode) : -ENOSYS;
			break;

		default:
			return -EINVAL;
		}

	if (result) {
		mode |= MODE_SOFT; /* Try "host software" protocol */
		switch(mode & MODE_MASK) {
		case MODE_CISCO:
			dev->hard_header = cisco_hard_header;
			break;

#ifdef CONFIG_HDLC_PPP
		case MODE_PPP:
			break;
#endif
#ifdef CONFIG_HDLC_X25
		case MODE_X25:
			break;
#endif
		case MODE_FR:
			/* LMI talks on its own reserved DLCI */
			dev->hard_header = fr_hard_header;
			dev->addr_len = 2;
			*(u16*)dev->dev_addr = htons(LMI_DLCI);
			dlci_to_q922(dev->broadcast, LMI_DLCI);
			break;

		default:
			return -EINVAL;
		}

		/* soft modes still need the card in raw HDLC mode */
		result = hdlc->set_mode ? hdlc->set_mode(hdlc, MODE_HDLC) : 0;
	}

	if (result)
		return result;

	hdlc->mode = mode;
	/* pick the ARP hardware type matching the new mode */
	switch(mode & MODE_MASK) {
#ifdef CONFIG_HDLC_PPP
	case MODE_PPP: dev->type = ARPHRD_PPP; break;
#endif
#ifdef CONFIG_HDLC_X25
	case MODE_X25: dev->type = ARPHRD_X25; break;
#endif
	case MODE_FR: dev->type = ARPHRD_FRAD; break;
	case MODE_CISCO: dev->type = ARPHRD_CISCO; break;
	default: dev->type = ARPHRD_RAWHDLC;
	}

	memset(&(hdlc->stats), 0, sizeof(struct net_device_stats));
	destroy_pvc_list(hdlc);
	return 0;
}