static void rose_del_route_by_neigh(struct rose_neigh *rose_neigh)
{
	struct rose_route *rose_route, *s;

	rose_neigh->restarted = 0;

	rose_stop_t0timer(rose_neigh);
	rose_start_ftimer(rose_neigh);

	skb_queue_purge(&rose_neigh->queue);

	rose_route = rose_route_list;

	while (rose_route != NULL) {
		if ((rose_route->neigh1 == rose_neigh && rose_route->neigh2 == rose_neigh) ||
		    (rose_route->neigh1 == rose_neigh && rose_route->neigh2 == NULL) ||
		    (rose_route->neigh2 == rose_neigh && rose_route->neigh1 == NULL)) {
			s = rose_route->next;
			rose_remove_route(rose_route);
			rose_route = s;
			continue;
		}

		if (rose_route->neigh1 == rose_neigh) {
			rose_route->neigh1->use--;
			rose_route->neigh1 = NULL;
			rose_transmit_clear_request(rose_route->neigh2, rose_route->lci2, ROSE_OUT_OF_ORDER, 0);
		}

		if (rose_route->neigh2 == rose_neigh) {
			rose_route->neigh2->use--;
			rose_route->neigh2 = NULL;
			rose_transmit_clear_request(rose_route->neigh1, rose_route->lci1, ROSE_OUT_OF_ORDER, 0);
		}

		rose_route = rose_route->next;
	}
}
static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
					 struct sk_buff *skb)
{
	struct nci_rf_deactivate_ntf *ntf = (void *) skb->data;

	pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);

	/* drop tx data queue */
	skb_queue_purge(&ndev->tx_q);

	/* drop partial rx data packet */
	if (ndev->rx_data_reassembly) {
		kfree_skb(ndev->rx_data_reassembly);
		ndev->rx_data_reassembly = NULL;
	}

	/* complete the data exchange transaction, if exists */
	if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
		nci_data_exchange_complete(ndev, NULL, -EIO);

	switch (ntf->type) {
	case NCI_DEACTIVATE_TYPE_IDLE_MODE:
		nci_clear_target_list(ndev);
		atomic_set(&ndev->state, NCI_IDLE);
		break;
	case NCI_DEACTIVATE_TYPE_SLEEP_MODE:
	case NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE:
		atomic_set(&ndev->state, NCI_W4_HOST_SELECT);
		break;
	case NCI_DEACTIVATE_TYPE_DISCOVERY:
		nci_clear_target_list(ndev);
		atomic_set(&ndev->state, NCI_DISCOVERY);
		break;
	}

	nci_req_complete(ndev, NCI_STATUS_OK);
}
static void eicon_freecard(eicon_card *card)
{
	int i;

	for (i = 0; i < (card->nchannels + 1); i++) {
		skb_queue_purge(&card->bch[i].e.X);
		skb_queue_purge(&card->bch[i].e.R);
	}
	skb_queue_purge(&card->sndq);
	skb_queue_purge(&card->rcvq);
	skb_queue_purge(&card->rackq);
	skb_queue_purge(&card->sackq);
	skb_queue_purge(&card->statq);

#ifdef CONFIG_ISDN_DRV_EICON_PCI
	kfree(card->sbufp);
	kfree(card->sbuf);
	kfree(card->dbuf);
#endif
	kfree(card->bch);
	kfree(card);
}
static int w6692_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct w6692_ch *bc = container_of(bch, struct w6692_ch, bch);
	struct w6692_hw *card = bch->hw;
	int ret = -EINVAL;
	u_long flags;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case CLOSE_CHANNEL:
		test_and_clear_bit(FLG_OPEN, &bch->Flags);
		if (test_bit(FLG_ACTIVE, &bch->Flags)) {
			spin_lock_irqsave(&card->lock, flags);
			mISDN_freebchannel(bch);
			w6692_mode(bc, ISDN_P_NONE);
			spin_unlock_irqrestore(&card->lock, flags);
		} else {
			skb_queue_purge(&bch->rqueue);
			bch->rcount = 0;
		}
		ch->protocol = ISDN_P_NONE;
		ch->peer = NULL;
		module_put(THIS_MODULE);
		ret = 0;
		break;
	case CONTROL_CHANNEL:
		ret = channel_bctrl(bch, arg);
		break;
	default:
		pr_info("%s: %s unknown prim(%x)\n", card->name, __func__, cmd);
	}
	return ret;
}
static void sigd_close(struct atm_vcc *vcc)
{
	struct hlist_node *node;
	struct sock *s;
	int i;

	DPRINTK("sigd_close\n");
	sigd = NULL;
	if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
		printk(KERN_ERR "sigd_close: closing with requests pending\n");
	skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);

	read_lock(&vcc_sklist_lock);
	for (i = 0; i < VCC_HTABLE_SIZE; ++i) {
		struct hlist_head *head = &vcc_hash[i];

		sk_for_each(s, node, head) {
			struct atm_vcc *vcc = atm_sk(s);

			purge_vcc(vcc);
		}
	}
	read_unlock(&vcc_sklist_lock);
}
void mem_handle_cp_crash(struct mem_link_device *mld, enum modem_state state)
{
	struct link_device *ld = &mld->link_dev;
	struct modem_ctl *mc = ld->mc;
	int i;

	/* Disable normal IPC */
	set_magic(mld, MEM_CRASH_MAGIC);
	set_access(mld, 0);

	if (!wake_lock_active(&mld->dump_wlock))
		wake_lock(&mld->dump_wlock);

	stop_net_ifaces(ld);

	/* Purge the skb_txq in every IPC device (IPC_FMT, IPC_RAW, etc.) */
	for (i = 0; i < MAX_SIPC5_DEV; i++)
		skb_queue_purge(mld->dev[i]->skb_txq);

	if (cp_online(mc))
		set_modem_state(mld, state);

	mld->forced_cp_crash = false;
}
static int mptp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct mptp_sock *ssk = mptp_sk(sk);

	if (unlikely(!sk))
		return 0;

	mptp_unhash(ssk->src);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	synchronize_net();

	sock_orphan(sk);
	sock->sk = NULL;

	skb_queue_purge(&sk->sk_receive_queue);

	log_debug("mptp_release sock=%p\n", sk);
	sock_put(sk);

	return 0;
}
static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
					 struct sk_buff *skb)
{
	struct nci_rf_deactivate_ntf *ntf = (void *) skb->data;

	pr_debug("entry, type 0x%x, reason 0x%x\n", ntf->type, ntf->reason);

	/* drop tx data queue */
	skb_queue_purge(&ndev->tx_q);

	/* drop partial rx data packet */
	if (ndev->rx_data_reassembly) {
		kfree_skb(ndev->rx_data_reassembly);
		ndev->rx_data_reassembly = NULL;
	}

	/* complete the data exchange transaction, if exists */
	if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
		nci_data_exchange_complete(ndev, NULL, -EIO);

	nci_clear_target_list(ndev);
	atomic_set(&ndev->state, NCI_IDLE);
	nci_req_complete(ndev, NCI_STATUS_OK);
}
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	netlink_remove(sk);

	spin_lock(&sk->protinfo.af_netlink->cb_lock);
	if (sk->protinfo.af_netlink->cb) {
		sk->protinfo.af_netlink->cb->done(sk->protinfo.af_netlink->cb);
		netlink_destroy_callback(sk->protinfo.af_netlink->cb);
		sk->protinfo.af_netlink->cb = NULL;
		__sock_put(sk);
	}
	spin_unlock(&sk->protinfo.af_netlink->cb_lock);

	/* OK. Socket is unlinked, and, therefore,
	   no new packets will arrive */

	sock_orphan(sk);
	sock->sk = NULL;
	wake_up_interruptible_all(&sk->protinfo.af_netlink->wait);

	skb_queue_purge(&sk->write_queue);

	if (sk->protinfo.af_netlink->pid && !sk->protinfo.af_netlink->groups) {
		struct netlink_notify n = { protocol: sk->protocol,
					    pid: sk->protinfo.af_netlink->pid };
		notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
	}

	sock_put(sk);
	return 0;
}
static void teql_destroy(struct Qdisc *sch)
{
	struct Qdisc *q, *prev;
	struct teql_sched_data *dat = qdisc_priv(sch);
	struct teql_master *master = dat->m;

	prev = master->slaves;
	if (prev) {
		do {
			q = NEXT_SLAVE(prev);
			if (q == sch) {
				NEXT_SLAVE(prev) = NEXT_SLAVE(q);
				if (q == master->slaves) {
					master->slaves = NEXT_SLAVE(q);
					if (q == master->slaves) {
						struct netdev_queue *txq;
						spinlock_t *root_lock;

						txq = netdev_get_tx_queue(master->dev, 0);
						master->slaves = NULL;

						root_lock = qdisc_root_sleeping_lock(txq->qdisc);
						spin_lock_bh(root_lock);
						qdisc_reset(txq->qdisc);
						spin_unlock_bh(root_lock);
					}
				}
				skb_queue_purge(&dat->q);
				teql_neigh_release(xchg(&dat->ncache, NULL));
				break;
			}
		} while ((prev = q) != master->slaves);
	}
}
static void rose_remove_neigh(struct rose_neigh *rose_neigh)
{
	struct rose_neigh *s;
	unsigned long flags;

	rose_stop_ftimer(rose_neigh);
	rose_stop_t0timer(rose_neigh);

	skb_queue_purge(&rose_neigh->queue);

	save_flags(flags); cli();

	if ((s = rose_neigh_list) == rose_neigh) {
		rose_neigh_list = rose_neigh->next;
		restore_flags(flags);
		if (rose_neigh->digipeat != NULL)
			kfree(rose_neigh->digipeat);
		kfree(rose_neigh);
		return;
	}

	while (s != NULL && s->next != NULL) {
		if (s->next == rose_neigh) {
			s->next = rose_neigh->next;
			restore_flags(flags);
			if (rose_neigh->digipeat != NULL)
				kfree(rose_neigh->digipeat);
			kfree(rose_neigh);
			return;
		}

		s = s->next;
	}

	restore_flags(flags);
}
void eemcs_ipc_state_callback_func(EEMCS_STATE state)
{
	switch (state) {
	case EEMCS_EXCEPTION:
	case EEMCS_GATE: /* MD reset */
		if (eemcs_ipc_inst.md_is_ready) {
			int i;

			eemcs_ipc_inst.md_is_ready = 0;
			for (i = 0; i < EEMCS_IPCD_MAX_NUM; i++) {
				DBGLOG(IPCD, TRA, "ipc_state_callback: Clean device(%s) when ipc_sta=%d",
				       eemcs_ipc_inst.ipc_node[i].dev_name, state);
				skb_queue_purge(&eemcs_ipc_inst.ipc_node[i].rx_skb_list);
				atomic_set(&eemcs_ipc_inst.ipc_node[i].rx_pkt_cnt, 0);
			}
		}
		break;
	case EEMCS_BOOTING_DONE:
		DBGLOG(IPCD, TRA, "ipc_state_callback: MD booting DONE");
		wake_up_interruptible(&eemcs_ipc_inst.state_waitq);
		eemcs_ipc_inst.md_is_ready = 1;
		break;
	default:
		break;
	}
}
static int eemcs_ipc_kern_open(int id)
{
	int ret = 0;

	DEBUG_LOG_FUNCTION_ENTRY;

	if (id >= EEMCS_IPCD_MAX_NUM || id < 0) {
		DBGLOG(IPCD, ERR, "Wrong minor num(%d)", id);
		return -EINVAL;
	}

	DBGLOG(IPCD, DEF, "ipc_kern_open: device(%s) iminor(%d)",
	       eemcs_ipc_inst.ipc_node[id].dev_name, id);

	/* <1> check multiple open */
	if (IPCD_CLOSE != atomic_read(&eemcs_ipc_inst.ipc_node[id].dev_state)) {
		DBGLOG(IPCD, ERR, "PORT%d multi-open fail!", id);
		return -EIO;
	}

	/* <2> clear the rx_skb_list */
	skb_queue_purge(&eemcs_ipc_inst.ipc_node[id].rx_skb_list);
	atomic_set(&eemcs_ipc_inst.ipc_node[id].rx_pkt_cnt, 0);

	atomic_set(&eemcs_ipc_inst.ipc_node[id].dev_state, IPCD_KERNEL);

	DEBUG_LOG_FUNCTION_LEAVE;
	return ret;
}
/*
 * Prototype    : exit_oamkernel
 * Description  : oam kernel exit
 * Input        : void
 * Return Value : void
 * Calls        :
 * Called By    :
 *
 * History      :
 *  1. Date         : 2012/5/23
 *     Author       : kf74033
 *     Modification : Created function
 */
void __exit exit_oamkernel(void)
{
#ifdef SDT_OAM_FOR_1151
	{
		oam_sdt_unregister(&tx_action);
		kfree(tx_action);
	}
#else
	hwifi_rx_extern_unregister(HCC_OAM_TEST);
#endif
	if (gst_kerenlglobal.pst_nlsk != NULL) {
		OS_SOCK_RELEASE(gst_kerenlglobal.pst_nlsk->sk_socket);
	}
	OS_MEM_KFREE(gst_kerenlglobal.puc_data);
	destroy_workqueue(gst_kerenlglobal.oam_rx_workqueue);
	skb_queue_purge(&gst_kerenlglobal.rx_wifi_dbg_seq);

	OAM_INFO("oamkernel remove ok.\n");

	return;
}
static void l2_connected(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int pr = -1;

	if (!get_PollFlag(l2, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
		l2_disconnect(fi, event, NULL);
	if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
		pr = DL_ESTABLISH_CNF;
	} else if (l2->vs != l2->va) {
		skb_queue_purge(&l2->i_queue);
		pr = DL_ESTABLISH_IND;
	}
	stop_t200(l2, 5);
	l2->vr = 0;
	l2->vs = 0;
	l2->va = 0;
	l2->sow = 0;
	mISDN_FsmChangeState(fi, ST_L2_7);
	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
	if (pr != -1)
		l2up_create(l2, pr, 0, NULL);
	if (skb_queue_len(&l2->i_queue) && cansend(l2))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
static void bch_l2l1(struct hisax_if *ifc, int pr, void *arg)
{
	struct hfc4s8s_btype *bch = ifc->priv;
	struct hfc4s8s_l1 *l1 = bch->l1p;
	struct sk_buff *skb = (struct sk_buff *) arg;
	long mode = (long) arg;
	u_long flags;

	switch (pr) {

	case (PH_DATA | REQUEST):
		if (!l1->enabled || (bch->mode == L1_MODE_NULL)) {
			dev_kfree_skb(skb);
			break;
		}
		spin_lock_irqsave(&l1->lock, flags);
		skb_queue_tail(&bch->tx_queue, skb);
		if (!bch->tx_skb && (bch->tx_cnt <= 0)) {
			l1->hw->mr.r_irq_fifo_blx[l1->st_num] |=
			    ((bch->bchan == 1) ? 1 : 4);
			spin_unlock_irqrestore(&l1->lock, flags);
			schedule_work(&l1->hw->tqueue);
		} else
			spin_unlock_irqrestore(&l1->lock, flags);
		break;

	case (PH_ACTIVATE | REQUEST):
	case (PH_DEACTIVATE | REQUEST):
		if (!l1->enabled)
			break;
		if (pr == (PH_DEACTIVATE | REQUEST))
			mode = L1_MODE_NULL;

		switch (mode) {
		case L1_MODE_HDLC:
			spin_lock_irqsave(&l1->lock, flags);
			l1->hw->mr.timer_usg_cnt++;
			l1->hw->mr.fifo_slow_timer_service[l1->st_num] |=
			    ((bch->bchan == 1) ? 0x2 : 0x8);
			Write_hfc8(l1->hw, R_FIFO,
				   (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2)));
			wait_busy(l1->hw);
			Write_hfc8(l1->hw, A_CON_HDLC, 0xc);	/* HDLC mode, flag fill, connect ST */
			Write_hfc8(l1->hw, A_SUBCH_CFG, 0);	/* 8 bits */
			Write_hfc8(l1->hw, A_IRQ_MSK, 1);	/* enable TX interrupts for hdlc */
			Write_hfc8(l1->hw, A_INC_RES_FIFO, 2);	/* reset fifo */
			wait_busy(l1->hw);

			Write_hfc8(l1->hw, R_FIFO,
				   (l1->st_num * 8 + ((bch->bchan == 1) ? 1 : 3)));
			wait_busy(l1->hw);
			Write_hfc8(l1->hw, A_CON_HDLC, 0xc);	/* HDLC mode, flag fill, connect ST */
			Write_hfc8(l1->hw, A_SUBCH_CFG, 0);	/* 8 bits */
			Write_hfc8(l1->hw, A_IRQ_MSK, 1);	/* enable RX interrupts for hdlc */
			Write_hfc8(l1->hw, A_INC_RES_FIFO, 2);	/* reset fifo */

			Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
			l1->hw->mr.r_ctrl0 |= (bch->bchan & 3);
			Write_hfc8(l1->hw, A_ST_CTRL0, l1->hw->mr.r_ctrl0);
			bch->mode = L1_MODE_HDLC;
			spin_unlock_irqrestore(&l1->lock, flags);

			bch->b_if.ifc.l1l2(&bch->b_if.ifc,
					   PH_ACTIVATE | INDICATION, NULL);
			break;

		case L1_MODE_TRANS:
			spin_lock_irqsave(&l1->lock, flags);
			l1->hw->mr.fifo_rx_trans_enables[l1->st_num] |=
			    ((bch->bchan == 1) ? 0x2 : 0x8);
			l1->hw->mr.timer_usg_cnt++;
			Write_hfc8(l1->hw, R_FIFO,
				   (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2)));
			wait_busy(l1->hw);
			Write_hfc8(l1->hw, A_CON_HDLC, 0xf);	/* Transparent mode, 1 fill, connect ST */
			Write_hfc8(l1->hw, A_SUBCH_CFG, 0);	/* 8 bits */
			Write_hfc8(l1->hw, A_IRQ_MSK, 0);	/* disable TX interrupts */
			Write_hfc8(l1->hw, A_INC_RES_FIFO, 2);	/* reset fifo */
			wait_busy(l1->hw);

			Write_hfc8(l1->hw, R_FIFO,
				   (l1->st_num * 8 + ((bch->bchan == 1) ? 1 : 3)));
			wait_busy(l1->hw);
			Write_hfc8(l1->hw, A_CON_HDLC, 0xf);	/* Transparent mode, 1 fill, connect ST */
			Write_hfc8(l1->hw, A_SUBCH_CFG, 0);	/* 8 bits */
			Write_hfc8(l1->hw, A_IRQ_MSK, 0);	/* disable RX interrupts */
			Write_hfc8(l1->hw, A_INC_RES_FIFO, 2);	/* reset fifo */

			Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
			l1->hw->mr.r_ctrl0 |= (bch->bchan & 3);
			Write_hfc8(l1->hw, A_ST_CTRL0, l1->hw->mr.r_ctrl0);
			bch->mode = L1_MODE_TRANS;
			spin_unlock_irqrestore(&l1->lock, flags);

			bch->b_if.ifc.l1l2(&bch->b_if.ifc,
					   PH_ACTIVATE | INDICATION, NULL);
			break;

		default:
			if (bch->mode == L1_MODE_NULL)
				break;
			spin_lock_irqsave(&l1->lock, flags);
			l1->hw->mr.fifo_slow_timer_service[l1->st_num] &=
			    ~((bch->bchan == 1) ? 0x3 : 0xc);
			l1->hw->mr.fifo_rx_trans_enables[l1->st_num] &=
			    ~((bch->bchan == 1) ? 0x3 : 0xc);
			l1->hw->mr.timer_usg_cnt--;
			Write_hfc8(l1->hw, R_FIFO,
				   (l1->st_num * 8 + ((bch->bchan == 1) ? 0 : 2)));
			wait_busy(l1->hw);
			Write_hfc8(l1->hw, A_IRQ_MSK, 0);	/* disable TX interrupts */
			wait_busy(l1->hw);

			Write_hfc8(l1->hw, R_FIFO,
				   (l1->st_num * 8 + ((bch->bchan == 1) ? 1 : 3)));
			wait_busy(l1->hw);
			Write_hfc8(l1->hw, A_IRQ_MSK, 0);	/* disable RX interrupts */

			Write_hfc8(l1->hw, R_ST_SEL, l1->st_num);
			l1->hw->mr.r_ctrl0 &= ~(bch->bchan & 3);
			Write_hfc8(l1->hw, A_ST_CTRL0, l1->hw->mr.r_ctrl0);
			spin_unlock_irqrestore(&l1->lock, flags);

			bch->mode = L1_MODE_NULL;
			bch->b_if.ifc.l1l2(&bch->b_if.ifc,
					   PH_DEACTIVATE | INDICATION, NULL);
			if (bch->tx_skb) {
				dev_kfree_skb(bch->tx_skb);
				bch->tx_skb = NULL;
			}
			if (bch->rx_skb) {
				dev_kfree_skb(bch->rx_skb);
				bch->rx_skb = NULL;
			}
			skb_queue_purge(&bch->tx_queue);
			bch->tx_cnt = 0;
			bch->rx_ptr = NULL;
			break;
		}

		/* timer is only used when at least one b channel */
		/* is set up to transparent mode */
		if (l1->hw->mr.timer_usg_cnt) {
			Write_hfc8(l1->hw, R_IRQMSK_MISC, M_TI_IRQMSK);
		} else {
			Write_hfc8(l1->hw, R_IRQMSK_MISC, 0);
		}

		break;

	default:
		printk(KERN_INFO
		       "HFC-4S/8S: Unknown B-chan cmd 0x%x received, ignored\n",
		       pr);
		break;
	}
	if (!l1->enabled)
		bch->b_if.ifc.l1l2(&bch->b_if.ifc,
				   PH_DEACTIVATE | INDICATION, NULL);
}				/* bch_l2l1 */
static void W6692_l1hw(struct PStack *st, int pr, void *arg)
{
	struct IsdnCardState *cs = (struct IsdnCardState *) st->l1.hardware;
	struct sk_buff *skb = arg;
	int val;

	switch (pr) {
	case (PH_DATA | REQUEST):
		if (cs->debug & DEB_DLOG_HEX)
			LogFrame(cs, skb->data, skb->len);
		if (cs->debug & DEB_DLOG_VERBOSE)
			dlogframe(cs, skb, 0);
		if (cs->tx_skb) {
			skb_queue_tail(&cs->sq, skb);
#ifdef L2FRAME_DEBUG		/* psa */
			if (cs->debug & L1_DEB_LAPD)
				Logl2Frame(cs, skb, "PH_DATA Queued", 0);
#endif
		} else {
			cs->tx_skb = skb;
			cs->tx_cnt = 0;
#ifdef L2FRAME_DEBUG		/* psa */
			if (cs->debug & L1_DEB_LAPD)
				Logl2Frame(cs, skb, "PH_DATA", 0);
#endif
			W6692_fill_fifo(cs);
		}
		break;
	case (PH_PULL | INDICATION):
		if (cs->tx_skb) {
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, " l2l1 tx_skb exist this shouldn't happen");
			skb_queue_tail(&cs->sq, skb);
			break;
		}
		if (cs->debug & DEB_DLOG_HEX)
			LogFrame(cs, skb->data, skb->len);
		if (cs->debug & DEB_DLOG_VERBOSE)
			dlogframe(cs, skb, 0);
		cs->tx_skb = skb;
		cs->tx_cnt = 0;
#ifdef L2FRAME_DEBUG		/* psa */
		if (cs->debug & L1_DEB_LAPD)
			Logl2Frame(cs, skb, "PH_DATA_PULLED", 0);
#endif
		W6692_fill_fifo(cs);
		break;
	case (PH_PULL | REQUEST):
#ifdef L2FRAME_DEBUG		/* psa */
		if (cs->debug & L1_DEB_LAPD)
			debugl1(cs, "-> PH_REQUEST_PULL");
#endif
		if (!cs->tx_skb) {
			test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
			st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
		} else
			test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
		break;
	case (HW_RESET | REQUEST):
		if ((cs->dc.w6692.ph_state == W_L1IND_DRD))
			ph_command(cs, W_L1CMD_ECK);
		else {
			ph_command(cs, W_L1CMD_RST);
			cs->dc.w6692.ph_state = W_L1CMD_RST;
			W6692_new_ph(cs);
		}
		break;
	case (HW_ENABLE | REQUEST):
		ph_command(cs, W_L1CMD_ECK);
		break;
	case (HW_INFO3 | REQUEST):
		ph_command(cs, W_L1CMD_AR8);
		break;
	case (HW_TESTLOOP | REQUEST):
		val = 0;
		if (1 & (long) arg)
			val |= 0x0c;
		if (2 & (long) arg)
			val |= 0x3;
		/* !!! not implemented yet */
		break;
	case (HW_DEACTIVATE | RESPONSE):
		skb_queue_purge(&cs->rq);
		skb_queue_purge(&cs->sq);
		if (cs->tx_skb) {
			dev_kfree_skb_any(cs->tx_skb);
			cs->tx_skb = NULL;
		}
		if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
			del_timer(&cs->dbusytimer);
		if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
			W6692_sched_event(cs, D_CLEARBUSY);
		break;
	default:
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "W6692_l1hw unknown %04x", pr);
		break;
	}
}
void __exit nr_loopback_clear(void)
{
	del_timer_sync(&loopback_timer);
	skb_queue_purge(&loopback_queue);
}
void usbnet_purge_paused_rxq(struct usbnet *dev)
{
	skb_queue_purge(&dev->rxq_pause);
}
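/*
 * Illustrative sketch only (not from any driver in this file): the
 * sk_buff_head lifecycle that the wrappers above rely on. A queue is
 * initialized once with skb_queue_head_init(), producers append with
 * skb_queue_tail(), and skb_queue_purge() drops and frees whatever is
 * still pending. All names below (demo_ctx, demo_*) are hypothetical.
 */
#include <linux/skbuff.h>

struct demo_ctx {
	struct sk_buff_head rxq;	/* pending receive buffers */
};

static void demo_init(struct demo_ctx *ctx)
{
	/* sets up the list head and the queue's internal spinlock */
	skb_queue_head_init(&ctx->rxq);
}

static void demo_rx(struct demo_ctx *ctx, struct sk_buff *skb)
{
	/* takes the queue lock internally, so it is safe against other queuers */
	skb_queue_tail(&ctx->rxq, skb);
}

static void demo_teardown(struct demo_ctx *ctx)
{
	/* dequeues under the queue lock and kfree_skb()s every entry */
	skb_queue_purge(&ctx->rxq);
}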
void hisax_unregister(struct hisax_d_if *hisax_d_if)
{
	cards[hisax_d_if->cs->cardnr].typ = 0;
	HiSax_closecard(hisax_d_if->cs->cardnr);
	skb_queue_purge(&hisax_d_if->erq);
}
int ipv6_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int optlen)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	int val, valbool;
	int retv = -ENOPROTOOPT;

	if (level == SOL_IP && sk->sk_type != SOCK_RAW)
		return udp_prot.setsockopt(sk, level, optname, optval, optlen);

	if (level != SOL_IPV6)
		goto out;

	if (optval == NULL)
		val = 0;
	else if (get_user(val, (int __user *) optval))
		return -EFAULT;

	valbool = (val != 0);

	lock_sock(sk);

	switch (optname) {

	case IPV6_ADDRFORM:
		if (val == PF_INET) {
			struct ipv6_txoptions *opt;
			struct sk_buff *pktopt;

			if (sk->sk_protocol != IPPROTO_UDP &&
			    sk->sk_protocol != IPPROTO_TCP)
				break;

			if (sk->sk_state != TCP_ESTABLISHED) {
				retv = -ENOTCONN;
				break;
			}

			if (ipv6_only_sock(sk) ||
			    !(ipv6_addr_type(&np->daddr) & IPV6_ADDR_MAPPED)) {
				retv = -EADDRNOTAVAIL;
				break;
			}

			fl6_free_socklist(sk);
			ipv6_sock_mc_close(sk);

			if (sk->sk_protocol == IPPROTO_TCP) {
				struct tcp_sock *tp = tcp_sk(sk);

				local_bh_disable();
				sock_prot_dec_use(sk->sk_prot);
				sock_prot_inc_use(&tcp_prot);
				local_bh_enable();
				sk->sk_prot = &tcp_prot;
				tp->af_specific = &ipv4_specific;
				sk->sk_socket->ops = &inet_stream_ops;
				sk->sk_family = PF_INET;
				tcp_sync_mss(sk, tp->pmtu_cookie);
			} else {
				local_bh_disable();
				sock_prot_dec_use(sk->sk_prot);
				sock_prot_inc_use(&udp_prot);
				local_bh_enable();
				sk->sk_prot = &udp_prot;
				sk->sk_socket->ops = &inet_dgram_ops;
				sk->sk_family = PF_INET;
			}
			opt = xchg(&np->opt, NULL);
			if (opt)
				sock_kfree_s(sk, opt, opt->tot_len);
			pktopt = xchg(&np->pktoptions, NULL);
			if (pktopt)
				kfree_skb(pktopt);

			sk->sk_destruct = inet_sock_destruct;
#ifdef INET_REFCNT_DEBUG
			atomic_dec(&inet6_sock_nr);
#endif
			module_put(THIS_MODULE);
			retv = 0;
			break;
		}
		goto e_inval;

	case IPV6_V6ONLY:
		if (inet_sk(sk)->num)
			goto e_inval;
		np->ipv6only = valbool;
		retv = 0;
		break;

	case IPV6_PKTINFO:
		np->rxopt.bits.rxinfo = valbool;
		retv = 0;
		break;

	case IPV6_HOPLIMIT:
		np->rxopt.bits.rxhlim = valbool;
		retv = 0;
		break;

	case IPV6_RTHDR:
		if (val < 0 || val > 2)
			goto e_inval;
		np->rxopt.bits.srcrt = val;
		retv = 0;
		break;

	case IPV6_HOPOPTS:
		np->rxopt.bits.hopopts = valbool;
		retv = 0;
		break;

	case IPV6_DSTOPTS:
		np->rxopt.bits.dstopts = valbool;
		retv = 0;
		break;

	case IPV6_FLOWINFO:
		np->rxopt.bits.rxflow = valbool;
		retv = 0;
		break;

	case IPV6_PKTOPTIONS:
	{
		struct ipv6_txoptions *opt = NULL;
		struct msghdr msg;
		struct flowi fl;
		int junk;

		fl.fl6_flowlabel = 0;
		fl.oif = sk->sk_bound_dev_if;

		if (optlen == 0)
			goto update;

		/* 1K is probably excessive
		 * 1K is surely not enough, 2K per standard header is 16K.
		 */
		retv = -EINVAL;
		if (optlen > 64*1024)
			break;

		opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL);
		retv = -ENOBUFS;
		if (opt == NULL)
			break;

		memset(opt, 0, sizeof(*opt));
		opt->tot_len = sizeof(*opt) + optlen;
		retv = -EFAULT;
		if (copy_from_user(opt+1, optval, optlen))
			goto done;

		msg.msg_controllen = optlen;
		msg.msg_control = (void *)(opt+1);

		retv = datagram_send_ctl(&msg, &fl, opt, &junk);
		if (retv)
			goto done;
update:
		retv = 0;
		if (sk->sk_type == SOCK_STREAM) {
			if (opt) {
				struct tcp_sock *tp = tcp_sk(sk);
				if (!((1 << sk->sk_state) &
				      (TCPF_LISTEN | TCPF_CLOSE)) &&
				    inet_sk(sk)->daddr != LOOPBACK4_IPV6) {
					tp->ext_header_len = opt->opt_flen + opt->opt_nflen;
					tcp_sync_mss(sk, tp->pmtu_cookie);
				}
			}
			opt = xchg(&np->opt, opt);
			sk_dst_reset(sk);
		} else {
			write_lock(&sk->sk_dst_lock);
			opt = xchg(&np->opt, opt);
			write_unlock(&sk->sk_dst_lock);
			sk_dst_reset(sk);
		}

done:
		if (opt)
			sock_kfree_s(sk, opt, opt->tot_len);
		break;
	}
	case IPV6_UNICAST_HOPS:
		if (val > 255 || val < -1)
			goto e_inval;
		np->hop_limit = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_HOPS:
		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->mcast_hops = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_LOOP:
		np->mc_loop = valbool;
		retv = 0;
		break;

	case IPV6_MULTICAST_IF:
		if (sk->sk_type == SOCK_STREAM)
			goto e_inval;
		if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
			goto e_inval;

		if (__dev_get_by_index(val) == NULL) {
			retv = -ENODEV;
			break;
		}
		np->mcast_oif = val;
		retv = 0;
		break;
	case IPV6_ADD_MEMBERSHIP:
	case IPV6_DROP_MEMBERSHIP:
	{
		struct ipv6_mreq mreq;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_ADD_MEMBERSHIP)
			retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		else
			retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		break;
	}
	case IPV6_JOIN_ANYCAST:
	case IPV6_LEAVE_ANYCAST:
	{
		struct ipv6_mreq mreq;

		if (optlen != sizeof(struct ipv6_mreq))
			goto e_inval;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_JOIN_ANYCAST)
			retv = ipv6_sock_ac_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		else
			retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct group_req greq;
		struct sockaddr_in6 *psin6;

		retv = -EFAULT;
		if (copy_from_user(&greq, optval, sizeof(struct group_req)))
			break;
		if (greq.gr_group.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		psin6 = (struct sockaddr_in6 *)&greq.gr_group;
		if (optname == MCAST_JOIN_GROUP)
			retv = ipv6_sock_mc_join(sk, greq.gr_interface,
						 &psin6->sin6_addr);
		else
			retv = ipv6_sock_mc_drop(sk, greq.gr_interface,
						 &psin6->sin6_addr);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct group_source_req greqs;
		int omode, add;

		if (optlen != sizeof(struct group_source_req))
			goto e_inval;
		if (copy_from_user(&greqs, optval, sizeof(greqs))) {
			retv = -EFAULT;
			break;
		}
		if (greqs.gsr_group.ss_family != AF_INET6 ||
		    greqs.gsr_source.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		if (optname == MCAST_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == MCAST_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
			retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
						 &psin6->sin6_addr);
			if (retv)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		retv = ip6_mc_source(add, omode, sk, &greqs);
		break;
	}
	case MCAST_MSFILTER:
	{
		extern int sysctl_optmem_max;
		extern int sysctl_mld_max_msf;
		struct group_filter *gsf;

		if (optlen < GROUP_FILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			retv = -ENOBUFS;
			break;
		}
		gsf = (struct group_filter *)kmalloc(optlen, GFP_KERNEL);
		if (gsf == 0) {
			retv = -ENOBUFS;
			break;
		}
		retv = -EFAULT;
		if (copy_from_user(gsf, optval, optlen)) {
			kfree(gsf);
			break;
		}
		/* numsrc >= (4G-140)/128 overflow in 32 bits */
		if (gsf->gf_numsrc >= 0x1ffffffU ||
		    gsf->gf_numsrc > sysctl_mld_max_msf) {
			kfree(gsf);
			retv = -ENOBUFS;
			break;
		}
		if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
			kfree(gsf);
			retv = -EINVAL;
			break;
		}
		retv = ip6_mc_msfilter(sk, gsf);
		kfree(gsf);
		break;
	}
	case IPV6_ROUTER_ALERT:
		retv = ip6_ra_control(sk, val, NULL);
		break;
	case IPV6_MTU_DISCOVER:
		if (val < 0 || val > 2)
			goto e_inval;
		np->pmtudisc = val;
		retv = 0;
		break;
	case IPV6_MTU:
		if (val && val < IPV6_MIN_MTU)
			goto e_inval;
		np->frag_size = val;
		retv = 0;
		break;
	case IPV6_RECVERR:
		np->recverr = valbool;
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		retv = 0;
		break;
	case IPV6_FLOWINFO_SEND:
		np->sndflow = valbool;
		retv = 0;
		break;
	case IPV6_FLOWLABEL_MGR:
		retv = ipv6_flowlabel_opt(sk, optval, optlen);
		break;
	case IPV6_IPSEC_POLICY:
	case IPV6_XFRM_POLICY:
		retv = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			break;
		retv = xfrm_user_policy(sk, optname, optval, optlen);
		break;

#ifdef CONFIG_NETFILTER
	default:
		retv = nf_setsockopt(sk, PF_INET6, optname, optval, optlen);
		break;
#endif
	}
	release_sock(sk);

out:
	return retv;

e_inval:
	release_sock(sk);
	return -EINVAL;
}
static int hci_vhci_flush(struct hci_dev *hdev)
{
	struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) hdev->driver_data;

	skb_queue_purge(&hci_vhci->readq);
	return 0;
}
static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	int val, valbool;
	int retv = -ENOPROTOOPT;

	if (optval == NULL)
		val = 0;
	else {
		if (optlen >= sizeof(int)) {
			if (get_user(val, (int __user *) optval))
				return -EFAULT;
		} else
			val = 0;
	}

	valbool = (val != 0);

	if (ip6_mroute_opt(optname))
		return ip6_mroute_setsockopt(sk, optname, optval, optlen);

	lock_sock(sk);

	switch (optname) {

	case IPV6_ADDRFORM:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val == PF_INET) {
			struct ipv6_txoptions *opt;
			struct sk_buff *pktopt;

			if (sk->sk_type == SOCK_RAW)
				break;

			if (sk->sk_protocol == IPPROTO_UDP ||
			    sk->sk_protocol == IPPROTO_UDPLITE) {
				struct udp_sock *up = udp_sk(sk);

				if (up->pending == AF_INET6) {
					retv = -EBUSY;
					break;
				}
			} else if (sk->sk_protocol != IPPROTO_TCP)
				break;

			if (sk->sk_state != TCP_ESTABLISHED) {
				retv = -ENOTCONN;
				break;
			}

			if (ipv6_only_sock(sk) ||
			    !ipv6_addr_v4mapped(&np->daddr)) {
				retv = -EADDRNOTAVAIL;
				break;
			}

			fl6_free_socklist(sk);
			ipv6_sock_mc_close(sk);

			/*
			 * Sock is moving from IPv6 to IPv4 (sk_prot), so
			 * remove it from the refcnt debug socks count in the
			 * original family...
			 */
			sk_refcnt_debug_dec(sk);

			if (sk->sk_protocol == IPPROTO_TCP) {
				struct inet_connection_sock *icsk = inet_csk(sk);

				local_bh_disable();
				sock_prot_inuse_add(net, sk->sk_prot, -1);
				sock_prot_inuse_add(net, &tcp_prot, 1);
				local_bh_enable();
				sk->sk_prot = &tcp_prot;
				icsk->icsk_af_ops = &ipv4_specific;
				sk->sk_socket->ops = &inet_stream_ops;
				sk->sk_family = PF_INET;
				tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
			} else {
				struct proto *prot = &udp_prot;

				if (sk->sk_protocol == IPPROTO_UDPLITE)
					prot = &udplite_prot;
				local_bh_disable();
				sock_prot_inuse_add(net, sk->sk_prot, -1);
				sock_prot_inuse_add(net, prot, 1);
				local_bh_enable();
				sk->sk_prot = prot;
				sk->sk_socket->ops = &inet_dgram_ops;
				sk->sk_family = PF_INET;
			}
			opt = xchg(&np->opt, NULL);
			if (opt)
				sock_kfree_s(sk, opt, opt->tot_len);
			pktopt = xchg(&np->pktoptions, NULL);
			kfree_skb(pktopt);

			sk->sk_destruct = inet_sock_destruct;
			/*
			 * ... and add it to the refcnt debug socks count
			 * in the new family. -acme
			 */
			sk_refcnt_debug_inc(sk);
			module_put(THIS_MODULE);
			retv = 0;
			break;
		}
		goto e_inval;

	case IPV6_V6ONLY:
		if (optlen < sizeof(int) ||
		    inet_sk(sk)->inet_num)
			goto e_inval;
		np->ipv6only = valbool;
		retv = 0;
		break;

	case IPV6_RECVPKTINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxinfo = valbool;
		retv = 0;
		break;

	case IPV6_2292PKTINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxoinfo = valbool;
		retv = 0;
		break;

	case IPV6_RECVHOPLIMIT:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxhlim = valbool;
		retv = 0;
		break;

	case IPV6_2292HOPLIMIT:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxohlim = valbool;
		retv = 0;
		break;

	case IPV6_RECVRTHDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.srcrt = valbool;
		retv = 0;
		break;

	case IPV6_2292RTHDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.osrcrt = valbool;
		retv = 0;
		break;

	case IPV6_RECVHOPOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.hopopts = valbool;
		retv = 0;
		break;

	case IPV6_2292HOPOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.ohopopts = valbool;
		retv = 0;
		break;

	case IPV6_RECVDSTOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.dstopts = valbool;
		retv = 0;
		break;

	case IPV6_2292DSTOPTS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.odstopts = valbool;
		retv = 0;
		break;

	case IPV6_TCLASS:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < -1 || val > 0xff)
			goto e_inval;
		/* RFC 3542, 6.5: default traffic class of 0x0 */
		if (val == -1)
			val = 0;
		np->tclass = val;
		retv = 0;
		break;

	case IPV6_RECVTCLASS:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxtclass = valbool;
		retv = 0;
		break;

	case IPV6_FLOWINFO:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxflow = valbool;
		retv = 0;
		break;

	case IPV6_RECVPATHMTU:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxpmtu = valbool;
		retv = 0;
		break;

	case IPV6_TRANSPARENT:
		if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) &&
		    !ns_capable(net->user_ns, CAP_NET_RAW)) {
			retv = -EPERM;
			break;
		}
		if (optlen < sizeof(int))
			goto e_inval;
		/* we don't have a separate transparent bit for IPV6 we use the one in the IPv4 socket */
		inet_sk(sk)->transparent = valbool;
		retv = 0;
		break;

	case IPV6_RECVORIGDSTADDR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->rxopt.bits.rxorigdstaddr = valbool;
		retv = 0;
		break;

	case IPV6_HOPOPTS:
	case IPV6_RTHDRDSTOPTS:
	case IPV6_RTHDR:
	case IPV6_DSTOPTS:
	{
		struct ipv6_txoptions *opt;

		/* remove any sticky options header with a zero option
		 * length, per RFC3542.
		 */
		if (optlen == 0)
			optval = NULL;
		else if (optval == NULL)
			goto e_inval;
		else if (optlen < sizeof(struct ipv6_opt_hdr) ||
			 optlen & 0x7 || optlen > 8 * 255)
			goto e_inval;

		/* hop-by-hop / destination options are privileged option */
		retv = -EPERM;
		if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
			break;

		opt = ipv6_renew_options(sk, np->opt, optname,
					 (struct ipv6_opt_hdr __user *)optval,
					 optlen);
		if (IS_ERR(opt)) {
			retv = PTR_ERR(opt);
			break;
		}

		/* routing header option needs extra check */
		retv = -EINVAL;
		if (optname == IPV6_RTHDR && opt && opt->srcrt) {
			struct ipv6_rt_hdr *rthdr = opt->srcrt;

			switch (rthdr->type) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
			case IPV6_SRCRT_TYPE_2:
				if (rthdr->hdrlen != 2 ||
				    rthdr->segments_left != 1)
					goto sticky_done;
				break;
#endif
			default:
				goto sticky_done;
			}
		}

		retv = 0;
		opt = ipv6_update_options(sk, opt);
sticky_done:
		if (opt)
			sock_kfree_s(sk, opt, opt->tot_len);
		break;
	}

	case IPV6_PKTINFO:
	{
		struct in6_pktinfo pkt;

		if (optlen == 0)
			goto e_inval;
		else if (optlen < sizeof(struct in6_pktinfo) || optval == NULL)
			goto e_inval;

		if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) {
			retv = -EFAULT;
			break;
		}
		if (sk->sk_bound_dev_if && pkt.ipi6_ifindex != sk->sk_bound_dev_if)
			goto e_inval;

		np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
		np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr;
		retv = 0;
		break;
	}

	case IPV6_2292PKTOPTIONS:
	{
		struct ipv6_txoptions *opt = NULL;
		struct msghdr msg;
		struct flowi6 fl6;
		int junk;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		fl6.flowi6_mark = sk->sk_mark;

		if (optlen == 0)
			goto update;

		/* 1K is probably excessive
		 * 1K is surely not enough, 2K per standard header is 16K.
		 */
		retv = -EINVAL;
		if (optlen > 64*1024)
			break;

		opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL);
		retv = -ENOBUFS;
		if (opt == NULL)
			break;

		memset(opt, 0, sizeof(*opt));
		opt->tot_len = sizeof(*opt) + optlen;
		retv = -EFAULT;
		if (copy_from_user(opt+1, optval, optlen))
			goto done;

		msg.msg_controllen = optlen;
		msg.msg_control = (void *)(opt+1);

		retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
					     &junk, &junk);
		if (retv)
			goto done;
update:
		retv = 0;
		opt = ipv6_update_options(sk, opt);
done:
		if (opt)
			sock_kfree_s(sk, opt, opt->tot_len);
		break;
	}
	case IPV6_UNICAST_HOPS:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->hop_limit = val;
		retv = 0;
		break;

	case IPV6_MULTICAST_HOPS:
		if (sk->sk_type == SOCK_STREAM)
			break;
		if (optlen < sizeof(int))
			goto e_inval;
		if (val > 255 || val < -1)
			goto e_inval;
		np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
		retv = 0;
		break;

	case IPV6_MULTICAST_LOOP:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val != valbool)
			goto e_inval;
		np->mc_loop = valbool;
		retv = 0;
		break;

	case IPV6_UNICAST_IF:
	{
		struct net_device *dev = NULL;
		int ifindex;

		if (optlen != sizeof(int))
			goto e_inval;

		ifindex = (__force int)ntohl((__force __be32)val);
		if (ifindex == 0) {
			np->ucast_oif = 0;
			retv = 0;
			break;
		}

		dev = dev_get_by_index(net, ifindex);
		retv = -EADDRNOTAVAIL;
		if (!dev)
			break;
		dev_put(dev);

		retv = -EINVAL;
		if (sk->sk_bound_dev_if)
			break;

		np->ucast_oif = ifindex;
		retv = 0;
		break;
	}

	case IPV6_MULTICAST_IF:
		if (sk->sk_type == SOCK_STREAM)
			break;
		if (optlen < sizeof(int))
			goto e_inval;

		if (val) {
			struct net_device *dev;

			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
				goto e_inval;

			dev = dev_get_by_index(net, val);
			if (!dev) {
				retv = -ENODEV;
				break;
			}
			dev_put(dev);
		}
		np->mcast_oif = val;
		retv = 0;
		break;
	case IPV6_ADD_MEMBERSHIP:
	case IPV6_DROP_MEMBERSHIP:
	{
		struct ipv6_mreq mreq;

		if (optlen < sizeof(struct ipv6_mreq))
			goto e_inval;

		retv = -EPROTO;
		if (inet_sk(sk)->is_icsk)
			break;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_ADD_MEMBERSHIP)
			retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		else
			retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
		break;
	}
	case IPV6_JOIN_ANYCAST:
	case IPV6_LEAVE_ANYCAST:
	{
		struct ipv6_mreq mreq;

		if (optlen < sizeof(struct ipv6_mreq))
			goto e_inval;

		retv = -EFAULT;
		if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
			break;

		if (optname == IPV6_JOIN_ANYCAST)
			retv = ipv6_sock_ac_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		else
			retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
		break;
	}
	case MCAST_JOIN_GROUP:
	case MCAST_LEAVE_GROUP:
	{
		struct group_req greq;
		struct sockaddr_in6 *psin6;

		if (optlen < sizeof(struct group_req))
			goto e_inval;

		retv = -EFAULT;
		if (copy_from_user(&greq, optval, sizeof(struct group_req)))
			break;
		if (greq.gr_group.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		psin6 = (struct sockaddr_in6 *)&greq.gr_group;
		if (optname == MCAST_JOIN_GROUP)
			retv = ipv6_sock_mc_join(sk, greq.gr_interface,
						 &psin6->sin6_addr);
		else
			retv = ipv6_sock_mc_drop(sk, greq.gr_interface,
						 &psin6->sin6_addr);
		break;
	}
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
	{
		struct group_source_req greqs;
		int omode, add;

		if (optlen < sizeof(struct group_source_req))
			goto e_inval;
		if (copy_from_user(&greqs, optval, sizeof(greqs))) {
			retv = -EFAULT;
			break;
		}
		if (greqs.gsr_group.ss_family != AF_INET6 ||
		    greqs.gsr_source.ss_family != AF_INET6) {
			retv = -EADDRNOTAVAIL;
			break;
		}
		if (optname == MCAST_BLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 1;
		} else if (optname == MCAST_UNBLOCK_SOURCE) {
			omode = MCAST_EXCLUDE;
			add = 0;
		} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
			retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
						 &psin6->sin6_addr);
			/* prior join w/ different source is ok */
			if (retv && retv != -EADDRINUSE)
				break;
			omode = MCAST_INCLUDE;
			add = 1;
		} else /* MCAST_LEAVE_SOURCE_GROUP */ {
			omode = MCAST_INCLUDE;
			add = 0;
		}
		retv = ip6_mc_source(add, omode, sk, &greqs);
		break;
	}
	case MCAST_MSFILTER:
	{
		struct group_filter *gsf;

		if (optlen < GROUP_FILTER_SIZE(0))
			goto e_inval;
		if (optlen > sysctl_optmem_max) {
			retv = -ENOBUFS;
			break;
		}
		gsf = kmalloc(optlen, GFP_KERNEL);
		if (!gsf) {
			retv = -ENOBUFS;
			break;
		}
		retv = -EFAULT;
		if (copy_from_user(gsf, optval, optlen)) {
			kfree(gsf);
			break;
		}
		/* numsrc >= (4G-140)/128 overflow in 32 bits */
		if (gsf->gf_numsrc >= 0x1ffffffU ||
		    gsf->gf_numsrc > sysctl_mld_max_msf) {
			kfree(gsf);
			retv = -ENOBUFS;
			break;
		}
		if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
			kfree(gsf);
			retv = -EINVAL;
			break;
		}
		retv = ip6_mc_msfilter(sk, gsf);
		kfree(gsf);
		break;
	}
	case IPV6_ROUTER_ALERT:
		if (optlen < sizeof(int))
			goto e_inval;
		retv = ip6_ra_control(sk, val);
		break;
	case IPV6_MTU_DISCOVER:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
			goto e_inval;
		np->pmtudisc = val;
		retv = 0;
		break;
	case IPV6_MTU:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val && val < IPV6_MIN_MTU)
			goto e_inval;
		np->frag_size = val;
		retv = 0;
		break;
	case IPV6_RECVERR:
		if (optlen < sizeof(int))
			goto e_inval;
		np->recverr = valbool;
		if (!val)
			skb_queue_purge(&sk->sk_error_queue);
		retv = 0;
		break;
	case IPV6_FLOWINFO_SEND:
		if (optlen < sizeof(int))
			goto e_inval;
		np->sndflow = valbool;
		retv = 0;
		break;
	case IPV6_FLOWLABEL_MGR:
		retv = ipv6_flowlabel_opt(sk, optval, optlen);
		break;
	case IPV6_IPSEC_POLICY:
	case IPV6_XFRM_POLICY:
		retv = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		retv = xfrm_user_policy(sk, optname, optval, optlen);
		break;

	case IPV6_ADDR_PREFERENCES:
	{
		unsigned int pref = 0;
		unsigned int prefmask = ~0;

		if (optlen < sizeof(int))
			goto e_inval;

		retv = -EINVAL;

		/* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */
		switch (val & (IPV6_PREFER_SRC_PUBLIC|
			       IPV6_PREFER_SRC_TMP|
			       IPV6_PREFER_SRC_PUBTMP_DEFAULT)) {
		case IPV6_PREFER_SRC_PUBLIC:
			pref |= IPV6_PREFER_SRC_PUBLIC;
			break;
		case IPV6_PREFER_SRC_TMP:
			pref |= IPV6_PREFER_SRC_TMP;
			break;
		case IPV6_PREFER_SRC_PUBTMP_DEFAULT:
			break;
		case 0:
			goto pref_skip_pubtmp;
		default:
			goto e_inval;
		}

		prefmask &= ~(IPV6_PREFER_SRC_PUBLIC|
			      IPV6_PREFER_SRC_TMP);
pref_skip_pubtmp:

		/* check HOME/COA conflicts */
		switch (val & (IPV6_PREFER_SRC_HOME|IPV6_PREFER_SRC_COA)) {
		case IPV6_PREFER_SRC_HOME:
			break;
		case IPV6_PREFER_SRC_COA:
			pref |= IPV6_PREFER_SRC_COA;
		case 0:
			goto pref_skip_coa;
		default:
			goto e_inval;
		}

		prefmask &= ~IPV6_PREFER_SRC_COA;
pref_skip_coa:

		/* check CGA/NONCGA conflicts */
		switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) {
		case IPV6_PREFER_SRC_CGA:
		case IPV6_PREFER_SRC_NONCGA:
		case 0:
			break;
		default:
			goto e_inval;
		}

		np->srcprefs = (np->srcprefs & prefmask) | pref;
		retv = 0;

		break;
	}
	case IPV6_MINHOPCOUNT:
		if (optlen < sizeof(int))
			goto e_inval;
		if (val < 0 || val > 255)
			goto e_inval;
		np->min_hopcount = val;
		retv = 0;
		break;
	case IPV6_DONTFRAG:
		np->dontfrag = valbool;
		retv = 0;
		break;
	}

	release_sock(sk);

	return retv;

e_inval:
	release_sock(sk);
	return -EINVAL;
}
static void __net_exit wext_pernet_exit(struct net *net)
{
	skb_queue_purge(&net->wext_nlevents);
}
static int bluecard_open(struct bluecard_info *info)
{
	unsigned int iobase = info->p_dev->resource[0]->start;
	struct hci_dev *hdev;
	unsigned char id;

	spin_lock_init(&(info->lock));

	init_timer(&(info->timer));
	info->timer.function = &bluecard_activity_led_timeout;
	info->timer.data = (u_long)info;

	skb_queue_head_init(&(info->txq));

	info->rx_state = RECV_WAIT_PACKET_TYPE;
	info->rx_count = 0;
	info->rx_skb = NULL;

	/* Initialize HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		BT_ERR("Can't allocate HCI device");
		return -ENOMEM;
	}

	info->hdev = hdev;

	hdev->bus = HCI_PCCARD;
	hci_set_drvdata(hdev, info);
	SET_HCIDEV_DEV(hdev, &info->p_dev->dev);

	hdev->open = bluecard_hci_open;
	hdev->close = bluecard_hci_close;
	hdev->flush = bluecard_hci_flush;
	hdev->send = bluecard_hci_send_frame;

	id = inb(iobase + 0x30);

	if ((id & 0x0f) == 0x02)
		set_bit(CARD_HAS_PCCARD_ID, &(info->hw_state));

	if (id & 0x10)
		set_bit(CARD_HAS_POWER_LED, &(info->hw_state));

	if (id & 0x20)
		set_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state));

	/* Reset card */
	info->ctrl_reg = REG_CONTROL_BT_RESET | REG_CONTROL_CARD_RESET;
	outb(info->ctrl_reg, iobase + REG_CONTROL);

	/* Turn FPGA off */
	outb(0x80, iobase + 0x30);

	/* Wait some time */
	msleep(10);

	/* Turn FPGA on */
	outb(0x00, iobase + 0x30);

	/* Activate card */
	info->ctrl_reg = REG_CONTROL_BT_ON | REG_CONTROL_BT_RES_PU;
	outb(info->ctrl_reg, iobase + REG_CONTROL);

	/* Enable interrupt */
	outb(0xff, iobase + REG_INTERRUPT);
	info->ctrl_reg |= REG_CONTROL_INTERRUPT;
	outb(info->ctrl_reg, iobase + REG_CONTROL);

	if ((id & 0x0f) == 0x03) {
		/* Disable RTS */
		info->ctrl_reg |= REG_CONTROL_RTS;
		outb(info->ctrl_reg, iobase + REG_CONTROL);

		/* Set baud rate */
		info->ctrl_reg |= 0x03;
		outb(info->ctrl_reg, iobase + REG_CONTROL);

		/* Enable RTS */
		info->ctrl_reg &= ~REG_CONTROL_RTS;
		outb(info->ctrl_reg, iobase + REG_CONTROL);

		set_bit(XMIT_BUF_ONE_READY, &(info->tx_state));
		set_bit(XMIT_BUF_TWO_READY, &(info->tx_state));
		set_bit(XMIT_SENDING_READY, &(info->tx_state));
	}

	/* Start the RX buffers */
	outb(REG_COMMAND_RX_BUF_ONE, iobase + REG_COMMAND);
	outb(REG_COMMAND_RX_BUF_TWO, iobase + REG_COMMAND);

	/* Signal that the hardware is ready */
	set_bit(CARD_READY, &(info->hw_state));

	/* Drop TX queue */
	skb_queue_purge(&(info->txq));

	/* Control the point at which RTS is enabled */
	outb((0x0f << RTS_LEVEL_SHIFT_BITS) | 1, iobase + REG_RX_CONTROL);

	/* Timeout before it is safe to send the first HCI packet */
	msleep(1250);

	/* Register HCI device */
	if (hci_register_dev(hdev) < 0) {
		BT_ERR("Can't register HCI device");
		info->hdev = NULL;
		hci_free_dev(hdev);
		return -ENODEV;
	}

	return 0;
}
static void drain_free(struct atm_dev *dev, int pool)
{
	skb_queue_purge(&ZATM_DEV(dev)->pool[pool]);
}
/* Destroy socket. All references are gone. */
static void pn_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}
/*
 * Delete an aarp queue
 *
 * Must run under aarp_lock.
 */
static void __aarp_expire(struct aarp_entry *a)
{
	skb_queue_purge(&a->packet_queue);
	kfree(a);
}
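/*
 * Hypothetical caller sketch: __aarp_expire() is __-prefixed because the
 * comment above requires aarp_lock to be held. In net/appletalk/aarp.c
 * that lock is a rwlock taken writer-side and bottom-half safe, roughly
 * as below; expire_one() itself is an invented name for illustration.
 */
static void expire_one(struct aarp_entry *a)
{
	write_lock_bh(&aarp_lock);	/* satisfy the "must run under aarp_lock" contract */
	__aarp_expire(a);		/* purges a->packet_queue and frees the entry */
	write_unlock_bh(&aarp_lock);
}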
/*
 * This routine purges all the queues of frames.
 */
void lapb_clear_queues(struct lapb_cb *lapb)
{
	skb_queue_purge(&lapb->write_queue);
	skb_queue_purge(&lapb->ack_queue);
}
/*
 * @brief Flush all packets in exception instance to files for debugging
 * @param
 *     None
 * @return
 *     This function returns KAL_SUCCESS always.
 */
KAL_INT32 eemcs_expt_flush(void)
{
	KAL_UINT32 pkts = 0;
	KAL_UINT32 i = 0;
	// struct sk_buff *skb = NULL;

	/* Flush all port skb from expt skb list */
	for (i = 0; i < CCCI_PORT_NUM; i++) {
		pkts = atomic_read(&g_except_inst.port[i].pkt_cnt);
		/* No data in port */
		if (pkts == 0)
			continue;
		DBGLOG(EXPT, DBG, "free %d skb in port%d expt list", pkts, i);
		skb_queue_purge(&g_except_inst.port[i].skb_list);
		atomic_set(&g_except_inst.port[i].pkt_cnt, 0);
	}

	/* Flush all rx skb from expt skb list */
	for (i = 0; i < SDIO_RX_Q_NUM; i++) {
		pkts = atomic_read(&g_except_inst.rxq[i].pkt_cnt);
		/* No data in queue */
		if (pkts == 0)
			continue;
		DBGLOG(EXPT, DBG, "free %d skb in rxq%d expt list", pkts, i);
		skb_queue_purge(&g_except_inst.rxq[i].skb_list);
		atomic_set(&g_except_inst.rxq[i].pkt_cnt, 0);
	}

	/* Flush all tx skb from expt skb list */
	for (i = 0; i < SDIO_TX_Q_NUM; i++) {
		pkts = atomic_read(&g_except_inst.txq[i].pkt_cnt);
		/* No data in queue */
		if (pkts == 0)
			continue;
		DBGLOG(EXPT, DBG, "free %d skb in txq%d expt list", pkts, i);
		skb_queue_purge(&g_except_inst.txq[i].skb_list);
		atomic_set(&g_except_inst.txq[i].pkt_cnt, 0);
	}

#if 0
	char log_file[NAME_MAX] = {0};
	struct file *fp = NULL;
	KAL_UINT32 pkts = 0;
	KAL_UINT32 i = 0, j = 0;
	struct sk_buff *skb = NULL;

	DEBUG_LOG_FUNCTION_ENTRY;

	/* Flush all DL packets to a file */
	for (i = 0; i < SDIO_RX_Q_NUM; i++) {
		pkts = atomic_read(&g_except_inst.rxq[i].pkt_cnt);
		DBGLOG(EXPT, DBG, "[EXPT] %d packets in DL SWQ %d", pkts, i);
		/* No data in Rx Q */
		if (pkts == 0)
			continue;

		sprintf(log_file, "%s/eemcs_expt_rx-%02d_%d.bak",
			EEMCS_EXCEPTION_LOG_PATH, g_except_inst.rxq[i].id, pkts);
		fp = file_open(log_file, O_RDWR | O_CREAT | O_TRUNC, 0777);
		if (fp == NULL) {
			DBGLOG(EXPT, ERR, "[EXPT] Failed to open file %s", log_file);
			continue;
		}

		// Write packets number
		file_write(fp, (char *)&pkts, sizeof(KAL_UINT32));
		/* Write each skb in list */
		for (j = 0; j < pkts; j++) {
			skb = skb_dequeue(&g_except_inst.rxq[i].skb_list);
			if (skb == NULL) {
				DBGLOG(EXPT, WAR, "[EXPT] Failed to read skb from RX list %d", i);
			} else {
				hif_dl_pkt_handle_complete(i);
				// Write skb data length
				file_write(fp, (char *)&skb->len, sizeof(unsigned int));
				// Write skb data
				file_write(fp, skb->data, skb->len);
				atomic_dec(&g_except_inst.rxq[i].pkt_cnt);
			}
		}
		file_close(fp);
		DBGLOG(EXPT, TRA, "[EXPT] All unhandled DL packets in Q are saved to %s", log_file);
	}

	/* Flush all UL packets to a file */
	for (i = 0; i < SDIO_TX_Q_NUM; i++) {
		pkts = atomic_read(&g_except_inst.txq[i].pkt_cnt);
		DBGLOG(EXPT, DBG, "[EXPT] %d packets in UL SWQ %d", pkts, i);
		/* No data in Tx Q */
		if (pkts == 0)
			continue;

		sprintf(log_file, "%s/eemcs_expt_tx-%02d_%d.bak",
			EEMCS_EXCEPTION_LOG_PATH, g_except_inst.txq[i].id, pkts);
		fp = file_open(log_file, O_RDWR | O_CREAT | O_TRUNC, 0777);
		if (fp == NULL) {
			DBGLOG(EXPT, ERR, "[EXPT] Failed to open file %s", log_file);
			continue;
		}

		// Write packets number
		file_write(fp, (char *)&pkts, sizeof(KAL_UINT32));
		/* Write each skb in list */
		for (j = 0; j < pkts; j++) {
			skb = skb_dequeue(&g_except_inst.txq[i].skb_list);
			if (skb == NULL) {
				DBGLOG(EXPT, WAR, "[EXPT] Failed to read skb from TX list %d", i);
			} else {
				// Write skb data length
				file_write(fp, (char *)&skb->len, sizeof(unsigned int));
				// Write skb data
				file_write(fp, skb->data, skb->len);
				atomic_dec(&g_except_inst.txq[i].pkt_cnt);
			}
		}
		file_close(fp);
		DBGLOG(EXPT, TRA, "[EXPT] All unhandled UL packets in Q are saved to %s", log_file);
	}

	/* Flush all port packets to a file */
	for (i = 0; i < CCCI_CDEV_NUM; i++) {
		pkts = atomic_read(&g_except_inst.port[i].pkt_cnt);
		DBGLOG(EXPT, DBG, "[EXPT] %d packets in port %d", pkts, i);
		/* No data in port */
		if (pkts == 0)
			continue;

		sprintf(log_file, "%s/eemcs_expt_port-%02d_%d.bak",
			EEMCS_EXCEPTION_LOG_PATH, i, pkts);
		fp = file_open(log_file, O_RDWR | O_CREAT, 0777);
		if (fp == NULL) {
			DBGLOG(EXPT, ERR, "[EXPT] Failed to open file %s", log_file);
			continue;
		}

		// Write packets number
		file_write(fp, (char *)&pkts, sizeof(KAL_UINT32));
		/* Write each skb in list */
		for (j = 0; j < pkts; j++) {
			skb = skb_dequeue(&g_except_inst.port[i].skb_list);
			if (skb == NULL) {
				DBGLOG(EXPT, WAR, "[EXPT] Failed to read skb from port list %d", i);
			} else {
				// Write skb data length
				file_write(fp, (char *)&skb->len, sizeof(unsigned int));
				// Write skb data
				file_write(fp, skb->data, skb->len);
				atomic_dec(&g_except_inst.port[i].pkt_cnt);
			}
		}
		file_close(fp);
		DBGLOG(EXPT, TRA, "[EXPT] All unhandled UL packets in port are saved to %s", log_file);
	}

	DBGLOG(EXPT, TRA, "[EXPT] eemcs_expt_flush() Finished !!");
	DEBUG_LOG_FUNCTION_LEAVE;
#else
	DEBUG_LOG_FUNCTION_ENTRY;
	DEBUG_LOG_FUNCTION_LEAVE;
#endif
	return KAL_SUCCESS;
}