static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
			   const struct itimerval *const value,
			   struct itimerval *const ovalue)
{
	u64 oval, nval, ointerval, ninterval;
	struct cpu_itimer *it = &tsk->signal->it[clock_id];

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	nval = ktime_to_ns(timeval_to_ktime(value->it_value));
	ninterval = ktime_to_ns(timeval_to_ktime(value->it_interval));

	spin_lock_irq(&tsk->sighand->siglock);

	oval = it->expires;
	ointerval = it->incr;
	if (oval || nval) {
		if (nval > 0)
			nval += TICK_NSEC;
		set_process_cpu_timer(tsk, clock_id, &nval, &oval);
	}
	it->expires = nval;
	it->incr = ninterval;
	trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
			   ITIMER_VIRTUAL : ITIMER_PROF, value, nval);

	spin_unlock_irq(&tsk->sighand->siglock);

	if (ovalue) {
		ovalue->it_value = ns_to_timeval(oval);
		ovalue->it_interval = ns_to_timeval(ointerval);
	}
}
int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
{
	struct task_struct *tsk = current;
	struct hrtimer *timer;
	ktime_t expires;

	/*
	 * Validate the timevals in value.
	 */
	if (!timeval_valid(&value->it_value) ||
	    !timeval_valid(&value->it_interval))
		return -EINVAL;

	trace_timer_itimer_set(which, value);

	switch (which) {
	case ITIMER_REAL:
again:
		spin_lock_irq(&tsk->sighand->siglock);
		timer = &tsk->signal->real_timer;
		if (ovalue) {
			ovalue->it_value = itimer_get_remtime(timer);
			ovalue->it_interval =
				ktime_to_timeval(tsk->signal->it_real_incr);
		}
		/* We are sharing ->siglock with it_real_fn() */
		if (hrtimer_try_to_cancel(timer) < 0) {
			spin_unlock_irq(&tsk->sighand->siglock);
			goto again;
		}
		expires = timeval_to_ktime(value->it_value);
		if (expires.tv64 != 0) {
			tsk->signal->it_real_incr =
				timeval_to_ktime(value->it_interval);
			hrtimer_start(timer, expires, HRTIMER_MODE_REL);
		} else
			tsk->signal->it_real_incr.tv64 = 0;

		trace_itimer_state(ITIMER_REAL, value, 0);
		spin_unlock_irq(&tsk->sighand->siglock);
		break;
	case ITIMER_VIRTUAL:
		set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue);
		break;
	case ITIMER_PROF:
		set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
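/*
 * Illustrative addition (not part of the kernel sources above): a minimal
 * userspace sketch of the setitimer(2) call that do_setitimer() services.
 * The one-second initial expiry and the 500 ms re-arm interval are arbitrary
 * example values.
 */
#if 0	/* userspace example, kept out of any kernel build */
#include <sys/time.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_alarm(int sig)
{
	(void)sig;	/* SIGALRM is delivered on each ITIMER_REAL expiry */
}

int main(void)
{
	struct itimerval it = {
		.it_value    = { .tv_sec = 1, .tv_usec = 0 },      /* first expiry */
		.it_interval = { .tv_sec = 0, .tv_usec = 500000 }, /* then every 500 ms */
	};

	signal(SIGALRM, on_alarm);
	if (setitimer(ITIMER_REAL, &it, NULL) < 0) {
		perror("setitimer");
		return 1;
	}
	pause();	/* wait for the first SIGALRM */
	return 0;
}
#endif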
/*
 * CFG802.11 network device handler for data transmission.
 */
static int
mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
	struct sk_buff *new_skb;
	struct mwifiex_txinfo *tx_info;
	struct timeval tv;

	dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n",
		jiffies, priv->bss_type, priv->bss_num);

	if (priv->adapter->surprise_removed) {
		kfree_skb(skb);
		priv->stats.tx_dropped++;
		return 0;
	}
	if (!skb->len || (skb->len > ETH_FRAME_LEN)) {
		dev_err(priv->adapter->dev, "Tx: bad skb len %d\n", skb->len);
		kfree_skb(skb);
		priv->stats.tx_dropped++;
		return 0;
	}
	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
		dev_dbg(priv->adapter->dev,
			"data: Tx: insufficient skb headroom %d\n",
			skb_headroom(skb));
		/* Insufficient skb headroom - allocate a new skb */
		new_skb =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		if (unlikely(!new_skb)) {
			dev_err(priv->adapter->dev,
				"Tx: cannot allocate new_skb\n");
			kfree_skb(skb);
			priv->stats.tx_dropped++;
			return 0;
		}
		kfree_skb(skb);
		skb = new_skb;
		dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
			skb_headroom(skb));
	}

	tx_info = MWIFIEX_SKB_TXCB(skb);
	memset(tx_info, 0, sizeof(*tx_info));
	tx_info->bss_num = priv->bss_num;
	tx_info->bss_type = priv->bss_type;

	/* Record the time the packet was queued; it is used to determine
	 * how long the packet sat in the driver before it was handed to
	 * the firmware. The delay is then sent along with the packet to
	 * the firmware for aggregate delay calculation for stats and
	 * MSDU lifetime expiry.
	 */
	do_gettimeofday(&tv);
	skb->tstamp = timeval_to_ktime(tv);

	mwifiex_queue_tx_pkt(priv, skb);

	return 0;
}
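/*
 * Illustrative addition (not from the driver sources above): since
 * mwifiex_hard_start_xmit() stamps skb->tstamp with the wall-clock time at
 * queueing, the time a packet spent inside the driver can later be read back
 * as a ktime difference. mwifiex_queuing_delay_us() is a hypothetical helper,
 * shown only to make that arithmetic concrete.
 */
#if 0	/* sketch only */
static s64 mwifiex_queuing_delay_us(const struct sk_buff *skb)
{
	ktime_t now = ktime_get_real();	/* same clock as do_gettimeofday() */

	return ktime_us_delta(now, skb->tstamp);
}
#endif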
static void sam4e_usb_receive_frame(struct sam4e_usb *dev,
				    struct sam4e_can_unsl_ts_receive *frame)
{
	struct can_frame *cf;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *skt;
	struct timeval tv;
	int msec;
	int i;

	if (frame->can == 0)
		skb = alloc_can_skb(dev->netdev1, &cf);
	else
		skb = alloc_can_skb(dev->netdev2, &cf);

	if (skb == NULL) {
		pr_err("skb alloc failed, frame->can %d", frame->can);
		return;
	}

	LOGNI(" rcv frame %d %x %d %x %x %x %x %x %x %x %x\n",
	      frame->ts, frame->mid, frame->dlc, frame->data[0],
	      frame->data[1], frame->data[2], frame->data[3], frame->data[4],
	      frame->data[5], frame->data[6], frame->data[7]);

	cf->can_id = le32_to_cpu(frame->mid);
	cf->can_dlc = get_can_dlc(frame->dlc);

	for (i = 0; i < cf->can_dlc; i++)
		cf->data[i] = frame->data[i];

	/* The firmware timestamp is in milliseconds; split it into the
	 * seconds/microseconds pair that timeval_to_ktime() expects.
	 */
	msec = le32_to_cpu(frame->ts);
	tv.tv_sec = msec / 1000;
	tv.tv_usec = (msec - tv.tv_sec * 1000) * 1000;

	skt = skb_hwtstamps(skb);
	skt->hwtstamp = timeval_to_ktime(tv);
	LOGNI(" hwtstamp %lld\n", ktime_to_ms(skt->hwtstamp));
	skb->tstamp = timeval_to_ktime(tv);

	netif_rx(skb);
}
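/*
 * Illustrative addition (not from the driver above): the millisecond
 * timestamp split done in sam4e_usb_receive_frame(), worked through for an
 * assumed firmware value of 12345 ms.
 *
 *	msec       = 12345
 *	tv.tv_sec  = 12345 / 1000               = 12
 *	tv.tv_usec = (12345 - 12 * 1000) * 1000 = 345000
 *
 * timeval_to_ktime() then yields 12.345 s, which ktime_to_ms() prints back
 * as 12345 in the LOGNI() line.
 */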
static int setup_sock_common(struct sock *sk, struct cpt_sock_image *si,
			     loff_t pos, struct cpt_context *ctx)
{
	struct timeval tmptv;

	if (sk->sk_socket) {
		sk->sk_socket->flags = si->cpt_ssflags;
		sk->sk_socket->state = si->cpt_sstate;
	}
	sk->sk_reuse = si->cpt_reuse;
	sk->sk_shutdown = si->cpt_shutdown;
	sk->sk_userlocks = si->cpt_userlocks;
	sk->sk_no_check = si->cpt_no_check;
	sock_reset_flag(sk, SOCK_DBG);
	if (si->cpt_debug)
		sock_set_flag(sk, SOCK_DBG);
	sock_reset_flag(sk, SOCK_RCVTSTAMP);
	if (si->cpt_rcvtstamp)
		sock_set_flag(sk, SOCK_RCVTSTAMP);
	sock_reset_flag(sk, SOCK_LOCALROUTE);
	if (si->cpt_localroute)
		sock_set_flag(sk, SOCK_LOCALROUTE);
	sk->sk_protocol = si->cpt_protocol;
	sk->sk_err = si->cpt_err;
	sk->sk_err_soft = si->cpt_err_soft;
	sk->sk_priority = si->cpt_priority;
	sk->sk_rcvlowat = si->cpt_rcvlowat;
	sk->sk_rcvtimeo = si->cpt_rcvtimeo;
	if (si->cpt_rcvtimeo == CPT_NULL)
		sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = si->cpt_sndtimeo;
	if (si->cpt_sndtimeo == CPT_NULL)
		sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_rcvbuf = si->cpt_rcvbuf;
	sk->sk_sndbuf = si->cpt_sndbuf;
	sk->sk_bound_dev_if = si->cpt_bound_dev_if;
	sk->sk_flags = si->cpt_flags;
	sk->sk_lingertime = si->cpt_lingertime;
	if (si->cpt_lingertime == CPT_NULL)
		sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
	sk->sk_peercred.pid = si->cpt_peer_pid;
	sk->sk_peercred.uid = si->cpt_peer_uid;
	sk->sk_peercred.gid = si->cpt_peer_gid;
	cpt_timeval_import(&tmptv, si->cpt_stamp);
	sk->sk_stamp = timeval_to_ktime(tmptv);
	return 0;
}
int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
{
	struct task_struct *tsk = current;
	struct hrtimer *timer;
	ktime_t expires;
	cputime_t cval, cinterval, nval, ninterval;

	/*
	 * Validate the timevals in value.
	 */
	if (!timeval_valid(&value->it_value) ||
	    !timeval_valid(&value->it_interval))
		return -EINVAL;

	switch (which) {
	case ITIMER_REAL:
again:
		spin_lock_irq(&tsk->sighand->siglock);
		timer = &tsk->signal->real_timer;
		if (ovalue) {
			ovalue->it_value = itimer_get_remtime(timer);
			ovalue->it_interval =
				ktime_to_timeval(tsk->signal->it_real_incr);
		}
		/* We are sharing ->siglock with it_real_fn() */
		if (hrtimer_try_to_cancel(timer) < 0) {
			spin_unlock_irq(&tsk->sighand->siglock);
			hrtimer_wait_for_timer(&tsk->signal->real_timer);
			goto again;
		}
		expires = timeval_to_ktime(value->it_value);
		if (expires.tv64 != 0) {
			tsk->signal->it_real_incr =
				timeval_to_ktime(value->it_interval);
			hrtimer_start(timer, expires, HRTIMER_MODE_REL);
		} else
			tsk->signal->it_real_incr.tv64 = 0;

		spin_unlock_irq(&tsk->sighand->siglock);
		break;
	case ITIMER_VIRTUAL:
		nval = timeval_to_cputime(&value->it_value);
		ninterval = timeval_to_cputime(&value->it_interval);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_virt_expires;
		cinterval = tsk->signal->it_virt_incr;
		if (!cputime_eq(cval, cputime_zero) ||
		    !cputime_eq(nval, cputime_zero)) {
			if (cputime_gt(nval, cputime_zero))
				nval = cputime_add(nval, jiffies_to_cputime(1));
			set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
					      &nval, &cval);
		}
		tsk->signal->it_virt_expires = nval;
		tsk->signal->it_virt_incr = ninterval;
		spin_unlock_irq(&tsk->sighand->siglock);
		if (ovalue) {
			cputime_to_timeval(cval, &ovalue->it_value);
			cputime_to_timeval(cinterval, &ovalue->it_interval);
		}
		break;
	case ITIMER_PROF:
		nval = timeval_to_cputime(&value->it_value);
		ninterval = timeval_to_cputime(&value->it_interval);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_prof_expires;
		cinterval = tsk->signal->it_prof_incr;
		if (!cputime_eq(cval, cputime_zero) ||
		    !cputime_eq(nval, cputime_zero)) {
			if (cputime_gt(nval, cputime_zero))
				nval = cputime_add(nval, jiffies_to_cputime(1));
			set_process_cpu_timer(tsk, CPUCLOCK_PROF,
					      &nval, &cval);
		}
		tsk->signal->it_prof_expires = nval;
		tsk->signal->it_prof_incr = ninterval;
		spin_unlock_irq(&tsk->sighand->siglock);
		if (ovalue) {
			cputime_to_timeval(cval, &ovalue->it_value);
			cputime_to_timeval(cinterval, &ovalue->it_interval);
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
struct sk_buff *rst_skb(struct sock *sk, loff_t *pos_p, __u32 *owner,
			__u32 *queue, struct cpt_context *ctx)
{
	int err;
	struct sk_buff *skb;
	struct cpt_skb_image v;
	loff_t pos = *pos_p;
	struct scm_fp_list *fpl = NULL;
	struct timeval tmptv;

	err = rst_get_object(CPT_OBJ_SKB, pos, &v, ctx);
	if (err)
		return ERR_PTR(err);

	*pos_p = pos + v.cpt_next;
	if (owner)
		*owner = v.cpt_owner;
	if (queue)
		*queue = v.cpt_queue;

	skb = alloc_skb(v.cpt_len + v.cpt_hspace + v.cpt_tspace, GFP_KERNEL);
	if (skb == NULL)
		return ERR_PTR(-ENOMEM);
	skb_reserve(skb, v.cpt_hspace);
	skb_put(skb, v.cpt_len);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->transport_header = v.cpt_h;
	skb->network_header = v.cpt_nh;
	skb->mac_header = v.cpt_mac;
#else
	skb->transport_header = skb->head + v.cpt_h;
	skb->network_header = skb->head + v.cpt_nh;
	skb->mac_header = skb->head + v.cpt_mac;
#endif
	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(v.cpt_cb));
	if (sk->sk_protocol == IPPROTO_TCP) {
		/*
		 * According to Alexey, all packets in the queue have
		 * non-zero flags, as at least TCPCB_FLAG_ACK is set on
		 * them. Luckily for us, the offset of the flags field in
		 * struct tcp_skb_cb with IPv6 is higher than the total
		 * size of struct tcp_skb_cb without IPv6.
		 */
		if (ctx->image_version >= CPT_VERSION_18_2 ||
		    ((struct tcp_skb_cb_ipv6 *)&v.cpt_cb)->flags) {
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
			check_tcp_cb_conv(NOT_CONV, CONV);
			memcpy(skb->cb, v.cpt_cb, sizeof(v.cpt_cb));
#else
			check_tcp_cb_conv(CONV, NOT_CONV);
			rst_tcp_cb_ipv6_to_ipv4(&v, skb);
#endif
		} else {
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
			check_tcp_cb_conv(CONV, NOT_CONV);
			rst_tcp_cb_ipv4_to_ipv6(&v, skb);
#else
			check_tcp_cb_conv(NOT_CONV, CONV);
			memcpy(skb->cb, v.cpt_cb, sizeof(v.cpt_cb));
#endif
		}
	} else
		memcpy(skb->cb, v.cpt_cb, sizeof(v.cpt_cb));

	skb->mac_len = v.cpt_mac_len;
	skb->csum = v.cpt_csum;
	skb->local_df = v.cpt_local_df;
	skb->pkt_type = v.cpt_pkt_type;
	skb->ip_summed = v.cpt_ip_summed;
	skb->priority = v.cpt_priority;
	skb->protocol = v.cpt_protocol;
	cpt_timeval_import(&tmptv, v.cpt_stamp);
	skb->tstamp = timeval_to_ktime(tmptv);

	skb_shinfo(skb)->gso_segs = v.cpt_gso_segs;
	skb_shinfo(skb)->gso_size = v.cpt_gso_size;
	if (ctx->image_version == 0) {
		skb_shinfo(skb)->gso_segs = 1;
		skb_shinfo(skb)->gso_size = 0;
	}

	if (v.cpt_next > v.cpt_hdrlen) {
		pos = pos + v.cpt_hdrlen;
		while (pos < *pos_p) {
			union {
				struct cpt_obj_bits b;
				struct cpt_fd_image f;
			} u;

			err = rst_get_object(-1, pos, &u, ctx);
			if (err) {
				kfree_skb(skb);
				return ERR_PTR(err);
			}
			if (u.b.cpt_object == CPT_OBJ_BITS) {
				if (u.b.cpt_size != v.cpt_hspace + skb->len) {
					eprintk_ctx("invalid skb image %u != %u + %u\n",
						    u.b.cpt_size, v.cpt_hspace,
						    skb->len);
					kfree_skb(skb);
					return ERR_PTR(-EINVAL);
				}

				err = ctx->pread(skb->head, u.b.cpt_size, ctx,
						 pos + u.b.cpt_hdrlen);
				if (err) {
					kfree_skb(skb);
					return ERR_PTR(err);
				}
			} else if (u.f.cpt_object == CPT_OBJ_FILEDESC) {
				if (!fpl) {
					fpl = kmalloc(sizeof(struct scm_fp_list),
						      GFP_KERNEL_UBC);
					if (!fpl) {
						kfree_skb(skb);
						return ERR_PTR(-ENOMEM);
					}
					fpl->count = 0;
					UNIXCB(skb).fp = fpl;
				}
				fpl->fp[fpl->count] =
					rst_file(u.f.cpt_file, -1, ctx);
				if (!IS_ERR(fpl->fp[fpl->count]))
					fpl->count++;
			}
			pos += u.b.cpt_next;
		}
	}

	return skb;
}
int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
{
	struct task_struct *tsk = current;
	struct hrtimer *timer;
	ktime_t expires;
	cputime_t cval, cinterval, nval, ninterval;

	/*
	 * Validate the timevals in value.
	 *
	 * Note: Although the spec requires that invalid values shall
	 * return -EINVAL, we just fixup the value and print a limited
	 * number of warnings in order not to break users of this
	 * historical misfeature.
	 *
	 * Scheduled for replacement in March 2007
	 */
	check_itimerval(value);

	switch (which) {
	case ITIMER_REAL:
again:
		spin_lock_irq(&tsk->sighand->siglock);
		timer = &tsk->signal->real_timer;
		if (ovalue) {
			ovalue->it_value = itimer_get_remtime(timer);
			ovalue->it_interval =
				ktime_to_timeval(tsk->signal->it_real_incr);
		}
		/* We are sharing ->siglock with it_real_fn() */
		if (hrtimer_try_to_cancel(timer) < 0) {
			spin_unlock_irq(&tsk->sighand->siglock);
			goto again;
		}
		tsk->signal->it_real_incr =
			timeval_to_ktime(value->it_interval);
		expires = timeval_to_ktime(value->it_value);
		if (expires.tv64 != 0)
			hrtimer_start(timer, expires, HRTIMER_REL);
		spin_unlock_irq(&tsk->sighand->siglock);
		break;
	case ITIMER_VIRTUAL:
		nval = timeval_to_cputime(&value->it_value);
		ninterval = timeval_to_cputime(&value->it_interval);
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_virt_expires;
		cinterval = tsk->signal->it_virt_incr;
		if (!cputime_eq(cval, cputime_zero) ||
		    !cputime_eq(nval, cputime_zero)) {
			if (cputime_gt(nval, cputime_zero))
				nval = cputime_add(nval, jiffies_to_cputime(1));
			set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
					      &nval, &cval);
		}
		tsk->signal->it_virt_expires = nval;
		tsk->signal->it_virt_incr = ninterval;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		if (ovalue) {
			cputime_to_timeval(cval, &ovalue->it_value);
			cputime_to_timeval(cinterval, &ovalue->it_interval);
		}
		break;
	case ITIMER_PROF:
		nval = timeval_to_cputime(&value->it_value);
		ninterval = timeval_to_cputime(&value->it_interval);
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_prof_expires;
		cinterval = tsk->signal->it_prof_incr;
		if (!cputime_eq(cval, cputime_zero) ||
		    !cputime_eq(nval, cputime_zero)) {
			if (cputime_gt(nval, cputime_zero))
				nval = cputime_add(nval, jiffies_to_cputime(1));
			set_process_cpu_timer(tsk, CPUCLOCK_PROF,
					      &nval, &cval);
		}
		tsk->signal->it_prof_expires = nval;
		tsk->signal->it_prof_incr = ninterval;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		if (ovalue) {
			cputime_to_timeval(cval, &ovalue->it_value);
			cputime_to_timeval(cinterval, &ovalue->it_interval);
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
					  struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct uap_rxpd *uap_rx_pd;
	struct rx_packet_hdr *rx_pkt_hdr;
	struct sk_buff *new_skb;
	struct mwifiex_txinfo *tx_info;
	int hdr_chop;
	struct timeval tv;
	u8 rfc1042_eth_hdr[ETH_ALEN] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

	uap_rx_pd = (struct uap_rxpd *)(skb->data);
	rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);

	if ((atomic_read(&adapter->pending_bridged_pkts) >=
					     MWIFIEX_BRIDGED_PKTS_THRESHOLD)) {
		dev_err(priv->adapter->dev,
			"Tx: Bridge packet limit reached. Drop packet!\n");
		kfree_skb(skb);
		return;
	}

	if (!memcmp(&rx_pkt_hdr->rfc1042_hdr,
		    rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)))
		/* Chop off the rxpd + the excess memory from
		 * the 802.2/llc/snap header that was removed.
		 */
		hdr_chop = (u8 *)eth_hdr - (u8 *)uap_rx_pd;
	else
		/* Chop off the rxpd */
		hdr_chop = (u8 *)&rx_pkt_hdr->eth803_hdr - (u8 *)uap_rx_pd;

	/* Chop off the leading header bytes so that the buffer points to
	 * the start of either the reconstructed EthII frame or the
	 * 802.2/llc/snap frame.
	 */
	skb_pull(skb, hdr_chop);

	if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN) {
		dev_dbg(priv->adapter->dev,
			"data: Tx: insufficient skb headroom %d\n",
			skb_headroom(skb));
		/* Insufficient skb headroom - allocate a new skb */
		new_skb =
			skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
		if (unlikely(!new_skb)) {
			dev_err(priv->adapter->dev,
				"Tx: cannot allocate new_skb\n");
			kfree_skb(skb);
			priv->stats.tx_dropped++;
			return;
		}
		kfree_skb(skb);
		skb = new_skb;
		dev_dbg(priv->adapter->dev, "info: new skb headroom %d\n",
			skb_headroom(skb));
	}

	tx_info = MWIFIEX_SKB_TXCB(skb);
	tx_info->bss_num = priv->bss_num;
	tx_info->bss_type = priv->bss_type;
	tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;

	do_gettimeofday(&tv);
	skb->tstamp = timeval_to_ktime(tv);
	mwifiex_wmm_add_buf_txqueue(priv, skb);

	atomic_inc(&adapter->tx_pending);
	atomic_inc(&adapter->pending_bridged_pkts);

	if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) {
		mwifiex_set_trans_start(priv->netdev);
		mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
	}
	return;
}