Example #1
unsigned long route_lookup(unsigned long addr)
{
	struct route_entry *rep;
	struct route_entry **repp;
	unsigned long ret;
	unsigned long s;

retry:
	s = read_seqbegin(&sl);
	repp = &route_list.re_next;
	do {
		rep = READ_ONCE(*repp);
		if (rep == NULL) {
			if (read_seqretry(&sl, s))
				goto retry;
			return ULONG_MAX;
		}

		/* Advance to next. */
		repp = &rep->re_next;
	} while (rep->addr != addr);
	if (READ_ONCE(rep->re_freed))
		abort();
	ret = rep->iface;
	if (read_seqretry(&sl, s))
		goto retry;
	return ret;
}
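For contrast, here is a hedged sketch of the update side that this lookup races against, reusing the names above (sl, route_list, re_freed); it is an illustrative reconstruction, not necessarily the original deletion routine:

int route_del(unsigned long addr)
{
	struct route_entry *rep;
	struct route_entry **repp;

	write_seqlock(&sl);		/* Bump the sequence; readers will retry. */
	repp = &route_list.re_next;
	for (;;) {
		rep = *repp;
		if (rep == NULL)
			break;
		if (rep->addr == addr) {
			*repp = rep->re_next;	/* Unlink the entry. */
			write_sequnlock(&sl);
			smp_mb();
			rep->re_freed = 1;	/* Caught by the lookup's re_freed check. */
			free(rep);
			return 0;
		}
		repp = &rep->re_next;
	}
	write_sequnlock(&sl);
	return -ENOENT;
}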
Example #2
void do_gettimeofday(struct timeval *tv)
{
	unsigned long seq;
	unsigned long usec, sec;
	unsigned long lost;

	do {
		seq = read_seqbegin(&xtime_lock);
		usec = get_timer_offset();

		lost = jiffies - wall_jiffies;
		if (lost)
			usec += lost * (1000000 / HZ);

		sec = xtime.tv_sec;
		usec += xtime.tv_nsec / 1000;
	} while (read_seqretry(&xtime_lock, seq));

	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
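This reader only makes sense alongside a writer that publishes xtime under the same lock; a minimal sketch of that tick-side update, assuming the classic xtime/xtime_lock pairing (illustrative, not the kernel's exact update path):

static void update_xtime_example(void)
{
	write_seqlock(&xtime_lock);	/* Concurrent readers now fail read_seqretry() and loop. */
	xtime.tv_nsec += 1000000000 / HZ;	/* One tick's worth of nanoseconds. */
	if (xtime.tv_nsec >= 1000000000) {
		xtime.tv_nsec -= 1000000000;
		xtime.tv_sec++;
	}
	write_sequnlock(&xtime_lock);
}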
Example #3
/*
 * High res timers changes: First we want to use full nsec for all
 * the math to avoid the double round off (on the offset and xtime).
 * Second, we want to allow a boot with HRT turned off at boot time.
 * This will cause hrtimer_use to be false, and we then fall back to 
 * the old code.  We also shorten the xtime lock region and eliminate
 * the lost tick code as this kernel will never have lost ticks under
 * the lock (i.e. wall_jiffies will never differ from jiffies except
 * when the write xtime lock is held).
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long seq;
	unsigned long sec, nsec, clk_nsec;
	unsigned long max_ntp_tick;

	do {
		seq = read_seqbegin(&xtime_lock);
#ifdef CONFIG_HIGH_RES_TIMERS
		if (hrtimer_use) 
			nsec = arch_cycle_to_nsec(get_arch_cycles(wall_jiffies));
		else 
#endif
			nsec = cur_timer->get_offset() * NSEC_PER_USEC;

		sec = xtime.tv_sec;
		clk_nsec = xtime.tv_nsec;
		max_ntp_tick = current_tick_length() >> (SHIFT_SCALE - 10);
	} while (read_seqretry(&xtime_lock, seq));

	/* ensure we don't advance beyond the current tick length */
	nsec = min(nsec, max_ntp_tick);

	nsec += clk_nsec;
				
	while (nsec >= NSEC_PER_SEC) {
		nsec -= NSEC_PER_SEC;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = nsec / NSEC_PER_USEC;
}
Example #4
File: time.c Project: 274914765/C
/*
 * This version of gettimeofday has near microsecond resolution.
 */
void do_gettimeofday(struct timeval *tv)
{
    unsigned long seq;
    unsigned long usec, sec;
    unsigned long max_ntp_tick = tick_usec - tickadj;

    do {
        seq = read_seqbegin(&xtime_lock);

        usec = do_gettimeoffset();

        /*
         * If time_adjust is negative then NTP is slowing the clock
         * so make sure not to go into next possible interval.
         * Better to lose some accuracy than have time go backwards..
         */
        if (unlikely(time_adjust < 0))
            usec = min(usec, max_ntp_tick);

        sec = xtime.tv_sec;
        usec += (xtime.tv_nsec / 1000);
    } while (read_seqretry(&xtime_lock, seq));

    while (usec >= 1000000) {
        usec -= 1000000;
        sec++;
    }

    tv->tv_sec = sec;
    tv->tv_usec = usec;
}
Example #5
/**
 * \brief Obtain last position update hi-res monotonic timestamp
 * \param pcm PCM handle
 * \param avail Number of available frames when timestamp was grabbed
 * \param mstamp Hi-res timestamp based on CLOCK_MONOTONIC rather than wall time.
 * \return 0 on success otherwise a negative error code
 *
 * This function is an extension to alsa-lib (which serves as a template for
 * all the ksound functions). However, since all the in-kernel ksound clients
 * measure time using the monotonic clock, this function is required if the
 * timestamp is to be meaningful.
 */
int ksnd_pcm_mtimestamp(ksnd_pcm_t *pcm, snd_pcm_uframes_t *avail, struct timespec *mstamp)
{
	unsigned long seq;
	int res;
	struct timespec tstamp;
	struct timespec tomono;
	do
	{
		seq = read_seqbegin(&xtime_lock);
		res = ksnd_pcm_htimestamp(pcm, avail, &tstamp);
		tomono = wall_to_monotonic;
	}
	while (read_seqretry(&xtime_lock, seq));
	if (res < 0)
		return res;
	mstamp->tv_sec = tstamp.tv_sec + tomono.tv_sec;
	mstamp->tv_nsec = tstamp.tv_nsec + tomono.tv_nsec;
	while (mstamp->tv_nsec >= NSEC_PER_SEC)
	{
		mstamp->tv_nsec -= NSEC_PER_SEC;
		++mstamp->tv_sec;
	}
	while (mstamp->tv_nsec < 0)
	{
		mstamp->tv_nsec += NSEC_PER_SEC;
		--mstamp->tv_sec;
	}
	return res;
}
Example #6
static int fb_counter_netrx(const struct fblock * const fb,
			    struct sk_buff * const skb,
			    enum path_type * const dir)
{
	int drop = 0;
	unsigned int seq;
	struct fb_counter_priv __percpu *fb_priv_cpu;

	fb_priv_cpu = this_cpu_ptr(rcu_dereference_raw(fb->private_data));
	prefetchw(skb->cb);
	do {
		seq = read_seqbegin(&fb_priv_cpu->lock);
		write_next_idp_to_skb(skb, fb->idp, fb_priv_cpu->port[*dir]);
		if (fb_priv_cpu->port[*dir] == IDP_UNKNOWN)
			drop = 1;
	} while (read_seqretry(&fb_priv_cpu->lock, seq));

	u64_stats_update_begin(&fb_priv_cpu->syncp);
	fb_priv_cpu->packets++;
	fb_priv_cpu->bytes += skb->len;
	u64_stats_update_end(&fb_priv_cpu->syncp);

	if (drop) {
		kfree_skb(skb);
		return PPE_DROPPED;
	}
	return PPE_SUCCESS;
}
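Note the division of labor: the seqlock guards the port table, while the u64_stats_sync instance guards the 64-bit counters. A hedged sketch of the matching counter read side, using the standard u64_stats fetch API (the helper name fb_counter_fetch is made up for illustration):

static void fb_counter_fetch(struct fb_counter_priv *p, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&p->syncp);
		*packets = p->packets;
		*bytes = p->bytes;
	} while (u64_stats_fetch_retry(&p->syncp, start));
}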
Example #7
/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base;
	struct timespec realtime_offset, wtm;
	unsigned long seq;

	if (!hrtimer_hres_active())
		return;

	do {
		seq = read_seqbegin(&xtime_lock);
		wtm = __get_wall_to_monotonic();
	} while (read_seqretry(&xtime_lock, seq));
	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);

	base = &__get_cpu_var(hrtimer_bases);

	/* Adjust CLOCK_REALTIME offset */
	raw_spin_lock(&base->lock);
	base->clock_base[CLOCK_REALTIME].offset =
		timespec_to_ktime(realtime_offset);

	hrtimer_force_reprogram(base, 0);
	raw_spin_unlock(&base->lock);
}
Example #8
/* 
 * Send packets to output.
 */
static inline int bcm_fast_path_output(struct sk_buff *skb)
{
	int ret = 0;
	struct dst_entry *dst = skb_dst(skb);
	struct hh_cache *hh = dst->hh;

	if (hh) {
		unsigned seq;
		int hh_len;

		do {
			int hh_alen;

			seq = read_seqbegin(&hh->hh_lock);
			hh_len = hh->hh_len;
			hh_alen = HH_DATA_ALIGN(hh_len);
			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		} while (read_seqretry(&hh->hh_lock, seq));

		skb_push(skb, hh_len);
		ret = hh->hh_output(skb);
		if (ret == 1)
			return 0; /* Don't return 1 */
	} else if (dst->neighbour) {
		ret = dst->neighbour->output(skb);
		if (ret == 1)
			return 0; /* Don't return 1 */
	}
	return ret;
}
Example #9
static int fb_udp_netrx_out(const struct fblock * const fb,
			    struct sk_buff * const skb)
{
	int fdrop = 0;
	idp_t next_fb;
	unsigned int seq;
	struct udphdr *hdr;
	struct fb_udp_priv *fb_priv;

	fb_priv = rcu_dereference_raw(fb->private_data);
	do {
		seq = read_seqbegin(&fb_priv->lock);
		next_fb = fb_priv->port[TYPE_EGRESS];
		if (next_fb == IDP_UNKNOWN)
			fdrop = 1;
	} while (read_seqretry(&fb_priv->lock, seq));
	if (fdrop)
		goto drop;

	hdr = (struct udphdr *) skb_push(skb, sizeof(*hdr));
	if (!hdr)
		goto drop;

	hdr->source = htons(fb_priv->own_port);
	hdr->dest = htons(fb_priv->rem_port);
	hdr->len = htons(skb->len);
	hdr->check = 0;

	write_next_idp_to_skb(skb, fb->idp, next_fb);
	return PPE_SUCCESS;
drop:
	kfree_skb(skb);
	return PPE_DROPPED;
}
Example #10
static int fb_huf_netrx(const struct fblock * const fb,
			  struct sk_buff * const skb,
			  enum path_type * const dir)
{
	int drop = 0;
	unsigned int seq;
	struct fb_huf_priv *fb_priv;

	fb_priv = rcu_dereference_raw(fb->private_data);
	do {
		seq = read_seqbegin(&fb_priv->lock);
		write_next_idp_to_skb(skb, fb->idp, fb_priv->port[*dir]);
		if (fb_priv->port[*dir] == IDP_UNKNOWN)
			drop = 1;
	} while (read_seqretry(&fb_priv->lock, seq));
	if (drop)
		goto drop;

	read_lock(&fb_priv->klock);

	/* Send it through compression. */
	compress(skb);

	read_unlock(&fb_priv->klock);

	return PPE_SUCCESS;
drop:
	printk(KERN_INFO "[fb_huf] drop packet. Out of key material?\n");
	kfree_skb(skb);
	return PPE_DROPPED;
}
Example #11
/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		do {
			seq = read_seqbegin(&xtime_lock);
			next = tick_next_period;
		} while (read_seqretry(&xtime_lock, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}
Example #12
int _schedule_next_int(unsigned long jiffie_f, long arch_cycle_in)
{
	long arch_cycle_offset; 
	unsigned long seq;
	/* 
	 * First figure where we are in time. 
	 * A note on locking.  We are under the timerlist_lock here.  This
	 * means that interrupts are off already, so don't use irq versions.
	 */
	if (unlikely(!hrtimer_use))
		return 0;
	do {
		seq = read_seqbegin(&xtime_lock);
		arch_cycle_offset = arch_cycle_in - get_arch_cycles(jiffie_f);
	} while (read_seqretry(&xtime_lock, seq));
	/*
	 * If time is already passed, just return saying so.
	 */
	if (arch_cycle_offset <= 0)
		return 1;

	__last_was_long = arch_cycles_per_jiffy == arch_cycle_in;
	reload_timer_chip(arch_cycle_offset);
	return 0;
}
Example #13
static ssize_t seq_lock_read(struct file* filp, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned seq;

	do {
		seq = read_seqbegin(&seq_lock);
		mdelay(10000);	/* Hold the read-side critical section open for 10 s. */
	} while (read_seqretry(&seq_lock, seq));

	return 0;
}
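Because mdelay(10000) holds the read-side window open for ten seconds, any writer that takes the lock during that window forces the reader around the loop again. A hedged companion writer for this demo (the write-side file operation is an assumption):

static ssize_t seq_lock_write(struct file *filp, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	write_seqlock(&seq_lock);	/* Bumps the sequence; the sleeping reader retries. */
	/* Updates to the data protected by seq_lock would go here. */
	write_sequnlock(&seq_lock);
	return count;
}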
Example #14
void phonet_get_local_port_range(int *min, int *max)
{
	unsigned seq;
	do {
		seq = read_seqbegin(&local_port_range_lock);
		if (min)
			*min = local_port_range[0];
		if (max)
			*max = local_port_range[1];
	} while (read_seqretry(&local_port_range_lock, seq));
}
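A hedged sketch of the matching writer, reusing local_port_range[] and its seqlock from above (illustrative; a real setter would also validate min <= max):

static void phonet_set_local_port_range(int min, int max)
{
	write_seqlock(&local_port_range_lock);
	local_port_range[0] = min;
	local_port_range[1] = max;
	write_sequnlock(&local_port_range_lock);
}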
Example #15
u64 get_jiffies_64(void)
{
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqbegin(&xtime_lock);
		ret = jiffies_64;
	} while (read_seqretry(&xtime_lock, seq));
	return ret;
}
Example #16
int _schedule_jiffies_int(unsigned long jiffie_f)
{
	long past;
	unsigned long seq;
	if (unlikely(!hrtimer_use))
		return 0;
	do {
		seq = read_seqbegin(&xtime_lock);
		past = get_arch_cycles(jiffie_f);
	} while (read_seqretry(&xtime_lock, seq));

	return (past >= arch_cycles_per_jiffy); 
}
Example #17
static void sync_seqlock_read(void)
{
    unsigned int data;
    unsigned int seq;

    do {
        seq = read_seqbegin(&sync.seqlock);

        data = sync.seqlock_data;
    } while (read_seqretry(&sync.seqlock, seq));

    printk("seqlock data: %u\n", data);
}
Example #18
inline struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
Example #19
int myinit(void)
{
	unsigned seq;

	printk("\n Module Inserted\n");
	do {
		seq = read_seqbegin(&lock);
		printk("\n I am in Read Mode ");
		printk("\n Global Value =%d lock.seq=%d\n", global, lock.sequence);
	} while (read_seqretry(&lock, seq));

	return 0;
}
Example #20
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
Example #21
static int __vsyscall(0) asmlinkage vgettimeofday(struct timeval *tv, struct timezone *tz)
{
	unsigned long seq;
	do {
		seq = read_seqbegin(&__vsyscall_gtod_lock);

		if (tv)
			do_vgettimeofday(tv);
		if (tz)
			do_get_tz(tz);

	} while (read_seqretry(&__vsyscall_gtod_lock, seq));

	return 0;
}
Example #22
int init_module(void)
{
    unsigned long seq;

    write_seqlock(&slock);
    printk("write_seqlock\n");
    write_sequnlock(&slock);
    printk("write_sequnlock\n");

    seq = read_seqbegin(&slock);
    printk("read_seqbegin\n");
    if (read_seqretry(&slock, seq))
        printk("wrong\n");

    return 0;
}
Example #23
cputime_t task_gtime(struct task_struct *t)
{
    unsigned int seq;
    cputime_t gtime;

    do {
        seq = read_seqbegin(&t->vtime_seqlock);

        gtime = t->gtime;
        if (t->flags & PF_VCPU)
            gtime += vtime_delta(t);

    } while (read_seqretry(&t->vtime_seqlock, seq));

    return gtime;
}
Example #24
void getnstimeofday (struct timespec *tv)
{
	unsigned long seq, sec, nsec;

	do {
		seq = read_seqbegin(&xtime_lock);
		sec = xtime.tv_sec;
		nsec = xtime.tv_nsec + time_interpolator_get_offset();
	} while (unlikely(read_seqretry(&xtime_lock, seq)));

	while (unlikely(nsec >= NSEC_PER_SEC)) {
		nsec -= NSEC_PER_SEC;
		++sec;
	}
	tv->tv_sec = sec;
	tv->tv_nsec = nsec;
}
Example #25
/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts, tom;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		xts = __current_kernel_time();
		tom = __get_wall_to_monotonic();
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(tom);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}
Example #26
static unsigned long long monotonic_clock_hpet(void)
{
	unsigned long long last_offset, this_offset, base;
	unsigned seq;

	/* atomically read monotonic base & last_offset */
	do {
		seq = read_seqbegin(&monotonic_lock);
		last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low;
		base = monotonic_base;
	} while (read_seqretry(&monotonic_lock, seq));

	/* Read the Time Stamp Counter */
	rdtscll(this_offset);

	/* return the value in ns */
	return base + cycles_2_ns(this_offset - last_offset);
}
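The read side above pairs with a tick-time writer that folds the elapsed cycles into the monotonic base; a hedged reconstruction under the same names (last_tsc_high/low, monotonic_base), not necessarily the exact original:

static void mark_offset_example(void)
{
	unsigned long long last_offset, this_offset;

	write_seqlock(&monotonic_lock);
	last_offset = ((unsigned long long)last_tsc_high << 32) | last_tsc_low;
	rdtscll(this_offset);			/* Sample the current TSC. */
	monotonic_base += cycles_2_ns(this_offset - last_offset);
	last_tsc_low = (unsigned long)this_offset;
	last_tsc_high = (unsigned long)(this_offset >> 32);
	write_sequnlock(&monotonic_lock);
}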
Example #27
/* Validate changes from /proc interface. */
static int ipv4_local_port_range(struct ctl_table *table, int write,
				 void __user *buffer,
				 size_t *lenp, loff_t *ppos)
{
	struct net *net =
		container_of(table->data, struct net, ipv4.ip_local_ports.range);
	int ret;
	int range[2];
	struct ctl_table tmp = {
		.data = &range,
		.maxlen = sizeof(range),
		.mode = table->mode,
		.extra1 = &ip_local_port_range_min,
		.extra2 = &ip_local_port_range_max,
	};

	inet_get_local_port_range(net, &range[0], &range[1]);

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		if (range[1] < range[0])
			ret = -EINVAL;
		else
			set_local_port_range(net, range);
	}

	return ret;
}


static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
{
	kgid_t *data = table->data;
	struct net *net =
		container_of(table->data, struct net, ipv4.ping_group_range.range);
	unsigned int seq;
	do {
		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);

		*low = data[0];
		*high = data[1];
	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
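A hedged sketch of the corresponding setter, assuming the same per-net seqlock covers the ping group range (illustrative reconstruction):

static void inet_set_ping_group_range_table(struct ctl_table *table, kgid_t low, kgid_t high)
{
	kgid_t *data = table->data;
	struct net *net =
		container_of(table->data, struct net, ipv4.ping_group_range.range);

	write_seqlock(&net->ipv4.ip_local_ports.lock);
	data[0] = low;
	data[1] = high;
	write_sequnlock(&net->ipv4.ip_local_ports.lock);
}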
Example #28
/* Stolen from ip_finish_output2
 * PRE : skb->dev is set to the device we are leaving by
 *       skb->dst is not NULL
 * POST: the packet is sent with the link layer header pushed
 *       the packet is destroyed
 */
static void ip_direct_send(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);
	unsigned seq;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len &&
		     (dev->header_ops && dev->header_ops->create))) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (hh) {
		do {
			int hh_alen;

			seq = read_seqbegin(&hh->hh_lock);
			hh_alen = HH_DATA_ALIGN(hh->hh_len);
			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		} while (read_seqretry(&hh->hh_lock, seq));
		skb_push(skb, hh->hh_len);
		hh->hh_output(skb);
	} else if (dst->neighbour)
		dst->neighbour->output(skb);
	else {
		if (net_ratelimit())
			DEBUGP(KERN_DEBUG "ipt_ROUTE: no hdr & no neighbour cache!\n");
		kfree_skb(skb);
	}
}
Example #29
/*
 * Keep track of foreign pages marked as PageForeign so that we don't
 * return them to the remote domain prematurely.
 *
 * PageForeign pages are pinned down by increasing their mapcount.
 *
 * All other pages are simply returned as is.
 */
void __gnttab_dma_map_page(struct page *page)
{
	unsigned int seq;

	if (!is_running_on_xen() || !PageForeign(page))
		return;

	do {
		seq = read_seqbegin(&gnttab_dma_lock);

		if (gnttab_dma_local_pfn(page))
			break;

		atomic_set(&page->_mapcount, 0);

		/* Make _mapcount visible before read_seqretry. */
		smp_mb();
	} while (unlikely(read_seqretry(&gnttab_dma_lock, seq)));
}
Example #30
/**
 * ns_to_timespec - Convert nanoseconds to timespec
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timespec representation of the nsec parameter.
 */
struct timespec ns_to_timespec(const s64 nsec)
{
	struct timespec ts;

	if (!nsec)
		return (struct timespec) {0, 0};

	ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec);
	if (unlikely(nsec < 0))
		set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec);

	return ts;
}

/**
 * ns_to_timeval - Convert nanoseconds to timeval
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timeval representation of the nsec parameter.
 */
struct timeval ns_to_timeval(const s64 nsec)
{
	struct timespec ts = ns_to_timespec(nsec);
	struct timeval tv;

	tv.tv_sec = ts.tv_sec;
	tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;

	return tv;
}

#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
{
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqbegin(&xtime_lock);
		ret = jiffies_64;
	} while (read_seqretry(&xtime_lock, seq));
	return ret;
}
#endif /* BITS_PER_LONG < 64 */