void
runtime·notetsleep(Note *n, int64 ns)
{
	int64 deadline, now;

	if(ns < 0) {
		runtime·notesleep(n);
		return;
	}

	if(runtime·atomicload(&n->key) != 0)
		return;

	if(m->profilehz > 0)
		runtime·setprof(false);
	deadline = runtime·nanotime() + ns;
	for(;;) {
		runtime·futexsleep(&n->key, 0, ns);
		if(runtime·atomicload(&n->key) != 0)
			break;
		now = runtime·nanotime();
		if(now >= deadline)
			break;
		ns = deadline - now;
	}
	if(m->profilehz > 0)
		runtime·setprof(true);
}
Example #2
void WorkSumMinMax::DoSumMinMax(u8 worker, TaskParam* pParam) {
	startTime = nanotime();
	this->worker = worker;

	// Initialization
	u32 size = m_width * m_height;
	u8* parse;
	u8* parseEnd = &m_greyScaleImage[size];

	u32 max = 0;
	u32 min = 255;
	u32 sum = 0;

	// Compute the maximum, minimum, and sum
	parse = m_greyScaleImage;
	while (parse < parseEnd) {
		u8 val = *parse++;
		sum += val;
		if (val > max)    { max = val; }
		if (val < min)    { min = val; }
	}

	m_max = max; m_min = min; m_sum = sum;

	endTime = nanotime();
}
Example #3
void *thr_time(void *arg__) {
  ArgTime *arg = arg__;

  tstamp
    t0 = nanotime(arg->clock),
    t0epoch = nanoepoch(),
    lastt = t0,
    curt;
  Tok *t;

  while (!inQ->EOT) {
    t = ll_pop(inQ);

    nlock_lock(t->edit);
    ll_push(timeQ, t);
  
    curt = nanotime(arg->clock); 
    t->sstart = curt - t0;
    t->slast  = curt - lastt;
    t->epoch  = t->sstart - t0epoch;

    nlock_unlock(t->edit);

    lastt = curt;
  }

  timeQ->EOT = true;

  return 0;
}
Example #4
/*
 * Step our concept of UTC.  This is done by modifying our estimate of
 * when we booted.
 * XXX: not locked.
 */
void
tc_setclock(struct timespec *ts)
{
	struct timespec tbef, taft;
	struct bintime bt, bt2;

	cpu_tick_calibrate(1);
	nanotime(&tbef);
	timespec2bintime(ts, &bt);
	binuptime(&bt2);
	bintime_sub(&bt, &bt2);
	bintime_add(&bt2, &boottimebin);
	boottimebin = bt;
	bintime2timeval(&bt, &boottime);

	/* XXX fiddle all the little crinkly bits around the fiords... */
	tc_windup();
	nanotime(&taft);
	if (timestepwarnings) {
		log(LOG_INFO,
		    "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n",
		    (intmax_t)tbef.tv_sec, tbef.tv_nsec,
		    (intmax_t)taft.tv_sec, taft.tv_nsec,
		    (intmax_t)ts->tv_sec, ts->tv_nsec);
	}
	cpu_tick_calibrate(1);
}
Example #5
int64
runtime·tickspersecond(void)
{
	int64 res, t0, t1, c0, c1;

	res = (int64)runtime·atomicload64((uint64*)&ticks);
	if(res != 0)
		return ticks;
	runtime·lock(&ticksLock);
	res = ticks;
	if(res == 0) {
		t0 = runtime·nanotime();
		c0 = runtime·cputicks();
		runtime·usleep(100*1000);
		t1 = runtime·nanotime();
		c1 = runtime·cputicks();
		if(t1 == t0)
			t1++;
		res = (c1-c0)*1000*1000*1000/(t1-t0);
		if(res == 0)
			res++;
		runtime·atomicstore64((uint64*)&ticks, res);
	}
	runtime·unlock(&ticksLock);
	return res;
}
Example #6
/*
 * Vnode op for write
 *
 * spec_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
static int
devfs_spec_write(struct vop_write_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);

	vn_unlock(vp);
	error = dev_dwrite(dev, uio, ap->a_ioflag, NULL);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	return (error);
}
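Example #7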
static int
cpufreq_latency(void)
{
	struct cpufreq *cf = cf_backend;
	struct timespec nta, ntb;
	const uint32_t n = 10;
	uint32_t i, j, l, m;
	uint64_t s;

	l = cpufreq_get_min();
	m = cpufreq_get_max();

	/*
	 * For each state, sample the average transition
	 * latency required to set the state for all CPUs.
	 */
	for (i = 0; i < cf->cf_state_count; i++) {

		for (s = 0, j = 0; j < n; j++) {

			/*
			 * Attempt to exclude possible
			 * caching done by the backend.
			 */
			if (i == 0)
				cpufreq_set_all_raw(l);
			else {
				cpufreq_set_all_raw(m);
			}

			nanotime(&nta);
			cpufreq_set_all_raw(cf->cf_state[i].cfs_freq);
			nanotime(&ntb);
			timespecsub(&ntb, &nta, &ntb);

			if (ntb.tv_sec != 0 ||
			    ntb.tv_nsec > CPUFREQ_LATENCY_MAX)
				continue;

			if (s >= UINT64_MAX - CPUFREQ_LATENCY_MAX)
				break;

			/* Convert to microseconds to prevent overflow */
			s += ntb.tv_nsec / 1000;
		}

		/*
		 * Consider the backend unsuitable if
		 * the transition latency was too high.
		 */
		if (s == 0)
			return EMSGSIZE;

		cf->cf_state[i].cfs_latency = s / n;
	}

	return 0;
}
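Example #8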
  uint64_t Execute(Pointers* pointers) override {
    if (key_pointer_) {
      void* memory = pointers->Remove(key_pointer_);
      uint64_t time_nsecs = nanotime();
      free(memory);
      return nanotime() - time_nsecs;
    }
    return 0;
  }
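Example #9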
static int _dump_file_from_fd(const char *title, const char *path, int fd) {
    if (title) printf("------ %s (%s", title, path);

    if (title) {
        struct stat st;
        if (memcmp(path, "/proc/", 6) && memcmp(path, "/sys/", 5) && !fstat(fd, &st)) {
            char stamp[80];
            time_t mtime = st.st_mtime;
            strftime(stamp, sizeof(stamp), "%Y-%m-%d %H:%M:%S", localtime(&mtime));
            printf(": %s", stamp);
        }
        printf(") ------\n");
    }

    bool newline = false;
    fd_set read_set;
    struct timeval tm;
    while (1) {
        FD_ZERO(&read_set);
        FD_SET(fd, &read_set);
        /* Timeout if no data is read for 30 seconds. */
        tm.tv_sec = 30;
        tm.tv_usec = 0;
        uint64_t elapsed = nanotime();
        int ret = TEMP_FAILURE_RETRY(select(fd + 1, &read_set, NULL, NULL, &tm));
        if (ret == -1) {
            printf("*** %s: select failed: %s\n", path, strerror(errno));
            newline = true;
            break;
        } else if (ret == 0) {
            elapsed = nanotime() - elapsed;
            printf("*** %s: Timed out after %.3fs\n", path,
                   (float) elapsed / NANOS_PER_SEC);
            newline = true;
            break;
        } else {
            char buffer[65536];
            ssize_t bytes_read = TEMP_FAILURE_RETRY(read(fd, buffer, sizeof(buffer)));
            if (bytes_read > 0) {
                fwrite(buffer, bytes_read, 1, stdout);
                newline = (buffer[bytes_read-1] == '\n');
            } else {
                if (bytes_read == -1) {
                    printf("*** %s: Failed to read from fd: %s", path, strerror(errno));
                    newline = true;
                }
                break;
            }
        }
    }
    close(fd);

    if (!newline) printf("\n");
    if (title) printf("\n");
    return 0;
}
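Example #10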
  uint64_t Execute(Pointers* pointers) override {
    uint64_t time_nsecs = nanotime();
    void* memory = calloc(n_elements_, size_);
    time_nsecs = nanotime() - time_nsecs;

    memset(memory, 0, n_elements_ * size_);
    pointers->Add(key_pointer_, memory);

    return time_nsecs;
  }
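Example #11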
  uint64_t Execute(Pointers* pointers) override {
    uint64_t time_nsecs = nanotime();
    void* memory = malloc(size_);
    time_nsecs = nanotime() - time_nsecs;

    memset(memory, 1, size_);
    pointers->Add(key_pointer_, memory);

    return time_nsecs;
  }
Example #12
float cpu_get_speed_mhz(void)
{
    unsigned long long diff1, diff2;
    unsigned long long val11, val12, val21, val22;

    val11 = nanotime();
    val21 = rdtsc();
    val22 = val21;  /* in case the loop body never runs */
    while ((val12 = nanotime()) < val11 + MEASURE_TIMEVAL)
        val22 = rdtsc();

    diff1 = val22 - val21;  /* elapsed CPU ticks */
    diff2 = val12 - val11;  /* elapsed time */
    return ((float)diff1 / diff2);
}
Example #13
int main(int argc, char **argv)
{
	long int iterations = 1024;
	if (argc == 2) {
		errno = 0;	/* strtol only sets errno on failure; clear it first */
		iterations = 1024 * strtol(argv[1], NULL, 10);
		if (errno) {
			DIE("Can't parse %s as a number.\n", argv[1]);
		}
		if (iterations < 0) {
			DIE("I'd like a *positive* number, please.\n");
		}
	}
	CASINT *ints = malloc(RING_SIZE * sizeof(CASINT));
	int i;
	for (i = 0; i < RING_SIZE; ++i) {
		ints[i] = 0;
	}
	pthread_t *threads = malloc(sizeof(pthread_t) * SEGMENTS);
	pthread_mutex_t *locks = malloc(sizeof(pthread_mutex_t) * SEGMENTS);
	struct incrementer_input *inputs =
	    malloc(sizeof(struct incrementer_input) * SEGMENTS);
	for (i = 0; i < SEGMENTS; ++i) {
		if (pthread_mutex_init(locks + i, NULL)) {
			DIE("Can't init lock.\n");
		}
		inputs[i].go_lock = &locks[i];
		inputs[i].offset = i * SEGMENT_SIZE;
		inputs[i].ints = ints;
		inputs[i].iterations = iterations;
		pthread_mutex_lock(&locks[i]);
		pthread_create(&threads[i], NULL, &incrementer, &inputs[i]);
	}

	double time_begin = nanotime();
	for (i = 0; i < SEGMENTS; ++i) {
		pthread_mutex_unlock(&locks[i]);
	}
	for (i = 0; i < SEGMENTS; ++i) {
		if (pthread_join(threads[i], NULL)) {
			DIE("pthread_join failed.\n");
		}
	}
	double time_end = nanotime();
	double time_taken = time_end - time_begin;
	printf("Ran %ld iterations in %f.\n", iterations, time_taken);
	printf("Nanoseconds/lock = %f.\n",
	       1e9 * time_taken / RING_SIZE / (double)iterations);
	return 0;
}
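Example #14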
  uint64_t Execute(Pointers* pointers) override {
    void* old_memory = nullptr;
    if (old_pointer_ != 0) {
      old_memory = pointers->Remove(old_pointer_);
    }

    uint64_t time_nsecs = nanotime();
    void* memory = realloc(old_memory, size_);
    time_nsecs = nanotime() - time_nsecs;

    memset(memory, 1, size_);
    pointers->Add(key_pointer_, memory);

    return time_nsecs;
  }
Example #15
int main() {
    uint64_t    n=50000;
    uint64_t    sum=0;
    uint64_t    latency=0;

    for (uint64_t i = 0; i < n; i++) {
        struct timespec start;
        struct timespec end;
        clock_gettime(CLOCK_REALTIME, &start);
        clock_gettime(CLOCK_REALTIME, &end);
        sum += nanotime(&end) - nanotime(&start);
    }
    
    printf("Latency: %f ns\n", (double)sum / n);
}
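Note: nanotime() is not a standard C function; each project above supplies its own. The kernel examples use the BSD nanotime(struct timespec *), while the user-space benchmarks expect a helper returning nanoseconds as an integer. A minimal sketch of such a helper, assuming POSIX clock_gettime() with CLOCK_MONOTONIC is available (the name and signature are illustrative, not taken from any project above):

#include <stdint.h>
#include <time.h>

/* Hypothetical helper: current monotonic time in nanoseconds. */
static uint64_t nanotime(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}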
Example #16
/*
 * Construct an audit record for the passed thread.
 */
static int
audit_record_ctor(void *mem, int size, void *arg, int flags)
{
	struct kaudit_record *ar;
	struct thread *td;
	struct ucred *cred;

	KASSERT(sizeof(*ar) == size, ("audit_record_ctor: wrong size"));

	td = arg;
	ar = mem;
	bzero(ar, sizeof(*ar));
	ar->k_ar.ar_magic = AUDIT_RECORD_MAGIC;
	nanotime(&ar->k_ar.ar_starttime);

	/*
	 * Export the subject credential.
	 */
	cred = td->td_ucred;
	cru2x(cred, &ar->k_ar.ar_subj_cred);
	ar->k_ar.ar_subj_ruid = cred->cr_ruid;
	ar->k_ar.ar_subj_rgid = cred->cr_rgid;
	ar->k_ar.ar_subj_egid = cred->cr_groups[0];
	ar->k_ar.ar_subj_auid = cred->cr_audit.ai_auid;
	ar->k_ar.ar_subj_asid = cred->cr_audit.ai_asid;
	ar->k_ar.ar_subj_pid = td->td_proc->p_pid;
	ar->k_ar.ar_subj_amask = cred->cr_audit.ai_mask;
	ar->k_ar.ar_subj_term_addr = cred->cr_audit.ai_termid;
	return (0);
}
Example #17
static int
devfs_spec_read(struct vop_read_args *ap)
{
	struct devfs_node *node;
	struct vnode *vp;
	struct uio *uio;
	cdev_t dev;
	int error;

	vp = ap->a_vp;
	dev = vp->v_rdev;
	uio = ap->a_uio;
	node = DEVFS_NODE(vp);

	if (dev == NULL)		/* device was revoked */
		return (EBADF);
	if (uio->uio_resid == 0)
		return (0);

	vn_unlock(vp);
	error = dev_dread(dev, uio, ap->a_ioflag, NULL);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	if (node)
		nanotime(&node->atime);

	return (error);
}
Example #18
/*
 * Construct an audit record for the passed thread.
 */
static void
audit_record_ctor(proc_t p, struct kaudit_record *ar)
{
	kauth_cred_t cred;

	bzero(ar, sizeof(*ar));
	ar->k_ar.ar_magic = AUDIT_RECORD_MAGIC;
	nanotime(&ar->k_ar.ar_starttime);

	if (PROC_NULL != p) {
		cred = kauth_cred_proc_ref(p);

		/*
		 * Export the subject credential.
		 */
		cru2x(cred, &ar->k_ar.ar_subj_cred);
		ar->k_ar.ar_subj_ruid = kauth_cred_getruid(cred);
		ar->k_ar.ar_subj_rgid = kauth_cred_getrgid(cred);
		ar->k_ar.ar_subj_egid = kauth_cred_getgid(cred);
		ar->k_ar.ar_subj_pid = p->p_pid;
		ar->k_ar.ar_subj_auid = cred->cr_audit.as_aia_p->ai_auid;
		ar->k_ar.ar_subj_asid = cred->cr_audit.as_aia_p->ai_asid;
		bcopy(&cred->cr_audit.as_mask, &ar->k_ar.ar_subj_amask,
		    sizeof(struct au_mask));
		bcopy(&cred->cr_audit.as_aia_p->ai_termid,
		    &ar->k_ar.ar_subj_term_addr, sizeof(struct au_tid_addr));
		kauth_cred_unref(&cred);
	}
}
Example #19
static void
timerproc(void)
{
	int64 delta, now;
	Timer *t;
	void (*f)(int64, Eface);
	Eface arg;

	for(;;) {
		runtime·lock(&timers);
		timers.sleeping = false;
		now = runtime·nanotime();
		for(;;) {
			if(timers.len == 0) {
				delta = -1;
				break;
			}
			t = timers.t[0];
			delta = t->when - now;
			if(delta > 0)
				break;
			if(t->period > 0) {
				// Periodic timer: leave in heap but adjust next time to fire.
				t->when += t->period * (1 + -delta/t->period);
				siftdown(0);
			} else {
				// One-shot timer: remove from heap.
				timers.t[0] = timers.t[--timers.len];
				timers.t[0]->i = 0;
				siftdown(0);
				t->i = -1;  // mark as removed
			}
			f = (void*)t->fv->fn;
			arg = t->arg;
			runtime·unlock(&timers);
			if(raceenabled)
				runtime·raceacquire(t);
			f(now, arg);
			// Clear f and arg to avoid holding references while sleeping.
			f = nil;
			USED(f);
			arg.type = nil;
			arg.data = nil;
			USED(&arg);
			runtime·lock(&timers);
		}
		if(delta < 0) {
			// No timers left: park until another timer is added.
			timers.rescheduling = true;
			runtime·parkunlock(&timers, "timer goroutine (idle)");
			continue;
		}
		// At least one timer pending: sleep until it fires.
		timers.sleeping = true;
		runtime·noteclear(&timers.waitnote);
		runtime·unlock(&timers);
		runtime·notetsleepg(&timers.waitnote, delta);
	}
}
Example #20
int64
runtime·cputicks() {
	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
	// TODO: need more entropy to better seed fastrand1.
	return runtime·nanotime();
}
Example #21
static void
cyclic_test_002(void)
{
	int error = 0;
	cyc_omni_handler_t hdlr;
	cyclic_id_t id;

	printf("%s: starting\n",__func__);

	hdlr.cyo_online = cyclic_test_002_online;
	hdlr.cyo_offline = cyclic_test_002_offline;
	hdlr.cyo_arg = NULL;

	nanotime(&test_002_start);

	mutex_enter(&cpu_lock);

	id = cyclic_add_omni(&hdlr);

	mutex_exit(&cpu_lock);

	DELAY(1200000);

	mutex_enter(&cpu_lock);

	cyclic_remove(id);

	mutex_exit(&cpu_lock);

	printf("%s: %s\n",__func__, error == 0 ? "passed":"failed");
}
Example #22
/**
 * Set host time based on time sync message from host
 */
static void
hv_set_host_time(void *context, int pending)
{
	hv_timesync_sc *softc = (hv_timesync_sc*)context;
	uint64_t hosttime = softc->time_msg.data;
	struct timespec guest_ts, host_ts;
	uint64_t host_tns;
	int64_t diff;
	int error;

	host_tns = (hosttime - HV_WLTIMEDELTA) * 100;
	host_ts.tv_sec = (time_t)(host_tns/HV_NANO_SEC_PER_SEC);
	host_ts.tv_nsec = (long)(host_tns%HV_NANO_SEC_PER_SEC);

	nanotime(&guest_ts);

	diff = (int64_t)host_ts.tv_sec - (int64_t)guest_ts.tv_sec;

	/*
	 * If host differs by 5 seconds then make the guest catch up
	 */
	if (diff > 5 || diff < -5) {
		error = kern_clock_settime(curthread, CLOCK_REALTIME,
		    &host_ts);
	}
}
Example #23
/*
 * Keyboard is generating events.  Turn this keystroke into an
 * event and put it in the queue.  If the queue is full, the
 * keystroke is lost (sorry!).
 */
void
wskbd_deliver_event(struct wskbd_softc *sc, u_int type, int value)
{
	struct wseventvar *evar;
	struct wscons_event *ev;
	int put;

	evar = sc->sc_base.me_evp;

	if (evar == NULL) {
		DPRINTF(("wskbd_input: not open\n"));
		return;
	}

#ifdef DIAGNOSTIC
	if (evar->q == NULL) {
		printf("wskbd_input: evar->q=NULL\n");
		return;
	}
#endif

	put = evar->put;
	ev = &evar->q[put];
	put = (put + 1) % WSEVENT_QSIZE;
	if (put == evar->get) {
		log(LOG_WARNING, "%s: event queue overflow\n",
		    sc->sc_base.me_dv.dv_xname);
		return;
	}
	ev->type = type;
	ev->value = value;
	nanotime(&ev->time);
	evar->put = put;
	WSEVENT_WAKEUP(evar);
}
Example #24
/**
 * Set host time based on time sync message from host
 */
static void
hv_set_host_time(void *context)
{
	time_sync_data* time_msg = (time_sync_data*) context;
	uint64_t hosttime = time_msg->data;
	struct timespec guest_ts, host_ts;
	uint64_t host_tns;
	int64_t diff;
	int error;

	host_tns = (hosttime - HV_WLTIMEDELTA) * 100;
	host_ts.tv_sec = (time_t)(host_tns/HV_NANO_SEC_PER_SEC);
	host_ts.tv_nsec = (long)(host_tns%HV_NANO_SEC_PER_SEC);

	nanotime(&guest_ts);
	
	diff = (int64_t)host_ts.tv_sec - (int64_t)guest_ts.tv_sec;

	/*
	 * If host differs by 5 seconds then make the guest catch up
	 */
	if (diff > 5 || diff < -5) {
		error = kern_clock_settime(curthread, CLOCK_REALTIME,
		    &host_ts);
	} 

	/*
	 * Free the hosttime that was allocated in hv_adj_guesttime()
	 */
	free(time_msg, M_DEVBUF);
}
Example #25
static void
cyclic_test_001(void)
{
	int error = 0;
	cyc_handler_t hdlr;
	cyc_time_t when;
	cyclic_id_t id;

	printf("%s: starting\n",__func__);

	hdlr.cyh_func = (cyc_func_t) cyclic_test_001_func;
	hdlr.cyh_arg = 0;

	when.cyt_when = 0;
	when.cyt_interval = 1000000000;

	nanotime(&test_001_start);

	mutex_enter(&cpu_lock);

	id = cyclic_add(&hdlr, &when);

	mutex_exit(&cpu_lock);

	DELAY(1200000);

	mutex_enter(&cpu_lock);

	cyclic_remove(id);

	mutex_exit(&cpu_lock);

	printf("%s: %s\n",__func__, error == 0 ? "passed":"failed");
}
Example #26
int64
runtime·cputicks() {
    // Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
    // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
    // runtime·randomNumber provides better seeding of fastrand1.
    return runtime·nanotime() + runtime·randomNumber;
}
Example #27
/*
 * initialize and allocate VM and memory for pipe
 */
int
pipe_create(struct pipe *cpipe)
{
	int error;

	/* so pipe_free_kmem() doesn't follow junk pointer */
	cpipe->pipe_buffer.buffer = NULL;
	/*
	 * protect so pipeclose() doesn't follow a junk pointer
	 * if pipespace() fails.
	 */
	bzero(&cpipe->pipe_sel, sizeof cpipe->pipe_sel);
	cpipe->pipe_state = 0;
	cpipe->pipe_peer = NULL;
	cpipe->pipe_busy = 0;

	error = pipespace(cpipe, PIPE_SIZE);
	if (error != 0)
		return (error);

	nanotime(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	cpipe->pipe_pgid = NO_PID;

	return (0);
}
Example #28
/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		splx(s);
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
		splx(s);
		return (EPERM);
	}
#endif

	tc_setclock(ts);

	timespecadd(&boottime, &delta, &boottime);

	resettodr();
	splx(s);

	return (0);
}
Example #29
static void
vmbus_timesync(struct hv_util_sc *sc, uint64_t hvtime, uint8_t tsflags)
{
	struct timespec vm_ts;
	uint64_t hv_ns, vm_ns;

	hv_ns = (hvtime - VMBUS_ICMSG_TS_BASE) * VMBUS_ICMSG_TS_FACTOR;
	nanotime(&vm_ts);
	vm_ns = (vm_ts.tv_sec * NANOSEC) + vm_ts.tv_nsec;

	if ((tsflags & VMBUS_ICMSG_TS_FLAG_SYNC) && !vmbus_ts_ignore_sync) {
		struct timespec hv_ts;

		if (bootverbose) {
			device_printf(sc->ic_dev, "apply sync request, "
			    "hv: %ju, vm: %ju\n",
			    (uintmax_t)hv_ns, (uintmax_t)vm_ns);
		}
		hv_ts.tv_sec = hv_ns / NANOSEC;
		hv_ts.tv_nsec = hv_ns % NANOSEC;
		kern_clock_settime(curthread, CLOCK_REALTIME, &hv_ts);
		/* Done! */
		return;
	}

	if ((tsflags & VMBUS_ICMSG_TS_FLAG_SAMPLE) &&
	    vmbus_ts_sample_thresh > 0) {
		int64_t diff;

		if (vmbus_ts_sample_verbose) {
			device_printf(sc->ic_dev, "sample request, "
			    "hv: %ju, vm: %ju\n",
			    (uintmax_t)hv_ns, (uintmax_t)vm_ns);
		}

		if (hv_ns > vm_ns)
			diff = hv_ns - vm_ns;
		else
			diff = vm_ns - hv_ns;
		/* nanosec -> millisec */
		diff /= 1000000;

		if (diff > vmbus_ts_sample_thresh) {
			struct timespec hv_ts;

			if (bootverbose) {
				device_printf(sc->ic_dev,
				    "apply sample request, hv: %ju, vm: %ju\n",
				    (uintmax_t)hv_ns, (uintmax_t)vm_ns);
			}
			hv_ts.tv_sec = hv_ns / NANOSEC;
			hv_ts.tv_nsec = hv_ns % NANOSEC;
			kern_clock_settime(curthread, CLOCK_REALTIME, &hv_ts);
		}
		/* Done */
		return;
	}
}
Example #30
/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_fo_read(struct file *fp, struct uio *uio,
		 struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	if (uio->uio_resid == 0)
		return 0;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag, fp);

	release_dev(dev);
	if (node)
		nanotime(&node->atime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}