/*
 * IP6 initialization: fill in IP6 protocol switch table.
 * All protocols not implemented in kernel go to raw IP6 protocol handler.
 */
void
ip6_init(void)
{
	struct protosw *pr;
	int i;
	struct timeval tv;

	pr = pffindproto(PF_INET6, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL)
		panic("ip6_init");
	for (i = 0; i < IPPROTO_MAX; i++)
		ip6_protox[i] = pr - inet6sw;
	for (pr = inet6domain.dom_protosw;
	    pr < inet6domain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET6 &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW)
			ip6_protox[pr->pr_protocol] = pr - inet6sw;

	inet6_pfil_hook.ph_type = PFIL_TYPE_AF;
	inet6_pfil_hook.ph_af = AF_INET6;
	if ((i = pfil_head_register(&inet6_pfil_hook)) != 0) {
		kprintf("%s: WARNING: unable to register pfil hook, "
			"error %d\n", __func__, i);
	}

	netisr_register(NETISR_IPV6, ip6_input, NULL);	/* XXX hashfn */

	scope6_init();
	addrsel_policy_init();
	nd6_init();
	frag6_init();

	/*
	 * In many cases krandom() here does NOT return a random number,
	 * as initialization during bootstrap happens in a fixed order.
	 */
	microtime(&tv);
	ip6_flow_seq = krandom() ^ tv.tv_usec;
	microtime(&tv);
	ip6_desync_factor = (krandom() ^ tv.tv_usec) % MAX_TEMP_DESYNC_FACTOR;
}
int
gettimeofday(struct timeval *tv, struct timezone *tz)
{
	/* Timezone conversion is not supported by this wrapper. */
	if (tz != NULL)
		return EINVAL;

	microtime(tv);
	return 0;
}
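/*
 * Illustrative sketch, not part of the original source: one way a caller
 * could use the gettimeofday() wrapper above to time a code region, using
 * only the standard timersub() macro from <sys/time.h>. The function name
 * time_example() and the spin loop standing in for the measured work are
 * assumptions made for this example.
 */
#include <stdio.h>
#include <sys/time.h>

static void
time_example(void)
{
	struct timeval start, end, elapsed;
	volatile unsigned long spin;

	gettimeofday(&start, NULL);
	for (spin = 0; spin < 1000000UL; spin++)
		;			/* stand-in for the work being timed */
	gettimeofday(&end, NULL);

	timersub(&end, &start, &elapsed);
	printf("elapsed: %ld.%06ld s\n",
	    (long)elapsed.tv_sec, (long)elapsed.tv_usec);
}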
/*
 * cfi_0002_busy_wait - wait until device is not busy
 */
static int
cfi_0002_busy_wait(struct cfi * const cfi, flash_off_t offset, u_long usec)
{
	int error;
#ifdef CFI_0002_STATS
	struct timeval start;
	struct timeval now;
	struct timeval delta;

	if (usec > cfi->cfi_0002_stats.busy_usec_max)
		cfi->cfi_0002_stats.busy_usec_max = usec;
	if (usec < cfi->cfi_0002_stats.busy_usec_min)
		cfi->cfi_0002_stats.busy_usec_min = usec;
	microtime(&start);
#endif

	if (usec > cfi->cfi_yield_time) {
		error = cfi_0002_busy_yield(cfi, offset, usec);
#ifdef CFI_0002_STATS
		microtime(&now);
		cfi->cfi_0002_stats.busy_yield++;
		timersub(&now, &start, &delta);
		timeradd(&delta, &cfi->cfi_0002_stats.busy_yield_tv,
		    &cfi->cfi_0002_stats.busy_yield_tv);
#endif
	} else {
		error = cfi_0002_busy_poll(cfi, offset, usec);
#ifdef CFI_0002_STATS
		microtime(&now);
		cfi->cfi_0002_stats.busy_poll++;
		timersub(&now, &start, &delta);
		timeradd(&delta, &cfi->cfi_0002_stats.busy_poll_tv,
		    &cfi->cfi_0002_stats.busy_poll_tv);
#endif
	}
	return error;
}
gboolean timeout2(gpointer data)
{
	if (!start)
		return true;

	timer[2] = microtime();

	Detect *detect = ((TimerAction *) data)->det;
	freenect *frn = ((TimerAction *) data)->frn;
	frn->reload();
	cv_image = frn->get_image_depth_rgb();
	//cv_image = frn->get_image_rgb();
	detect->NextFrame(cv_image, frn->get_image_rgb());
	//cv_image = detec->GetFrame();
	GtkWidget *widget = ((TimerAction *) data)->canvas_set;

	timer[2] = microtime() - timer[2];

	gtk_widget_queue_draw(widget);
	if (record_video)
		cvWriteFrame(writter_v, cv_image);

	return true;
}
int
drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct timeval now;
	union drm_wait_vblank *vblwait = data;
	int ret, flags, crtc, seq;

	if (!dev->irq_enabled || dev->vblank == NULL ||
	    vblwait->request.type & _DRM_VBLANK_SIGNAL)
		return (EINVAL);

	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
	crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;

	if (crtc >= dev->vblank->vb_num)
		return (EINVAL);

	if ((ret = drm_vblank_get(dev, crtc)) != 0)
		return (ret);
	seq = drm_vblank_count(dev, crtc);

	if (vblwait->request.type & _DRM_VBLANK_RELATIVE) {
		vblwait->request.sequence += seq;
		vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
	}

	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
	    (seq - vblwait->request.sequence) <= (1 << 23)) {
		vblwait->request.sequence = seq + 1;
	}

	if (flags & _DRM_VBLANK_EVENT)
		return (drm_queue_vblank_event(dev, crtc, vblwait, file_priv));

	DPRINTF("%s: %d waiting on %d, current %d\n", __func__, crtc,
	    vblwait->request.sequence, drm_vblank_count(dev, crtc));
	DRM_WAIT_ON(ret, &dev->vblank->vb_crtcs[crtc], &dev->vblank->vb_lock,
	    3 * hz, "drmvblq",
	    ((drm_vblank_count(dev, crtc) -
	    vblwait->request.sequence) <= (1 << 23)) ||
	    dev->irq_enabled == 0);

	microtime(&now);
	vblwait->reply.tval_sec = now.tv_sec;
	vblwait->reply.tval_usec = now.tv_usec;
	vblwait->reply.sequence = drm_vblank_count(dev, crtc);
	DPRINTF("%s: %d done waiting, seq = %d\n", __func__, crtc,
	    vblwait->reply.sequence);

	drm_vblank_put(dev, crtc);
	return (ret);
}
/*
 * resettodr:
 *
 *	Reset the time-of-day register with the current time.
 */
void
resettodr(void)
{
	struct timeval rtctime;

	/*
	 * Fetch the current time first; leave the RTC alone if the
	 * system clock has not been set yet.
	 */
	microtime(&rtctime);
	if (rtctime.tv_sec == 0)
		return;

	if (todr_handle != NULL &&
	    todr_settime(todr_handle, &rtctime) != 0)
		printf("resettodr: failed to set time\n");
}
struct ktr_header *
ktrgetheader(type)
	int type;
{
	register struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
	    M_TEMP, M_WAITOK);
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
	return (kth);
}
/*
 * Mark an interface up and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
void
if_up(struct ifnet *ifp)
{

	ifp->if_flags |= IFF_UP;
	microtime(&ifp->if_lastchange);
#ifdef notyet
	struct ifaddr *ifa;
	/* this has no effect on IP, and will kill all iso connections XXX */
	for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next)
		pfctlinput(PRC_IFUP, ifa->ifa_addr);
#endif
	rt_ifmsg(ifp);
}
int
sys_fmaster_kqueue(struct thread *td, struct fmaster_kqueue_args *uap)
{
	struct timeval time_start;
	int error;

	fmaster_log(td, LOG_DEBUG, "kqueue: started");
	microtime(&time_start);

	error = fmaster_kqueue_main(td, uap);

	fmaster_log_syscall_end(td, "kqueue", &time_start, error);

	return (error);
}
static int
synthfs_insertnode(struct synthfsnode *newnode_sp, struct synthfsnode *parent_sp)
{
	struct timeval now;

	DBG_ASSERT(parent_sp->s_type == SYNTHFS_DIRECTORY);

	TAILQ_INSERT_TAIL(&parent_sp->s_u.d.d_subnodes, newnode_sp, s_sibling);
	++parent_sp->s_u.d.d_entrycount;
	newnode_sp->s_parent = parent_sp;

	parent_sp->s_nodeflags |= IN_CHANGE | IN_MODIFIED;
	microtime(&now);
	synthfs_update(STOV(parent_sp), &now, &now, 0);

	return 0;
}
void doit()
{
	double t0, t1;
	const int imax = 1000;
	const char* key = "mypassword";
	const char* salt_bf = "$2a$99$01234567890ABCDEF$";
	const char* salt_256 = "$5$rounds=5000$01234567890ABCDEF$";
	const char* salt_512 = "$6$rounds=5000$01234567890ABCDEF$";

	t0 = microtime();
	for (int i = 0; i < imax; ++i) {
		crypt(key, salt_512);
	}
	t1 = microtime();
	printf("sha_512\t%f RPS\n", (double)(imax) / (t1 - t0));

	t0 = microtime();
	for (int i = 0; i < imax; ++i) {
		crypt(key, salt_256);
	}
	t1 = microtime();
	printf("sha_256\t%f RPS\n", (double)(imax) / (t1 - t0));
}
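/*
 * The userspace snippets in this collection (doit() above, the GTK timer
 * callback, and NetworkServer::accept_link()) call a microtime() that
 * returns wall-clock time as a double in seconds. That helper is not shown
 * here; the following is a minimal sketch of how such a function is commonly
 * written on top of gettimeofday(), offered as an assumption rather than the
 * original implementation.
 */
#include <sys/time.h>

static double
microtime(void)
{
	struct timeval tv;

	gettimeofday(&tv, NULL);	/* wall-clock time, microsecond resolution */
	return (double)tv.tv_sec + (double)tv.tv_usec / 1e6;
}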
int
sys_fmaster_dup(struct thread *td, struct fmaster_dup_args *uap)
{
	struct timeval time_start;
	int error, fd;

	fd = uap->fd;
	fmaster_log(td, LOG_DEBUG, "dup: started: fd=%u", fd);
	microtime(&time_start);

	error = fmaster_dup_main(td, fd);

	fmaster_log_syscall_end(td, "dup", &time_start, error);

	return (error);
}
/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init(void)
{
	struct timeval tv;

	ip6_maxfragpackets = nmbclusters / 32;
	ip6_maxfrags = nmbclusters / 4;

	/*
	 * In many cases random() here does NOT return a random number,
	 * as initialization during bootstrap happens in a fixed order.
	 */
	microtime(&tv);
	ip6_id = random() ^ tv.tv_usec;
	ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;
}
int
sys_fmaster_thr_exit(struct thread *td, struct fmaster_thr_exit_args *uap)
{
	struct timeval time_start;
	const char *sysname = "thr_exit";
	int error;

	fmaster_log(td, LOG_DEBUG, "%s: started", sysname);
	microtime(&time_start);

	error = fmaster_thr_exit_main(td, uap);

	fmaster_log_syscall_end(td, sysname, &time_start, error);

	return (error);
}
void PseudoWorld::onFrame(VideoFrame *frame)
{
	for (std::vector<VideoFrame::Blob *>::size_type i = 0;
	     i < frame->blobs.size(); i++) {
		VideoFrame::Blob *blob = frame->blobs[i];

		switch (blob->color) {
		case VideoFrame::Blob::COLOR_BALL:
			this->readBallBlob(frame, blob);
			break;
		case VideoFrame::Blob::COLOR_YELLOW:
		case VideoFrame::Blob::COLOR_BLUE:
			this->readGoalBlob(frame, blob);
			break;
		}
	}

	std::map<int, PseudoWorld::Ball *>::iterator it = this->balls.begin();
	PseudoWorld::Ball *ball;

	// Removing balls that are no longer visible
	while (it != this->balls.end()) {
		ball = it->second;

		if (frame->sequence - ball->sequence > 10) {
			delete it->second;
			it = this->balls.erase(it);
		} else {
			++it;
		}
	}

	// Set the goal as not visible, if necessary
	if (frame->sequence - this->target.sequence > 5) {
		this->target.visible = false;
	}

	this->age++;
	this->lastFrame = microtime();
}
gettimeofday()
{
	register struct a {
		struct timeval *tp;
		struct timezone *tzp;
	} *uap = (struct a *)u.u_ap;
	struct timeval atv;

	microtime(&atv);
	u.u_error = copyout((caddr_t)&atv, (caddr_t)uap->tp, sizeof (atv));
	if (u.u_error)
		return;
	if (uap->tzp == 0)
		return;
	/* SHOULD HAVE PER-PROCESS TIMEZONE */
	u.u_error = copyout((caddr_t)&tz, (caddr_t)uap->tzp, sizeof (tz));
}
/*
 * MPSAFE
 */
static struct ktr_header *
ktrgetheader(int type)
{
	struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */
	struct lwp *lp = curthread->td_lwp;

	kth = kmalloc(sizeof(struct ktr_header), M_KTRACE, M_WAITOK);
	kth->ktr_type = type;
	/* XXX threaded flag is a hack at the moment */
	kth->ktr_flags = (p->p_nthreads > 1) ? KTRH_THREADED : 0;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	kth->ktr_tid = lp->lwp_tid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN + 1);

	return (kth);
}
void
FFree(void *mem, char *file, int line)
{
	int s;
	int i;

	s = splhigh();
	for (i = 0; i < malloccount; i++) {
		if ((caddr_t) mem == malloced[i].address) {	/* found it */
			bzero(mem, malloced[i].size);		/* XXX */
			free(mem, M_DEVBUF);
			malloccount--;
			total_malloced -= malloced[i].size;
			if (debug & DEBUG_MEMFREE) {	/* keep track of recent frees */
				char *f = strrchr(file, '/'); /* chop off dirname if present */

				if (f == NULL)
					f = file;
				else
					f++;		/* skip the / */

				microtime(&freeinfo[lastfree].time);
				freeinfo[lastfree].seq = malloced[i].seq;
				freeinfo[lastfree].size = malloced[i].size;
				freeinfo[lastfree].line = line;
				freeinfo[lastfree].address = mem;
				bcopy(f, freeinfo[lastfree].file,
				    min(strlen(f), MCFILENAMELEN - 1));
				freeinfo[lastfree].file[MCFILENAMELEN - 1] = '\0';
				if (++lastfree == FREECOUNT)
					lastfree = 0;
			}
			if (i < malloccount)		/* more coming after */
				bcopy(&malloced[i + 1], &malloced[i],
				    (malloccount - i) * sizeof(struct mc));
			splx(s);
			return;
		}
	}
	splx(s);
	log(LOG_ERR,
	    "Freeing unallocated data at 0x%p from %s, line %d\n",
	    mem, file, line);
	panic("Free");
}
Link* NetworkServer::accept_link(){
	Link *link = serv_link->accept();
	if(link == NULL){
		log_error("accept failed! %s", strerror(errno));
		return NULL;
	}
	if(!ip_filter->check_pass(link->remote_ip)){
		log_debug("ip_filter deny link from %s:%d", link->remote_ip, link->remote_port);
		delete link;
		return NULL;
	}

	link->nodelay();
	link->noblock();
	link->create_time = microtime();
	link->active_time = link->create_time;
	return link;
}
caddr_t
MMalloc(int size, char *file, int line)
{
	int s;
	caddr_t result;
	int i;

	if (malloccount >= MALLOCENTRIES) {		/* too many */
		log(LOG_ERR,
		    "vinum: can't allocate table space to trace memory allocation");
		return 0;				/* can't continue */
	}
	/* Wait for malloc if we can */
	/*
	 * XXX We can wait if we're in process context.  How do we tell?
	 */
	result = malloc(size, M_DEVBUF, M_NOWAIT);
	if (result == NULL)
		log(LOG_ERR, "vinum: can't allocate %d bytes from %s:%d\n",
		    size, file, line);
	else {
		s = splhigh();
		for (i = 0; i < malloccount; i++) {
			if (((result + size) > malloced[i].address)
			    && (result < malloced[i].address + malloced[i].size)) /* overlap */
				panic("Malloc overlap");
		}
		if (result) {
			char *f = basename(file);

			i = malloccount++;
			total_malloced += size;
			microtime(&malloced[i].time);
			malloced[i].seq = mallocseq++;
			malloced[i].size = size;
			malloced[i].line = line;
			malloced[i].address = result;
			bcopy(f, malloced[i].file,
			    min(strlen(f), MCFILENAMELEN - 1));
			malloced[i].file[MCFILENAMELEN - 1] = '\0';
		}
		if (malloccount > highwater)
			highwater = malloccount;
		splx(s);
	}
	return result;
}
static int
pmtimer_resume(device_t dev)
{
	int pl;
	u_int second, minute, hour;
	struct timeval resume_time, tmp_time;

	/* modified for adjkerntz */
	pl = splsoftclock();
	timer_restore();		/* restore all the timers */
	inittodr(0);			/* adjust time to RTC */
	microtime(&resume_time);
	getmicrotime(&tmp_time);
	timevaladd(&tmp_time, &diff_time);

#ifdef FIXME
	/* XXX THIS DOESN'T WORK!!! */
	time = tmp_time;
#endif

#ifdef PMTIMER_FIXUP_CALLTODO
	/* Calculate the delta time suspended */
	timevalsub(&resume_time, &suspend_time);
	/* Fixup the calltodo list with the delta time. */
	adjust_timeout_calltodo(&resume_time);
#endif /* PMTIMER_FIXUP_CALLTODO */
	splx(pl);

#ifndef PMTIMER_FIXUP_CALLTODO
	second = resume_time.tv_sec - suspend_time.tv_sec;
#else /* PMTIMER_FIXUP_CALLTODO */
	/*
	 * We've already calculated resume_time to be the delta between
	 * the suspend and the resume.
	 */
	second = resume_time.tv_sec;
#endif /* PMTIMER_FIXUP_CALLTODO */
	hour = second / 3600;
	second %= 3600;
	minute = second / 60;
	second %= 60;
	log(LOG_NOTICE, "wakeup from sleeping state (slept %02d:%02d:%02d)\n",
	    hour, minute, second);
	return (0);
}
int
sys_fmaster_dup2(struct thread *td, struct fmaster_dup2_args *uap)
{
	struct timeval time_start;
	const char *sysname = "dup2";
	int error, from, to;

	from = uap->from;
	to = uap->to;
	fmaster_log(td, LOG_DEBUG, "%s: started: from=%u, to=%u",
	    sysname, from, to);
	microtime(&time_start);

	error = fmaster_dup2_main(td, from, to);

	fmaster_log_syscall_end(td, sysname, &time_start, error);

	return (error);
}
void
logrq(enum rqinfo_type type, union rqinfou info, struct buf *ubp)
{
	int s = splhigh();

	microtime(&rqip->timestamp);			/* when did this happen? */
	rqip->type = type;
	rqip->bp = ubp;					/* user buffer */
	switch (type) {
	case loginfo_user_bp:
	case loginfo_user_bpl:
	case loginfo_sdio:				/* subdisk I/O */
	case loginfo_sdiol:				/* subdisk I/O launch */
	case loginfo_sdiodone:				/* subdisk I/O complete */
		bcopy(info.bp, &rqip->info.b, sizeof(struct buf));
		rqip->devmajor = major(info.bp->b_dev);
		rqip->devminor = minor(info.bp->b_dev);
		break;

	case loginfo_iodone:
	case loginfo_rqe:
	case loginfo_raid5_data:
	case loginfo_raid5_parity:
		bcopy(info.rqe, &rqip->info.rqe, sizeof(struct rqelement));
		rqip->devmajor = major(info.rqe->b.b_dev);
		rqip->devminor = minor(info.rqe->b.b_dev);
		break;

	case loginfo_lockwait:
	case loginfo_lock:
	case loginfo_unlock:
		bcopy(info.lockinfo, &rqip->info.lockinfo,
		    sizeof(struct rangelock));
		break;

	case loginfo_unused:
		break;
	}
	rqip++;
	if (rqip >= &rqinfo[RQINFO_SIZE])		/* wrap around */
		rqip = rqinfo;
	splx(s);
}
static int
ngetdir_9p(node_9p *np)
{
	dir_9p *dp;
	struct timeval tv;
	int e;

	microtime(&tv);
	if (np->dirtimer && tv.tv_sec - np->dirtimer < DIRTIMEOUT)
		return 0;

	if ((e = stat_9p(np->nmp, np->fid, &dp)))
		return e;

	bcopy(dp, &np->dir, sizeof(*dp));
	np->dir.name = np->dir.uid = np->dir.gid = np->dir.muid = NULL;
	free_9p(dp);
	return 0;
}
/*
 * called with DEVFS_LOCK held
 */
static int
devfs_update(struct vnode *vp, struct timeval *access, struct timeval *modify)
{
	devnode_t *ip;
	struct timeval now;

	ip = VTODN(vp);
	if (vp->v_mount->mnt_flag & MNT_RDONLY) {
		ip->dn_access = 0;
		ip->dn_change = 0;
		ip->dn_update = 0;

		return (0);
	}
	microtime(&now);
	dn_times(ip, access, modify, &now);

	return (0);
}
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	u32 status;
	int handled = 0;
	struct timeval cur_vblank;
	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
	int i;

	status = VIA_READ(VIA_REG_INTERRUPT);
	if (status & VIA_IRQ_VBLANK_PENDING) {
		atomic_inc(&dev_priv->vbl_received);
		if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
			microtime(&cur_vblank);
			if (dev_priv->last_vblank_valid) {
				dev_priv->usec_per_vblank =
				    time_diff(&cur_vblank,
				    &dev_priv->last_vblank) >> 4;
			}
void
adb_msa3_complete(void *buffer, void *data_area, int adb_command)
{
	adb_event_t event;
	ADBDataBlock adbdata;
	u_char *buf = buffer;	/* raw ADB response: length byte, then data */
	int adbaddr;
	int error;
#ifdef ADB_DEBUG
	int i;

	if (adb_debug)
		printf("adb: transaction completion\n");
#endif

	adbaddr = ADB_CMDADDR(adb_command);
	error = GetADBInfo(&adbdata, adbaddr);
#ifdef ADB_DEBUG
	if (adb_debug)
		printf("adb: GetADBInfo returned %d\n", error);
#endif

	event.addr = adbaddr;
	event.hand_id = ADBMS_MSA3;
	event.def_addr = (int)(adbdata.origADBAddr);
	event.byte_count = buf[0];
	memcpy(event.bytes, buf + 1, event.byte_count);

#ifdef ADB_DEBUG
	if (adb_debug) {
		printf("adb: from %d at %d (org %d) %d:", event.addr,
		    event.hand_id, event.def_addr, buf[0]);
		for (i = 1; i <= buf[0]; i++)
			printf(" %x", buf[i]);
		printf("\n");
	}
#endif

	microtime(&event.timestamp);
	adb_processevent(&event);
}
static int
devfsspec_close(struct vnop_close_args *ap)
	/* struct vnop_close_args {
		struct vnode *a_vp;
		int  a_fflag;
		vfs_context_t a_context;
	} */
{
	struct vnode *vp = ap->a_vp;
	register devnode_t *dnp;
	struct timeval now;

	if (vnode_isinuse(vp, 1)) {
		DEVFS_LOCK();
		microtime(&now);
		dnp = VTODN(vp);
		dn_times(dnp, &now, &now, &now);
		DEVFS_UNLOCK();
	}

	return (VOCALL(spec_vnodeop_p, VOFFSET(vnop_close), ap));
}
static void
activate(struct sbsh_softc *sc)
{
	struct timeval tv;

	sc->regs->SR = 0xff;		/* clear it! */
	sc->regs->CTDR = sc->regs->LTDR = sc->regs->CRDR = sc->regs->LRDR = 0;

	sc->head_tdesc = sc->head_rdesc = 0;
	alloc_rx_buffers(sc);

	sc->regs->CRB &= ~RXDE;
	sc->regs->IMR = EXT | RXS | TXS | CRC | OFL | UFL;
	sc->regs->CR |= TXEN | RXEN;

	sc->state = ACTIVE;
	++sc->in_stats.attempts;
	microtime(&tv);
	sc->in_stats.last_time = tv.tv_sec;

	start_xmit_frames(sc);
}
static int
sd_openlog(vfs_context_t ctx)
{
	int error = 0;
	struct timeval tv;

	/* Open shutdown log */
	if ((error = vnode_open(PROC_SHUTDOWN_LOG,
	    (O_CREAT | FWRITE | O_NOFOLLOW), 0644, 0, &sd_logvp, ctx))) {
		printf("Failed to open %s: error %d\n",
		    PROC_SHUTDOWN_LOG, error);
		sd_logvp = NULLVP;
		return error;
	}

	vnode_setsize(sd_logvp, (off_t)0, 0, ctx);

	/* Write a little header */
	microtime(&tv);
	sd_log(ctx, "Process shutdown log.  Current time is %lu (in seconds).\n\n",
	    tv.tv_sec);

	return 0;
}