void  writecontrolaction(int k, int i)
/*
----------------------------------------------------------------
**   Input:   k = link index
**            i = control index
**   Output:  none
**   Purpose: writes control action taken to status report
**--------------------------------------------------------------
*/
{
   int n;
   switch (Control[i].Type)
   {
      case LOWLEVEL:
      case HILEVEL:
         n = Control[i].Node;
         sprintf(Msg,FMT54,clocktime(Atime,Htime),LinkTxt[Link[k].Type],
                 Link[k].ID,NodeTxt[getnodetype(n)],Node[n].ID);
         break;
      case TIMER:
      case TIMEOFDAY:
         sprintf(Msg,FMT55,clocktime(Atime,Htime),LinkTxt[Link[k].Type],
                 Link[k].ID);
         break;
      default:
         return;
   }
   writeline(Msg);
}
void
forcibly_timeout_mp(am_node *mp)
{
  mntfs *mf = mp->am_al->al_mnt;

  /*
   * Arrange to timeout this node
   */
  if (mf && ((mp->am_flags & AMF_ROOT) ||
	     (mf->mf_flags & (MFF_MOUNTING | MFF_UNMOUNTING)))) {
    /*
     * We aren't going to schedule a timeout, so we need to notify the
     * child here unless we are already unmounting, in which case that
     * process is responsible for notifying the child.
     */
    if (mf->mf_flags & MFF_UNMOUNTING)
      plog(XLOG_WARNING, "node %s is currently being unmounted, ignoring timeout request", mp->am_path);
    else {
      plog(XLOG_WARNING, "ignoring timeout request for active node %s", mp->am_path);
      notify_child(mp, AMQ_UMNT_FAILED, EBUSY, 0);
    }
  } else {
    plog(XLOG_INFO, "\"%s\" forcibly timed out", mp->am_path);
    mp->am_flags &= ~AMF_NOTIMEOUT;
    mp->am_ttl = clocktime(NULL);
    /*
     * Force mtime update of parent dir, to prevent DNLC/dcache from caching
     * the old entry, which could result in ESTALE errors, bad symlinks, and
     * more.
     */
    clocktime(&mp->am_parent->am_fattr.na_mtime);
    reschedule_timeout_mp();
  }
}
/*
 * Check that we are not burning resources
 */
static void
checkup(void)
{
  static int max_fd = 0;
  static char *max_mem = 0;

  int next_fd = dup(0);
  caddr_t next_mem = sbrk(0);
  close(next_fd);

  if (max_fd < next_fd) {
    dlog("%d new fds allocated; total is %d",
	 next_fd - max_fd, next_fd);
    max_fd = next_fd;
  }
  if (max_mem < next_mem) {
#ifdef HAVE_GETPAGESIZE
    dlog("%#lx bytes of memory allocated; total is %#lx (%ld pages)",
	 (long) (next_mem - max_mem),
	 (unsigned long) next_mem,
	 ((long) next_mem + getpagesize() - 1) / (long) getpagesize());
#else /* not HAVE_GETPAGESIZE */
    dlog("%#lx bytes of memory allocated; total is %#lx",
	 (long) (next_mem - max_mem),
	 (unsigned long) next_mem);
#endif /* not HAVE_GETPAGESIZE */
    max_mem = next_mem;
  }
}
#else  /* not DEBUG */
#define checkup()
#endif /* not DEBUG */


static int
#ifdef HAVE_SIGACTION
do_select(sigset_t smask, int fds, fd_set *fdp, struct timeval *tvp)
#else /* not HAVE_SIGACTION */
do_select(int smask, int fds, fd_set *fdp, struct timeval *tvp)
#endif /* not HAVE_SIGACTION */
{
  int sig;
  int nsel;

  if ((sig = setjmp(select_intr))) {
    select_intr_valid = 0;
    /* Got a signal */
    switch (sig) {
    case SIGINT:
    case SIGTERM:
      amd_state = Finishing;
      reschedule_timeout_mp();
      break;
    }
    nsel = -1;
    errno = EINTR;
  } else {
    select_intr_valid = 1;
    /*
     * Invalidate the current clock value
     */
    clock_valid = 0;
    /*
     * Allow interrupts.  If a signal
     * occurs, then it will cause a longjmp
     * up above.
     */
#ifdef HAVE_SIGACTION
    sigprocmask(SIG_SETMASK, &smask, NULL);
#else /* not HAVE_SIGACTION */
    (void) sigsetmask(smask);
#endif /* not HAVE_SIGACTION */
    /*
     * Wait for input
     */
    nsel = select(fds, fdp, (fd_set *) 0, (fd_set *) 0,
		  tvp->tv_sec ? tvp : (struct timeval *) 0);
  }

#ifdef HAVE_SIGACTION
  sigprocmask(SIG_BLOCK, &masked_sigs, NULL);
#else /* not HAVE_SIGACTION */
  (void) sigblock(MASKED_SIGS);
#endif /* not HAVE_SIGACTION */

  /*
   * Perhaps reload the cache?
   */
  if (do_mapc_reload < clocktime()) {
    mapc_reload();
    do_mapc_reload = clocktime() + gopt.map_reload_interval;
  }
  return nsel;
}
void
test_AlwaysInLimit(void)
{
	/* Timestamp is: 2010-01-02 11:00:00Z */
	const u_int32 timestamp = 3471418800UL;
	const u_short prime_incs[] = { 127, 151, 163, 179 };
	int	cyc;
	int	yday;
	u_char	whichprime;
	u_short	ydayinc;
	int	hour;
	int	minute;
	int	second;
	u_long	yearstart;
	u_int32	actual;
	u_int32	diff;

	yearstart = 0;
	for (cyc = 0; cyc < 5; cyc++) {
		settime(1900 + cyc * 65, 1, 1, 0, 0, 0);
		for (yday = -26000; yday < 26000; yday += ydayinc) {
			whichprime = abs(yday) % COUNTOF(prime_incs);
			ydayinc = prime_incs[whichprime];
			for (hour = -204; hour < 204; hour += 2) {
				for (minute = -60; minute < 60; minute++) {
					clocktime(yday, hour, minute, 30, 0,
						  timestamp, &yearstart, &actual);
					diff = actual - timestamp;
					if (diff >= 0x80000000UL)
						diff = ~diff + 1;
					TEST_ASSERT_TRUE(isLE(diff, (183u * SECSPERDAY)));
				}
			}
		}
	}
}
/*
 * refclock_process - process a sample from the clock
 * refclock_process_f - refclock_process with other than time1 fudge
 *
 * This routine converts the timecode in the form days, hours, minutes,
 * seconds and milliseconds/microseconds to internal timestamp format,
 * then constructs a new entry in the median filter circular buffer.
 * Return success (1) if the data are correct and consistent with the
 * conventional calendar.
 *
 * Important for PPS users: Normally, the pp->lastrec is set to the
 * system time when the on-time character is received and the pp->year,
 * ..., pp->second decoded and the seconds fraction pp->nsec in
 * nanoseconds).  When a PPS offset is available, pp->nsec is forced to
 * zero and the fraction for pp->lastrec is set to the PPS offset.
 */
int
refclock_process_f(
	struct refclockproc *pp,	/* refclock structure pointer */
	double fudge
	)
{
	l_fp offset, ltemp;

	/*
	 * Compute the timecode timestamp from the days, hours, minutes,
	 * seconds and milliseconds/microseconds of the timecode. Use
	 * clocktime() for the aggregate seconds and the msec/usec for
	 * the fraction, when present. Note that this code relies on the
	 * filesystem time for the years and does not use the years of
	 * the timecode.
	 */
	if (!clocktime(pp->day, pp->hour, pp->minute, pp->second, GMT,
		       pp->lastrec.l_ui, &pp->yearstart, &offset.l_ui))
		return (0);

	offset.l_uf = 0;
	DTOLFP(pp->nsec / 1e9, &ltemp);
	L_ADD(&offset, &ltemp);
	refclock_process_offset(pp, offset, pp->lastrec, fudge);
	return (1);
}
int
ndbm_init(mnt_map *m, char *map, time_t *tp)
{
  DBM *db;

  db = dbm_open(map, O_RDONLY, 0);
  if (db) {
    struct stat stb;
    int error;
#ifdef DBM_SUFFIX
    char dbfilename[256];

    xstrlcpy(dbfilename, map, sizeof(dbfilename));
    xstrlcat(dbfilename, DBM_SUFFIX, sizeof(dbfilename));
    error = stat(dbfilename, &stb);
#else /* not DBM_SUFFIX */
    error = fstat(dbm_pagfno(db), &stb);
#endif /* not DBM_SUFFIX */
    if (error < 0)
      *tp = clocktime(NULL);
    else
      *tp = stb.st_mtime;
    dbm_close(db);
    return 0;
  }
  return errno;
}
int
exec_search(mnt_map *m, char *map, char *key, char **pval, time_t *tp)
{
  int mapfd, ret;

  if ((ret = exec_check_perm(map)) != 0) {
    return ret;
  }

  if (!key)
    return 0;

  if (logfp)
    fflush(logfp);
  dlog("exec_search \"%s\", key: \"%s\"", map, key);
  mapfd = exec_map_open(map, key);

  if (mapfd >= 0) {
    if (tp)
      *tp = clocktime(NULL);
    return exec_parse_qanswer(mapfd, map, key, pval, tp);
  }

  return errno;
}
/*
 * refclock_process - process a sample from the clock
 *
 * This routine converts the timecode in the form days, hours, minutes,
 * seconds and milliseconds/microseconds to internal timestamp format,
 * then constructs a new entry in the median filter circular buffer.
 * Return success (1) if the data are correct and consistent with the
 * conventional calendar.
 */
int
refclock_process(
	struct refclockproc *pp
	)
{
	l_fp offset;

	/*
	 * Compute the timecode timestamp from the days, hours, minutes,
	 * seconds and milliseconds/microseconds of the timecode. Use
	 * clocktime() for the aggregate seconds and the msec/usec for
	 * the fraction, when present. Note that this code relies on the
	 * filesystem time for the years and does not use the years of
	 * the timecode.
	 */
	if (!clocktime(pp->day, pp->hour, pp->minute, pp->second, GMT,
		       pp->lastrec.l_ui, &pp->yearstart, &offset.l_ui))
		return (0);

	if (pp->usec) {
		TVUTOTSF(pp->usec, offset.l_uf);
	} else {
		MSUTOTSF(pp->msec, offset.l_uf);
	}
	refclock_process_offset(pp, offset, pp->lastrec, pp->fudgetime1);
	return (1);
}
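/*
 * Illustrative sketch only (not part of the sources above): a minimal,
 * hypothetical driver receive routine showing the usual calling pattern
 * for refclock_process().  It assumes <ntp_refclock.h> is included; the
 * function name and the literal timecode values are invented for this
 * example, while the refclockproc fields and refclock_process() itself
 * come from the code above.
 */
static void
example_sample(struct refclockproc *pp, l_fp ontime)
{
	pp->lastrec = ontime;	/* system time at the on-time character */

	/* fields a real driver would decode from its timecode string */
	pp->day = 175;		/* day of year */
	pp->hour = 12;
	pp->minute = 50;
	pp->second = 0;
	pp->msec = 0;		/* no sub-second part in this sketch */
	pp->usec = 0;

	/* convert to an l_fp offset and insert it into the median filter */
	if (!refclock_process(pp))
		pp->badformat++;	/* sample rejected as inconsistent */
}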
void
autofs_mounted(am_node *mp)
{
  autofs_fh_t *fh = mp->am_autofs_fh;
  unsigned long timeout = gopt.am_timeo;

  close(fh->kernelfd);
  fh->kernelfd = -1;

  autofs_get_mp(mp);

  /* Get autofs protocol version */
  if (ioctl(fh->ioctlfd, AUTOFS_IOC_PROTOVER, &fh->version) < 0) {
    plog(XLOG_ERROR, "AUTOFS_IOC_PROTOVER: %s", strerror(errno));
    fh->version = AUTOFS_MIN_VERSION;
    plog(XLOG_ERROR, "autofs: assuming protocol version %d", fh->version);
  } else
    plog(XLOG_INFO, "autofs: using protocol version %d", fh->version);

  /* set expiration timeout */
  if (ioctl(fh->ioctlfd, AUTOFS_IOC_SETTIMEOUT, &timeout) < 0)
    plog(XLOG_ERROR, "AUTOFS_IOC_SETTIMEOUT: %s", strerror(errno));

  /* tell the daemon to call us for expirations */
  mp->am_autofs_ttl = clocktime(NULL) + gopt.am_timeo_w;
}
myapplication::myapplication()
{
  this->sec = 0;
  this->min = 0;
  this->hour = 0;
  this->day = 0;
  this->resolution = clocktime();
}
/*
 * Initialize an allocated mount node.
 * It is assumed that the mount node was b-zero'd
 * before getting here so anything that would
 * be set to zero isn't done here.
 */
void
init_map(am_node *mp, char *dir)
{
  /*
   * mp->am_mapno is initialized by exported_ap_alloc
   * other fields don't need to be set to zero.
   */
  mp->am_mnt = new_mntfs();
  mp->am_mfarray = 0;
  mp->am_name = strdup(dir);
  mp->am_path = strdup(dir);
  mp->am_gen = new_gen();
#ifdef HAVE_FS_AUTOFS
  mp->am_autofs_fh = 0;
#endif /* HAVE_FS_AUTOFS */

  mp->am_timeo = gopt.am_timeo;
  mp->am_attr.ns_status = NFS_OK;
  mp->am_fattr = gen_fattr;
  mp->am_fattr.na_fsid = 42;
  mp->am_fattr.na_fileid = mp->am_gen;
  clocktime(&mp->am_fattr.na_atime);
  /* next line copies a "struct nfstime" among several fields */
  mp->am_fattr.na_mtime = mp->am_fattr.na_ctime = mp->am_fattr.na_atime;

  new_ttl(mp);
  mp->am_stats.s_mtime = mp->am_fattr.na_atime.nt_seconds;
  mp->am_dev = -1;
  mp->am_rdev = -1;
}
/*
 * Compute a new time to live value for a node.
 */
void
new_ttl(am_node *mp)
{
  mp->am_timeo_w = 0;
  mp->am_ttl = clocktime(&mp->am_fattr.na_atime);
  mp->am_ttl += mp->am_timeo;	/* sun's -tl option */
}
/*
 * Retry a mount
 */
static void
amfs_retry(int rc, int term, opaque_t arg)
{
  struct continuation *cp = (struct continuation *) arg;
  am_node *mp = cp->mp;
  int error = 0;

  dlog("Commencing retry for mount of %s", mp->am_path);

  new_ttl(mp);

  if ((cp->start + ALLOWED_MOUNT_TIME) < clocktime(NULL)) {
    /*
     * The entire mount has timed out.  Set the error code and skip past all
     * the mntfs's so that amfs_bgmount will not have any more
     * ways to try the mount, thus causing an error.
     */
    plog(XLOG_INFO, "mount of \"%s\" has timed out", mp->am_path);
    error = ETIMEDOUT;
    while (*cp->mf)
      cp->mf++;
    /* explicitly forbid further retries after timeout */
    cp->retry = FALSE;
  }
  if (error || !IN_PROGRESS(cp))
    error = amfs_bgmount(cp);

  reschedule_timeout_mp();
}
void
autofs_mounted(am_node *mp)
{
  mntfs *mf = mp->am_mnt;
  autofs_fh_t *fh = mf->mf_autofs_fh;
  unsigned long timeout = gopt.am_timeo;

  close(fh->kernelfd);
  fh->kernelfd = -1;

  fh->ioctlfd = open(mf->mf_mount, O_RDONLY);

  /* Get autofs protocol version */
  if (ioctl(fh->ioctlfd, AUTOFS_IOC_PROTOVER, &fh->version) < 0) {
    plog(XLOG_ERROR, "AUTOFS_IOC_PROTOVER: %s", strerror(errno));
    fh->version = AUTOFS_MIN_VERSION;
    plog(XLOG_ERROR, "autofs: assuming protocol version %d", fh->version);
  } else
    plog(XLOG_INFO, "autofs: using protocol version %d", fh->version);

  if (fh->version < 4) {
    /* no support for subdirs */
    plog(XLOG_INFO, "Turning off autofs support for host filesystems");
    amfs_host_ops.nfs_fs_flags &= ~FS_AUTOFS;
    amfs_host_ops.autofs_fs_flags &= ~FS_AUTOFS;
  }

  /* set expiration timeout */
  if (ioctl(fh->ioctlfd, AUTOFS_IOC_SETTIMEOUT, &timeout) < 0)
    plog(XLOG_ERROR, "AUTOFS_IOC_SETTIMEOUT: %s", strerror(errno));

  /* tell the daemon to call us for expirations */
  mp->am_ttl = clocktime() + gopt.am_timeo_w;
}
am_node *
amfs_generic_mount_child(am_node *new_mp, int *error_return)
{
  int error;
  struct continuation *cp;	/* Continuation structure if need to mount */

  dlog("in amfs_generic_mount_child");

  *error_return = error = 0;	/* Error so far */

  /* we have an errorfs attached to the am_node, free it */
  if (new_mp->am_al)
    free_loc(new_mp->am_al);
  new_mp->am_al = NULL;

  /*
   * Construct a continuation
   */
  cp = ALLOC(struct continuation);
  cp->callout = 0;
  cp->mp = new_mp;
  cp->retry = TRUE;
  cp->start = clocktime(NULL);
  cp->al = new_mp->am_alarray;

  /*
   * Try and mount the file system.  If this succeeds immediately (possible
   * for a ufs file system) then return the attributes, otherwise just
   * return an error.
   */
  error = amfs_bgmount(cp);
  reschedule_timeout_mp();
  if (!error)
    return new_mp;

  /*
   * Code for quick reply.  If current_transp is set, then it's the
   * transp that's been passed down from nfs_dispatcher() or from
   * autofs_program_[123]().
   * If new_mp->am_transp is not already set, set it by copying in
   * current_transp.  Once am_transp is set, nfs_quick_reply() and
   * autofs_mount_succeeded() can use it to send a reply to the
   * client that requested this mount.
   */
  if (current_transp && !new_mp->am_transp) {
    dlog("Saving RPC transport for %s", new_mp->am_path);
    new_mp->am_transp = (SVCXPRT *) xmalloc(sizeof(SVCXPRT));
    *(new_mp->am_transp) = *current_transp;
  }
  if (error && new_mp->am_al && new_mp->am_al->al_mnt &&
      (new_mp->am_al->al_mnt->mf_ops == &amfs_error_ops))
    new_mp->am_error = error;

  if (new_mp->am_error > 0)
    assign_error_mntfs(new_mp);

  ereturn(error);
}
void
test_NoReasonableConversion(void)
{
	/* Timestamp is: 2010-01-02 11:00:00Z */
	const u_int32 timestamp = 3471418800UL;
	const int yday = 100, hour = 12, minute = 0, second = 0, tzoff = 0;

	u_long yearstart = 0;
	u_int32 actual;

	TEST_ASSERT_FALSE(clocktime(yday, hour, minute, second, tzoff,
				    timestamp, &yearstart, &actual));
}
static FILE *
file_open(char *map, time_t *tp)
{
  FILE *mapf = fopen(map, "r");

  if (mapf && tp) {
    struct stat stb;

    if (fstat(fileno(mapf), &stb) < 0)
      *tp = clocktime(NULL);
    else
      *tp = stb.st_mtime;
  }
  return mapf;
}
void  writeruleaction(int k, char *ruleID)
/*
**--------------------------------------------------------------
**   Input:   k       = link index
**            *ruleID = rule ID
**   Output:  none
**   Purpose: writes rule action taken to status report
**--------------------------------------------------------------
*/
{
   sprintf(Msg,FMT63,clocktime(Atime,Htime),LinkTxt[Link[k].Type],
           Link[k].ID,ruleID);
   writeline(Msg);
}
void
test_CurrentYear(void)
{
	// Timestamp: 2010-06-24 12:50:00Z
	const u_int32 timestamp = 3486372600UL;
	const u_int32 expected = timestamp;	// exactly the same.
	const int yday = 175, hour = 12, minute = 50, second = 0, tzoff = 0;

	u_long yearstart = 0;
	u_int32 actual;

	TEST_ASSERT_TRUE(clocktime(yday, hour, minute, second, tzoff,
				   timestamp, &yearstart, &actual));
	TEST_ASSERT_EQUAL(expected, actual);
}
void  writehyderr(int errnode)
/*
**-----------------------------------------------------------
**   Input:   none
**   Output:  none
**   Purpose: outputs status & checks connectivity when
**            network hydraulic equations cannot be solved.
**-----------------------------------------------------------
*/
{
   sprintf(Msg,FMT62,clocktime(Atime,Htime),Node[errnode].ID);
   if (Messageflag) writeline(Msg);
   writehydstat(0,0);
   disconnected();
}                        /* End of writehyderr */
/*
 * Output the time of day and hostname to the logfile
 */
static void
show_time_host_and_name(int lvl)
{
  static time_t last_t = 0;
  static char *last_ctime = 0;
  time_t t = clocktime();
  char *sev;

  if (t != last_t) {
    last_ctime = ctime(&t);
    last_t = t;
  }
  switch (lvl) {
  case XLOG_FATAL:
    sev = "fatal:";
    break;
  case XLOG_ERROR:
    sev = "error:";
    break;
  case XLOG_USER:
    sev = "user: ";
    break;
  case XLOG_WARNING:
    sev = "warn: ";
    break;
  case XLOG_INFO:
    sev = "info: ";
    break;
  case XLOG_DEBUG:
    sev = "debug:";
    break;
  case XLOG_MAP:
    sev = "map: ";
    break;
  case XLOG_STATS:
    sev = "stats:";
    break;
  default:
    sev = "hmm: ";
    break;
  }
  fprintf(logfp, "%15.15s %s %s[%ld]/%s ",
	  last_ctime + 4, am_get_hostname(),
	  am_get_progname(), (long) am_mypid,
	  sev);
}
/*
 * Timeout all nodes waiting on
 * a given Fserver.
 */
void
map_flush_srvr(fserver *fs)
{
  int i;
  int done = 0;

  for (i = last_used_map; i >= 0; --i) {
    am_node *mp = exported_ap[i];

    if (mp && mp->am_mnt && mp->am_mnt->mf_server == fs) {
      plog(XLOG_INFO, "Flushed %s; dependent on %s", mp->am_path, fs->fs_host);
      mp->am_ttl = clocktime(NULL);
      done = 1;
    }
  }
  if (done)
    reschedule_timeout_mp();
}
void
test_NextYear(void)
{
	/*
	 * Timestamp is: 2009-12-31 23:00:00Z
	 * Time sent into function is 01:00:00
	 * (which is meant to be 2010-01-01 01:00:00Z)
	 */
	const u_int32 timestamp = 3471289200UL;
	const u_int32 expected = 3471296400UL;
	const int yday = 1, hour = 1, minute = 0, second = 0, tzoff = 0;

	u_long yearstart = 0;
	u_int32 actual;

	TEST_ASSERT_TRUE(clocktime(yday, hour, minute, second, tzoff,
				   timestamp, &yearstart, &actual));
	TEST_ASSERT_EQUAL(expected, actual);
}
void
forcibly_timeout_mp(am_node *mp)
{
  mntfs *mf = mp->am_mnt;

  /*
   * Arrange to timeout this node
   */
  if (mf && ((mp->am_flags & AMF_ROOT) ||
	     (mf->mf_flags & (MFF_MOUNTING | MFF_UNMOUNTING)))) {
    if (!(mf->mf_flags & MFF_UNMOUNTING))
      plog(XLOG_WARNING, "ignoring timeout request for active node %s", mp->am_path);
  } else {
    plog(XLOG_INFO, "\"%s\" forcibly timed out", mp->am_path);
    mp->am_flags &= ~AMF_NOTIMEOUT;
    mp->am_ttl = clocktime();
    reschedule_timeout_mp();
  }
}
void
autofs_timeout_mp(am_node *mp)
{
  autofs_fh_t *fh = mp->am_autofs_fh;
  time_t now = clocktime(NULL);

  /* update the ttl */
  mp->am_autofs_ttl = now + gopt.am_timeo_w;

  if (fh->version < 4) {
    struct autofs_packet_expire pkt;

    while (ioctl(fh->ioctlfd, AUTOFS_IOC_EXPIRE, &pkt) == 0)
      autofs_handle_expire(mp, &pkt);
    return;
  }

#if AUTOFS_MAX_VERSION >= 4
  run_task(autofs_timeout_mp_task, mp, NULL, NULL);
#endif /* AUTOFS_MAX_VERSION >= 4 */
}
void
test_WrongYearStart(void)
{
	/*
	 * Timestamp (rec_ui) is: 2010-01-02 11:00:00Z
	 * Time sent into function is 11:00:00.
	 * Yearstart sent into function is the yearstart of 2009!
	 */
	const u_int32 timestamp = 3471418800UL;
	const u_int32 expected = timestamp;
	const int yday = 2, hour = 11, minute = 0, second = 0, tzoff = 0;

	u_long yearstart = 302024100UL; // Yearstart of 2009.
	u_int32 actual;

	TEST_ASSERT_TRUE(clocktime(yday, hour, minute, second, tzoff,
				   timestamp, &yearstart, &actual));
	TEST_ASSERT_EQUAL(expected, actual);
}
int
ndbm_init(char *map, time_t *tp)
{
  DBM *db;

  db = dbm_open(map, O_RDONLY, 0);
  if (db) {
    struct stat stb;

    if (fstat(dbm_pagfno(db), &stb) < 0)
      *tp = clocktime();
    else
      *tp = stb.st_mtime;
    dbm_close(db);
    return 0;
  }
  return errno;
}
void
autofs_timeout_mp(am_node *mp)
{
  autofs_fh_t *fh = mp->am_mnt->mf_autofs_fh;
  time_t now = clocktime();

  /* update the ttl, but only if we're not going down */
  if (mp->am_flags & AMF_NOTIMEOUT)
    mp->am_ttl = now + gopt.am_timeo_w;

  if (fh->version < 4) {
    struct autofs_packet_expire pkt;

    while (ioctl(fh->ioctlfd, AUTOFS_IOC_EXPIRE, &pkt) == 0)
      autofs_handle_expire(mp, &pkt);
    return;
  }

#ifdef autofs_ptype_expire_multi
  run_task(autofs_timeout_mp_task, mp, NULL, NULL);
#endif /* autofs_ptype_expire_multi */
}
void
test_TimeZoneOffset(void)
{
	/*
	 * Timestamp (rec_ui) is: 2010-06-24 12:00:00 +0800
	 * (which is 2010-06-24 04:00:00Z)
	 *
	 * Time sent into function is 04:00:00 +0800
	 */
	const u_int32 timestamp = 3486369600UL;
	const u_int32 expected = timestamp;
	const int yday = 175, hour = 4, minute = 0, second = 0, tzoff = 8;

	u_long yearstart = 0;
	u_int32 actual;

	TEST_ASSERT_TRUE(clocktime(yday, hour, minute, second, tzoff,
				   timestamp, &yearstart, &actual));
	TEST_ASSERT_EQUAL(expected, actual);
}
void  writerelerr(int iter, float relerr)
/*
**-----------------------------------------------------------------
**   Input:   iter   = current iteration of hydraulic solution
**            relerr = current convergence error
**   Output:  none
**   Purpose: writes out convergence status of hydraulic solution
**-----------------------------------------------------------------
*/
{
   if (iter == 0)
   {
      sprintf(Msg, FMT64, clocktime(Atime,Htime));
      writeline(Msg);
   }
   else
   {
      sprintf(Msg,FMT65,iter,relerr);
      writeline(Msg);
   }
}                        /* End of writerelerr */