static void
in6_rtqtimo(void *rock)
{
        struct radix_node_head *rnh = rock;
        struct rtqk_arg arg;
        struct timeval atv;
        static time_t last_adjusted_timeout = 0;

        arg.found = arg.killed = 0;
        arg.rnh = rnh;
        arg.nextstop = time_second + rtq_timeout;
        arg.draining = arg.updating = 0;
        crit_enter();
        rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
        crit_exit();

        /*
         * Attempt to be somewhat dynamic about this:
         * If there are ``too many'' routes sitting around taking up space,
         * then crank down the timeout, and see if we can't make some more
         * go away.  However, we make sure that we will never adjust more
         * than once in rtq_timeout seconds, to keep from cranking down too
         * hard.
         */
        if ((arg.found - arg.killed > rtq_toomany) &&
            (time_second - last_adjusted_timeout >= rtq_timeout) &&
            rtq_reallyold > rtq_minreallyold) {
                rtq_reallyold = 2 * rtq_reallyold / 3;
                if (rtq_reallyold < rtq_minreallyold) {
                        rtq_reallyold = rtq_minreallyold;
                }

                last_adjusted_timeout = time_second;
#ifdef DIAGNOSTIC
                log(LOG_DEBUG, "in6_rtqtimo: adjusted rtq_reallyold to %d",
                    rtq_reallyold);
#endif
                arg.found = arg.killed = 0;
                arg.updating = 1;
                crit_enter();
                rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
                crit_exit();
        }

        atv.tv_usec = 0;
        atv.tv_sec = arg.nextstop;
        callout_reset(&in6_rtqtimo_ch[mycpuid], tvtohz_high(&atv),
            in6_rtqtimo, rock);
}
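/*
 * A minimal standalone sketch of the "crank down" rule above: each
 * adjustment takes 2/3 of the current route lifetime and clamps it at a
 * floor, so repeated pressure converges geometrically on the minimum.
 * The starting values mirror the usual defaults for rtq_reallyold (3600s)
 * and rtq_minreallyold (10s), but are assumptions here, not values read
 * from this file.
 */
#if 0   /* illustration only; not compiled into the kernel */
#include <stdio.h>

int
main(void)
{
        int reallyold = 3600;           /* assumed default route lifetime */
        const int floor = 10;           /* assumed minimum lifetime */
        int round = 0;

        while (reallyold > floor) {
                reallyold = 2 * reallyold / 3;
                if (reallyold < floor)
                        reallyold = floor;
                printf("adjustment %d: lifetime = %ds\n", ++round, reallyold);
        }
        return 0;
}
#endif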
/*
 * Low level support for the sleep/wakeup paradigm.
 *
 * If a timeout is specified:
 *      returns 0 on wakeup
 *      returns EAGAIN if timed out
 *      returns EINVAL on error
 *
 * If a timeout is not specified:
 *      returns the time spent waiting, in ticks.
 */
int
sys_thr_sleep(struct thr_sleep_args *uap)
{
        struct proc *p = curproc;
        struct lwp *lp = curthread->td_lwp;
        int sleepstart;
        struct timespec ts;
        struct timeval atv;
        int error, timo;

        timo = 0;
        if (uap->timeout != 0) {
                /*
                 * Get timespec struct
                 */
                if ((error = copyin(uap->timeout, &ts, sizeof(ts))) != 0) {
                        p->p_wakeup = 0;
                        return error;
                }
                if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000) {
                        p->p_wakeup = 0;
                        return (EINVAL);
                }
                TIMESPEC_TO_TIMEVAL(&atv, &ts);
                if (itimerfix(&atv)) {
                        p->p_wakeup = 0;
                        return (EINVAL);
                }
                timo = tvtohz_high(&atv);
        }

        uap->sysmsg_result = 0;
        if (p->p_wakeup == 0) {
                sleepstart = ticks;
                lp->lwp_flag |= LWP_SINTR;
                error = tsleep(p, 0, "thrslp", timo);
                lp->lwp_flag &= ~LWP_SINTR;
                if (error == EWOULDBLOCK) {
                        p->p_wakeup = 0;
                        uap->sysmsg_result = EAGAIN;
                        return 0;
                }
                if (uap->timeout == 0)
                        uap->sysmsg_result = ticks - sleepstart;
        }
        p->p_wakeup = 0;
        return (0);
}
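/*
 * A minimal standalone sketch of the timeout conversion done above:
 * validate a timespec, then round it up to scheduler ticks.  ts_to_ticks()
 * is a hypothetical helper and hz = 100 is an assumption for illustration;
 * the syscall path uses TIMESPEC_TO_TIMEVAL(), itimerfix() and
 * tvtohz_high() instead.
 */
#if 0   /* illustration only; not compiled into the kernel */
#include <stdio.h>
#include <time.h>

static int
ts_to_ticks(const struct timespec *ts, int hz)
{
        long long nsec_ticks;

        if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= 1000000000L)
                return (-1);            /* would be EINVAL in the syscall */
        /* round up so the sleep is never shorter than requested */
        nsec_ticks = ((long long)ts->tv_nsec * hz + 999999999LL) / 1000000000LL;
        return (int)(ts->tv_sec * hz + nsec_ticks);
}

int
main(void)
{
        struct timespec ts = { 1, 500000000L };         /* 1.5 seconds */

        printf("timo = %d ticks at hz=100\n", ts_to_ticks(&ts, 100));
        return 0;
}
#endif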
/*
 * bfq_bio_done(): .bio_done callback of the bfq policy
 *
 * Called after a bio is done (by request_polling_biodone() of dsched).
 * This function judges whether a thread has used up its time slice, and
 * if so, it sets the maybe_timeout flag in the thread's bfq_thread_io
 * structure.  Any further action of that thread or the bfq scheduler
 * will then cause the thread to be expired (in bfq_queue() or in
 * bfq_dequeue()).
 *
 * This function requires the bfq_thread_io pointer of the thread that
 * pushed bp to have been stored by dsched_set_bio_priv() earlier.
 * Currently it is stored when bfq_queue() is called.
 *
 * lock: none. This function CANNOT be blocked by any lock.
 *
 * refcount:
 *      the corresponding tdio's refcount should decrease by 1 after
 *      this function call. The counterpart increase is in bfq_queue().
 *      For each bio pushed down, we increase the refcount of the pushing
 *      tdio.
 */
static void
bfq_bio_done(struct bio *bp)
{
        struct disk *dp = dsched_get_bio_dp(bp);
        struct bfq_thread_io *bfq_tdio = dsched_get_bio_priv(bp);
        struct bfq_disk_ctx *bfq_diskctx = dsched_get_disk_priv(dp);
        struct timeval tv;
        int ticks_expired;

        KKASSERT(bfq_tdio);

        dsched_thread_io_ref(&bfq_tdio->head);

        atomic_add_int(&bfq_tdio->bio_completed, 1);

        /* the tdio has already expired */
        if (bfq_tdio != bfq_diskctx->bfq_active_tdio)
                goto rtn;
        atomic_add_int(&bfq_tdio->service_received, BIO_SIZE(bp));

        /* current time */
        getmicrotime(&tv);
        bfq_tdio->last_request_done_time = tv;
        timevalsub(&tv, &bfq_tdio->service_start_time);
        ticks_expired = tvtohz_high(&tv);

        /* the thread has used up its time slice */
        if ((ticks_expired != 0x7fffffff) &&
            (ticks_expired >= BFQ_SLICE_TIMEOUT)) {
                /*
                 * we cannot block here, so just set a flag
                 */
#if 0
                bfq_tdio->maybe_timeout = 1;
#endif
                if (atomic_cmpset_int(&bfq_tdio->maybe_timeout, 0, 1)) {
                        bfq_update_avg_time_slice(bfq_diskctx, tv);
                        dsched_debug(BFQ_DEBUG_VERBOSE, "BFQ: %p may time out\n",
                            bfq_tdio);
                }
        }
rtn:
        dsched_thread_io_unref(&bfq_tdio->head); /* ref'ed in this function */
        dsched_thread_io_unref(&bfq_tdio->head); /* ref'ed in queue() */
}
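/*
 * A minimal standalone sketch of the one-shot flag pattern used for
 * maybe_timeout above, written with C11 atomics in place of the kernel's
 * atomic_cmpset_int().  Only the caller that wins the compare-and-swap
 * performs the one-time work, which is why the cmpset above keeps
 * bfq_update_avg_time_slice() from running twice for the same slice.
 */
#if 0   /* illustration only; not compiled into the kernel */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int maybe_timeout;        /* starts at 0 */

static void
mark_timeout(void)
{
        int expected = 0;

        /* analogous to atomic_cmpset_int(&maybe_timeout, 0, 1) */
        if (atomic_compare_exchange_strong(&maybe_timeout, &expected, 1))
                printf("flag set: do the one-time bookkeeping\n");
        else
                printf("flag already set: nothing to do\n");
}

int
main(void)
{
        mark_timeout();         /* first call wins the cmpset */
        mark_timeout();         /* second call is a no-op */
        return 0;
}
#endif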
static void
in6_mtutimo(void *rock)
{
        struct radix_node_head *rnh = rock;
        struct mtuex_arg arg;
        struct timeval atv;

        arg.rnh = rnh;
        arg.nextstop = time_second + MTUTIMO_DEFAULT;
        crit_enter();
        rnh->rnh_walktree(rnh, in6_mtuexpire, &arg);
        crit_exit();

        atv.tv_usec = 0;
        atv.tv_sec = arg.nextstop;
        if (atv.tv_sec < time_second) {
                kprintf("invalid mtu expiration time on routing table\n");
                arg.nextstop = time_second + 30;        /* last resort */
                atv.tv_sec = arg.nextstop;
        }
        callout_reset(&in6_mtutimo_ch[mycpuid], tvtohz_high(&atv),
            in6_mtutimo, rock);
}
/*
 * TRANS2_FIND_FIRST2/NEXT2, used for the NT LM 0.12 dialect
 */
static int
smbfs_smb_trans2find2(struct smbfs_fctx *ctx)
{
        struct smb_t2rq *t2p;
        struct smb_vc *vcp = SSTOVC(ctx->f_ssp);
        struct mbchain *mbp;
        struct mdchain *mdp;
        u_int16_t tw, flags;
        int error;

        if (ctx->f_t2) {
                smb_t2_done(ctx->f_t2);
                ctx->f_t2 = NULL;
        }
        ctx->f_flags &= ~SMBFS_RDD_GOTRNAME;
        flags = 8 | 2;                  /* <resume> | <close if EOS> */
        if (ctx->f_flags & SMBFS_RDD_FINDSINGLE) {
                flags |= 1;             /* close search after this request */
                ctx->f_flags |= SMBFS_RDD_NOCLOSE;
        }
        if (ctx->f_flags & SMBFS_RDD_FINDFIRST) {
                error = smb_t2_alloc(SSTOCP(ctx->f_ssp), SMB_TRANS2_FIND_FIRST2,
                    ctx->f_scred, &t2p);
                if (error)
                        return error;
                ctx->f_t2 = t2p;
                mbp = &t2p->t2_tparam;
                mb_init(mbp);
                mb_put_uint16le(mbp, ctx->f_attrmask);
                mb_put_uint16le(mbp, ctx->f_limit);
                mb_put_uint16le(mbp, flags);
                mb_put_uint16le(mbp, ctx->f_infolevel);
                mb_put_uint32le(mbp, 0);
                error = smbfs_fullpath(mbp, vcp, ctx->f_dnp, ctx->f_wildcard,
                    ctx->f_wclen);
                if (error)
                        return error;
        } else {
                error = smb_t2_alloc(SSTOCP(ctx->f_ssp), SMB_TRANS2_FIND_NEXT2,
                    ctx->f_scred, &t2p);
                if (error)
                        return error;
                ctx->f_t2 = t2p;
                mbp = &t2p->t2_tparam;
                mb_init(mbp);
                mb_put_mem(mbp, (caddr_t)&ctx->f_Sid, 2, MB_MSYSTEM);
                mb_put_uint16le(mbp, ctx->f_limit);
                mb_put_uint16le(mbp, ctx->f_infolevel);
                mb_put_uint32le(mbp, 0);        /* resume key */
                mb_put_uint16le(mbp, flags);
                if (ctx->f_rname)
                        mb_put_mem(mbp, ctx->f_rname, strlen(ctx->f_rname) + 1,
                            MB_MSYSTEM);
                else
                        mb_put_uint8(mbp, 0);   /* resume file name */
#if 0
                struct timeval tv;
                tv.tv_sec = 0;
                tv.tv_usec = 200 * 1000;        /* 200ms */
                if (vcp->vc_flags & SMBC_WIN95) {
                        /*
                         * Some implementations suggest sleeping here for
                         * 200ms to work around a bug in Win95.  I didn't
                         * notice any problem, but the code is kept for it.
                         */
                        tsleep(&flags, 0, "fix95", tvtohz_high(&tv));
                }
#endif
        }
        t2p->t2_maxpcount = 5 * 2;
        t2p->t2_maxdcount = vcp->vc_txmax;
        error = smb_t2_request(t2p);
        if (error)
                return error;
        mdp = &t2p->t2_rparam;
        if (ctx->f_flags & SMBFS_RDD_FINDFIRST) {
                if ((error = md_get_uint16(mdp, &ctx->f_Sid)) != 0)
                        return error;
                ctx->f_flags &= ~SMBFS_RDD_FINDFIRST;
        }
        if ((error = md_get_uint16le(mdp, &tw)) != 0)
                return error;
        ctx->f_ecnt = tw;               /* search count */
        if ((error = md_get_uint16le(mdp, &tw)) != 0)
                return error;
        if (tw)                         /* end-of-search flag */
                ctx->f_flags |= SMBFS_RDD_EOF | SMBFS_RDD_NOCLOSE;
        if ((error = md_get_uint16le(mdp, &tw)) != 0)   /* EA error offset */
                return error;
        if ((error = md_get_uint16le(mdp, &tw)) != 0)   /* last name offset */
                return error;
        if (ctx->f_ecnt == 0)
                return ENOENT;
        ctx->f_rnameofs = tw;
        mdp = &t2p->t2_rdata;
        if (mdp->md_top == NULL) {
                kprintf("bug: ecnt = %d, but data is NULL (please report)\n",
                    ctx->f_ecnt);
                return ENOENT;
        }
        if (mdp->md_top->m_len == 0) {
                kprintf("bug: ecnt = %d, but m_len = 0 and m_next = %p "
                    "(please report)\n", ctx->f_ecnt, mdp->md_top->m_next);
                return ENOENT;
        }
        ctx->f_eofs = 0;
        return 0;
}
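/*
 * A minimal standalone sketch decoding the "flags = 8 | 2" word built in
 * smbfs_smb_trans2find2() above.  The bit meanings follow the CIFS
 * TRANS2_FIND_FIRST2/NEXT2 specification; the macro names are
 * illustrative, not identifiers from this tree.
 */
#if 0   /* illustration only; not compiled into the kernel */
#include <stdio.h>

#define FIND2_CLOSE_AFTER_REQUEST       0x0001  /* close search after this request */
#define FIND2_CLOSE_AT_EOS              0x0002  /* close search at end of search */
#define FIND2_RETURN_RESUME_KEYS        0x0004  /* return resume keys */
#define FIND2_CONTINUE_FROM_LAST        0x0008  /* resume from previous ending place */

int
main(void)
{
        /* 8 | 2 above is FIND2_CONTINUE_FROM_LAST | FIND2_CLOSE_AT_EOS */
        unsigned flags = FIND2_CONTINUE_FROM_LAST | FIND2_CLOSE_AT_EOS;

        /* SMBFS_RDD_FINDSINGLE additionally closes after one request */
        flags |= FIND2_CLOSE_AFTER_REQUEST;
        printf("flags = 0x%04x\n", flags);
        return 0;
}
#endif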