static void
in6_mtutimo(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct mtuex_arg arg;
	struct timeval atv;
	struct timeval timenow;

	getmicrotime(&timenow);

	arg.rnh = rnh;
	arg.nextstop = timenow.tv_sec + MTUTIMO_DEFAULT;
	lck_mtx_lock(rnh_lock);
	rnh->rnh_walktree(rnh, in6_mtuexpire, &arg);

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop;
	if (atv.tv_sec < timenow.tv_sec) {
#if DIAGNOSTIC
		log(LOG_DEBUG,
		    "IPv6: invalid mtu expiration time on routing table\n");
#endif
		arg.nextstop = timenow.tv_sec + 30;	/* last resort */
		atv.tv_sec = arg.nextstop;	/* apply the clamp to the timer value as well */
	}
	atv.tv_sec -= timenow.tv_sec;
	lck_mtx_unlock(rnh_lock);
	timeout(in6_mtutimo, rock, tvtohz(&atv));
}
/* Start transaction. */
int
tpm_legacy_start(struct tpm_softc *sc, int flag)
{
	struct timeval tv;
	u_int8_t bits, r;
	int to, rv;

	bits = flag == UIO_READ ? TPM_LEGACY_DA : 0;
	tv.tv_sec = TPM_LEGACY_TMO;
	tv.tv_usec = 0;
	to = tvtohz(&tv) / TPM_LEGACY_SLEEP;
	while (((r = bus_space_read_1(sc->sc_batm, sc->sc_bahm, 1)) &
	    (TPM_LEGACY_BUSY|bits)) != bits && to--) {
		rv = tsleep(sc, PRIBIO | PCATCH, "legacy_tpm_start",
		    TPM_LEGACY_SLEEP);
		if (rv && rv != EWOULDBLOCK)
			return rv;
	}

#if defined(TPM_DEBUG) && !defined(__FreeBSD__)
	printf("%s: bits %b\n", sc->sc_dev.dv_xname, r, TPM_LEGACY_BITS);
#endif
	if ((r & (TPM_LEGACY_BUSY|bits)) != bits)
		return EIO;

	return 0;
}
/* Finish transaction. */
int
tpm_legacy_end(struct tpm_softc *sc, int flag, int rv)
{
	struct timeval tv;
	u_int8_t r;
	int to;

	if (rv || flag == UIO_READ)
		bus_space_write_1(sc->sc_batm, sc->sc_bahm, 1,
		    TPM_LEGACY_ABRT);
	else {
		tv.tv_sec = TPM_LEGACY_TMO;
		tv.tv_usec = 0;
		to = tvtohz(&tv) / TPM_LEGACY_SLEEP;
		while (((r = bus_space_read_1(sc->sc_batm, sc->sc_bahm, 1)) &
		    TPM_LEGACY_BUSY) && to--) {
			rv = tsleep(sc, PRIBIO | PCATCH, "legacy_tpm_end",
			    TPM_LEGACY_SLEEP);
			if (rv && rv != EWOULDBLOCK)
				return rv;
		}

#if defined(TPM_DEBUG) && !defined(__FreeBSD__)
		printf("%s: bits %b\n", sc->sc_dev.dv_xname, r,
		    TPM_LEGACY_BITS);
#endif
		if (r & TPM_LEGACY_BUSY)
			return EIO;

		if (r & TPM_LEGACY_RE)
			return EIO;	/* XXX Retry the loop? */
	}

	return rv;
}
RTDECL(int) RTTimerStart(PRTTIMER pTimer, uint64_t u64First)
{
    struct timeval tv;

    if (!rtTimerIsValid(pTimer))
        return VERR_INVALID_HANDLE;
    if (!pTimer->fSuspended)
        return VERR_TIMER_ACTIVE;
    if (   pTimer->fSpecificCpu
        && !RTMpIsCpuOnline(pTimer->idCpu))
        return VERR_CPU_OFFLINE;

    /*
     * Calc when it should start firing.
     */
    u64First += RTTimeNanoTS();

    pTimer->fSuspended = false;
    pTimer->iTick = 0;
    pTimer->u64StartTS = u64First;
    pTimer->u64NextTS = u64First;

    tv.tv_sec  =  u64First / 1000000000;
    tv.tv_usec = (u64First % 1000000000) / 1000;
    callout_reset(&pTimer->Callout, tvtohz(&tv), rtTimerFreeBSDCallback, pTimer);

    return VINF_SUCCESS;
}
static void rtTimerFreeBSDCallback(void *pvTimer)
{
    PRTTIMER pTimer = (PRTTIMER)pvTimer;

    /* calculate and set the next timeout */
    pTimer->iTick++;
    if (!pTimer->u64NanoInterval)
    {
        pTimer->fSuspended = true;
        callout_stop(&pTimer->Callout);
    }
    else
    {
        struct timeval tv;
        const uint64_t u64NanoTS = RTTimeNanoTS();
        pTimer->u64NextTS = pTimer->u64StartTS + pTimer->iTick * pTimer->u64NanoInterval;
        if (pTimer->u64NextTS < u64NanoTS)
            pTimer->u64NextTS = u64NanoTS + RTTimerGetSystemGranularity() / 2;

        tv.tv_sec  =  pTimer->u64NextTS / 1000000000;
        tv.tv_usec = (pTimer->u64NextTS % 1000000000) / 1000;
        callout_reset(&pTimer->Callout, tvtohz(&tv), rtTimerFreeBSDCallback, pTimer);
    }

    /* callback */
    if (    !pTimer->fSpecificCpu
        ||  pTimer->iCpu == curcpu)
        pTimer->pfnTimer(pTimer, pTimer->pvUser, pTimer->iTick);
    else
        smp_rendezvous(NULL, rtTimerFreeBSDIpiAction, NULL, pvTimer);
}
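/*
 * Illustrative sketch (not VirtualBox code): the catch-up arithmetic that
 * rtTimerFreeBSDCallback() performs above.  When the computed deadline has
 * already passed, it is pushed just beyond "now" so the callout is never
 * armed with a zero or negative delay.  The function name and the idea of
 * passing the granularity as a parameter are assumptions for this example.
 */
#include <stdint.h>

static uint64_t next_deadline(uint64_t startNS, uint64_t iTick,
                              uint64_t intervalNS, uint64_t nowNS,
                              uint64_t granularityNS)
{
    uint64_t next = startNS + iTick * intervalNS;
    if (next < nowNS)                       /* deadline already missed */
        next = nowNS + granularityNS / 2;   /* fire shortly after "now" */
    return next;
}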
/* Start transaction. */
int
tpm_legacy_start(struct tpm_softc *sc, int flag)
{
	struct timeval tv;
	uint8_t bits, r;
	int to, rv;

	bits = flag == UIO_READ ? TPM_LEGACY_DA : 0;
	tv.tv_sec = TPM_LEGACY_TMO;
	tv.tv_usec = 0;
	to = tvtohz(&tv) / TPM_LEGACY_SLEEP;
	while (((r = bus_space_read_1(sc->sc_batm, sc->sc_bahm, 1)) &
	    (TPM_LEGACY_BUSY|bits)) != bits && to--) {
		rv = tsleep(sc, PRIBIO | PCATCH, "legacy_tpm_start",
		    TPM_LEGACY_SLEEP);
		if (rv && rv != EWOULDBLOCK)
			return rv;
	}

#if defined(TPM_DEBUG) && !defined(__FreeBSD__)
	char buf[128];
	snprintb(buf, sizeof(buf), TPM_LEGACY_BITS, r);
	aprint_debug_dev(sc->sc_dev, "%s: bits %s\n",
	    device_xname(sc->sc_dev), buf);
#endif
	if ((r & (TPM_LEGACY_BUSY|bits)) != bits)
		return EIO;

	return 0;
}
static void
in6_mtutimo(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct mtuex_arg arg;
	struct timeval atv;
	int s;

	arg.rnh = rnh;
	arg.nextstop = time_second + MTUTIMO_DEFAULT;
	s = splnet();
	rnh->rnh_walktree(rnh, in6_mtuexpire, &arg);
	splx(s);

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop;
	if (atv.tv_sec < time_second) {
#if DIAGNOSTIC
		log(LOG_DEBUG,
		    "IPv6: invalid mtu expiration time on routing table\n");
#endif
		arg.nextstop = time_second + 30;	/* last resort */
		atv.tv_sec = arg.nextstop;	/* apply the clamp to the timer value as well */
	}
	atv.tv_sec -= time_second;
	timeout(in6_mtutimo_funneled, rock, tvtohz(&atv));
}
static inline int
tstohz(const struct timespec *tsp)
{
	struct timeval tv;

	TIMESPEC_TO_TIMEVAL(&tv, tsp);
	return (tvtohz(&tv));
}
int
tpm_tmotohz(int tmo)
{
	struct timeval tv;

	tv.tv_sec = tmo / 1000;
	tv.tv_usec = 1000 * (tmo % 1000);
	return tvtohz(&tv);
}
static int
pow2ns_to_ticks(int pow2ns)
{
	struct timeval tv;
	struct timespec ts;

	pow2ns_to_ts(pow2ns, &ts);
	TIMESPEC_TO_TIMEVAL(&tv, &ts);
	return (tvtohz(&tv));
}
/*
 * Convert milliseconds to ticks.
 */
static int
timeout2hz(UINT16 Timeout)
{
	struct timeval tv;

	tv.tv_sec = (time_t)(Timeout / 1000);
	tv.tv_usec = (suseconds_t)(Timeout % 1000) * 1000;

	return (tvtohz(&tv));
}
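/*
 * Minimal userland sketch of the millisecond-to-tick conversion used by
 * tpm_tmotohz() and timeout2hz() above.  tvtohz(9) itself is kernel-only;
 * its behaviour is approximated here for illustration, assuming hz = 1000
 * and the documented "round up, plus one tick for the current partial
 * tick" convention.  The real tvtohz() also guards against overflow.
 */
#include <stdio.h>
#include <sys/time.h>

#define SKETCH_HZ	1000	/* assumption: kern.hz on the target system */

static int
ms_to_ticks(int ms)
{
	struct timeval tv;

	tv.tv_sec = ms / 1000;
	tv.tv_usec = (ms % 1000) * 1000;
	/* round up to whole ticks, plus one for the current partial tick */
	return (tv.tv_sec * SKETCH_HZ +
	    (tv.tv_usec * SKETCH_HZ + 999999) / 1000000 + 1);
}

int
main(void)
{
	printf("1500 ms -> %d ticks at %d Hz\n", ms_to_ticks(1500), SKETCH_HZ);
	return (0);
}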
static void
in6_rtqtimo(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct rtqk_arg arg;
	struct timeval atv;
	static time_t last_adjusted_timeout = 0;
	int s;

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_second + rtq_timeout;
	arg.draining = arg.updating = 0;
	s = splnet();
	rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
	splx(s);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
	if ((arg.found - arg.killed > rtq_toomany) &&
	    (time_second - last_adjusted_timeout >= rtq_timeout) &&
	    rtq_reallyold > rtq_minreallyold) {
		rtq_reallyold = 2*rtq_reallyold / 3;
		if (rtq_reallyold < rtq_minreallyold) {
			rtq_reallyold = rtq_minreallyold;
		}

		last_adjusted_timeout = time_second;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in6_rtqtimo: adjusted rtq_reallyold to %d",
		    rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		s = splnet();
		rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
		splx(s);
	}

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop - time_second;
	if (atv.tv_sec < 0) {
		printf("invalid rtq expiration time on routing table\n");
		atv.tv_sec = 30;	/* last resort */
	}
	timeout(in6_rtqtimo, rock, tvtohz(&atv));
}
int
mstsopen(dev_t dev, struct tty *tp)
{
	struct proc *p = curproc;
	struct msts *np;
	struct timeval t;
	int error;

	DPRINTF(("mstsopen\n"));
	if (tp->t_line == MSTSDISC)
		return ENODEV;
	if ((error = suser(p, 0)) != 0)
		return error;
	np = malloc(sizeof(struct msts), M_DEVBUF, M_WAITOK|M_ZERO);
	snprintf(np->timedev.xname, sizeof(np->timedev.xname), "msts%d",
	    msts_nxid++);
	msts_count++;
	np->time.status = SENSOR_S_UNKNOWN;
	np->time.type = SENSOR_TIMEDELTA;
#ifndef MSTS_DEBUG
	np->time.flags = SENSOR_FINVALID;
#endif
	sensor_attach(&np->timedev, &np->time);

	np->signal.type = SENSOR_PERCENT;
	np->signal.status = SENSOR_S_UNKNOWN;
	np->signal.value = 100000LL;
	np->signal.flags = 0;
	strlcpy(np->signal.desc, "Signal", sizeof(np->signal.desc));
	sensor_attach(&np->timedev, &np->signal);

	np->sync = 1;
	tp->t_sc = (caddr_t)np;

	error = linesw[TTYDISC].l_open(dev, tp);
	if (error) {
		free(np, M_DEVBUF);
		tp->t_sc = NULL;
	} else {
		sensordev_install(&np->timedev);
		timeout_set(&np->msts_tout, msts_timeout, np);

		/* convert timevals to hz */
		t.tv_sec = TRUSTTIME;
		t.tv_usec = 0;
		t_trust = tvtohz(&t);
	}

	return error;
}
static void
in6_rtqtimo(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct rtqk_arg arg;
	struct timeval atv;
	static time_t last_adjusted_timeout = 0;
	struct timeval timenow;

	lck_mtx_lock(rnh_lock);
	/* Get the timestamp after we acquire the lock for better accuracy */
	getmicrotime(&timenow);

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = timenow.tv_sec + rtq_timeout;
	arg.draining = arg.updating = 0;
	rnh->rnh_walktree(rnh, in6_rtqkill, &arg);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
	if ((arg.found - arg.killed > rtq_toomany) &&
	    (timenow.tv_sec - last_adjusted_timeout >= rtq_timeout) &&
	    rtq_reallyold > rtq_minreallyold) {
		rtq_reallyold = 2*rtq_reallyold / 3;
		if (rtq_reallyold < rtq_minreallyold) {
			rtq_reallyold = rtq_minreallyold;
		}

		last_adjusted_timeout = timenow.tv_sec;
#if DIAGNOSTIC
		log(LOG_DEBUG, "in6_rtqtimo: adjusted rtq_reallyold to %d",
		    rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
	}

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop - timenow.tv_sec;
	lck_mtx_unlock(rnh_lock);
	timeout(in6_rtqtimo, rock, tvtohz(&atv));
}
static void
in6_mtutimo(void *rock)
{
	CURVNET_SET_QUIET((struct vnet *) rock);
	struct timeval atv;
	struct mtuex_arg arg;

	rt_foreach_fib_walk(AF_INET6, in6_mtutimo_setwa, in6_mtuexpire, &arg);

	atv.tv_sec = MTUTIMO_DEFAULT;
	atv.tv_usec = 0;
	callout_reset(&V_rtq_mtutimer, tvtohz(&atv), in6_mtutimo, rock);
	CURVNET_RESTORE();
}
static void
in6_rtqtimo(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct rtqk_arg arg;
	struct timeval atv;
	static time_t last_adjusted_timeout = 0;

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_second + rtq_timeout;
	arg.draining = arg.updating = 0;
	RADIX_NODE_HEAD_LOCK(rnh);
	rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
	RADIX_NODE_HEAD_UNLOCK(rnh);

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
	if ((arg.found - arg.killed > rtq_toomany) &&
	    (time_second - last_adjusted_timeout >= rtq_timeout) &&
	    rtq_reallyold > rtq_minreallyold) {
		rtq_reallyold = 2*rtq_reallyold / 3;
		if (rtq_reallyold < rtq_minreallyold) {
			rtq_reallyold = rtq_minreallyold;
		}

		last_adjusted_timeout = time_second;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in6_rtqtimo: adjusted rtq_reallyold to %d",
		    rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		RADIX_NODE_HEAD_LOCK(rnh);
		rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
		RADIX_NODE_HEAD_UNLOCK(rnh);
	}

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop - time_second;	/* callout_reset() expects a relative delay */
	callout_reset(&rtq_timer, tvtohz(&atv), in6_rtqtimo, rock);
}
int
tstohz(const struct timespec *ts)
{
	struct timeval tv;

	TIMESPEC_TO_TIMEVAL(&tv, ts);

	/* Round up. */
	if ((ts->tv_nsec % 1000) != 0) {
		tv.tv_usec += 1;
		if (tv.tv_usec >= 1000000) {
			tv.tv_usec -= 1000000;
			tv.tv_sec += 1;
		}
	}

	return (tvtohz(&tv));
}
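/*
 * Why tstohz() above rounds up: TIMESPEC_TO_TIMEVAL() truncates
 * nanoseconds to microseconds, so e.g. a request of 999 ns would collapse
 * to 0 us and the resulting timeout could be shorter than asked for;
 * rounding up guarantees the sleep is never shorter than the requested
 * timespec.  A small demonstration, assuming a BSD <sys/time.h> that
 * exposes TIMESPEC_TO_TIMEVAL to userland:
 */
#include <stdio.h>
#include <sys/time.h>

int
main(void)
{
	struct timespec ts = { 0, 999 };	/* 999 ns */
	struct timeval tv;

	TIMESPEC_TO_TIMEVAL(&tv, &ts);		/* truncates to 0 us */
	printf("truncated:  %ld us\n", (long)tv.tv_usec);
	if ((ts.tv_nsec % 1000) != 0)		/* the round-up in tstohz() */
		tv.tv_usec += 1;
	printf("rounded up: %ld us\n", (long)tv.tv_usec);
	return (0);
}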
static void
in_rtqtimo(void *rock)
{
	CURVNET_SET((struct vnet *) rock);
	int fibnum;
	void *newrock;
	struct timeval atv;

	for (fibnum = 0; fibnum < rt_numfibs; fibnum++) {
		newrock = rt_tables_get_rnh(fibnum, AF_INET);
		if (newrock != NULL)
			in_rtqtimo_one(newrock);
	}
	atv.tv_usec = 0;
	atv.tv_sec = V_rtq_timeout;
	callout_reset(&V_rtq_timer, tvtohz(&atv), in_rtqtimo, rock);
	CURVNET_RESTORE();
}
static void
in6_mtutimo(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct mtuex_arg arg;
	struct timeval atv;

	arg.rnh = rnh;
	arg.nextstop = time_second + MTUTIMO_DEFAULT;
	RADIX_NODE_HEAD_LOCK(rnh);
	rnh->rnh_walktree(rnh, in6_mtuexpire, &arg);
	RADIX_NODE_HEAD_UNLOCK(rnh);

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop - time_second;	/* callout_reset() expects a relative delay */
	if (atv.tv_sec < 0) {
		printf("invalid mtu expiration time on routing table\n");
		atv.tv_sec = 30;	/* last resort */
	}
	callout_reset(&rtq_mtutimer, tvtohz(&atv), in6_mtutimo, rock);
}
static void
in6_mtutimo(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct mtuex_arg arg;
	struct timeval atv;
	int s;

	arg.rnh = rnh;
	arg.nextstop = time_second + MTUTIMO_DEFAULT;
	s = splnet();
	rnh->rnh_walktree(rnh, in6_mtuexpire, &arg);
	splx(s);

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop - time_second;
	if (atv.tv_sec < 0) {
		printf("invalid mtu expiration time on routing table\n");
		atv.tv_sec = 30;	/* last resort */
	}
	timeout(in6_mtutimo, rock, tvtohz(&atv));
}
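/*
 * The in6_mtutimo()/in6_rtqtimo() variants above all share one rearm
 * pattern: walk the routing tree to learn the next absolute expiry
 * (arg.nextstop), convert that to a delay relative to "now", clamp
 * obviously bogus values, and hand the result to tvtohz().  A minimal
 * sketch of just that conversion, with hypothetical names:
 */
#include <sys/types.h>
#include <sys/time.h>

static void
next_expiry_to_delay(time_t nextstop, time_t now, struct timeval *atv)
{
	atv->tv_usec = 0;
	atv->tv_sec = nextstop - now;	/* absolute expiry -> relative delay */
	if (atv->tv_sec < 0)		/* expiry in the past: last resort */
		atv->tv_sec = 30;
}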
/*
 * This will block until a segment in file system fsid is written.  A timeout
 * in milliseconds may be specified which will awake the cleaner automatically.
 * An fsid of -1 means any file system, and a timeout of 0 means forever.
 */
int
lfs_segwait(fsid_t *fsidp, struct timeval *tv)
{
	struct mount *mntp;
	void *addr;
	u_long timeout;
	int error;

	KERNEL_LOCK(1, NULL);
	if (fsidp == NULL || (mntp = vfs_getvfs(fsidp)) == NULL)
		addr = &lfs_allclean_wakeup;
	else
		addr = &VFSTOULFS(mntp)->um_lfs->lfs_nextseg;
	/*
	 * XXX THIS COULD SLEEP FOREVER IF TIMEOUT IS {0,0}!
	 * XXX IS THAT WHAT IS INTENDED?
	 */
	timeout = tvtohz(tv);
	error = tsleep(addr, PCATCH | PVFS, "segment", timeout);
	KERNEL_UNLOCK_ONE(NULL);
	return (error == ERESTART ? EINTR : 0);
}
/*
 * Convert a timeout in seconds to N where 2^N nanoseconds is close to
 * "seconds".
 *
 * The kernel expects the timeouts for watchdogs in "2^N nanosecond format".
 */
static u_int
parse_timeout_to_pow2ns(char opt, const char *longopt, const char *myoptarg)
{
	double a;
	u_int rv;
	struct timespec ts;
	struct timeval tv;
	int ticks;
	char shortopt[] = "- ";

	if (!longopt)
		shortopt[1] = opt;

	a = fetchtimeout(opt, longopt, myoptarg, 1);

	if (a == 0)
		rv = WD_TO_NEVER;
	else
		rv = seconds_to_pow2ns(a);
	pow2ns_to_ts(rv, &ts);
	tstotv(&tv, &ts);
	ticks = tvtohz(&tv);
	if (debugging) {
		printf("Timeout for %s%s "
		    "is 2^%d nanoseconds "
		    "(in: %s sec -> out: %jd sec %ld ns -> %d ticks)\n",
		    longopt ? "-" : "", longopt ? longopt : shortopt,
		    rv, myoptarg, (intmax_t)ts.tv_sec, ts.tv_nsec, ticks);
	}
	if (ticks <= 0) {
		errx(1, "Timeout for %s%s is too small, "
		    "please choose a higher timeout.",
		    longopt ? "-" : "", longopt ? longopt : shortopt);
	}

	return (rv);
}
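/*
 * Hedged sketch of the "2^N nanosecond format" described in the comment
 * above: watchdog(4) encodes a timeout as the exponent N, meaning 2^N
 * nanoseconds.  watchdogd's real pow2ns_to_ts() is assumed to behave
 * roughly like this:
 */
#include <stdint.h>
#include <time.h>

static void
pow2ns_to_ts_sketch(unsigned int pow2ns, struct timespec *ts)
{
	uint64_t ns = UINT64_C(1) << pow2ns;	/* 2^N nanoseconds */

	ts->tv_sec = ns / 1000000000ULL;
	ts->tv_nsec = ns % 1000000000ULL;
}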
/*
 * TRANS2_FIND_FIRST2/NEXT2, used for NT LM12 dialect
 */
static int
smbfs_smb_trans2find2(struct smbfs_fctx *ctx)
{
	struct smb_t2rq *t2p;
	struct smb_vc *vcp = SSTOVC(ctx->f_ssp);
	struct mbchain *mbp;
	struct mdchain *mdp;
	u_int16_t tw, flags;
	int error;

	if (ctx->f_t2) {
		smb_t2_done(ctx->f_t2);
		ctx->f_t2 = NULL;
	}
	ctx->f_flags &= ~SMBFS_RDD_GOTRNAME;
	flags = 8 | 2;			/* <resume> | <close if EOS> */
	if (ctx->f_flags & SMBFS_RDD_FINDSINGLE) {
		flags |= 1;		/* close search after this request */
		ctx->f_flags |= SMBFS_RDD_NOCLOSE;
	}
	if (ctx->f_flags & SMBFS_RDD_FINDFIRST) {
		error = smb_t2_alloc(SSTOCP(ctx->f_ssp),
		    SMB_TRANS2_FIND_FIRST2, ctx->f_scred, &t2p);
		if (error)
			return error;
		ctx->f_t2 = t2p;
		mbp = &t2p->t2_tparam;
		mb_init(mbp);
		mb_put_uint16le(mbp, ctx->f_attrmask);
		mb_put_uint16le(mbp, ctx->f_limit);
		mb_put_uint16le(mbp, flags);
		mb_put_uint16le(mbp, ctx->f_infolevel);
		mb_put_uint32le(mbp, 0);
		error = smbfs_fullpath(mbp, vcp, ctx->f_dnp, ctx->f_wildcard,
		    ctx->f_wclen);
		if (error)
			return error;
	} else {
		error = smb_t2_alloc(SSTOCP(ctx->f_ssp),
		    SMB_TRANS2_FIND_NEXT2, ctx->f_scred, &t2p);
		if (error)
			return error;
		ctx->f_t2 = t2p;
		mbp = &t2p->t2_tparam;
		mb_init(mbp);
		mb_put_mem(mbp, (caddr_t)&ctx->f_Sid, 2, MB_MSYSTEM);
		mb_put_uint16le(mbp, ctx->f_limit);
		mb_put_uint16le(mbp, ctx->f_infolevel);
		mb_put_uint32le(mbp, 0);	/* resume key */
		mb_put_uint16le(mbp, flags);
		if (ctx->f_rname)
			mb_put_mem(mbp, ctx->f_rname, ctx->f_rnamelen + 1,
			    MB_MSYSTEM);
		else
			mb_put_uint8(mbp, 0);	/* resume file name */
#if 0
		struct timeval tv;
		tv.tv_sec = 0;
		tv.tv_usec = 200 * 1000;	/* 200ms */
		if (vcp->vc_flags & SMBC_WIN95) {
			/*
			 * Some implementations suggest sleeping here
			 * for 200ms, due to a bug in Win95.  I didn't
			 * notice any problem, but keep the code for it.
			 */
			pause("fix95", tvtohz(&tv));
		}
#endif
	}
	t2p->t2_maxpcount = 5 * 2;
	t2p->t2_maxdcount = vcp->vc_txmax;
	error = smb_t2_request(t2p);
	if (error)
		return error;
	mdp = &t2p->t2_rparam;
	if (ctx->f_flags & SMBFS_RDD_FINDFIRST) {
		if ((error = md_get_uint16(mdp, &ctx->f_Sid)) != 0)
			return error;
		ctx->f_flags &= ~SMBFS_RDD_FINDFIRST;
	}
	if ((error = md_get_uint16le(mdp, &tw)) != 0)
		return error;
	ctx->f_ecnt = tw;
	if ((error = md_get_uint16le(mdp, &tw)) != 0)
		return error;
	if (tw)
		ctx->f_flags |= SMBFS_RDD_EOF | SMBFS_RDD_NOCLOSE;
	if ((error = md_get_uint16le(mdp, &tw)) != 0)
		return error;
	if ((error = md_get_uint16le(mdp, &tw)) != 0)
		return error;
	if (ctx->f_ecnt == 0) {
		ctx->f_flags |= SMBFS_RDD_EOF | SMBFS_RDD_NOCLOSE;
		return ENOENT;
	}
	ctx->f_rnameofs = tw;
	mdp = &t2p->t2_rdata;
	if (mdp->md_top == NULL) {
		printf("bug: ecnt = %d, but data is NULL (please report)\n",
		    ctx->f_ecnt);
		return ENOENT;
	}
	if (mdp->md_top->m_len == 0) {
		printf("bug: ecnt = %d, but m_len = 0 and m_next = %p "
		    "(please report)\n", ctx->f_ecnt, mdp->md_top->m_next);
		return ENOENT;
	}
	ctx->f_eofs = 0;
	return 0;
}
/* ARGSUSED */
int
bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags,
    struct proc *p)
{
	struct bpf_d *d;
	int error = 0;

	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];
	if (d == 0 || d == (void *)1) {
		lck_mtx_unlock(bpf_mlock);
		return (ENXIO);
	}

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD: {
		int n;

		n = d->bd_slen;
		if (d->bd_hbuf)
			n += d->bd_hlen;

		*(int *)addr = n;
		break;
	}

	case SIOCGIFADDR: {
		struct ifnet *ifp;

		if (d->bd_bif == 0)
			error = EINVAL;
		else {
			ifp = d->bd_bif->bif_ifp;
			error = ifnet_ioctl(ifp, 0, cmd, addr);
		}
		break;
	}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF32: {
		struct bpf_program32 *prg32 = (struct bpf_program32 *)addr;
		error = bpf_setf(d, prg32->bf_len,
		    CAST_USER_ADDR_T(prg32->bf_insns));
		break;
	}

	case BIOCSETF64: {
		struct bpf_program64 *prg64 = (struct bpf_program64 *)addr;
		error = bpf_setf(d, prg64->bf_len, prg64->bf_insns);
		break;
	}

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		reset_d(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			lck_mtx_unlock(bpf_mlock);
			error = ifnet_set_promiscuous(d->bd_bif->bif_ifp, 1);
			lck_mtx_lock(bpf_mlock);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL) {
			error = EINVAL;
		} else {
			error = bpf_getdltlist(d,
			    (struct bpf_dltlist *)addr, p);
		}
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			snprintf(ifr->ifr_name, sizeof(ifr->ifr_name),
			    "%s%d", ifp->if_name, ifp->if_unit);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF: {
		ifnet_t ifp;
		ifp = ifunit(((struct ifreq *)addr)->ifr_name);
		if (ifp == NULL)
			error = ENXIO;
		else
			error = bpf_setif(d, ifp, 0);
		break;
	}

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT: {
		struct BPF_TIMEVAL *_tv = (struct BPF_TIMEVAL *)addr;
		struct timeval tv;

		tv.tv_sec = _tv->tv_sec;
		tv.tv_usec = _tv->tv_usec;

		/*
		 * Subtract 1 tick from tvtohz() since this isn't
		 * a one-shot timer.
		 */
		if ((error = itimerfix(&tv)) == 0)
			d->bd_rtout = tvtohz(&tv) - 1;
		break;
	}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT: {
		struct BPF_TIMEVAL *tv = (struct BPF_TIMEVAL *)addr;

		tv->tv_sec = d->bd_rtout / hz;
		tv->tv_usec = (d->bd_rtout % hz) * tick;
		break;
	}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS: {
		struct bpf_stat *bs = (struct bpf_stat *)addr;

		bs->bs_recv = d->bd_rcount;
		bs->bs_drop = d->bd_dcount;
		break;
	}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION: {
		struct bpf_version *bv = (struct bpf_version *)addr;

		bv->bv_major = BPF_MAJOR_VERSION;
		bv->bv_minor = BPF_MINOR_VERSION;
		break;
	}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

#ifndef __APPLE__
	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(d->bd_sigio);
		break;
#endif

	case BIOCSRSIG: {	/* Set receive signal */
		u_int sig;

		sig = *(u_int *)addr;

		if (sig >= NSIG)
			error = EINVAL;
		else
			d->bd_sig = sig;
		break;
	}

	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}

	lck_mtx_unlock(bpf_mlock);
	return (error);
}
/******************************************************************************
agtiapi_InitResource()

Purpose:
  Mapping PCI memory space
  Allocate and initialize per card based resource
Parameters:
  ag_card_info_t *pCardInfo (IN)
Return:
  AGTIAPI_SUCCESS - success
  AGTIAPI_FAIL    - fail
Note:
******************************************************************************/
STATIC agBOOLEAN agtiapi_InitResource( ag_card_info_t *thisCardInst )
{
  struct agtiapi_softc *pmsc = thisCardInst->pCard;
  device_t devx = thisCardInst->pPCIDev;

  //AGTIAPI_PRINTK( "agtiapi_InitResource: begin; pointer values %p / %p \n",
  //        devx, thisCardInst );
  // no IO mapped card implementation, we'll implement memory mapping

  if( agtiapi_typhAlloc( thisCardInst ) == AGTIAPI_FAIL ) {
    printf( "agtiapi_InitResource: failed call to agtiapi_typhAlloc \n" );
    return AGTIAPI_FAIL;
  }
  AGTIAPI_PRINTK( "agtiapi_InitResource: dma alloc MemSpan %p -- %p\n",
          (void*) pmsc->typh_busaddr,
          (void*) ( (U32_64)pmsc->typh_busaddr + pmsc->typhn ) );

  // logical BARs for SPC:
  //  bar 0 and 1 - logical BAR0
  //  bar 2 and 3 - logical BAR1
  //  bar4 - logical BAR2
  //  bar5 - logical BAR3
  // Skipping the assignments for bar 1 and bar 3 (making bar 0, 2 64-bit):
  U32 bar;
  U32 lBar = 0; // logicalBar
  for (bar = 0; bar < PCI_NUMBER_BARS; bar++) {
    if ((bar==1) || (bar==3))
      continue;
    thisCardInst->pciMemBaseRIDSpc[lBar] = PCIR_BAR(bar);
    thisCardInst->pciMemBaseRscSpc[lBar] =
      bus_alloc_resource_any( devx,
                              SYS_RES_MEMORY,
                              &(thisCardInst->pciMemBaseRIDSpc[lBar]),
                              RF_ACTIVE );
    AGTIAPI_PRINTK( "agtiapi_InitResource: bus_alloc_resource_any rtn %p \n",
            thisCardInst->pciMemBaseRscSpc[lBar] );
    if ( thisCardInst->pciMemBaseRscSpc[lBar] != NULL ) {
      thisCardInst->pciMemVirtAddrSpc[lBar] =
        (caddr_t)rman_get_virtual( thisCardInst->pciMemBaseRscSpc[lBar] );
      thisCardInst->pciMemBaseSpc[lBar] =
        bus_get_resource_start( devx, SYS_RES_MEMORY,
                                thisCardInst->pciMemBaseRIDSpc[lBar]);
      thisCardInst->pciMemSizeSpc[lBar] =
        bus_get_resource_count( devx, SYS_RES_MEMORY,
                                thisCardInst->pciMemBaseRIDSpc[lBar] );
      AGTIAPI_PRINTK( "agtiapi_InitResource: PCI: bar %d, lBar %d "
              "VirtAddr=%lx, len=%d\n", bar, lBar,
              (long unsigned int)thisCardInst->pciMemVirtAddrSpc[lBar],
              thisCardInst->pciMemSizeSpc[lBar] );
    }
    else {
      thisCardInst->pciMemVirtAddrSpc[lBar] = 0;
      thisCardInst->pciMemBaseSpc[lBar] = 0;
      thisCardInst->pciMemSizeSpc[lBar] = 0;
    }
    lBar++;
  }
  thisCardInst->pciMemVirtAddr = thisCardInst->pciMemVirtAddrSpc[0];
  thisCardInst->pciMemSize = thisCardInst->pciMemSizeSpc[0];
  thisCardInst->pciMemBase = thisCardInst->pciMemBaseSpc[0];

  // Allocate all TI data structure required resources.
  // tiLoLevelResource
  U32 numVal;
  ag_resource_info_t *pRscInfo;
  pRscInfo = &thisCardInst->tiRscInfo;

  pRscInfo->tiLoLevelResource.loLevelOption.pciFunctionNumber =
    pci_get_function( devx );

  struct timeval tv;
  tv.tv_sec  = 1;
  tv.tv_usec = 0;
  int ticksPerSec;
  ticksPerSec = tvtohz( &tv );
  int uSecPerTick = 1000000 / ticksPerSec; // derive usec-per-tick from the measured tick rate

  if (pRscInfo->tiLoLevelResource.loLevelMem.count != 0) {
    //AGTIAPI_INIT("agtiapi_InitResource: loLevelMem count = %d\n",
    //             pRscInfo->tiLoLevelResource.loLevelMem.count);

    // adjust tick value to meet Linux requirement
    pRscInfo->tiLoLevelResource.loLevelOption.usecsPerTick = uSecPerTick;
    AGTIAPI_PRINTK( "agtiapi_InitResource: "
            "pRscInfo->tiLoLevelResource.loLevelOption.usecsPerTick"
            " 0x%x\n",
            pRscInfo->tiLoLevelResource.loLevelOption.usecsPerTick );
    for( numVal = 0; numVal < pRscInfo->tiLoLevelResource.loLevelMem.count;
         numVal++ ) {
      if( pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].totalLength ==
          0 ) {
        AGTIAPI_PRINTK("agtiapi_InitResource: skip ZERO %d\n", numVal);
        continue;
      }

      // check for 64 bit alignment
      if ( pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].alignment <
           AGTIAPI_64BIT_ALIGN ) {
        AGTIAPI_PRINTK("agtiapi_InitResource: set ALIGN %d\n", numVal);
        pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].alignment =
          AGTIAPI_64BIT_ALIGN;
      }
      if( ((pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].type &
            (BIT(0) | BIT(1))) == TI_DMA_MEM) ||
          ((pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].type &
            (BIT(0) | BIT(1))) == TI_CACHED_DMA_MEM)) {
        if ( thisCardInst->dmaIndex >=
             sizeof(thisCardInst->tiDmaMem) /
             sizeof(thisCardInst->tiDmaMem[0]) ) {
          AGTIAPI_PRINTK( "Invalid dmaIndex %d ERROR\n",
                  thisCardInst->dmaIndex );
          return AGTIAPI_FAIL;
        }
        thisCardInst->tiDmaMem[thisCardInst->dmaIndex].type =
#ifdef CACHED_DMA
          pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].type &
          (BIT(0) | BIT(1));
#else
          TI_DMA_MEM;
#endif
        if( agtiapi_MemAlloc( thisCardInst,
              &thisCardInst->tiDmaMem[thisCardInst->dmaIndex].dmaVirtAddr,
              &thisCardInst->tiDmaMem[thisCardInst->dmaIndex].dmaPhysAddr,
              &pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].virtPtr,
              &pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].
              physAddrUpper,
              &pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].
              physAddrLower,
              pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].totalLength,
              thisCardInst->tiDmaMem[thisCardInst->dmaIndex].type,
              pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].alignment)
            != AGTIAPI_SUCCESS ) {
          return AGTIAPI_FAIL;
        }
        thisCardInst->tiDmaMem[thisCardInst->dmaIndex].memSize =
          pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].totalLength;
        //AGTIAPI_INIT("agtiapi_InitResource: LoMem %d dmaIndex=%d DMA virt"
        //             " %p, phys 0x%x, length %d align %d\n",
        //             numVal, pCardInfo->dmaIndex,
        //             pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].virtPtr,
        //             pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].physAddrLower,
        //             pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].totalLength,
        //             pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].alignment);
        thisCardInst->dmaIndex++;
      }
      else if ( (pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].type &
                 (BIT(0) | BIT(1))) == TI_CACHED_MEM) {
        if (thisCardInst->cacheIndex >=
            sizeof(thisCardInst->tiCachedMem) /
            sizeof(thisCardInst->tiCachedMem[0])) {
          AGTIAPI_PRINTK( "Invalid cacheIndex %d ERROR\n",
                  thisCardInst->cacheIndex );
          return AGTIAPI_FAIL;
        }
        if ( agtiapi_MemAlloc( thisCardInst,
               &thisCardInst->tiCachedMem[thisCardInst->cacheIndex],
               (vm_paddr_t *)agNULL,
               &pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].virtPtr,
               (U32 *)agNULL,
               (U32 *)agNULL,
               pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].totalLength,
               TI_CACHED_MEM,
               pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].alignment)
             != AGTIAPI_SUCCESS ) {
          return AGTIAPI_FAIL;
        }
        //AGTIAPI_INIT("agtiapi_InitResource: LoMem %d cacheIndex=%d CACHED "
        //             "vaddr %p / %p, length %d align %d\n",
        //             numVal, pCardInfo->cacheIndex,
        //             pCardInfo->tiCachedMem[pCardInfo->cacheIndex],
        //             pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].virtPtr,
        //             pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].totalLength,
        //             pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].alignment);
        thisCardInst->cacheIndex++;
      }
      else if ( ((pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].type &
                  (BIT(0) | BIT(1))) == TI_DMA_MEM_CHIP)) {
        // not expecting this case, print warning that should get attention
        printf( "RED ALARM: we need a BAR for TI_DMA_MEM_CHIP, ignoring!" );
      }
      else {
        printf( "agtiapi_InitResource: Unknown required memory type %d "
                "ERROR!\n",
                pRscInfo->tiLoLevelResource.loLevelMem.mem[numVal].type);
        return AGTIAPI_FAIL;
      }
    }
  }
  // end: TI data structure resources ...

  // begin: tiInitiatorResource
  if ( pmsc->flags & AGTIAPI_INITIATOR ) {
    if ( pRscInfo->tiInitiatorResource.initiatorMem.count != 0 ) {
      //AGTIAPI_INIT("agtiapi_InitResource: initiatorMem count = %d\n",
      //             pRscInfo->tiInitiatorResource.initiatorMem.count);
      numVal =
        (U32)( pRscInfo->tiInitiatorResource.initiatorOption.usecsPerTick
               / uSecPerTick );
      if( pRscInfo->tiInitiatorResource.initiatorOption.usecsPerTick
          % uSecPerTick > 0 )
        pRscInfo->tiInitiatorResource.initiatorOption.usecsPerTick =
          (numVal + 1) * uSecPerTick;
      else
        pRscInfo->tiInitiatorResource.initiatorOption.usecsPerTick =
          numVal * uSecPerTick;
      for ( numVal = 0;
            numVal < pRscInfo->tiInitiatorResource.initiatorMem.count;
            numVal++ ) {
        // check for 64 bit alignment
        if( pRscInfo->tiInitiatorResource.initiatorMem.tdCachedMem[numVal].
            alignment < AGTIAPI_64BIT_ALIGN ) {
          pRscInfo->tiInitiatorResource.initiatorMem.tdCachedMem[numVal].
            alignment = AGTIAPI_64BIT_ALIGN;
        }
        if( thisCardInst->cacheIndex >=
            sizeof( thisCardInst->tiCachedMem) /
            sizeof( thisCardInst->tiCachedMem[0])) {
          AGTIAPI_PRINTK( "Invalid cacheIndex %d ERROR\n",
                  thisCardInst->cacheIndex );
          return AGTIAPI_FAIL;
        }

        // initiator memory is cached, no check is needed
        if( agtiapi_MemAlloc( thisCardInst,
              (void *)&thisCardInst->tiCachedMem[thisCardInst->cacheIndex],
              (vm_paddr_t *)agNULL,
              &pRscInfo->tiInitiatorResource.initiatorMem.
              tdCachedMem[numVal].virtPtr,
              (U32 *)agNULL,
              (U32 *)agNULL,
              pRscInfo->tiInitiatorResource.initiatorMem.tdCachedMem[numVal].
              totalLength,
              TI_CACHED_MEM,
              pRscInfo->tiInitiatorResource.initiatorMem.tdCachedMem[numVal].
              alignment) != AGTIAPI_SUCCESS) {
          return AGTIAPI_FAIL;
        }
        // AGTIAPI_INIT("agtiapi_InitResource: IniMem %d cacheIndex=%d CACHED "
        //              "vaddr %p / %p, length %d align 0x%x\n",
        //              numVal,
        //              pCardInfo->cacheIndex,
        //              pCardInfo->tiCachedMem[pCardInfo->cacheIndex],
        //              pRscInfo->tiInitiatorResource.initiatorMem.tdCachedMem[numVal].
        //              virtPtr,
        //              pRscInfo->tiInitiatorResource.initiatorMem.tdCachedMem[numVal].
        //              totalLength,
        //              pRscInfo->tiInitiatorResource.initiatorMem.tdCachedMem[numVal].
        //              alignment);
        thisCardInst->cacheIndex++;
      }
    }
  }
  // end: tiInitiatorResource

  // begin: tiTdSharedMem
  if (pRscInfo->tiSharedMem.tdSharedCachedMem1.totalLength != 0) {
    // check for 64 bit alignment
    if( pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment <
        AGTIAPI_64BIT_ALIGN ) {
      pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment =
        AGTIAPI_64BIT_ALIGN;
    }
    if( (pRscInfo->tiSharedMem.tdSharedCachedMem1.type &
         (BIT(0) | BIT(1))) == TI_DMA_MEM ) {
      if( thisCardInst->dmaIndex >=
          sizeof(thisCardInst->tiDmaMem) /
          sizeof(thisCardInst->tiDmaMem[0]) ) {
        AGTIAPI_PRINTK( "Invalid dmaIndex %d ERROR\n",
                thisCardInst->dmaIndex);
        return AGTIAPI_FAIL;
      }
      if( agtiapi_MemAlloc( thisCardInst,
            (void *)&thisCardInst->
            tiDmaMem[thisCardInst->dmaIndex].dmaVirtAddr,
            &thisCardInst->tiDmaMem[thisCardInst->dmaIndex].dmaPhysAddr,
            &pRscInfo->tiSharedMem.tdSharedCachedMem1.virtPtr,
            &pRscInfo->tiSharedMem.tdSharedCachedMem1.physAddrUpper,
            &pRscInfo->tiSharedMem.tdSharedCachedMem1.physAddrLower,
            pRscInfo->tiSharedMem.tdSharedCachedMem1.
            totalLength,
            TI_DMA_MEM,
            pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment)
          != AGTIAPI_SUCCESS )
        return AGTIAPI_FAIL;
      thisCardInst->tiDmaMem[thisCardInst->dmaIndex].memSize =
        pRscInfo->tiSharedMem.tdSharedCachedMem1.totalLength +
        pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment;
      // printf( "agtiapi_InitResource: SharedMem DmaIndex=%d DMA "
      //         "virt %p / %p, phys 0x%x, align %d\n",
      //         thisCardInst->dmaIndex,
      //         thisCardInst->tiDmaMem[thisCardInst->dmaIndex].dmaVirtAddr,
      //         pRscInfo->tiSharedMem.tdSharedCachedMem1.virtPtr,
      //         pRscInfo->tiSharedMem.tdSharedCachedMem1.physAddrLower,
      //         pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment);
      thisCardInst->dmaIndex++;
    }
    else if( (pRscInfo->tiSharedMem.tdSharedCachedMem1.type &
              (BIT(0) | BIT(1))) == TI_CACHED_MEM ) {
      if( thisCardInst->cacheIndex >=
          sizeof(thisCardInst->tiCachedMem) /
          sizeof(thisCardInst->tiCachedMem[0]) ) {
        AGTIAPI_PRINTK( "Invalid cacheIndex %d ERROR\n",
                thisCardInst->cacheIndex);
        return AGTIAPI_FAIL;
      }
      if( agtiapi_MemAlloc( thisCardInst,
            (void *)&thisCardInst->tiCachedMem[thisCardInst->cacheIndex],
            (vm_paddr_t *)agNULL,
            &pRscInfo->tiSharedMem.tdSharedCachedMem1.virtPtr,
            (U32 *)agNULL,
            (U32 *)agNULL,
            pRscInfo->tiSharedMem.tdSharedCachedMem1.totalLength,
            TI_CACHED_MEM,
            pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment)
          != AGTIAPI_SUCCESS )
        return AGTIAPI_FAIL;
      // printf( "agtiapi_InitResource: SharedMem cacheIndex=%d CACHED "
      //         "vaddr %p / %p, length %d align 0x%x\n",
      //         thisCardInst->cacheIndex,
      //         thisCardInst->tiCachedMem[thisCardInst->cacheIndex],
      //         pRscInfo->tiSharedMem.tdSharedCachedMem1.virtPtr,
      //         pRscInfo->tiSharedMem.tdSharedCachedMem1.totalLength,
      //         pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment);
      AGTIAPI_PRINTK( "agtiapi_InitResource: SharedMem cacheIndex=%d CACHED "
              "vaddr %p / %p, length %d align 0x%x\n",
              thisCardInst->cacheIndex,
              thisCardInst->tiCachedMem[thisCardInst->cacheIndex],
              pRscInfo->tiSharedMem.tdSharedCachedMem1.virtPtr,
              pRscInfo->tiSharedMem.tdSharedCachedMem1.totalLength,
              pRscInfo->tiSharedMem.tdSharedCachedMem1.alignment );
      thisCardInst->cacheIndex++;
    }
    else {
      AGTIAPI_PRINTK( "agtiapi_InitResource: "
              "Unknown required memory type ERROR!\n" );
      return AGTIAPI_FAIL;
    }
  }
  // end: tiTdSharedMem

  DELAY( 200000 ); // or use AGTIAPI_INIT_MDELAY(200);
  return AGTIAPI_SUCCESS;
} // agtiapi_InitResource() ends here
static int
kern_sem_wait(struct thread *td, semid_t id, int tryflag,
    struct timespec *abstime)
{
	struct timespec ts1, ts2;
	struct timeval tv;
	struct file *fp;
	struct ksem *ks;
	int error;

	DP((">>> kern_sem_wait entered! pid=%d\n", (int)td->td_proc->p_pid));
	error = ksem_get(td, id, CAP_SEM_WAIT, &fp);
	if (error)
		return (error);
	ks = fp->f_data;
	mtx_lock(&sem_lock);
	DP((">>> kern_sem_wait critical section entered! pid=%d\n",
	    (int)td->td_proc->p_pid));
#ifdef MAC
	error = mac_posixsem_check_wait(td->td_ucred, fp->f_cred, ks);
	if (error) {
		DP(("kern_sem_wait mac failed\n"));
		goto err;
	}
#endif
	DP(("kern_sem_wait value = %d, tryflag %d\n", ks->ks_value, tryflag));
	vfs_timestamp(&ks->ks_atime);
	while (ks->ks_value == 0) {
		ks->ks_waiters++;
		if (tryflag != 0)
			error = EAGAIN;
		else if (abstime == NULL)
			error = cv_wait_sig(&ks->ks_cv, &sem_lock);
		else {
			for (;;) {
				ts1 = *abstime;
				getnanotime(&ts2);
				timespecsub(&ts1, &ts2);
				TIMESPEC_TO_TIMEVAL(&tv, &ts1);
				if (tv.tv_sec < 0) {
					error = ETIMEDOUT;
					break;
				}
				error = cv_timedwait_sig(&ks->ks_cv,
				    &sem_lock, tvtohz(&tv));
				if (error != EWOULDBLOCK)
					break;
			}
		}
		ks->ks_waiters--;
		if (error)
			goto err;
	}
	ks->ks_value--;
	DP(("kern_sem_wait value post-decrement = %d\n", ks->ks_value));
	error = 0;
err:
	mtx_unlock(&sem_lock);
	fdrop(fp, td);
	DP(("<<< kern_sem_wait leaving, pid=%d, error = %d\n",
	    (int)td->td_proc->p_pid, error));
	return (error);
}
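/*
 * Sketch of the absolute-deadline loop kern_sem_wait() uses above: each
 * wakeup recomputes "deadline - now"; a negative remainder means
 * ETIMEDOUT, while EWOULDBLOCK from the timed wait means the tick budget
 * expired and the condition should simply be re-checked.  A userland
 * analogue of the remaining-time computation (function name hypothetical):
 */
#include <time.h>
#include <sys/time.h>

/* Return -1 if "abstime" already passed, else fill *tv with the remainder. */
static int
remaining_tv(const struct timespec *abstime, struct timeval *tv)
{
	struct timespec now, rem;

	clock_gettime(CLOCK_REALTIME, &now);	/* getnanotime() analogue */
	rem.tv_sec = abstime->tv_sec - now.tv_sec;
	rem.tv_nsec = abstime->tv_nsec - now.tv_nsec;
	if (rem.tv_nsec < 0) {			/* borrow from seconds */
		rem.tv_sec -= 1;
		rem.tv_nsec += 1000000000L;
	}
	if (rem.tv_sec < 0)
		return (-1);
	tv->tv_sec = rem.tv_sec;
	tv->tv_usec = rem.tv_nsec / 1000;
	return (0);
}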