static int hwmp_send_preq(struct ieee80211_node *ni, const uint8_t sa[IEEE80211_ADDR_LEN], const uint8_t da[IEEE80211_ADDR_LEN], struct ieee80211_meshpreq_ie *preq) { struct ieee80211_hwmp_state *hs = ni->ni_vap->iv_hwmp; /* * Enforce PREQ interval. */ if (ratecheck(&hs->hs_lastpreq, &ieee80211_hwmp_preqminint) == 0) return EALREADY; getmicrouptime(&hs->hs_lastpreq); /* * mesh preq action frame format * [6] da * [6] sa * [6] addr3 = sa * [1] action * [1] category * [tlv] mesh path request */ preq->preq_ie = IEEE80211_ELEMID_MESHPREQ; return hwmp_send_action(ni, sa, da, (uint8_t *)preq, sizeof(struct ieee80211_meshpreq_ie)); }
void
ieee80211_rssadapt_raise_rate(struct ieee80211com *ic,
    struct ieee80211_rssadapt *ra, struct ieee80211_rssdesc *id)
{
    u_int16_t (*thrs)[IEEE80211_RATE_SIZE], newthr, oldthr;
    struct ieee80211_node *ni = id->id_node;
    struct ieee80211_rateset *rs = &ni->ni_rates;
    int i, rate, top;
#ifdef IEEE80211_DEBUG
    int j;
#endif

    ra->ra_nok++;

    if (!ratecheck(&ra->ra_last_raise, &ra->ra_raise_interval))
        return;

    for (i = 0, top = IEEE80211_RSSADAPT_BKT0;
         i < IEEE80211_RSSADAPT_BKTS;
         i++, top <<= IEEE80211_RSSADAPT_BKTPOWER) {
        thrs = &ra->ra_rate_thresh[i];
        if (id->id_len <= top)
            break;
    }

    if (id->id_rateidx + 1 < rs->rs_nrates &&
        (*thrs)[id->id_rateidx + 1] > (*thrs)[id->id_rateidx]) {
        rate = (rs->rs_rates[id->id_rateidx + 1] & IEEE80211_RATE_VAL);

        RSSADAPT_PRINTF(("%s: threshold[%d, %d.%d] decay %d ",
            ic->ic_ifp->if_xname,
            IEEE80211_RSSADAPT_BKT0 << (IEEE80211_RSSADAPT_BKTPOWER * i),
            rate / 2, rate * 5 % 10, (*thrs)[id->id_rateidx + 1]));
        oldthr = (*thrs)[id->id_rateidx + 1];
        if ((*thrs)[id->id_rateidx] == 0)
            newthr = ra->ra_avg_rssi;
        else
            newthr = (*thrs)[id->id_rateidx];
        (*thrs)[id->id_rateidx + 1] =
            interpolate(master_expavgctl.rc_decay, oldthr, newthr);
        RSSADAPT_PRINTF(("-> %d\n", (*thrs)[id->id_rateidx + 1]));
    }

#ifdef IEEE80211_DEBUG
    if (RSSADAPT_DO_PRINT()) {
        printf("%s: dst %s thresholds\n", ic->ic_ifp->if_xname,
            ether_sprintf(ni->ni_macaddr));
        for (i = 0; i < IEEE80211_RSSADAPT_BKTS; i++) {
            printf("%d-byte", IEEE80211_RSSADAPT_BKT0 <<
                (IEEE80211_RSSADAPT_BKTPOWER * i));
            for (j = 0; j < rs->rs_nrates; j++) {
                rate = (rs->rs_rates[j] & IEEE80211_RATE_VAL);
                printf(", T[%d.%d] = %d", rate / 2,
                    rate * 5 % 10, ra->ra_rate_thresh[i][j]);
            }
            printf("\n");
        }
    }
#endif /* IEEE80211_DEBUG */
}
static int hwmp_send_perr(struct ieee80211_node *ni, const uint8_t sa[IEEE80211_ADDR_LEN], const uint8_t da[IEEE80211_ADDR_LEN], struct ieee80211_meshperr_ie *perr) { struct ieee80211_hwmp_state *hs = ni->ni_vap->iv_hwmp; /* * Enforce PERR interval. */ if (ratecheck(&hs->hs_lastperr, &ieee80211_hwmp_perrminint) == 0) return EALREADY; getmicrouptime(&hs->hs_lastperr); /* * mesh perr action frame format * [6] da * [6] sa * [6] addr3 = sa * [1] action * [1] category * [tlv] mesh path error */ perr->perr_ie = IEEE80211_ELEMID_MESHPERR; return hwmp_send_action(ni, sa, da, (uint8_t *)perr, sizeof(struct ieee80211_meshperr_ie)); }
void ral_rssadapt_raise_rate(struct ieee80211com *ic, struct ral_rssadapt *ra, struct ral_rssdesc *id) { u_int16_t (*thrs)[IEEE80211_RATE_SIZE], newthr, oldthr; struct ieee80211_node *ni = id->id_node; struct ieee80211_rateset *rs = &ni->ni_rates; int i, rate, top; ra->ra_nok++; if (!ratecheck(&ra->ra_last_raise, &ra->ra_raise_interval)) return; for (i = 0, top = RAL_RSSADAPT_BKT0; i < RAL_RSSADAPT_BKTS; i++, top <<= RAL_RSSADAPT_BKTPOWER) { thrs = &ra->ra_rate_thresh[i]; if (id->id_len <= top) break; } if (id->id_rateidx + 1 < rs->rs_nrates && (*thrs)[id->id_rateidx + 1] > (*thrs)[id->id_rateidx]) { rate = (rs->rs_rates[id->id_rateidx + 1] & IEEE80211_RATE_VAL); oldthr = (*thrs)[id->id_rateidx + 1]; if ((*thrs)[id->id_rateidx] == 0) newthr = ra->ra_avg_rssi; else newthr = (*thrs)[id->id_rateidx]; (*thrs)[id->id_rateidx + 1] = interpolate(master_expavgctl.rc_decay, oldthr, newthr); } }
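/*
 * Hedged sketch, not the original source: both *_rssadapt_raise_rate()
 * variants above decay a threshold toward its neighbour by calling
 * interpolate(master_expavgctl.rc_decay, oldthr, newthr).  A plausible
 * shape for such a helper is a fixed-point weighted average like the
 * hypothetical expavg() below; the real interpolate() and the fields of
 * master_expavgctl live elsewhere in the rssadapt code and may differ.
 */
static inline u_int16_t
expavg(u_int16_t oldval, u_int16_t newval, u_int16_t oldweight,
    u_int16_t denom)
{
    /* weight the old value by oldweight/denom, the new by the rest */
    return (oldweight * oldval + (denom - oldweight) * newval) / denom;
}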
/*
 * Security sensitive rate limiting printf
 */
void
rlprintf(struct timeval *t, const char *fmt, ...)
{
    va_list ap;
    static const struct timeval msgperiod[1] = {{ 5, 0 }};

    if (ratecheck(t, msgperiod)) {
        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
    }
}
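/*
 * Hedged usage sketch: a caller keeps one static struct timeval per
 * message site, so rlprintf() emits that message at most once per
 * msgperiod (5 seconds above).  The names below are illustrative only,
 * not taken from any real caller.
 */
static struct timeval drop_msg_last;    /* hypothetical per-site timestamp */

static void
example_drop_warning(int count)
{
    /* at most one "dropped" message every 5 seconds */
    rlprintf(&drop_msg_last, "dropped %d packets\n", count);
}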
int
octeon_eth_recv_check(struct octeon_eth_softc *sc, uint64_t word2)
{
    if (__predict_false(octeon_eth_recv_check_link(sc, word2)) != 0) {
        if (ratecheck(&sc->sc_rate_recv_check_link_last,
            &sc->sc_rate_recv_check_link_cap))
            log(LOG_DEBUG,
                "%s: link is not up, the packet was dropped\n",
                sc->sc_dev.dv_xname);
        return 1;
    }

#if 0 /* XXX Performance tuning (Jumbo-frame is not supported yet!) */
    if (__predict_false(octeon_eth_recv_check_jumbo(sc, word2)) != 0) {
        /* XXX jumbo frame */
        if (ratecheck(&sc->sc_rate_recv_check_jumbo_last,
            &sc->sc_rate_recv_check_jumbo_cap))
            log(LOG_DEBUG, "jumbo frame was received\n");
        return 1;
    }
#endif

    if (__predict_false(octeon_eth_recv_check_code(sc, word2)) != 0) {
        if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
            PIP_WQE_WORD2_RE_OPCODE_LENGTH) {
            /* no logging */
            /* XXX increment special error count */
        } else if ((word2 & PIP_WQE_WORD2_NOIP_OPECODE) ==
            PIP_WQE_WORD2_RE_OPCODE_PARTIAL) {
            /* not an error. it's because of overload */
        } else {
            if (ratecheck(&sc->sc_rate_recv_check_code_last,
                &sc->sc_rate_recv_check_code_cap))
                log(LOG_WARNING,
                    "%s: a reception error occurred, "
                    "the packet was dropped (error code = %lld)\n",
                    sc->sc_dev.dv_xname,
                    word2 & PIP_WQE_WORD2_NOIP_OPECODE);
        }
        return 1;
    }

    return 0;
}
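/*
 * Hedged sketch: the sc_rate_recv_check_*_last/_cap pairs used above are
 * ordinary ratecheck() state, i.e. a "last event" timestamp plus a minimum
 * interval.  The field names come from the function above; the helper name
 * and the one-second intervals are illustrative, not the driver's actual
 * attach code or tuning.
 */
static void
octeon_eth_recv_check_ratelimit_init(struct octeon_eth_softc *sc)
{
    timerclear(&sc->sc_rate_recv_check_link_last);
    sc->sc_rate_recv_check_link_cap.tv_sec = 1;    /* assumed: 1 msg/sec */
    sc->sc_rate_recv_check_link_cap.tv_usec = 0;

    timerclear(&sc->sc_rate_recv_check_code_last);
    sc->sc_rate_recv_check_code_cap.tv_sec = 1;    /* assumed: 1 msg/sec */
    sc->sc_rate_recv_check_code_cap.tv_usec = 0;
}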
static int
zkbd_on(void *v)
{
#if NAPM > 0
    struct zkbd_softc *sc = (struct zkbd_softc *)v;
    int down;

    if (sc->sc_onkey_pin < 0)
        return 1;

    down = pxa2x0_gpio_get_bit(sc->sc_onkey_pin) ? 1 : 0;

    /*
     * Change run mode depending on how long the key is held down.
     * Ignore the key if it gets pressed while the lid is closed.
     *
     * Keys can bounce and we have to work around missed interrupts.
     * Only the second edge is detected upon exit from sleep mode.
     */
    if (down) {
        if (sc->sc_hinge == 3) {
            zkbdondown = 0;
        } else {
            microuptime(&zkbdontv);
            zkbdondown = 1;
        }
    } else if (zkbdondown) {
        if (ratecheck(&zkbdontv, &zkbdhalttv)) {
            if (kbd_reset == 1) {
                kbd_reset = 0;
                psignal(initproc, SIGUSR1);
            }
        } else if (ratecheck(&zkbdontv, &zkbdsleeptv)) {
            apm_suspends++;
        }
        zkbdondown = 0;
    }
#endif
    return 1;
}
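/*
 * Hedged sketch: zkbd_on() above measures how long the power key was held
 * by stamping zkbdontv at key-down and then using ratecheck() against two
 * thresholds at key-up (a long hold requests a halt/reset, a shorter hold
 * requests a suspend).  The variable names come from the code above; the
 * actual threshold values live in the zkbd driver, so the ones here are
 * placeholders only.
 */
static struct timeval zkbdhalttv  = { 3, 0 };       /* assumed: hold >= 3 s */
static struct timeval zkbdsleeptv = { 0, 250000 };  /* assumed: hold >= 250 ms */
static struct timeval zkbdontv;                     /* stamped at key-down */
static int zkbdondown;                              /* key currently held */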
void
ld_cac_done(device_t dv, void *context, int error)
{
    struct buf *bp;
    struct ld_cac_softc *sc;
    int rv;

    bp = context;
    rv = 0;
    sc = device_private(dv);

    if ((error & CAC_RET_CMD_REJECTED) == CAC_RET_CMD_REJECTED) {
        aprint_error_dev(dv, "command rejected\n");
        rv = EIO;
    }
    if (rv == 0 && (error & CAC_RET_INVAL_BLOCK) != 0) {
        aprint_error_dev(dv, "invalid request block\n");
        rv = EIO;
    }
    if (rv == 0 && (error & CAC_RET_HARD_ERROR) != 0) {
        aprint_error_dev(dv, "hard error\n");
        rv = EIO;
    }
    if (rv == 0 && (error & CAC_RET_SOFT_ERROR) != 0) {
        sc->sc_serrcnt++;
        if (ratecheck(&sc->sc_serrtm, &ld_cac_serrintvl)) {
            /* report the accumulated count before resetting it */
            aprint_error_dev(dv,
                "%d soft errors; array may be degraded\n",
                sc->sc_serrcnt);
            sc->sc_serrcnt = 0;
        }
    }
    if (rv) {
        bp->b_error = rv;
        bp->b_resid = bp->b_bcount;
    } else
        bp->b_resid = 0;

    mutex_exit(sc->sc_mutex);
    lddone(&sc->sc_ld, bp);
    mutex_enter(sc->sc_mutex);
}
void ld_cac_done(struct device *dv, void *context, int error) { struct buf *bp; struct ld_cac_softc *sc; bp = context; if ((error & CAC_RET_HARD_ERROR) != 0) { printf("%s: hard error\n", dv->dv_xname); error = EIO; } if ((error & CAC_RET_CMD_REJECTED) != 0) { printf("%s: invalid request\n", dv->dv_xname); error = EIO; } if ((error & CAC_RET_SOFT_ERROR) != 0) { sc = (struct ld_cac_softc *)dv; sc->sc_serrcnt++; if (ratecheck(&sc->sc_serrtm, &ld_cac_serrintvl)) { printf("%s: %d soft errors; array may be degraded\n", dv->dv_xname, sc->sc_serrcnt); sc->sc_serrcnt = 0; } error = 0; } if (error) { bp->b_flags |= B_ERROR; bp->b_error = error; bp->b_resid = bp->b_bcount; } else bp->b_resid = 0; lddone((struct ld_softc *)dv, bp); }
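/*
 * Hedged sketch: in both ld_cac_done() variants above, ld_cac_serrintvl
 * caps how often the "soft errors" message is logged; it is simply the
 * minimum-interval argument to ratecheck(), paired with the sc_serrtm
 * timestamp in the softc.  A typical definition looks like the one below;
 * the one-minute value is illustrative, not necessarily what the driver
 * actually uses.
 */
static const struct timeval ld_cac_serrintvl = { 60, 0 };    /* assumed: 60 s */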
/*
 * Poll power-management related GPIO inputs, update battery life
 * in softc, and/or control battery charging.
 */
void
zapm_poll(void *v)
{
    struct zapm_softc *sc = v;
    int ac_on;
    int bc_lock;
    int charging;
    int volt;
    int s;

    s = splhigh();

    /* Check position of battery compartment lock switch. */
    bc_lock = pxa2x0_gpio_get_bit(GPIO_BATT_COVER_C3000) ? 1 : 0;

    /* Stop discharging. */
    if (sc->sc_discharging) {
        sc->sc_discharging = 0;
        volt = zapm_batt_volt();
        ac_on = zapm_ac_on();
        charging = 0;
        DPRINTF(("zapm_poll: discharge off volt %d\n", volt));
    } else {
        ac_on = zapm_ac_on();
        charging = sc->sc_charging;
        volt = sc->sc_batt_volt;
    }

    /* Start or stop charging as necessary. */
    if (ac_on && bc_lock) {
        if (charging) {
            if (zapm_charge_complete(sc)) {
                DPRINTF(("zapm_poll: batt full\n"));
                charging = 0;
                zapm_enable_charging(sc, 0);
            }
        } else if (!zapm_charge_complete(sc)) {
            charging = 1;
            volt = zapm_batt_volt();
            zapm_enable_charging(sc, 1);
            DPRINTF(("zapm_poll: start charging volt %d\n", volt));
        }
    } else {
        if (charging) {
            charging = 0;
            zapm_enable_charging(sc, 0);
            timerclear(&sc->sc_lastbattchk);
            DPRINTF(("zapm_poll: stop charging\n"));
        }
        sc->sc_batt_full = 0;
    }

    /*
     * Restart charging once in a while.  Discharge a few milliseconds
     * before updating the voltage in our softc if A/C is connected.
     */
    if (bc_lock && ratecheck(&sc->sc_lastbattchk, &zapm_battchkrate)) {
        if (sc->sc_suspended) {
            DPRINTF(("zapm_poll: suspended %lu %lu\n",
                sc->sc_lastbattchk.tv_sec, pxa2x0_rtc_getsecs()));
            if (charging) {
                zapm_enable_charging(sc, 0);
                delay(15000);
                zapm_enable_charging(sc, 1);
                pxa2x0_rtc_setalarm(pxa2x0_rtc_getsecs() +
                    zapm_battchkrate.tv_sec + 1);
            }
        } else if (ac_on && sc->sc_batt_full == 0) {
            DPRINTF(("zapm_poll: discharge on\n"));
            if (charging)
                zapm_enable_charging(sc, 0);
            sc->sc_discharging = 1;
            scoop_discharge_battery(1);
            timeout_add(&sc->sc_poll, DISCHARGE_TIMEOUT);
        } else if (!ac_on) {
            volt = zapm_batt_volt();
            DPRINTF(("zapm_poll: volt %d\n", volt));
        }
    }

    /* Update the cached power state in our softc. */
    if (ac_on != sc->sc_ac_on || charging != sc->sc_charging ||
        volt != sc->sc_batt_volt) {
        sc->sc_ac_on = ac_on;
        sc->sc_charging = charging;
        sc->sc_batt_volt = volt;
        if (sc->sc_event == APM_NOEVENT)
            sc->sc_event = APM_POWER_CHANGE;
    }

    /* Detect battery low conditions. */
    if (!ac_on) {
        if (zapm_batt_life(volt) < 5)
            sc->sc_event = APM_BATTERY_LOW;
        if (zapm_batt_state(volt) == APM_BATT_CRITICAL)
            sc->sc_event = APM_CRIT_SUSPEND_REQ;
    }

#ifdef APMDEBUG
    if (sc->sc_event != APM_NOEVENT)
        DPRINTF(("zapm_poll: power event %d\n", sc->sc_event));
#endif

    splx(s);
}
static int ieee80211_do_slow_print(struct ieee80211com *ic, int *did_print) { static const struct timeval merge_print_intvl = { .tv_sec = 1, .tv_usec = 0 }; if ((ic->ic_if.if_flags & IFF_LINK0) == 0) return 0; if (!*did_print && (ic->ic_if.if_flags & IFF_DEBUG) == 0 && !ratecheck(&ic->ic_last_merge_print, &merge_print_intvl)) return 0; *did_print = 1; return 1; } /* ieee80211_ibss_merge helps merge 802.11 ad hoc networks. The * convention, set by the Wireless Ethernet Compatibility Alliance * (WECA), is that an 802.11 station will change its BSSID to match * the "oldest" 802.11 ad hoc network, on the same channel, that * has the station's desired SSID. The "oldest" 802.11 network * sends beacons with the greatest TSF timestamp. * * Return ENETRESET if the BSSID changed, 0 otherwise. * * XXX Perhaps we should compensate for the time that elapses * between the MAC receiving the beacon and the host processing it * in ieee80211_ibss_merge. */ int ieee80211_ibss_merge(struct ieee80211com *ic, struct ieee80211_node *ni, u_int64_t local_tsft) { u_int64_t beacon_tsft; int did_print = 0, sign; union { u_int64_t word; u_int8_t tstamp[8]; } u; /* ensure alignment */ (void)memcpy(&u, &ni->ni_tstamp[0], sizeof(u)); beacon_tsft = letoh64(u.word); /* we are faster, let the other guy catch up */ if (beacon_tsft < local_tsft) sign = -1; else sign = 1; if (IEEE80211_ADDR_EQ(ni->ni_bssid, ic->ic_bss->ni_bssid)) { if (!ieee80211_do_slow_print(ic, &did_print)) return 0; printf("%s: tsft offset %s%llu\n", ic->ic_if.if_xname, (sign < 0) ? "-" : "", (sign < 0) ? (local_tsft - beacon_tsft) : (beacon_tsft - local_tsft)); return 0; } if (sign < 0) return 0; if (ieee80211_match_bss(ic, ni) != 0) return 0; if (ieee80211_do_slow_print(ic, &did_print)) { printf("%s: ieee80211_ibss_merge: bssid mismatch %s\n", ic->ic_if.if_xname, ether_sprintf(ni->ni_bssid)); printf("%s: my tsft %llu beacon tsft %llu\n", ic->ic_if.if_xname, local_tsft, beacon_tsft); printf("%s: sync TSF with %s\n", ic->ic_if.if_xname, ether_sprintf(ni->ni_macaddr)); } ic->ic_flags &= ~IEEE80211_F_SIBSS; /* negotiate rates with new IBSS */ ieee80211_fix_rate(ic, ni, IEEE80211_F_DOFRATE | IEEE80211_F_DONEGO | IEEE80211_F_DODEL); if (ni->ni_rates.rs_nrates == 0) { if (ieee80211_do_slow_print(ic, &did_print)) { printf("%s: rates mismatch, BSSID %s\n", ic->ic_if.if_xname, ether_sprintf(ni->ni_bssid)); } return 0; } if (ieee80211_do_slow_print(ic, &did_print)) { printf("%s: sync BSSID %s -> ", ic->ic_if.if_xname, ether_sprintf(ic->ic_bss->ni_bssid)); printf("%s ", ether_sprintf(ni->ni_bssid)); printf("(from %s)\n", ether_sprintf(ni->ni_macaddr)); } ieee80211_node_newstate(ni, IEEE80211_STA_BSS); (*ic->ic_node_copy)(ic, ic->ic_bss, ni); return ENETRESET; }
/*
 * General fork call.  Note that another LWP in the process may call exec()
 * or exit() while we are forking.  It's safe to continue here, because
 * neither operation will complete until all LWPs have exited the process.
 */
int
fork1(struct lwp *l1, int flags, int exitsig, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
    struct proc *p1, *p2, *parent;
    struct plimit *p1_lim;
    uid_t uid;
    struct lwp *l2;
    int count;
    vaddr_t uaddr;
    int tnprocs;
    int tracefork;
    int error = 0;

    p1 = l1->l_proc;
    uid = kauth_cred_getuid(l1->l_cred);
    tnprocs = atomic_inc_uint_nv(&nprocs);

    /*
     * Although process entries are dynamically created, we still keep
     * a global limit on the maximum number we will create.
     */
    if (__predict_false(tnprocs >= maxproc))
        error = -1;
    else
        error = kauth_authorize_process(l1->l_cred,
            KAUTH_PROCESS_FORK, p1, KAUTH_ARG(tnprocs), NULL, NULL);

    if (error) {
        static struct timeval lasttfm;
        atomic_dec_uint(&nprocs);
        if (ratecheck(&lasttfm, &fork_tfmrate))
            tablefull("proc", "increase kern.maxproc or NPROC");
        if (forkfsleep)
            kpause("forkmx", false, forkfsleep, NULL);
        return EAGAIN;
    }

    /*
     * Enforce limits.
     */
    count = chgproccnt(uid, 1);
    if (__predict_false(count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)) {
        if (kauth_authorize_process(l1->l_cred, KAUTH_PROCESS_RLIMIT,
            p1, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
            &p1->p_rlimit[RLIMIT_NPROC], KAUTH_ARG(RLIMIT_NPROC)) != 0) {
            (void)chgproccnt(uid, -1);
            atomic_dec_uint(&nprocs);
            if (forkfsleep)
                kpause("forkulim", false, forkfsleep, NULL);
            return EAGAIN;
        }
    }

    /*
     * Allocate virtual address space for the U-area now, while it
     * is still easy to abort the fork operation if we're out of
     * kernel virtual address space.
     */
    uaddr = uvm_uarea_alloc();
    if (__predict_false(uaddr == 0)) {
        (void)chgproccnt(uid, -1);
        atomic_dec_uint(&nprocs);
        return ENOMEM;
    }

    /*
     * We are now committed to the fork.  From here on, we may
     * block on resources, but resource allocation may NOT fail.
     */

    /* Allocate new proc. */
    p2 = proc_alloc();

    /*
     * Make a proc table entry for the new process.
     * Start by zeroing the section of proc that is zero-initialized,
     * then copy the section that is copied directly from the parent.
     */
    memset(&p2->p_startzero, 0,
        (unsigned) ((char *)&p2->p_endzero - (char *)&p2->p_startzero));
    memcpy(&p2->p_startcopy, &p1->p_startcopy,
        (unsigned) ((char *)&p2->p_endcopy - (char *)&p2->p_startcopy));

    TAILQ_INIT(&p2->p_sigpend.sp_info);

    LIST_INIT(&p2->p_lwps);
    LIST_INIT(&p2->p_sigwaiters);

    /*
     * Duplicate sub-structures as needed.
     * Increase reference counts on shared objects.
     * Inherit flags we want to keep.  The flags related to SIGCHLD
     * handling are important in order to keep a consistent behaviour
     * for the child after the fork.  If we are a 32-bit process, the
     * child will be too.
     */
    p2->p_flag =
        p1->p_flag & (PK_SUGID | PK_NOCLDWAIT | PK_CLDSIGIGN | PK_32);
    p2->p_emul = p1->p_emul;
    p2->p_execsw = p1->p_execsw;

    if (flags & FORK_SYSTEM) {
        /*
         * Mark it as a system process.  Set P_NOCLDWAIT so that
         * children are reparented to init(8) when they exit.
         * init(8) can easily wait them out for us.
         */
        p2->p_flag |= (PK_SYSTEM | PK_NOCLDWAIT);
    }

    mutex_init(&p2->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
    mutex_init(&p2->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
    rw_init(&p2->p_reflock);
    cv_init(&p2->p_waitcv, "wait");
    cv_init(&p2->p_lwpcv, "lwpwait");

    /*
     * Share a lock between the processes if they are to share signal
     * state: we must synchronize access to it.
     */
    if (flags & FORK_SHARESIGS) {
        p2->p_lock = p1->p_lock;
        mutex_obj_hold(p1->p_lock);
    } else
        p2->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

    kauth_proc_fork(p1, p2);

    p2->p_raslist = NULL;
#if defined(__HAVE_RAS)
    ras_fork(p1, p2);
#endif

    /* bump references to the text vnode (for procfs) */
    p2->p_textvp = p1->p_textvp;
    if (p2->p_textvp)
        vref(p2->p_textvp);

    if (flags & FORK_SHAREFILES)
        fd_share(p2);
    else if (flags & FORK_CLEANFILES)
        p2->p_fd = fd_init(NULL);
    else
        p2->p_fd = fd_copy();

    /* XXX racy */
    p2->p_mqueue_cnt = p1->p_mqueue_cnt;

    if (flags & FORK_SHARECWD)
        cwdshare(p2);
    else
        p2->p_cwdi = cwdinit();

    /*
     * Note: p_limit (rlimit stuff) is copy-on-write, so normally
     * we just need to increase pl_refcnt.
     */
    p1_lim = p1->p_limit;
    if (!p1_lim->pl_writeable) {
        lim_addref(p1_lim);
        p2->p_limit = p1_lim;
    } else {
        p2->p_limit = lim_copy(p1_lim);
    }

    if (flags & FORK_PPWAIT) {
        /* Mark ourselves as waiting for a child. */
        l1->l_pflag |= LP_VFORKWAIT;
        p2->p_lflag = PL_PPWAIT;
        p2->p_vforklwp = l1;
    } else {
        p2->p_lflag = 0;
    }
    p2->p_sflag = 0;
    p2->p_slflag = 0;
    parent = (flags & FORK_NOWAIT) ? initproc : p1;
    p2->p_pptr = parent;
    p2->p_ppid = parent->p_pid;
    LIST_INIT(&p2->p_children);

    p2->p_aio = NULL;

#ifdef KTRACE
    /*
     * Copy traceflag and tracefile if enabled.
     * If not inherited, these were zeroed above.
     */
    if (p1->p_traceflag & KTRFAC_INHERIT) {
        mutex_enter(&ktrace_lock);
        p2->p_traceflag = p1->p_traceflag;
        if ((p2->p_tracep = p1->p_tracep) != NULL)
            ktradref(p2);
        mutex_exit(&ktrace_lock);
    }
#endif

    /*
     * Create signal actions for the child process.
     */
    p2->p_sigacts = sigactsinit(p1, flags & FORK_SHARESIGS);
    mutex_enter(p1->p_lock);
    p2->p_sflag |=
        (p1->p_sflag & (PS_STOPFORK | PS_STOPEXEC | PS_NOCLDSTOP));
    sched_proc_fork(p1, p2);
    mutex_exit(p1->p_lock);

    p2->p_stflag = p1->p_stflag;

    /*
     * p_stats.
     * Copy parts of p_stats, and zero out the rest.
     */
    p2->p_stats = pstatscopy(p1->p_stats);

    /*
     * Set up the new process address space.
     */
    uvm_proc_fork(p1, p2, (flags & FORK_SHAREVM) ? true : false);

    /*
     * Finish creating the child process.
     * It will return through a different path later.
     */
    lwp_create(l1, p2, uaddr, (flags & FORK_PPWAIT) ? LWP_VFORK : 0,
        stack, stacksize, (func != NULL) ? func : child_return, arg, &l2,
        l1->l_class);

    /*
     * Inherit l_private from the parent.
     * Note that we cannot use lwp_setprivate() here since that
     * also sets the CPU TLS register, which is incorrect if the
     * process has changed that without letting the kernel know.
     */
    l2->l_private = l1->l_private;

    /*
     * If emulation has a process fork hook, call it now.
     */
    if (p2->p_emul->e_proc_fork)
        (*p2->p_emul->e_proc_fork)(p2, l1, flags);

    /*
     * ...and finally, any other random fork hooks that subsystems
     * might have registered.
     */
    doforkhooks(p2, p1);

    SDT_PROBE(proc,,,create, p2, p1, flags, 0, 0);

    /*
     * It's now safe for the scheduler and other processes to see the
     * child process.
     */
    mutex_enter(proc_lock);

    if (p1->p_session->s_ttyvp != NULL && p1->p_lflag & PL_CONTROLT)
        p2->p_lflag |= PL_CONTROLT;

    LIST_INSERT_HEAD(&parent->p_children, p2, p_sibling);
    p2->p_exitsig = exitsig;    /* signal for parent on exit */

    /*
     * We don't want to tracefork vfork()ed processes because they
     * will not receive the SIGTRAP until it is too late.
     */
    tracefork = (p1->p_slflag & (PSL_TRACEFORK|PSL_TRACED)) ==
        (PSL_TRACEFORK|PSL_TRACED) && (flags & FORK_PPWAIT) == 0;
    if (tracefork) {
        p2->p_slflag |= PSL_TRACED;
        p2->p_opptr = p2->p_pptr;
        if (p2->p_pptr != p1->p_pptr) {
            struct proc *parent1 = p2->p_pptr;

            if (parent1->p_lock < p2->p_lock) {
                if (!mutex_tryenter(parent1->p_lock)) {
                    mutex_exit(p2->p_lock);
                    mutex_enter(parent1->p_lock);
                }
            } else if (parent1->p_lock > p2->p_lock) {
                mutex_enter(parent1->p_lock);
            }
            parent1->p_slflag |= PSL_CHTRACED;
            proc_reparent(p2, p1->p_pptr);
            if (parent1->p_lock != p2->p_lock)
                mutex_exit(parent1->p_lock);
        }

        /*
         * Set ptrace status.
         */
        p1->p_fpid = p2->p_pid;
        p2->p_fpid = p1->p_pid;
    }

    LIST_INSERT_AFTER(p1, p2, p_pglist);
    LIST_INSERT_HEAD(&allproc, p2, p_list);

    p2->p_trace_enabled = trace_is_enabled(p2);
#ifdef __HAVE_SYSCALL_INTERN
    (*p2->p_emul->e_syscall_intern)(p2);
#endif

    /*
     * Update stats now that we know the fork was successful.
     */
    uvmexp.forks++;
    if (flags & FORK_PPWAIT)
        uvmexp.forks_ppwait++;
    if (flags & FORK_SHAREVM)
        uvmexp.forks_sharevm++;

    /*
     * Pass a pointer to the new process to the caller.
     */
    if (rnewprocp != NULL)
        *rnewprocp = p2;

    if (ktrpoint(KTR_EMUL))
        p2->p_traceflag |= KTRFAC_TRC_EMUL;

    /*
     * Notify any interested parties about the new process.
     */
    if (!SLIST_EMPTY(&p1->p_klist)) {
        mutex_exit(proc_lock);
        KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
        mutex_enter(proc_lock);
    }

    /*
     * Make child runnable, set start time, and add to run queue except
     * if the parent requested the child to start in SSTOP state.
     */
    mutex_enter(p2->p_lock);

    /*
     * Start profiling.
     */
    if ((p2->p_stflag & PST_PROFIL) != 0) {
        mutex_spin_enter(&p2->p_stmutex);
        startprofclock(p2);
        mutex_spin_exit(&p2->p_stmutex);
    }

    getmicrotime(&p2->p_stats->p_start);
    p2->p_acflag = AFORK;
    lwp_lock(l2);
    KASSERT(p2->p_nrlwps == 1);
    if (p2->p_sflag & PS_STOPFORK) {
        struct schedstate_percpu *spc = &l2->l_cpu->ci_schedstate;
        p2->p_nrlwps = 0;
        p2->p_stat = SSTOP;
        p2->p_waited = 0;
        p1->p_nstopchild++;
        l2->l_stat = LSSTOP;
        KASSERT(l2->l_wchan == NULL);
        lwp_unlock_to(l2, spc->spc_lwplock);
    } else {
        p2->p_nrlwps = 1;
        p2->p_stat = SACTIVE;
        l2->l_stat = LSRUN;
        sched_enqueue(l2, false);
        lwp_unlock(l2);
    }

    /*
     * Return child pid to parent process,
     * marking us as parent via retval[1].
     */
    if (retval != NULL) {
        retval[0] = p2->p_pid;
        retval[1] = 0;
    }
    mutex_exit(p2->p_lock);

    /*
     * Preserve synchronization semantics of vfork.  If waiting for
     * child to exec or exit, sleep until it clears LP_VFORKWAIT.
     */
#if 0
    while (l1->l_pflag & LP_VFORKWAIT) {
        cv_wait(&l1->l_waitcv, proc_lock);
    }
#else
    while (p2->p_lflag & PL_PPWAIT)
        cv_wait(&p1->p_waitcv, proc_lock);
#endif

    /*
     * Let the parent know that we are tracing its child.
     */
    if (tracefork) {
        ksiginfo_t ksi;

        KSI_INIT_EMPTY(&ksi);
        ksi.ksi_signo = SIGTRAP;
        ksi.ksi_lid = l1->l_lid;
        kpsignal(p1, &ksi, NULL);
    }

    mutex_exit(proc_lock);

    return 0;
}
/*
 * Poll power-management related GPIO inputs, update battery life
 * in softc, and/or control battery charging.
 */
static void
zapm_poll1(void *v, int do_suspend)
{
    struct zapm_softc *sc = (struct zapm_softc *)v;
    int ac_state;
    int bc_lock;
    int charging;
    int volt;

    if (!mutex_tryenter(&sc->sc_mtx))
        return;

    ac_state = zapm_get_ac_state(sc);
    bc_lock = zapm_get_battery_compartment_state(sc);

    /* Stop discharging. */
    if (sc->discharging) {
        sc->discharging = 0;
        charging = 0;
        volt = zapm_get_battery_volt();
        DPRINTF(("zapm_poll: discharge off volt %d\n", volt));
    } else {
        charging = sc->battery_state & APM_BATT_FLAG_CHARGING;
        volt = sc->battery_volt;
    }

    /* Start or stop charging as necessary. */
    if (ac_state && bc_lock) {
        int charge_completed = zapm_charge_complete(sc);

        if (charging) {
            if (charge_completed) {
                DPRINTF(("zapm_poll: battery is full\n"));
                charging = 0;
                zapm_set_charging(sc, 0);
            }
        } else if (!charge_completed) {
            charging = APM_BATT_FLAG_CHARGING;
            volt = zapm_get_battery_volt();
            zapm_set_charging(sc, 1);
            DPRINTF(("zapm_poll: start charging volt %d\n", volt));
        }
    } else {
        if (charging) {
            charging = 0;
            zapm_set_charging(sc, 0);
            timerclear(&sc->sc_lastbattchk);
            DPRINTF(("zapm_poll: stop charging\n"));
        }
        sc->battery_full_cnt = 0;
    }

    /*
     * Restart charging once in a while.  Discharge a few milliseconds
     * before updating the voltage in our softc if A/C is connected.
     */
    if (bc_lock && ratecheck(&sc->sc_lastbattchk, &zapm_battchkrate)) {
        if (do_suspend && sc->suspended) {
            /* XXX */
#if 0
            DPRINTF(("zapm_poll: suspended %lu %lu\n",
                sc->lastbattchk.tv_sec, pxa2x0_rtc_getsecs()));
            if (charging) {
                zapm_set_charging(sc, 0);
                delay(15000);
                zapm_set_charging(sc, 1);
                pxa2x0_rtc_setalarm(pxa2x0_rtc_getsecs() +
                    zapm_battchkrate.tv_sec + 1);
            }
#endif
        } else if (ac_state && sc->battery_full_cnt == 0) {
            DPRINTF(("zapm_poll: discharge on\n"));
            if (charging)
                zapm_set_charging(sc, 0);
            sc->discharging = 1;
            if (ZAURUS_ISC1000 || ZAURUS_ISC3000)
                scoop_discharge_battery(1);
            callout_schedule(&sc->sc_discharge_poll, DISCHARGE_TIMEOUT);
        } else if (!ac_state) {
            volt = zapm_get_battery_volt();
            DPRINTF(("zapm_poll: volt %d\n", volt));
        }
    }

    /* Update the cached power state in our softc. */
    if ((ac_state != sc->ac_state) ||
        (charging != (sc->battery_state & APM_BATT_FLAG_CHARGING))) {
        config_hook_call(CONFIG_HOOK_PMEVENT, CONFIG_HOOK_PMEVENT_AC,
            (void *)((ac_state == APM_AC_OFF) ? CONFIG_HOOK_AC_OFF :
                (charging ? CONFIG_HOOK_AC_ON_CHARGE :
                    CONFIG_HOOK_AC_ON_NOCHARGE)));
    }

    if (volt != sc->battery_volt) {
        sc->battery_volt = volt;
        sc->battery_life = zapm_battery_life(volt);
        config_hook_call(CONFIG_HOOK_PMEVENT, CONFIG_HOOK_PMEVENT_BATTERY,
            (void *)zapm_battery_state(volt));
    }

    mutex_exit(&sc->sc_mtx);
}
int
fork1(struct proc *p1, int exitsig, int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
    struct proc *p2;
    uid_t uid;
    struct vmspace *vm;
    int count;
    vaddr_t uaddr;
    int s;
    extern void endtsleep(void *);
    extern void realitexpire(void *);

    /*
     * Although process entries are dynamically created, we still keep
     * a global limit on the maximum number we will create.  We reserve
     * the last 5 processes to root.  The variable nprocs is the current
     * number of processes, maxproc is the limit.
     */
    uid = p1->p_cred->p_ruid;
    if ((nprocs >= maxproc - 5 && uid != 0) || nprocs >= maxproc) {
        static struct timeval lasttfm;

        if (ratecheck(&lasttfm, &fork_tfmrate))
            tablefull("proc");
        return (EAGAIN);
    }
    nprocs++;

    /*
     * Increment the count of procs running with this uid.  Don't allow
     * a nonprivileged user to exceed their current limit.
     */
    count = chgproccnt(uid, 1);
    if (uid != 0 && count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur) {
        (void)chgproccnt(uid, -1);
        nprocs--;
        return (EAGAIN);
    }

    uaddr = uvm_km_alloc1(kernel_map, USPACE, USPACE_ALIGN, 1);
    if (uaddr == 0) {
        chgproccnt(uid, -1);
        nprocs--;
        return (ENOMEM);
    }

    /*
     * From now on, we're committed to the fork and cannot fail.
     */

    /* Allocate new proc. */
    p2 = pool_get(&proc_pool, PR_WAITOK);

    p2->p_stat = SIDL;    /* protect against others */
    p2->p_exitsig = exitsig;
    p2->p_forw = p2->p_back = NULL;

#ifdef RTHREADS
    if (flags & FORK_THREAD) {
        atomic_setbits_int(&p2->p_flag, P_THREAD);
        p2->p_p = p1->p_p;
        TAILQ_INSERT_TAIL(&p2->p_p->ps_threads, p2, p_thr_link);
    } else {
        process_new(p2, p1);
    }
#else
    process_new(p2, p1);
#endif

    /*
     * Make a proc table entry for the new process.
     * Start by zeroing the section of proc that is zero-initialized,
     * then copy the section that is copied directly from the parent.
     */
    bzero(&p2->p_startzero,
        (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
    bcopy(&p1->p_startcopy, &p2->p_startcopy,
        (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

    /*
     * Initialize the timeouts.
     */
    timeout_set(&p2->p_sleep_to, endtsleep, p2);
    timeout_set(&p2->p_realit_to, realitexpire, p2);

#if defined(__HAVE_CPUINFO)
    p2->p_cpu = p1->p_cpu;
#endif

    /*
     * Duplicate sub-structures as needed.
     * Increase reference counts on shared objects.
     * The p_stats and p_sigacts substructs are set in vm_fork.
     */
    p2->p_flag = 0;
    p2->p_emul = p1->p_emul;
    if (p1->p_flag & P_PROFIL)
        startprofclock(p2);
    atomic_setbits_int(&p2->p_flag, p1->p_flag & (P_SUGID | P_SUGIDEXEC));
    if (flags & FORK_PTRACE)
        atomic_setbits_int(&p2->p_flag, p1->p_flag & P_TRACED);
#ifdef RTHREADS
    if (flags & FORK_THREAD) {
        /* nothing */
    } else
#endif
    {
        p2->p_p->ps_cred = pool_get(&pcred_pool, PR_WAITOK);
        bcopy(p1->p_p->ps_cred, p2->p_p->ps_cred,
            sizeof(*p2->p_p->ps_cred));
        p2->p_p->ps_cred->p_refcnt = 1;
        crhold(p1->p_ucred);
    }

    TAILQ_INIT(&p2->p_selects);

    /* bump references to the text vnode (for procfs) */
    p2->p_textvp = p1->p_textvp;
    if (p2->p_textvp)
        VREF(p2->p_textvp);

    if (flags & FORK_CLEANFILES)
        p2->p_fd = fdinit(p1);
    else if (flags & FORK_SHAREFILES)
        p2->p_fd = fdshare(p1);
    else
        p2->p_fd = fdcopy(p1);

    /*
     * If ps_limit is still copy-on-write, bump refcnt,
     * otherwise get a copy that won't be modified.
     * (If PL_SHAREMOD is clear, the structure is shared
     * copy-on-write.)
     */
#ifdef RTHREADS
    if (flags & FORK_THREAD) {
        /* nothing */
    } else
#endif
    {
        if (p1->p_p->ps_limit->p_lflags & PL_SHAREMOD)
            p2->p_p->ps_limit = limcopy(p1->p_p->ps_limit);
        else {
            p2->p_p->ps_limit = p1->p_p->ps_limit;
            p2->p_p->ps_limit->p_refcnt++;
        }
    }

    if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
        atomic_setbits_int(&p2->p_flag, P_CONTROLT);
    if (flags & FORK_PPWAIT)
        atomic_setbits_int(&p2->p_flag, P_PPWAIT);
    p2->p_pptr = p1;
    if (flags & FORK_NOZOMBIE)
        atomic_setbits_int(&p2->p_flag, P_NOZOMBIE);
    LIST_INIT(&p2->p_children);

#ifdef KTRACE
    /*
     * Copy traceflag and tracefile if enabled.
     * If not inherited, these were zeroed above.
     */
    if (p1->p_traceflag & KTRFAC_INHERIT) {
        p2->p_traceflag = p1->p_traceflag;
        if ((p2->p_tracep = p1->p_tracep) != NULL)
            VREF(p2->p_tracep);
    }
#endif

    /*
     * set priority of child to be that of parent
     * XXX should move p_estcpu into the region of struct proc which gets
     * copied.
     */
    scheduler_fork_hook(p1, p2);

    /*
     * Create signal actions for the child process.
     */
    if (flags & FORK_SIGHAND)
        sigactsshare(p1, p2);
    else
        p2->p_sigacts = sigactsinit(p1);

    /*
     * If emulation has a process fork hook, call it now.
     */
    if (p2->p_emul->e_proc_fork)
        (*p2->p_emul->e_proc_fork)(p2, p1);

    p2->p_addr = (struct user *)uaddr;

    /*
     * Finish creating the child process.  It will return through a
     * different path later.
     */
    uvm_fork(p1, p2, ((flags & FORK_SHAREVM) ? TRUE : FALSE), stack,
        stacksize, func ? func : child_return, arg ? arg : p2);

    timeout_set(&p2->p_stats->p_virt_to, virttimer_trampoline, p2);
    timeout_set(&p2->p_stats->p_prof_to, proftimer_trampoline, p2);

    vm = p2->p_vmspace;

    if (flags & FORK_FORK) {
        forkstat.cntfork++;
        forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
    } else if (flags & FORK_VFORK) {
        forkstat.cntvfork++;
        forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
    } else if (flags & FORK_RFORK) {
        forkstat.cntrfork++;
        forkstat.sizrfork += vm->vm_dsize + vm->vm_ssize;
    } else {
        forkstat.cntkthread++;
        forkstat.sizkthread += vm->vm_dsize + vm->vm_ssize;
    }

    /* Find an unused pid satisfying 1 <= lastpid <= PID_MAX */
    do {
        lastpid = 1 + (randompid ? arc4random() : lastpid) % PID_MAX;
    } while (pidtaken(lastpid));
    p2->p_pid = lastpid;

    LIST_INSERT_HEAD(&allproc, p2, p_list);
    LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
    LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling);
    LIST_INSERT_AFTER(p1, p2, p_pglist);

    if (p2->p_flag & P_TRACED) {
        p2->p_oppid = p1->p_pid;
        if (p2->p_pptr != p1->p_pptr)
            proc_reparent(p2, p1->p_pptr);

        /*
         * Set ptrace status.
         */
        if (flags & FORK_FORK) {
            p2->p_ptstat = malloc(sizeof(*p2->p_ptstat),
                M_SUBPROC, M_WAITOK);
            p1->p_ptstat->pe_report_event = PTRACE_FORK;
            p2->p_ptstat->pe_report_event = PTRACE_FORK;
            p1->p_ptstat->pe_other_pid = p2->p_pid;
            p2->p_ptstat->pe_other_pid = p1->p_pid;
        }
    }

#if NSYSTRACE > 0
    if (ISSET(p1->p_flag, P_SYSTRACE))
        systrace_fork(p1, p2);
#endif

    /*
     * Make child runnable, set start time, and add to run queue.
     */
    SCHED_LOCK(s);
    getmicrotime(&p2->p_stats->p_start);
    p2->p_acflag = AFORK;
    p2->p_stat = SRUN;
    setrunqueue(p2);
    SCHED_UNLOCK(s);

    /*
     * Notify any interested parties about the new process.
     */
    KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

    /*
     * Update stats now that we know the fork was successful.
     */
    uvmexp.forks++;
    if (flags & FORK_PPWAIT)
        uvmexp.forks_ppwait++;
    if (flags & FORK_SHAREVM)
        uvmexp.forks_sharevm++;

    /*
     * Pass a pointer to the new process to the caller.
     */
    if (rnewprocp != NULL)
        *rnewprocp = p2;

    /*
     * Preserve synchronization semantics of vfork.  If waiting for
     * child to exec or exit, set P_PPWAIT on child, and sleep on our
     * proc (in case of exit).
     */
    if (flags & FORK_PPWAIT)
        while (p2->p_flag & P_PPWAIT)
            tsleep(p1, PWAIT, "ppwait", 0);

    /*
     * If we're tracing the child, alert the parent too.
     */
    if ((flags & FORK_PTRACE) && (p1->p_flag & P_TRACED))
        psignal(p1, SIGTRAP);

    /*
     * Return child pid to parent process,
     * marking us as parent via retval[1].
     */
    if (retval != NULL) {
        retval[0] = p2->p_pid;
        retval[1] = 0;
    }

    return (0);
}