static int
ecc_x3400_attach(device_t dev)
{
	struct ecc_x3400_softc *sc = device_get_softc(dev);
	uint32_t val, dimms;

	callout_init_mp(&sc->ecc_callout);

	val = MC_READ_4(PCI_X3400UC_MC_CTRL);
	if ((val & PCI_X3400UC_MC_CTRL_ECCEN) == 0) {
		device_printf(dev, "ECC checking is not enabled\n");
		return 0;
	}

	val = MC_READ_4(PCI_X3400UC_MC_STS);
	if ((val & PCI_X3400UC_MC_STS_ECCEN) == 0) {
		device_printf(dev, "ECC is not enabled\n");
		return 0;
	}

	val = MC_READ_4(PCI_X3400UC_MC_MAX_DOD);
	dimms = __SHIFTOUT(val, PCI_X3400UC_MC_MAX_DOD_DIMMS);
	sc->ecc_dimms = dimms + 1;
	device_printf(dev, "max dimms %d\n", sc->ecc_dimms);

	callout_reset(&sc->ecc_callout, hz, ecc_x3400_callout, sc);
	return 0;
}
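/*
 * For illustration only: a minimal sketch of the periodic poll handler
 * armed above.  The re-arm pattern is taken from the attach code; the
 * helper ecc_x3400_status() (poll and report ECC error counts) is an
 * assumption, not necessarily the driver's actual code.
 */
static void
ecc_x3400_callout(void *xsc)
{
	struct ecc_x3400_softc *sc = xsc;

	ecc_x3400_status(sc);		/* hypothetical: poll/report errors */
	callout_reset(&sc->ecc_callout, hz, ecc_x3400_callout, sc);
}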
/*
 * cam_sim_alloc() may potentially be called from an interrupt (?) but
 * unexpected things happen to the system if malloc() returns NULL so we
 * use M_INTWAIT anyway.
 */
struct cam_sim *
cam_sim_alloc(sim_action_func sim_action, sim_poll_func sim_poll,
	      const char *sim_name, void *softc, u_int32_t unit,
	      sim_lock *lock, int max_dev_transactions,
	      int max_tagged_dev_transactions, struct cam_devq *queue)
{
	struct cam_sim *sim;

	/*
	 * A lock is mandatory.  Check before we allocate or reference
	 * the simq so this error path does not leak it.
	 */
	if (lock == NULL)
		return (NULL);

	/*
	 * XXX ahd was limited to 256 instead of 512 for unknown reasons,
	 * move that to a global limit here.  We may be able to remove this
	 * code, needs testing.
	 */
	if (max_dev_transactions > 256)
		max_dev_transactions = 256;
	if (max_tagged_dev_transactions > 256)
		max_tagged_dev_transactions = 256;

	/*
	 * Allocate a simq or use the supplied (possibly shared) simq.
	 */
	if (queue == NULL)
		queue = cam_simq_alloc(max_tagged_dev_transactions);
	else
		cam_devq_reference(queue);

	sim = kmalloc(sizeof(struct cam_sim), M_CAMSIM, M_INTWAIT | M_ZERO);
	sim->sim_action = sim_action;
	sim->sim_poll = sim_poll;
	sim->sim_name = sim_name;
	sim->softc = softc;
	sim->path_id = CAM_PATH_ANY;
	sim->unit_number = unit;
	sim->bus_id = 0;	/* set in xpt_bus_register */
	sim->max_tagged_dev_openings = max_tagged_dev_transactions;
	sim->max_dev_openings = max_dev_transactions;
	sim->flags = 0;
	sim->refcount = 1;
	sim->devq = queue;
	sim->lock = lock;
	if (lock == &sim_mplock) {
		/* mplock'd sim: no extra flags, non-MPSAFE callout */
		callout_init(&sim->callout);
	} else {
		sim->flags |= CAM_SIM_MPSAFE;
		callout_init_mp(&sim->callout);
	}

	SLIST_INIT(&sim->ccb_freeq);
	TAILQ_INIT(&sim->sim_doneq);
	spin_init(&sim->sim_spin, "cam_sim_alloc");

	return (sim);
}
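/*
 * Usage sketch (not from the source): a typical HBA attach path
 * allocating a private simq and a sim.  foo_action(), foo_poll(),
 * struct foo_softc and FOO_MAX_TRANSACTIONS are hypothetical names.
 */
static int
foo_attach_cam(struct foo_softc *softc, int unit)
{
	struct cam_devq *devq;
	struct cam_sim *sim;

	devq = cam_simq_alloc(FOO_MAX_TRANSACTIONS);
	sim = cam_sim_alloc(foo_action, foo_poll, "foo", softc, unit,
	    &softc->foo_lock, 1 /* untagged */, FOO_MAX_TRANSACTIONS, devq);
	if (sim == NULL) {
		/* release the devq reference here */
		return (ENOMEM);
	}
	softc->sim = sim;
	return (0);
}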
void
hrtimer_init(struct hrtimer *timer, clockid_t clock_id, enum hrtimer_mode mode)
{
	BUG_ON(clock_id != CLOCK_MONOTONIC);

	memset(timer, 0, sizeof(struct hrtimer));
	timer->clock_id = clock_id;
	timer->ht_mode = mode;

	lwkt_token_init(&timer->timer_token, "timer token");
	callout_init_mp(&timer->timer_callout);
}
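/*
 * Illustrative dispatch for the callout initialized above, assuming the
 * compat struct mirrors Linux's
 * "enum hrtimer_restart (*function)(struct hrtimer *)" member.  The
 * re-arm interval is a placeholder; this is a sketch, not the compat
 * layer's actual code.
 */
static void
hrtimer_callout_dispatch(void *arg)
{
	struct hrtimer *timer = arg;

	lwkt_gettoken(&timer->timer_token);
	if (timer->function(timer) == HRTIMER_RESTART) {
		/* recompute the delta from the expiry time and re-arm */
		callout_reset(&timer->timer_callout, 1 /* placeholder */,
		    hrtimer_callout_dispatch, timer);
	}
	lwkt_reltoken(&timer->timer_token);
}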
/*
 * Enable multicast routing
 */
static int
ip6_mrouter_init(struct socket *so, struct mbuf *m, int cmd)
{
	int *v;

	ASSERT_NETISR0;

#ifdef MRT6DEBUG
	if (mrt6debug)
		log(LOG_DEBUG,
		    "ip6_mrouter_init: so_type = %d, pr_protocol = %d\n",
		    so->so_type, so->so_proto->pr_protocol);
#endif

	if (so->so_type != SOCK_RAW ||
	    so->so_proto->pr_protocol != IPPROTO_ICMPV6)
		return EOPNOTSUPP;

	if (!m || (m->m_len != sizeof(int *)))
		return ENOPROTOOPT;

	v = mtod(m, int *);
	if (*v != 1)
		return ENOPROTOOPT;

	if (ip6_mrouter != NULL)
		return EADDRINUSE;

	ip6_mrouter = so;
	ip6_mrouter_ver = cmd;

	bzero((caddr_t)mf6ctable, sizeof(mf6ctable));
	bzero((caddr_t)n6expire, sizeof(n6expire));

	pim6 = 0;	/* used for stubbing out/in pim stuff */

	callout_init_mp(&expire_upcalls_ch);
	netmsg_init(&expire_upcalls_nmsg, NULL, &netisr_adone_rport,
	    MSGF_PRIORITY | MSGF_DROPABLE, expire_upcalls_dispatch);
	callout_reset(&expire_upcalls_ch, EXPIRE_TIMEOUT,
	    expire_upcalls, NULL);

#ifdef MRT6DEBUG
	if (mrt6debug)
		log(LOG_DEBUG, "ip6_mrouter_init\n");
#endif

	return 0;
}
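/*
 * Sketch of the callout/netmsg handoff configured above: the callout
 * fires in the callout thread and merely forwards the dropable netmsg
 * to netisr0, where expire_upcalls_dispatch() does the real work and
 * re-arms the callout.  This follows the usual DragonFly netisr
 * pattern; treat the details as illustrative.
 */
static void
expire_upcalls(void *arg __unused)
{
	struct lwkt_msg *lmsg = &expire_upcalls_nmsg.lmsg;

	KKASSERT(mycpuid == 0);
	crit_enter();
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg(netisr_cpuport(0), lmsg);
	crit_exit();
}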
void
hwmp_vattach(struct ieee80211vap *vap)
{
	struct ieee80211_hwmp_state *hs;

	KASSERT(vap->iv_opmode == IEEE80211_M_MBSS,
	    ("not a mesh vap, opmode %d", vap->iv_opmode));

	hs = kmalloc(sizeof(struct ieee80211_hwmp_state), M_80211_VAP,
	    M_INTWAIT | M_ZERO);
	hs->hs_maxhops = IEEE80211_HWMP_DEFAULT_MAXHOPS;
	callout_init_mp(&hs->hs_roottimer);
	vap->iv_hwmp = hs;
}
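/*
 * The matching teardown, sketched for symmetry (the real hwmp_vdetach()
 * lives elsewhere): synchronously stop the root timer before freeing
 * the state it points into.
 */
void
hwmp_vdetach(struct ieee80211vap *vap)
{
	struct ieee80211_hwmp_state *hs = vap->iv_hwmp;

	callout_stop_sync(&hs->hs_roottimer); /* wait out a running callback */
	kfree(hs, M_80211_VAP);
	vap->iv_hwmp = NULL;
}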
struct rndtest_state *
rndtest_attach(device_t dev)
{
	struct rndtest_state *rsp;

	rsp = kmalloc(sizeof (*rsp), M_DEVBUF, M_INTWAIT);
	rsp->rs_begin = rsp->rs_buf;
	rsp->rs_end = rsp->rs_buf + sizeof(rsp->rs_buf);
	rsp->rs_current = rsp->rs_begin;
	rsp->rs_discard = 1;
	rsp->rs_collect = 1;
	rsp->rs_parent = dev;
	/* NB: the callout is MP-safe and runs without the mplock */
	callout_init_mp(&rsp->rs_to);

	return (rsp);
}
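/*
 * Illustrative sketch of the periodic handler behind rs_to: run the
 * FIPS tests over freshly collected bytes and re-arm.  rndtest_test()
 * is assumed from the structure fields above; the 1-second period is
 * an assumption as well.
 */
static void
rndtest_timeout(void *xrsp)
{
	struct rndtest_state *rsp = xrsp;

	if (rsp->rs_collect)
		rndtest_test(rsp);	/* hypothetical: FIPS 140 test pass */
	callout_reset(&rsp->rs_to, hz, rndtest_timeout, rsp);
}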
/*
 * bfq_prepare(): the .prepare callback of the bfq policy.  Initialize
 * all fields in bfq_diskctx and initialize the corresponding helper
 * thread.
 *
 * lock: none
 * refcount: none
 *
 * Returns 0
 */
static int
bfq_prepare(struct dsched_disk_ctx *diskctx)
{
	struct bfq_disk_ctx *bfq_diskctx = (struct bfq_disk_ctx *)diskctx;

	BFQ_LOCKINIT(bfq_diskctx);

	bfq_diskctx->pending_dequeue = 0;

	wf2q_init(&bfq_diskctx->bfq_wf2q);

	callout_init_mp(&bfq_diskctx->bfq_callout);

	bfq_diskctx->bfq_blockon = NULL;
	bfq_diskctx->bfq_active_tdio = NULL;
	bfq_diskctx->bfq_remaining_budget = 0;

	bfq_diskctx->bfq_max_budget = BFQ_DEFAULT_MAX_BUDGET;
	bfq_diskctx->bfq_peak_rate_samples = 0;
	bfq_diskctx->bfq_peak_rate = 0;

#if 0
	bfq_diskctx->bfq_flag = BFQ_FLAG_AS | BFQ_FLAG_AUTO_MAX_BUDGET;
#endif
	bfq_diskctx->bfq_flag = BFQ_FLAG_AS;

	bfq_diskctx->bfq_as_miss = 0;
	bfq_diskctx->bfq_as_hit = 0;

	bfq_diskctx->bfq_as_avg_wait_miss = 0;
	bfq_diskctx->bfq_as_avg_wait_all = 0;
	bfq_diskctx->bfq_as_max_wait = 0;
	bfq_diskctx->bfq_as_max_wait2 = 0;
	bfq_diskctx->bfq_as_high_wait_count = 0;
	bfq_diskctx->bfq_as_high_wait_count2 = 0;

	bfq_diskctx->bfq_avg_time_slice = 0;
	bfq_diskctx->bfq_max_time_slice = 0;
	bfq_diskctx->bfq_high_time_slice_count = 0;

	/* initialize the helper thread */
	helper_init(bfq_diskctx);

	dsched_debug(BFQ_DEBUG_NORMAL, "BFQ: initialized!\n");
	return 0;
}
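/*
 * Illustration (fragment, not the policy's actual code) of how
 * bfq_callout is typically used by the anticipatory-scheduling (AS)
 * logic initialized above: arm a short timeout while waiting for the
 * active thread's next request; if it fires first, the anticipation
 * is counted as a miss.  bfq_timeout and BFQ_T_WAIT are assumptions.
 */
callout_reset(&bfq_diskctx->bfq_callout, BFQ_T_WAIT,
    bfq_timeout, bfq_diskctx);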
static int
rdrand_attach(device_t dev)
{
	struct rdrand_softc *sc;

	sc = device_get_softc(dev);

	if (hz > 100)
		sc->sc_rng_ticks = hz / 100;
	else
		sc->sc_rng_ticks = 1;

	callout_init_mp(&sc->sc_rng_co);
	callout_reset(&sc->sc_rng_co, sc->sc_rng_ticks,
	    rdrand_rng_harvest, sc);

	return 0;
}
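/*
 * Sketch of the harvest callback armed above: pull entropy from the
 * CPU's RDRAND unit, feed it to the pool, and re-arm at the computed
 * rate.  rdrand_get() stands in for the RDRAND inline assembly and its
 * byte-count convention is an assumption; add_buffer_randomness() is
 * the kernel's entropy-feed entry point.
 */
static void
rdrand_rng_harvest(void *arg)
{
	struct rdrand_softc *sc = arg;
	uint64_t buf[32];
	int n;

	n = rdrand_get(buf, sizeof(buf)); /* hypothetical: returns bytes */
	if (n > 0)
		add_buffer_randomness((const char *)buf, n);
	callout_reset(&sc->sc_rng_co, sc->sc_rng_ticks,
	    rdrand_rng_harvest, sc);
}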
/*
 * Reset and initialize the device.  Note that unit 0 (UKBD_DEFAULT) is an
 * always-connected device once it has been initially detected.  We do not
 * deregister it if the usb keyboard is unplugged to avoid losing the
 * connection to the console.  This feature also handles the USB bus reset
 * which detaches and reattaches USB devices during boot.
 */
static int
ukbd_init(int unit, keyboard_t **kbdp, void *arg, int flags)
{
	keyboard_t *kbd;
	ukbd_state_t *state;
	keymap_t *keymap;
	accentmap_t *accmap;
	fkeytab_t *fkeymap;
	int fkeymap_size;
	void **data = (void **)arg;
	struct usb_attach_arg *uaa = (struct usb_attach_arg *)data[0];

	if (unit == UKBD_DEFAULT) {
		*kbdp = kbd = &default_kbd;
		if (KBD_IS_INITIALIZED(kbd) && KBD_IS_CONFIGURED(kbd))
			return 0;
		state = &default_kbd_state;
		keymap = &default_keymap;
		accmap = &default_accentmap;
		fkeymap = default_fkeytab;
		fkeymap_size = NELEM(default_fkeytab);
	} else if (*kbdp == NULL) {
		*kbdp = kbd = kmalloc(sizeof(*kbd), M_DEVBUF,
		    M_INTWAIT | M_ZERO);
		state = kmalloc(sizeof(*state), M_DEVBUF, M_INTWAIT);
		keymap = kmalloc(sizeof(key_map), M_DEVBUF, M_INTWAIT);
		accmap = kmalloc(sizeof(accent_map), M_DEVBUF, M_INTWAIT);
		fkeymap = kmalloc(sizeof(fkey_tab), M_DEVBUF, M_INTWAIT);
		fkeymap_size = NELEM(fkey_tab);
		if ((state == NULL) || (keymap == NULL) ||
		    (accmap == NULL) || (fkeymap == NULL)) {
			if (state != NULL)
				kfree(state, M_DEVBUF);
			if (keymap != NULL)
				kfree(keymap, M_DEVBUF);
			if (accmap != NULL)
				kfree(accmap, M_DEVBUF);
			if (fkeymap != NULL)
				kfree(fkeymap, M_DEVBUF);
			kfree(kbd, M_DEVBUF);
			return ENOMEM;
		}
	} else if (KBD_IS_INITIALIZED(*kbdp) && KBD_IS_CONFIGURED(*kbdp)) {
		return 0;
	} else {
		kbd = *kbdp;
		state = (ukbd_state_t *)kbd->kb_data;
		keymap = kbd->kb_keymap;
		accmap = kbd->kb_accentmap;
		fkeymap = kbd->kb_fkeytab;
		fkeymap_size = kbd->kb_fkeytab_size;
	}

	if (!KBD_IS_PROBED(kbd)) {
		kbd_init_struct(kbd, DRIVER_NAME, KB_OTHER, unit, flags,
		    KB_PRI_USB, 0, 0);
		bzero(state, sizeof(*state));
		bcopy(&key_map, keymap, sizeof(key_map));
		bcopy(&accent_map, accmap, sizeof(accent_map));
		bcopy(fkey_tab, fkeymap,
		    imin(fkeymap_size * sizeof(fkeymap[0]), sizeof(fkey_tab)));
		kbd_set_maps(kbd, keymap, accmap, fkeymap, fkeymap_size);
		kbd->kb_data = (void *)state;

		if (probe_keyboard(uaa, flags))
			return ENXIO;
		else
			KBD_FOUND_DEVICE(kbd);
		ukbd_clear_state(kbd);

		/*
		 * If reattaching to an already open keyboard (e.g. console),
		 * try to restore the translation mode.  Otherwise set the
		 * translation mode to, well, translation mode so we don't
		 * get garbage.
		 */
		state->ks_mode = K_XLATE;
		state->ks_iface = uaa->iface;
		state->ks_uaa = uaa;
		state->ks_ifstate = 0;
		callout_init_mp(&state->ks_timeout);
		/*
		 * FIXME: set the initial value for lock keys in ks_state
		 * according to the BIOS data?
		 */
		KBD_PROBE_DONE(kbd);
	}
	if (!KBD_IS_INITIALIZED(kbd) && !(flags & KB_CONF_PROBE_ONLY)) {
		if (KBD_HAS_DEVICE(kbd) &&
		    init_keyboard((ukbd_state_t *)kbd->kb_data,
			&kbd->kb_type, kbd->kb_flags))
			return ENXIO;
		ukbd_ioctl(kbd, KDSETLED, (caddr_t)&(state->ks_state));
	}
	if (!KBD_IS_CONFIGURED(kbd)) {
		if (kbd_register(kbd) < 0) {
			kbd->kb_flags = 0;
			/* XXX: Missing free()'s */
			return ENXIO;
		}
		if (ukbd_enable_intr(kbd, TRUE, (usbd_intr_t *)data[1]) == 0)
			ukbd_timeout((void *)kbd);
		KBD_CONFIG_DONE(kbd);
	}
	return 0;
}
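/*
 * Sketch of the polling timeout initialized above (ks_timeout): drain
 * pending input through the keyboard layer and re-arm.  The kbd_intr()
 * call and the hz/40 poll rate follow the usual kbd-driver pattern and
 * are assumptions here.
 */
static void
ukbd_timeout(void *arg)
{
	keyboard_t *kbd = arg;
	ukbd_state_t *state = (ukbd_state_t *)kbd->kb_data;

	kbd_intr(kbd, NULL);		/* process queued scancodes */
	callout_reset(&state->ks_timeout, hz / 40, ukbd_timeout, kbd);
}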
static int
ecc_e31200_attach(device_t dev)
{
	struct ecc_e31200_softc *sc = device_get_softc(dev);
	uint32_t capa, dmfc, mch_barlo, mch_barhi;
	uint64_t mch_bar;
	int bus, slot;

	dev = sc->ecc_device;	/* XXX */

	bus = pci_get_bus(dev);
	slot = pci_get_slot(dev);

	capa = pcib_read_config(dev, bus, slot, 0, PCI_E31200_CAPID0_A, 4);

	dmfc = __SHIFTOUT(capa, PCI_E31200_CAPID0_A_DMFC);
	if (dmfc == PCI_E31200_CAPID0_A_DMFC_1333) {
		ecc_printf(sc, "CAP DDR3 1333 ");
	} else if (dmfc == PCI_E31200_CAPID0_A_DMFC_1067) {
		ecc_printf(sc, "CAP DDR3 1067 ");
	} else if (dmfc == PCI_E31200_CAPID0_A_DMFC_ALL) {
		ecc_printf(sc, "no CAP ");
	} else {
		ecc_printf(sc, "unknown DMFC %#x\n", dmfc);
		return 0;
	}

	if (capa & PCI_E31200_CAPID0_A_ECCDIS) {
		kprintf("NON-ECC\n");
		return 0;
	} else {
		kprintf("ECC\n");
	}

	mch_barlo = pcib_read_config(dev, bus, slot, 0,
	    PCI_E31200_MCHBAR_LO, 4);
	mch_barhi = pcib_read_config(dev, bus, slot, 0,
	    PCI_E31200_MCHBAR_HI, 4);

	mch_bar = (uint64_t)mch_barlo | (((uint64_t)mch_barhi) << 32);
	if (bootverbose)
		ecc_printf(sc, "MCHBAR %jx\n", (uintmax_t)mch_bar);

	if (mch_bar & PCI_E31200_MCHBAR_LO_EN) {
		uint64_t map_addr = mch_bar & PCI_E31200_MCHBAR_ADDRMASK;
		uint32_t dimm_ch0, dimm_ch1;

		sc->ecc_addr = pmap_mapdev_uncacheable(map_addr,
		    MCH_E31200_SIZE);

		if (bootverbose) {
			ecc_printf(sc, "LOG0_C0 %#x\n",
			    CSR_READ_4(sc, MCH_E31200_ERRLOG0_C0));
			ecc_printf(sc, "LOG0_C1 %#x\n",
			    CSR_READ_4(sc, MCH_E31200_ERRLOG0_C1));
		}

		dimm_ch0 = CSR_READ_4(sc, MCH_E31200_DIMM_CH0);
		dimm_ch1 = CSR_READ_4(sc, MCH_E31200_DIMM_CH1);

		if (bootverbose) {
			ecc_e31200_chaninfo(sc, dimm_ch0, "channel0");
			ecc_e31200_chaninfo(sc, dimm_ch1, "channel1");
		}

		if (((dimm_ch0 | dimm_ch1) & MCH_E31200_DIMM_ECC) == 0) {
			ecc_printf(sc, "No ECC active\n");
			pmap_unmapdev((vm_offset_t)sc->ecc_addr,
			    MCH_E31200_SIZE);
			return 0;
		}
	}

	ecc_e31200_status(sc);
	callout_init_mp(&sc->ecc_callout);
	callout_reset(&sc->ecc_callout, hz, ecc_e31200_callout, sc);

	return 0;
}
/*
 * Function name:	twa_attach
 * Description:		Allocates pci resources; updates sc; adds a node to the
 *			sysctl tree to expose the driver version; makes calls
 *			(to the Common Layer) to initialize ctlr, and to
 *			attach to CAM.
 *
 * Input:		dev -- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0 -- success
 *			non-zero-- failure
 */
static TW_INT32
twa_attach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	TW_INT32		bar_num;
	TW_INT32		bar0_offset;
	TW_INT32		bar_size;
	TW_INT32		irq_flags;
	TW_INT32		error;

	sc->ctlr_handle.osl_ctlr_ctxt = sc;

	/* Initialize the softc structure. */
	sc->bus_dev = dev;
	tw_osli_dbg_dprintf(3, sc, "entered");
	sc->device_id = pci_get_device(dev);

	/* Initialize the mutexes right here. */
	sc->io_lock = &(sc->io_lock_handle);
	spin_init(sc->io_lock, "twa_iolock");
	sc->q_lock = &(sc->q_lock_handle);
	spin_init(sc->q_lock, "twa_qlock");
	sc->sim_lock = &(sc->sim_lock_handle);
	lockinit(sc->sim_lock, "tw_osl_sim_lock", 0, LK_CANRECURSE);

	SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "driver_version", CTLFLAG_RD,
	    TW_OSL_DRIVER_VERSION_STRING, 0, "TWA driver version");

	/* Force the busmaster enable bit on, in case the BIOS forgot. */
	pci_enable_busmaster(dev);

	/* Allocate the PCI register window. */
	if ((error = tw_cl_get_pci_bar_info(sc->device_id, TW_CL_BAR_TYPE_MEM,
	    &bar_num, &bar0_offset, &bar_size))) {
		tw_osli_printf(sc, "error = %d",
		    TW_CL_SEVERITY_ERROR_STRING,
		    TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
		    0x201F, "Can't get PCI BAR info", error);
		tw_osli_free_resources(sc);
		return(error);
	}
	sc->reg_res_id = PCIR_BARS + bar0_offset;
	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE)) == NULL) {
		tw_osli_printf(sc, "error = %d",
		    TW_CL_SEVERITY_ERROR_STRING,
		    TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
		    0x2002, "Can't allocate register window", ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Allocate and register our interrupt. */
	sc->irq_res_id = 0;
	sc->irq_type = pci_alloc_1intr(sc->bus_dev, twa_msi_enable,
	    &sc->irq_res_id, &irq_flags);
	if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ,
	    &(sc->irq_res_id), 0, ~0, 1, irq_flags)) == NULL) {
		tw_osli_printf(sc, "error = %d",
		    TW_CL_SEVERITY_ERROR_STRING,
		    TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
		    0x2003, "Can't allocate interrupt", ENXIO);
		tw_osli_free_resources(sc);
		return(ENXIO);
	}
	if ((error = twa_setup_intr(sc))) {
		tw_osli_printf(sc, "error = %d",
		    TW_CL_SEVERITY_ERROR_STRING,
		    TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
		    0x2004, "Can't set up interrupt", error);
		tw_osli_free_resources(sc);
		return(error);
	}

	if ((error = tw_osli_alloc_mem(sc))) {
		tw_osli_printf(sc, "error = %d",
		    TW_CL_SEVERITY_ERROR_STRING,
		    TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
		    0x2005, "Memory allocation failure", error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Initialize the Common Layer for this controller. */
	if ((error = tw_cl_init_ctlr(&sc->ctlr_handle, sc->flags,
	    sc->device_id, TW_OSLI_MAX_NUM_REQUESTS, TW_OSLI_MAX_NUM_AENS,
	    sc->non_dma_mem, sc->dma_mem, sc->dma_mem_phys))) {
		tw_osli_printf(sc, "error = %d",
		    TW_CL_SEVERITY_ERROR_STRING,
		    TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
		    0x2006, "Failed to initialize Common Layer/controller",
		    error);
		tw_osli_free_resources(sc);
		return(error);
	}

	/* Create the control device. */
	sc->ctrl_dev = make_dev(&twa_ops, device_get_unit(sc->bus_dev),
	    UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
	    "twa%d", device_get_unit(sc->bus_dev));
	sc->ctrl_dev->si_drv1 = sc;

	if ((error = tw_osli_cam_attach(sc))) {
		tw_osli_free_resources(sc);
		tw_osli_printf(sc, "error = %d",
		    TW_CL_SEVERITY_ERROR_STRING,
		    TW_CL_MESSAGE_SOURCE_FREEBSD_DRIVER,
		    0x2007, "Failed to initialize CAM", error);
		return(error);
	}

	sc->watchdog_index = 0;
	callout_init_mp(&(sc->watchdog_callout[0]));
	callout_init_mp(&(sc->watchdog_callout[1]));
	callout_reset(&(sc->watchdog_callout[0]), 5*hz,
	    twa_watchdog, &sc->ctlr_handle);

	return(0);
}
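/*
 * Sketch of the watchdog ping-pong armed above: two callouts alternate
 * via watchdog_index so that re-arming never races the instance that
 * is currently firing.  The health-check body is elided; treat this
 * as illustrative.
 */
static void
twa_watchdog(void *arg)
{
	struct tw_cl_ctlr_handle *ctlr_handle = arg;
	struct twa_softc *sc = ctlr_handle->osl_ctlr_ctxt;
	int i;

	/* ... check controller/request state via the Common Layer ... */

	i = (sc->watchdog_index ^= 1);	/* flip to the other callout */
	callout_reset(&sc->watchdog_callout[i], 5 * hz,
	    twa_watchdog, &sc->ctlr_handle);
}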
static int
ecc_e31200_attach(device_t dev)
{
	struct ecc_e31200_softc *sc = device_get_softc(dev);
	uint32_t capa, dmfc, mch_barlo, mch_barhi;
	uint64_t mch_bar;
	int bus, slot, dmfc_parsed = 1;

	dev = sc->ecc_device;	/* XXX */

	bus = pci_get_bus(dev);
	slot = pci_get_slot(dev);

	capa = pcib_read_config(dev, bus, slot, 0, PCI_E31200_CAPID0_A, 4);

	if (sc->ecc_ver == ECC_E31200_VER_1) {
		dmfc = __SHIFTOUT(capa, PCI_E31200_CAPID0_A_DMFC);
	} else { /* V2/V3 */
		uint32_t capb;

		capb = pcib_read_config(dev, bus, slot, 0,
		    PCI_E31200_CAPID0_B, 4);
		dmfc = __SHIFTOUT(capb, PCI_E31200_CAPID0_B_DMFC);
	}

	if (dmfc == PCI_E31200_CAPID0_DMFC_1067) {
		ecc_printf(sc, "CAP DDR3 1067 ");
	} else if (dmfc == PCI_E31200_CAPID0_DMFC_1333) {
		ecc_printf(sc, "CAP DDR3 1333 ");
	} else {
		if (sc->ecc_ver == ECC_E31200_VER_1) {
			if (dmfc == PCI_E31200_CAPID0_DMFC_V1_ALL)
				ecc_printf(sc, "no CAP ");
			else
				dmfc_parsed = 0;
		} else { /* V2/V3 */
			if (dmfc == PCI_E31200_CAPID0_DMFC_1600)
				ecc_printf(sc, "CAP DDR3 1600 ");
			else if (dmfc == PCI_E31200_CAPID0_DMFC_1867)
				ecc_printf(sc, "CAP DDR3 1867 ");
			else if (dmfc == PCI_E31200_CAPID0_DMFC_2133)
				ecc_printf(sc, "CAP DDR3 2133 ");
			else if (dmfc == PCI_E31200_CAPID0_DMFC_2400)
				ecc_printf(sc, "CAP DDR3 2400 ");
			else if (dmfc == PCI_E31200_CAPID0_DMFC_2667)
				ecc_printf(sc, "CAP DDR3 2667 ");
			else if (dmfc == PCI_E31200_CAPID0_DMFC_2933)
				ecc_printf(sc, "CAP DDR3 2933 ");
			else
				dmfc_parsed = 0;
		}
	}
	if (!dmfc_parsed) {
		ecc_printf(sc, "unknown DMFC %#x\n", dmfc);
		return 0;
	}

	if (capa & PCI_E31200_CAPID0_A_ECCDIS) {
		kprintf("NON-ECC\n");
		return 0;
	} else {
		kprintf("ECC\n");
	}

	mch_barlo = pcib_read_config(dev, bus, slot, 0,
	    PCI_E31200_MCHBAR_LO, 4);
	mch_barhi = pcib_read_config(dev, bus, slot, 0,
	    PCI_E31200_MCHBAR_HI, 4);

	mch_bar = (uint64_t)mch_barlo | (((uint64_t)mch_barhi) << 32);
	if (bootverbose)
		ecc_printf(sc, "MCHBAR %jx\n", (uintmax_t)mch_bar);

	if (mch_bar & PCI_E31200_MCHBAR_LO_EN) {
		uint64_t map_addr = mch_bar & PCI_E31200_MCHBAR_ADDRMASK;
		uint32_t dimm_ch0, dimm_ch1;
		int ecc_active;

		sc->ecc_addr = pmap_mapdev_uncacheable(map_addr,
		    MCH_E31200_SIZE);

		if (bootverbose) {
			ecc_printf(sc, "LOG0_C0 %#x\n",
			    CSR_READ_4(sc, MCH_E31200_ERRLOG0_C0));
			ecc_printf(sc, "LOG0_C1 %#x\n",
			    CSR_READ_4(sc, MCH_E31200_ERRLOG0_C1));
		}

		dimm_ch0 = CSR_READ_4(sc, MCH_E31200_DIMM_CH0);
		dimm_ch1 = CSR_READ_4(sc, MCH_E31200_DIMM_CH1);

		if (bootverbose) {
			ecc_e31200_chaninfo(sc, dimm_ch0, "channel0");
			ecc_e31200_chaninfo(sc, dimm_ch1, "channel1");
		}

		ecc_active = 1;
		if (sc->ecc_ver == ECC_E31200_VER_1 ||
		    sc->ecc_ver == ECC_E31200_VER_2) {
			if (((dimm_ch0 | dimm_ch1) & MCH_E31200_DIMM_ECC) ==
			    MCH_E31200_DIMM_ECC_NONE) {
				ecc_active = 0;
				ecc_printf(sc, "No ECC active\n");
			}
		} else { /* V3 */
			uint32_t ecc_mode0, ecc_mode1;

			ecc_mode0 = __SHIFTOUT(dimm_ch0, MCH_E31200_DIMM_ECC);
			ecc_mode1 = __SHIFTOUT(dimm_ch1, MCH_E31200_DIMM_ECC);

			/*
			 * Only active ALL/NONE is supported
			 */
			if (ecc_mode0 != MCH_E31200_DIMM_ECC_NONE &&
			    ecc_mode0 != MCH_E31200_DIMM_ECC_ALL) {
				ecc_active = 0;
				ecc_printf(sc, "channel0, invalid ECC "
				    "active 0x%x\n", ecc_mode0);
			}
			if (ecc_mode1 != MCH_E31200_DIMM_ECC_NONE &&
			    ecc_mode1 != MCH_E31200_DIMM_ECC_ALL) {
				ecc_active = 0;
				ecc_printf(sc, "channel1, invalid ECC "
				    "active 0x%x\n", ecc_mode1);
			}
			if (ecc_mode0 == MCH_E31200_DIMM_ECC_NONE &&
			    ecc_mode1 == MCH_E31200_DIMM_ECC_NONE) {
				ecc_active = 0;
				ecc_printf(sc, "No ECC active\n");
			}
		}

		if (!ecc_active) {
			pmap_unmapdev((vm_offset_t)sc->ecc_addr,
			    MCH_E31200_SIZE);
			return 0;
		}
	} else {
		ecc_printf(sc, "MCHBAR is not enabled\n");
	}

	ecc_e31200_status(sc);
	callout_init_mp(&sc->ecc_callout);
	callout_reset(&sc->ecc_callout, hz, ecc_e31200_callout, sc);

	return 0;
}
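/*
 * Sketch of the periodic handler armed above (the body is an
 * assumption): re-read the error logs via ecc_e31200_status() and
 * re-arm at 1Hz, mirroring the attach-time polling pattern.
 */
static void
ecc_e31200_callout(void *xsc)
{
	struct ecc_e31200_softc *sc = xsc;

	ecc_e31200_status(sc);
	callout_reset(&sc->ecc_callout, hz, ecc_e31200_callout, sc);
}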
/*
 * This procedure is the main loop of our per-cpu helper thread.  The
 * sc->isrunning flag prevents us from racing hardclock_softtick() and
 * a critical section is sufficient to interlock sc->curticks and protect
 * us from remote IPI's / list removal.
 *
 * The thread starts with the MP lock released and not in a critical
 * section.  The loop itself is MP safe while individual callbacks
 * may or may not be, so we obtain or release the MP lock as appropriate.
 */
static void
softclock_handler(void *arg)
{
	softclock_pcpu_t sc;
	struct callout *c;
	struct callout_tailq *bucket;
	struct callout slotimer;
	int mpsafe = 1;
	int flags;

	/*
	 * Setup pcpu slow clocks which we want to run from the callout
	 * thread.
	 */
	callout_init_mp(&slotimer);
	callout_reset(&slotimer, hz * 10, slotimer_callback, &slotimer);

	/*
	 * Run the callout thread at the same priority as other kernel
	 * threads so it can be round-robined.
	 */
	/*lwkt_setpri_self(TDPRI_SOFT_NORM);*/

	/*
	 * Loop critical section against ipi operations to this cpu.
	 */
	sc = arg;
	crit_enter();
loop:
	while (sc->softticks != (int)(sc->curticks + 1)) {
		bucket = &sc->callwheel[sc->softticks & cwheelmask];

		for (c = TAILQ_FIRST(bucket); c; c = sc->next) {
			if (c->c_time != sc->softticks) {
				sc->next = TAILQ_NEXT(c, c_links.tqe);
				continue;
			}

			flags = c->c_flags;
			if (flags & CALLOUT_MPSAFE) {
				if (mpsafe == 0) {
					mpsafe = 1;
					rel_mplock();
				}
			} else {
				/*
				 * The request might be removed while we
				 * are waiting to get the MP lock.  If it
				 * was removed sc->next will point to the
				 * next valid request or NULL, loop up.
				 */
				if (mpsafe) {
					mpsafe = 0;
					sc->next = c;
					get_mplock();
					if (c != sc->next)
						continue;
				}
			}

			/*
			 * Queue protection only exists while we hold the
			 * critical section uninterrupted.
			 *
			 * Adjust sc->next when removing (c) from the queue,
			 * note that an IPI on this cpu may make further
			 * adjustments to sc->next.
			 */
			sc->next = TAILQ_NEXT(c, c_links.tqe);
			TAILQ_REMOVE(bucket, c, c_links.tqe);

			KASSERT((c->c_flags & CALLOUT_ARMED) &&
				(c->c_flags & CALLOUT_PENDING) &&
				CALLOUT_FLAGS_TO_CPU(c->c_flags) ==
				mycpu->gd_cpuid,
				("callout %p: bad flags %08x", c, c->c_flags));

			/*
			 * Once CALLOUT_PENDING is cleared, sc->running
			 * protects the callout structure's existence but
			 * only until we call c_func().  A callout_stop()
			 * or callout_reset() issued from within c_func()
			 * will not block.  The callout can also be kfree()d
			 * by c_func().
			 *
			 * We set EXECUTED before calling c_func() so a
			 * callout_stop() issued from within c_func() returns
			 * the correct status.
			 */
			if ((flags & (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) ==
			    (CALLOUT_AUTOLOCK | CALLOUT_ACTIVE)) {
				void (*c_func)(void *);
				void *c_arg;
				struct lock *c_lk;
				int error;

				/*
				 * NOTE: sc->running must be set prior to
				 *	 CALLOUT_PENDING being cleared to
				 *	 avoid missed CANCELs and *_stop()
				 *	 races.
				 */
				sc->running = (intptr_t)c;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_lk = c->c_lk;
				c->c_func = NULL;
				KKASSERT(c->c_flags & CALLOUT_DID_INIT);
				flags = callout_unpend_disarm(c);
				error = lockmgr(c_lk, LK_EXCLUSIVE |
						      LK_CANCELABLE);
				if (error == 0) {
					atomic_set_int(&c->c_flags,
						       CALLOUT_EXECUTED);
					crit_exit();
					c_func(c_arg);
					crit_enter();
					lockmgr(c_lk, LK_RELEASE);
				}
			} else if (flags & CALLOUT_ACTIVE) {
				void (*c_func)(void *);
				void *c_arg;

				sc->running = (intptr_t)c;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c->c_func = NULL;
				KKASSERT(c->c_flags & CALLOUT_DID_INIT);
				flags = callout_unpend_disarm(c);
				atomic_set_int(&c->c_flags, CALLOUT_EXECUTED);
				crit_exit();
				c_func(c_arg);
				crit_enter();
			} else {
				flags = callout_unpend_disarm(c);
			}

			/*
			 * Read and clear sc->running.  If bit 0 was set,
			 * a callout_stop() is likely blocked waiting for
			 * the callback to complete.
			 *
			 * callout_unpend_disarm() above also cleared
			 * CALLOUT_WAITING and returned the contents of
			 * flags prior to clearing any bits.
			 *
			 * Interlock wakeup any _stop's waiting on us.  Note
			 * that once c_func() was called, the callout
			 * structure (c) pointer may no longer be valid.  It
			 * can only be used for the wakeup.
			 */
			if ((atomic_readandclear_ptr(&sc->running) & 1) ||
			    (flags & CALLOUT_WAITING)) {
				wakeup(c);
			}
			/* NOTE: list may have changed */
		}
		++sc->softticks;
	}

	/*
	 * Don't leave us holding the MP lock when we deschedule ourselves.
	 */
	if (mpsafe == 0) {
		mpsafe = 1;
		rel_mplock();
	}
	sc->isrunning = 0;
	lwkt_deschedule_self(&sc->thread);	/* == curthread */
	lwkt_switch();
	goto loop;
	/* NOT REACHED */
}
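/*
 * A minimal consumer of the machinery above, for reference (a sketch,
 * not taken from the source): an MP-safe periodic timer is built by
 * having the handler re-arm itself, exactly as the drivers earlier in
 * this section do.
 */
static struct callout tick_co;

static void
tick_fn(void *arg)
{
	/* ... periodic work ... */
	callout_reset(&tick_co, hz, tick_fn, arg);	/* re-arm */
}

static void
tick_start(void *arg)
{
	callout_init_mp(&tick_co);	/* MP-safe: runs without the mplock */
	callout_reset(&tick_co, hz, tick_fn, arg);
}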