/*---------------------------------------------------------------------------* * initialize one B channel's rx/tx data structures and init/deinit HSCX *---------------------------------------------------------------------------*/ void isic_bchannel_setup(int unit, int h_chan, int bprot, int activate) { struct l1_softc *sc = &l1_sc[unit]; l1_bchan_state_t *chan = &sc->sc_chan[h_chan]; int s = SPLI4B(); if(activate == 0) { /* deactivation */ isic_hscx_init(sc, h_chan, activate); } NDBGL1(L1_BCHAN, "unit=%d, channel=%d, %s", sc->sc_unit, h_chan, activate ? "activate" : "deactivate"); /* general part */ chan->unit = sc->sc_unit; /* unit number */ chan->channel = h_chan; /* B channel */ chan->bprot = bprot; /* B channel protocol */ chan->state = HSCX_IDLE; /* B channel state */ /* receiver part */ chan->rx_queue.ifq_maxlen = IFQ_MAXLEN; if(!mtx_initialized(&chan->rx_queue.ifq_mtx)) mtx_init(&chan->rx_queue.ifq_mtx, "i4b_isic_rx", NULL, MTX_DEF); i4b_Bcleanifq(&chan->rx_queue); /* clean rx queue */ chan->rxcount = 0; /* reset rx counter */ i4b_Bfreembuf(chan->in_mbuf); /* clean rx mbuf */ chan->in_mbuf = NULL; /* reset mbuf ptr */ chan->in_cbptr = NULL; /* reset mbuf curr ptr */ chan->in_len = 0; /* reset mbuf data len */ /* transmitter part */ chan->tx_queue.ifq_maxlen = IFQ_MAXLEN; if(!mtx_initialized(&chan->tx_queue.ifq_mtx)) mtx_init(&chan->tx_queue.ifq_mtx, "i4b_isic_tx", NULL, MTX_DEF); i4b_Bcleanifq(&chan->tx_queue); /* clean tx queue */ chan->txcount = 0; /* reset tx counter */ i4b_Bfreembuf(chan->out_mbuf_head); /* clean tx mbuf */ chan->out_mbuf_head = NULL; /* reset head mbuf ptr */ chan->out_mbuf_cur = NULL; /* reset current mbuf ptr */ chan->out_mbuf_cur_ptr = NULL; /* reset current mbuf data ptr */ chan->out_mbuf_cur_len = 0; /* reset current mbuf data cnt */ if(activate != 0) { /* activation */ isic_hscx_init(sc, h_chan, activate); } splx(s); }
int ata_attach(device_t dev) { struct ata_channel *ch = device_get_softc(dev); int error, rid; struct cam_devq *devq; const char *res; char buf[64]; int i, mode; /* check that we have a virgin channel to attach */ if (ch->r_irq) return EEXIST; /* initialize the softc basics */ ch->dev = dev; ch->state = ATA_IDLE; bzero(&ch->state_mtx, sizeof(struct mtx)); mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF); TASK_INIT(&ch->conntask, 0, ata_conn_event, dev); for (i = 0; i < 16; i++) { ch->user[i].revision = 0; snprintf(buf, sizeof(buf), "dev%d.sata_rev", i); if (resource_int_value(device_get_name(dev), device_get_unit(dev), buf, &mode) != 0 && resource_int_value(device_get_name(dev), device_get_unit(dev), "sata_rev", &mode) != 0) mode = -1; if (mode >= 0) ch->user[i].revision = mode; ch->user[i].mode = 0; snprintf(buf, sizeof(buf), "dev%d.mode", i); if (resource_string_value(device_get_name(dev), device_get_unit(dev), buf, &res) == 0) mode = ata_str2mode(res); else if (resource_string_value(device_get_name(dev), device_get_unit(dev), "mode", &res) == 0) mode = ata_str2mode(res); else mode = -1; if (mode >= 0) ch->user[i].mode = mode; if (ch->flags & ATA_SATA) ch->user[i].bytecount = 8192; else ch->user[i].bytecount = MAXPHYS; ch->user[i].caps = 0; ch->curr[i] = ch->user[i]; if (ch->flags & ATA_SATA) { if (ch->pm_level > 0) ch->user[i].caps |= CTS_SATA_CAPS_H_PMREQ; if (ch->pm_level > 1) ch->user[i].caps |= CTS_SATA_CAPS_D_PMREQ; } else { if (!(ch->flags & ATA_NO_48BIT_DMA)) ch->user[i].caps |= CTS_ATA_CAPS_H_DMA48; } } callout_init(&ch->poll_callout, 1); /* allocate DMA resources if DMA HW present*/ if (ch->dma.alloc) ch->dma.alloc(dev); /* setup interrupt delivery */ rid = ATA_IRQ_RID; ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (!ch->r_irq) { device_printf(dev, "unable to allocate interrupt\n"); return ENXIO; } if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, ata_interrupt, ch, &ch->ih))) { bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq); device_printf(dev, "unable to setup interrupt\n"); return error; } if (ch->flags & ATA_PERIODIC_POLL) callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch); mtx_lock(&ch->state_mtx); /* Create the device queue for our SIM. */ devq = cam_simq_alloc(1); if (devq == NULL) { device_printf(dev, "Unable to allocate simq\n"); error = ENOMEM; goto err1; } /* Construct SIM entry */ ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch, device_get_unit(dev), &ch->state_mtx, 1, 0, devq); if (ch->sim == NULL) { device_printf(dev, "unable to allocate sim\n"); cam_simq_free(devq); error = ENOMEM; goto err1; } if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { device_printf(dev, "unable to register xpt bus\n"); error = ENXIO; goto err2; } if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(dev, "unable to create path\n"); error = ENXIO; goto err3; } mtx_unlock(&ch->state_mtx); return (0); err3: xpt_bus_deregister(cam_sim_path(ch->sim)); err2: cam_sim_free(ch->sim, /*free_devq*/TRUE); ch->sim = NULL; err1: bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq); mtx_unlock(&ch->state_mtx); if (ch->flags & ATA_PERIODIC_POLL) callout_drain(&ch->poll_callout); return (error); }
void
nm_os_selinfo_init(NM_SELINFO_T *si)
{
        struct mtx *m = &si->m;

        mtx_init(m, "nm_kn_lock", NULL, MTX_DEF);
        knlist_init_mtx(&si->si.si_note, m);
}
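/*
 * Illustrative counterpart to the init above: a minimal teardown sketch,
 * not the actual netmap uninit routine. It assumes the knlist is already
 * empty; knlist_destroy() and mtx_destroy() are the standard FreeBSD
 * primitives for releasing what nm_os_selinfo_init() set up, destroyed in
 * the reverse order of initialization.
 */
static void
nm_selinfo_uninit_sketch(NM_SELINFO_T *si)
{
        knlist_destroy(&si->si.si_note);        /* release the note list first */
        mtx_destroy(&si->m);                    /* then the mutex backing it */
}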
static int pmu_attach(device_t dev) { struct pmu_softc *sc; int i; uint8_t reg; uint8_t cmd[2] = {2, 0}; uint8_t resp[16]; phandle_t node,child; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_memrid = 0; sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_memrid, RF_ACTIVE); mtx_init(&sc->sc_mutex,"pmu",NULL,MTX_DEF | MTX_RECURSE); if (sc->sc_memr == NULL) { device_printf(dev, "Could not alloc mem resource!\n"); return (ENXIO); } /* * Our interrupt is attached to a GPIO pin. Depending on probe order, * we may not have found it yet. If we haven't, it will find us, and * attach our interrupt then. */ pmu = dev; if (pmu_extint != NULL) { if (setup_pmu_intr(dev,pmu_extint) != 0) return (ENXIO); } sc->sc_autopoll = 0; sc->sc_batteries = 0; sc->adb_bus = NULL; sc->sc_leddev = NULL; /* Init PMU */ pmu_write_reg(sc, vBufB, pmu_read_reg(sc, vBufB) | vPB4); pmu_write_reg(sc, vDirB, (pmu_read_reg(sc, vDirB) | vPB4) & ~vPB3); reg = PMU_DEFAULTS; pmu_send(sc, PMU_SET_IMASK, 1, &reg, 16, resp); pmu_write_reg(sc, vIER, 0x94); /* make sure VIA interrupts are on */ pmu_send(sc, PMU_SYSTEM_READY, 1, cmd, 16, resp); pmu_send(sc, PMU_GET_VERSION, 0, cmd, 16, resp); /* Initialize child buses (ADB) */ node = ofw_bus_get_node(dev); for (child = OF_child(node); child != 0; child = OF_peer(child)) { char name[32]; memset(name, 0, sizeof(name)); OF_getprop(child, "name", name, sizeof(name)); if (bootverbose) device_printf(dev, "PMU child <%s>\n",name); if (strncmp(name, "adb", 4) == 0) { sc->adb_bus = device_add_child(dev,"adb",-1); } if (strncmp(name, "power-mgt", 9) == 0) { uint32_t prim_info[9]; if (OF_getprop(child, "prim-info", prim_info, sizeof(prim_info)) >= 7) sc->sc_batteries = (prim_info[6] >> 16) & 0xff; if (bootverbose && sc->sc_batteries > 0) device_printf(dev, "%d batteries detected\n", sc->sc_batteries); } }
int ex_attach(device_t dev) { struct ex_softc * sc = device_get_softc(dev); struct ifnet * ifp; struct ifmedia * ifm; int error; uint16_t temp; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); return (ENOSPC); } /* work out which set of irq <-> internal tables to use */ if (ex_card_type(sc->enaddr) == CARD_TYPE_EX_10_PLUS) { sc->irq2ee = plus_irq2eemap; sc->ee2irq = plus_ee2irqmap; } else { sc->irq2ee = irq2eemap; sc->ee2irq = ee2irqmap; } sc->mem_size = CARD_RAM_SIZE; /* XXX This should be read from the card itself. */ /* * Initialize the ifnet structure. */ ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; ifp->if_start = ex_start; ifp->if_ioctl = ex_ioctl; ifp->if_init = ex_init; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifmedia_init(&sc->ifmedia, 0, ex_ifmedia_upd, ex_ifmedia_sts); mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->timer, &sc->lock, 0); temp = ex_eeprom_read(sc, EE_W5); if (temp & EE_W5_PORT_TPE) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); if (temp & EE_W5_PORT_BNC) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL); if (temp & EE_W5_PORT_AUI) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_NONE, 0, NULL); ifmedia_set(&sc->ifmedia, ex_get_media(sc)); ifm = &sc->ifmedia; ifm->ifm_media = ifm->ifm_cur->ifm_media; ex_ifmedia_upd(ifp); /* * Attach the interface. */ ether_ifattach(ifp, sc->enaddr); error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, ex_intr, (void *)sc, &sc->ih); if (error) { device_printf(dev, "bus_setup_intr() failed!\n"); ether_ifdetach(ifp); mtx_destroy(&sc->lock); return (error); } return(0); }
static int ofw_iicbus_attach(device_t dev) { struct iicbus_softc *sc = IICBUS_SOFTC(dev); struct ofw_iicbus_devinfo *dinfo; phandle_t child, node, root; pcell_t freq, paddr; device_t childdev; ssize_t compatlen; char compat[255]; char *curstr; u_int iic_addr_8bit = 0; sc->dev = dev; mtx_init(&sc->lock, "iicbus", NULL, MTX_DEF); /* * If there is a clock-frequency property for the device node, use it as * the starting value for the bus frequency. Then call the common * routine that handles the tunable/sysctl which allows the FDT value to * be overridden by the user. */ node = ofw_bus_get_node(dev); freq = 0; OF_getencprop(node, "clock-frequency", &freq, sizeof(freq)); iicbus_init_frequency(dev, freq); iicbus_reset(dev, IIC_FASTEST, 0, NULL); bus_generic_probe(dev); bus_enumerate_hinted_children(dev); /* * Check if we're running on a PowerMac, needed for the I2C * address below. */ root = OF_peer(0); compatlen = OF_getprop(root, "compatible", compat, sizeof(compat)); if (compatlen != -1) { for (curstr = compat; curstr < compat + compatlen; curstr += strlen(curstr) + 1) { if (strncmp(curstr, "MacRISC", 7) == 0) iic_addr_8bit = 1; } } /* * Attach those children represented in the device tree. */ for (child = OF_child(node); child != 0; child = OF_peer(child)) { /* * Try to get the I2C address first from the i2c-address * property, then try the reg property. It moves around * on different systems. */ if (OF_getencprop(child, "i2c-address", &paddr, sizeof(paddr)) == -1) if (OF_getencprop(child, "reg", &paddr, sizeof(paddr)) == -1) continue; /* * Now set up the I2C and OFW bus layer devinfo and add it * to the bus. */ dinfo = malloc(sizeof(struct ofw_iicbus_devinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (dinfo == NULL) continue; /* * FreeBSD drivers expect I2C addresses to be expressed as * 8-bit values. Apple OFW data contains 8-bit values, but * Linux FDT data contains 7-bit values, so shift them up to * 8-bit format. */ if (iic_addr_8bit) dinfo->opd_dinfo.addr = paddr; else dinfo->opd_dinfo.addr = paddr << 1; if (ofw_bus_gen_setup_devinfo(&dinfo->opd_obdinfo, child) != 0) { free(dinfo, M_DEVBUF); continue; } childdev = device_add_child(dev, NULL, -1); resource_list_init(&dinfo->opd_dinfo.rl); ofw_bus_intr_to_rl(childdev, child, &dinfo->opd_dinfo.rl, NULL); device_set_ivars(childdev, dinfo); } /* Register bus */ OF_device_register_xref(OF_xref_from_node(node), dev); return (bus_generic_attach(dev)); }
static int a10_gpio_attach(device_t dev) { int rid; phandle_t gpio; struct a10_gpio_softc *sc; sc = device_get_softc(dev); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "a10 gpio", "gpio", MTX_SPIN); rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); goto fail; } sc->sc_bst = rman_get_bustag(sc->sc_mem_res); sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res); rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->sc_irq_res) { device_printf(dev, "cannot allocate interrupt\n"); goto fail; } /* Find our node. */ gpio = ofw_bus_get_node(sc->sc_dev); if (!OF_hasprop(gpio, "gpio-controller")) /* Node is not a GPIO controller. */ goto fail; /* Use the right pin data for the current SoC */ switch (allwinner_soc_type()) { #ifdef SOC_ALLWINNER_A10 case ALLWINNERSOC_A10: sc->padconf = &a10_padconf; break; #endif #ifdef SOC_ALLWINNER_A20 case ALLWINNERSOC_A20: sc->padconf = &a20_padconf; break; #endif #ifdef SOC_ALLWINNER_A31 case ALLWINNERSOC_A31: sc->padconf = &a31_padconf; break; #endif #ifdef SOC_ALLWINNER_A31S case ALLWINNERSOC_A31S: sc->padconf = &a31s_padconf; break; #endif default: return (ENOENT); } sc->sc_busdev = gpiobus_attach_bus(dev); if (sc->sc_busdev == NULL) goto fail; /* * Register as a pinctrl device */ fdt_pinctrl_register(dev, "allwinner,pins"); fdt_pinctrl_configure_tree(dev); return (0); fail: if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); mtx_destroy(&sc->sc_mtx); return (ENXIO); }
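/*
 * Because the mutex above is created with MTX_SPIN, it must be acquired
 * with the spin variants of the lock calls. The macro names below are
 * illustrative (the real driver defines its own), but the underlying
 * mtx_lock_spin()/mtx_unlock_spin()/mtx_assert() calls are the standard
 * FreeBSD API for a spin mutex such as sc_mtx.
 */
#define A10_GPIO_LOCK_SKETCH(_sc)               mtx_lock_spin(&(_sc)->sc_mtx)
#define A10_GPIO_UNLOCK_SKETCH(_sc)             mtx_unlock_spin(&(_sc)->sc_mtx)
#define A10_GPIO_LOCK_ASSERT_SKETCH(_sc)        mtx_assert(&(_sc)->sc_mtx, MA_OWNED)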
/* This is the main program (i.e. the main thread) */ int main(void) { /* Initialization... */ mtx_init(&gMutex, mtx_plain); cnd_init(&gCond); /* Test 1: thread arguments & return values */ printf("PART I: Thread arguments & return values\n"); { thrd_t t1, t2, t3, t4; int id1, id2, id3, id4, ret1, ret2, ret3, ret4; /* Start a bunch of child threads */ id1 = 1; thrd_create(&t1, ThreadIDs, (void*)&id1); thrd_join(t1, &ret1); printf(" Thread 1 returned %d.\n", ret1); id2 = 2; thrd_create(&t2, ThreadIDs, (void*)&id2); thrd_join(t2, &ret2); printf(" Thread 2 returned %d.\n", ret2); id3 = 3; thrd_create(&t3, ThreadIDs, (void*)&id3); thrd_join(t3, &ret3); printf(" Thread 3 returned %d.\n", ret3); id4 = 4; thrd_create(&t4, ThreadIDs, (void*)&id4); thrd_join(t4, &ret4); printf(" Thread 4 returned %d.\n", ret4); } /* Test 2: compile time thread local storage */ printf("PART II: Compile time thread local storage\n"); #ifndef NO_CT_TLS { thrd_t t1; /* Clear the TLS variable (it should keep this value after all threads are finished). */ gLocalVar = 1; printf(" Main gLocalVar is %d.\n", gLocalVar); /* Start a child thread that modifies gLocalVar */ thrd_create(&t1, ThreadTLS, NULL); thrd_join(t1, NULL); /* Check if the TLS variable has changed */ if(gLocalVar == 1) printf(" Main gLocalID was not changed by the child thread - OK!\n"); else printf(" Main gLocalID was changed by the child thread - FAIL!\n"); } #else printf(" Compile time TLS is not supported on this platform...\n"); #endif /* Test 3: mutex locking */ printf("PART III: Mutex locking (100 threads x 10000 iterations)\n"); { thrd_t t[100]; int i; /* Clear the global counter. */ gCount = 0; /* Start a bunch of child threads */ for (i = 0; i < 100; ++ i) { thrd_create(&t[i], ThreadLock, NULL); } /* Wait for the threads to finish */ for (i = 0; i < 100; ++ i) { thrd_join(t[i], NULL); } /* Check the global count */ printf(" gCount = %d\n", gCount); } /* Test 4: condition variable */ printf("PART IV: Condition variable (40 + 1 threads)\n"); { thrd_t t1, t[40]; int i; /* Set the global counter to the number of threads to run. */ gCount = 40; /* Start the waiting thread (it will wait for gCount to reach zero). */ thrd_create(&t1, ThreadCondition2, NULL); /* Start a bunch of child threads (these will decrease gCount by 1 when they finish) */ for (i = 0; i < 40; ++ i) { thrd_create(&t[i], ThreadCondition1, NULL); } /* Wait for the waiting thread to finish */ thrd_join(t1, NULL); /* Wait for the other threads to finish */ for (i = 0; i < 40; ++ i) { thrd_join(t[i], NULL); } } /* Test 5: yield */ printf("PART V: Yield (40 + 1 threads)\n"); { thrd_t t[40]; int i; /* Start a bunch of child threads */ for (i = 0; i < 40; ++ i) { thrd_create(&t[i], ThreadYield, NULL); } /* Yield... */ thrd_yield(); /* Wait for the threads to finish */ for (i = 0; i < 40; ++ i) { thrd_join(t[i], NULL); } } /* Test 6: Sleep */ printf("PART VI: Sleep (10 x 100 ms)\n"); { int i; struct timespec ts; printf(" Sleeping"); fflush(stdout); for (i = 0; i < 10; ++ i) { /* Calculate current time + 100ms */ clock_gettime(TIME_UTC, &ts); ts.tv_nsec += 100000000; if (ts.tv_nsec >= 1000000000) { ts.tv_sec++; ts.tv_nsec -= 1000000000; } /* Sleep... 
*/ thrd_sleep(&ts, NULL); printf("."); fflush(stdout); } printf("\n"); } /* Test 7: Time */ printf("PART VII: Current time (UTC), three times\n"); { struct timespec ts; clock_gettime(TIME_UTC, &ts); printf(" Time = %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec); clock_gettime(TIME_UTC, &ts); printf(" Time = %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec); clock_gettime(TIME_UTC, &ts); printf(" Time = %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec); } /* FIXME: Implement some more tests for the TinyCThread API... */ /* Finalization... */ mtx_destroy(&gMutex); cnd_destroy(&gCond); return 0; }
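/*
 * A minimal sketch of what the ThreadLock worker used in PART III above
 * typically looks like: each of the 100 threads increments the shared
 * gCount 10000 times while holding gMutex, so the printed total should be
 * exactly 1000000. The real test's ThreadLock may differ in detail; this
 * only illustrates the C11-style mtx_lock()/mtx_unlock() pairing around
 * the shared counter.
 */
static int ThreadLockSketch(void *aArg)
{
  int i;
  (void)aArg;
  for (i = 0; i < 10000; ++i)
  {
    mtx_lock(&gMutex);
    ++gCount;
    mtx_unlock(&gMutex);
  }
  return 0;
}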
static int hme_sbus_attach(device_t dev) { struct hme_sbus_softc *hsc; struct hme_softc *sc; u_long start, count; uint32_t burst; int i, error = 0; hsc = device_get_softc(dev); sc = &hsc->hsc_hme; mtx_init(&sc->sc_lock, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); /* * Map five register banks: * * bank 0: HME SEB registers * bank 1: HME ETX registers * bank 2: HME ERX registers * bank 3: HME MAC registers * bank 4: HME MIF registers * */ i = 0; hsc->hsc_seb_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE); if (hsc->hsc_seb_res == NULL) { device_printf(dev, "cannot map SEB registers\n"); error = ENXIO; goto fail_mtx_res; } sc->sc_sebt = rman_get_bustag(hsc->hsc_seb_res); sc->sc_sebh = rman_get_bushandle(hsc->hsc_seb_res); i = 1; hsc->hsc_etx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE); if (hsc->hsc_etx_res == NULL) { device_printf(dev, "cannot map ETX registers\n"); error = ENXIO; goto fail_seb_res; } sc->sc_etxt = rman_get_bustag(hsc->hsc_etx_res); sc->sc_etxh = rman_get_bushandle(hsc->hsc_etx_res); i = 2; hsc->hsc_erx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE); if (hsc->hsc_erx_res == NULL) { device_printf(dev, "cannot map ERX registers\n"); error = ENXIO; goto fail_etx_res; } sc->sc_erxt = rman_get_bustag(hsc->hsc_erx_res); sc->sc_erxh = rman_get_bushandle(hsc->hsc_erx_res); i = 3; hsc->hsc_mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE); if (hsc->hsc_mac_res == NULL) { device_printf(dev, "cannot map MAC registers\n"); error = ENXIO; goto fail_erx_res; } sc->sc_mact = rman_get_bustag(hsc->hsc_mac_res); sc->sc_mach = rman_get_bushandle(hsc->hsc_mac_res); /* * At least on some HMEs, the MIF registers seem to be inside the MAC * range, so try to kludge around it. 
*/ i = 4; hsc->hsc_mif_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE); if (hsc->hsc_mif_res == NULL) { if (bus_get_resource(dev, SYS_RES_MEMORY, i, &start, &count) != 0) { device_printf(dev, "cannot get MIF registers\n"); error = ENXIO; goto fail_mac_res; } if (start < rman_get_start(hsc->hsc_mac_res) || start + count - 1 > rman_get_end(hsc->hsc_mac_res)) { device_printf(dev, "cannot move MIF registers to MAC " "bank\n"); error = ENXIO; goto fail_mac_res; } sc->sc_mift = sc->sc_mact; bus_space_subregion(sc->sc_mact, sc->sc_mach, start - rman_get_start(hsc->hsc_mac_res), count, &sc->sc_mifh); } else { sc->sc_mift = rman_get_bustag(hsc->hsc_mif_res); sc->sc_mifh = rman_get_bushandle(hsc->hsc_mif_res); } i = 0; hsc->hsc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, RF_SHAREABLE | RF_ACTIVE); if (hsc->hsc_ires == NULL) { device_printf(dev, "could not allocate interrupt\n"); error = ENXIO; goto fail_mif_res; } OF_getetheraddr(dev, sc->sc_enaddr); burst = sbus_get_burstsz(dev); /* Translate into plain numerical format */ if ((burst & SBUS_BURST_64)) sc->sc_burst = 64; else if ((burst & SBUS_BURST_32)) sc->sc_burst = 32; else if ((burst & SBUS_BURST_16)) sc->sc_burst = 16; else sc->sc_burst = 0; sc->sc_dev = dev; sc->sc_flags = 0; if ((error = hme_config(sc)) != 0) { device_printf(dev, "could not be configured\n"); goto fail_ires; } if ((error = bus_setup_intr(dev, hsc->hsc_ires, INTR_TYPE_NET | INTR_MPSAFE, NULL, hme_intr, sc, &hsc->hsc_ih)) != 0) { device_printf(dev, "couldn't establish interrupt\n"); hme_detach(sc); goto fail_ires; } return (0); fail_ires: bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(hsc->hsc_ires), hsc->hsc_ires); fail_mif_res: if (hsc->hsc_mif_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(hsc->hsc_mif_res), hsc->hsc_mif_res); } fail_mac_res: bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(hsc->hsc_mac_res), hsc->hsc_mac_res); fail_erx_res: bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(hsc->hsc_erx_res), hsc->hsc_erx_res); fail_etx_res: bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(hsc->hsc_etx_res), hsc->hsc_etx_res); fail_seb_res: bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(hsc->hsc_seb_res), hsc->hsc_seb_res); fail_mtx_res: mtx_destroy(&sc->sc_lock); return (error); }
static int usie_attach(device_t self) { struct usie_softc *sc = device_get_softc(self); struct usb_attach_arg *uaa = device_get_ivars(self); struct ifnet *ifp; struct usb_interface *iface; struct usb_interface_descriptor *id; struct usb_device_request req; int err; uint16_t fwattr; uint8_t iface_index; uint8_t ifidx; uint8_t start; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; mtx_init(&sc->sc_mtx, "usie", MTX_NETWORK_LOCK, MTX_DEF); TASK_INIT(&sc->sc_if_status_task, 0, usie_if_status_cb, sc); TASK_INIT(&sc->sc_if_sync_task, 0, usie_if_sync_cb, sc); usb_callout_init_mtx(&sc->sc_if_sync_ch, &sc->sc_mtx, 0); mtx_lock(&sc->sc_mtx); /* set power mode to D0 */ req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = USIE_POWER; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, 0); if (usie_do_request(sc, &req, NULL)) { mtx_unlock(&sc->sc_mtx); goto detach; } /* read fw attr */ fwattr = 0; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = USIE_FW_ATTR; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, sizeof(fwattr)); if (usie_do_request(sc, &req, &fwattr)) { mtx_unlock(&sc->sc_mtx); goto detach; } mtx_unlock(&sc->sc_mtx); /* check DHCP supports */ DPRINTF("fwattr=%x\n", fwattr); if (!(fwattr & USIE_FW_DHCP)) { device_printf(self, "DHCP is not supported. A firmware upgrade might be needed.\n"); } /* find available interfaces */ sc->sc_nucom = 0; for (ifidx = 0; ifidx < USIE_IFACE_MAX; ifidx++) { iface = usbd_get_iface(uaa->device, ifidx); if (iface == NULL) break; id = usbd_get_interface_descriptor(iface); if ((id == NULL) || (id->bInterfaceClass != UICLASS_VENDOR)) continue; /* setup Direct IP transfer */ if (id->bInterfaceNumber >= 7 && id->bNumEndpoints == 3) { sc->sc_if_ifnum = id->bInterfaceNumber; iface_index = ifidx; DPRINTF("ifnum=%d, ifidx=%d\n", sc->sc_if_ifnum, ifidx); err = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_if_xfer, usie_if_config, USIE_IF_N_XFER, sc, &sc->sc_mtx); if (err == 0) continue; device_printf(self, "could not allocate USB transfers on " "iface_index=%d, err=%s\n", iface_index, usbd_errstr(err)); goto detach; } /* setup ucom */ if (sc->sc_nucom >= USIE_UCOM_MAX) continue; usbd_set_parent_iface(uaa->device, ifidx, uaa->info.bIfaceIndex); DPRINTF("NumEndpoints=%d bInterfaceNumber=%d\n", id->bNumEndpoints, id->bInterfaceNumber); if (id->bNumEndpoints == 2) { sc->sc_uc_xfer[sc->sc_nucom][0] = NULL; start = 1; } else start = 0; err = usbd_transfer_setup(uaa->device, &ifidx, sc->sc_uc_xfer[sc->sc_nucom] + start, usie_uc_config + start, USIE_UC_N_XFER - start, &sc->sc_ucom[sc->sc_nucom], &sc->sc_mtx); if (err != 0) { DPRINTF("usbd_transfer_setup error=%s\n", usbd_errstr(err)); continue; } mtx_lock(&sc->sc_mtx); for (; start < USIE_UC_N_XFER; start++) usbd_xfer_set_stall(sc->sc_uc_xfer[sc->sc_nucom][start]); mtx_unlock(&sc->sc_mtx); sc->sc_uc_ifnum[sc->sc_nucom] = id->bInterfaceNumber; sc->sc_nucom++; /* found a port */ } if (sc->sc_nucom == 0) { device_printf(self, "no comports found\n"); goto detach; } err = ucom_attach(&sc->sc_super_ucom, sc->sc_ucom, sc->sc_nucom, sc, &usie_uc_callback, &sc->sc_mtx); if (err != 0) { DPRINTF("ucom_attach failed\n"); goto detach; } DPRINTF("Found %d interfaces.\n", sc->sc_nucom); /* setup ifnet (Direct IP) */ sc->sc_ifp = ifp = if_alloc(IFT_OTHER); if (ifp == NULL) { device_printf(self, "Could not allocate a network interface\n"); goto detach; } if_initname(ifp, "usie", device_get_unit(self)); ifp->if_softc = sc; ifp->if_mtu = USIE_MTU_MAX; ifp->if_flags |= 
IFF_NOARP; ifp->if_init = usie_if_init; ifp->if_ioctl = usie_if_ioctl; ifp->if_start = usie_if_start; ifp->if_output = usie_if_output; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); if_attach(ifp); bpfattach(ifp, DLT_RAW, 0); if (fwattr & USIE_PM_AUTO) { usbd_set_power_mode(uaa->device, USB_POWER_MODE_SAVE); DPRINTF("enabling automatic suspend and resume\n"); } else { usbd_set_power_mode(uaa->device, USB_POWER_MODE_ON); DPRINTF("USB power is always ON\n"); } DPRINTF("device attached\n"); return (0); detach: usie_detach(self); return (ENOMEM); }
static int rk30_gpio_attach(device_t dev) { struct rk30_gpio_softc *sc = device_get_softc(dev); uint32_t func; int i, rid; phandle_t gpio; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, "rk30 gpio", "gpio", MTX_DEF); rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "cannot allocate memory window\n"); return (ENXIO); } sc->sc_bst = rman_get_bustag(sc->sc_mem_res); sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res); rid = 0; sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->sc_irq_res) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); device_printf(dev, "cannot allocate interrupt\n"); return (ENXIO); } /* Find our node. */ gpio = ofw_bus_get_node(sc->sc_dev); if (!OF_hasprop(gpio, "gpio-controller")) /* Node is not a GPIO controller. */ goto fail; /* Initialize the software controlled pins. */ for (i = 0; i < RK30_GPIO_PINS; i++) { snprintf(sc->sc_gpio_pins[i].gp_name, GPIOMAXNAME, "pin %d", i); func = rk30_gpio_get_function(sc, i); sc->sc_gpio_pins[i].gp_pin = i; sc->sc_gpio_pins[i].gp_caps = RK30_GPIO_DEFAULT_CAPS; sc->sc_gpio_pins[i].gp_flags = rk30_gpio_func_flag(func); } sc->sc_gpio_npins = i; device_add_child(dev, "gpioc", device_get_unit(dev)); device_add_child(dev, "gpiobus", device_get_unit(dev)); return (bus_generic_attach(dev)); fail: if (sc->sc_irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); return (ENXIO); }
static int pl190_intc_attach(device_t dev) { struct pl190_intc_softc *sc; uint32_t id; int i, rid; struct pl190_intc_irqsrc *isrcs; struct intr_pic *pic; int error; uint32_t irq; const char *name; phandle_t xref; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->mtx, device_get_nameunit(dev), "pl190", MTX_SPIN); /* Request memory resources */ rid = 0; sc->intc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->intc_res == NULL) { device_printf(dev, "Error: could not allocate memory resources\n"); return (ENXIO); } /* * All interrupts should use IRQ line */ INTC_VIC_WRITE_4(sc, VICINTSELECT, 0x00000000); /* Disable all interrupts */ INTC_VIC_WRITE_4(sc, VICINTENCLEAR, 0xffffffff); id = 0; for (i = 3; i >= 0; i--) { id = (id << 8) | (INTC_VIC_READ_4(sc, VICPERIPHID + i*4) & 0xff); } device_printf(dev, "Peripheral ID: %08x\n", id); id = 0; for (i = 3; i >= 0; i--) { id = (id << 8) | (INTC_VIC_READ_4(sc, VICPRIMECELLID + i*4) & 0xff); } device_printf(dev, "PrimeCell ID: %08x\n", id); /* PIC attachment */ isrcs = sc->isrcs; name = device_get_nameunit(sc->dev); for (irq = 0; irq < VIC_NIRQS; irq++) { isrcs[irq].irq = irq; error = intr_isrc_register(&isrcs[irq].isrc, sc->dev, 0, "%s,%u", name, irq); if (error != 0) return (error); } xref = OF_xref_from_node(ofw_bus_get_node(sc->dev)); pic = intr_pic_register(sc->dev, xref); if (pic == NULL) return (ENXIO); return (intr_pic_claim_root(sc->dev, xref, pl190_intc_intr, sc, 0)); }
static int cdce_attach(device_t dev) { struct cdce_softc *sc = device_get_softc(dev); struct usb_ether *ue = &sc->sc_ue; struct usb_attach_arg *uaa = device_get_ivars(dev); struct usb_interface *iface; const struct usb_cdc_union_descriptor *ud; const struct usb_interface_descriptor *id; const struct usb_cdc_ethernet_descriptor *ued; const struct usb_config *pcfg; int error; uint8_t i; uint8_t data_iface_no; char eaddr_str[5 * ETHER_ADDR_LEN]; /* approx */ sc->sc_flags = USB_GET_DRIVER_INFO(uaa); sc->sc_ue.ue_udev = uaa->device; device_set_usb_desc(dev); mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF); ud = usbd_find_descriptor (uaa->device, NULL, uaa->info.bIfaceIndex, UDESC_CS_INTERFACE, 0 - 1, UDESCSUB_CDC_UNION, 0 - 1); if ((ud == NULL) || (ud->bLength < sizeof(*ud)) || (sc->sc_flags & CDCE_FLAG_NO_UNION)) { DPRINTFN(1, "No union descriptor!\n"); sc->sc_ifaces_index[0] = uaa->info.bIfaceIndex; sc->sc_ifaces_index[1] = uaa->info.bIfaceIndex; goto alloc_transfers; } data_iface_no = ud->bSlaveInterface[0]; for (i = 0;; i++) { iface = usbd_get_iface(uaa->device, i); if (iface) { id = usbd_get_interface_descriptor(iface); if (id && (id->bInterfaceNumber == data_iface_no)) { sc->sc_ifaces_index[0] = i; sc->sc_ifaces_index[1] = uaa->info.bIfaceIndex; usbd_set_parent_iface(uaa->device, i, uaa->info.bIfaceIndex); break; } } else { device_printf(dev, "no data interface found\n"); goto detach; } } /* * <quote> * * The Data Class interface of a networking device shall have * a minimum of two interface settings. The first setting * (the default interface setting) includes no endpoints and * therefore no networking traffic is exchanged whenever the * default interface setting is selected. One or more * additional interface settings are used for normal * operation, and therefore each includes a pair of endpoints * (one IN, and one OUT) to exchange network traffic. Select * an alternate interface setting to initialize the network * aspects of the device and to enable the exchange of * network traffic. * * </quote> * * Some devices, most notably cable modems, include interface * settings that have no IN or OUT endpoint, therefore loop * through the list of all available interface settings * looking for one with both IN and OUT endpoints. 
*/ alloc_transfers: pcfg = cdce_config; /* Default Configuration */ for (i = 0; i != 32; i++) { error = usbd_set_alt_interface_index(uaa->device, sc->sc_ifaces_index[0], i); if (error) break; #if CDCE_HAVE_NCM if ((i == 0) && (cdce_ncm_init(sc) == 0)) pcfg = cdce_ncm_config; #endif error = usbd_transfer_setup(uaa->device, sc->sc_ifaces_index, sc->sc_xfer, pcfg, CDCE_N_TRANSFER, sc, &sc->sc_mtx); if (error == 0) break; } if (error || (i == 32)) { device_printf(dev, "No valid alternate " "setting found\n"); goto detach; } ued = usbd_find_descriptor (uaa->device, NULL, uaa->info.bIfaceIndex, UDESC_CS_INTERFACE, 0 - 1, UDESCSUB_CDC_ENF, 0 - 1); if ((ued == NULL) || (ued->bLength < sizeof(*ued))) { error = USB_ERR_INVAL; } else { error = usbd_req_get_string_any(uaa->device, NULL, eaddr_str, sizeof(eaddr_str), ued->iMacAddress); } if (error) { /* fake MAC address */ device_printf(dev, "faking MAC address\n"); sc->sc_ue.ue_eaddr[0] = 0x2a; memcpy(&sc->sc_ue.ue_eaddr[1], &ticks, sizeof(uint32_t)); sc->sc_ue.ue_eaddr[5] = device_get_unit(dev); } else { memset(sc->sc_ue.ue_eaddr, 0, sizeof(sc->sc_ue.ue_eaddr)); for (i = 0; i != (ETHER_ADDR_LEN * 2); i++) { char c = eaddr_str[i]; if ('0' <= c && c <= '9') c -= '0'; else if (c != 0) c -= 'A' - 10; else break; c &= 0xf; if ((i & 1) == 0) c <<= 4; sc->sc_ue.ue_eaddr[i / 2] |= c; } if (uaa->usb_mode == USB_MODE_DEVICE) { /* * Do not use the same MAC address like the peer ! */ sc->sc_ue.ue_eaddr[5] ^= 0xFF; } } ue->ue_sc = sc; ue->ue_dev = dev; ue->ue_udev = uaa->device; ue->ue_mtx = &sc->sc_mtx; ue->ue_methods = &cdce_ue_methods; error = uether_ifattach(ue); if (error) { device_printf(dev, "could not attach interface\n"); goto detach; } return (0); /* success */ detach: cdce_detach(dev); return (ENXIO); /* failure */ }
int udf_mountfs(struct vnode *devvp, struct mount *mp, uint32_t lb, struct proc *p) { struct buf *bp = NULL; struct anchor_vdp avdp; struct umount *ump = NULL; struct part_desc *pd; struct logvol_desc *lvd; struct fileset_desc *fsd; struct extfile_entry *xfentry; struct file_entry *fentry; uint32_t sector, size, mvds_start, mvds_end; uint32_t fsd_offset = 0; uint16_t part_num = 0, fsd_part = 0; int error = EINVAL; int logvol_found = 0, part_found = 0, fsd_found = 0; int bsize; /* * Disallow multiple mounts of the same device. * Disallow mounting of a device that is currently in use * (except for root, which might share swap device for miniroot). * Flush out any old buffers remaining from a previous use. */ if ((error = vfs_mountedon(devvp))) return (error); if (vcount(devvp) > 1 && devvp != rootvp) return (EBUSY); vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0); VOP_UNLOCK(devvp, 0, p); if (error) return (error); error = VOP_OPEN(devvp, FREAD, FSCRED, p); if (error) return (error); ump = malloc(sizeof(*ump), M_UDFMOUNT, M_WAITOK | M_ZERO); mp->mnt_data = (qaddr_t) ump; mp->mnt_stat.f_fsid.val[0] = devvp->v_rdev; mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_UDF); mp->mnt_flag |= MNT_LOCAL; ump->um_mountp = mp; ump->um_dev = devvp->v_rdev; ump->um_devvp = devvp; bsize = 2048; /* Should probe the media for its size. */ /* * Get the Anchor Volume Descriptor Pointer from sector 256. * Should also check sector n - 256, n, and 512. */ sector = 256; if ((error = bread(devvp, sector * btodb(bsize), bsize, NOCRED, &bp)) != 0) goto bail; if ((error = udf_checktag((struct desc_tag *)bp->b_data, TAGID_ANCHOR))) goto bail; bcopy(bp->b_data, &avdp, sizeof(struct anchor_vdp)); brelse(bp); bp = NULL; /* * Extract the Partition Descriptor and Logical Volume Descriptor * from the Volume Descriptor Sequence. * Should we care about the partition type right now? * What about multiple partitions? */ mvds_start = letoh32(avdp.main_vds_ex.loc); mvds_end = mvds_start + (letoh32(avdp.main_vds_ex.len) - 1) / bsize; for (sector = mvds_start; sector < mvds_end; sector++) { if ((error = bread(devvp, sector * btodb(bsize), bsize, NOCRED, &bp)) != 0) { printf("Can't read sector %d of VDS\n", sector); goto bail; } lvd = (struct logvol_desc *)bp->b_data; if (!udf_checktag(&lvd->tag, TAGID_LOGVOL)) { ump->um_bsize = letoh32(lvd->lb_size); ump->um_bmask = ump->um_bsize - 1; ump->um_bshift = ffs(ump->um_bsize) - 1; fsd_part = letoh16(lvd->_lvd_use.fsd_loc.loc.part_num); fsd_offset = letoh32(lvd->_lvd_use.fsd_loc.loc.lb_num); if (udf_find_partmaps(ump, lvd)) break; logvol_found = 1; } pd = (struct part_desc *)bp->b_data; if (!udf_checktag(&pd->tag, TAGID_PARTITION)) { part_found = 1; part_num = letoh16(pd->part_num); ump->um_len = ump->um_reallen = letoh32(pd->part_len); ump->um_start = ump->um_realstart = letoh32(pd->start_loc); } brelse(bp); bp = NULL; if ((part_found) && (logvol_found)) break; } if (!part_found || !logvol_found) { error = EINVAL; goto bail; } if (ISSET(ump->um_flags, UDF_MNT_USES_META)) { /* Read Metadata File 'File Entry' to find Metadata file. 
*/ struct long_ad *la; sector = ump->um_start + ump->um_meta_start; /* Set in udf_get_mpartmap() */ if ((error = RDSECTOR(devvp, sector, ump->um_bsize, &bp)) != 0) { printf("Cannot read sector %d for Metadata File Entry\n", sector); error = EINVAL; goto bail; } xfentry = (struct extfile_entry *)bp->b_data; fentry = (struct file_entry *)bp->b_data; if (udf_checktag(&xfentry->tag, TAGID_EXTFENTRY) == 0) la = (struct long_ad *)&xfentry->data[letoh32(xfentry->l_ea)]; else if (udf_checktag(&fentry->tag, TAGID_FENTRY) == 0) la = (struct long_ad *)&fentry->data[letoh32(fentry->l_ea)]; else { printf("Invalid Metadata File FE @ sector %d! (tag.id %d)\n", sector, fentry->tag.id); error = EINVAL; goto bail; } ump->um_meta_start = letoh32(la->loc.lb_num); ump->um_meta_len = letoh32(la->len); if (bp != NULL) { brelse(bp); bp = NULL; } } else if (fsd_part != part_num) { printf("FSD does not lie within the partition!\n"); error = EINVAL; goto bail; } mtx_init(&ump->um_hashmtx, IPL_NONE); ump->um_hashtbl = hashinit(UDF_HASHTBLSIZE, M_UDFMOUNT, M_WAITOK, &ump->um_hashsz); /* Get the VAT, if needed */ if (ump->um_flags & UDF_MNT_FIND_VAT) { error = udf_vat_get(ump, lb); if (error) goto bail; } /* * Grab the Fileset Descriptor * Thanks to Chuck McCrobie <*****@*****.**> for pointing * me in the right direction here. */ if (ISSET(ump->um_flags, UDF_MNT_USES_META)) sector = ump->um_meta_start; else sector = fsd_offset; udf_vat_map(ump, &sector); if ((error = RDSECTOR(devvp, sector, ump->um_bsize, &bp)) != 0) { printf("Cannot read sector %d of FSD\n", sector); goto bail; } fsd = (struct fileset_desc *)bp->b_data; if (!udf_checktag(&fsd->tag, TAGID_FSD)) { fsd_found = 1; bcopy(&fsd->rootdir_icb, &ump->um_root_icb, sizeof(struct long_ad)); if (ISSET(ump->um_flags, UDF_MNT_USES_META)) { ump->um_root_icb.loc.lb_num += ump->um_meta_start; ump->um_root_icb.loc.part_num = part_num; } } brelse(bp); bp = NULL; if (!fsd_found) { printf("Couldn't find the fsd\n"); error = EINVAL; goto bail; } /* * Find the file entry for the root directory. */ sector = letoh32(ump->um_root_icb.loc.lb_num); size = letoh32(ump->um_root_icb.len); udf_vat_map(ump, &sector); if ((error = udf_readlblks(ump, sector, size, &bp)) != 0) { printf("Cannot read sector %d\n", sector); goto bail; } xfentry = (struct extfile_entry *)bp->b_data; fentry = (struct file_entry *)bp->b_data; error = udf_checktag(&xfentry->tag, TAGID_EXTFENTRY); if (error) { error = udf_checktag(&fentry->tag, TAGID_FENTRY); if (error) { printf("Invalid root file entry!\n"); goto bail; } } brelse(bp); bp = NULL; devvp->v_specmountpoint = mp; return (0); bail: if (ump->um_hashtbl != NULL) free(ump->um_hashtbl, M_UDFMOUNT); if (ump != NULL) { free(ump, M_UDFMOUNT); mp->mnt_data = NULL; mp->mnt_flag &= ~MNT_LOCAL; } if (bp != NULL) brelse(bp); vn_lock(devvp, LK_EXCLUSIVE|LK_RETRY, p); VOP_CLOSE(devvp, FREAD, FSCRED, p); VOP_UNLOCK(devvp, 0, p); return (error); }
static int amr_pci_attach(device_t dev) { struct amr_softc *sc; struct amr_ident *id; int rid, rtype, error; debug_called(1); /* * Initialise softc. */ sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->amr_dev = dev; /* assume failure is 'not configured' */ error = ENXIO; /* * Determine board type. */ if ((id = amr_find_ident(dev)) == NULL) return (ENXIO); if (id->flags & AMR_ID_QUARTZ) { sc->amr_type |= AMR_TYPE_QUARTZ; } if ((amr_force_sg32 == 0) && (id->flags & AMR_ID_DO_SG64) && (sizeof(vm_paddr_t) > 4)) { device_printf(dev, "Using 64-bit DMA\n"); sc->amr_type |= AMR_TYPE_SG64; } /* force the busmaster enable bit on */ pci_enable_busmaster(dev); /* * Allocate the PCI register window. */ rid = PCIR_BAR(0); rtype = AMR_IS_QUARTZ(sc) ? SYS_RES_MEMORY : SYS_RES_IOPORT; sc->amr_reg = bus_alloc_resource_any(dev, rtype, &rid, RF_ACTIVE); if (sc->amr_reg == NULL) { device_printf(sc->amr_dev, "can't allocate register window\n"); goto out; } sc->amr_btag = rman_get_bustag(sc->amr_reg); sc->amr_bhandle = rman_get_bushandle(sc->amr_reg); /* * Allocate and connect our interrupt. */ rid = 0; sc->amr_irq = bus_alloc_resource_any(sc->amr_dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->amr_irq == NULL) { device_printf(sc->amr_dev, "can't allocate interrupt\n"); goto out; } if (bus_setup_intr(sc->amr_dev, sc->amr_irq, INTR_TYPE_BIO | INTR_ENTROPY | INTR_MPSAFE, NULL, amr_pci_intr, sc, &sc->amr_intr)) { device_printf(sc->amr_dev, "can't set up interrupt\n"); goto out; } debug(2, "interrupt attached"); /* assume failure is 'out of memory' */ error = ENOMEM; /* * Allocate the parent bus DMA tag appropriate for PCI. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), /* PCI parent */ 1, 0, /* alignment,boundary */ AMR_IS_SG64(sc) ? BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->amr_parent_dmat)) { device_printf(dev, "can't allocate parent DMA tag\n"); goto out; } /* * Create DMA tag for mapping buffers into controller-addressable space. */ if (bus_dma_tag_create(sc->amr_parent_dmat, /* parent */ 1, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ DFLTPHYS, /* maxsize */ AMR_NSEG, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->amr_list_lock, /* lockarg */ &sc->amr_buffer_dmat)) { device_printf(sc->amr_dev, "can't allocate buffer DMA tag\n"); goto out; } if (bus_dma_tag_create(sc->amr_parent_dmat, /* parent */ 1, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ DFLTPHYS, /* maxsize */ AMR_NSEG, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->amr_list_lock, /* lockarg */ &sc->amr_buffer64_dmat)) { device_printf(sc->amr_dev, "can't allocate buffer DMA tag\n"); goto out; } debug(2, "dma tag done"); /* * Allocate and set up mailbox in a bus-visible fashion. */ mtx_init(&sc->amr_list_lock, "AMR List Lock", NULL, MTX_DEF); mtx_init(&sc->amr_hw_lock, "AMR HW Lock", NULL, MTX_DEF); if ((error = amr_setup_mbox(sc)) != 0) goto out; debug(2, "mailbox setup"); /* * Build the scatter/gather buffers. 
*/ if ((error = amr_sglist_map(sc)) != 0) goto out; debug(2, "s/g list mapped"); if ((error = amr_ccb_map(sc)) != 0) goto out; debug(2, "ccb mapped"); /* * Do bus-independant initialisation, bring controller online. */ error = amr_attach(sc); out: if (error) amr_pci_free(sc); return(error); }
static int bcm_dma_attach(device_t dev) { struct bcm_dma_softc *sc = device_get_softc(dev); int rid, err = 0; int i; sc->sc_dev = dev; if (bcm_dma_sc) return (ENXIO); for (i = 0; i < BCM_DMA_CH_MAX; i++) { sc->sc_irq[i] = NULL; sc->sc_intrhand[i] = NULL; } /* DMA0 - DMA14 */ rid = 0; sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_mem == NULL) { device_printf(dev, "could not allocate memory resource\n"); return (ENXIO); } /* IRQ DMA0 - DMA11 XXX NOT USE DMA12(spurious?) */ for (rid = 0; rid < BCM_DMA_CH_MAX; rid++) { sc->sc_irq[rid] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->sc_irq[rid] == NULL) { device_printf(dev, "cannot allocate interrupt\n"); err = ENXIO; goto fail; } if (bus_setup_intr(dev, sc->sc_irq[rid], INTR_TYPE_MISC | INTR_MPSAFE, NULL, bcm_dma_intr, &sc->sc_dma_ch[rid], &sc->sc_intrhand[rid])) { device_printf(dev, "cannot setup interrupt handler\n"); err = ENXIO; goto fail; } } mtx_init(&sc->sc_mtx, "bcmdma", "bcmdma", MTX_DEF); bcm_dma_sc = sc; err = bcm_dma_init(dev); if (err) goto fail; return (err); fail: if (sc->sc_mem) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem); for (i = 0; i < BCM_DMA_CH_MAX; i++) { if (sc->sc_intrhand[i]) bus_teardown_intr(dev, sc->sc_irq[i], sc->sc_intrhand[i]); if (sc->sc_irq[i]) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq[i]); } return (err); }
int create_geom_disk(struct nand_chip *chip) { struct disk *ndisk, *rdisk; /* Create the disk device */ ndisk = disk_alloc(); ndisk->d_strategy = nand_strategy; ndisk->d_ioctl = nand_ioctl; ndisk->d_getattr = nand_getattr; ndisk->d_name = "gnand"; ndisk->d_drv1 = chip; ndisk->d_maxsize = chip->chip_geom.block_size; ndisk->d_sectorsize = chip->chip_geom.page_size; ndisk->d_mediasize = chip->chip_geom.chip_size; ndisk->d_unit = chip->num + 10 * device_get_unit(device_get_parent(chip->dev)); /* * When using BBT, make two last blocks of device unavailable * to user (because those are used to store BBT table). */ if (chip->bbt != NULL) ndisk->d_mediasize -= (2 * chip->chip_geom.block_size); ndisk->d_flags = DISKFLAG_CANDELETE; snprintf(ndisk->d_ident, sizeof(ndisk->d_ident), "nand: Man:0x%02x Dev:0x%02x", chip->id.man_id, chip->id.dev_id); ndisk->d_rotation_rate = DISK_RR_NON_ROTATING; disk_create(ndisk, DISK_VERSION); /* Create the RAW disk device */ rdisk = disk_alloc(); rdisk->d_strategy = nand_strategy_raw; rdisk->d_ioctl = nand_ioctl; rdisk->d_getattr = nand_getattr; rdisk->d_name = "gnand.raw"; rdisk->d_drv1 = chip; rdisk->d_maxsize = chip->chip_geom.block_size; rdisk->d_sectorsize = chip->chip_geom.page_size; rdisk->d_mediasize = chip->chip_geom.chip_size; rdisk->d_unit = chip->num + 10 * device_get_unit(device_get_parent(chip->dev)); rdisk->d_flags = DISKFLAG_CANDELETE; snprintf(rdisk->d_ident, sizeof(rdisk->d_ident), "nand_raw: Man:0x%02x Dev:0x%02x", chip->id.man_id, chip->id.dev_id); rdisk->d_rotation_rate = DISK_RR_NON_ROTATING; disk_create(rdisk, DISK_VERSION); chip->ndisk = ndisk; chip->rdisk = rdisk; mtx_init(&chip->qlock, "NAND I/O lock", NULL, MTX_DEF); bioq_init(&chip->bioq); TASK_INIT(&chip->iotask, 0, nand_io_proc, chip); chip->tq = taskqueue_create("nand_taskq", M_WAITOK, taskqueue_thread_enqueue, &chip->tq); taskqueue_start_threads(&chip->tq, 1, PI_DISK, "nand taskq"); if (bootverbose) device_printf(chip->dev, "Created gnand%d for chip [0x%0x, " "0x%0x]\n", ndisk->d_unit, chip->id.man_id, chip->id.dev_id); return (0); }
static int iir_pci_attach(device_t dev) { struct gdt_softc *gdt; struct resource *irq = NULL; int retries, rid, error = 0; void *ih; u_int8_t protocol; gdt = device_get_softc(dev); mtx_init(&gdt->sc_lock, "iir", NULL, MTX_DEF); /* map DPMEM */ rid = PCI_DPMEM; gdt->sc_dpmem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (gdt->sc_dpmem == NULL) { device_printf(dev, "can't allocate register resources\n"); error = ENOMEM; goto err; } /* get IRQ */ rid = 0; irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); if (irq == NULL) { device_printf(dev, "can't find IRQ value\n"); error = ENOMEM; goto err; } gdt->sc_devnode = dev; gdt->sc_init_level = 0; gdt->sc_hanum = device_get_unit(dev); gdt->sc_bus = pci_get_bus(dev); gdt->sc_slot = pci_get_slot(dev); gdt->sc_vendor = pci_get_vendor(dev); gdt->sc_device = pci_get_device(dev); gdt->sc_subdevice = pci_get_subdevice(dev); gdt->sc_class = GDT_MPR; /* no FC ctr. if (gdt->sc_device >= GDT_PCI_PRODUCT_FC) gdt->sc_class |= GDT_FC; */ /* initialize RP controller */ /* check and reset interface area */ bus_write_4(gdt->sc_dpmem, GDT_MPR_IC, htole32(GDT_MPR_MAGIC)); if (bus_read_4(gdt->sc_dpmem, GDT_MPR_IC) != htole32(GDT_MPR_MAGIC)) { device_printf(dev, "cannot access DPMEM at 0x%lx (shadowed?)\n", rman_get_start(gdt->sc_dpmem)); error = ENXIO; goto err; } bus_set_region_4(gdt->sc_dpmem, GDT_I960_SZ, htole32(0), GDT_MPR_SZ >> 2); /* Disable everything */ bus_write_1(gdt->sc_dpmem, GDT_EDOOR_EN, bus_read_1(gdt->sc_dpmem, GDT_EDOOR_EN) | 4); bus_write_1(gdt->sc_dpmem, GDT_MPR_EDOOR, 0xff); bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_STATUS, 0); bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_CMD_INDEX, 0); bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO, htole32(rman_get_start(gdt->sc_dpmem))); bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_CMD_INDX, 0xff); bus_write_1(gdt->sc_dpmem, GDT_MPR_LDOOR, 1); DELAY(20); retries = GDT_RETRIES; while (bus_read_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_STATUS) != 0xff) { if (--retries == 0) { device_printf(dev, "DEINIT failed\n"); error = ENXIO; goto err; } DELAY(1); } protocol = (uint8_t)le32toh(bus_read_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO)); bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_STATUS, 0); if (protocol != GDT_PROTOCOL_VERSION) { device_printf(dev, "unsupported protocol %d\n", protocol); error = ENXIO; goto err; } /* special command to controller BIOS */ bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO, htole32(0)); bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO + sizeof (u_int32_t), htole32(0)); bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO + 2 * sizeof (u_int32_t), htole32(1)); bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO + 3 * sizeof (u_int32_t), htole32(0)); bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_CMD_INDX, 0xfe); bus_write_1(gdt->sc_dpmem, GDT_MPR_LDOOR, 1); DELAY(20); retries = GDT_RETRIES; while (bus_read_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_STATUS) != 0xfe) { if (--retries == 0) { device_printf(dev, "initialization error\n"); error = ENXIO; goto err; } DELAY(1); } bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_STATUS, 0); gdt->sc_ic_all_size = GDT_MPR_SZ; gdt->sc_copy_cmd = gdt_mpr_copy_cmd; gdt->sc_get_status = gdt_mpr_get_status; gdt->sc_intr = gdt_mpr_intr; gdt->sc_release_event = gdt_mpr_release_event; gdt->sc_set_sema0 = gdt_mpr_set_sema0; gdt->sc_test_busy = gdt_mpr_test_busy; /* Allocate a dmatag representing the capabilities of this attachment */ if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignemnt*/1, 
/*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/BUS_SPACE_MAXSIZE_32BIT, /*nsegments*/BUS_SPACE_UNRESTRICTED, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&gdt->sc_lock, &gdt->sc_parent_dmat) != 0) { error = ENXIO; goto err; } gdt->sc_init_level++; if (iir_init(gdt) != 0) { iir_free(gdt); error = ENXIO; goto err; } /* Register with the XPT */ iir_attach(gdt); /* associate interrupt handler */ if (bus_setup_intr(dev, irq, INTR_TYPE_CAM | INTR_MPSAFE, NULL, iir_intr, gdt, &ih )) { device_printf(dev, "Unable to register interrupt handler\n"); error = ENXIO; goto err; } gdt_pci_enable_intr(gdt); return (0); err: if (irq) bus_release_resource( dev, SYS_RES_IRQ, 0, irq ); if (gdt->sc_dpmem) bus_release_resource( dev, SYS_RES_MEMORY, rid, gdt->sc_dpmem ); mtx_destroy(&gdt->sc_lock); return (error); }
static void
ugidfw_init(struct mac_policy_conf *mpc)
{

        mtx_init(&ugidfw_mtx, "mac_bsdextended lock", NULL, MTX_DEF);
}
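/*
 * Hedged sketch of the matching teardown and a typical critical section
 * for the lock initialized above. mac_bsdextended has its own destroy
 * hook; the functions below are illustrative only and assume ugidfw_mtx
 * protects the module's shared rule table.
 */
static void
ugidfw_destroy_sketch(struct mac_policy_conf *mpc)
{

        mtx_destroy(&ugidfw_mtx);
}

static void
ugidfw_rule_access_sketch(void)
{

        mtx_lock(&ugidfw_mtx);
        /* ... walk or modify the shared rule table here ... */
        mtx_unlock(&ugidfw_mtx);
}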
static int uhso_attach(device_t self) { struct uhso_softc *sc = device_get_softc(self); struct usb_attach_arg *uaa = device_get_ivars(self); struct usb_interface_descriptor *id; struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; struct sysctl_oid *tree = NULL, *tty_node; struct ucom_softc *ucom; struct uhso_tty *ht; int i, error, port; void *probe_f; usb_error_t uerr; char *desc; sc->sc_dev = self; sc->sc_udev = uaa->device; mtx_init(&sc->sc_mtx, "uhso", NULL, MTX_DEF); ucom_ref(&sc->sc_super_ucom); sc->sc_ucom = NULL; sc->sc_ttys = 0; sc->sc_radio = 1; id = usbd_get_interface_descriptor(uaa->iface); sc->sc_ctrl_iface_no = id->bInterfaceNumber; sc->sc_iface_no = uaa->info.bIfaceNum; sc->sc_iface_index = uaa->info.bIfaceIndex; /* Setup control pipe */ uerr = usbd_transfer_setup(uaa->device, &sc->sc_iface_index, sc->sc_ctrl_xfer, uhso_ctrl_config, UHSO_CTRL_MAX, sc, &sc->sc_mtx); if (uerr) { device_printf(self, "Failed to setup control pipe: %s\n", usbd_errstr(uerr)); goto out; } if (USB_GET_DRIVER_INFO(uaa) == UHSO_STATIC_IFACE) probe_f = uhso_probe_iface_static; else if (USB_GET_DRIVER_INFO(uaa) == UHSO_AUTO_IFACE) probe_f = uhso_probe_iface_auto; else goto out; error = uhso_probe_iface(sc, uaa->info.bIfaceNum, probe_f); if (error != 0) goto out; sctx = device_get_sysctl_ctx(sc->sc_dev); soid = device_get_sysctl_tree(sc->sc_dev); SYSCTL_ADD_STRING(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "type", CTLFLAG_RD, uhso_port[UHSO_IFACE_PORT(sc->sc_type)], 0, "Port available at this interface"); SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "radio", CTLTYPE_INT | CTLFLAG_RW, sc, 0, uhso_radio_sysctl, "I", "Enable radio"); /* * The default interface description on most Option devices isn't * very helpful. So we skip device_set_usb_desc and set the * device description manually. */ device_set_desc_copy(self, uhso_port_type[UHSO_IFACE_PORT_TYPE(sc->sc_type)]); /* Announce device */ device_printf(self, "<%s port> at <%s %s> on %s\n", uhso_port_type[UHSO_IFACE_PORT_TYPE(sc->sc_type)], usb_get_manufacturer(uaa->device), usb_get_product(uaa->device), device_get_nameunit(device_get_parent(self))); if (sc->sc_ttys > 0) { SYSCTL_ADD_INT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "ports", CTLFLAG_RD, &sc->sc_ttys, 0, "Number of attached serial ports"); tree = SYSCTL_ADD_NODE(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "port", CTLFLAG_RD, NULL, "Serial ports"); } /* * Loop through the number of found TTYs and create sysctl * nodes for them. */ for (i = 0; i < sc->sc_ttys; i++) { ht = &sc->sc_tty[i]; ucom = &sc->sc_ucom[i]; if (UHSO_IFACE_USB_TYPE(sc->sc_type) & UHSO_IF_MUX) port = uhso_mux_port_map[ht->ht_muxport]; else port = UHSO_IFACE_PORT_TYPE(sc->sc_type); desc = uhso_port_type_sysctl[port]; tty_node = SYSCTL_ADD_NODE(sctx, SYSCTL_CHILDREN(tree), OID_AUTO, desc, CTLFLAG_RD, NULL, ""); ht->ht_name[0] = 0; if (sc->sc_ttys == 1) snprintf(ht->ht_name, 32, "cuaU%d", ucom->sc_super->sc_unit); else { snprintf(ht->ht_name, 32, "cuaU%d.%d", ucom->sc_super->sc_unit, ucom->sc_subunit); } desc = uhso_port_type[port]; SYSCTL_ADD_STRING(sctx, SYSCTL_CHILDREN(tty_node), OID_AUTO, "tty", CTLFLAG_RD, ht->ht_name, 0, ""); SYSCTL_ADD_STRING(sctx, SYSCTL_CHILDREN(tty_node), OID_AUTO, "desc", CTLFLAG_RD, desc, 0, ""); if (bootverbose) device_printf(sc->sc_dev, "\"%s\" port at %s\n", desc, ht->ht_name); } return (0); out: uhso_detach(sc->sc_dev); return (ENXIO); }
int
mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring **pring,
    u32 size, u16 stride, int node, int queue_idx)
{
    struct mlx4_en_dev *mdev = priv->mdev;
    struct mlx4_en_tx_ring *ring;
    int tmp;
    int err;

    ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
    if (!ring) {
        ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
        if (!ring) {
            en_err(priv, "Failed allocating TX ring\n");
            return -ENOMEM;
        }
    }

    ring->size = size;
    ring->size_mask = size - 1;
    ring->stride = stride;
#ifdef CONFIG_RATELIMIT
    ring->rl_data.rate_index = 0;
    /*
     * user_valid should be false in a rate-limit ring until the ring's
     * creation process is done, i.e. after activation.
     */
    if (queue_idx < priv->native_tx_ring_num)
        ring->rl_data.user_valid = true;
    else
        ring->rl_data.user_valid = false;
#endif
    ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
    ring->inline_thold = min(inline_thold, MAX_INLINE);
    mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
    mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);

    /* Allocate the buf ring */
#ifdef CONFIG_RATELIMIT
    if (queue_idx < priv->native_tx_ring_num)
        ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
            M_WAITOK, &ring->tx_lock.m);
    else
        ring->br = buf_ring_alloc(size / 4, M_DEVBUF,
            M_WAITOK, &ring->tx_lock.m);
#else
    ring->br = buf_ring_alloc(MLX4_EN_DEF_TX_QUEUE_SIZE, M_DEVBUF,
        M_WAITOK, &ring->tx_lock.m);
#endif
    if (ring->br == NULL) {
        en_err(priv, "Failed allocating tx_info ring\n");
        return -ENOMEM;
    }

    tmp = size * sizeof(struct mlx4_en_tx_info);
    ring->tx_info = vmalloc_node(tmp, node);
    if (!ring->tx_info) {
        ring->tx_info = vmalloc(tmp);
        if (!ring->tx_info) {
            err = -ENOMEM;
            goto err_ring;
        }
    }

    en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
        ring->tx_info, tmp);

    ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
    if (!ring->bounce_buf) {
        ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
        if (!ring->bounce_buf) {
            err = -ENOMEM;
            goto err_info;
        }
    }
    ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

    /* Allocate HW buffers on provided NUMA node */
    err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
        2 * PAGE_SIZE);
    if (err) {
        en_err(priv, "Failed allocating hwq resources\n");
        goto err_bounce;
    }

    err = mlx4_en_map_buffer(&ring->wqres.buf);
    if (err) {
        en_err(priv, "Failed to map TX buffer\n");
        goto err_hwq_res;
    }

    ring->buf = ring->wqres.buf.direct.buf;

    en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
        "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
        ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

    err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
        MLX4_RESERVE_BF_QP);
    if (err) {
        en_err(priv, "failed reserving qp for TX ring\n");
        goto err_map;
    }

    err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
    if (err) {
        en_err(priv, "Failed allocating qp %d\n", ring->qpn);
        goto err_reserve;
    }
    ring->qp.event = mlx4_en_sqp_event;

    err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
    if (err) {
        en_dbg(DRV, priv, "working without blueflame (%d)", err);
        ring->bf.uar = &mdev->priv_uar;
        ring->bf.uar->map = mdev->uar_map;
        ring->bf_enabled = false;
    } else
        ring->bf_enabled = true;
    ring->queue_index = queue_idx;
    if (queue_idx < priv->num_tx_rings_p_up)
        CPU_SET(queue_idx, &ring->affinity_mask);

    *pring = ring;
    return 0;

err_reserve:
    mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
err_map:
    mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
    mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
    kfree(ring->bounce_buf);
err_info:
    vfree(ring->tx_info);
err_ring:
    buf_ring_free(ring->br, M_DEVBUF);
    kfree(ring);
    return err;
}
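/*
 * Minimal, stand-alone sketch of the goto-based error unwind used by the
 * TX-ring constructor above: each label releases only what was acquired
 * before the failing step, in reverse order.  The three malloc()ed blocks
 * are hypothetical stand-ins for the ring, tx_info and bounce buffers;
 * only the control flow mirrors the driver.
 */
#include <errno.h>
#include <stdlib.h>

static int
setup_sketch(void **a, void **b, void **c)
{
    int err;

    if ((*a = malloc(64)) == NULL)
        return (ENOMEM);
    if ((*b = malloc(64)) == NULL) {
        err = ENOMEM;
        goto err_a;            /* undo step 1 only */
    }
    if ((*c = malloc(64)) == NULL) {
        err = ENOMEM;
        goto err_b;            /* undo step 2, then step 1 */
    }
    return (0);

err_b:
    free(*b);
err_a:
    free(*a);
    return (err);
}

int
main(void)
{
    void *a, *b, *c;

    if (setup_sketch(&a, &b, &c) == 0) {
        free(c);
        free(b);
        free(a);
    }
    return (0);
}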
int
zstty_attach(device_t dev)
{
    struct zstty_softc *sc;
    struct tty *tp;
    char mode[32];
    int reset;
    int baud;
    int clen;
    char parity;
    int stop;
    char c;

    sc = device_get_softc(dev);
    mtx_init(&sc->sc_mtx, "zstty", NULL, MTX_SPIN);
    sc->sc_dev = dev;
    sc->sc_iput = sc->sc_iget = sc->sc_ibuf;
    sc->sc_oget = sc->sc_obuf;

    tp = ttymalloc(NULL);
    sc->sc_si = make_dev(&zstty_cdevsw, device_get_unit(dev),
        UID_ROOT, GID_WHEEL, 0600, "%s", device_get_desc(dev));
    sc->sc_si->si_drv1 = sc;
    sc->sc_si->si_tty = tp;
    tp->t_dev = sc->sc_si;
    sc->sc_tty = tp;

    tp->t_oproc = zsttystart;
    tp->t_param = zsttyparam;
    tp->t_stop = zsttystop;
    tp->t_iflag = TTYDEF_IFLAG;
    tp->t_oflag = TTYDEF_OFLAG;
    tp->t_lflag = TTYDEF_LFLAG;
    tp->t_cflag = CREAD | CLOCAL | CS8;
    tp->t_ospeed = TTYDEF_SPEED;
    tp->t_ispeed = TTYDEF_SPEED;

    if (zstty_console(dev, mode, sizeof(mode))) {
        ttychars(tp);
        /* format: 9600,8,n,1,- */
        if (sscanf(mode, "%d,%d,%c,%d,%c", &baud, &clen, &parity,
            &stop, &c) == 5) {
            tp->t_ospeed = baud;
            tp->t_ispeed = baud;
            tp->t_cflag = CREAD | CLOCAL;
            switch (clen) {
            case 5:
                tp->t_cflag |= CS5;
                break;
            case 6:
                tp->t_cflag |= CS6;
                break;
            case 7:
                tp->t_cflag |= CS7;
                break;
            case 8:
            default:
                tp->t_cflag |= CS8;
                break;
            }

            if (parity == 'e')
                tp->t_cflag |= PARENB;
            else if (parity == 'o')
                tp->t_cflag |= PARENB | PARODD;

            if (stop == 2)
                tp->t_cflag |= CSTOPB;
        }
        device_printf(dev, "console %s\n", mode);
        sc->sc_console = 1;
        zstty_cons = sc;
    } else {
        if ((device_get_unit(dev) & 1) == 0)
            reset = ZSWR9_A_RESET;
        else
            reset = ZSWR9_B_RESET;
        ZS_WRITE_REG(sc, 9, reset);
    }

    return (0);
}
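/*
 * The console path above recovers line settings from a mode string of the
 * form "9600,8,n,1,-".  A minimal stand-alone sketch of that parse,
 * runnable in userland; the line_params struct and main() harness are
 * illustrative only and not part of the driver.
 */
#include <stdio.h>

struct line_params {
    int baud, clen, stop;
    char parity, handshake;
};

/* Returns 0 on success, -1 if the string does not match the format. */
static int
parse_console_mode(const char *mode, struct line_params *lp)
{
    if (sscanf(mode, "%d,%d,%c,%d,%c", &lp->baud, &lp->clen,
        &lp->parity, &lp->stop, &lp->handshake) != 5)
        return (-1);
    return (0);
}

int
main(void)
{
    struct line_params lp;

    if (parse_console_mode("9600,8,n,1,-", &lp) == 0)
        printf("%d baud, %d bits, parity '%c', %d stop\n",
            lp.baud, lp.clen, lp.parity, lp.stop);
    return (0);
}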
static void *
nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg)
{
    uint8_t descr[NVME_MODEL_NUMBER_LENGTH + 1];
    struct nvd_disk *ndisk;
    struct disk *disk;
    struct nvd_controller *ctrlr = ctrlr_arg;

    ndisk = malloc(sizeof(struct nvd_disk), M_NVD, M_ZERO | M_WAITOK);

    disk = disk_alloc();
    disk->d_strategy = nvd_strategy;
    disk->d_ioctl = nvd_ioctl;
    disk->d_name = NVD_STR;
    disk->d_drv1 = ndisk;
    disk->d_maxsize = nvme_ns_get_max_io_xfer_size(ns);
    disk->d_sectorsize = nvme_ns_get_sector_size(ns);
    disk->d_mediasize = (off_t)nvme_ns_get_size(ns);

    if (TAILQ_EMPTY(&disk_head))
        disk->d_unit = 0;
    else
        disk->d_unit =
            TAILQ_LAST(&disk_head, disk_list)->disk->d_unit + 1;

    disk->d_flags = 0;

    if (nvme_ns_get_flags(ns) & NVME_NS_DEALLOCATE_SUPPORTED)
        disk->d_flags |= DISKFLAG_CANDELETE;

    if (nvme_ns_get_flags(ns) & NVME_NS_FLUSH_SUPPORTED)
        disk->d_flags |= DISKFLAG_CANFLUSHCACHE;

/* ifdef used here to ease porting to stable branches at a later point. */
#ifdef DISKFLAG_UNMAPPED_BIO
    disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
#endif

    /*
     * d_ident and d_descr are both far bigger than the length of either
     * the serial or model number strings.
     */
    nvme_strvis(disk->d_ident, nvme_ns_get_serial_number(ns),
        sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH);
    nvme_strvis(descr, nvme_ns_get_model_number(ns), sizeof(descr),
        NVME_MODEL_NUMBER_LENGTH);

#if __FreeBSD_version >= 900034
    strlcpy(disk->d_descr, descr, sizeof(descr));
#endif

    ndisk->ns = ns;
    ndisk->disk = disk;
    ndisk->cur_depth = 0;

    mtx_init(&ndisk->bioqlock, "NVD bioq lock", NULL, MTX_DEF);
    bioq_init(&ndisk->bioq);

    TASK_INIT(&ndisk->bioqtask, 0, nvd_bioq_process, ndisk);
    ndisk->tq = taskqueue_create("nvd_taskq", M_WAITOK,
        taskqueue_thread_enqueue, &ndisk->tq);
    taskqueue_start_threads(&ndisk->tq, 1, PI_DISK, "nvd taskq");

    TAILQ_INSERT_TAIL(&disk_head, ndisk, global_tailq);
    TAILQ_INSERT_TAIL(&ctrlr->disk_head, ndisk, ctrlr_tailq);

    disk_create(disk, DISK_VERSION);

    printf(NVD_STR"%u: <%s> NVMe namespace\n", disk->d_unit, descr);
    printf(NVD_STR"%u: %juMB (%ju %u byte sectors)\n", disk->d_unit,
        (uintmax_t)disk->d_mediasize / (1024*1024),
        (uintmax_t)disk->d_mediasize / disk->d_sectorsize,
        disk->d_sectorsize);

    return (NULL);
}
static int
fwohci_pci_attach(device_t self)
{
    fwohci_softc_t *sc = device_get_softc(self);
    int err;
    int rid;
#if defined(__DragonFly__) || __FreeBSD_version < 500000
    int intr;
    /* For the moment, put in a message stating what is wrong */
    intr = pci_read_config(self, PCIR_INTLINE, 1);
    if (intr == 0 || intr == 255) {
        device_printf(self, "Invalid irq %d\n", intr);
#ifdef __i386__
        device_printf(self, "Please switch PNP-OS to 'No' in BIOS\n");
#endif
    }
#endif

#if 0
    if (bootverbose)
        firewire_debug = bootverbose;
#endif

    mtx_init(FW_GMTX(&sc->fc), "firewire", NULL, MTX_DEF);
    fwohci_pci_init(self);

    rid = PCI_CBMEM;
#if __FreeBSD_version >= 502109
    sc->bsr = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
#else
    sc->bsr = bus_alloc_resource(self, SYS_RES_MEMORY, &rid,
        0, ~0, 1, RF_ACTIVE);
#endif
    if (!sc->bsr) {
        device_printf(self, "Could not map memory\n");
        return ENXIO;
    }

    sc->bst = rman_get_bustag(sc->bsr);
    sc->bsh = rman_get_bushandle(sc->bsr);

    rid = 0;
#if __FreeBSD_version >= 502109
    sc->irq_res = bus_alloc_resource_any(self, SYS_RES_IRQ, &rid,
        RF_SHAREABLE | RF_ACTIVE);
#else
    sc->irq_res = bus_alloc_resource(self, SYS_RES_IRQ, &rid, 0, ~0, 1,
        RF_SHAREABLE | RF_ACTIVE);
#endif
    if (sc->irq_res == NULL) {
        device_printf(self, "Could not allocate irq\n");
        fwohci_pci_detach(self);
        return ENXIO;
    }

    err = bus_setup_intr(self, sc->irq_res,
        INTR_TYPE_NET | INTR_MPSAFE,
        NULL, (driver_intr_t *) fwohci_intr,
        sc, &sc->ih);
#if defined(__DragonFly__) || __FreeBSD_version < 500000
    /* XXX splcam() should mask this irq for sbp.c */
    err = bus_setup_intr(self, sc->irq_res, INTR_TYPE_CAM,
        (driver_intr_t *) fwohci_dummy_intr,
        sc, &sc->ih_cam);
    /* XXX splbio() should mask this irq for physio()/fwmem_strategy() */
    err = bus_setup_intr(self, sc->irq_res, INTR_TYPE_BIO,
        (driver_intr_t *) fwohci_dummy_intr,
        sc, &sc->ih_bio);
#endif
    if (err) {
        device_printf(self, "Could not setup irq, %d\n", err);
        fwohci_pci_detach(self);
        return ENXIO;
    }

    err = bus_dma_tag_create(
#if defined(__FreeBSD__) && __FreeBSD_version >= 700020
        /*parent*/bus_get_dma_tag(self),
#else
        /*parent*/NULL,
#endif
        /*alignment*/1,
        /*boundary*/0,
#if BOUNCE_BUFFER_TEST
        /*lowaddr*/BUS_SPACE_MAXADDR_24BIT,
#else
        /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
#endif
        /*highaddr*/BUS_SPACE_MAXADDR,
        /*filter*/NULL, /*filterarg*/NULL,
        /*maxsize*/0x100000,
        /*nsegments*/0x20,
        /*maxsegsz*/0x8000,
        /*flags*/BUS_DMA_ALLOCNOW,
#if defined(__FreeBSD__) && __FreeBSD_version >= 501102
        /*lockfunc*/busdma_lock_mutex,
        /*lockarg*/FW_GMTX(&sc->fc),
#endif
        &sc->fc.dmat);
    if (err != 0) {
        printf("fwohci_pci_attach: Could not allocate DMA tag "
            "- error %d\n", err);
        return (ENOMEM);
    }

    err = fwohci_init(sc, self);
    if (err) {
        device_printf(self, "fwohci_init failed with err=%d\n", err);
        fwohci_pci_detach(self);
        return EIO;
    }

    /* probe and attach a child device (firewire) */
    bus_generic_probe(self);
    bus_generic_attach(self);

    return 0;
}
static int
fwe_attach(device_t dev)
{
    struct fwe_softc *fwe;
    struct ifnet *ifp;
    int unit, s;
#if defined(__DragonFly__) || __FreeBSD_version < 500000
    u_char *eaddr;
#else
    u_char eaddr[6];
#endif
    struct fw_eui64 *eui;

    fwe = ((struct fwe_softc *)device_get_softc(dev));
    unit = device_get_unit(dev);

    bzero(fwe, sizeof(struct fwe_softc));
    mtx_init(&fwe->mtx, "fwe", NULL, MTX_DEF);

    /* XXX */
    fwe->stream_ch = stream_ch;
    fwe->dma_ch = -1;

    fwe->fd.fc = device_get_ivars(dev);
    if (tx_speed < 0)
        tx_speed = fwe->fd.fc->speed;

    fwe->fd.dev = dev;
    fwe->fd.post_explore = NULL;
    fwe->eth_softc.fwe = fwe;

    fwe->pkt_hdr.mode.stream.tcode = FWTCODE_STREAM;
    fwe->pkt_hdr.mode.stream.sy = 0;
    fwe->pkt_hdr.mode.stream.chtag = fwe->stream_ch;

    /* generate fake MAC address: first and last 3 bytes from eui64 */
#define LOCAL (0x02)
#define GROUP (0x01)

#if defined(__DragonFly__) || __FreeBSD_version < 500000
    eaddr = &IFP2ENADDR(fwe->eth_softc.ifp)[0];
#endif

    eui = &fwe->fd.fc->eui;
    eaddr[0] = (FW_EUI64_BYTE(eui, 0) | LOCAL) & ~GROUP;
    eaddr[1] = FW_EUI64_BYTE(eui, 1);
    eaddr[2] = FW_EUI64_BYTE(eui, 2);
    eaddr[3] = FW_EUI64_BYTE(eui, 5);
    eaddr[4] = FW_EUI64_BYTE(eui, 6);
    eaddr[5] = FW_EUI64_BYTE(eui, 7);
    printf("if_fwe%d: Fake Ethernet address: "
        "%02x:%02x:%02x:%02x:%02x:%02x\n", unit,
        eaddr[0], eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);

    /* fill the rest and attach interface */
    ifp = fwe->eth_softc.ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL) {
        device_printf(dev, "can not if_alloc()\n");
        return (ENOSPC);
    }
    ifp->if_softc = &fwe->eth_softc;

#if __FreeBSD_version >= 501113 || defined(__DragonFly__)
    if_initname(ifp, device_get_name(dev), unit);
#else
    ifp->if_unit = unit;
    ifp->if_name = "fwe";
#endif
    ifp->if_init = fwe_init;
#if defined(__DragonFly__) || __FreeBSD_version < 500000
    ifp->if_output = ether_output;
#endif
    ifp->if_start = fwe_start;
    ifp->if_ioctl = fwe_ioctl;
    ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
    ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE;

    s = splimp();
#if defined(__DragonFly__) || __FreeBSD_version < 500000
    ether_ifattach(ifp, 1);
#else
    ether_ifattach(ifp, eaddr);
#endif
    splx(s);

    /* Tell the upper layer(s) we support long frames. */
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
    ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_POLLING;
    ifp->if_capenable |= IFCAP_VLAN_MTU;
#endif

    FWEDEBUG(ifp, "interface created\n");
    return 0;
}
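/*
 * fwe_attach() derives its fake Ethernet address from the node's EUI-64:
 * the first and last three bytes, with the locally-administered bit set
 * and the group bit cleared in the first octet.  A minimal stand-alone
 * sketch of that derivation; the plain byte arrays, MAC_* names and the
 * main() harness are illustrative and not the firewire structures.
 */
#include <stdint.h>
#include <stdio.h>

#define MAC_LOCAL   0x02    /* locally administered address bit */
#define MAC_GROUP   0x01    /* multicast/group bit */

/* Build a 6-byte MAC from an 8-byte EUI-64: first and last three bytes. */
static void
eui64_to_fake_mac(const uint8_t eui[8], uint8_t mac[6])
{
    mac[0] = (eui[0] | MAC_LOCAL) & ~MAC_GROUP;
    mac[1] = eui[1];
    mac[2] = eui[2];
    mac[3] = eui[5];
    mac[4] = eui[6];
    mac[5] = eui[7];
}

int
main(void)
{
    const uint8_t eui[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
    uint8_t mac[6];

    eui64_to_fake_mac(eui, mac);
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
        mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return (0);
}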
static int
intsmb_attach(device_t dev)
{
    struct intsmb_softc *sc = device_get_softc(dev);
    int error, rid, value;
    int intr;
    char *str;

    sc->dev = dev;

    mtx_init(&sc->lock, device_get_nameunit(dev), "intsmb", MTX_DEF);

    sc->cfg_irq9 = 0;
    switch (pci_get_devid(dev)) {
#ifndef NO_CHANGE_PCICONF
    case 0x71138086:    /* Intel 82371AB */
    case 0x719b8086:    /* Intel 82443MX */
        /* Changing configuration is allowed. */
        sc->cfg_irq9 = 1;
        break;
#endif
    case 0x43851002:
    case 0x780b1022:
        if (pci_get_revid(dev) >= 0x40)
            sc->sb8xx = 1;
        break;
    }

    if (sc->sb8xx) {
        error = sb8xx_attach(dev);
        if (error != 0)
            goto fail;
        else
            goto no_intr;
    }

    sc->io_rid = PCI_BASE_ADDR_SMB;
    sc->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->io_rid,
        RF_ACTIVE);
    if (sc->io_res == NULL) {
        device_printf(dev, "Could not allocate I/O space\n");
        error = ENXIO;
        goto fail;
    }

    if (sc->cfg_irq9) {
        pci_write_config(dev, PCIR_INTLINE, 0x9, 1);
        pci_write_config(dev, PCI_HST_CFG_SMB,
            PCI_INTR_SMB_IRQ9 | PCI_INTR_SMB_ENABLE, 1);
    }
    value = pci_read_config(dev, PCI_HST_CFG_SMB, 1);
    sc->poll = (value & PCI_INTR_SMB_ENABLE) == 0;
    intr = value & PCI_INTR_SMB_MASK;
    switch (intr) {
    case PCI_INTR_SMB_SMI:
        str = "SMI";
        break;
    case PCI_INTR_SMB_IRQ9:
        str = "IRQ 9";
        break;
    case PCI_INTR_SMB_IRQ_PCI:
        str = "PCI IRQ";
        break;
    default:
        str = "BOGUS";
    }

    device_printf(dev, "intr %s %s ", str,
        sc->poll == 0 ? "enabled" : "disabled");
    printf("revision %d\n", pci_read_config(dev, PCI_REVID_SMB, 1));

    if (!sc->poll && intr == PCI_INTR_SMB_SMI) {
        device_printf(dev,
            "using polling mode when configured interrupt is SMI\n");
        sc->poll = 1;
    }

    if (sc->poll)
        goto no_intr;

    if (intr != PCI_INTR_SMB_IRQ9 && intr != PCI_INTR_SMB_IRQ_PCI) {
        device_printf(dev, "Unsupported interrupt mode\n");
        error = ENXIO;
        goto fail;
    }

    /* Force IRQ 9. */
    rid = 0;
    if (sc->cfg_irq9)
        bus_set_resource(dev, SYS_RES_IRQ, rid, 9, 1);

    sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
        RF_SHAREABLE | RF_ACTIVE);
    if (sc->irq_res == NULL) {
        device_printf(dev, "Could not allocate irq\n");
        error = ENXIO;
        goto fail;
    }

    error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
        NULL, intsmb_rawintr, sc, &sc->irq_hand);
    if (error) {
        device_printf(dev, "Failed to map intr\n");
        goto fail;
    }

no_intr:
    sc->isbusy = 0;
    sc->smbus = device_add_child(dev, "smbus", -1);
    if (sc->smbus == NULL) {
        error = ENXIO;
        goto fail;
    }
    error = device_probe_and_attach(sc->smbus);
    if (error)
        goto fail;

#ifdef ENABLE_ALART
    /* Enable the ALART (slave alert) interrupt. */
    bus_write_1(sc->io_res, PIIX4_SMBSLVCNT, PIIX4_SMBSLVCNT_ALTEN);
#endif
    return (0);

fail:
    intsmb_detach(dev);
    return (error);
}
static int
ncv_alloc_resource(device_t dev)
{
    struct ncv_softc *sc = device_get_softc(dev);
    u_int32_t flags = device_get_flags(dev);
    rman_res_t ioaddr, iosize, maddr, msize;
    int error;
    bus_addr_t offset = 0;

    if (flags & KME_KXLC004_01)
        offset = OFFSET_KME_KXLC004_01;

    error = bus_get_resource(dev, SYS_RES_IOPORT, 0, &ioaddr, &iosize);
    if (error || (iosize < (offset + NCVIOSZ))) {
        return (ENOMEM);
    }

    mtx_init(&sc->sc_sclow.sl_lock, "ncv", NULL, MTX_DEF);
    sc->port_rid = 0;
    sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->port_rid,
        ioaddr + offset, ioaddr + iosize - offset,
        iosize - offset, RF_ACTIVE);
    if (sc->port_res == NULL) {
        ncv_release_resource(dev);
        return (ENOMEM);
    }

    if (offset != 0) {
        sc->port_rid_dmy = 0;
        sc->port_res_dmy = bus_alloc_resource(dev, SYS_RES_IOPORT,
            &sc->port_rid_dmy, ioaddr, ioaddr + offset, offset, RF_ACTIVE);
        if (sc->port_res_dmy == NULL) {
            printf("Warning: cannot allocate IOPORT partially.\n");
        }
    } else {
        sc->port_rid_dmy = 0;
        sc->port_res_dmy = NULL;
    }

    sc->irq_rid = 0;
    sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
        RF_ACTIVE);
    if (sc->irq_res == NULL) {
        ncv_release_resource(dev);
        return (ENOMEM);
    }

    error = bus_get_resource(dev, SYS_RES_MEMORY, 0, &maddr, &msize);
    if (error) {
        return (0);    /* XXX */
    }

    /* no need to allocate memory if not configured */
    if (maddr == 0 || msize == 0) {
        return (0);
    }

    sc->mem_rid = 0;
    sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
        RF_ACTIVE);
    if (sc->mem_res == NULL) {
        ncv_release_resource(dev);
        return (ENOMEM);
    }

    return (0);
}
int
smc_attach(device_t dev)
{
    int type, error;
    uint16_t val;
    u_char eaddr[ETHER_ADDR_LEN];
    struct smc_softc *sc;
    struct ifnet *ifp;

    sc = device_get_softc(dev);
    error = 0;

    sc->smc_dev = dev;

    ifp = sc->smc_ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL) {
        error = ENOSPC;
        goto done;
    }

    mtx_init(&sc->smc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

    /* Set up watchdog callout. */
    callout_init_mtx(&sc->smc_watchdog, &sc->smc_mtx, 0);

    type = SYS_RES_IOPORT;
    if (sc->smc_usemem)
        type = SYS_RES_MEMORY;

    sc->smc_reg_rid = 0;
    sc->smc_reg = bus_alloc_resource(dev, type, &sc->smc_reg_rid, 0, ~0,
        16, RF_ACTIVE);
    if (sc->smc_reg == NULL) {
        error = ENXIO;
        goto done;
    }

    sc->smc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->smc_irq_rid, 0,
        ~0, 1, RF_ACTIVE | RF_SHAREABLE);
    if (sc->smc_irq == NULL) {
        error = ENXIO;
        goto done;
    }

    SMC_LOCK(sc);
    smc_reset(sc);
    SMC_UNLOCK(sc);

    smc_select_bank(sc, 3);
    val = smc_read_2(sc, REV);
    sc->smc_chip = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT;
    sc->smc_rev = (val & REV_REV_MASK) >> REV_REV_SHIFT;
    if (bootverbose)
        device_printf(dev, "revision %x\n", sc->smc_rev);

    callout_init_mtx(&sc->smc_mii_tick_ch, &sc->smc_mtx,
        CALLOUT_RETURNUNLOCKED);
    if (sc->smc_chip >= REV_CHIP_91110FD) {
        (void)mii_attach(dev, &sc->smc_miibus, ifp,
            smc_mii_ifmedia_upd, smc_mii_ifmedia_sts, BMSR_DEFCAPMASK,
            MII_PHY_ANY, MII_OFFSET_ANY, 0);
        if (sc->smc_miibus != NULL) {
            sc->smc_mii_tick = smc_mii_tick;
            sc->smc_mii_mediachg = smc_mii_mediachg;
            sc->smc_mii_mediaioctl = smc_mii_mediaioctl;
        }
    }

    smc_select_bank(sc, 1);
    eaddr[0] = smc_read_1(sc, IAR0);
    eaddr[1] = smc_read_1(sc, IAR1);
    eaddr[2] = smc_read_1(sc, IAR2);
    eaddr[3] = smc_read_1(sc, IAR3);
    eaddr[4] = smc_read_1(sc, IAR4);
    eaddr[5] = smc_read_1(sc, IAR5);

    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_softc = sc;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_init = smc_init;
    ifp->if_ioctl = smc_ioctl;
    ifp->if_start = smc_start;
    IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
    IFQ_SET_READY(&ifp->if_snd);

    ifp->if_capabilities = ifp->if_capenable = 0;

#ifdef DEVICE_POLLING
    ifp->if_capabilities |= IFCAP_POLLING;
#endif

    ether_ifattach(ifp, eaddr);

    /* Set up taskqueue */
    TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp);
    TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp);
    TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp);
    sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT,
        taskqueue_thread_enqueue, &sc->smc_tq);
    taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq",
        device_get_nameunit(sc->smc_dev));

    /* Mask all interrupts. */
    sc->smc_mask = 0;
    smc_write_1(sc, MSK, 0);

    /* Wire up interrupt */
    error = bus_setup_intr(dev, sc->smc_irq, INTR_TYPE_NET|INTR_MPSAFE,
        smc_intr, NULL, sc, &sc->smc_ih);
    if (error != 0)
        goto done;

done:
    if (error != 0)
        smc_detach(dev);

    return (error);
}
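/*
 * The chip/revision decode above pulls bit fields out of the REV register
 * by masking and then shifting.  A minimal stand-alone sketch of that
 * pattern; the EX_* field layout below (chip id in bits 7:4, revision in
 * bits 3:0) is assumed for illustration only, the real REV_* constants
 * live in the smc register header.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_CHIP_MASK    0x00f0
#define EX_CHIP_SHIFT   4
#define EX_REV_MASK     0x000f
#define EX_REV_SHIFT    0

static void
decode_rev(uint16_t val, uint8_t *chip, uint8_t *rev)
{
    *chip = (val & EX_CHIP_MASK) >> EX_CHIP_SHIFT;
    *rev = (val & EX_REV_MASK) >> EX_REV_SHIFT;
}

int
main(void)
{
    uint8_t chip, rev;

    decode_rev(0x0091, &chip, &rev);
    printf("chip %u revision %u\n", chip, rev);
    return (0);
}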
void
vmbus_rxbr_init(struct vmbus_rxbr *rbr)
{

    mtx_init(&rbr->rxbr_lock, "vmbus_rxbr", NULL, MTX_SPIN);
}
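/*
 * vmbus_rxbr_init() only sets up the spin mutex protecting the receive
 * ring buffer.  The routine below is a sketch of the matching teardown,
 * assuming the driver pairs every mtx_init() with an mtx_destroy() on
 * detach; the function name is illustrative, not necessarily the one the
 * driver uses.
 */
void
vmbus_rxbr_deinit_sketch(struct vmbus_rxbr *rbr)
{

    mtx_destroy(&rbr->rxbr_lock);
}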
static int
octe_attach(device_t dev)
{
    struct ifnet *ifp;
    cvm_oct_private_t *priv;
    device_t child;
    unsigned qos;
    int error;

    priv = device_get_softc(dev);
    ifp = priv->ifp;

    if_initname(ifp, device_get_name(dev), device_get_unit(dev));

    if (priv->phy_id != -1) {
        if (priv->phy_device == NULL) {
            error = mii_attach(dev, &priv->miibus, ifp,
                octe_mii_medchange, octe_mii_medstat,
                BMSR_DEFCAPMASK, priv->phy_id, MII_OFFSET_ANY, 0);
            if (error != 0)
                device_printf(dev, "attaching PHYs failed\n");
        } else {
            child = device_add_child(dev, priv->phy_device, -1);
            if (child == NULL)
                device_printf(dev, "missing phy %u device %s\n",
                    priv->phy_id, priv->phy_device);
        }
    }

    if (priv->miibus == NULL) {
        ifmedia_init(&priv->media, 0, octe_medchange, octe_medstat);

        ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
    }

    /*
     * XXX
     * We don't support programming the multicast filter right now,
     * although it ought to be easy enough.  (Presumably it's just a
     * matter of putting multicast addresses in the CAM?)
     */
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
        IFF_ALLMULTI;
    ifp->if_init = octe_init;
    ifp->if_ioctl = octe_ioctl;

    priv->if_flags = ifp->if_flags;

    mtx_init(&priv->tx_mtx, ifp->if_xname, "octe tx send queue", MTX_DEF);

    for (qos = 0; qos < 16; qos++) {
        mtx_init(&priv->tx_free_queue[qos].ifq_mtx, ifp->if_xname,
            "octe tx free queue", MTX_DEF);
        IFQ_SET_MAXLEN(&priv->tx_free_queue[qos], MAX_OUT_QUEUE_DEPTH);
    }

    ether_ifattach(ifp, priv->mac);

    ifp->if_transmit = octe_transmit;

    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
    ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM;
    ifp->if_capenable = ifp->if_capabilities;
    ifp->if_hwassist = CSUM_TCP | CSUM_UDP;

    OCTE_TX_LOCK(priv);
    IFQ_SET_MAXLEN(&ifp->if_snd, MAX_OUT_QUEUE_DEPTH);
    ifp->if_snd.ifq_drv_maxlen = MAX_OUT_QUEUE_DEPTH;
    IFQ_SET_READY(&ifp->if_snd);
    OCTE_TX_UNLOCK(priv);

    return (bus_generic_attach(dev));
}