/*
 * soget:
 *
 *	Allocate a socket structure from the socket pool cache and
 *	initialize it.  If "waitok" is true the allocation may sleep;
 *	otherwise NULL is returned when no memory is available.
 */
struct socket *
soget(bool waitok)
{
	struct socket *so;

	so = pool_cache_get(socket_cache, (waitok ? PR_WAITOK : PR_NOWAIT));
	if (__predict_false(so == NULL))
		return (NULL);

	memset(so, 0, sizeof(*so));
	TAILQ_INIT(&so->so_q0);
	TAILQ_INIT(&so->so_q);
#if 0
	cv_init(&so->so_cv, "socket");
	cv_init(&so->so_rcv.sb_cv, "netio");
	cv_init(&so->so_snd.sb_cv, "netio");
#endif
#if 0 /* VADIM */
	selinit(&so->so_rcv.sb_sel);
	selinit(&so->so_snd.sb_sel);
#else
	/*
	 * NOTE(review): selinit() is deliberately skipped here; the
	 * buffers are instead marked SB_NOTIFY directly.
	 */
	so->so_rcv.sb_flags |= SB_NOTIFY;
	so->so_snd.sb_flags |= SB_NOTIFY;
#endif
	/* Back-pointers from each sockbuf to its owning socket. */
	so->so_rcv.sb_so = so;
	so->so_snd.sb_so = so;
	return so;
}
/*
 * tun_clone_create:
 *
 *	if_clone handler: create (or revive a previously destroyed)
 *	tun(4) instance for "unit", attach its ifnet and insert it
 *	into the global softc list.  Always returns 0.
 */
static int
tun_clone_create(struct if_clone *ifc, int unit)
{
	struct tun_softc *tp;

	if ((tp = tun_find_zunit(unit)) == NULL) {
		/* Allocate a new instance */
		tp = malloc(sizeof(*tp), M_DEVBUF, M_WAITOK|M_ZERO);
		tp->tun_unit = unit;
		mutex_init(&tp->tun_lock, MUTEX_DEFAULT, IPL_NET);
		selinit(&tp->tun_rsel);
		selinit(&tp->tun_wsel);
	} else {
		/* Revive tunnel instance; clear ifp part */
		(void)memset(&tp->tun_if, 0, sizeof(struct ifnet));
	}

	if_initname(&tp->tun_if, ifc->ifc_name, unit);
	tunattach0(tp);
	tp->tun_flags |= TUN_INITED;
	/* Soft interrupts used to defer output/input processing. */
	tp->tun_osih = softint_establish(SOFTINT_CLOCK, tun_o_softintr, tp);
	tp->tun_isih = softint_establish(SOFTINT_CLOCK, tun_i_softintr, tp);

	simple_lock(&tun_softc_lock);
	LIST_INSERT_HEAD(&tun_softc_list, tp, tun_list);
	simple_unlock(&tun_softc_lock);

	return (0);
}
/*
 * aedattach:
 *
 *	Autoconf attach for the ADB event device.  Records the ADB
 *	addressing information from the attach args and resets the
 *	event queue and autorepeat state to their defaults.
 */
static void
aedattach(device_t parent, device_t self, void *aux)
{
	struct aed_softc *sc = device_private(self);
	struct adb_attach_args *aa = (struct adb_attach_args *)aux;

	callout_init(&sc->sc_repeat_ch, 0);
	selinit(&sc->sc_selinfo);

	/* Record ADB addressing information. */
	sc->origaddr = aa->origaddr;
	sc->adbaddr = aa->adbaddr;
	sc->handler_id = aa->handler_id;

	/* Empty event queue; default autorepeat timing. */
	sc->sc_evq_tail = 0;
	sc->sc_evq_len = 0;
	sc->sc_rptdelay = 20;
	sc->sc_rptinterval = 6;
	sc->sc_repeating = -1;		/* not repeating */

	/* Pull in the options flags. */
	sc->sc_options = (device_cfdata(self)->cf_flags | aed_options);

	sc->sc_ioproc = NULL;
	sc->sc_buttons = 0;
	sc->sc_open = 0;

	aed_sc = sc;
	printf("ADB Event device\n");
}
/*
 * xenevtopen:
 *
 *	Device switch open routine for the xenevt device; this is where
 *	we clone.  Opening DEV_EVT allocates a per-open state structure
 *	and attaches it to a fresh file descriptor via fd_clone();
 *	DEV_XSD needs no per-open state.
 */
int
xenevtopen(dev_t dev, int flags, int mode, struct lwp *l)
{
	struct xenevt_d *d;
	struct file *fp;
	int fd, error;

	switch(minor(dev)) {
	case DEV_EVT:
		/* falloc() will use the descriptor for us. */
		if ((error = fd_allocfile(&fp, &fd)) != 0)
			return error;

		d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK | M_ZERO);
		/* Events are initially bound to the primary CPU. */
		d->ci = &cpu_info_primary;
		mutex_init(&d->lock, MUTEX_DEFAULT, IPL_HIGH);
		cv_init(&d->cv, "xenevt");
		selinit(&d->sel);
		return fd_clone(fp, fd, flags, &xenevt_fileops, d);

	case DEV_XSD:
		/* no clone for /dev/xsd_kva */
		return (0);

	default:
		break;
	}
	return ENODEV;
}
/*
 * panel_attach:
 *
 *	Attach the front-panel LCD/keypad: map the HD44780 registers,
 *	attach the LCD and keypad sub-drivers, and display the startup
 *	message.
 */
static void
panel_attach(device_t parent, device_t self, void *aux)
{
	struct panel_softc *sc = device_private(self);
	struct mainbus_attach_args *maa = aux;
	struct hd44780_io io;
	/* Keypad scan-code to character translation table. */
	static struct lcdkp_xlate keys[] = {
		{ 0xfa, 'h' },
		{ 0xf6, 'k' },
		{ 0xde, 'l' },
		{ 0xee, 'j' },
		{ 0x7e, 's' },
		{ 0xbe, 'e' }
	};

	sc->sc_lcd.sc_dev = self;
	sc->sc_lcd.sc_iot = maa->ma_iot;
	if (bus_space_map(sc->sc_lcd.sc_iot, maa->ma_addr, PANEL_REGION,
	    0, &sc->sc_lcd.sc_ioir)) {
		aprint_error(": unable to map registers\n");
		return;
	}
	/* The data register lives DATA_OFFSET bytes into the region. */
	bus_space_subregion(sc->sc_lcd.sc_iot, sc->sc_lcd.sc_ioir,
	    DATA_OFFSET, 1, &sc->sc_lcd.sc_iodr);

	printf("\n");

	sc->sc_lcd.sc_dev_ok = 1;
	sc->sc_lcd.sc_cols = PANEL_COLS;
	sc->sc_lcd.sc_vcols = PANEL_VCOLS;
	sc->sc_lcd.sc_flags = HD_8BIT | HD_MULTILINE | HD_KEYPAD;

	sc->sc_lcd.sc_writereg = panel_cbt_hdwritereg;
	sc->sc_lcd.sc_readreg = panel_cbt_hdreadreg;

	hd44780_attach_subr(&sc->sc_lcd);

	/* Hello World */
	io.dat = 0;
	io.len = PANEL_VCOLS * PANEL_ROWS;
	/* NOTE(review): assumes startup_message holds >= io.len bytes — confirm. */
	memcpy(io.buf, &startup_message, io.len);
	hd44780_ddram_io(&sc->sc_lcd, sc->sc_lcd.sc_curchip, &io,
	    HD_DDRAM_WRITE);

	pmf_device_register1(self, NULL, NULL, panel_shutdown);

	sc->sc_kp.sc_iot = maa->ma_iot;
	sc->sc_kp.sc_ioh = MIPS_PHYS_TO_KSEG1(PANEL_BASE); /* XXX */
	sc->sc_kp.sc_knum = sizeof(keys) / sizeof(struct lcdkp_xlate);
	sc->sc_kp.sc_kpad = keys;
	sc->sc_kp.sc_rread = panel_cbt_kprread;

	lcdkp_attach_subr(&sc->sc_kp);

	callout_init(&sc->sc_callout, 0);
	selinit(&sc->sc_selq);
}
/*
 * ev_init --
 *	Set up a firm_event queue: allocate a zeroed ring of EV_QSIZE
 *	events and initialize the selector, lock pointer and condition
 *	variable.
 */
void
ev_init(struct evvar *ev, const char *name, kmutex_t *mtx)
{
	ev->ev_get = 0;
	ev->ev_put = 0;
	ev->ev_lock = mtx;
	ev->ev_q = kmem_zalloc((size_t)EV_QSIZE * sizeof(struct firm_event),
	    KM_SLEEP);
	selinit(&ev->ev_sel);
	cv_init(&ev->ev_cv, name);
}
/*
 * cprng_strong_create:
 *
 *	Allocate and initialize a strong CPRNG instance named "name",
 *	with its lock at IPL "ipl", seeded from the rndsink.  May print
 *	a warning if only partial entropy was available at creation.
 */
struct cprng_strong *
cprng_strong_create(const char *name, int ipl, int flags)
{
	const uint32_t cc = cprng_counter();
	struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng),
	    KM_SLEEP);

	/*
	 * rndsink_request takes a spin lock at IPL_VM, so we can be no
	 * higher than that.
	 */
	KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH);

	/* Initialize the easy fields. */
	(void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name));
	cprng->cs_flags = flags;
	mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl);
	cv_init(&cprng->cs_cv, cprng->cs_name);
	selinit(&cprng->cs_selq);
	cprng->cs_rndsink = rndsink_create(NIST_BLOCK_KEYLEN_BYTES,
	    &cprng_strong_rndsink_callback, cprng);

	/* Get some initial entropy.  Record whether it is full entropy. */
	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];
	mutex_enter(&cprng->cs_lock);
	cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	if (nist_ctr_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed),
		&cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name)))
		/* XXX Fix nist_ctr_drbg API so this can't happen. */
		panic("cprng %s: NIST CTR_DRBG instantiation failed",
		    cprng->cs_name);
	/* Wipe the seed so it cannot linger in memory. */
	explicit_memset(seed, 0, sizeof(seed));

	/* CPRNG_HARD limits output per request to one key's worth. */
	if (ISSET(flags, CPRNG_HARD))
		cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
	else
		cprng->cs_remaining = 0;

	if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY))
		printf("cprng %s: creating with partial entropy\n",
		    cprng->cs_name);
	mutex_exit(&cprng->cs_lock);

	return cprng;
}
/*
 * isdnbchanattach:
 *
 *	Attach the isdnbchan pseudo-driver as an ISDN layer-4 driver
 *	and initialize the per-unit softc state for all NISDNBCHAN
 *	units.
 *
 *	NOTE(review): the return-type/prototype block matching the
 *	leading "#endif" lies outside this view; tokens kept as-is.
 */
isdnbchanattach(void)
#endif
{
	int i;

	/* Register with the ISDN layer-4 framework. */
	rbch_driver_id = isdn_l4_driver_attach("isdnbchan", NISDNBCHAN,
	    &rbch_driver_functions);

	for(i=0; i < NISDNBCHAN; i++)
	{
#if defined(__FreeBSD__)
#if __FreeBSD__ == 3
#ifdef DEVFS
		rbch_softc[i].devfs_token =
		    devfs_add_devswf(&isdnbchan_cdevsw, i, DV_CHR,
			UID_ROOT, GID_WHEEL, 0600, "isdnbchan%d", i);
#endif
#else
		make_dev(&isdnbchan_cdevsw, i,
		    UID_ROOT, GID_WHEEL, 0600, "isdnbchan%d", i);
#endif
#endif

#if I4BRBCHACCT
#if defined(__FreeBSD__)
		callout_handle_init(&rbch_softc[i].sc_callout);
#endif
#if defined(__NetBSD__) && __NetBSD_Version__ >= 104230000
		callout_init(&rbch_softc[i].sc_callout, 0);
		selinit(&rbch_softc[i].selp);
#endif
		rbch_softc[i].sc_fn = 1;
#endif
		rbch_softc[i].sc_unit = i;
		rbch_softc[i].sc_devstate = ST_IDLE;
		rbch_softc[i].sc_hdlcq.ifq_maxlen = I4BRBCHMAXQLEN;
		/* Default line speed for a B channel: 64 kbit/s. */
		rbch_softc[i].it_in.c_ispeed =
		    rbch_softc[i].it_in.c_ospeed = 64000;
		termioschars(&rbch_softc[i].it_in);
	}
}
/*
 * isdnattach:
 *
 *	Attach the i4b control device: initialize the read queue and
 *	its selector, and (on FreeBSD) create the device node.
 *
 *	NOTE(review): the return-type/prototype block matching the
 *	leading "#endif" lies outside this view; tokens kept as-is.
 */
isdnattach(void)
#endif
{
	i4b_rdqueue.ifq_maxlen = IFQ_MAXLEN;
	selinit(&select_rd_info);

#if defined(__FreeBSD__)
#if __FreeBSD__ == 3
#ifdef DEVFS
	devfs_token = devfs_add_devswf(&i4b_cdevsw, 0, DV_CHR,
	    UID_ROOT, GID_WHEEL, 0600, "i4b");
#endif
#else
	make_dev(&i4b_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "i4b");
#endif
#endif
}
/*
 * cir_attach --
 *	Attach a consumer-IR device instance: record the method table
 *	and handle supplied by the parent and initialize the read
 *	selector.  Panics under DIAGNOSTIC if any method is missing.
 */
void
cir_attach(device_t parent, device_t self, void *aux)
{
	struct ir_attach_args *args = aux;
	struct cir_softc *sc = device_private(self);

	sc->sc_dev = self;
	sc->sc_methods = args->ia_methods;
	sc->sc_handle = args->ia_handle;
	selinit(&sc->sc_rdsel);

#ifdef DIAGNOSTIC
	/* The parent must supply all three access methods. */
	if (!(sc->sc_methods->im_read != NULL &&
	      sc->sc_methods->im_write != NULL &&
	      sc->sc_methods->im_setparams != NULL))
		panic("%s: missing methods", device_xname(sc->sc_dev));
#endif
	printf("\n");
}
/*
 * dmoverioopen:
 *
 *	Device switch open routine.  Allocates a per-open dmio state
 *	structure from the pool and clones it onto a fresh file
 *	descriptor.
 */
int
dmoverioopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct dmio_state *ds;
	struct file *fp;
	int error, fd, s;

	/* falloc() will use the descriptor for us. */
	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return (error);

	/* The state pool is accessed at softclock level. */
	s = splsoftclock();
	ds = pool_get(&dmio_state_pool, PR_WAITOK);
	splx(s);

	memset(ds, 0, sizeof(*ds));
	simple_lock_init(&ds->ds_slock);
	TAILQ_INIT(&ds->ds_pending);
	TAILQ_INIT(&ds->ds_complete);
	selinit(&ds->ds_selq);

	return fd_clone(fp, fd, flag, &dmio_fileops, ds);
}
/*
 * kqueue(2) system call.
 *
 *	Allocate a kqueue, bind it to a new file descriptor and return
 *	the descriptor via "retval".  Only FNONBLOCK/FNOSIGPIPE and
 *	O_CLOEXEC are honoured from "flags".
 */
static int
kqueue1(struct lwp *l, int flags, register_t *retval)
{
	struct kqueue *kq;
	file_t *fp;
	int fd, error;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return error;

	fp->f_flag = FREAD | FWRITE | (flags & (FNONBLOCK|FNOSIGPIPE));
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;

	kq = kmem_zalloc(sizeof(*kq), KM_SLEEP);
	mutex_init(&kq->kq_lock, MUTEX_DEFAULT, IPL_SCHED);
	cv_init(&kq->kq_cv, "kqueue");
	selinit(&kq->kq_sel);
	TAILQ_INIT(&kq->kq_head);

	fp->f_data = kq;
	*retval = fd;
	kq->kq_fdp = curlwp->l_fd;
	fd_set_exclose(l, fd, (flags & O_CLOEXEC) != 0);
	/* Publishing the descriptor is the last step. */
	fd_affix(curproc, fp, fd);
	return error;
}
/*
 * uscanner_attach:
 *
 *	Attach a USB scanner: select configuration 1, locate the first
 *	bulk-in and bulk-out endpoints on interface 0 and record their
 *	addresses.  Sets sc_dying on any failure.
 */
void
uscanner_attach(device_t parent, device_t self, void *aux)
{
	struct uscanner_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	usb_interface_descriptor_t *id = 0;
	usb_endpoint_descriptor_t *ed, *ed_bulkin = NULL, *ed_bulkout = NULL;
	char *devinfop;
	int i;
	usbd_status err;

	sc->sc_dev = self;

	aprint_naive("\n");
	aprint_normal("\n");

	devinfop = usbd_devinfo_alloc(uaa->device, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_dev_flags = uscanner_lookup(uaa->vendor, uaa->product)->flags;

	sc->sc_udev = uaa->device;

	err = usbd_set_config_no(uaa->device, 1, 1); /* XXX */
	if (err) {
		aprint_error_dev(self, "failed to set configuration"
		    ", err=%s\n", usbd_errstr(err));
		sc->sc_dying = 1;
		return;
	}

	/* XXX We only check the first interface */
	err = usbd_device2interface_handle(sc->sc_udev, 0, &sc->sc_iface);
	if (!err && sc->sc_iface)
		id = usbd_get_interface_descriptor(sc->sc_iface);
	if (err || id == 0) {
		aprint_error_dev(self,
		    "could not get interface descriptor, err=%d,id=%p\n",
		    err, id);
		sc->sc_dying = 1;
		return;
	}

	/* Find the two first bulk endpoints */
	for (i = 0 ; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i);
		if (ed == 0) {
			aprint_error_dev(self,
			    "could not read endpoint descriptor\n");
			sc->sc_dying = 1;
			return;
		}

		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN
		    && (ed->bmAttributes & UE_XFERTYPE) == UE_BULK) {
			ed_bulkin = ed;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT
		    && (ed->bmAttributes & UE_XFERTYPE) == UE_BULK) {
			ed_bulkout = ed;
		}

		if (ed_bulkin && ed_bulkout)	/* found all we need */
			break;
	}

	/* Verify that we got something sensible */
	if (ed_bulkin == NULL || ed_bulkout == NULL) {
		aprint_error_dev(self,
		    "bulk-in and/or bulk-out endpoint not found\n");
		sc->sc_dying = 1;
		return;
	}

	sc->sc_bulkin = ed_bulkin->bEndpointAddress;
	sc->sc_bulkout = ed_bulkout->bEndpointAddress;
	selinit(&sc->sc_selq);

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
	    sc->sc_dev);

	return;
}
/*
 * satlinkattach:
 *
 *	Attach the satellite-link ISA card: map I/O space, reset the
 *	card, read its ID words, and set up the ISA DMA ring buffer
 *	used for reception.  Each failure path unwinds the resources
 *	acquired before it.
 */
void
satlinkattach(device_t parent, device_t self, void *aux)
{
	struct satlink_softc *sc = device_private(self);
	struct isa_attach_args *ia = aux;
	bus_space_tag_t iot = ia->ia_iot;
	bus_space_handle_t ioh;
	bus_addr_t ringaddr;

	printf("\n");

	/* Map the card. */
	if (bus_space_map(iot, ia->ia_io[0].ir_addr, SATLINK_IOSIZE,
	    0, &ioh)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return;
	}

	sc->sc_iot = iot;
	sc->sc_ioh = ioh;
	sc->sc_ic = ia->ia_ic;
	sc->sc_drq = ia->ia_drq[0].ir_drq;

	/* Reset the card. */
	bus_space_write_1(iot, ioh, SATLINK_COMMAND, SATLINK_CMD_RESET);

	/* Read ID from the card. */
	sc->sc_id.sid_mfrid =
	    bus_space_read_1(iot, ioh, SATLINK_MFRID_L) |
	    (bus_space_read_1(iot, ioh, SATLINK_MFRID_H) << 8);
	sc->sc_id.sid_grpid = bus_space_read_1(iot, ioh, SATLINK_GRPID);
	sc->sc_id.sid_userid =
	    bus_space_read_1(iot, ioh, SATLINK_USERID_L) |
	    (bus_space_read_1(iot, ioh, SATLINK_USERID_H) << 8);
	sc->sc_id.sid_serial =
	    bus_space_read_1(iot, ioh, SATLINK_SER_L) |
	    (bus_space_read_1(iot, ioh, SATLINK_SER_M0) << 8) |
	    (bus_space_read_1(iot, ioh, SATLINK_SER_M1) << 16) |
	    (bus_space_read_1(iot, ioh, SATLINK_SER_H) << 24);

	printf("%s: mfrid 0x%x, grpid 0x%x, userid 0x%x, serial %d\n",
	    device_xname(self), sc->sc_id.sid_mfrid,
	    sc->sc_id.sid_grpid, sc->sc_id.sid_userid,
	    sc->sc_id.sid_serial);

	callout_init(&sc->sc_ch, 0);
	selinit(&sc->sc_selq);

	sc->sc_bufsize = isa_dmamaxsize(sc->sc_ic, sc->sc_drq);

	/* Allocate and map the ring buffer. */
	if (isa_dmamem_alloc(sc->sc_ic, sc->sc_drq, sc->sc_bufsize,
	    &ringaddr, BUS_DMA_NOWAIT)) {
		aprint_error_dev(self, "can't allocate ring buffer\n");
		return;
	}
	if (isa_dmamem_map(sc->sc_ic, sc->sc_drq, ringaddr, sc->sc_bufsize,
	    &sc->sc_buf, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
		aprint_error_dev(self, "can't map ring buffer\n");
		isa_dmamem_free(sc->sc_ic, sc->sc_drq, ringaddr,
		    sc->sc_bufsize);
		return;
	}

	if (isa_drq_alloc(sc->sc_ic, sc->sc_drq) != 0) {
		aprint_error_dev(self, "can't reserve drq %d\n",
		    sc->sc_drq);
		isa_dmamem_unmap(sc->sc_ic, sc->sc_drq, sc->sc_buf,
		    sc->sc_bufsize);
		isa_dmamem_free(sc->sc_ic, sc->sc_drq, ringaddr,
		    sc->sc_bufsize);
		return;
	}

	/* Create the DMA map. */
	if (isa_dmamap_create(sc->sc_ic, sc->sc_drq, sc->sc_bufsize,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW)) {
		aprint_error_dev(self, "can't create DMA map\n");
		isa_dmamem_unmap(sc->sc_ic, sc->sc_drq, sc->sc_buf,
		    sc->sc_bufsize);
		isa_dmamem_free(sc->sc_ic, sc->sc_drq, ringaddr,
		    sc->sc_bufsize);
		return;
	}
}
/*
 * chattach:
 *
 *	Attach a SCSI media changer: glue into the SCSI bus, apply
 *	device quirks, wait for the settle delay if the device needs
 *	one, then probe and print the element counts.
 */
static void
chattach(device_t parent, device_t self, void *aux)
{
	struct ch_softc *sc = device_private(self);
	struct scsipibus_attach_args *sa = aux;
	struct scsipi_periph *periph = sa->sa_periph;

	sc->sc_dev = self;
	selinit(&sc->sc_selq);

	/* Glue into the SCSI bus */
	sc->sc_periph = periph;
	periph->periph_dev = sc->sc_dev;
	periph->periph_switch = &ch_switch;

	printf("\n");

	/*
	 * Find out our device's quirks.
	 */
	ch_get_quirks(sc, &sa->sa_inqbuf);

	/*
	 * Some changers require a long time to settle out, to do
	 * tape inventory, for instance.
	 */
	if (sc->sc_settledelay) {
		printf("%s: waiting %d seconds for changer to settle...\n",
		    device_xname(sc->sc_dev), sc->sc_settledelay);
		delay(1000000 * sc->sc_settledelay);
	}

	/*
	 * Get information about the device.  Note we can't use
	 * interrupts yet.
	 */
	if (ch_get_params(sc, XS_CTL_DISCOVERY|XS_CTL_IGNORE_MEDIA_CHANGE))
		printf("%s: offline\n", device_xname(sc->sc_dev));
	else {
#define PLURAL(c)	(c) == 1 ? "" : "s"
		printf("%s: %d slot%s, %d drive%s, %d picker%s, %d portal%s\n",
		    device_xname(sc->sc_dev),
		    sc->sc_counts[CHET_ST], PLURAL(sc->sc_counts[CHET_ST]),
		    sc->sc_counts[CHET_DT], PLURAL(sc->sc_counts[CHET_DT]),
		    sc->sc_counts[CHET_MT], PLURAL(sc->sc_counts[CHET_MT]),
		    sc->sc_counts[CHET_IE], PLURAL(sc->sc_counts[CHET_IE]));
#undef PLURAL
#ifdef CHANGER_DEBUG
		printf("%s: move mask: 0x%x 0x%x 0x%x 0x%x\n",
		    device_xname(sc->sc_dev),
		    sc->sc_movemask[CHET_MT], sc->sc_movemask[CHET_ST],
		    sc->sc_movemask[CHET_IE], sc->sc_movemask[CHET_DT]);
		printf("%s: exchange mask: 0x%x 0x%x 0x%x 0x%x\n",
		    device_xname(sc->sc_dev),
		    sc->sc_exchangemask[CHET_MT], sc->sc_exchangemask[CHET_ST],
		    sc->sc_exchangemask[CHET_IE], sc->sc_exchangemask[CHET_DT]);
#endif /* CHANGER_DEBUG */
	}

	/* Default the current picker. */
	sc->sc_picker = sc->sc_firsts[CHET_MT];
}
/*
 * udsir_attach:
 *
 *	Attach a USB SIR (serial infrared) dongle: locate the interrupt
 *	in/out endpoints on the interface and attach the irframe child
 *	device.
 */
static void
udsir_attach(device_t parent, device_t self, void *aux)
{
	struct udsir_softc *sc = device_private(self);
	struct usbif_attach_arg *uiaa = aux;
	struct usbd_device *dev = uiaa->uiaa_device;
	struct usbd_interface *iface = uiaa->uiaa_iface;
	char *devinfop;
	usb_endpoint_descriptor_t *ed;
	uint8_t epcount;
	int i;
	struct ir_attach_args ia;

	DPRINTFN(10, ("udsir_attach: sc=%p\n", sc));

	sc->sc_dev = self;

	aprint_naive("\n");
	aprint_normal("\n");

	devinfop = usbd_devinfo_alloc(dev, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_udev = dev;
	sc->sc_iface = iface;

	epcount = 0;
	(void)usbd_endpoint_count(iface, &epcount);

	/* Find the interrupt-in (read) and interrupt-out (write) pipes. */
	sc->sc_rd_addr = -1;
	sc->sc_wr_addr = -1;
	for (i = 0; i < epcount; i++) {
		ed = usbd_interface2endpoint_descriptor(iface, i);
		if (ed == NULL) {
			aprint_error_dev(self, "couldn't get ep %d\n", i);
			return;
		}
		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->sc_rd_addr = ed->bEndpointAddress;
			sc->sc_rd_maxpsz = UGETW(ed->wMaxPacketSize);
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->sc_wr_addr = ed->bEndpointAddress;
			sc->sc_wr_maxpsz = UGETW(ed->wMaxPacketSize);
		}
	}
	if (sc->sc_rd_addr == -1 || sc->sc_wr_addr == -1) {
		aprint_error_dev(self, "missing endpoint\n");
		return;
	}

	DPRINTFN(10, ("udsir_attach: %p\n", sc->sc_udev));

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);

	ia.ia_type = IR_TYPE_IRFRAME;
	ia.ia_methods = &udsir_methods;
	ia.ia_handle = sc;

	sc->sc_child = config_found(self, &ia, ir_print);
	/*
	 * NOTE(review): the selectors are initialized after config_found()
	 * has attached the child — confirm the child cannot select() on
	 * them before this point.
	 */
	selinit(&sc->sc_rd_sel);
	selinit(&sc->sc_wr_sel);

	return;
}
/*
 * tctrl_attach:
 *
 *	Attach the Tadpole TS102 microcontroller: map its registers,
 *	drain any pending input, hook up the interrupt, read the
 *	external status, initialize softc state, the LCD, and the
 *	event kernel thread.
 *
 *	Fixes vs. original: removed a duplicated
 *	"sc->sc_ext_pending = 0;" statement, and added the missing
 *	newline to the kthread_create() failure message.
 */
static void
tctrl_attach(device_t parent, device_t self, void *aux)
{
	struct tctrl_softc *sc = device_private(self);
	union obio_attach_args *uoba = aux;
	struct sbus_attach_args *sa = &uoba->uoba_sbus;
	unsigned int i, v;

	/* We're living on a sbus slot that looks like an obio that
	 * looks like an sbus slot.
	 */
	sc->sc_dev = self;
	sc->sc_memt = sa->sa_bustag;
	if (sbus_bus_map(sc->sc_memt, sa->sa_slot,
	    sa->sa_offset - TS102_REG_UCTRL_INT, sa->sa_size,
	    BUS_SPACE_MAP_LINEAR, &sc->sc_memh) != 0) {
		printf(": can't map registers\n");
		return;
	}

	printf("\n");

	sc->sc_tft_on = 1;

	/* clear any pending data. */
	for (i = 0; i < 10000; i++) {
		if ((TS102_UCTRL_STS_RXNE_STA &
		    tctrl_read(sc, TS102_REG_UCTRL_STS)) == 0) {
			break;
		}
		v = tctrl_read(sc, TS102_REG_UCTRL_DATA);
		tctrl_write(sc, TS102_REG_UCTRL_STS,
		    TS102_UCTRL_STS_RXNE_STA);
	}

	if (sa->sa_nintr != 0) {
		(void)bus_intr_establish(sc->sc_memt, sa->sa_pri, IPL_NONE,
		    tctrl_intr, sc);
		evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
		    device_xname(sc->sc_dev), "intr");
	}

	/* See what the external status is */
	sc->sc_ext_status = 0;
	tctrl_read_ext_status();
	if (sc->sc_ext_status != 0) {
		const char *sep;

		printf("%s: ", device_xname(sc->sc_dev));
		v = sc->sc_ext_status;
		for (i = 0, sep = ""; v != 0; i++, v >>= 1) {
			if (v & 1) {
				printf("%s%s", sep, tctrl_ext_statuses[i]);
				sep = ", ";
			}
		}
		printf("\n");
	}

	/* Get a current of the control bitport */
	tctrl_setup_bitport_nop();
	tctrl_write(sc, TS102_REG_UCTRL_INT,
	    TS102_UCTRL_INT_RXNE_REQ|TS102_UCTRL_INT_RXNE_MSK);

	sc->sc_lid = (sc->sc_ext_status & TS102_EXT_STATUS_LID_DOWN) == 0;
	sc->sc_power_state = PWR_RESUME;

	sc->sc_extvga = (sc->sc_ext_status &
	    TS102_EXT_STATUS_EXTERNAL_VGA_ATTACHED) != 0;
	sc->sc_video_callback = NULL;

	sc->sc_wantdata = 0;
	sc->sc_event_count = 0;
	sc->sc_ext_pending = 0;	/* was assigned twice in the original */

	mutex_init(&sc->sc_requestlock, MUTEX_DEFAULT, IPL_NONE);
	selinit(&sc->sc_rsel);

	/* setup sensors and register the power button */
	tctrl_sensor_setup(sc);
	tctrl_lid_state(sc);
	tctrl_ac_state(sc);

	/* initialize the LCD */
	tctrl_init_lcd();

	/* initialize sc_lcdstate */
	sc->sc_lcdstate = 0;
	sc->sc_lcdwanted = 0;
	tadpole_set_lcd(2, 0);

	/* fire up the LCD event thread */
	sc->sc_events = 0;

	if (kthread_create(PRI_NONE, 0, NULL, tctrl_event_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		printf("%s: unable to create event kthread\n",
		    device_xname(sc->sc_dev));
	}
}
/*
 * tap_attach:
 *
 *	Autoconf attach for the tap(4) virtual Ethernet device:
 *	initialize softc state and locks, generate a semi-random MAC
 *	address, set up supported media, attach the Ethernet interface
 *	and create a per-interface sysctl node.
 */
void
tap_attach(device_t parent, device_t self, void *aux)
{
	struct tap_softc *sc = device_private(self);
	struct ifnet *ifp;
	const struct sysctlnode *node;
	int error;
	uint8_t enaddr[ETHER_ADDR_LEN] =
	    { 0xf2, 0x0b, 0xa4, 0xff, 0xff, 0xff };
	char enaddrstr[3 * ETHER_ADDR_LEN];

	sc->sc_dev = self;
	sc->sc_sih = NULL;
	getnanotime(&sc->sc_btime);
	sc->sc_atime = sc->sc_mtime = sc->sc_btime;
	sc->sc_flags = 0;
	selinit(&sc->sc_rsel);

	/*
	 * Initialize the two locks for the device.
	 *
	 * We need a lock here because even though the tap device can be
	 * opened only once, the file descriptor might be passed to another
	 * process, say a fork(2)ed child.
	 *
	 * The Giant saves us from most of the hassle, but since the read
	 * operation can sleep, we don't want two processes to wake up at
	 * the same moment and both try and dequeue a single packet.
	 *
	 * The queue for event listeners (used by kqueue(9), see below) has
	 * to be protected too, so use a spin lock.
	 */
	mutex_init(&sc->sc_rdlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&sc->sc_kqlock, MUTEX_DEFAULT, IPL_VM);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	/*
	 * In order to obtain unique initial Ethernet address on a host,
	 * do some randomisation.  It's not meant for anything but avoiding
	 * hard-coding an address.
	 */
	cprng_fast(&enaddr[3], 3);

	aprint_verbose_dev(self, "Ethernet address %s\n",
	    ether_snprintf(enaddrstr, sizeof(enaddrstr), enaddr));

	/*
	 * Why 1000baseT? Why not? You can add more.
	 *
	 * Note that there are 3 steps: init, one or several additions to
	 * list of supported media, and in the end, the selection of one
	 * of them.
	 */
	ifmedia_init(&sc->sc_im, 0, tap_mediachange, tap_mediastatus);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_im, IFM_ETHER|IFM_AUTO);

	/*
	 * One should note that an interface must do multicast in order
	 * to support IPv6.
	 */
	ifp = &sc->sc_ec.ec_if;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = tap_ioctl;
	ifp->if_start = tap_start;
	ifp->if_stop = tap_stop;
	ifp->if_init = tap_init;
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;

	/* Those steps are mandatory for an Ethernet driver. */
	if_initialize(ifp);
	ether_ifattach(ifp, enaddr);
	if_register(ifp);

	/*
	 * Add a sysctl node for that interface.
	 *
	 * The pointer transmitted is not a string, but instead a pointer to
	 * the softc structure, which we can use to build the string value on
	 * the fly in the helper function of the node.  See the comments for
	 * tap_sysctl_handler for details.
	 *
	 * Usually sysctl_createv is called with CTL_CREATE as the before-last
	 * component.  However, we can allocate a number ourselves, as we are
	 * the only consumer of the net.link.<iface> node.  In this case, the
	 * unit number is conveniently used to number the node.  CTL_CREATE
	 * would just work, too.
	 */
	if ((error = sysctl_createv(NULL, 0, NULL, &node,
	    CTLFLAG_READWRITE, CTLTYPE_STRING, device_xname(self), NULL,
	    tap_sysctl_handler, 0, (void *)sc, 18, CTL_NET, AF_LINK,
	    tap_node, device_unit(sc->sc_dev), CTL_EOL)) != 0)
		aprint_error_dev(self,
		    "sysctl_createv returned %d, ignoring\n", error);
}
/*
 * bppattach:
 *
 *	Attach the SBus bidirectional parallel port: map registers,
 *	configure DMA burst sizes from the PROM, hook up the LSI64854
 *	DMA engine and interrupt, and snapshot/sanitize the default
 *	hardware state.
 */
void
bppattach(device_t parent, device_t self, void *aux)
{
	struct bpp_softc *dsc = device_private(self);
	struct lsi64854_softc *sc = &dsc->sc_lsi64854;
	struct sbus_softc *sbsc = device_private(parent);
	struct sbus_attach_args *sa = aux;
	int burst, sbusburst;
	int node;

	sc->sc_dev = self;

	selinit(&dsc->sc_rsel);
	selinit(&dsc->sc_wsel);
	dsc->sc_sih = softint_establish(SOFTINT_CLOCK, bppsoftintr, dsc);

	sc->sc_bustag = sa->sa_bustag;
	sc->sc_dmatag = sa->sa_dmatag;
	node = sa->sa_node;

	/* Map device registers */
	if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
	    sa->sa_size, 0, &sc->sc_regs) != 0) {
		aprint_error(": cannot map registers\n");
		return;
	}

	/*
	 * Get transfer burst size from PROM and plug it into the
	 * controller registers. This is needed on the Sun4m; do
	 * others need it too?
	 */
	sbusburst = sbsc->sc_burst;
	if (sbusburst == 0)
		sbusburst = SBUS_BURST_32 - 1; /* 1->16 */

	burst = prom_getpropint(node, "burst-sizes", -1);
	if (burst == -1)
		/* take SBus burst sizes */
		burst = sbusburst;

	/* Clamp at parent's burst sizes */
	burst &= sbusburst;
	sc->sc_burst = (burst & SBUS_BURST_32) ? 32 :
	    (burst & SBUS_BURST_16) ? 16 : 0;

	/* Join the Sbus device family */
	dsc->sc_sd.sd_reset = NULL;
	sbus_establish(&dsc->sc_sd, self);

	/* Initialize the DMA channel */
	sc->sc_channel = L64854_CHANNEL_PP;
	lsi64854_attach(sc);

	/* Establish interrupt handler */
	if (sa->sa_nintr) {
		sc->sc_intrchain = bppintr;
		sc->sc_intrchainarg = dsc;
		(void)bus_intr_establish(sa->sa_bustag, sa->sa_pri, IPL_TTY,
		    bppintr, sc);
	}

	/* Allocate buffer XXX - should actually use dmamap_uio() */
	dsc->sc_bufsz = 1024;
	/* NOTE(review): M_NOWAIT result is unchecked; sc_buf may be NULL. */
	dsc->sc_buf = malloc(dsc->sc_bufsz, M_DEVBUF, M_NOWAIT);

	/* XXX read default state */
	{
		bus_space_handle_t h = sc->sc_regs;
		struct hwstate *hw = &dsc->sc_hwdefault;
		int ack_rate = sa->sa_frequency / 1000000;

		hw->hw_hcr = bus_space_read_2(sc->sc_bustag, h,
		    L64854_REG_HCR);
		hw->hw_ocr = bus_space_read_2(sc->sc_bustag, h,
		    L64854_REG_OCR);
		hw->hw_tcr = bus_space_read_1(sc->sc_bustag, h,
		    L64854_REG_TCR);
		hw->hw_or = bus_space_read_1(sc->sc_bustag, h,
		    L64854_REG_OR);

		DPRINTF(("bpp: hcr %x ocr %x tcr %x or %x\n",
		    hw->hw_hcr, hw->hw_ocr, hw->hw_tcr, hw->hw_or));

		/* Set these to sane values */
		hw->hw_hcr = ((ack_rate<<BPP_HCR_DSS_SHFT)&BPP_HCR_DSS_MASK)
		    | ((ack_rate<<BPP_HCR_DSW_SHFT)&BPP_HCR_DSW_MASK);
		hw->hw_ocr |= BPP_OCR_ACK_OP;
	}
}
/*
 * apm_attach:
 *
 *	Common attach for APM (Advanced Power Management): negotiate
 *	the protocol version, enable power management via the backend
 *	ops vector, initialize softc state, run an initial check, and
 *	start the periodic-check kernel thread.
 */
void
apm_attach(struct apm_softc *sc)
{
	u_int numbatts, capflags;

	aprint_normal(": ");

	/* Disable protocol versions the detected BIOS cannot handle. */
	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) +
	    APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie,
		    &numbatts, &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}

	if (!pmf_device_register(sc->sc_dev, NULL, NULL))
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");
}