/*
 * Resize the global pflogifs interface-pointer array to hold n entries.
 * Existing entries are preserved; new slots are NULL.  A size of 0 frees
 * the array.  Returns 0 on success, EINVAL if the request would overflow,
 * or ENOMEM if the new array cannot be allocated (old array untouched).
 */
int
pflogifs_resize(size_t n)
{
	struct ifnet **p;
	size_t i;	/* size_t, not int: n may exceed INT_MAX */

	if (n > SIZE_MAX / sizeof(*p))
		return (EINVAL);
	if (n == 0)
		p = NULL;
	else
		if ((p = mallocarray(n, sizeof(*p), M_DEVBUF,
		    M_NOWAIT|M_ZERO)) == NULL)
			return (ENOMEM);
	/* copy surviving entries; slots past the old count become NULL */
	for (i = 0; i < n; i++)
		if (i < npflogifs)
			p[i] = pflogifs[i];
		else
			p[i] = NULL;
	if (pflogifs)
		free(pflogifs, M_DEVBUF, 0);
	pflogifs = p;
	npflogifs = n;
	return (0);
}
void vndattach(int num) { char *mem; int i; if (num <= 0) return; mem = mallocarray(num, sizeof(struct vnd_softc), M_DEVBUF, M_NOWAIT | M_ZERO); if (mem == NULL) { printf("WARNING: no memory for vnode disks\n"); return; } vnd_softc = (struct vnd_softc *)mem; for (i = 0; i < num; i++) { struct vnd_softc *sc = &vnd_softc[i]; sc->sc_dev.dv_unit = i; snprintf(sc->sc_dev.dv_xname, sizeof(sc->sc_dev.dv_xname), "vnd%d", i); disk_construct(&sc->sc_dk); device_ref(&sc->sc_dev); } numvnd = num; }
/*
 * Fill in the in-core superblock fields of *fs from the on-disk
 * superblock already copied into fs->e2fs: derive block/fragment sizes,
 * masks and per-group geometry, then (re)read the block group
 * descriptors from devvp.  Returns 0 on success or an error from
 * bread(); on error the descriptor array is freed and reset to NULL.
 */
static int
e2fs_sbfill(struct vnode *devvp, struct m_ext2fs *fs)
{
	struct buf *bp = NULL;
	int i, error;

	/* XXX assume hardware block size == 512 */
	fs->e2fs_ncg = howmany(fs->e2fs.e2fs_bcount -
	    fs->e2fs.e2fs_first_dblock, fs->e2fs.e2fs_bpg);
	/* disk sectors per fs block: log2(bsize / 512) == log_bsize + 1 */
	fs->e2fs_fsbtodb = fs->e2fs.e2fs_log_bsize + 1;
	fs->e2fs_bsize = 1024 << fs->e2fs.e2fs_log_bsize;
	fs->e2fs_bshift = LOG_MINBSIZE + fs->e2fs.e2fs_log_bsize;
	fs->e2fs_fsize = 1024 << fs->e2fs.e2fs_log_fsize;
	/* masks for offset-within-block and block-aligned arithmetic */
	fs->e2fs_qbmask = fs->e2fs_bsize - 1;
	fs->e2fs_bmask = ~fs->e2fs_qbmask;
	fs->e2fs_ipb = fs->e2fs_bsize / EXT2_DINODE_SIZE(fs);
	fs->e2fs_itpg = fs->e2fs.e2fs_ipg / fs->e2fs_ipb;

	/* Re-read group descriptors from the disk. */
	fs->e2fs_ngdb = howmany(fs->e2fs_ncg,
	    fs->e2fs_bsize / sizeof(struct ext2_gd));
	fs->e2fs_gd = mallocarray(fs->e2fs_ngdb, fs->e2fs_bsize,
	    M_UFSMNT, M_WAITOK);
	for (i = 0; i < fs->e2fs_ngdb; ++i) {
		/*
		 * Descriptors follow the superblock; with 1k blocks they
		 * start one block later (hence the extra +1).
		 */
		daddr_t dblk = ((fs->e2fs_bsize > 1024) ? 0 : 1) + i + 1;
		/* element offset of this block's descriptors in e2fs_gd */
		size_t gdesc = i * fs->e2fs_bsize / sizeof(struct ext2_gd);
		struct ext2_gd *gd;

		error = bread(devvp, fsbtodb(fs, dblk), fs->e2fs_bsize, &bp);
		if (error) {
			size_t gdescs_space = fs->e2fs_ngdb * fs->e2fs_bsize;

			/* undo the partial load before bailing out */
			free(fs->e2fs_gd, M_UFSMNT, gdescs_space);
			fs->e2fs_gd = NULL;
			brelse(bp);
			return (error);
		}
		gd = (struct ext2_gd *) bp->b_data;
		e2fs_cgload(gd, fs->e2fs_gd + gdesc, fs->e2fs_bsize);
		brelse(bp);
		bp = NULL;
	}

	/* Large files require the LARGEFILE ro-compat feature and rev > 0. */
	if ((fs->e2fs.e2fs_features_rocompat & EXT2F_ROCOMPAT_LARGEFILE) == 0 ||
	    (fs->e2fs.e2fs_rev == E2FS_REV0))
		fs->e2fs_maxfilesize = INT_MAX;
	else
		fs->e2fs_maxfilesize = ext2fs_maxfilesize(fs);
	if (fs->e2fs.e2fs_features_incompat & EXT2F_INCOMPAT_EXTENTS)
		fs->e2fs_maxfilesize *= 4;
	return (0);
}
/*
 * Allocate the zero-filled array of nmidi midi sub-device records.
 * Returns USBD_NOMEM on allocation failure, USBD_NORMAL_COMPLETION
 * otherwise.
 */
static usbd_status
alloc_all_mididevs(struct umidi_softc *sc, int nmidi)
{
	sc->sc_mididevs = mallocarray(nmidi, sizeof(*sc->sc_mididevs),
	    M_USBDEV, M_WAITOK | M_CANFAIL | M_ZERO);
	if (!sc->sc_mididevs)
		return USBD_NOMEM;
	/*
	 * Record the count only after the allocation has succeeded, so a
	 * failed allocation cannot leave a nonzero count alongside a
	 * NULL sc_mididevs pointer.
	 */
	sc->sc_num_mididevs = nmidi;
	return USBD_NORMAL_COMPLETION;
}
void vdsp_alloc(void *arg1) { struct vdsp_softc *sc = arg1; struct vio_dring_reg dr; KASSERT(sc->sc_num_descriptors <= VDSK_MAX_DESCRIPTORS); KASSERT(sc->sc_descriptor_size <= VDSK_MAX_DESCRIPTOR_SIZE); sc->sc_vd = mallocarray(sc->sc_num_descriptors, sc->sc_descriptor_size, M_DEVBUF, M_WAITOK); sc->sc_vd_ring = mallocarray(sc->sc_num_descriptors, sizeof(*sc->sc_vd_ring), M_DEVBUF, M_WAITOK); task_set(&sc->sc_vd_task, vdsp_vd_task, sc); bzero(&dr, sizeof(dr)); dr.tag.type = VIO_TYPE_CTRL; dr.tag.stype = VIO_SUBTYPE_ACK; dr.tag.stype_env = VIO_DRING_REG; dr.tag.sid = sc->sc_local_sid; dr.dring_ident = ++sc->sc_dring_ident; vdsp_sendmsg(sc, &dr, sizeof(dr), 1); }
/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.
 */
static int
mem_ioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p)
{
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;
	int nd, error = 0;

	/* is this for us? */
	if (cmd != MEMRANGE_GET && cmd != MEMRANGE_SET)
		return (ENOTTY);

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)
		return (EOPNOTSUPP);

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)
		return (ENXIO);

	if (cmd == MEMRANGE_GET) {
		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
		if (nd <= 0) {
			/* caller only asked how many descriptors exist */
			nd = mem_range_softc.mr_ndesc;
		} else {
			md = mallocarray(nd, sizeof(struct mem_range_desc),
			    M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
			if (!error)
				error = copyout(md, mo->mo_desc,
				    nd * sizeof(struct mem_range_desc));
			free(md, M_MEMDESC,
			    nd * sizeof(struct mem_range_desc));
		}
		mo->mo_arg[0] = nd;
	} else {
		/* MEMRANGE_SET */
		md = malloc(sizeof(struct mem_range_desc), M_MEMDESC,
		    M_WAITOK);
		error = copyin(mo->mo_desc, md,
		    sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		if (error == 0)
			error = mem_range_attr_set(md, &mo->mo_arg[0]);
		free(md, M_MEMDESC, sizeof(struct mem_range_desc));
	}
	return (error);
}
/*
 * Create a TTM backing object for a radeon buffer: AGP-backed when the
 * device uses AGP, otherwise a DMA TT with a bus_dma segment array and
 * DMA map.  Returns NULL on any allocation/mapping failure, unwinding
 * everything acquired so far.
 *
 * Fix: the address-of expressions were garbled to ">t->..." (lost
 * "&gtt") by text mangling; restored "&gtt->ttm", "&gtt->map" and the
 * final "&gtt->ttm.ttm".
 */
static struct ttm_tt *
radeon_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
    uint32_t page_flags, struct vm_page *dummy_read_page)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bdev, rdev->ddev->agp, size,
		    page_flags, dummy_read_page);
	}
#endif

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
	    dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	gtt->segs = mallocarray(gtt->ttm.ttm.num_pages,
	    sizeof(bus_dma_segment_t), M_DRM, M_WAITOK | M_ZERO);
	if (gtt->segs == NULL) {
		ttm_dma_tt_fini(&gtt->ttm);
		free(gtt, M_DRM, 0);
		return NULL;
	}
	if (bus_dmamap_create(rdev->dmat, size, gtt->ttm.ttm.num_pages,
	    size, 0, BUS_DMA_WAITOK, &gtt->map)) {
		free(gtt->segs, M_DRM, 0);
		ttm_dma_tt_fini(&gtt->ttm);
		free(gtt, M_DRM, 0);
		return NULL;
	}
	return &gtt->ttm.ttm;
}
void acpivout_get_bcl(struct acpivout_softc *sc) { int i, j, value; struct aml_value res; DPRINTF(("Getting _BCL!")); aml_evalname(sc->sc_acpi, sc->sc_devnode, "_BCL", 0, NULL, &res); if (res.type != AML_OBJTYPE_PACKAGE) { sc->sc_bcl_len = 0; goto err; } /* * Per the ACPI spec section B.6.2 the _BCL method returns a package. * The first integer in the package is the brightness level * when the computer has full power, and the second is the * brightness level when the computer is on batteries. * All other levels may be used by OSPM. * So we skip the first two integers in the package. */ if (res.length <= 2) { sc->sc_bcl_len = 0; goto err; } sc->sc_bcl_len = res.length - 2; sc->sc_bcl = mallocarray(sc->sc_bcl_len, sizeof(int), M_DEVBUF, M_WAITOK | M_ZERO); for (i = 0; i < sc->sc_bcl_len; i++) { /* Sort darkest to brightest */ value = aml_val2int(res.v_package[i + 2]); for (j = i; j > 0 && sc->sc_bcl[j - 1] > value; j--) sc->sc_bcl[j] = sc->sc_bcl[j - 1]; sc->sc_bcl[j] = value; } err: aml_freevalue(&res); }
void apic_attach(struct elroy_softc *sc) { volatile struct elroy_regs *r = sc->sc_regs; u_int32_t data; data = apic_read(r, APIC_VERSION); sc->sc_nints = (data & APIC_VERSION_NENT) >> APIC_VERSION_NENT_SHIFT; printf(" APIC ver %x, %d pins", data & APIC_VERSION_MASK, sc->sc_nints); sc->sc_irq = mallocarray(sc->sc_nints, sizeof(int), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->sc_irq == NULL) panic("apic_attach: cannot allocate irq table"); apic_get_int_tbl(sc); #ifdef DEBUG apic_dump(sc); #endif }
void vmcmdset_extend(struct exec_vmcmd_set *evsp) { struct exec_vmcmd *nvcp; u_int ocnt; #ifdef DIAGNOSTIC if (evsp->evs_used < evsp->evs_cnt) panic("vmcmdset_extend: not necessary"); #endif ocnt = evsp->evs_cnt; KASSERT(ocnt > 0); /* figure out number of entries in new set */ evsp->evs_cnt += ocnt; /* reallocate the command set */ nvcp = mallocarray(evsp->evs_cnt, sizeof(*nvcp), M_EXEC, M_WAITOK); memcpy(nvcp, evsp->evs_cmds, ocnt * sizeof(*nvcp)); if (evsp->evs_cmds != evsp->evs_start) free(evsp->evs_cmds, M_EXEC, ocnt * sizeof(*nvcp)); evsp->evs_cmds = nvcp; }
/*
 * Endpoint/jack discovery for Yamaha-style devices: find at most one
 * bulk-out and one bulk-in endpoint, count the in/out jacks from the
 * class-specific MIDI-streaming descriptors, and allocate/fill the
 * endpoint records.  Returns USBD_INVAL if the MS header is missing,
 * USBD_NOMEM on allocation failure.
 */
static usbd_status
alloc_all_endpoints_yamaha(struct umidi_softc *sc)
{
	/* This driver currently supports max 1in/1out bulk endpoints */
	usb_descriptor_t *desc;
	usb_endpoint_descriptor_t *epd;
	int out_addr, in_addr, in_packetsize, i, dir;
	size_t remain, descsize;

	out_addr = in_addr = 0;

	/* detect endpoints: first bulk endpoint of each direction wins */
	desc = TO_D(usbd_get_interface_descriptor(sc->sc_iface));
	for (i=(int)TO_IFD(desc)->bNumEndpoints-1; i>=0; i--) {
		epd = usbd_interface2endpoint_descriptor(sc->sc_iface, i);
		if (UE_GET_XFERTYPE(epd->bmAttributes) == UE_BULK) {
			dir = UE_GET_DIR(epd->bEndpointAddress);
			if (dir==UE_DIR_OUT && !out_addr)
				out_addr = epd->bEndpointAddress;
			else if (dir==UE_DIR_IN && !in_addr) {
				in_addr = epd->bEndpointAddress;
				in_packetsize = UGETW(epd->wMaxPacketSize);
			}
		}
	}
	desc = NEXT_D(desc);

	/* count jacks: the MS header must directly follow the interface */
	if (!(desc->bDescriptorType==UDESC_CS_INTERFACE &&
	    desc->bDescriptorSubtype==UMIDI_MS_HEADER))
		return USBD_INVAL;
	remain = (size_t)UGETW(TO_CSIFD(desc)->wTotalLength) -
	    (size_t)desc->bLength;
	desc = NEXT_D(desc);

	while (remain>=sizeof(usb_descriptor_t)) {
		descsize = desc->bLength;
		/* malformed length would walk past the buffer — stop */
		if (descsize>remain || descsize==0)
			break;
		if (desc->bDescriptorType==UDESC_CS_INTERFACE &&
		    remain>=UMIDI_JACK_DESCRIPTOR_SIZE) {
			if (desc->bDescriptorSubtype==UMIDI_OUT_JACK)
				sc->sc_out_num_jacks++;
			else if (desc->bDescriptorSubtype==UMIDI_IN_JACK)
				sc->sc_in_num_jacks++;
		}
		desc = NEXT_D(desc);
		remain-=descsize;
	}

	/* validate some parameters: clamp jack counts, require endpoints */
	if (sc->sc_out_num_jacks>UMIDI_MAX_EPJACKS)
		sc->sc_out_num_jacks = UMIDI_MAX_EPJACKS;
	if (sc->sc_in_num_jacks>UMIDI_MAX_EPJACKS)
		sc->sc_in_num_jacks = UMIDI_MAX_EPJACKS;
	if (sc->sc_out_num_jacks && out_addr)
		sc->sc_out_num_endpoints = 1;
	else {
		sc->sc_out_num_endpoints = 0;
		sc->sc_out_num_jacks = 0;
	}
	if (sc->sc_in_num_jacks && in_addr)
		sc->sc_in_num_endpoints = 1;
	else {
		sc->sc_in_num_endpoints = 0;
		sc->sc_in_num_jacks = 0;
	}

	sc->sc_endpoints = mallocarray(sc->sc_out_num_endpoints +
	    sc->sc_in_num_endpoints, sizeof(struct umidi_endpoint),
	    M_USBDEV, M_WAITOK | M_CANFAIL);
	if (!sc->sc_endpoints)
		return USBD_NOMEM;

	if (sc->sc_out_num_endpoints) {
		sc->sc_out_ep = sc->sc_endpoints;
		sc->sc_out_ep->sc = sc;
		sc->sc_out_ep->addr = out_addr;
		/*
		 * NOTE(review): epd here is whatever endpoint descriptor
		 * the detect loop examined last (index 0), not necessarily
		 * the out endpoint — verify this is intended.
		 */
		sc->sc_out_ep->packetsize = UGETW(epd->wMaxPacketSize);
		sc->sc_out_ep->num_jacks = sc->sc_out_num_jacks;
		sc->sc_out_ep->num_open = 0;
		memset(sc->sc_out_ep->jacks, 0, sizeof(sc->sc_out_ep->jacks));
	} else
		sc->sc_out_ep = NULL;
	if (sc->sc_in_num_endpoints) {
		/* in endpoint record follows the out record (if any) */
		sc->sc_in_ep = sc->sc_endpoints+sc->sc_out_num_endpoints;
		sc->sc_in_ep->sc = sc;
		sc->sc_in_ep->addr = in_addr;
		sc->sc_in_ep->packetsize = in_packetsize;
		sc->sc_in_ep->num_jacks = sc->sc_in_num_jacks;
		sc->sc_in_ep->num_open = 0;
		memset(sc->sc_in_ep->jacks, 0, sizeof(sc->sc_in_ep->jacks));
	} else
		sc->sc_in_ep = NULL;

	return USBD_NORMAL_COMPLETION;
}
/*
 * 1st pass on BIOS's Intel MP specification table.
 *
 * initializes:
 *	mp_ncpus = 1
 *
 * determines:
 *	cpu_apic_address (common to all CPUs)
 *	ioapic_address[N]
 *	mp_naps
 *	mp_nbusses
 *	mp_napics
 *	nintrs
 */
void
mpbios_scan(struct device *self)
{
	const u_int8_t *position, *end;
	int count;
	int type;
	int intr_cnt;
	paddr_t lapic_base;

	printf(": Intel MP Specification 1.%d\n", mp_fps->spec_rev);

	/*
	 * looks like we've got a MP system.  start setting up
	 * infrastructure..
	 * XXX is this the right place??
	 */
	lapic_base = LAPIC_BASE;
	if (mp_cth != NULL)
		lapic_base = (paddr_t)mp_cth->apic_address;

	lapic_boot_init(lapic_base);

	/* check for use of 'default' configuration */
	if (mp_fps->mpfb1 != 0) {
		struct mpbios_proc pe;

		printf("%s: MP default configuration %d\n",
		    self->dv_xname, mp_fps->mpfb1);

		/* use default addresses; first the boot processor ... */
		pe.apic_id = cpu_number();
		pe.cpu_flags = PROCENTRY_FLAG_EN|PROCENTRY_FLAG_BP;
		pe.cpu_signature = cpu_info_primary.ci_signature;
		pe.feature_flags = cpu_info_primary.ci_feature_flags;

		mpbios_cpu((u_int8_t *)&pe, self);

		/* ... then the other processor (no BP flag) */
		pe.apic_id = 1 - cpu_number();
		pe.cpu_flags = PROCENTRY_FLAG_EN;

		mpbios_cpu((u_int8_t *)&pe, self);

		mpbios_ioapic((u_int8_t *)&default_ioapic, self);

		/* XXX */
		printf("%s: WARNING: interrupts not configured\n",
		    self->dv_xname);
		panic("lazy bum");
		return;
	} else {
		/*
		 * should not happen; mp_probe returns 0 in this case,
		 * but..
		 */
		if (mp_cth == NULL)
			panic("mpbios_scan: no config (can't happen?)");

		/*
		 * Walk the table once, counting items
		 */
		for (count = mp_cth->entry_count,
		    position = (const u_int8_t *)mp_cth + sizeof(*mp_cth),
		    end = position + mp_cth->base_len;
		    count-- && position < end;
		    position += mp_conf[type].length) {
			type = *position;
			if (type >= MPS_MCT_NTYPES) {
				printf("%s: unknown entry type %x"
				    " in MP config table\n",
				    self->dv_xname, type);
				/* truncate the walk at the bad entry */
				end = position;
				break;
			}
			mp_conf[type].count++;
		}

		/*
		 * Walk the table twice, counting int and bus entries
		 */
		for (count = mp_cth->entry_count,
		    intr_cnt = 15, /* presume all isa irqs missing */
		    position = (const u_int8_t *)mp_cth + sizeof(*mp_cth);
		    count-- && position < end;
		    position += mp_conf[type].length) {
			type = *position;
			if (type == MPS_MCT_BUS) {
				const struct mpbios_bus *bp =
				    (const struct mpbios_bus *)position;
				if (bp->bus_id >= mp_nbusses)
					mp_nbusses = bp->bus_id + 1;
			}
			/*
			 * Count actual interrupt instances.
			 * dst_apic_id of MPS_ALL_APICS means "wired to all
			 * apics of this type".
			 */
			if ((type == MPS_MCT_IOINT) ||
			    (type == MPS_MCT_LINT)) {
				const struct mpbios_int *ie =
				    (const struct mpbios_int *)position;
				if (ie->dst_apic_id != MPS_ALL_APICS)
					intr_cnt++;
				else if (type == MPS_MCT_IOINT)
					intr_cnt +=
					    mp_conf[MPS_MCT_IOAPIC].count;
				else
					intr_cnt +=
					    mp_conf[MPS_MCT_CPU].count;
			}
		}

		/*
		 * NOTE(review): both M_NOWAIT allocations are used
		 * unchecked below; an OOM here would fault in the
		 * re-walk.  Confirm whether a NULL check/panic belongs
		 * here.
		 */
		mp_busses = mallocarray(mp_nbusses, sizeof(struct mp_bus),
		    M_DEVBUF, M_NOWAIT|M_ZERO);
		mp_intrs = mallocarray(intr_cnt, sizeof(struct mp_intr_map),
		    M_DEVBUF, M_NOWAIT);

		/* re-walk the table, recording info of interest */
		position = (const u_int8_t *)mp_cth + sizeof(*mp_cth);
		count = mp_cth->entry_count;
		mp_nintrs = 0;

		while ((count--) && (position < end)) {
			switch (type = *(u_char *)position) {
			case MPS_MCT_CPU:
				mpbios_cpu(position, self);
				break;
			case MPS_MCT_BUS:
				mpbios_bus(position, self);
				break;
			case MPS_MCT_IOAPIC:
				mpbios_ioapic(position, self);
				break;
			case MPS_MCT_IOINT:
			case MPS_MCT_LINT:
				if (mpbios_int(position,
				    &mp_intrs[mp_nintrs]) == 0)
					mp_nintrs++;
				break;
			default:
				/* first walk already rejected these */
				printf("%s: unknown entry type %x "
				    "in MP config table\n",
				    self->dv_xname, type);
				/* NOTREACHED */
				return;
			}

			position += mp_conf[type].length;
		}
		if (mp_verbose && mp_cth->ext_len)
			printf("%s: MP WARNING: %d "
			    "bytes of extended entries not examined\n",
			    self->dv_xname, mp_cth->ext_len);
	}
	/* Clean up. */
	mp_fps = NULL;
	mpbios_unmap(&mp_fp_map);
	if (mp_cth != NULL) {
		mp_cth = NULL;
		mpbios_unmap(&mp_cfg_table_map);
	}

#if NPCI > 0
	if (pci_mode_detect() != 0)
		mpbios_intr_fixup();
#endif
}
static usbd_status alloc_all_jacks(struct umidi_softc *sc) { int i, j; struct umidi_endpoint *ep; struct umidi_jack *jack, **rjack; /* allocate/initialize structures */ sc->sc_jacks = mallocarray(sc->sc_in_num_jacks + sc->sc_out_num_jacks, sizeof(*sc->sc_out_jacks), M_USBDEV, M_WAITOK | M_CANFAIL); if (!sc->sc_jacks) return USBD_NOMEM; sc->sc_out_jacks = sc->sc_out_num_jacks ? sc->sc_jacks : NULL; sc->sc_in_jacks = sc->sc_in_num_jacks ? sc->sc_jacks+sc->sc_out_num_jacks : NULL; jack = &sc->sc_out_jacks[0]; for (i=0; i<sc->sc_out_num_jacks; i++) { jack->opened = 0; jack->binded = 0; jack->arg = NULL; jack->u.out.intr = NULL; jack->intr = 0; jack->cable_number = i; jack++; } jack = &sc->sc_in_jacks[0]; for (i=0; i<sc->sc_in_num_jacks; i++) { jack->opened = 0; jack->binded = 0; jack->arg = NULL; jack->u.in.intr = NULL; jack->cable_number = i; jack++; } /* assign each jacks to each endpoints */ jack = &sc->sc_out_jacks[0]; ep = &sc->sc_out_ep[0]; for (i=0; i<sc->sc_out_num_endpoints; i++) { rjack = &ep->jacks[0]; for (j=0; j<ep->num_jacks; j++) { *rjack = jack; jack->endpoint = ep; jack++; rjack++; } ep++; } jack = &sc->sc_in_jacks[0]; ep = &sc->sc_in_ep[0]; for (i=0; i<sc->sc_in_num_endpoints; i++) { rjack = &ep->jacks[0]; for (j=0; j<ep->num_jacks; j++) { *rjack = jack; jack->endpoint = ep; jack++; rjack++; } ep++; } return USBD_NORMAL_COMPLETION; }
/*
 * Endpoint discovery for class-compliant ("genuine") USB MIDI devices:
 * walk the whole configuration descriptor pairing each bulk endpoint
 * with its class-specific MS bulk data descriptor, record the embedded
 * jack counts, then sort the endpoint records so out endpoints precede
 * in endpoints (ascending address within each direction).  Returns
 * USBD_NOMEM on allocation failure.
 */
static usbd_status
alloc_all_endpoints_genuine(struct umidi_softc *sc)
{
	usb_interface_descriptor_t *interface_desc;
	usb_config_descriptor_t *config_desc;
	usb_descriptor_t *desc;
	size_t remain, descsize;
	struct umidi_endpoint *p, *q, *lowest, *endep, tmpep;
	int epaddr, eppacketsize, num_ep;

	interface_desc = usbd_get_interface_descriptor(sc->sc_iface);
	num_ep = interface_desc->bNumEndpoints;
	sc->sc_endpoints = p = mallocarray(num_ep,
	    sizeof(struct umidi_endpoint), M_USBDEV, M_WAITOK | M_CANFAIL);
	if (!p)
		return USBD_NOMEM;

	sc->sc_out_num_endpoints = sc->sc_in_num_endpoints = 0;
	epaddr = -1;	/* -1: no bulk endpoint pending a CS descriptor */

	/* get the list of endpoints for midi stream */
	config_desc = usbd_get_config_descriptor(sc->sc_udev);
	desc = (usb_descriptor_t *) config_desc;
	remain = (size_t)UGETW(config_desc->wTotalLength);
	while (remain>=sizeof(usb_descriptor_t)) {
		descsize = desc->bLength;
		/* malformed length would walk past the buffer — stop */
		if (descsize>remain || descsize==0)
			break;
		if (desc->bDescriptorType==UDESC_ENDPOINT &&
		    remain>=USB_ENDPOINT_DESCRIPTOR_SIZE &&
		    UE_GET_XFERTYPE(TO_EPD(desc)->bmAttributes) == UE_BULK) {
			/* remember it until the CS descriptor arrives */
			epaddr = TO_EPD(desc)->bEndpointAddress;
			eppacketsize = UGETW(TO_EPD(desc)->wMaxPacketSize);
		} else if (desc->bDescriptorType==UDESC_CS_ENDPOINT &&
		    remain>=UMIDI_CS_ENDPOINT_DESCRIPTOR_SIZE &&
		    epaddr!=-1) {
			if (num_ep>0) {
				num_ep--;
				p->sc = sc;
				p->addr = epaddr;
				p->packetsize = eppacketsize;
				p->num_jacks =
				    TO_CSEPD(desc)->bNumEmbMIDIJack;
				if (UE_GET_DIR(epaddr)==UE_DIR_OUT) {
					sc->sc_out_num_endpoints++;
					sc->sc_out_num_jacks += p->num_jacks;
				} else {
					sc->sc_in_num_endpoints++;
					sc->sc_in_num_jacks += p->num_jacks;
				}
				p++;
			}
		} else
			/* anything else invalidates the pending endpoint */
			epaddr = -1;
		desc = NEXT_D(desc);
		remain-=descsize;
	}

	/* sort endpoints: selection sort, out first, then by address */
	num_ep = sc->sc_out_num_endpoints + sc->sc_in_num_endpoints;
	p = sc->sc_endpoints;
	endep = p + num_ep;
	while (p<endep) {
		lowest = p;
		for (q=p+1; q<endep; q++) {
			if ((UE_GET_DIR(lowest->addr)==UE_DIR_IN &&
			    UE_GET_DIR(q->addr)==UE_DIR_OUT) ||
			    ((UE_GET_DIR(lowest->addr)==
			    UE_GET_DIR(q->addr)) &&
			    (UE_GET_ADDR(lowest->addr)>
			    UE_GET_ADDR(q->addr))))
				lowest = q;
		}
		if (lowest != p) {
			memcpy((void *)&tmpep, (void *)p, sizeof(tmpep));
			memcpy((void *)p, (void *)lowest, sizeof(tmpep));
			memcpy((void *)lowest, (void *)&tmpep,
			    sizeof(tmpep));
		}
		p->num_open = 0;
		p++;
	}

	sc->sc_out_ep = sc->sc_out_num_endpoints ? sc->sc_endpoints : NULL;
	sc->sc_in_ep = sc->sc_in_num_endpoints ?
	    sc->sc_endpoints+sc->sc_out_num_endpoints : NULL;

	return USBD_NORMAL_COMPLETION;
}
/*
 * can't use bus_space_xxx as we don't have a bus handle ...
 *
 * Attach an I/O APIC: map its indirect register window, fill in the
 * pic ops, read the version/size register, allocate the per-pin state
 * and, when verbose, correct a misprogrammed APIC id.
 */
void
ioapic_attach(struct device *parent, struct device *self, void *aux)
{
	struct ioapic_softc *sc = (struct ioapic_softc *)self;
	struct apic_attach_args *aaa = (struct apic_attach_args *)aux;
	int apic_id;
	bus_space_handle_t bh;
	u_int32_t ver_sz;
	int i;

	sc->sc_flags = aaa->flags;
	sc->sc_apicid = aaa->apic_id;

	printf(": apid %d", aaa->apic_id);

	/* only attach one softc per apic id */
	if (ioapic_find(aaa->apic_id) != NULL) {
		printf(", duplicate apic id (ignored)\n");
		return;
	}

	ioapic_add(sc);

	printf(" pa 0x%lx", aaa->apic_address);

	if (x86_mem_add_mapping(aaa->apic_address, PAGE_SIZE, 0, &bh) != 0) {
		printf(", map failed\n");
		return;
	}
	/* index and data windows of the indirect register interface */
	sc->sc_reg = (volatile u_int32_t *)(bh + IOAPIC_REG);
	sc->sc_data = (volatile u_int32_t *)(bh + IOAPIC_DATA);

	sc->sc_pic.pic_type = PIC_IOAPIC;
#ifdef MULTIPROCESSOR
	mtx_init(&sc->sc_pic.pic_mutex, IPL_NONE);
#endif
	sc->sc_pic.pic_hwmask = ioapic_hwmask;
	sc->sc_pic.pic_hwunmask = ioapic_hwunmask;
	sc->sc_pic.pic_addroute = ioapic_addroute;
	sc->sc_pic.pic_delroute = ioapic_delroute;
	sc->sc_pic.pic_edge_stubs = ioapic_edge_stubs;
	sc->sc_pic.pic_level_stubs = ioapic_level_stubs;

	ver_sz = ioapic_read(sc, IOAPIC_VER);

	sc->sc_apic_vers = (ver_sz & IOAPIC_VER_MASK) >> IOAPIC_VER_SHIFT;
	sc->sc_apic_sz = (ver_sz & IOAPIC_MAX_MASK) >> IOAPIC_MAX_SHIFT;
	/* register reports the maximum entry index, not the count */
	sc->sc_apic_sz++;

	if (aaa->apic_vecbase != -1)
		sc->sc_apic_vecbase = aaa->apic_vecbase;
	else {
		/*
		 * XXX this assumes ordering of ioapics in the table.
		 * Only needed for broken BIOS workaround (see mpbios.c)
		 */
		sc->sc_apic_vecbase = ioapic_vecbase;
		ioapic_vecbase += sc->sc_apic_sz;
	}

	if (mp_verbose) {
		printf(", %s mode",
		    aaa->flags & IOAPIC_PICMODE ? "PIC" : "virtual wire");
	}

	printf(", version %x, %d pins\n", sc->sc_apic_vers, sc->sc_apic_sz);

	apic_id = (ioapic_read(sc, IOAPIC_ID) & IOAPIC_ID_MASK) >>
	    IOAPIC_ID_SHIFT;

	/* one pin record per redirection entry */
	sc->sc_pins = mallocarray(sc->sc_apic_sz,
	    sizeof(struct ioapic_pin), M_DEVBUF, M_WAITOK);

	for (i = 0; i < sc->sc_apic_sz; i++) {
		sc->sc_pins[i].ip_next = NULL;
		sc->sc_pins[i].ip_map = NULL;
		sc->sc_pins[i].ip_vector = 0;
		sc->sc_pins[i].ip_type = IST_NONE;
	}

	/*
	 * In case the APIC is not initialized to the correct ID
	 * do it now.
	 * Maybe we should record the original ID for interrupt
	 * mapping later ...
	 */
	if (mp_verbose && apic_id != sc->sc_apicid) {
		printf("%s: misconfigured as apic %d",
		    sc->sc_pic.pic_name, apic_id);
		ioapic_set_id(sc);
	}
#if 0
	/* output of this was boring. */
	if (mp_verbose)
		for (i = 0; i < sc->sc_apic_sz; i++)
			ioapic_print_redir(sc, "boot", i);
#endif
}
void mpic_attach(struct device *parent, struct device *self, void *args) { struct mpic_softc *sc = (struct mpic_softc *)self; struct armv7_attach_args *aa = args; uint32_t main, mainsize, cpu, cpusize; struct fdt_memory mem; int i; mpic = sc; arm_init_smask(); if (fdt_get_memory_address(aa->aa_node, 0, &mem)) panic("%s: cannot extract main memory", sc->sc_dev.dv_xname); main = mem.addr; mainsize = mem.size; if (fdt_get_memory_address(aa->aa_node, 1, &mem)) panic("%s: cannot extract cpu memory", sc->sc_dev.dv_xname); cpu = mem.addr; cpusize = mem.size; if (fdt_node_property_int(aa->aa_node, "#interrupt-cells", &sc->sc_ncells) != 1) panic("%s: no #interrupt-cells property", sc->sc_dev.dv_xname); sc->sc_iot = aa->aa_iot; if (bus_space_map(sc->sc_iot, main, mainsize, 0, &sc->sc_m_ioh)) panic("%s: main bus_space_map failed!", __func__); if (bus_space_map(sc->sc_iot, cpu, cpusize, 0, &sc->sc_c_ioh)) panic("%s: cpu bus_space_map failed!", __func__); evcount_attach(&sc->sc_spur, "irq1023/spur", NULL); sc->sc_nintr = (bus_space_read_4(sc->sc_iot, sc->sc_m_ioh, MPIC_CTRL) >> 2) & 0x3ff; printf(" nirq %d\n", sc->sc_nintr); /* Disable all interrupts */ for (i = 0; i < sc->sc_nintr; i++) { bus_space_write_4(sc->sc_iot, sc->sc_m_ioh, MPIC_ICE, i); bus_space_write_4(sc->sc_iot, sc->sc_c_ioh, MPIC_ISM, i); } /* Clear pending IPIs */ bus_space_write_4(sc->sc_iot, sc->sc_c_ioh, MPIC_DOORBELL_CAUSE, 0); /* Enable hardware priorization selection */ bus_space_write_4(sc->sc_iot, sc->sc_m_ioh, MPIC_CTRL, MPIC_CTRL_PRIO_EN); sc->sc_mpic_handler = mallocarray(sc->sc_nintr, sizeof(*sc->sc_mpic_handler), M_DEVBUF, M_ZERO | M_NOWAIT); for (i = 0; i < sc->sc_nintr; i++) { TAILQ_INIT(&sc->sc_mpic_handler[i].is_list); } mpic_setipl(IPL_HIGH); /* XXX ??? 
*/ mpic_calc_mask(); /* insert self as interrupt handler */ arm_set_intr_handler(mpic_splraise, mpic_spllower, mpic_splx, mpic_setipl, mpic_intr_establish, mpic_intr_disestablish, mpic_intr_string, mpic_irq_handler); arm_set_intr_handler_fdt(aa->aa_node, mpic_intr_establish_fdt_idx); /* enable interrupts */ intr_enable(); }
/*
 * Attach a zs channel as a tty: wire the channel state to this softc,
 * handle console/kgdb takeover, allocate the tty and receive ring, and
 * perform the initial hardware setup (console parameter programming or
 * channel reset, then DTR).
 */
void
zstty_attach(struct device *parent, struct device *self, void *aux)
{
	struct zsc_softc *zsc = (struct zsc_softc *)parent;
	struct zstty_softc *zst = (struct zstty_softc *)self;
	struct cfdata *cf = self->dv_cfdata;
	struct zsc_attach_args *args = aux;
	struct zs_chanstate *cs;
	struct tty *tp;
	int channel, s, tty_unit;
	dev_t dev;
	const char *i, *o;
	int dtr_on;
	int resetbit;

	timeout_set(&zst->zst_diag_ch, zstty_diag, zst);

	tty_unit = zst->zst_dev.dv_unit;
	channel = args->channel;
	cs = zsc->zsc_cs[channel];
	cs->cs_private = zst;
	cs->cs_ops = &zsops_tty;

	zst->zst_cs = cs;
	zst->zst_swflags = cf->cf_flags;	/* softcar, etc. */
	zst->zst_hwflags = args->hwflags;
	dev = makedev(zs_major, tty_unit);

	if (zst->zst_swflags)
		printf(" flags 0x%x", zst->zst_swflags);

	/* no DCD wire: force carrier so opens don't block */
	if (ISSET(zst->zst_hwflags, ZS_HWFLAG_NO_DCD))
		SET(zst->zst_swflags, TIOCFLAG_SOFTCAR);

	/*
	 * Check whether we serve as a console device.
	 * XXX - split console input/output channels aren't
	 *	 supported yet on /dev/console
	 */
	i = o = NULL;
	if ((zst->zst_hwflags & ZS_HWFLAG_CONSOLE_INPUT) != 0) {
		i = " input";
		if ((args->hwflags & ZS_HWFLAG_USE_CONSDEV) != 0) {
			args->consdev->cn_dev = dev;
			cn_tab->cn_pollc = args->consdev->cn_pollc;
			cn_tab->cn_getc = args->consdev->cn_getc;
		}
		cn_tab->cn_dev = dev;
	}
	if ((zst->zst_hwflags & ZS_HWFLAG_CONSOLE_OUTPUT) != 0) {
		o = " output";
		if ((args->hwflags & ZS_HWFLAG_USE_CONSDEV) != 0) {
			cn_tab->cn_putc = args->consdev->cn_putc;
		}
		cn_tab->cn_dev = dev;
	}
	if (i != NULL || o != NULL) {
		printf(": console%s", i ? (o ? "" : i) : o);
	}

#ifdef KGDB
	if (zs_check_kgdb(cs, dev)) {
		/*
		 * Allow kgdb to "take over" this port.  Returns true
		 * if this serial port is in-use by kgdb.
		 */
		printf(": kgdb\n");
		/*
		 * This is the kgdb port (exclusive use)
		 * so skip the normal attach code.
		 */
		return;
	}
#endif

#if defined(__sparc__) || defined(__sparc64__)
	if (strcmp(args->type, "keyboard") == 0 ||
	    strcmp(args->type, "mouse") == 0)
		printf(": %s", args->type);
#endif

	printf("\n");

	tp = ttymalloc(0);
	tp->t_dev = dev;
	tp->t_oproc = zsstart;
	tp->t_param = zsparam;
	tp->t_hwiflow = zshwiflow;

	zst->zst_tty = tp;
	/*
	 * Receive ring: 2 bytes per slot — presumably a status/data
	 * pair per received character; ebuf marks its end.
	 */
	zst->zst_rbuf = mallocarray(zstty_rbuf_size, 2, M_DEVBUF, M_WAITOK);
	zst->zst_ebuf = zst->zst_rbuf + (zstty_rbuf_size * 2);
	/* Disable the high water mark. */
	zst->zst_r_hiwat = 0;
	zst->zst_r_lowat = 0;
	zst->zst_rbget = zst->zst_rbput = zst->zst_rbuf;
	zst->zst_rbavail = zstty_rbuf_size;

	/* if there are no enable/disable functions, assume the device
	   is always enabled */
	if (!cs->enable)
		cs->enabled = 1;

	/*
	 * Hardware init
	 */
	dtr_on = 0;
	resetbit = 0;
	if (ISSET(zst->zst_hwflags, ZS_HWFLAG_CONSOLE)) {
		/* Call zsparam similar to open. */
		struct termios t;

		/* Wait a while for previous console output to complete */
		DELAY(10000);

		/* Setup the "new" parameters in t. */
		t.c_ispeed = 0;
		t.c_ospeed = cs->cs_defspeed;
		t.c_cflag = cs->cs_defcflag;

		s = splzs();

		/*
		 * Turn on receiver and status interrupts.
		 * We defer the actual write of the register to zsparam(),
		 * but we must make sure status interrupts are turned on by
		 * the time zsparam() reads the initial rr0 state.
		 */
		SET(cs->cs_preg[1], ZSWR1_RIE | ZSWR1_TIE | ZSWR1_SIE);

		splx(s);

		/* Make sure zsparam will see changes. */
		tp->t_ospeed = 0;
		(void)zsparam(tp, &t);

		/* Make sure DTR is on now. */
		dtr_on = 1;
	} else if (!ISSET(zst->zst_hwflags, ZS_HWFLAG_NORESET)) {
		/* Not the console; may need reset. */
		resetbit = (channel == 0) ? ZSWR9_A_RESET : ZSWR9_B_RESET;
	}

	s = splzs();
	if (resetbit)
		zs_write_reg(cs, 9, resetbit);
	zs_modem(zst, dtr_on);
	splx(s);
}
/*
 * Ioctl routine for generic ppp devices.
 *
 * Dispatches the ppp-specific ioctls on sc: queue/flag/MRU queries and
 * updates, compressor (CCP) configuration, per-protocol mode control,
 * idle times and BPF pass/active filter installation.  Privileged
 * operations require suser().  Returns 0 on success, an errno on
 * failure, or -1 for commands not handled here.
 */
int
pppioctl(struct ppp_softc *sc, u_long cmd, caddr_t data, int flag,
    struct proc *p)
{
	int s, error, flags, mru, npx;
	u_int nb;
	struct ppp_option_data *odp;
	struct compressor **cp;
	struct npioctl *npi;
	time_t t;
#if NBPFILTER > 0
	struct bpf_program *bp, *nbp;
	struct bpf_insn *newcode, *oldcode;
	int newcodelen;
#endif
#ifdef PPP_COMPRESS
	u_char ccp_option[CCP_MAX_OPTION_LENGTH];
#endif

	switch (cmd) {
	case FIONREAD:
		/* bytes waiting in the input queue */
		*(int *)data = mq_len(&sc->sc_inq);
		break;

	case PPPIOCGUNIT:
		*(int *)data = sc->sc_unit;	/* XXX */
		break;

	case PPPIOCGFLAGS:
		*(u_int *)data = sc->sc_flags;
		break;

	case PPPIOCSFLAGS:
		if ((error = suser(p, 0)) != 0)
			return (error);
		flags = *(int *)data & SC_MASK;
		s = splsoftnet();
#ifdef PPP_COMPRESS
		/* clearing SC_CCP_OPEN shuts CCP down first */
		if (sc->sc_flags & SC_CCP_OPEN && !(flags & SC_CCP_OPEN))
			ppp_ccp_closed(sc);
#endif
		splnet();
		sc->sc_flags = (sc->sc_flags & ~SC_MASK) | flags;
		splx(s);
		break;

	case PPPIOCSMRU:
		if ((error = suser(p, 0)) != 0)
			return (error);
		mru = *(int *)data;
		/* silently ignore out-of-range values */
		if (mru >= PPP_MRU && mru <= PPP_MAXMRU)
			sc->sc_mru = mru;
		break;

	case PPPIOCGMRU:
		*(int *)data = sc->sc_mru;
		break;

#ifdef VJC
	case PPPIOCSMAXCID:
		if ((error = suser(p, 0)) != 0)
			return (error);
		if (sc->sc_comp) {
			s = splsoftnet();
			sl_compress_setup(sc->sc_comp, *(int *)data);
			splx(s);
		}
		break;
#endif

	case PPPIOCXFERUNIT:
		if ((error = suser(p, 0)) != 0)
			return (error);
		sc->sc_xfer = p->p_p->ps_pid;
		break;

#ifdef PPP_COMPRESS
	case PPPIOCSCOMPRESS:
		if ((error = suser(p, 0)) != 0)
			return (error);
		odp = (struct ppp_option_data *) data;
		nb = odp->length;
		if (nb > sizeof(ccp_option))
			nb = sizeof(ccp_option);
		if ((error = copyin(odp->ptr, ccp_option, nb)) != 0)
			return (error);
		if (ccp_option[1] < 2)	/* preliminary check on the length byte */
			return (EINVAL);
		/* find the handler registered for this CCP protocol byte */
		for (cp = ppp_compressors; *cp != NULL; ++cp)
			if ((*cp)->compress_proto == ccp_option[0]) {
				/*
				 * Found a handler for the protocol - try to allocate
				 * a compressor or decompressor.
				 */
				error = 0;
				if (odp->transmit) {
					s = splsoftnet();
					if (sc->sc_xc_state != NULL)
						(*sc->sc_xcomp->comp_free)(
						    sc->sc_xc_state);
					sc->sc_xcomp = *cp;
					sc->sc_xc_state =
					    (*cp)->comp_alloc(ccp_option, nb);
					if (sc->sc_xc_state == NULL) {
						if (sc->sc_flags & SC_DEBUG)
							printf("%s: comp_alloc failed\n",
							    sc->sc_if.if_xname);
						error = ENOBUFS;
					}
					splnet();
					sc->sc_flags &= ~SC_COMP_RUN;
					splx(s);
				} else {
					s = splsoftnet();
					if (sc->sc_rc_state != NULL)
						(*sc->sc_rcomp->decomp_free)(
						    sc->sc_rc_state);
					sc->sc_rcomp = *cp;
					sc->sc_rc_state =
					    (*cp)->decomp_alloc(ccp_option, nb);
					if (sc->sc_rc_state == NULL) {
						if (sc->sc_flags & SC_DEBUG)
							printf("%s: decomp_alloc failed\n",
							    sc->sc_if.if_xname);
						error = ENOBUFS;
					}
					splnet();
					sc->sc_flags &= ~SC_DECOMP_RUN;
					splx(s);
				}
				return (error);
			}
		if (sc->sc_flags & SC_DEBUG)
			printf("%s: no compressor for [%x %x %x], %x\n",
			    sc->sc_if.if_xname, ccp_option[0], ccp_option[1],
			    ccp_option[2], nb);
		return (EINVAL);	/* no handler found */
#endif /* PPP_COMPRESS */

	case PPPIOCGNPMODE:
	case PPPIOCSNPMODE:
		npi = (struct npioctl *) data;
		switch (npi->protocol) {
		case PPP_IP:
			npx = NP_IP;
			break;
		default:
			return EINVAL;
		}
		if (cmd == PPPIOCGNPMODE) {
			npi->mode = sc->sc_npmode[npx];
		} else {
			if ((error = suser(p, 0)) != 0)
				return (error);
			if (npi->mode != sc->sc_npmode[npx]) {
				s = splsoftnet();
				sc->sc_npmode[npx] = npi->mode;
				/* leaving QUEUE mode releases held packets */
				if (npi->mode != NPMODE_QUEUE) {
					ppp_requeue(sc);
					(*sc->sc_start)(sc);
				}
				splx(s);
			}
		}
		break;

	case PPPIOCGIDLE:
		s = splsoftnet();
		t = time_second;
		((struct ppp_idle *)data)->xmit_idle = t - sc->sc_last_sent;
		((struct ppp_idle *)data)->recv_idle = t - sc->sc_last_recv;
		splx(s);
		break;

#if NBPFILTER > 0
	case PPPIOCSPASS:
	case PPPIOCSACTIVE:
		/* validate and copy in the new filter before swapping it */
		nbp = (struct bpf_program *) data;
		if ((unsigned) nbp->bf_len > BPF_MAXINSNS)
			return EINVAL;
		newcodelen = nbp->bf_len * sizeof(struct bpf_insn);
		if (nbp->bf_len != 0) {
			newcode = mallocarray(nbp->bf_len,
			    sizeof(struct bpf_insn), M_DEVBUF, M_WAITOK);
			if ((error = copyin((caddr_t)nbp->bf_insns,
			    (caddr_t)newcode, newcodelen)) != 0) {
				free(newcode, M_DEVBUF, 0);
				return error;
			}
			if (!bpf_validate(newcode, nbp->bf_len)) {
				free(newcode, M_DEVBUF, 0);
				return EINVAL;
			}
		} else
			newcode = 0;
		bp = (cmd == PPPIOCSPASS) ?
		    &sc->sc_pass_filt : &sc->sc_active_filt;
		oldcode = bp->bf_insns;
		/* swap the program in atomically w.r.t. the net stack */
		s = splnet();
		bp->bf_len = nbp->bf_len;
		bp->bf_insns = newcode;
		splx(s);
		if (oldcode != 0)
			free(oldcode, M_DEVBUF, 0);
		break;
#endif

	default:
		return (-1);
	}
	return (0);
}
/*
 * exec_script_makecmds(): Check if it's an executable shell script.
 *
 * Given a proc pointer and an exec package pointer, see if the referent
 * of the epp is a shell script.  If it is, then set things up so that
 * the script can be run.  This involves preparing the address space
 * and arguments for the shell which will run the script.
 *
 * This function is ultimately responsible for creating a set of vmcmds
 * which can be used to build the process's vm space and inserting them
 * into the exec package.
 *
 * Returns 0 on success (the recursive check_exec() of the interpreter
 * succeeded), ENOEXEC if the header is not a "#!" script, or an errno
 * from fd allocation / copyinstr / check_exec on failure.
 */
int
exec_script_makecmds(struct proc *p, struct exec_package *epp)
{
	int error, hdrlinelen, shellnamelen, shellarglen;
	char *hdrstr = epp->ep_hdr;
	char *cp, *shellname, *shellarg, *oldpnbuf;
	char **shellargp = NULL, **tmpsap;
	struct vnode *scriptvp;
	uid_t script_uid = -1;
	gid_t script_gid = -1;
	u_short script_sbits;

	/*
	 * remember the old vp and pnbuf for later, so we can restore
	 * them if check_exec() fails.
	 */
	scriptvp = epp->ep_vp;
	oldpnbuf = epp->ep_ndp->ni_cnd.cn_pnbuf;

	/*
	 * if the magic isn't that of a shell script, or we've already
	 * done shell script processing for this exec, punt on it.
	 * EXEC_INDIR guards against "#!" loops (script whose interpreter
	 * is itself a script).
	 */
	if ((epp->ep_flags & EXEC_INDIR) != 0 ||
	    epp->ep_hdrvalid < EXEC_SCRIPT_MAGICLEN ||
	    strncmp(hdrstr, EXEC_SCRIPT_MAGIC, EXEC_SCRIPT_MAGICLEN))
		return ENOEXEC;

	/*
	 * check that the shell spec is terminated by a newline, and that
	 * it isn't too large.  Don't modify the buffer unless we're ready
	 * to commit to handling it.  (The latter requirement means that
	 * we have to check for both spaces and tabs later on.)
	 */
	hdrlinelen = min(epp->ep_hdrvalid, MAXINTERP);

	/* NUL-terminate the "#!" line in place at the first newline */
	for (cp = hdrstr + EXEC_SCRIPT_MAGICLEN; cp < hdrstr + hdrlinelen;
	    cp++) {
		if (*cp == '\n') {
			*cp = '\0';
			break;
		}
	}
	/* no newline inside the valid header: not a script we can run */
	if (cp >= hdrstr + hdrlinelen)
		return ENOEXEC;

	shellname = NULL;
	shellarg = NULL;
	shellarglen = 0;

	/* strip spaces before the shell name */
	for (cp = hdrstr + EXEC_SCRIPT_MAGICLEN; *cp == ' ' || *cp == '\t';
	    cp++)
		;

	/* collect the shell name; remember its length for later */
	shellname = cp;
	shellnamelen = 0;
	if (*cp == '\0')
		goto check_shell;
	for ( /* cp = cp */ ; *cp != '\0' && *cp != ' ' && *cp != '\t'; cp++)
		shellnamelen++;
	if (*cp == '\0')
		goto check_shell;
	*cp++ = '\0';

	/* skip spaces before any argument */
	for ( /* cp = cp */ ; *cp == ' ' || *cp == '\t'; cp++)
		;
	if (*cp == '\0')
		goto check_shell;
	/*
	 * collect the shell argument.  everything after the shell name
	 * is passed as ONE argument; that's the correct (historical)
	 * behaviour.
	 */
	shellarg = cp;
	for ( /* cp = cp */ ; *cp != '\0'; cp++)
		shellarglen++;
	*cp++ = '\0';

check_shell:
	/*
	 * MNT_NOSUID and STRC are already taken care of by check_exec,
	 * so we don't need to worry about them now or later.
	 * Remember the script's set-id bits/owner so they can be applied
	 * to the interpreter's vattr on success below.
	 */
	script_sbits = epp->ep_vap->va_mode & (VSUID | VSGID);
	if (script_sbits != 0) {
		script_uid = epp->ep_vap->va_uid;
		script_gid = epp->ep_vap->va_gid;
	}
	/*
	 * if the script isn't readable, or it's set-id, then we've
	 * gotta supply a "/dev/fd/..." for the shell to read.
	 * Note that stupid shells (csh) do the wrong thing, and
	 * close all open fd's when they start.  That kills this
	 * method of implementing "safe" set-id and x-only scripts.
	 */
	vn_lock(scriptvp, LK_EXCLUSIVE|LK_RETRY, p);
	error = VOP_ACCESS(scriptvp, VREAD, p->p_ucred, p);
	VOP_UNLOCK(scriptvp, p);
	if (error == EACCES || script_sbits) {
		struct file *fp;

#ifdef DIAGNOSTIC
		if (epp->ep_flags & EXEC_HASFD)
			panic("exec_script_makecmds: epp already has a fd");
#endif

		fdplock(p->p_fd);
		error = falloc(p, &fp, &epp->ep_fd);
		fdpunlock(p->p_fd);
		if (error)
			goto fail;

		/* the new fd holds the script vnode open for the shell */
		epp->ep_flags |= EXEC_HASFD;
		fp->f_type = DTYPE_VNODE;
		fp->f_ops = &vnops;
		fp->f_data = (caddr_t) scriptvp;
		fp->f_flag = FREAD;
		FILE_SET_MATURE(fp, p);
	}

	/* set up the parameters for the recursive check_exec() call */
	epp->ep_ndp->ni_dirfd = AT_FDCWD;
	epp->ep_ndp->ni_dirp = shellname;
	epp->ep_ndp->ni_segflg = UIO_SYSSPACE;
	epp->ep_flags |= EXEC_INDIR;

	/*
	 * and set up the fake args list, for later.
	 * 4 slots: shell name, optional shell arg, script path, NULL.
	 */
	shellargp = mallocarray(4, sizeof(char *), M_EXEC, M_WAITOK);
	tmpsap = shellargp;
	*tmpsap = malloc(shellnamelen + 1, M_EXEC, M_WAITOK);
	strlcpy(*tmpsap++, shellname, shellnamelen + 1);
	if (shellarg != NULL) {
		*tmpsap = malloc(shellarglen + 1, M_EXEC, M_WAITOK);
		strlcpy(*tmpsap++, shellarg, shellarglen + 1);
	}
	*tmpsap = malloc(MAXPATHLEN, M_EXEC, M_WAITOK);
	if ((epp->ep_flags & EXEC_HASFD) == 0) {
		error = copyinstr(epp->ep_name, *tmpsap, MAXPATHLEN, NULL);
		if (error != 0) {
			/*
			 * terminate the list after the just-malloc'd slot
			 * so the fail path's free loop stops correctly
			 */
			*(tmpsap + 1) = NULL;
			goto fail;
		}
	} else
		snprintf(*tmpsap, MAXPATHLEN, "/dev/fd/%d", epp->ep_fd);
	tmpsap++;
	*tmpsap = NULL;

	/*
	 * mark the header we have as invalid; check_exec will read
	 * the header from the new executable
	 */
	epp->ep_hdrvalid = 0;

	if ((error = check_exec(p, epp)) == 0) {
		/* note that we've clobbered the header */
		epp->ep_flags |= EXEC_DESTR;

		/*
		 * It succeeded.  Unlock the script and
		 * close it if we aren't using it any more.
		 * Also, set things up so that the fake args
		 * list will be used.
		 */
		if ((epp->ep_flags & EXEC_HASFD) == 0)
			vn_close(scriptvp, FREAD, p->p_ucred, p);

		/* free the old pathname buffer */
		pool_put(&namei_pool, oldpnbuf);

		epp->ep_flags |= (EXEC_HASARGL | EXEC_SKIPARG);
		epp->ep_fa = shellargp;
		/*
		 * set things up so that set-id scripts will be
		 * handled appropriately
		 */
		epp->ep_vap->va_mode |= script_sbits;
		if (script_sbits & VSUID)
			epp->ep_vap->va_uid = script_uid;
		if (script_sbits & VSGID)
			epp->ep_vap->va_gid = script_gid;
		return (0);
	}

	/* XXX oldpnbuf not set for "goto fail" path */
	epp->ep_ndp->ni_cnd.cn_pnbuf = oldpnbuf;
fail:
	/* note that we've clobbered the header */
	epp->ep_flags |= EXEC_DESTR;

	/* kill the opened file descriptor, else close the file */
	if (epp->ep_flags & EXEC_HASFD) {
		epp->ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		/* fdrelease also drops the file's vnode reference */
		(void) fdrelease(p, epp->ep_fd);
		fdpunlock(p->p_fd);
	} else
		vn_close(scriptvp, FREAD, p->p_ucred, p);

	pool_put(&namei_pool, epp->ep_ndp->ni_cnd.cn_pnbuf);

	/* free the fake arg list, because we're not returning it */
	if ((tmpsap = shellargp) != NULL) {
		while (*tmpsap != NULL) {
			free(*tmpsap, M_EXEC, 0);
			tmpsap++;
		}
		free(shellargp, M_EXEC, 4 * sizeof(char *));
	}

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);
	return error;
}
/*
 * ugenopen: open one endpoint (or the control endpoint) of a generic
 * USB device.  Opens the IN and/or OUT pipes selected by 'flag' and,
 * for interrupt/isochronous IN endpoints, allocates the receive
 * buffering before starting transfers.
 *
 * Returns 0 on success; ENXIO for a missing unit/endpoint, EBUSY if
 * already open, EINVAL for unusable endpoint parameters, EIO on pipe
 * open failure, ENOMEM on isoc transfer allocation failure.
 */
int
ugenopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct ugen_softc *sc;
	int unit = UGENUNIT(dev);
	int endpt = UGENENDPOINT(dev);
	usb_endpoint_descriptor_t *edesc;
	struct ugen_endpoint *sce;
	int dir, isize;
	usbd_status err;
	struct usbd_xfer *xfer;
	void *buf;
	int i, j;

	if (unit >= ugen_cd.cd_ndevs)
		return (ENXIO);
	sc = ugen_cd.cd_devs[unit];
	if (sc == NULL)
		return (ENXIO);

	DPRINTFN(5, ("ugenopen: flag=%d, mode=%d, unit=%d endpt=%d\n",
	    flag, mode, unit, endpt));

	if (sc == NULL || usbd_is_dying(sc->sc_udev))
		return (ENXIO);

	if (sc->sc_is_open[endpt])
		return (EBUSY);

	/* the control endpoint needs no pipe setup of its own */
	if (endpt == USB_CONTROL_ENDPOINT) {
		sc->sc_is_open[USB_CONTROL_ENDPOINT] = 1;
		return (0);
	}

	/* Make sure there are pipes for all directions. */
	for (dir = OUT; dir <= IN; dir++) {
		if (flag & (dir == OUT ? FWRITE : FREAD)) {
			sce = &sc->sc_endpoints[endpt][dir];
			if (sce == 0 || sce->edesc == 0)
				return (ENXIO);
		}
	}

	/* Actually open the pipes. */
	/* XXX Should back out properly if it fails. */
	for (dir = OUT; dir <= IN; dir++) {
		if (!(flag & (dir == OUT ? FWRITE : FREAD)))
			continue;
		sce = &sc->sc_endpoints[endpt][dir];
		sce->state = 0;
		sce->timeout = USBD_NO_TIMEOUT;
		DPRINTFN(5, ("ugenopen: sc=%p, endpt=%d, dir=%d, sce=%p\n",
		    sc, endpt, dir, sce));
		edesc = sce->edesc;
		switch (edesc->bmAttributes & UE_XFERTYPE) {
		case UE_INTERRUPT:
			if (dir == OUT) {
				/* OUT interrupt: plain pipe, no buffering */
				err = usbd_open_pipe(sce->iface,
				    edesc->bEndpointAddress, 0, &sce->pipeh);
				if (err)
					return (EIO);
				break;
			}
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			sce->ibuf = malloc(isize, M_USBDEV, M_WAITOK);
			DPRINTFN(5, ("ugenopen: intr endpt=%d,isize=%d\n",
			    endpt, isize));
			/* clist queues incoming interrupt data for read(2) */
			clalloc(&sce->q, UGEN_IBSIZE, 0);
			err = usbd_open_pipe_intr(sce->iface,
			    edesc->bEndpointAddress, USBD_SHORT_XFER_OK,
			    &sce->pipeh, sce, sce->ibuf, isize, ugenintr,
			    USBD_DEFAULT_INTERVAL);
			if (err) {
				free(sce->ibuf, M_USBDEV, 0);
				clfree(&sce->q);
				return (EIO);
			}
			DPRINTFN(5, ("ugenopen: interrupt open done\n"));
			break;
		case UE_BULK:
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err)
				return (EIO);
			break;
		case UE_ISOCHRONOUS:
			if (dir == OUT)
				return (EINVAL);
			isize = UGETW(edesc->wMaxPacketSize);
			if (isize == 0)	/* shouldn't happen */
				return (EINVAL);
			/* ring buffer of UGEN_NISOFRAMES frames */
			sce->ibuf = mallocarray(isize, UGEN_NISOFRAMES,
			    M_USBDEV, M_WAITOK);
			sce->cur = sce->fill = sce->ibuf;
			sce->limit = sce->ibuf + isize * UGEN_NISOFRAMES;
			DPRINTFN(5, ("ugenopen: isoc endpt=%d, isize=%d\n",
			    endpt, isize));
			err = usbd_open_pipe(sce->iface,
			    edesc->bEndpointAddress, 0, &sce->pipeh);
			if (err) {
				free(sce->ibuf, M_USBDEV, 0);
				return (EIO);
			}
			/* prime UGEN_NISOREQS transfers to keep data flowing */
			for(i = 0; i < UGEN_NISOREQS; ++i) {
				sce->isoreqs[i].sce = sce;
				xfer = usbd_alloc_xfer(sc->sc_udev);
				if (xfer == 0)
					goto bad;
				sce->isoreqs[i].xfer = xfer;
				buf = usbd_alloc_buffer
				    (xfer, isize * UGEN_NISORFRMS);
				if (buf == 0) {
					/* count this xfer so bad: frees it */
					i++;
					goto bad;
				}
				sce->isoreqs[i].dmabuf = buf;
				for(j = 0; j < UGEN_NISORFRMS; ++j)
					sce->isoreqs[i].sizes[j] = isize;
				usbd_setup_isoc_xfer(xfer, sce->pipeh,
				    &sce->isoreqs[i], sce->isoreqs[i].sizes,
				    UGEN_NISORFRMS,
				    USBD_NO_COPY | USBD_SHORT_XFER_OK,
				    ugen_isoc_rintr);
				(void)usbd_transfer(xfer);
			}
			DPRINTFN(5, ("ugenopen: isoc open done\n"));
			break;
		bad:
			while (--i >= 0) /* implicit buffer free */
				usbd_free_xfer(sce->isoreqs[i].xfer);
			return (ENOMEM);
		case UE_CONTROL:
			sce->timeout = USBD_DEFAULT_TIMEOUT;
			return (EINVAL);
		}
	}
	sc->sc_is_open[endpt] = 1;
	return (0);
}
/*
 * alloc_all_endpoints_fixed_ep: allocate and initialize the endpoint
 * array for a umidi device whose endpoint layout comes from a fixed
 * quirk table (UMQ_TYPE_FIXED_EP) rather than from class-compliant
 * descriptors.  OUT endpoints are placed first in sc_endpoints,
 * followed by IN endpoints.
 *
 * Returns USBD_NORMAL_COMPLETION on success, USBD_NOMEM on allocation
 * failure, USBD_INVAL if a quirk-listed endpoint is missing or is not
 * a bulk endpoint of the expected direction.
 *
 * NOTE(review): assumes the quirk lookup cannot return NULL here since
 * the quirk type matched at probe time — confirm against caller.
 */
static usbd_status
alloc_all_endpoints_fixed_ep(struct umidi_softc *sc)
{
	struct umq_fixed_ep_desc *fp;
	struct umidi_endpoint *ep;
	usb_endpoint_descriptor_t *epd;
	int i;

	fp = umidi_get_quirk_data_from_type(sc->sc_quirk,
	    UMQ_TYPE_FIXED_EP);
	sc->sc_out_num_endpoints = fp->num_out_ep;
	sc->sc_in_num_endpoints = fp->num_in_ep;
	sc->sc_endpoints = mallocarray(sc->sc_out_num_endpoints +
	    sc->sc_in_num_endpoints, sizeof(*sc->sc_endpoints), M_USBDEV,
	    M_WAITOK | M_CANFAIL);
	if (!sc->sc_endpoints)
		return USBD_NOMEM;
	/* OUT endpoints first, IN endpoints follow in the same array */
	sc->sc_out_ep = sc->sc_out_num_endpoints ? sc->sc_endpoints : NULL;
	sc->sc_in_ep =
	    sc->sc_in_num_endpoints ?
		sc->sc_endpoints+sc->sc_out_num_endpoints : NULL;

	ep = &sc->sc_out_ep[0];
	for (i=0; i<sc->sc_out_num_endpoints; i++) {
		epd = usbd_interface2endpoint_descriptor(
			sc->sc_iface,
			fp->out_ep[i].ep);
		if (!epd) {
			DPRINTF(("%s: cannot get endpoint descriptor(out:%d)\n",
			       sc->sc_dev.dv_xname, fp->out_ep[i].ep));
			goto error;
		}
		if (UE_GET_XFERTYPE(epd->bmAttributes)!=UE_BULK ||
		    UE_GET_DIR(epd->bEndpointAddress)!=UE_DIR_OUT) {
			printf("%s: illegal endpoint(out:%d)\n",
			       sc->sc_dev.dv_xname, fp->out_ep[i].ep);
			goto error;
		}
		ep->sc = sc;
		ep->packetsize = UGETW(epd->wMaxPacketSize);
		ep->addr = epd->bEndpointAddress;
		ep->num_jacks = fp->out_ep[i].num_jacks;
		sc->sc_out_num_jacks += fp->out_ep[i].num_jacks;
		ep->num_open = 0;
		memset(ep->jacks, 0, sizeof(ep->jacks));
		ep++;
	}
	ep = &sc->sc_in_ep[0];
	for (i=0; i<sc->sc_in_num_endpoints; i++) {
		epd = usbd_interface2endpoint_descriptor(
			sc->sc_iface,
			fp->in_ep[i].ep);
		if (!epd) {
			DPRINTF(("%s: cannot get endpoint descriptor(in:%d)\n",
			       sc->sc_dev.dv_xname, fp->in_ep[i].ep));
			goto error;
		}
		if (UE_GET_XFERTYPE(epd->bmAttributes)!=UE_BULK ||
		    UE_GET_DIR(epd->bEndpointAddress)!=UE_DIR_IN) {
			printf("%s: illegal endpoint(in:%d)\n",
			       sc->sc_dev.dv_xname, fp->in_ep[i].ep);
			goto error;
		}
		ep->sc = sc;
		ep->addr = epd->bEndpointAddress;
		ep->packetsize = UGETW(epd->wMaxPacketSize);
		ep->num_jacks = fp->in_ep[i].num_jacks;
		sc->sc_in_num_jacks += fp->in_ep[i].num_jacks;
		ep->num_open = 0;
		memset(ep->jacks, 0, sizeof(ep->jacks));
		ep++;
	}

	return USBD_NORMAL_COMPLETION;
error:
	/*
	 * NOTE(review): sc_out_num_jacks/sc_in_num_jacks accumulated above
	 * are not rolled back here — verify callers reset them on failure.
	 */
	free(sc->sc_endpoints, M_USBDEV, (sc->sc_out_num_endpoints +
	    sc->sc_in_num_endpoints) * sizeof(*sc->sc_endpoints));
	sc->sc_endpoints = NULL;
	return USBD_INVAL;
}
/*
 * safte_read_config: read the SAF-TE enclosure configuration page
 * (READ BUFFER, buffer id SAFTE_RD_CONFIG) and size/allocate the
 * per-enclosure status buffer and sensor array from the counts it
 * reports.  Sensors are laid out in sc_encbuf in the same order the
 * enclosure reports status bytes: fans, power supplies, slots, door
 * lock, speaker, temps, temp-out-of-range word.
 *
 * Returns 0 on success, 1 on any failure (allocation or SCSI error).
 */
int
safte_read_config(struct safte_softc *sc)
{
	struct safte_config *config = NULL;
	struct safte_readbuf_cmd *cmd;
	struct safte_sensor *s;
	struct scsi_xfer *xs;
	int error = 0, flags = 0, i, j;

	config = dma_alloc(sizeof(*config), PR_NOWAIT);
	if (config == NULL)
		return (1);

	if (cold)
		flags |= SCSI_AUTOCONF;
	xs = scsi_xs_get(sc->sc_link, flags | SCSI_DATA_IN | SCSI_SILENT);
	if (xs == NULL) {
		error = 1;
		goto done;
	}
	xs->cmdlen = sizeof(*cmd);
	xs->data = (void *)config;
	xs->datalen = sizeof(*config);
	xs->retries = 2;
	xs->timeout = 30000;

	cmd = (struct safte_readbuf_cmd *)xs->cmd;
	cmd->opcode = READ_BUFFER;
	cmd->flags |= SAFTE_RD_MODE;
	cmd->bufferid = SAFTE_RD_CONFIG;
	cmd->length = htobe16(sizeof(*config));

	error = scsi_xs_sync(xs);
	scsi_xs_put(xs);

	if (error != 0) {
		error = 1;
		goto done;
	}

	DPRINTF(("%s: nfans: %d npwrsup: %d nslots: %d doorlock: %d ntemps: %d"
	    " alarm: %d celsius: %d ntherm: %d\n", DEVNAME(sc), config->nfans,
	    config->npwrsup, config->nslots, config->doorlock, config->ntemps,
	    config->alarm, SAFTE_CFG_CELSIUS(config->therm),
	    SAFTE_CFG_NTHERM(config->therm)));

	/* one status byte per element, in the enclosure's reporting order */
	sc->sc_encbuflen = config->nfans * sizeof(u_int8_t) + /* fan status */
	    config->npwrsup * sizeof(u_int8_t) + /* power supply status */
	    config->nslots * sizeof(u_int8_t) + /* device scsi id (lun) */
	    sizeof(u_int8_t) + /* door lock status */
	    sizeof(u_int8_t) + /* speaker status */
	    config->ntemps * sizeof(u_int8_t) + /* temp sensors */
	    sizeof(u_int16_t); /* temp out of range sensors */

	sc->sc_encbuf = dma_alloc(sc->sc_encbuflen, PR_NOWAIT);
	if (sc->sc_encbuf == NULL) {
		error = 1;
		goto done;
	}

	sc->sc_nsensors = config->nfans + config->npwrsup + config->ntemps +
	    (config->doorlock ? 1 : 0) + (config->alarm ? 1 : 0);

	sc->sc_sensors = mallocarray(sc->sc_nsensors,
	    sizeof(struct safte_sensor), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_sensors == NULL) {
		dma_free(sc->sc_encbuf, sc->sc_encbuflen);
		sc->sc_encbuf = NULL;
		sc->sc_nsensors = 0;
		error = 1;
		goto done;
	}

	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
	    sizeof(sc->sc_sensordev.xname));

	/*
	 * wire each sensor's se_field to its status byte in sc_encbuf;
	 * j tracks the running offset into the encoded buffer.
	 */
	s = sc->sc_sensors;

	for (i = 0; i < config->nfans; i++) {
		s->se_type = SAFTE_T_FAN;
		s->se_field = (u_int8_t *)(sc->sc_encbuf + i);
		s->se_sensor.type = SENSOR_INDICATOR;
		snprintf(s->se_sensor.desc, sizeof(s->se_sensor.desc),
		    "Fan%d", i);
		s++;
	}
	j = config->nfans;

	for (i = 0; i < config->npwrsup; i++) {
		s->se_type = SAFTE_T_PWRSUP;
		s->se_field = (u_int8_t *)(sc->sc_encbuf + j + i);
		s->se_sensor.type = SENSOR_INDICATOR;
		snprintf(s->se_sensor.desc, sizeof(s->se_sensor.desc),
		    "PSU%d", i);
		s++;
	}
	j += config->npwrsup;

#if NBIO > 0
	sc->sc_nslots = config->nslots;
	sc->sc_slots = (u_int8_t *)(sc->sc_encbuf + j);
#endif
	j += config->nslots;

	if (config->doorlock) {
		s->se_type = SAFTE_T_DOORLOCK;
		s->se_field = (u_int8_t *)(sc->sc_encbuf + j);
		s->se_sensor.type = SENSOR_INDICATOR;
		strlcpy(s->se_sensor.desc, "doorlock",
		    sizeof(s->se_sensor.desc));
		s++;
	}
	j++;

	if (config->alarm) {
		s->se_type = SAFTE_T_ALARM;
		s->se_field = (u_int8_t *)(sc->sc_encbuf + j);
		s->se_sensor.type = SENSOR_INDICATOR;
		strlcpy(s->se_sensor.desc, "alarm",
		    sizeof(s->se_sensor.desc));
		s++;
	}
	j++;

	/*
	 * stash the temp info so we can get out of range status. limit the
	 * number so the out of temp checks cant go into memory it doesnt own
	 */
	sc->sc_ntemps = (config->ntemps > 15) ? 15 : config->ntemps;
	sc->sc_temps = s;
	sc->sc_celsius = SAFTE_CFG_CELSIUS(config->therm);
	for (i = 0; i < config->ntemps; i++) {
		s->se_type = SAFTE_T_TEMP;
		s->se_field = (u_int8_t *)(sc->sc_encbuf + j + i);
		s->se_sensor.type = SENSOR_TEMP;
		s++;
	}
	j += config->ntemps;

	/* 16-bit out-of-range bitmap lives after the temp bytes */
	sc->sc_temperrs = (u_int8_t *)(sc->sc_encbuf + j);

done:
	dma_free(config, sizeof(*config));
	return (error);
}
/*
 * uhub_attach: attach a USB hub device.  Reads the hub descriptor
 * (regular or super-speed variant), allocates the per-port structures,
 * opens the status-change interrupt pipe, powers up all ports, and
 * leaves the rest of enumeration to the explore thread.
 *
 * On any failure after 'hub' is allocated, the bad: path tears down
 * the status buffer, port array and hub structure and clears dev->hub.
 */
void
uhub_attach(struct device *parent, struct device *self, void *aux)
{
	struct uhub_softc *sc = (struct uhub_softc *)self;
	struct usb_attach_arg *uaa = aux;
	struct usbd_device *dev = uaa->device;
	struct usbd_hub *hub = NULL;
	union {
		usb_hub_descriptor_t hs;
		usb_hub_ss_descriptor_t ss;
	} hd;
	int p, port, nports, powerdelay;
	struct usbd_interface *iface;
	usb_endpoint_descriptor_t *ed;
	struct usbd_tt *tts = NULL;
	uint8_t ttthink = 0;
	usbd_status err;
#ifdef UHUB_DEBUG
	int nremov;
#endif

	sc->sc_hub = dev;

	err = usbd_set_config_index(dev, 0, 1);
	if (err) {
		DPRINTF("%s: configuration failed, error=%s\n",
		    sc->sc_dev.dv_xname, usbd_errstr(err));
		return;
	}

	if (dev->depth > USB_HUB_MAX_DEPTH) {
		printf("%s: hub depth (%d) exceeded, hub ignored\n",
		    sc->sc_dev.dv_xname, USB_HUB_MAX_DEPTH);
		return;
	}

	/*
	 * Super-Speed hubs need to know their depth to be able to
	 * parse the bits of the route-string that correspond to
	 * their downstream port number.
	 *
	 * This does not apply to root hubs.
	 */
	if (dev->depth != 0 && dev->speed == USB_SPEED_SUPER) {
		if (usbd_set_hub_depth(dev, dev->depth - 1)) {
			printf("%s: unable to set HUB depth\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	/*
	 * Get hub descriptor.  First fetch the minimal (1-port) form to
	 * learn the port count, then re-fetch full-size if more than 7
	 * ports (the short form only covers 7 ports' worth of bitmaps).
	 */
	if (dev->speed == USB_SPEED_SUPER) {
		err = usbd_get_hub_ss_descriptor(dev, &hd.ss, 1);
		nports = hd.ss.bNbrPorts;
		powerdelay = (hd.ss.bPwrOn2PwrGood * UHD_PWRON_FACTOR);
		if (!err && nports > 7)
			usbd_get_hub_ss_descriptor(dev, &hd.ss, nports);
	} else {
		err = usbd_get_hub_descriptor(dev, &hd.hs, 1);
		nports = hd.hs.bNbrPorts;
		powerdelay = (hd.hs.bPwrOn2PwrGood * UHD_PWRON_FACTOR);
		ttthink = UGETW(hd.hs.wHubCharacteristics) & UHD_TT_THINK;
		if (!err && nports > 7)
			usbd_get_hub_descriptor(dev, &hd.hs, nports);
	}
	if (err) {
		DPRINTF("%s: getting hub descriptor failed, error=%s\n",
		    sc->sc_dev.dv_xname, usbd_errstr(err));
		return;
	}

#ifdef UHUB_DEBUG
	for (nremov = 0, port = 1; port <= nports; port++) {
		if (dev->speed == USB_SPEED_SUPER) {
			if (!UHD_NOT_REMOV(&hd.ss, port))
				nremov++;
		} else {
			if (!UHD_NOT_REMOV(&hd.hs, port))
				nremov++;
		}
	}

	printf("%s: %d port%s with %d removable, %s powered",
	    sc->sc_dev.dv_xname, nports, nports != 1 ? "s" : "", nremov,
	    dev->self_powered ? "self" : "bus");

	if (dev->depth > 0 && UHUB_IS_HIGH_SPEED(sc)) {
		printf(", %s transaction translator%s",
		    UHUB_IS_SINGLE_TT(sc) ? "single" : "multiple",
		    UHUB_IS_SINGLE_TT(sc) ? "" : "s");
	}

	printf("\n");
#endif

	if (nports == 0) {
		printf("%s: no ports, hub ignored\n", sc->sc_dev.dv_xname);
		goto bad;
	}

	hub = malloc(sizeof(*hub), M_USBDEV, M_NOWAIT);
	if (hub == NULL)
		return;
	hub->ports = mallocarray(nports, sizeof(struct usbd_port),
	    M_USBDEV, M_NOWAIT);
	if (hub->ports == NULL) {
		free(hub, M_USBDEV, 0);
		return;
	}
	dev->hub = hub;
	dev->hub->hubsoftc = sc;
	hub->explore = uhub_explore;
	hub->nports = nports;
	hub->powerdelay = powerdelay;
	hub->ttthink = ttthink >> 5;

	if (!dev->self_powered && dev->powersrc->parent != NULL &&
	    !dev->powersrc->parent->self_powered) {
		printf("%s: bus powered hub connected to bus powered hub, "
		    "ignored\n", sc->sc_dev.dv_xname);
		goto bad;
	}

	/* Set up interrupt pipe. */
	err = usbd_device2interface_handle(dev, 0, &iface);
	if (err) {
		printf("%s: no interface handle\n", sc->sc_dev.dv_xname);
		goto bad;
	}
	ed = usbd_interface2endpoint_descriptor(iface, 0);
	if (ed == NULL) {
		printf("%s: no endpoint descriptor\n", sc->sc_dev.dv_xname);
		goto bad;
	}
	if ((ed->bmAttributes & UE_XFERTYPE) != UE_INTERRUPT) {
		printf("%s: bad interrupt endpoint\n", sc->sc_dev.dv_xname);
		goto bad;
	}

	/* one status-change bit per port, plus one for the hub itself */
	sc->sc_statuslen = (nports + 1 + 7) / 8;
	sc->sc_statusbuf = malloc(sc->sc_statuslen, M_USBDEV, M_NOWAIT);
	if (!sc->sc_statusbuf)
		goto bad;

	err = usbd_open_pipe_intr(iface, ed->bEndpointAddress,
	    USBD_SHORT_XFER_OK, &sc->sc_ipipe, sc, sc->sc_statusbuf,
	    sc->sc_statuslen, uhub_intr, UHUB_INTR_INTERVAL);
	if (err) {
		printf("%s: cannot open interrupt pipe\n",
		    sc->sc_dev.dv_xname);
		goto bad;
	}

	/* Wait with power off for a while. */
	usbd_delay_ms(dev, USB_POWER_DOWN_TIME);

	/*
	 * To have the best chance of success we do things in the exact same
	 * order as Windoze98.  This should not be necessary, but some
	 * devices do not follow the USB specs to the letter.
	 *
	 * These are the events on the bus when a hub is attached:
	 *  Get device and config descriptors (see attach code)
	 *  Get hub descriptor (see above)
	 *  For all ports
	 *     turn on power
	 *     wait for power to become stable
	 * (all below happens in explore code)
	 *  For all ports
	 *     clear C_PORT_CONNECTION
	 *  For all ports
	 *     get port status
	 *     if device connected
	 *        wait 100 ms
	 *        turn on reset
	 *        wait
	 *        clear C_PORT_RESET
	 *        get port status
	 *        proceed with device attachment
	 */

	if (UHUB_IS_HIGH_SPEED(sc)) {
		/* one shared TT, or one per port, per hub characteristics */
		tts = mallocarray((UHUB_IS_SINGLE_TT(sc) ? 1 : nports),
		    sizeof (struct usbd_tt), M_USBDEV, M_NOWAIT);
		if (!tts)
			goto bad;
	}

	/* Set up data structures */
	for (p = 0; p < nports; p++) {
		struct usbd_port *up = &hub->ports[p];
		up->device = NULL;
		up->parent = dev;
		up->portno = p + 1;
		if (dev->self_powered)
			/* Self powered hub, give ports maximum current. */
			up->power = USB_MAX_POWER;
		else
			up->power = USB_MIN_POWER;
		up->restartcnt = 0;
		up->reattach = 0;
		if (UHUB_IS_HIGH_SPEED(sc)) {
			up->tt = &tts[UHUB_IS_SINGLE_TT(sc) ? 0 : p];
			up->tt->hub = hub;
		} else {
			up->tt = NULL;
		}
	}

	for (port = 1; port <= nports; port++) {
		/* Turn the power on. */
		err = usbd_set_port_feature(dev, port, UHF_PORT_POWER);
		if (err)
			printf("%s: port %d power on failed, %s\n",
			    sc->sc_dev.dv_xname, port, usbd_errstr(err));
		/* Make sure we check the port status at least once. */
		sc->sc_status |= (1 << port);
	}

	/* Wait for stable power. */
	if (dev->powersrc->parent != NULL)
		usbd_delay_ms(dev, powerdelay + USB_EXTRA_POWER_UP_TIME);

	/* The usual exploration will finish the setup. */

	sc->sc_running = 1;

	return;

bad:
	if (sc->sc_statusbuf)
		free(sc->sc_statusbuf, M_USBDEV, 0);
	if (hub) {
		if (hub->ports)
			free(hub->ports, M_USBDEV, 0);
		free(hub, M_USBDEV, 0);
	}
	dev->hub = NULL;
}
void uhidev_attach(struct device *parent, struct device *self, void *aux) { struct uhidev_softc *sc = (struct uhidev_softc *)self; struct usb_attach_arg *uaa = aux; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; struct uhidev_attach_arg uha; int size, nrepid, repid, repsz; int i, repsizes[256]; void *desc = NULL; struct device *dev; sc->sc_udev = uaa->device; sc->sc_iface = uaa->iface; sc->sc_ifaceno = uaa->ifaceno; id = usbd_get_interface_descriptor(sc->sc_iface); usbd_set_idle(sc->sc_udev, sc->sc_ifaceno, 0, 0); sc->sc_iep_addr = sc->sc_oep_addr = -1; for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i); if (ed == NULL) { printf("%s: could not read endpoint descriptor\n", DEVNAME(sc)); return; } DPRINTFN(10,("uhidev_attach: bLength=%d bDescriptorType=%d " "bEndpointAddress=%d-%s bmAttributes=%d wMaxPacketSize=%d" " bInterval=%d\n", ed->bLength, ed->bDescriptorType, ed->bEndpointAddress & UE_ADDR, UE_GET_DIR(ed->bEndpointAddress)==UE_DIR_IN? "in" : "out", ed->bmAttributes & UE_XFERTYPE, UGETW(ed->wMaxPacketSize), ed->bInterval)); if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && (ed->bmAttributes & UE_XFERTYPE) == UE_INTERRUPT) { sc->sc_iep_addr = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && (ed->bmAttributes & UE_XFERTYPE) == UE_INTERRUPT) { sc->sc_oep_addr = ed->bEndpointAddress; } else { printf("%s: unexpected endpoint\n", DEVNAME(sc)); return; } } /* * Check that we found an input interrupt endpoint. 
* The output interrupt endpoint is optional */ if (sc->sc_iep_addr == -1) { printf("%s: no input interrupt endpoint\n", DEVNAME(sc)); return; } #ifndef SMALL_KERNEL if (uhidev_use_rdesc(sc, id, uaa->vendor, uaa->product, &desc, &size)) return; #endif /* !SMALL_KERNEL */ if (desc == NULL) { struct usb_hid_descriptor *hid; hid = usbd_get_hid_descriptor(sc->sc_udev, id); if (hid == NULL) { printf("%s: no HID descriptor\n", DEVNAME(sc)); return; } size = UGETW(hid->descrs[0].wDescriptorLength); desc = malloc(size, M_USBDEV, M_NOWAIT); if (desc == NULL) { printf("%s: no memory\n", DEVNAME(sc)); return; } if (usbd_get_report_descriptor(sc->sc_udev, sc->sc_ifaceno, desc, size)) { printf("%s: no report descriptor\n", DEVNAME(sc)); free(desc, M_USBDEV, 0); return; } } sc->sc_repdesc = desc; sc->sc_repdesc_size = size; nrepid = uhidev_maxrepid(desc, size); if (nrepid < 0) return; printf("%s: iclass %d/%d", DEVNAME(sc), id->bInterfaceClass, id->bInterfaceSubClass); if (nrepid > 0) printf(", %d report id%s", nrepid, nrepid > 1 ? "s" : ""); printf("\n"); nrepid++; sc->sc_subdevs = mallocarray(nrepid, sizeof(struct uhidev *), M_USBDEV, M_NOWAIT | M_ZERO); if (sc->sc_subdevs == NULL) { printf("%s: no memory\n", DEVNAME(sc)); return; } sc->sc_nrepid = nrepid; sc->sc_isize = 0; for (repid = 0; repid < nrepid; repid++) { repsz = hid_report_size(desc, size, hid_input, repid); DPRINTF(("uhidev_match: repid=%d, repsz=%d\n", repid, repsz)); repsizes[repid] = repsz; if (repsz > sc->sc_isize) sc->sc_isize = repsz; } sc->sc_isize += (nrepid != 1); /* one byte for the report ID */ DPRINTF(("uhidev_attach: isize=%d\n", sc->sc_isize)); uha.uaa = uaa; uha.parent = sc; uha.reportid = UHIDEV_CLAIM_ALLREPORTID; /* Look for a driver claiming all report IDs first. 
*/ dev = config_found_sm(self, &uha, NULL, uhidevsubmatch); if (dev != NULL) { for (repid = 0; repid < nrepid; repid++) sc->sc_subdevs[repid] = (struct uhidev *)dev; return; } for (repid = 0; repid < nrepid; repid++) { DPRINTF(("%s: try repid=%d\n", __func__, repid)); if (hid_report_size(desc, size, hid_input, repid) == 0 && hid_report_size(desc, size, hid_output, repid) == 0 && hid_report_size(desc, size, hid_feature, repid) == 0) continue; uha.reportid = repid; dev = config_found_sm(self, &uha, uhidevprint, uhidevsubmatch); sc->sc_subdevs[repid] = (struct uhidev *)dev; } }
/*
 * Perform a READ ELEMENT STATUS on behalf of the user, and return to
 * the user only the data the user is interested in (i.e. an array of
 * changer_element_status structures)
 *
 * Two-pass approach: a first 1 KB read discovers the per-element
 * descriptor length, then a correctly-sized buffer is allocated and
 * all elements of the requested type are fetched and converted.
 *
 * Returns 0 on success, or an errno (EINVAL for inconsistent device
 * data, or an error from ch_getelemstatus/copyout).
 */
int
ch_usergetelemstatus(struct ch_softc *sc,
    struct changer_element_status_request *cesr)
{
	struct changer_element_status *user_data = NULL;
	struct read_element_status_header *st_hdr;
	struct read_element_status_page_header *pg_hdr;
	caddr_t desc;
	caddr_t data = NULL;
	size_t size, desclen, udsize;
	int chet = cesr->cesr_type;
	int avail, i, error = 0;
	int want_voltags = (cesr->cesr_flags & CESR_VOLTAGS) ? 1 : 0;

	/*
	 * If there are no elements of the requested type in the changer,
	 * the request is invalid.
	 */
	if (sc->sc_counts[chet] == 0)
		return (EINVAL);

	/*
	 * Request one descriptor for the given element type.  This
	 * is used to determine the size of the descriptor so that
	 * we can allocate enough storage for all of them.  We assume
	 * that the first one can fit into 1k.
	 */
	size = 1024;
	data = dma_alloc(size, PR_WAITOK);
	error = ch_getelemstatus(sc, sc->sc_firsts[chet], 1, data, size,
	    want_voltags);
	if (error)
		goto done;

	st_hdr = (struct read_element_status_header *)data;
	pg_hdr = (struct read_element_status_page_header *) (st_hdr + 1);
	/*
	 * NOTE(review): desclen comes from the device and is not validated
	 * (zero or huge values would mis-size the second read) — confirm
	 * device trust model.
	 */
	desclen = _2btol(pg_hdr->edl);

	dma_free(data, size);

	/*
	 * Reallocate storage for descriptors and get them from the
	 * device.
	 */
	size = sizeof(struct read_element_status_header) +
	    sizeof(struct read_element_status_page_header) +
	    (desclen * sc->sc_counts[chet]);
	data = dma_alloc(size, PR_WAITOK);
	error = ch_getelemstatus(sc, sc->sc_firsts[chet],
	    sc->sc_counts[chet], data, size, want_voltags);
	if (error)
		goto done;

	/*
	 * Fill in the user status array.
	 */
	st_hdr = (struct read_element_status_header *)data;
	pg_hdr = (struct read_element_status_page_header *) (st_hdr + 1);

	avail = _2btol(st_hdr->count);
	if (avail != sc->sc_counts[chet]) {
		/* device disagrees with our cached element count */
		error = EINVAL;
		goto done;
	}

	user_data = mallocarray(avail, sizeof(struct changer_element_status),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	udsize = avail * sizeof(struct changer_element_status);

	desc = (caddr_t)(pg_hdr + 1);
	for (i = 0; i < avail; ++i) {
		struct changer_element_status *ces = &(user_data[i]);
		copy_element_status(pg_hdr->flags,
		    (struct read_element_status_descriptor *)desc, ces);
		desc += desclen;
	}

	/* Copy array out to userspace. */
	error = copyout(user_data, cesr->cesr_data, udsize);

done:
	if (data != NULL)
		dma_free(data, size);
	if (user_data != NULL)
		free(user_data, M_DEVBUF, udsize);
	return (error);
}
/*
 * Attempt to build up a hash table for the directory contents in
 * inode 'ip'.  Returns 0 on success, or -1 if the operation failed.
 *
 * Memory for the hash is charged against the global ufs_dirhashmem
 * budget (under DIRHASHLIST_LOCK) before allocating; every failure
 * path refunds the reservation.  Allocations use M_NOWAIT so a failed
 * build degrades to linear directory lookup instead of blocking.
 */
int
ufsdirhash_build(struct inode *ip)
{
	struct dirhash *dh;
	struct buf *bp = NULL;
	struct direct *ep;
	struct vnode *vp;
	doff_t bmask, pos;
	int dirblocks, i, j, memreqd, nblocks, narrays, nslots, slot;

	/* Check if we can/should use dirhash. */
	if (ip->i_dirhash == NULL) {
		if (DIP(ip, size) < ufs_mindirhashsize || OFSFMT(ip))
			return (-1);
	} else {
		/* Hash exists, but sysctls could have changed. */
		if (DIP(ip, size) < ufs_mindirhashsize ||
		    ufs_dirhashmem > ufs_dirhashmaxmem) {
			ufsdirhash_free(ip);
			return (-1);
		}
		/* Check if hash exists and is intact (note: unlocked read). */
		if (ip->i_dirhash->dh_hash != NULL)
			return (0);
		/* Free the old, recycled hash and build a new one. */
		ufsdirhash_free(ip);
	}

	/* Don't hash removed directories. */
	if (ip->i_effnlink == 0)
		return (-1);

	vp = ip->i_vnode;
	/* Allocate 50% more entries than this dir size could ever need. */
	DIRHASH_ASSERT(DIP(ip, size) >= DIRBLKSIZ, ("ufsdirhash_build size"));
	nslots = DIP(ip, size) / DIRECTSIZ(1);
	nslots = (nslots * 3 + 1) / 2;
	narrays = howmany(nslots, DH_NBLKOFF);
	nslots = narrays * DH_NBLKOFF;
	dirblocks = howmany(DIP(ip, size), DIRBLKSIZ);
	nblocks = (dirblocks * 3 + 1) / 2;

	/* total bytes this hash will consume, for the global budget */
	memreqd = sizeof(*dh) + narrays * sizeof(*dh->dh_hash) +
	    narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) +
	    nblocks * sizeof(*dh->dh_blkfree);
	DIRHASHLIST_LOCK();
	if (memreqd + ufs_dirhashmem > ufs_dirhashmaxmem) {
		DIRHASHLIST_UNLOCK();
		if (memreqd > ufs_dirhashmaxmem / 2)
			return (-1);

		/* Try to free some space. */
		if (ufsdirhash_recycle(memreqd) != 0)
			return (-1);
		/* Enough was freed, and list has been locked. */
	}
	ufs_dirhashmem += memreqd;
	DIRHASHLIST_UNLOCK();

	/*
	 * Use non-blocking mallocs so that we will revert to a linear
	 * lookup on failure rather than potentially blocking forever.
	 */
	dh = malloc(sizeof(*dh), M_DIRHASH, M_NOWAIT|M_ZERO);
	if (dh == NULL) {
		/* refund the memory reservation made above */
		DIRHASHLIST_LOCK();
		ufs_dirhashmem -= memreqd;
		DIRHASHLIST_UNLOCK();
		return (-1);
	}
	dh->dh_hash = mallocarray(narrays, sizeof(dh->dh_hash[0]),
	    M_DIRHASH, M_NOWAIT|M_ZERO);
	dh->dh_blkfree = mallocarray(nblocks, sizeof(dh->dh_blkfree[0]),
	    M_DIRHASH, M_NOWAIT | M_ZERO);
	if (dh->dh_hash == NULL || dh->dh_blkfree == NULL)
		goto fail;
	for (i = 0; i < narrays; i++) {
		if ((dh->dh_hash[i] = DIRHASH_BLKALLOC()) == NULL)
			goto fail;
		for (j = 0; j < DH_NBLKOFF; j++)
			dh->dh_hash[i][j] = DIRHASH_EMPTY;
	}

	/* Initialise the hash table and block statistics. */
	mtx_init(&dh->dh_mtx, IPL_NONE);
	dh->dh_narrays = narrays;
	dh->dh_hlen = nslots;
	dh->dh_nblk = nblocks;
	dh->dh_dirblks = dirblocks;
	for (i = 0; i < dirblocks; i++)
		dh->dh_blkfree[i] = DIRBLKSIZ / DIRALIGN;
	for (i = 0; i < DH_NFSTATS; i++)
		dh->dh_firstfree[i] = -1;
	dh->dh_firstfree[DH_NFSTATS] = 0;
	dh->dh_seqopt = 0;
	dh->dh_seqoff = 0;
	dh->dh_score = DH_SCOREINIT;
	ip->i_dirhash = dh;

	/* walk every directory entry and insert its offset into the hash */
	bmask = VFSTOUFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
	pos = 0;
	while (pos < DIP(ip, size)) {
		/* If necessary, get the next directory block. */
		if ((pos & bmask) == 0) {
			if (bp != NULL)
				brelse(bp);
			if (UFS_BUFATOFF(ip, (off_t)pos, NULL, &bp) != 0)
				goto fail;
		}

		/* Add this entry to the hash. */
		ep = (struct direct *)((char *)bp->b_data + (pos & bmask));
		if (ep->d_reclen == 0 || ep->d_reclen >
		    DIRBLKSIZ - (pos & (DIRBLKSIZ - 1))) {
			/* Corrupted directory. */
			brelse(bp);
			goto fail;
		}
		if (ep->d_ino != 0) {
			/* Add the entry (simplified ufsdirhash_add). */
			slot = ufsdirhash_hash(dh, ep->d_name, ep->d_namlen);
			while (DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
				slot = WRAPINCR(slot, dh->dh_hlen);
			dh->dh_hused++;
			DH_ENTRY(dh, slot) = pos;
			ufsdirhash_adjfree(dh, pos, -DIRSIZ(0, ep));
		}
		pos += ep->d_reclen;
	}

	if (bp != NULL)
		brelse(bp);
	DIRHASHLIST_LOCK();
	TAILQ_INSERT_TAIL(&ufsdirhash_list, dh, dh_list);
	dh->dh_onlist = 1;
	DIRHASHLIST_UNLOCK();
	return (0);

fail:
	/* tear down partial hash and refund the memory reservation */
	if (dh->dh_hash != NULL) {
		for (i = 0; i < narrays; i++)
			if (dh->dh_hash[i] != NULL)
				DIRHASH_BLKFREE(dh->dh_hash[i]);
		free(dh->dh_hash, M_DIRHASH, 0);
	}
	if (dh->dh_blkfree != NULL)
		free(dh->dh_blkfree, M_DIRHASH, 0);
	free(dh, M_DIRHASH, 0);
	ip->i_dirhash = NULL;
	DIRHASHLIST_LOCK();
	ufs_dirhashmem -= memreqd;
	DIRHASHLIST_UNLOCK();
	return (-1);
}
/*
 * amap_extend: extend the size of an amap (if needed)
 *
 * => called from uvm_map when we want to extend an amap to cover
 *    a new mapping (rather than allocate a new one)
 * => to safely extend an amap it should have a reference count of
 *    one (thus it can't be shared)
 * => XXXCDC: support padding at this level?
 *
 * Returns 0 on success, E2BIG if the result would exceed
 * UVM_AMAP_LARGE, or ENOMEM if the new arrays cannot be allocated.
 */
int
amap_extend(struct vm_map_entry *entry, vsize_t addsize)
{
	struct vm_amap *amap = entry->aref.ar_amap;
	int slotoff = entry->aref.ar_pageoff;
	int slotmapped, slotadd, slotneed, slotalloc;
#ifdef UVM_AMAP_PPREF
	int *newppref, *oldppref;
#endif
	u_int *newsl, *newbck, *oldsl, *oldbck;
	struct vm_anon **newover, **oldover;
	int slotadded;

	/*
	 * first, determine how many slots we need in the amap.  don't
	 * forget that ar_pageoff could be non-zero: this means that
	 * there are some unused slots before us in the amap.
	 */
	AMAP_B2SLOT(slotmapped, entry->end - entry->start); /* slots mapped */
	AMAP_B2SLOT(slotadd, addsize);			/* slots to add */
	slotneed = slotoff + slotmapped + slotadd;

	/*
	 * case 1: we already have enough slots in the map and thus
	 * only need to bump the reference counts on the slots we are
	 * adding.
	 */
	if (amap->am_nslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
		if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
			amap_pp_adjref(amap, slotoff + slotmapped, slotadd, 1);
		}
#endif
		return (0);
	}

	/*
	 * case 2: we pre-allocated slots for use and we just need to
	 * bump nslot up to take account for these slots.
	 */
	if (amap->am_maxslot >= slotneed) {
#ifdef UVM_AMAP_PPREF
		if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
			if ((slotoff + slotmapped) < amap->am_nslot)
				amap_pp_adjref(amap, slotoff + slotmapped,
				    (amap->am_nslot - (slotoff + slotmapped)),
				    1);
			pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
			    slotneed - amap->am_nslot);
		}
#endif
		amap->am_nslot = slotneed;
		/*
		 * no need to zero am_anon since that was done at
		 * alloc time and we never shrink an allocation.
		 */
		return (0);
	}

	/*
	 * case 3: we need to malloc a new amap and copy all the amap
	 * data over from old amap to the new one.
	 *
	 * XXXCDC: could we take advantage of a kernel realloc()?
	 */
	if (slotneed >= UVM_AMAP_LARGE)
		return E2BIG;

	slotalloc = malloc_roundup(slotneed * MALLOC_SLOT_UNIT) /
	    MALLOC_SLOT_UNIT;
#ifdef UVM_AMAP_PPREF
	newppref = NULL;
	if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
		newppref = mallocarray(slotalloc, sizeof(int), M_UVMAMAP,
		    M_WAITOK | M_CANFAIL);
		if (newppref == NULL) {
			/* give up if malloc fails */
			free(amap->am_ppref, M_UVMAMAP, 0);
			amap->am_ppref = PPREF_NONE;
		}
	}
#endif
	/*
	 * Use mallocarray() so the slotalloc * MALLOC_SLOT_UNIT
	 * multiplication is overflow-checked, consistent with the other
	 * array allocations in this file (was an unchecked malloc()).
	 */
	newsl = mallocarray(slotalloc, MALLOC_SLOT_UNIT, M_UVMAMAP,
	    M_WAITOK | M_CANFAIL);
	if (newsl == NULL) {
#ifdef UVM_AMAP_PPREF
		if (newppref != NULL) {
			free(newppref, M_UVMAMAP, 0);
		}
#endif
		return (ENOMEM);
	}
	/*
	 * The single allocation is carved into three consecutive arrays:
	 * am_slots (int), am_bckptr (int), am_anon (struct vm_anon *),
	 * each slotalloc entries long.
	 */
	newbck = (int *)(((char *)newsl) + slotalloc * sizeof(int));
	newover = (struct vm_anon **)(((char *)newbck) +
	    slotalloc * sizeof(int));
	KASSERT(amap->am_maxslot < slotneed);

	/* now copy everything over to new malloc'd areas... */
	slotadded = slotalloc - amap->am_nslot;

	/* do am_slots */
	oldsl = amap->am_slots;
	memcpy(newsl, oldsl, sizeof(int) * amap->am_nused);
	amap->am_slots = newsl;

	/* do am_anon */
	oldover = amap->am_anon;
	memcpy(newover, oldover, sizeof(struct vm_anon *) * amap->am_nslot);
	memset(newover + amap->am_nslot, 0, sizeof(struct vm_anon *) *
	    slotadded);
	amap->am_anon = newover;

	/* do am_bckptr */
	oldbck = amap->am_bckptr;
	memcpy(newbck, oldbck, sizeof(int) * amap->am_nslot);
	memset(newbck + amap->am_nslot, 0, sizeof(int) * slotadded); /* XXX: needed? */
	amap->am_bckptr = newbck;

#ifdef UVM_AMAP_PPREF
	/* do ppref */
	oldppref = amap->am_ppref;
	if (newppref) {
		memcpy(newppref, oldppref, sizeof(int) * amap->am_nslot);
		memset(newppref + amap->am_nslot, 0, sizeof(int) * slotadded);
		amap->am_ppref = newppref;
		if ((slotoff + slotmapped) < amap->am_nslot)
			amap_pp_adjref(amap, slotoff + slotmapped,
			    (amap->am_nslot - (slotoff + slotmapped)), 1);
		pp_setreflen(newppref, amap->am_nslot, 1,
		    slotneed - amap->am_nslot);
	}
#endif

	/* update master values */
	amap->am_nslot = slotneed;
	amap->am_maxslot = slotalloc;

	/* and free */
	free(oldsl, M_UVMAMAP, 0);
#ifdef UVM_AMAP_PPREF
	if (oldppref && oldppref != PPREF_NONE)
		free(oldppref, M_UVMAMAP, 0);
#endif
	return (0);
}
/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 *
 * Accepts at most one cipher (3DES-CBC or AES-CBC) and at most one
 * MAC (MD5-HMAC or SHA1-HMAC) from the cri chain; any other algorithm
 * or a duplicate of either class yields EINVAL.  For HMAC, the inner
 * and outer hash states are precomputed here so per-request processing
 * only has to continue the hashes.
 *
 * Returns 0 on success, EINVAL on bad arguments, ENOMEM if the session
 * table cannot be (re)allocated.
 */
int
ubsec_newsession(u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct ubsec_softc *sc = NULL;
	struct ubsec_session *ses = NULL;
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i, sesn;

	if (sidp == NULL || cri == NULL)
		return (EINVAL);

	/*
	 * Find the softc whose crypto driver id matches *sidp.
	 * NOTE(review): if no unit matches and no cd_devs[] slot is
	 * NULL, the loop falls through with sc pointing at the last
	 * device — presumably the framework only passes ids it
	 * registered, so this cannot happen in practice; verify.
	 */
	for (i = 0; i < ubsec_cd.cd_ndevs; i++) {
		sc = ubsec_cd.cd_devs[i];
		if (sc == NULL || sc->sc_cid == (*sidp))
			break;
	}
	if (sc == NULL)
		return (EINVAL);

	/* Sort the requested algorithms into one MAC and one cipher. */
	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC ||
		    c->cri_alg == CRYPTO_SHA1_HMAC) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_3DES_CBC ||
		    c->cri_alg == CRYPTO_AES_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);

	/* AES only supports 128/192/256-bit keys. */
	if (encini && encini->cri_alg == CRYPTO_AES_CBC) {
		switch (encini->cri_klen) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (EINVAL);
		}
	}

	if (sc->sc_sessions == NULL) {
		/* First session on this device: allocate a one-entry table. */
		ses = sc->sc_sessions = (struct ubsec_session *)malloc(
		    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		/* Reuse a free slot if one exists... */
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		/* ...otherwise grow the table by one entry. */
		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = mallocarray((sesn + 1),
			    sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
			if (ses == NULL)
				return (ENOMEM);
			bcopy(sc->sc_sessions, ses, sesn *
			    sizeof(struct ubsec_session));
			/* Old table holds key material: wipe before free. */
			explicit_bzero(sc->sc_sessions, sesn *
			    sizeof(struct ubsec_session));
			free(sc->sc_sessions, M_DEVBUF, 0);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	bzero(ses, sizeof(struct ubsec_session));
	ses->ses_used = 1;
	if (encini) {
		/* Go ahead and compute key in ubsec's byte order */
		if (encini->cri_alg == CRYPTO_AES_CBC) {
			bcopy(encini->cri_key, ses->ses_key,
			    encini->cri_klen / 8);
		} else
			/* 3DES always uses a 24-byte (192-bit) key. */
			bcopy(encini->cri_key, ses->ses_key, 24);

		SWAP32(ses->ses_key[0]);
		SWAP32(ses->ses_key[1]);
		SWAP32(ses->ses_key[2]);
		SWAP32(ses->ses_key[3]);
		SWAP32(ses->ses_key[4]);
		SWAP32(ses->ses_key[5]);
		SWAP32(ses->ses_key[6]);
		SWAP32(ses->ses_key[7]);
	}

	if (macini) {
		/*
		 * Precompute the inner hash state: hash (key XOR ipad)
		 * padded to the block length.  The key is XORed in place
		 * and restored below.
		 */
		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_IPAD_VAL;

		if (macini->cri_alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_ipad_buffer,
			    HMAC_MD5_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(md5ctx.state, ses->ses_hminner,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_ipad_buffer,
			    HMAC_SHA1_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(sha1ctx.state, ses->ses_hminner,
			    sizeof(sha1ctx.state));
		}

		/* Flip key from ipad to opad, precompute outer state. */
		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		if (macini->cri_alg == CRYPTO_MD5_HMAC) {
			MD5Init(&md5ctx);
			MD5Update(&md5ctx, macini->cri_key,
			    macini->cri_klen / 8);
			MD5Update(&md5ctx, hmac_opad_buffer,
			    HMAC_MD5_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(md5ctx.state, ses->ses_hmouter,
			    sizeof(md5ctx.state));
		} else {
			SHA1Init(&sha1ctx);
			SHA1Update(&sha1ctx, macini->cri_key,
			    macini->cri_klen / 8);
			SHA1Update(&sha1ctx, hmac_opad_buffer,
			    HMAC_SHA1_BLOCK_LEN - (macini->cri_klen / 8));
			bcopy(sha1ctx.state, ses->ses_hmouter,
			    sizeof(sha1ctx.state));
		}

		/* Restore the caller's key to its original value. */
		for (i = 0; i < macini->cri_klen / 8; i++)
			macini->cri_key[i] ^= HMAC_OPAD_VAL;
	}

	*sidp = UBSEC_SID(sc->sc_dv.dv_unit, sesn);
	return (0);
}
void kue_attachhook(void *xsc) { struct kue_softc *sc = xsc; int s; struct ifnet *ifp; struct usbd_device *dev = sc->kue_udev; struct usbd_interface *iface; usbd_status err; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; int i; /* Load the firmware into the NIC. */ if (kue_load_fw(sc)) { printf("%s: loading firmware failed\n", sc->kue_dev.dv_xname); return; } err = usbd_device2interface_handle(dev, KUE_IFACE_IDX, &iface); if (err) { printf("%s: getting interface handle failed\n", sc->kue_dev.dv_xname); return; } sc->kue_iface = iface; id = usbd_get_interface_descriptor(iface); /* Find endpoints. */ for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(iface, i); if (ed == NULL) { printf("%s: couldn't get ep %d\n", sc->kue_dev.dv_xname, i); return; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { sc->kue_ed[KUE_ENDPT_RX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { sc->kue_ed[KUE_ENDPT_TX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) { sc->kue_ed[KUE_ENDPT_INTR] = ed->bEndpointAddress; } } if (sc->kue_ed[KUE_ENDPT_RX] == 0 || sc->kue_ed[KUE_ENDPT_TX] == 0) { printf("%s: missing endpoint\n", sc->kue_dev.dv_xname); return; } /* Read ethernet descriptor */ err = kue_ctl(sc, KUE_CTL_READ, KUE_CMD_GET_ETHER_DESCRIPTOR, 0, &sc->kue_desc, sizeof(sc->kue_desc)); if (err) { printf("%s: could not read Ethernet descriptor\n", sc->kue_dev.dv_xname); return; } sc->kue_mcfilters = mallocarray(KUE_MCFILTCNT(sc), ETHER_ADDR_LEN, M_USBDEV, M_NOWAIT); if (sc->kue_mcfilters == NULL) { printf("%s: no memory for multicast filter buffer\n", sc->kue_dev.dv_xname); return; } s = splnet(); /* * A KLSI chip was detected. Inform the world. 
*/ printf("%s: address %s\n", sc->kue_dev.dv_xname, ether_sprintf(sc->kue_desc.kue_macaddr)); bcopy(sc->kue_desc.kue_macaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); /* Initialize interface info.*/ ifp = GET_IFP(sc); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = kue_ioctl; ifp->if_start = kue_start; ifp->if_watchdog = kue_watchdog; strlcpy(ifp->if_xname, sc->kue_dev.dv_xname, IFNAMSIZ); IFQ_SET_READY(&ifp->if_snd); /* Attach the interface. */ if_attach(ifp); ether_ifattach(ifp); sc->kue_attached = 1; splx(s); }