int
mcd_attach(struct mcd_softc *sc)
{
	int unit;

	unit = device_get_unit(sc->dev);

	MCD_LOCK(sc);
	sc->data.flags |= MCDINIT;
	mcd_soft_reset(sc);
	bioq_init(&sc->data.head);

#ifdef NOTYET
	/* wire controller for interrupts and dma */
	mcd_configure(sc);
#endif
	MCD_UNLOCK(sc);

	/* name filled in probe */
	sc->mcd_dev_t = make_dev(&mcd_cdevsw, 8 * unit,
	    UID_ROOT, GID_OPERATOR, 0640, "mcd%d", unit);
	sc->mcd_dev_t->si_drv1 = (void *)sc;
	callout_init_mtx(&sc->timer, &sc->mtx, 0);

	return (0);
}
int
ida_init(struct ida_softc *ida)
{
	int error;

	ida->unit = device_get_unit(ida->dev);
	ida->tag = rman_get_bustag(ida->regs);
	ida->bsh = rman_get_bushandle(ida->regs);

	SLIST_INIT(&ida->free_qcbs);
	STAILQ_INIT(&ida->qcb_queue);
	bioq_init(&ida->bio_queue);

	ida->qcbs = kmalloc(IDA_QCB_MAX * sizeof(struct ida_qcb),
	    M_DEVBUF, M_INTWAIT | M_ZERO);

	/*
	 * Create our DMA tags
	 */

	/* DMA tag for our hardware QCB structures */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
	    /*flags*/0, &ida->hwqcb_dmat);
	if (error)
		return (ENOMEM);

	/* DMA tag for mapping buffers into device space */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/MAXBSIZE, /*nsegments*/IDA_NSEG,
	    /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0,
	    &ida->buffer_dmat);
	if (error)
		return (ENOMEM);

	/* Allocation of hardware QCBs */
	/* XXX allocation is rounded to hardware page size */
	error = bus_dmamem_alloc(ida->hwqcb_dmat,
	    (void **)&ida->hwqcbs, BUS_DMA_NOWAIT, &ida->hwqcb_dmamap);
	if (error)
		return (ENOMEM);

	/* And permanently map them in */
	bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap,
	    ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0);

	bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb));

	ida_alloc_qcb(ida);		/* allocate an initial qcb */

	return (0);
}
static cam_status
ptctor(struct cam_periph *periph, void *arg)
{
	struct pt_softc *softc;
	struct ccb_getdev *cgd;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		kprintf("ptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		kprintf("ptregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = kmalloc(sizeof(*softc), M_DEVBUF, M_INTWAIT | M_ZERO);
	LIST_INIT(&softc->pending_ccbs);
	softc->state = PT_STATE_NORMAL;
	bioq_init(&softc->bio_queue);
	softc->io_timeout = SCSI_PT_DEFAULT_TIMEOUT * 1000;

	periph->softc = softc;

	cam_periph_unlock(periph);
	cam_extend_set(ptperiphs, periph->unit_number, periph);

	devstat_add_entry(&softc->device_stats, "pt",
	    periph->unit_number, 0,
	    DEVSTAT_NO_BLOCKSIZE,
	    SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
	    DEVSTAT_PRIORITY_OTHER);

	make_dev(&pt_ops, periph->unit_number, UID_ROOT,
	    GID_OPERATOR, 0600, "%s%d", periph->periph_name,
	    periph->unit_number);
	cam_periph_lock(periph);

	/*
	 * Add async callbacks for bus reset and bus device reset calls.
	 * I don't bother checking if this fails as, in most cases, the
	 * system will function just fine without them and the only
	 * alternative would be to not attach the device on failure.
	 */
	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
	    ptasync, periph, periph->path);

	/* Tell the user we've attached to the device */
	xpt_announce_periph(periph, NULL);

	return(CAM_REQ_CMP);
}
static int
opalflash_attach(device_t dev)
{
	struct opalflash_softc *sc;
	phandle_t node;
	cell_t flash_blocksize, opal_id;
	uint32_t regs[2];

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	node = ofw_bus_get_node(dev);
	OF_getencprop(node, "ibm,opal-id", &opal_id, sizeof(opal_id));
	sc->sc_opal_id = opal_id;

	if (OF_getencprop(node, "ibm,flash-block-size",
	    &flash_blocksize, sizeof(flash_blocksize)) < 0) {
		device_printf(dev, "Cannot determine flash block size.\n");
		return (ENXIO);
	}

	if (!OF_hasprop(node, "no-erase"))
		sc->sc_erase = true;

	OPALFLASH_LOCK_INIT(sc);

	if (OF_getencprop(node, "reg", regs, sizeof(regs)) < 0) {
		device_printf(dev, "Unable to get flash size.\n");
		return (ENXIO);
	}

	sc->sc_disk = disk_alloc();
	sc->sc_disk->d_name = "opalflash";
	sc->sc_disk->d_open = opalflash_open;
	sc->sc_disk->d_close = opalflash_close;
	sc->sc_disk->d_strategy = opalflash_strategy;
	sc->sc_disk->d_ioctl = opalflash_ioctl;
	sc->sc_disk->d_getattr = opalflash_getattr;
	sc->sc_disk->d_drv1 = sc;
	sc->sc_disk->d_maxsize = DFLTPHYS;
	sc->sc_disk->d_mediasize = regs[1];
	sc->sc_disk->d_unit = device_get_unit(sc->sc_dev);
	sc->sc_disk->d_sectorsize = FLASH_BLOCKSIZE;
	sc->sc_disk->d_stripesize = flash_blocksize;
	sc->sc_disk->d_dump = NULL;

	disk_create(sc->sc_disk, DISK_VERSION);
	bioq_init(&sc->sc_bio_queue);

	kproc_create(&opalflash_task, sc, &sc->sc_p, 0, 0,
	    "task: OPAL Flash");

	return (0);
}
static int
cfi_disk_attach(device_t dev)
{
	struct cfi_disk_softc *sc = device_get_softc(dev);

	sc->parent = device_get_softc(device_get_parent(dev));
	/* validate interface width; assumed by other code */
	if (sc->parent->sc_width != 1 && sc->parent->sc_width != 2 &&
	    sc->parent->sc_width != 4)
		return EINVAL;

	sc->disk = disk_alloc();
	if (sc->disk == NULL)
		return ENOMEM;
	sc->disk->d_name = "cfid";
	sc->disk->d_unit = device_get_unit(dev);
	sc->disk->d_open = cfi_disk_open;
	sc->disk->d_close = cfi_disk_close;
	sc->disk->d_strategy = cfi_disk_strategy;
	sc->disk->d_ioctl = cfi_disk_ioctl;
	sc->disk->d_dump = NULL;		/* NB: no dumps */
	sc->disk->d_getattr = cfi_disk_getattr;
	sc->disk->d_sectorsize = CFI_DISK_SECSIZE;
	sc->disk->d_mediasize = sc->parent->sc_size;
	sc->disk->d_maxsize = CFI_DISK_MAXIOSIZE;
	/* NB: use stripesize to hold the erase/region size */
	if (sc->parent->sc_regions) {
		/*
		 * Multiple regions, use the last one.  This is a
		 * total hack as it's (presently) used only by
		 * geom_redboot to locate the FIS directory which
		 * lies at the start of the last erase region.
		 */
		sc->disk->d_stripesize =
		    sc->parent->sc_region[sc->parent->sc_regions-1].r_blksz;
	} else
		sc->disk->d_stripesize = sc->disk->d_mediasize;
	sc->disk->d_drv1 = sc;
	disk_create(sc->disk, DISK_VERSION);

	mtx_init(&sc->qlock, "CFID I/O lock", NULL, MTX_DEF);
	bioq_init(&sc->bioq);

	sc->tq = taskqueue_create("cfid_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->tq);
	taskqueue_start_threads(&sc->tq, 1, PI_DISK, "cfid taskq");

	TASK_INIT(&sc->iotask, 0, cfi_io_proc, sc);

	return 0;
}
static int
htif_blk_attach(device_t dev)
{
	struct htif_blk_softc *sc;
	char prefix[] = " size=";
	char *str;
	long size;

	sc = device_get_softc(dev);
	sc->dev = dev;

	mtx_init(&sc->htif_io_mtx, device_get_nameunit(dev), "htif_blk",
	    MTX_DEF);
	HTIF_BLK_LOCK_INIT(sc);

	str = strstr(htif_get_id(dev), prefix);

	size = strtol(str + strlen(prefix), NULL, 10);
	if (size == 0)
		return (ENXIO);

	sc->index = htif_get_index(dev);
	if (sc->index < 0)
		return (EINVAL);
	htif_setup_intr(sc->index, htif_blk_intr, sc);

	sc->disk = disk_alloc();
	sc->disk->d_drv1 = sc;
	sc->disk->d_maxsize = 4096;	/* Max transfer */
	sc->disk->d_name = "htif_blk";
	sc->disk->d_open = htif_blk_open;
	sc->disk->d_close = htif_blk_close;
	sc->disk->d_strategy = htif_blk_strategy;
	sc->disk->d_unit = 0;
	sc->disk->d_sectorsize = SECTOR_SIZE;
	sc->disk->d_mediasize = size;
	disk_create(sc->disk, DISK_VERSION);

	bioq_init(&sc->bio_queue);

	sc->running = 1;

	kproc_create(&htif_blk_task, sc, &sc->p, 0, 0, "%s: transfer",
	    device_get_nameunit(dev));

	return (0);
}
static int
mambodisk_attach(device_t dev)
{
	struct mambodisk_softc *sc;
	struct disk *d;
	intmax_t mb;
	char unit;

	sc = device_get_softc(dev);
	sc->dev = dev;
	MBODISK_LOCK_INIT(sc);

	d = sc->disk = disk_alloc();
	d->d_open = mambodisk_open;
	d->d_close = mambodisk_close;
	d->d_strategy = mambodisk_strategy;
	d->d_name = "mambodisk";
	d->d_drv1 = sc;
	d->d_maxsize = MAXPHYS;		/* Maybe ask bridge? */
	d->d_sectorsize = 512;
	/* Set d_unit before the mambocalls below read it. */
	d->d_unit = device_get_unit(dev);
	sc->maxblocks = mambocall(MAMBO_DISK_INFO, MAMBO_INFO_BLKSZ,
	    d->d_unit) / 512;
	d->d_mediasize = mambocall(MAMBO_DISK_INFO, MAMBO_INFO_DEVSZ,
	    d->d_unit) * 1024ULL;	/* Mambo gives size in KB */

	mb = d->d_mediasize >> 20;	/* 1MiB == 1 << 20 */
	unit = 'M';
	if (mb >= 10240) {		/* 1GiB = 1024 MiB */
		unit = 'G';
		mb /= 1024;
	}
	device_printf(dev, "%ju%cB, %d byte sectors\n", mb, unit,
	    d->d_sectorsize);
	disk_create(d, DISK_VERSION);
	bioq_init(&sc->bio_queue);

	sc->running = 1;
	kproc_create(&mambodisk_task, sc, &sc->p, 0, 0, "task: mambo hd");

	return (0);
}
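/*
 * Worked example for the size reporting above (illustrative numbers
 * only): a simulated 8 GiB disk reports d_mediasize = 8589934592, so
 * mb = 8589934592 >> 20 = 8192.  Since 8192 < 10240 the attach message
 * prints "8192MB"; only from 10 GiB upward (mb >= 10240) does the code
 * divide by 1024 and print, e.g., "10GB".
 */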
struct acd *
acd_init_lun(struct atapi *ata, int unit, struct atapi_params *ap, int lun,
    struct devstat *device_stats)
{
	struct acd *ptr;
	dev_t pdev;

	if (!(ptr = malloc(sizeof(struct acd), M_TEMP, M_NOWAIT | M_ZERO)))
		return NULL;
	bioq_init(&ptr->bio_queue);
	ptr->ata = ata;
	ptr->unit = unit;
	ptr->lun = lun;
	ptr->param = ap;
	ptr->flags = F_MEDIA_CHANGED;
	ptr->flags &= ~(F_WRITTEN | F_TRACK_PREP | F_TRACK_PREPED);
	ptr->block_size = 2048;
	ptr->refcnt = 0;
	ptr->slot = -1;
	ptr->changer_info = NULL;
	if (device_stats == NULL) {
		if (!(ptr->device_stats = malloc(sizeof(struct devstat),
		    M_TEMP, M_NOWAIT | M_ZERO))) {
			/* don't leak the acd structure on failure */
			free(ptr, M_TEMP);
			return NULL;
		}
	} else
		ptr->device_stats = device_stats;

	pdev = make_dev(&acd_cdevsw, dkmakeminor(lun, 0, 0),
	    UID_ROOT, GID_OPERATOR, 0640, "wcd%da", lun);
	make_dev_alias(pdev, "rwcd%da", lun);
	pdev->si_drv1 = ptr;

	pdev = make_dev(&acd_cdevsw, dkmakeminor(lun, 0, RAW_PART),
	    UID_ROOT, GID_OPERATOR, 0640, "wcd%dc", lun);
	make_dev_alias(pdev, "rwcd%dc", lun);
	pdev->si_drv1 = ptr;

	return ptr;
}
int
isf_attach(struct isf_softc *sc)
{
	uint16_t id;
	u_long start, size;
	struct isf_chips *cp = chip_ids;

	start = rman_get_start(sc->isf_res);
	if (start % 2 != 0) {
		device_printf(sc->isf_dev,
		    "Unsupported flash start alignment %lu\n", start);
		return (ENXIO);
	}

	isf_write_cmd(sc, 0, ISF_CMD_RDI);
	id = isf_read_reg(sc, ISF_REG_ID);
	/*
	 * Scan the chip table; stop at the NULL-description sentinel so
	 * an unknown ID cannot walk past the end of the table.
	 */
	while (cp->chip_desc != NULL && cp->chip_id != id)
		cp++;
	if (cp->chip_desc == NULL) {
		device_printf(sc->isf_dev,
		    "Unsupported device ID 0x%04x\n", id);
		return (ENXIO);
	}
	isf_write_cmd(sc, 0, ISF_CMD_RA);

	size = rman_get_size(sc->isf_res);
	if (size != cp->chip_size) {
		device_printf(sc->isf_dev,
		    "Unsupported flash size %lu\n", size);
		return (ENXIO);
	}

	bioq_init(&sc->isf_bioq);
	ISF_LOCK_INIT(sc);
	sc->isf_disk = NULL;
	isf_disk_insert(sc, size);

	return (0);
}
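/*
 * Sketch of the table shape the lookup above assumes (hypothetical
 * entries, not taken from the driver source): chip_ids must be
 * terminated by a sentinel whose chip_desc is NULL.
 *
 *	static struct isf_chips chip_ids[] = {
 *		{ 0x8817, 8 * 1024 * 1024, "StrataFlash NOR" },
 *		{ 0x0000, 0,               NULL },	(sentinel)
 *	};
 */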
void
altera_sdcard_attach(struct altera_sdcard_softc *sc)
{

	ALTERA_SDCARD_LOCK_INIT(sc);
	ALTERA_SDCARD_CONDVAR_INIT(sc);
	sc->as_disk = NULL;
	bioq_init(&sc->as_bioq);
	sc->as_currentbio = NULL;
	sc->as_state = ALTERA_SDCARD_STATE_NOCARD;
	sc->as_taskqueue = taskqueue_create("altera_sdcardc taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->as_taskqueue);
	taskqueue_start_threads(&sc->as_taskqueue, 1, PI_DISK,
	    "altera_sdcardc%d taskqueue", sc->as_unit);
	TIMEOUT_TASK_INIT(sc->as_taskqueue, &sc->as_task, 0,
	    altera_sdcard_task, sc);

	/*
	 * Kick off timer-driven processing with a manual poll so that we
	 * synchronously detect an already-inserted SD Card during boot or
	 * another driver attach point.
	 */
	altera_sdcard_task(sc, 1);
}
int
afdattach(struct ata_device *atadev)
{
	struct afd_softc *fdp;
	dev_t dev;

	fdp = malloc(sizeof(struct afd_softc), M_AFD, M_NOWAIT | M_ZERO);
	if (!fdp) {
		ata_prtdev(atadev, "out of memory\n");
		return 0;
	}

	fdp->device = atadev;
	fdp->lun = ata_get_lun(&afd_lun_map);
	ata_set_name(atadev, "afd", fdp->lun);
	bioq_init(&fdp->queue);

	if (afd_sense(fdp)) {
		free(fdp, M_AFD);
		return 0;
	}

	devstat_add_entry(&fdp->stats, "afd", fdp->lun, DEV_BSIZE,
	    DEVSTAT_NO_ORDERED_TAGS,
	    DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_IDE,
	    DEVSTAT_PRIORITY_WFD);
	dev = disk_create(fdp->lun, &fdp->disk, 0,
	    &afd_cdevsw, &afddisk_cdevsw);
	dev->si_drv1 = fdp;
	fdp->dev = dev;
	fdp->dev->si_iosize_max = 256 * DEV_BSIZE;
	afd_describe(fdp);
	atadev->flags |= ATA_D_MEDIA_CHANGED;
	atadev->driver = fdp;
	return 1;
}
int
create_geom_disk(struct nand_chip *chip)
{
	struct disk *ndisk, *rdisk;

	/* Create the disk device */
	ndisk = disk_alloc();
	ndisk->d_strategy = nand_strategy;
	ndisk->d_ioctl = nand_ioctl;
	ndisk->d_getattr = nand_getattr;
	ndisk->d_name = "gnand";
	ndisk->d_drv1 = chip;
	ndisk->d_maxsize = chip->chip_geom.block_size;
	ndisk->d_sectorsize = chip->chip_geom.page_size;
	ndisk->d_mediasize = chip->chip_geom.chip_size;
	ndisk->d_unit = chip->num +
	    10 * device_get_unit(device_get_parent(chip->dev));

	/*
	 * When using BBT, make the last two blocks of the device
	 * unavailable to the user, because they are used to store
	 * the BBT table.
	 */
	if (chip->bbt != NULL)
		ndisk->d_mediasize -= (2 * chip->chip_geom.block_size);

	ndisk->d_flags = DISKFLAG_CANDELETE;

	snprintf(ndisk->d_ident, sizeof(ndisk->d_ident),
	    "nand: Man:0x%02x Dev:0x%02x", chip->id.man_id, chip->id.dev_id);
	ndisk->d_rotation_rate = DISK_RR_NON_ROTATING;

	disk_create(ndisk, DISK_VERSION);

	/* Create the RAW disk device */
	rdisk = disk_alloc();
	rdisk->d_strategy = nand_strategy_raw;
	rdisk->d_ioctl = nand_ioctl;
	rdisk->d_getattr = nand_getattr;
	rdisk->d_name = "gnand.raw";
	rdisk->d_drv1 = chip;
	rdisk->d_maxsize = chip->chip_geom.block_size;
	rdisk->d_sectorsize = chip->chip_geom.page_size;
	rdisk->d_mediasize = chip->chip_geom.chip_size;
	rdisk->d_unit = chip->num +
	    10 * device_get_unit(device_get_parent(chip->dev));
	rdisk->d_flags = DISKFLAG_CANDELETE;

	snprintf(rdisk->d_ident, sizeof(rdisk->d_ident),
	    "nand_raw: Man:0x%02x Dev:0x%02x", chip->id.man_id,
	    chip->id.dev_id);
	rdisk->d_rotation_rate = DISK_RR_NON_ROTATING;

	disk_create(rdisk, DISK_VERSION);

	chip->ndisk = ndisk;
	chip->rdisk = rdisk;

	mtx_init(&chip->qlock, "NAND I/O lock", NULL, MTX_DEF);
	bioq_init(&chip->bioq);

	TASK_INIT(&chip->iotask, 0, nand_io_proc, chip);
	chip->tq = taskqueue_create("nand_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &chip->tq);
	taskqueue_start_threads(&chip->tq, 1, PI_DISK, "nand taskq");

	if (bootverbose)
		device_printf(chip->dev, "Created gnand%d for chip [0x%0x, "
		    "0x%0x]\n", ndisk->d_unit, chip->id.man_id,
		    chip->id.dev_id);

	return (0);
}
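/*
 * Worked example of the BBT reservation above (illustrative numbers):
 * for a 256 MiB chip with 128 KiB erase blocks, enabling the BBT
 * reserves the last two blocks, so gnand's d_mediasize drops from
 * 268435456 to 268435456 - 2 * 131072 = 268173312 bytes.  The reserved
 * area stays reachable through gnand.raw, whose d_mediasize keeps the
 * full chip_size.
 */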
static struct g_geom *
g_uzip_taste(struct g_class *mp, struct g_provider *pp, int flags)
{
	int error;
	uint32_t i, total_offsets, offsets_read, blk;
	void *buf;
	struct cloop_header *header;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp2;
	struct g_uzip_softc *sc;
	enum {
		GEOM_UZIP = 1,
		GEOM_ULZMA
	} type;

	g_trace(G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name);
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	buf = NULL;

	/*
	 * Create geom instance.
	 */
	gp = g_new_geomf(mp, "%s.uzip", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error) {
		goto e1;
	}
	g_topology_unlock();

	/*
	 * Read cloop header, look for CLOOP magic, perform
	 * other validity checks.
	 */
	DPRINTF(GUZ_DBG_INFO, ("%s: media sectorsize %u, mediasize %jd\n",
	    gp->name, pp->sectorsize, (intmax_t)pp->mediasize));
	buf = g_read_data(cp, 0, pp->sectorsize, NULL);
	if (buf == NULL)
		goto e2;
	header = (struct cloop_header *) buf;
	if (strncmp(header->magic, CLOOP_MAGIC_START,
	    sizeof(CLOOP_MAGIC_START) - 1) != 0) {
		DPRINTF(GUZ_DBG_ERR, ("%s: no CLOOP magic\n", gp->name));
		goto e3;
	}

	switch (header->magic[CLOOP_OFS_COMPR]) {
	case CLOOP_COMP_LZMA:
	case CLOOP_COMP_LZMA_DDP:
		type = GEOM_ULZMA;
		if (header->magic[CLOOP_OFS_VERSN] < CLOOP_MINVER_LZMA) {
			DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
			    gp->name));
			goto e3;
		}
		DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_LZMA image found\n",
		    gp->name));
		break;
	case CLOOP_COMP_LIBZ:
	case CLOOP_COMP_LIBZ_DDP:
		type = GEOM_UZIP;
		if (header->magic[CLOOP_OFS_VERSN] < CLOOP_MINVER_ZLIB) {
			DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
			    gp->name));
			goto e3;
		}
		DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_ZLIB image found\n",
		    gp->name));
		break;
	default:
		DPRINTF(GUZ_DBG_ERR, ("%s: unsupported image type\n",
		    gp->name));
		goto e3;
	}

	/*
	 * Initialize softc and read offsets.
	 */
	sc = malloc(sizeof(*sc), M_GEOM_UZIP, M_WAITOK | M_ZERO);
	gp->softc = sc;
	sc->blksz = ntohl(header->blksz);
	sc->nblocks = ntohl(header->nblocks);
	if (sc->blksz % 512 != 0) {
		printf("%s: block size (%u) should be multiple of 512.\n",
		    gp->name, sc->blksz);
		goto e4;
	}
	if (sc->blksz > MAX_BLKSZ) {
		printf("%s: block size (%u) should not be larger than %d.\n",
		    gp->name, sc->blksz, MAX_BLKSZ);
		/* Enforce the limit rather than just warning. */
		goto e4;
	}
	total_offsets = sc->nblocks + 1;
	if (sizeof(struct cloop_header) +
	    total_offsets * sizeof(uint64_t) > pp->mediasize) {
		printf("%s: media too small for %u blocks\n",
		    gp->name, sc->nblocks);
		goto e4;
	}
	sc->toc = malloc(total_offsets * sizeof(struct g_uzip_blk),
	    M_GEOM_UZIP, M_WAITOK | M_ZERO);
	offsets_read = MIN(total_offsets,
	    (pp->sectorsize - sizeof(*header)) / sizeof(uint64_t));
	for (i = 0; i < offsets_read; i++) {
		sc->toc[i].offset = be64toh(((uint64_t *) (header + 1))[i]);
		sc->toc[i].blen = BLEN_UNDEF;
	}
	DPRINTF(GUZ_DBG_INFO, ("%s: %u offsets in the first sector\n",
	    gp->name, offsets_read));
	for (blk = 1; offsets_read < total_offsets; blk++) {
		uint32_t nread;

		free(buf, M_GEOM);
		buf = g_read_data(
		    cp, blk * pp->sectorsize, pp->sectorsize, NULL);
		if (buf == NULL)
			goto e5;
		nread = MIN(total_offsets - offsets_read,
		    pp->sectorsize / sizeof(uint64_t));
		DPRINTF(GUZ_DBG_TOC, ("%s: %u offsets read from sector %d\n",
		    gp->name, nread, blk));
		for (i = 0; i < nread; i++) {
			sc->toc[offsets_read + i].offset =
			    be64toh(((uint64_t *) buf)[i]);
			sc->toc[offsets_read + i].blen = BLEN_UNDEF;
		}
		offsets_read += nread;
	}
	free(buf, M_GEOM);
	buf = NULL;
	offsets_read -= 1;
	DPRINTF(GUZ_DBG_INFO, ("%s: done reading %u block offsets from %u "
	    "sectors\n", gp->name, offsets_read, blk));
	if (sc->nblocks != offsets_read) {
		DPRINTF(GUZ_DBG_ERR, ("%s: read %s offsets than expected "
		    "blocks\n", gp->name,
		    sc->nblocks < offsets_read ? "more" : "less"));
		goto e5;
	}

	/*
	 * "Fake" last+1 block, to make it easier for the TOC parser to
	 * iterate without making the last element a special case.
	 */
	sc->toc[sc->nblocks].offset = pp->mediasize;

	/* Massage TOC (table of contents), make sure it is sound */
	if (g_uzip_parse_toc(sc, pp, gp) != 0) {
		DPRINTF(GUZ_DBG_ERR, ("%s: TOC error\n", gp->name));
		goto e5;
	}

	mtx_init(&sc->last_mtx, "geom_uzip cache", NULL, MTX_DEF);
	mtx_init(&sc->queue_mtx, "geom_uzip wrkthread", NULL, MTX_DEF);
	bioq_init(&sc->bio_queue);
	sc->last_blk = -1;
	sc->last_buf = malloc(sc->blksz, M_GEOM_UZIP, M_WAITOK);
	sc->req_total = 0;
	sc->req_cached = 0;

	if (type == GEOM_UZIP) {
		sc->dcp = g_uzip_zlib_ctor(sc->blksz);
	} else {
		sc->dcp = g_uzip_lzma_ctor(sc->blksz);
	}
	if (sc->dcp == NULL) {
		goto e6;
	}

	sc->uzip_do = &g_uzip_do;

	error = kproc_create(g_uzip_wrkthr, sc, &sc->procp, 0, 0, "%s",
	    gp->name);
	if (error != 0) {
		goto e7;
	}

	g_topology_lock();
	pp2 = g_new_providerf(gp, "%s", gp->name);
	pp2->sectorsize = 512;
	pp2->mediasize = (off_t)sc->nblocks * sc->blksz;
	pp2->stripesize = pp->stripesize;
	pp2->stripeoffset = pp->stripeoffset;
	g_error_provider(pp2, 0);
	g_access(cp, -1, 0, 0);

	DPRINTF(GUZ_DBG_INFO, ("%s: taste ok (%d, %jd), (%d, %d), %x\n",
	    gp->name, pp2->sectorsize, (intmax_t)pp2->mediasize,
	    pp2->stripeoffset, pp2->stripesize, pp2->flags));
	DPRINTF(GUZ_DBG_INFO, ("%s: %u x %u blocks\n", gp->name,
	    sc->nblocks, sc->blksz));
	return (gp);

e7:
	sc->dcp->free(sc->dcp);
e6:
	/* last_buf and toc were allocated with M_GEOM_UZIP; free to match */
	free(sc->last_buf, M_GEOM_UZIP);
	mtx_destroy(&sc->queue_mtx);
	mtx_destroy(&sc->last_mtx);
e5:
	free(sc->toc, M_GEOM_UZIP);
e4:
	free(gp->softc, M_GEOM_UZIP);
e3:
	if (buf != NULL) {
		free(buf, M_GEOM);
	}
e2:
	g_topology_lock();
	g_access(cp, -1, 0, 0);
e1:
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);

	return (NULL);
}
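/*
 * On-disk layout the taste routine above decodes (derived from the
 * reads it performs, shown here for reference):
 *
 *	struct cloop_header           magic (compressor/version bytes),
 *	                              blksz, nblocks in network order
 *	uint64_t offsets[nblocks + 1] big-endian byte offsets of each
 *	                              compressed block; entry i+1 minus
 *	                              entry i gives block i's compressed
 *	                              length, which is why a fake final
 *	                              offset (pp->mediasize) is appended
 *	compressed blocks...
 */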
static cam_status
ptctor(struct cam_periph *periph, void *arg)
{
	struct pt_softc *softc;
	struct ccb_setasync csa;
	struct ccb_getdev *cgd;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("ptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("ptregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct pt_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);
	if (softc == NULL) {
		printf("ptregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	LIST_INIT(&softc->pending_ccbs);
	softc->state = PT_STATE_NORMAL;
	bioq_init(&softc->bio_queue);
	softc->io_timeout = SCSI_PT_DEFAULT_TIMEOUT * 1000;

	periph->softc = softc;

	devstat_add_entry(&softc->device_stats, "pt",
	    periph->unit_number, 0,
	    DEVSTAT_NO_BLOCKSIZE,
	    SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
	    DEVSTAT_PRIORITY_OTHER);

	softc->dev = make_dev(&pt_cdevsw, periph->unit_number, UID_ROOT,
	    GID_OPERATOR, 0600, "%s%d", periph->periph_name,
	    periph->unit_number);
	softc->dev->si_drv1 = periph;

	/*
	 * Add async callbacks for bus reset and bus device reset calls.
	 * I don't bother checking if this fails as, in most cases, the
	 * system will function just fine without them and the only
	 * alternative would be to not attach the device on failure.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
	csa.callback = ptasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);

	/* Tell the user we've attached to the device */
	xpt_announce_periph(periph, NULL);

	return(CAM_REQ_CMP);
}
static void *
nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg)
{
	struct nvd_disk *ndisk;
	struct disk *disk;
	struct nvd_controller *ctrlr = ctrlr_arg;

	ndisk = malloc(sizeof(struct nvd_disk), M_NVD, M_ZERO | M_WAITOK);

	disk = disk_alloc();
	disk->d_strategy = nvd_strategy;
	disk->d_ioctl = nvd_ioctl;
	disk->d_name = "nvd";
	disk->d_drv1 = ndisk;
	disk->d_maxsize = nvme_ns_get_max_io_xfer_size(ns);
	disk->d_sectorsize = nvme_ns_get_sector_size(ns);
	disk->d_mediasize = (off_t)nvme_ns_get_size(ns);

	if (TAILQ_EMPTY(&disk_head))
		disk->d_unit = 0;
	else
		disk->d_unit =
		    TAILQ_LAST(&disk_head, disk_list)->disk->d_unit + 1;

	disk->d_flags = 0;

	if (nvme_ns_get_flags(ns) & NVME_NS_DEALLOCATE_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANDELETE;

	if (nvme_ns_get_flags(ns) & NVME_NS_FLUSH_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANFLUSHCACHE;

/* ifdef used here to ease porting to stable branches at a later point. */
#ifdef DISKFLAG_UNMAPPED_BIO
	disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
#endif

	strlcpy(disk->d_ident, nvme_ns_get_serial_number(ns),
	    sizeof(disk->d_ident));
#if __FreeBSD_version >= 900034
	strlcpy(disk->d_descr, nvme_ns_get_model_number(ns),
	    sizeof(disk->d_descr));
#endif

	disk_create(disk, DISK_VERSION);

	ndisk->ns = ns;
	ndisk->disk = disk;
	ndisk->cur_depth = 0;

	mtx_init(&ndisk->bioqlock, "NVD bioq lock", NULL, MTX_DEF);
	bioq_init(&ndisk->bioq);

	TASK_INIT(&ndisk->bioqtask, 0, nvd_bioq_process, ndisk);
	ndisk->tq = taskqueue_create("nvd_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ndisk->tq);
	taskqueue_start_threads(&ndisk->tq, 1, PI_DISK, "nvd taskq");

	TAILQ_INSERT_TAIL(&disk_head, ndisk, global_tailq);
	TAILQ_INSERT_TAIL(&ctrlr->disk_head, ndisk, ctrlr_tailq);

	return (NULL);
}
static int
vtblk_attach(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;

	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));

	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	virtio_set_feature_desc(dev, vtblk_feature_desc);
	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;

	/* Get local copy of config. */
	virtio_read_device_config(dev, 0, &blkcfg,
	    sizeof(struct virtio_blk_config));

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size as adjacent
	 * segments are coalesced. For now, just make sure it's larger
	 * than the maximum supported transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			error = ENOTSUP;
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");
			goto fail;
		}
	}

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		error = EINVAL;
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);
		goto fail;
	}

	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	error = vtblk_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");
		goto fail;
	}

	vtblk_alloc_disk(sc, &blkcfg);

	TASK_INIT(&sc->vtblk_intr_task, 0, vtblk_intr_task, sc);
	sc->vtblk_tq = taskqueue_create_fast("vtblk_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vtblk_tq);
	if (sc->vtblk_tq == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate taskqueue\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");
		goto fail;
	}

	taskqueue_start_threads(&sc->vtblk_tq, 1, PI_DISK, "%s taskq",
	    device_get_nameunit(dev));

	vtblk_create_disk(sc);

	virtqueue_enable_intr(sc->vtblk_vq);

fail:
	if (error)
		vtblk_detach(dev);

	return (error);
}
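/*
 * Illustrative sketch (an assumption, not the driver's verbatim helper):
 * vtblk_maximum_segments() must account for the descriptors every VirtIO
 * block request carries regardless of data length -- one for the request
 * header and one for the 1-byte status -- which is why VTBLK_MIN_SEGMENTS
 * is the floor the attach code checks against.  A plausible computation:
 */
static int
vtblk_maximum_segments_sketch(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	int nsegs;

	/* Header and status descriptors are always required. */
	nsegs = VTBLK_MIN_SEGMENTS;

	if (virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_SEG_MAX)) {
		/* Data segments: host's limit, capped by a MAXPHYS transfer. */
		nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
	} else
		nsegs += 1;	/* host advertised no limit; stay conservative */

	return (nsegs);
}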
static int
pst_attach(device_t dev)
{
	struct pst_softc *psc = device_get_softc(dev);
	struct i2o_get_param_reply *reply;
	struct i2o_device_identity *ident;
	int lun = device_get_unit(dev);
	char name[32];

	if (!(reply = iop_get_util_params(psc->iop, psc->lct->local_tid,
	    I2O_PARAMS_OPERATION_FIELD_GET, I2O_BSA_DEVICE_INFO_GROUP_NO)))
		return ENODEV;

	if (!(psc->info = (struct i2o_bsa_device *)
	    malloc(sizeof(struct i2o_bsa_device), M_PSTRAID, M_NOWAIT))) {
		contigfree(reply, PAGE_SIZE, M_PSTIOP);
		return ENOMEM;
	}
	bcopy(reply->result, psc->info, sizeof(struct i2o_bsa_device));
	contigfree(reply, PAGE_SIZE, M_PSTIOP);

	if (!(reply = iop_get_util_params(psc->iop, psc->lct->local_tid,
	    I2O_PARAMS_OPERATION_FIELD_GET,
	    I2O_UTIL_DEVICE_IDENTITY_GROUP_NO)))
		return ENODEV;
	ident = (struct i2o_device_identity *)reply->result;
#ifdef PSTDEBUG
	printf("pst: vendor=<%.16s> product=<%.16s>\n",
	    ident->vendor, ident->product);
	printf("pst: description=<%.16s> revision=<%.8s>\n",
	    ident->description, ident->revision);
	printf("pst: capacity=%lld blocksize=%d\n",
	    psc->info->capacity, psc->info->block_size);
#endif
	bpack(ident->vendor, ident->vendor, 16);
	bpack(ident->product, ident->product, 16);
	/* snprintf: vendor + product could exceed the 32-byte buffer */
	snprintf(name, sizeof(name), "%s %s", ident->vendor, ident->product);
	contigfree(reply, PAGE_SIZE, M_PSTIOP);

	bioq_init(&psc->queue);

	psc->disk = disk_alloc();
	psc->disk->d_name = "pst";
	psc->disk->d_strategy = pststrategy;
	psc->disk->d_maxsize = 64 * 1024; /*I2O_SGL_MAX_SEGS * PAGE_SIZE;*/
	psc->disk->d_drv1 = psc;
	psc->disk->d_unit = lun;
	psc->disk->d_sectorsize = psc->info->block_size;
	psc->disk->d_mediasize = psc->info->capacity;
	psc->disk->d_fwsectors = 63;
	psc->disk->d_fwheads = 255;
	disk_create(psc->disk, DISK_VERSION);

	printf("pst%d: %lluMB <%.40s> [%lld/%d/%d] on %.16s\n", lun,
	    (unsigned long long)psc->info->capacity / (1024 * 1024), name,
	    psc->info->capacity / (512 * 255 * 63), 255, 63,
	    device_get_nameunit(psc->iop->dev));

	EVENTHANDLER_REGISTER(shutdown_post_sync, pst_shutdown,
	    dev, SHUTDOWN_PRI_FIRST);
	return 0;
}
static void *
nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg)
{
	uint8_t descr[NVME_MODEL_NUMBER_LENGTH + 1];
	struct nvd_disk *ndisk;
	struct disk *disk;
	struct nvd_controller *ctrlr = ctrlr_arg;

	ndisk = malloc(sizeof(struct nvd_disk), M_NVD, M_ZERO | M_WAITOK);

	disk = disk_alloc();
	disk->d_strategy = nvd_strategy;
	disk->d_ioctl = nvd_ioctl;
	disk->d_name = NVD_STR;
	disk->d_drv1 = ndisk;
	disk->d_maxsize = nvme_ns_get_max_io_xfer_size(ns);
	disk->d_sectorsize = nvme_ns_get_sector_size(ns);
	disk->d_mediasize = (off_t)nvme_ns_get_size(ns);

	if (TAILQ_EMPTY(&disk_head))
		disk->d_unit = 0;
	else
		disk->d_unit =
		    TAILQ_LAST(&disk_head, disk_list)->disk->d_unit + 1;

	disk->d_flags = 0;

	if (nvme_ns_get_flags(ns) & NVME_NS_DEALLOCATE_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANDELETE;

	if (nvme_ns_get_flags(ns) & NVME_NS_FLUSH_SUPPORTED)
		disk->d_flags |= DISKFLAG_CANFLUSHCACHE;

/* ifdef used here to ease porting to stable branches at a later point. */
#ifdef DISKFLAG_UNMAPPED_BIO
	disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
#endif

	/*
	 * d_ident and d_descr are both far bigger than the length of either
	 * the serial or model number strings.
	 */
	nvme_strvis(disk->d_ident, nvme_ns_get_serial_number(ns),
	    sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH);
	nvme_strvis(descr, nvme_ns_get_model_number(ns), sizeof(descr),
	    NVME_MODEL_NUMBER_LENGTH);
#if __FreeBSD_version >= 900034
	strlcpy(disk->d_descr, descr, sizeof(descr));
#endif

	ndisk->ns = ns;
	ndisk->disk = disk;
	ndisk->cur_depth = 0;

	mtx_init(&ndisk->bioqlock, "NVD bioq lock", NULL, MTX_DEF);
	bioq_init(&ndisk->bioq);

	TASK_INIT(&ndisk->bioqtask, 0, nvd_bioq_process, ndisk);
	ndisk->tq = taskqueue_create("nvd_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &ndisk->tq);
	taskqueue_start_threads(&ndisk->tq, 1, PI_DISK, "nvd taskq");

	TAILQ_INSERT_TAIL(&disk_head, ndisk, global_tailq);
	TAILQ_INSERT_TAIL(&ctrlr->disk_head, ndisk, ctrlr_tailq);

	disk_create(disk, DISK_VERSION);

	printf(NVD_STR"%u: <%s> NVMe namespace\n", disk->d_unit, descr);
	printf(NVD_STR"%u: %juMB (%ju %u byte sectors)\n", disk->d_unit,
	    (uintmax_t)disk->d_mediasize / (1024 * 1024),
	    (uintmax_t)disk->d_mediasize / disk->d_sectorsize,
	    disk->d_sectorsize);

	return (NULL);
}
int
ida_init(struct ida_softc *ida)
{
	struct ida_controller_info cinfo;
	device_t child;
	int error, i, unit;

	SLIST_INIT(&ida->free_qcbs);
	STAILQ_INIT(&ida->qcb_queue);
	bioq_init(&ida->bio_queue);

	ida->qcbs = (struct ida_qcb *)
	    malloc(IDA_QCB_MAX * sizeof(struct ida_qcb), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (ida->qcbs == NULL)
		return (ENOMEM);

	/*
	 * Create our DMA tags
	 */

	/* DMA tag for our hardware QCB structures */
	error = bus_dma_tag_create(
	    /* parent	*/ ida->parent_dmat,
	    /* alignment */ 1,
	    /* boundary	*/ 0,
	    /* lowaddr	*/ BUS_SPACE_MAXADDR,
	    /* highaddr	*/ BUS_SPACE_MAXADDR,
	    /* filter	*/ NULL,
	    /* filterarg */ NULL,
	    /* maxsize	*/ IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    /* nsegments */ 1,
	    /* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
	    /* flags	*/ 0,
	    /* lockfunc	*/ NULL,
	    /* lockarg	*/ NULL,
	    &ida->hwqcb_dmat);
	if (error)
		return (ENOMEM);

	/* DMA tag for mapping buffers into device space */
	error = bus_dma_tag_create(
	    /* parent	*/ ida->parent_dmat,
	    /* alignment */ 1,
	    /* boundary	*/ 0,
	    /* lowaddr	*/ BUS_SPACE_MAXADDR,
	    /* highaddr	*/ BUS_SPACE_MAXADDR,
	    /* filter	*/ NULL,
	    /* filterarg */ NULL,
	    /* maxsize	*/ MAXBSIZE,
	    /* nsegments */ IDA_NSEG,
	    /* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
	    /* flags	*/ 0,
	    /* lockfunc	*/ busdma_lock_mutex,
	    /* lockarg	*/ &Giant,
	    &ida->buffer_dmat);
	if (error)
		return (ENOMEM);

	/* Allocation of hardware QCBs */
	/* XXX allocation is rounded to hardware page size */
	error = bus_dmamem_alloc(ida->hwqcb_dmat,
	    (void **)&ida->hwqcbs, BUS_DMA_NOWAIT, &ida->hwqcb_dmamap);
	if (error)
		return (ENOMEM);

	/* And permanently map them in */
	bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap,
	    ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0);

	bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb));

	error = ida_alloc_qcbs(ida);
	if (error)
		return (error);

	mtx_lock(&ida->lock);
	ida->cmd.int_enable(ida, 0);

	error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo),
	    IDA_CONTROLLER, 0, DMA_DATA_IN);
	if (error) {
		mtx_unlock(&ida->lock);
		device_printf(ida->dev, "CMD_GET_CTRL_INFO failed.\n");
		return (error);
	}

	device_printf(ida->dev, "drives=%d firm_rev=%c%c%c%c\n",
	    cinfo.num_drvs, cinfo.firm_rev[0], cinfo.firm_rev[1],
	    cinfo.firm_rev[2], cinfo.firm_rev[3]);

	if (ida->flags & IDA_FIRMWARE) {
		int data;

		error = ida_command(ida, CMD_START_FIRMWARE,
		    &data, sizeof(data), IDA_CONTROLLER, 0, DMA_DATA_IN);
		if (error) {
			mtx_unlock(&ida->lock);
			device_printf(ida->dev,
			    "CMD_START_FIRMWARE failed.\n");
			return (error);
		}
	}

	ida->cmd.int_enable(ida, 1);
	ida->flags |= IDA_ATTACHED;
	mtx_unlock(&ida->lock);

	for (i = 0; i < cinfo.num_drvs; i++) {
		child = device_add_child(ida->dev, /*"idad"*/NULL, -1);
		if (child != NULL)
			device_set_ivars(child, (void *)(intptr_t)i);
	}

	ida->ich.ich_func = ida_startup;
	ida->ich.ich_arg = ida;
	if (config_intrhook_establish(&ida->ich) != 0) {
		device_delete_children(ida->dev);
		device_printf(ida->dev,
		    "Cannot establish configuration hook\n");
		/* error is 0 here, so return a real errno */
		return (ENXIO);
	}

	unit = device_get_unit(ida->dev);
	ida->ida_dev_t = make_dev(&ida_cdevsw, unit,
	    UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
	    "ida%d", unit);
	ida->ida_dev_t->si_drv1 = ida;

	return (0);
}
static int
vtblk_attach(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;

	lwkt_serialize_init(&sc->vtblk_slz);

	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	virtio_set_feature_desc(dev, vtblk_feature_desc);
	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;

	vtblk_setup_sysctl(sc);

	/* Get local copy of config. */
	virtio_read_device_config(dev, 0, &blkcfg,
	    sizeof(struct virtio_blk_config));

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size as adjacent
	 * segments are coalesced. For now, just make sure it's larger
	 * than the maximum supported transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			error = ENOTSUP;
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");
			goto fail;
		}
	}

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		error = EINVAL;
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);
		goto fail;
	}

	/*
	 * Allocate working sglist. The number of segments may be too
	 * large to safely store on the stack.
	 */
	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	error = vtblk_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");
		goto fail;
	}

	vtblk_alloc_disk(sc, &blkcfg);

	error = virtio_setup_intr(dev, &sc->vtblk_slz);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");
		goto fail;
	}

	virtqueue_enable_intr(sc->vtblk_vq);

fail:
	if (error)
		vtblk_detach(dev);

	return (error);
}