Example #1
/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int xen_block_parse_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    size_t len;
    int i;

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        request->presync = 1;
        if (!request->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        error_report("error: unknown operation (%d)", request->req.operation);
        goto err;
    }

    if (request->req.operation != BLKIF_OP_READ &&
        blk_is_read_only(dataplane->blk)) {
        error_report("error: write req for ro device");
        goto err;
    }

    request->start = request->req.sector_number * XEN_BLKIF_SECTOR_SIZE;
    for (i = 0; i < request->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            error_report("error: nr_segments too big");
            goto err;
        }
        if (request->req.seg[i].first_sect > request->req.seg[i].last_sect) {
            error_report("error: first > last sector");
            goto err;
        }
        if (request->req.seg[i].last_sect * XEN_BLKIF_SECTOR_SIZE >=
            XC_PAGE_SIZE) {
            error_report("error: page crossing");
            goto err;
        }

        len = (request->req.seg[i].last_sect -
               request->req.seg[i].first_sect + 1) * XEN_BLKIF_SECTOR_SIZE;
        request->size += len;
    }
    if (request->start + request->size > blk_getlength(dataplane->blk)) {
        error_report("error: access beyond end of file");
        goto err;
    }
    return 0;

err:
    request->status = BLKIF_RSP_ERROR;
    return -1;
}
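A recurring pattern in these examples is refusing write-type requests when the BlockBackend was opened read-only, as the blk_is_read_only() check above does before building the iovec. A minimal sketch of that guard as a stand-alone helper (hypothetical, not part of the quoted source) could look like:

static int check_writable(BlockBackend *blk, bool is_write, Error **errp)
{
    /* Refuse write-like requests when the backend was opened read-only. */
    if (is_write && blk_is_read_only(blk)) {
        error_setg(errp, "write request for read-only device");
        return -EROFS;
    }
    return 0;
}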
Example #2
static void m25p80_realize(SSISlave *ss, Error **errp)
{
    Flash *s = M25P80(ss);
    M25P80Class *mc = M25P80_GET_CLASS(s);
    int ret;

    s->pi = mc->pi;

    s->size = s->pi->sector_size * s->pi->n_sectors;
    s->dirty_page = -1;

    if (s->blk) {
        uint64_t perm = BLK_PERM_CONSISTENT_READ |
                        (blk_is_read_only(s->blk) ? 0 : BLK_PERM_WRITE);
        ret = blk_set_perm(s->blk, perm, BLK_PERM_ALL, errp);
        if (ret < 0) {
            return;
        }

        DB_PRINT_L(0, "Binding to IF_MTD drive\n");
        s->storage = blk_blockalign(s->blk, s->size);

        if (blk_pread(s->blk, 0, s->storage, s->size) != s->size) {
            error_setg(errp, "failed to read the initial flash content");
            return;
        }
    } else {
        DB_PRINT_L(0, "No BDRV - binding to RAM\n");
        s->storage = blk_blockalign(NULL, s->size);
        memset(s->storage, 0xFF, s->size);
    }
}
Example #3
File: nand.c Project: tornadory/orp
static void nand_realize(DeviceState *dev, Error **errp)
{
    int pagesize;
    NANDFlashState *s = NAND(dev);

    s->buswidth = nand_flash_ids[s->chip_id].width >> 3;
    s->size = nand_flash_ids[s->chip_id].size << 20;
    if (nand_flash_ids[s->chip_id].options & NAND_SAMSUNG_LP) {
        s->page_shift = 11;
        s->erase_shift = 6;
    } else {
        s->page_shift = nand_flash_ids[s->chip_id].page_shift;
        s->erase_shift = nand_flash_ids[s->chip_id].erase_shift;
    }

    switch (1 << s->page_shift) {
    case 256:
        nand_init_256(s);
        break;
    case 512:
        nand_init_512(s);
        break;
    case 2048:
        nand_init_2048(s);
        break;
    default:
        error_setg(errp, "Unsupported NAND block size %#x",
                   1 << s->page_shift);
        return;
    }

    pagesize = 1 << s->oob_shift;
    s->mem_oob = 1;
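    /*
     * Decide where page data and the out-of-band (OOB) area live: with a
     * backing drive large enough for both, everything stays on the image
     * (pagesize = 0, mem_oob = 0); otherwise the in-memory buffer holds the
     * OOB area, and without a drive it holds the page data as well.
     */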
    if (s->blk) {
        if (blk_is_read_only(s->blk)) {
            error_setg(errp, "Can't use a read-only drive");
            return;
        }
        if (blk_getlength(s->blk) >=
                (s->pages << s->page_shift) + (s->pages << s->oob_shift)) {
            pagesize = 0;
            s->mem_oob = 0;
        }
    } else {
        pagesize += 1 << s->page_shift;
    }
    if (pagesize) {
        s->storage = (uint8_t *) memset(g_malloc(s->pages * pagesize),
                                        0xff, s->pages * pagesize);
    }
    /* Give s->ioaddr a sane value in case we save state before it is used. */
    s->ioaddr = s->io;
}
Example #4
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        return;
    }

    len = r->io_header.dxfer_len - r->io_header.resid;
    DPRINTF("Data ready tag=0x%x len=%d\n", r->req.tag, len);

    r->len = -1;
    if (len == 0) {
        scsi_command_complete_noio(r, 0);
        return;
    }

    /* Snoop READ CAPACITY output to set the blocksize.  */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /* Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE) &&
        blk_is_read_only(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
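        /* Set the WP bit in the device-specific parameter byte of the mode
         * parameter header: byte 2 for MODE SENSE(6), byte 3 for MODE SENSE(10). */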
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else  {
            r->buf[3] |= 0x80;
        }
    }
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);
}
Example #5
static inline void flash_sync_area(Flash *s, int64_t off, int64_t len)
{
    QEMUIOVector *iov;

    if (!s->blk || blk_is_read_only(s->blk)) {
        return;
    }

    assert(!(len % BDRV_SECTOR_SIZE));
    iov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(iov, 1);
    qemu_iovec_add(iov, s->storage + off, len);
    blk_aio_pwritev(s->blk, off, iov, 0, blk_sync_complete, iov);
}
Example #6
static void flash_sync_page(Flash *s, int page)
{
    QEMUIOVector *iov;

    if (!s->blk || blk_is_read_only(s->blk)) {
        return;
    }

    iov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(iov, 1);
    qemu_iovec_add(iov, s->storage + page * s->pi->page_size,
                   s->pi->page_size);
    blk_aio_pwritev(s->blk, page * s->pi->page_size, iov, 0,
                    blk_sync_complete, iov);
}
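Examples #5 and #6 heap-allocate the QEMUIOVector and pass it to blk_aio_pwritev() as the opaque argument, so the completion callback is responsible for releasing it. The callback itself is not shown in these snippets; a plausible shape for it (an assumption based on the allocation pattern above, not quoted source) would be:

static void blk_sync_complete(void *opaque, int ret)
{
    QEMUIOVector *iov = opaque;

    /* Nothing else to do on completion: tear down and free the iovec that
     * flash_sync_area()/flash_sync_page() allocated for this request. */
    qemu_iovec_destroy(iov);
    g_free(iov);
}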
Example #7
static void flash_sync_page(Flash *s, int page)
{
    int blk_sector, nb_sectors;
    QEMUIOVector iov;

    if (!s->blk || blk_is_read_only(s->blk)) {
        return;
    }

    blk_sector = (page * s->pi->page_size) / BDRV_SECTOR_SIZE;
    nb_sectors = DIV_ROUND_UP(s->pi->page_size, BDRV_SECTOR_SIZE);
    qemu_iovec_init(&iov, 1);
    qemu_iovec_add(&iov, s->storage + blk_sector * BDRV_SECTOR_SIZE,
                   nb_sectors * BDRV_SECTOR_SIZE);
    blk_aio_writev(s->blk, blk_sector, &iov, nb_sectors, blk_sync_complete,
                   NULL);
}
Example #8
static inline void flash_sync_area(Flash *s, int64_t off, int64_t len)
{
    int64_t start, end, nb_sectors;
    QEMUIOVector iov;

    if (!s->blk || blk_is_read_only(s->blk)) {
        return;
    }

    assert(!(len % BDRV_SECTOR_SIZE));
    start = off / BDRV_SECTOR_SIZE;
    end = (off + len) / BDRV_SECTOR_SIZE;
    nb_sectors = end - start;
    qemu_iovec_init(&iov, 1);
    qemu_iovec_add(&iov, s->storage + (start * BDRV_SECTOR_SIZE),
                                        nb_sectors * BDRV_SECTOR_SIZE);
    blk_aio_writev(s->blk, start, &iov, nb_sectors, blk_sync_complete, NULL);
}
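Examples #7 and #8 use the older sector-based blk_aio_writev(), while Examples #5 and #6 use the byte-based blk_aio_pwritev(). Assuming the iovec already covers the whole range, the two roughly correspond as in this sketch (a hypothetical wrapper, not QEMU API):

static inline BlockAIOCB *aio_write_sectors(BlockBackend *blk, int64_t sector,
                                            QEMUIOVector *qiov,
                                            BlockCompletionFunc *cb, void *opaque)
{
    /* Sector-based submission expressed through the byte-based interface:
     * the offset is the sector number scaled by BDRV_SECTOR_SIZE. */
    return blk_aio_pwritev(blk, sector * BDRV_SECTOR_SIZE, qiov, 0, cb, opaque);
}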
Example #9
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       blk_is_read_only(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    DPRINTF("device type %d\n", s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /* Make a guess for block devices, we'll fix it when the guest sends
         * READ CAPACITY.  If they don't, they likely would assume these sizes
         * anyway. (TODO: they could also send MODE SENSE).
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    DPRINTF("block size %d\n", s->blocksize);

    scsi_generic_read_device_identification(s);
}
Example #10
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    len = r->io_header.dxfer_len - r->io_header.resid;
    DPRINTF("Data ready tag=0x%x len=%d\n", r->req.tag, len);

    r->len = -1;
    if (len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize.  */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /* Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE) &&
        blk_is_read_only(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else  {
            r->buf[3] |= 0x80;
        }
    }
    if (s->type == TYPE_DISK &&
        r->req.cmd.buf[0] == INQUIRY &&
        r->req.cmd.buf[2] == 0xb0) {
        uint32_t max_transfer =
            blk_get_max_transfer(s->conf.blk) / s->blocksize;

        assert(max_transfer);
        stl_be_p(&r->buf[8], max_transfer);
        /* Also take care of the opt xfer len. */
        stl_be_p(&r->buf[12],
                 MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
    }
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}
Example #11
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags;
    bool readonly = true;
    bool writethrough = true;
    int order, ring_ref;
    unsigned int ring_size, max_grants;
    unsigned int i;

    trace_xen_disk_connect(xendev->name);

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = 0;
        writethrough = false;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
    index = (xendev->dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        QDict *options = NULL;

        if (strcmp(blkdev->fileproto, "<unset>")) {
            options = qdict_new();
            qdict_put_str(options, "driver", blkdev->fileproto);
        }

        /* setup via xenbus -> create new block driver instance */
        xen_pv_printf(xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
                                   qflags, &local_err);
        if (!blkdev->blk) {
            xen_pv_printf(xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            return -1;
        }
        blk_set_enable_write_cache(blkdev->blk, !writethrough);
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_pv_printf(xendev, 2,
                      "get configured bdrv (cmdline setup)\n");
        blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
        if (blk_is_read_only(blkdev->blk) && !readonly) {
            xen_pv_printf(xendev, 0, "Unexpected read-only drive");
            blkdev->blk = NULL;
            return -1;
        }
        /* blkdev->blk is not created by us, we get a reference
         * so we can blk_unref() unconditionally */
        blk_ref(blkdev->blk);
    }
    blk_attach_dev_legacy(blkdev->blk, blkdev);
    blkdev->file_size = blk_getlength(blkdev->blk);
    if (blkdev->file_size < 0) {
        BlockDriverState *bs = blk_bs(blkdev->blk);
        const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
        xen_pv_printf(xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      drv_name ?: "-");
        blkdev->file_size = 0;
    }