Example #1
void mptsas_fix_config_endianness(MPIMsgConfig *req)
{
    le16_to_cpus(&req->ExtPageLength);
    le32_to_cpus(&req->MsgContext);
    le32_to_cpus(&req->PageAddress);
    mptsas_fix_sgentry_endianness(&req->PageBufferSGE);
}
Example #2
void mptsas_fix_scsi_io_endianness(MPIMsgSCSIIORequest *req)
{
    le32_to_cpus(&req->MsgContext);
    le32_to_cpus(&req->Control);
    le32_to_cpus(&req->DataLength);
    le32_to_cpus(&req->SenseBufferLowAddr);
}
Example #3
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
{
	le32_to_cpus(&r->base);
	le16_to_cpus(&r->entry_size);
	le16_to_cpus(&r->size);
	le32_to_cpus(&r->tail);
	le32_to_cpus(&r->head);
}
Example #4
static void mptsas_fix_sgentry_endianness(MPISGEntry *sge)
{
    le32_to_cpus(&sge->FlagsLength);
    if (sge->FlagsLength & MPI_SGE_FLAGS_64_BIT_ADDRESSING) {
       le64_to_cpus(&sge->u.Address64);
    } else {
       le32_to_cpus(&sge->u.Address32);
    }
}
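All of these fix-up helpers rely on the same in-place contract: le16_to_cpus(), le32_to_cpus() and le64_to_cpus() take a pointer to a little-endian field and rewrite it in host byte order, which is a no-op on little-endian hosts and a byte swap on big-endian ones. For reference, a minimal portable sketch of that contract in plain C (an illustration, not the kernel or QEMU implementation):

#include <stdint.h>
#include <string.h>

/* Illustrative only: reassemble the value from its stored little-endian
 * bytes; the result is in host byte order on any platform. */
static void le32_to_cpus_sketch(uint32_t *p)
{
    uint8_t b[4];

    memcpy(b, p, sizeof(b));
    *p = (uint32_t)b[0] |
         ((uint32_t)b[1] << 8) |
         ((uint32_t)b[2] << 16) |
         ((uint32_t)b[3] << 24);
}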
Example #5
void mptsas_fix_ioc_init_endianness(MPIMsgIOCInit *req)
{
    le32_to_cpus(&req->MsgContext);
    le16_to_cpus(&req->ReplyFrameSize);
    le32_to_cpus(&req->HostMfaHighAddr);
    le32_to_cpus(&req->SenseBufferHighAddr);
    le32_to_cpus(&req->ReplyFifoHostSignalingAddr);
    mptsas_fix_sgentry_endianness(&req->HostPageBufferSGE);
    le16_to_cpus(&req->MsgVersion);
    le16_to_cpus(&req->HeaderVersion);
}
Example #6
static int parallels_open(BlockDriverState *bs, int flags)
{
    BDRVParallelsState *s = bs->opaque;
    int i;
    struct parallels_header ph;

    bs->read_only = 1; // no write support yet

    if (bdrv_pread(bs->file, 0, &ph, sizeof(ph)) != sizeof(ph))
        goto fail;

    if (memcmp(ph.magic, HEADER_MAGIC, 16) ||
	(le32_to_cpu(ph.version) != HEADER_VERSION)) {
        goto fail;
    }

    bs->total_sectors = le32_to_cpu(ph.nb_sectors);

    s->tracks = le32_to_cpu(ph.tracks);

    s->catalog_size = le32_to_cpu(ph.catalog_entries);
    s->catalog_bitmap = g_malloc(s->catalog_size * 4);
    if (bdrv_pread(bs->file, 64, s->catalog_bitmap, s->catalog_size * 4) !=
	s->catalog_size * 4)
	goto fail;
    for (i = 0; i < s->catalog_size; i++)
	le32_to_cpus(&s->catalog_bitmap[i]);

    return 0;
fail:
    if (s->catalog_bitmap)
	g_free(s->catalog_bitmap);
    return -1;
}
Example #7
static u32 *
cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
			u32 *bfi_image_size, char *fw_name)
{
	const struct firmware *fw;
	u32 n;

	if (request_firmware(&fw, fw_name, &pdev->dev)) {
		dev_alert(&pdev->dev, "can't load firmware %s\n", fw_name);
		goto error;
	}

	*bfi_image = (u32 *)fw->data;
	*bfi_image_size = fw->size/sizeof(u32);
	bfi_fw = fw;

	/* Convert loaded firmware to host order as it is stored in file
	 * as sequence of LE32 integers.
	 */
	for (n = 0; n < *bfi_image_size; n++)
		le32_to_cpus(*bfi_image + n);

	return *bfi_image;
error:
	return NULL;
}
Example #8
/* All VHDX structures on disk are little endian */
static void vhdx_header_le_import(VHDXHeader *h)
{
    assert(h != NULL);

    le32_to_cpus(&h->signature);
    le32_to_cpus(&h->checksum);
    le64_to_cpus(&h->sequence_number);

    leguid_to_cpus(&h->file_write_guid);
    leguid_to_cpus(&h->data_write_guid);
    leguid_to_cpus(&h->log_guid);

    le16_to_cpus(&h->log_version);
    le16_to_cpus(&h->version);
    le32_to_cpus(&h->log_length);
    le64_to_cpus(&h->log_offset);
}
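When a VHDX header is written back out, the conversion runs in the opposite direction. A sketch of that inverse, assuming in-place cpu_to_le16s()/cpu_to_le32s()/cpu_to_le64s() counterparts and a cpu_to_leguids() analogue of leguid_to_cpus(); QEMU's actual export helper may differ in detail:

static void vhdx_header_le_export_sketch(VHDXHeader *h)
{
    assert(h != NULL);

    cpu_to_le32s(&h->signature);
    cpu_to_le32s(&h->checksum);
    cpu_to_le64s(&h->sequence_number);

    cpu_to_leguids(&h->file_write_guid);
    cpu_to_leguids(&h->data_write_guid);
    cpu_to_leguids(&h->log_guid);

    cpu_to_le16s(&h->log_version);
    cpu_to_le16s(&h->version);
    cpu_to_le32s(&h->log_length);
    cpu_to_le64s(&h->log_offset);
}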
Example #9
static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    BDRVParallelsState *s = bs->opaque;
    int i;
    struct parallels_header ph;
    int ret;

    bs->read_only = 1; // no write support yet

    ret = bdrv_pread(bs->file, 0, &ph, sizeof(ph));
    if (ret < 0) {
        goto fail;
    }

    if (memcmp(ph.magic, HEADER_MAGIC, 16) ||
        (le32_to_cpu(ph.version) != HEADER_VERSION)) {
        error_setg(errp, "Image not in Parallels format");
        ret = -EINVAL;
        goto fail;
    }

    bs->total_sectors = le32_to_cpu(ph.nb_sectors);

    s->tracks = le32_to_cpu(ph.tracks);
    if (s->tracks == 0) {
        error_setg(errp, "Invalid image: Zero sectors per track");
        ret = -EINVAL;
        goto fail;
    }

    s->catalog_size = le32_to_cpu(ph.catalog_entries);
    if (s->catalog_size > INT_MAX / 4) {
        error_setg(errp, "Catalog too large");
        ret = -EFBIG;
        goto fail;
    }
    s->catalog_bitmap = g_try_malloc(s->catalog_size * 4);
    if (s->catalog_size && s->catalog_bitmap == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    ret = bdrv_pread(bs->file, 64, s->catalog_bitmap, s->catalog_size * 4);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->catalog_size; i++)
	le32_to_cpus(&s->catalog_bitmap[i]);

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->catalog_bitmap);
    return ret;
}
Example #10
/*
 * Helper function to analyze a PIMFOR management frame header.
 */
static pimfor_header_t *
pimfor_decode_header(void *data, int len)
{
	pimfor_header_t *h = data;

	while ((void *) h < data + len) {
		if (h->flags & PIMFOR_FLAG_LITTLE_ENDIAN) {
			le32_to_cpus(&h->oid);
			le32_to_cpus(&h->length);
		} else {
			be32_to_cpus(&h->oid);
			be32_to_cpus(&h->length);
		}
		if (h->oid != OID_INL_TUNNEL)
			return h;
		h++;
	}
	return NULL;
}
Example #11
static int vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent)
{
    int ret;
    int l1_size, i;

    /* read the L1 table */
    l1_size = extent->l1_size * sizeof(uint32_t);
    extent->l1_table = g_malloc(l1_size);
    ret = bdrv_pread(extent->file,
                    extent->l1_table_offset,
                    extent->l1_table,
                    l1_size);
    if (ret < 0) {
        goto fail_l1;
    }
    for (i = 0; i < extent->l1_size; i++) {
        le32_to_cpus(&extent->l1_table[i]);
    }

    if (extent->l1_backup_table_offset) {
        extent->l1_backup_table = g_malloc(l1_size);
        ret = bdrv_pread(extent->file,
                        extent->l1_backup_table_offset,
                        extent->l1_backup_table,
                        l1_size);
        if (ret < 0) {
            goto fail_l1b;
        }
        for (i = 0; i < extent->l1_size; i++) {
            le32_to_cpus(&extent->l1_backup_table[i]);
        }
    }

    extent->l2_cache =
        g_malloc(extent->l2_size * L2_CACHE_SIZE * sizeof(uint32_t));
    return 0;
 fail_l1b:
    g_free(extent->l1_backup_table);
 fail_l1:
    g_free(extent->l1_table);
    return ret;
}
Example #12
/**
 * Read register
 *
 * @v smsc95xx		SMSC95xx device
 * @v address		Register address
 * @ret value		Register value
 * @ret rc		Return status code
 */
static inline __attribute__ (( always_inline )) int
smsc95xx_readl ( struct smsc95xx_device *smsc95xx, unsigned int address,
		 uint32_t *value ) {
	int rc;

	/* Read register */
	if ( ( rc = smsc95xx_raw_readl ( smsc95xx, address, value ) ) != 0 )
		return rc;
	le32_to_cpus ( value );

	return 0;
}
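The doc comment above spells out the helper's contract: the caller receives the register contents already converted to host byte order. A hypothetical usage sketch building on the function above (the SMSC95XX_ID_REV offset and the ID/revision field split are illustrative placeholders, not taken from the driver):

static int smsc95xx_dump_id ( struct smsc95xx_device *smsc95xx ) {
	uint32_t id_rev;
	int rc;

	/* Read (hypothetical) ID/revision register */
	if ( ( rc = smsc95xx_readl ( smsc95xx, SMSC95XX_ID_REV,
				     &id_rev ) ) != 0 )
		return rc;

	/* id_rev is already in host byte order; mask fields directly */
	DBGC ( smsc95xx, "SMSC95xx ID %04x rev %04x\n",
	       ( id_rev >> 16 ), ( id_rev & 0xffff ) );
	return 0;
}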
Example #13
static int smsc95xx_rx_fixup(struct usbnet *dev, void *buf, int len)
{
	while (len > 0) {
		u32 header, align_count;
		unsigned char *packet;
		u16 size;

		memcpy(&header, buf, sizeof(header));
		le32_to_cpus(&header);
		buf += 4 + NET_IP_ALIGN;
		len -= 4 + NET_IP_ALIGN;
		packet = buf;

		/* get the packet length */
		size = (u16)((header & RX_STS_FL_) >> 16);
		align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;

		if (header & RX_STS_ES_) {
			netif_dbg(dev, rx_err, dev->net,
				  "Error header=0x%08x\n", header);
		} else {
			/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
			if (size > (ETH_FRAME_LEN + 12)) {
				netif_dbg(dev, rx_err, dev->net,
					  "size err header=0x%08x\n", header);
				return 0;
			}

			/* last frame in this batch */
			if (len == size) {
				net_receive(&dev->edev, buf, len - 4);
				return 1;
			}

			net_receive(&dev->edev, packet, len - 4);
		}

		len -= size;

		/* padding bytes before the next frame starts */
		if (len)
			len -= align_count;
	}

	if (len < 0) {
		netdev_warn(dev->net, "invalid rx length<0 %d\n", len);
		return 0;
	}

	return 1;
}
Example #14
static int parallels_open(BlockDriverState *bs, const char *filename, int flags)
{
    BDRVParallelsState *s = (BDRVParallelsState *)bs->opaque;
    int fd, i;
    struct parallels_header ph;

    fd = open(filename, O_RDWR | O_BINARY | O_LARGEFILE);
    if (fd < 0) {
        fd = open(filename, O_RDONLY | O_BINARY | O_LARGEFILE);
        if (fd < 0)
            return -1;
    }

    bs->read_only = 1; // no write support yet

    s->fd = fd;

    if (read(fd, &ph, sizeof(ph)) != sizeof(ph))
        goto fail;

    if (memcmp(ph.magic, HEADER_MAGIC, 16) ||
	(le32_to_cpu(ph.version) != HEADER_VERSION)) {
        goto fail;
    }

    bs->total_sectors = le32_to_cpu(ph.nb_sectors);

    if (lseek(s->fd, 64, SEEK_SET) != 64)
	goto fail;

    s->tracks = le32_to_cpu(ph.tracks);

    s->catalog_size = le32_to_cpu(ph.catalog_entries);
    s->catalog_bitmap = (uint32_t *)qemu_malloc(s->catalog_size * 4);
    if (!s->catalog_bitmap)
	goto fail;
    if (read(s->fd, s->catalog_bitmap, s->catalog_size * 4) !=
	s->catalog_size * 4)
	goto fail;
    for (i = 0; i < s->catalog_size; i++)
	le32_to_cpus(&s->catalog_bitmap[i]);

    return 0;
fail:
    if (s->catalog_bitmap)
	qemu_free(s->catalog_bitmap);
    close(fd);
    return -1;
}
Example #15
static int smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data)
{
	int ret;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
		USB_VENDOR_REQUEST_READ_REGISTER,
		USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		00, index, data, 4, USB_CTRL_GET_TIMEOUT);

	if (ret < 0)
		netdev_warn(dev->net, "Failed to read register index 0x%08x\n", index);

	le32_to_cpus(data);

	debug("%s: 0x%08x 0x%08x\n", __func__, index, *data);

	return ret;
}
Example #16
static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp)
{
    BDRVParallelsState *s = bs->opaque;
    int i;
    struct parallels_header ph;
    int ret;

    bs->read_only = 1; // no write support yet

    ret = bdrv_pread(bs->file, 0, &ph, sizeof(ph));
    if (ret < 0) {
        goto fail;
    }

    if (memcmp(ph.magic, HEADER_MAGIC, 16) ||
        (le32_to_cpu(ph.version) != HEADER_VERSION)) {
        ret = -EMEDIUMTYPE;
        goto fail;
    }

    bs->total_sectors = le32_to_cpu(ph.nb_sectors);

    s->tracks = le32_to_cpu(ph.tracks);

    s->catalog_size = le32_to_cpu(ph.catalog_entries);
    s->catalog_bitmap = g_malloc(s->catalog_size * 4);

    ret = bdrv_pread(bs->file, 64, s->catalog_bitmap, s->catalog_size * 4);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < s->catalog_size; i++)
	le32_to_cpus(&s->catalog_bitmap[i]);

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->catalog_bitmap);
    return ret;
}
Example #17
/*
 * This function will validate Map info data provided by FW
 */
u8 MR_ValidateMapInfo(struct megasas_instance *instance)
{
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_FW_RAID_MAP_ALL *map = fusion->ld_map[(instance->map_id & 1)];
	struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
	struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
	struct MR_LD_RAID         *raid;
	int ldCount, num_lds;
	u16 ld;


	if (le32_to_cpu(pFwRaidMap->totalSize) !=
	    (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
	     (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) {
		printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n",
		       (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
				       sizeof(struct MR_LD_SPAN_MAP)) +
				      (sizeof(struct MR_LD_SPAN_MAP) *
					le32_to_cpu(pFwRaidMap->ldCount))));
		printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
		       ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
			le32_to_cpu(pFwRaidMap->totalSize));
		return 0;
	}

	if (instance->UnevenSpanSupport)
		mr_update_span_set(map, ldSpanInfo);

	mr_update_load_balance_params(map, lbInfo);

	num_lds = le32_to_cpu(map->raidMap.ldCount);

	/*Convert Raid capability values to CPU arch */
	for (ldCount = 0; ldCount < num_lds; ldCount++) {
		ld = MR_TargetIdToLdGet(ldCount, map);
		raid = MR_LdRaidGet(ld, map);
		le32_to_cpus((u32 *)&raid->capability);
	}

	return 1;
}
Example #18
static int ax88179_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
				 u16 index, u16 size, void *data, int eflag)
{
	int ret;

	if (eflag && (2 == size)) {
		u16 buf;
		ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
		le16_to_cpus(&buf);
		*((u16 *)data) = buf;
	} else if (eflag && (4 == size)) {
		u32 buf;
		ret = __ax88179_read_cmd(dev, cmd, value, index, size, &buf, 1);
		le32_to_cpus(&buf);
		*((u32 *)data) = buf;
	} else {
		ret = __ax88179_read_cmd(dev, cmd, value, index, size, data, 1);
	}

	return ret;
}
Example #19
static void v9fs_uint32_read(P9Req *req, uint32_t *val)
{
    v9fs_memread(req, val, 4);
    le32_to_cpus(val);
}
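The same read-then-convert pattern extends to other field widths. A sketch of a 16-bit counterpart, assuming the same v9fs_memread() helper is available (whether the test harness defines exactly this function is an assumption here):

static void v9fs_uint16_read(P9Req *req, uint16_t *val)
{
    v9fs_memread(req, val, 2);
    le16_to_cpus(val);
}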
Example #20
/* Metadata initial parser
 *
 * This loads all the metadata entry fields.  This may cause additional
 * fields to be processed (e.g. parent locator, etc..).
 *
 * There are 5 Metadata items that are always required:
 *      - File Parameters (block size, has a parent)
 *      - Virtual Disk Size (size, in bytes, of the virtual drive)
 *      - Page 83 Data (scsi page 83 guid)
 *      - Logical Sector Size (logical sector size in bytes, either 512 or
 *                             4096.  We only support 512 currently)
 *      - Physical Sector Size (512 or 4096)
 *
 * Also, if the File Parameters indicate this is a differencing file,
 * we must also look for the Parent Locator metadata item.
 */
static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s)
{
    int ret = 0;
    uint8_t *buffer;
    int offset = 0;
    uint32_t i = 0;
    VHDXMetadataTableEntry md_entry;

    buffer = qemu_blockalign(bs, VHDX_METADATA_TABLE_MAX_SIZE);

    ret = bdrv_pread(bs->file, s->metadata_rt.file_offset, buffer,
                     VHDX_METADATA_TABLE_MAX_SIZE);
    if (ret < 0) {
        goto exit;
    }
    memcpy(&s->metadata_hdr, buffer, sizeof(s->metadata_hdr));
    offset += sizeof(s->metadata_hdr);

    le64_to_cpus(&s->metadata_hdr.signature);
    le16_to_cpus(&s->metadata_hdr.reserved);
    le16_to_cpus(&s->metadata_hdr.entry_count);

    if (memcmp(&s->metadata_hdr.signature, "metadata", 8)) {
        ret = -EINVAL;
        goto exit;
    }

    s->metadata_entries.present = 0;

    if ((s->metadata_hdr.entry_count * sizeof(md_entry)) >
        (VHDX_METADATA_TABLE_MAX_SIZE - offset)) {
        ret = -EINVAL;
        goto exit;
    }

    for (i = 0; i < s->metadata_hdr.entry_count; i++) {
        memcpy(&md_entry, buffer + offset, sizeof(md_entry));
        offset += sizeof(md_entry);

        leguid_to_cpus(&md_entry.item_id);
        le32_to_cpus(&md_entry.offset);
        le32_to_cpus(&md_entry.length);
        le32_to_cpus(&md_entry.data_bits);
        le32_to_cpus(&md_entry.reserved2);

        if (guid_eq(md_entry.item_id, file_param_guid)) {
            if (s->metadata_entries.present & META_FILE_PARAMETER_PRESENT) {
                ret = -EINVAL;
                goto exit;
            }
            s->metadata_entries.file_parameters_entry = md_entry;
            s->metadata_entries.present |= META_FILE_PARAMETER_PRESENT;
            continue;
        }

        if (guid_eq(md_entry.item_id, virtual_size_guid)) {
            if (s->metadata_entries.present & META_VIRTUAL_DISK_SIZE_PRESENT) {
                ret = -EINVAL;
                goto exit;
            }
            s->metadata_entries.virtual_disk_size_entry = md_entry;
            s->metadata_entries.present |= META_VIRTUAL_DISK_SIZE_PRESENT;
            continue;
        }

        if (guid_eq(md_entry.item_id, page83_guid)) {
            if (s->metadata_entries.present & META_PAGE_83_PRESENT) {
                ret = -EINVAL;
                goto exit;
            }
            s->metadata_entries.page83_data_entry = md_entry;
            s->metadata_entries.present |= META_PAGE_83_PRESENT;
            continue;
        }

        if (guid_eq(md_entry.item_id, logical_sector_guid)) {
            if (s->metadata_entries.present &
                META_LOGICAL_SECTOR_SIZE_PRESENT) {
                ret = -EINVAL;
                goto exit;
            }
            s->metadata_entries.logical_sector_size_entry = md_entry;
            s->metadata_entries.present |= META_LOGICAL_SECTOR_SIZE_PRESENT;
            continue;
        }

        if (guid_eq(md_entry.item_id, phys_sector_guid)) {
            if (s->metadata_entries.present & META_PHYS_SECTOR_SIZE_PRESENT) {
                ret = -EINVAL;
                goto exit;
            }
            s->metadata_entries.phys_sector_size_entry = md_entry;
            s->metadata_entries.present |= META_PHYS_SECTOR_SIZE_PRESENT;
            continue;
        }

        if (guid_eq(md_entry.item_id, parent_locator_guid)) {
            if (s->metadata_entries.present & META_PARENT_LOCATOR_PRESENT) {
                ret = -EINVAL;
                goto exit;
            }
            s->metadata_entries.parent_locator_entry = md_entry;
            s->metadata_entries.present |= META_PARENT_LOCATOR_PRESENT;
            continue;
        }

        if (md_entry.data_bits & VHDX_META_FLAGS_IS_REQUIRED) {
            /* cannot read vhdx file - required metadata entry that
             * we do not understand.  per spec, we must fail to open */
            ret = -ENOTSUP;
            goto exit;
        }
    }

    if (s->metadata_entries.present != META_ALL_PRESENT) {
        ret = -ENOTSUP;
        goto exit;
    }

    ret = bdrv_pread(bs->file,
                     s->metadata_entries.file_parameters_entry.offset
                                         + s->metadata_rt.file_offset,
                     &s->params,
                     sizeof(s->params));

    if (ret < 0) {
        goto exit;
    }

    le32_to_cpus(&s->params.block_size);
    le32_to_cpus(&s->params.data_bits);


    /* We now have the file parameters, so we can tell if this is a
     * differencing file (i.e. has_parent), is dynamic or fixed
     * sized (leave_blocks_allocated), and the block size */

    /* The parent locator is required iff has_parent is set in the file parameters */
    if (s->params.data_bits & VHDX_PARAMS_HAS_PARENT) {
        if (s->metadata_entries.present & META_PARENT_LOCATOR_PRESENT) {
            /* TODO: parse  parent locator fields */
            ret = -ENOTSUP; /* temp, until differencing files are supported */
            goto exit;
        } else {
            /* if has_parent is set, but there is no parent locator present,
             * then that is an invalid combination */
            ret = -EINVAL;
            goto exit;
        }
    }

    /* determine virtual disk size, logical sector size,
     * and phys sector size */

    ret = bdrv_pread(bs->file,
                     s->metadata_entries.virtual_disk_size_entry.offset
                                           + s->metadata_rt.file_offset,
                     &s->virtual_disk_size,
                     sizeof(uint64_t));
    if (ret < 0) {
        goto exit;
    }
    ret = bdrv_pread(bs->file,
                     s->metadata_entries.logical_sector_size_entry.offset
                                             + s->metadata_rt.file_offset,
                     &s->logical_sector_size,
                     sizeof(uint32_t));
    if (ret < 0) {
        goto exit;
    }
    ret = bdrv_pread(bs->file,
                     s->metadata_entries.phys_sector_size_entry.offset
                                          + s->metadata_rt.file_offset,
                     &s->physical_sector_size,
                     sizeof(uint32_t));
    if (ret < 0) {
        goto exit;
    }

    le64_to_cpus(&s->virtual_disk_size);
    le32_to_cpus(&s->logical_sector_size);
    le32_to_cpus(&s->physical_sector_size);

    if (s->logical_sector_size == 0 || s->params.block_size == 0) {
        ret = -EINVAL;
        goto exit;
    }

    /* both block_size and sector_size are guaranteed powers of 2 */
    s->sectors_per_block = s->params.block_size / s->logical_sector_size;
    s->chunk_ratio = (VHDX_MAX_SECTORS_PER_BLOCK) *
                     (uint64_t)s->logical_sector_size /
                     (uint64_t)s->params.block_size;

    /* These values are ones we will want to use for division / multiplication
     * later on, and they are all guaranteed (per the spec) to be powers of 2,
     * so we can take advantage of that for shift operations during
     * reads/writes */
    if (s->logical_sector_size & (s->logical_sector_size - 1)) {
        ret = -EINVAL;
        goto exit;
    }
    if (s->sectors_per_block & (s->sectors_per_block - 1)) {
        ret = -EINVAL;
        goto exit;
    }
    if (s->chunk_ratio & (s->chunk_ratio - 1)) {
        ret = -EINVAL;
        goto exit;
    }
    s->block_size = s->params.block_size;
    if (s->block_size & (s->block_size - 1)) {
        ret = -EINVAL;
        goto exit;
    }

    s->logical_sector_size_bits = 31 - clz32(s->logical_sector_size);
    s->sectors_per_block_bits =   31 - clz32(s->sectors_per_block);
    s->chunk_ratio_bits =         63 - clz64(s->chunk_ratio);
    s->block_size_bits =          31 - clz32(s->block_size);

    ret = 0;

exit:
    qemu_vfree(buffer);
    return ret;
}
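A quick worked example of the shift setup at the end of this function, assuming VHDX_MAX_SECTORS_PER_BLOCK is 2^23 and taking a 512-byte logical sector with a 1 MiB block: sectors_per_block = 2^20 / 2^9 = 2048 and chunk_ratio = (2^23 * 2^9) / 2^20 = 4096, so the derived shift counts come out as logical_sector_size_bits = 9, sectors_per_block_bits = 11, chunk_ratio_bits = 12 and block_size_bits = 20.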
Example #21
static void vdi_header_to_cpu(VdiHeader *header)
{
    le32_to_cpus(&header->signature);
    le32_to_cpus(&header->version);
    le32_to_cpus(&header->header_size);
    le32_to_cpus(&header->image_type);
    le32_to_cpus(&header->image_flags);
    le32_to_cpus(&header->offset_bmap);
    le32_to_cpus(&header->offset_data);
    le32_to_cpus(&header->cylinders);
    le32_to_cpus(&header->heads);
    le32_to_cpus(&header->sectors);
    le32_to_cpus(&header->sector_size);
    le64_to_cpus(&header->disk_size);
    le32_to_cpus(&header->block_size);
    le32_to_cpus(&header->block_extra);
    le32_to_cpus(&header->blocks_in_image);
    le32_to_cpus(&header->blocks_allocated);
    uuid_convert(header->uuid_image);
    uuid_convert(header->uuid_last_snap);
    uuid_convert(header->uuid_link);
    uuid_convert(header->uuid_parent);
}
Example #22
void mptsas_fix_scsi_task_mgmt_endianness(MPIMsgSCSITaskMgmt *req)
{
    le32_to_cpus(&req->MsgContext);
    le32_to_cpus(&req->TaskMsgContext);
}
Example #23
int escore_write_vs_data_block(struct escore_priv *escore)
{
	u32 cmd;
	u32 resp;
	int ret;
	u8 *dptr;
	u16 rem;
	u8 wdb[4];
	struct escore_voice_sense *voice_sense =
			(struct escore_voice_sense *) escore->voice_sense;

	if (voice_sense->vs_keyword_param_size == 0) {
		dev_warn(escore->dev,
			"%s(): attempt to write empty keyword data block\n",
			__func__);
		return -ENOENT;
	}

	BUG_ON(voice_sense->vs_keyword_param_size % 4 != 0);

	mutex_lock(&escore->api_mutex);

	cmd = (ES_WRITE_DATA_BLOCK << 16) |
		(voice_sense->vs_keyword_param_size & 0xffff);
	cmd = cpu_to_le32(cmd);
	ret = escore->bus.ops.write(escore, (char *)&cmd, 4);
	if (ret < 0) {
		dev_err(escore->dev,
			"%s(): error writing cmd 0x%08x to device\n",
			__func__, cmd);
		goto EXIT;
	}

	usleep_range(10000, 10000);
	ret = escore->bus.ops.read(escore, (char *)&resp, 4);
	if (ret < 0) {
		dev_dbg(escore->dev, "%s(): error sending request = %d\n",
			__func__, ret);
		goto EXIT;
	}

	le32_to_cpus(&resp);
	dev_dbg(escore->dev, "%s(): resp=0x%08x\n", __func__, resp);
	if ((resp & 0xffff0000) != (ES_WRITE_DATA_BLOCK << 16)) {
		dev_err(escore->dev, "%s(): invalid write data block 0x%08x\n",
			__func__, resp);
		goto EXIT;
	}

	dptr = voice_sense->vs_keyword_param;
	for (rem = voice_sense->vs_keyword_param_size; rem > 0;
					rem -= 4, dptr += 4) {
		wdb[0] = dptr[3];
		wdb[1] = dptr[2];
		wdb[2] = dptr[1];
		wdb[3] = dptr[0];
		ret = escore->bus.ops.write(escore, (char *)wdb, 4);
		if (ret < 0) {
			dev_err(escore->dev, "%s(): v-s wdb error offset=%hu\n",
			    __func__, dptr - voice_sense->vs_keyword_param);
			goto EXIT;
		}
	}

	usleep_range(10000, 10000);
	memset(&resp, 0, 4);

	ret = escore->bus.ops.read(escore, (char *)&resp, 4);
	if (ret < 0) {
		dev_dbg(escore->dev, "%s(): error sending request = %d\n",
			__func__, ret);
		goto EXIT;
	}

	le32_to_cpus(&resp);
	dev_dbg(escore->dev, "%s(): resp=0x%08x\n", __func__, resp);
	if (resp & 0xffff) {
		dev_err(escore->dev, "%s(): write data block error 0x%08x\n",
			__func__, resp);
		goto EXIT;
	}

	dev_info(escore->dev, "%s(): v-s wdb success\n", __func__);
EXIT:
	mutex_unlock(&escore->api_mutex);
	if (ret != 0)
		dev_err(escore->dev, "%s(): v-s wdb failed ret=%d\n",
			__func__, ret);
	return ret;
}
Example #24
/* Note: this may only end up being called in a api locked context. In
 * that case the mutex locks need to be removed.
 */
int escore_read_vs_data_block(struct escore_priv *escore)
{
	struct escore_voice_sense *voice_sense =
			(struct escore_voice_sense *) escore->voice_sense;
	/* This function is not re-entrant so avoid stack bloat. */
	u8 block[ES_VS_KEYWORD_PARAM_MAX];

	u32 cmd;
	u32 resp;
	int ret;
	unsigned size;
	unsigned rdcnt;

	mutex_lock(&escore->api_mutex);

	/* Read voice sense keyword data block request. */
	cmd = cpu_to_le32(ES_READ_DATA_BLOCK << 16 | ES_VS_DATA_BLOCK);
	escore->bus.ops.write(escore, (char *)&cmd, 4);

	usleep_range(5000, 5000);

	ret = escore->bus.ops.read(escore, (char *)&resp, 4);
	if (ret < 0) {
		dev_dbg(escore->dev, "%s(): error sending request = %d\n",
			__func__, ret);
		goto out;
	}

	le32_to_cpus(&resp);
	size = resp & 0xffff;
	dev_dbg(escore->dev, "%s(): resp=0x%08x size=%d\n",
		__func__, resp, size);
	if ((resp & 0xffff0000) != (ES_READ_DATA_BLOCK << 16)) {
		dev_err(escore->dev,
			"%s(): invalid read vs data block response = 0x%08x\n",
			__func__, resp);
		goto out;
	}

	BUG_ON(size == 0);
	BUG_ON(size > ES_VS_KEYWORD_PARAM_MAX);
	BUG_ON(size % 4 != 0);

	/* This assumes we need to transfer the block in 4 byte
	 * increments. This is true on slimbus, but may not hold true
	 * for other buses.
	 */
	for (rdcnt = 0; rdcnt < size; rdcnt += 4) {
		ret = escore->bus.ops.read(escore, (char *)&resp, 4);
		if (ret < 0) {
			dev_dbg(escore->dev,
				"%s(): error reading data block at %d bytes\n",
				__func__, rdcnt);
			goto out;
		}
		memcpy(&block[rdcnt], &resp, 4);
	}

	memcpy(voice_sense->vs_keyword_param, block, rdcnt);
	voice_sense->vs_keyword_param_size = rdcnt;
	dev_dbg(escore->dev, "%s(): stored v-s keyword block of %d bytes\n",
		__func__, rdcnt);

out:
	mutex_unlock(&escore->api_mutex);
	if (ret)
		dev_err(escore->dev, "%s(): v-s read data block failure=%d\n",
			__func__, ret);
	return ret;
}
Example #25
void mptsas_fix_port_enable_endianness(MPIMsgPortEnable *req)
{
    le32_to_cpus(&req->MsgContext);
}
Example #26
void mptsas_fix_port_facts_endianness(MPIMsgPortFacts *req)
{
    le32_to_cpus(&req->MsgContext);
}
Example #27
static int vhdx_open_region_tables(BlockDriverState *bs, BDRVVHDXState *s)
{
    int ret = 0;
    uint8_t *buffer;
    int offset = 0;
    VHDXRegionTableEntry rt_entry;
    uint32_t i;
    bool bat_rt_found = false;
    bool metadata_rt_found = false;

    /* We have to read the whole 64KB block, because the crc32 is over the
     * whole block */
    buffer = qemu_blockalign(bs, VHDX_HEADER_BLOCK_SIZE);

    ret = bdrv_pread(bs->file, VHDX_REGION_TABLE_OFFSET, buffer,
                     VHDX_HEADER_BLOCK_SIZE);
    if (ret < 0) {
        goto fail;
    }
    memcpy(&s->rt, buffer, sizeof(s->rt));
    le32_to_cpus(&s->rt.signature);
    le32_to_cpus(&s->rt.checksum);
    le32_to_cpus(&s->rt.entry_count);
    le32_to_cpus(&s->rt.reserved);
    offset += sizeof(s->rt);

    if (!vhdx_checksum_is_valid(buffer, VHDX_HEADER_BLOCK_SIZE, 4) ||
        memcmp(&s->rt.signature, "regi", 4)) {
        ret = -EINVAL;
        goto fail;
    }

    /* Per spec, maximum region table entry count is 2047 */
    if (s->rt.entry_count > 2047) {
        ret = -EINVAL;
        goto fail;
    }

    for (i = 0; i < s->rt.entry_count; i++) {
        memcpy(&rt_entry, buffer + offset, sizeof(rt_entry));
        offset += sizeof(rt_entry);

        leguid_to_cpus(&rt_entry.guid);
        le64_to_cpus(&rt_entry.file_offset);
        le32_to_cpus(&rt_entry.length);
        le32_to_cpus(&rt_entry.data_bits);

        /* see if we recognize the entry */
        if (guid_eq(rt_entry.guid, bat_guid)) {
            /* must be unique; if we have already found it this is invalid */
            if (bat_rt_found) {
                ret = -EINVAL;
                goto fail;
            }
            bat_rt_found = true;
            s->bat_rt = rt_entry;
            continue;
        }

        if (guid_eq(rt_entry.guid, metadata_guid)) {
            /* must be unique; if we have already found it this is invalid */
            if (metadata_rt_found) {
                ret = -EINVAL;
                goto fail;
            }
            metadata_rt_found = true;
            s->metadata_rt = rt_entry;
            continue;
        }

        if (rt_entry.data_bits & VHDX_REGION_ENTRY_REQUIRED) {
            /* cannot read vhdx file - required region table entry that
             * we do not understand.  per spec, we must fail to open */
            ret = -ENOTSUP;
            goto fail;
        }
    }
    ret = 0;

fail:
    qemu_vfree(buffer);
    return ret;
}
Example #28
static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	struct gl_header	*header;
	struct gl_packet	*packet;
	struct sk_buff		*gl_skb;
	u32			size;

	header = (struct gl_header *) skb->data;

	// get the packet count of the received skb
	le32_to_cpus(&header->packet_count);
	if ((header->packet_count > GL_MAX_TRANSMIT_PACKETS)
			|| (header->packet_count < 0)) {
		dbg("genelink: invalid received packet count %d",
			header->packet_count);
		return 0;
	}

	// set the current packet pointer to the first packet
	packet = &header->packets;

	// decrement the length for the packet count size 4 bytes
	skb_pull(skb, 4);

	while (header->packet_count > 1) {
		// get the packet length
		size = le32_to_cpu(packet->packet_length);

		// this may be a broken packet
		if (size > GL_MAX_PACKET_LEN) {
			dbg("genelink: invalid rx length %d", size);
			return 0;
		}

		// allocate the skb for the individual packet
		gl_skb = alloc_skb(size, GFP_ATOMIC);
		if (gl_skb) {

			// copy the packet data to the new skb
			memcpy(skb_put(gl_skb, size),
					packet->packet_data, size);
			usbnet_skb_return(dev, gl_skb);
		}

		// advance to the next packet
		packet = (struct gl_packet *)
			&packet->packet_data [size];
		header->packet_count--;

		// shift the data pointer to the next gl_packet
		skb_pull(skb, size + 4);
	}

	// skip the packet length field 4 bytes
	skb_pull(skb, 4);

	if (skb->len > GL_MAX_PACKET_LEN) {
		dbg("genelink: invalid rx length %d", skb->len);
		return 0;
	}
	return 1;
}
Example #29
void mptsas_fix_event_notification_endianness(MPIMsgEventNotify *req)
{
    le32_to_cpus(&req->MsgContext);
}
Example #30
void mptsas_fix_ioc_facts_endianness(MPIMsgIOCFacts *req)
{
    le32_to_cpus(&req->MsgContext);
}