Example 1
static int
check_label(int fd, dk_efi_t *dk_ioc)
{
	efi_gpt_t		*efi;
	uint_t			crc;

	if (efi_ioctl(fd, DKIOCGETEFI, dk_ioc) == -1) {
		switch (errno) {
		case EIO:
			return (VT_EIO);
		default:
			return (VT_ERROR);
		}
	}
	efi = dk_ioc->dki_data;
	if (efi->efi_gpt_Signature != LE_64(EFI_SIGNATURE)) {
		if (efi_debug)
			(void) fprintf(stderr,
			    "Bad EFI signature: 0x%llx != 0x%llx\n",
			    (long long)efi->efi_gpt_Signature,
			    (long long)LE_64(EFI_SIGNATURE));
		return (VT_EINVAL);
	}

	/*
	 * check CRC of the header; the size of the header should
	 * never be larger than one block
	 */
	crc = efi->efi_gpt_HeaderCRC32;
	efi->efi_gpt_HeaderCRC32 = 0;
	len_t headerSize = (len_t)LE_32(efi->efi_gpt_HeaderSize);

	if (headerSize < EFI_MIN_LABEL_SIZE || headerSize > EFI_LABEL_SIZE) {
		if (efi_debug)
			(void) fprintf(stderr,
				"Invalid EFI HeaderSize %llu.  Assuming %d.\n",
				headerSize, EFI_MIN_LABEL_SIZE);
	}

	if ((headerSize > dk_ioc->dki_length) ||
	    crc != LE_32(efi_crc32((unsigned char *)efi, headerSize))) {
		if (efi_debug)
			(void) fprintf(stderr,
			    "Bad EFI CRC: 0x%x != 0x%x\n",
			    crc, LE_32(efi_crc32((unsigned char *)efi,
			    headerSize)));
		return (VT_EINVAL);
	}

	return (0);
}
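For illustration, a minimal user-space sketch of the same save/zero/recompute CRC pattern used in check_label() above. crc32_simple() and struct hdr are hypothetical stand-ins (not efi_crc32() or the real efi_gpt_t layout), and the CRC here covers the whole struct rather than a variable HeaderSize.

#include <stdint.h>
#include <stddef.h>

/* Plain reflected CRC-32 (polynomial 0xEDB88320), standing in for efi_crc32(). */
static uint32_t
crc32_simple(const uint8_t *p, size_t len)
{
	uint32_t crc = 0xffffffffU;
	size_t i;
	int b;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (b = 0; b < 8; b++)
			crc = (crc >> 1) ^ (0xedb88320U & -(crc & 1U));
	}
	return (~crc);
}

/* Simplified header: the stored CRC is computed with its own field zeroed. */
struct hdr {
	uint64_t	h_signature;
	uint32_t	h_size;
	uint32_t	h_crc32;
};

static int
hdr_crc_ok(const struct hdr *h)
{
	struct hdr tmp = *h;
	uint32_t saved = tmp.h_crc32;

	tmp.h_crc32 = 0;
	return (saved == crc32_simple((const uint8_t *)&tmp, sizeof (tmp)));
}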
Example 2
/*
 * Change 'sh_bof' to the beginning of the next record.
 */
static int
spa_history_advance_bof(spa_t *spa, spa_history_phys_t *shpp)
{
	objset_t *mos = spa->spa_meta_objset;
	uint64_t firstread, reclen, phys_bof;
	char buf[sizeof (reclen)];
	int err;

	phys_bof = spa_history_log_to_phys(shpp->sh_bof, shpp);
	firstread = MIN(sizeof (reclen), shpp->sh_phys_max_off - phys_bof);

	if ((err = dmu_read(mos, spa->spa_history, phys_bof, firstread,
	    buf, DMU_READ_PREFETCH)) != 0)
		return (err);
	if (firstread != sizeof (reclen)) {
		if ((err = dmu_read(mos, spa->spa_history,
		    shpp->sh_pool_create_len, sizeof (reclen) - firstread,
		    buf + firstread, DMU_READ_PREFETCH)) != 0)
			return (err);
	}

	reclen = LE_64(*((uint64_t *)buf));
	shpp->sh_bof += reclen + sizeof (reclen);
	shpp->sh_records_lost++;
	return (0);
}
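As an illustration of the wrap-around read above, here is a hypothetical in-memory analogue; ring_read_len() and its parameters are made-up names, and this sketch wraps back to offset 0, whereas the real log wraps to sh_pool_create_len so the pool-create record is never overwritten.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

static uint64_t
ring_read_len(const uint8_t *ring, size_t ring_size, size_t off)
{
	uint64_t len;
	size_t first = ring_size - off;

	/* First chunk: from 'off' up to the end of the buffer. */
	if (first > sizeof (len))
		first = sizeof (len);
	memcpy(&len, ring + off, first);

	/* Second chunk: whatever wrapped around to the start. */
	if (first != sizeof (len))
		memcpy((uint8_t *)&len + first, ring, sizeof (len) - first);

	/* The on-disk value is little-endian; convert as LE_64() does. */
	return (len);
}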
Example 3
/* Must be called with vd->vdev_tsd_lock taken */
static uint64_t
vdev_disk_get_space(vdev_t *vd, uint64_t capacity, uint_t blksz)
{
	ASSERT(vd->vdev_wholedisk);
	ASSERT(rw_lock_held(&vd->vdev_tsd_lock));

	vdev_disk_t *dvd = vd->vdev_tsd;
	dk_efi_t dk_ioc;
	efi_gpt_t *efi;
	uint64_t avail_space = 0;
	int rc = ENXIO, efisize = EFI_LABEL_SIZE * 2;

	dk_ioc.dki_data = kmem_alloc(efisize, KM_SLEEP);
	dk_ioc.dki_lba = 1;
	dk_ioc.dki_length = efisize;
	dk_ioc.dki_data_64 = (uint64_t)(uintptr_t)dk_ioc.dki_data;
	efi = dk_ioc.dki_data;

	/*
	 * Here we are called with vdev_tsd_lock taken,
	 * so it's safe to use dvd and vd_lh if not NULL
	 */
	if (dvd != NULL && dvd->vd_lh != NULL) {
		rc = ldi_ioctl(dvd->vd_lh, DKIOCGETEFI, (intptr_t)&dk_ioc,
		    FKIOCTL, kcred, NULL);
	}
	if (rc == 0) {
		uint64_t efi_altern_lba = LE_64(efi->efi_gpt_AlternateLBA);

		if (capacity > efi_altern_lba)
			avail_space = (capacity - efi_altern_lba) * blksz;
	}
	kmem_free(dk_ioc.dki_data, efisize);
	return (avail_space);
}
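A worked example of the arithmetic above, with hypothetical numbers: for blksz = 512 and capacity = 1953525168 sectors, a backup GPT header at AlternateLBA 1953521663 gives avail_space = (1953525168 - 1953521663) * 512 = 3505 * 512 = 1794560 bytes of expandable space beyond the current label.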
Example 4
static uint64_t
vdev_disk_get_space(vdev_t *vd, uint64_t capacity, uint_t blksz)
{
	ASSERT(vd->vdev_wholedisk);

	vdev_disk_t *dvd = vd->vdev_tsd;
	dk_efi_t dk_ioc;
	efi_gpt_t *efi;
	uint64_t avail_space = 0;
	int efisize = EFI_LABEL_SIZE * 2;

	dk_ioc.dki_data = kmem_alloc(efisize, KM_SLEEP);
	dk_ioc.dki_lba = 1;
	dk_ioc.dki_length = efisize;
	dk_ioc.dki_data_64 = (uint64_t)(uintptr_t)dk_ioc.dki_data;
	efi = dk_ioc.dki_data;

	if (ldi_ioctl(dvd->vd_lh, DKIOCGETEFI, (intptr_t)&dk_ioc,
	    FKIOCTL, kcred, NULL) == 0) {
		uint64_t efi_altern_lba = LE_64(efi->efi_gpt_AlternateLBA);

		if (capacity > efi_altern_lba)
			avail_space = (capacity - efi_altern_lba) * blksz;
	}
	kmem_free(dk_ioc.dki_data, efisize);
	return (avail_space);
}
Example 5
static int
mptsas_raidvol_page_1_cb(mptsas_t *mpt, caddr_t page_memp,
    ddi_acc_handle_t accessp, uint16_t iocstatus, uint32_t iocloginfo,
    va_list ap)
{
#ifndef __lock_lint
	_NOTE(ARGUNUSED(ap))
#endif
	pMpi2RaidVolPage1_t	raidpage;
	int			rval = DDI_SUCCESS, i;
	uint8_t			*sas_addr = NULL;
	uint8_t			tmp_sas_wwn[SAS_WWN_BYTE_SIZE];
	uint64_t		*sas_wwn;

	if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
		mptsas_log(mpt, CE_WARN, "mptsas_raidvol_page_1_cb "
		    "config: IOCStatus=0x%x, IOCLogInfo=0x%x",
		    iocstatus, iocloginfo);
		rval = DDI_FAILURE;
		return (rval);
	}
	sas_wwn = va_arg(ap, uint64_t *);

	raidpage = (pMpi2RaidVolPage1_t)page_memp;
	sas_addr = (uint8_t *)(&raidpage->WWID);
	for (i = 0; i < SAS_WWN_BYTE_SIZE; i++) {
		tmp_sas_wwn[i] = ddi_get8(accessp, sas_addr + i);
	}
	bcopy(tmp_sas_wwn, sas_wwn, SAS_WWN_BYTE_SIZE);
	*sas_wwn = LE_64(*sas_wwn);
	return (rval);
}
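An illustrative helper (not part of mptsas) showing what the ddi_get8()/bcopy()/LE_64() sequence above computes: the eight WWID bytes are interpreted least-significant byte first regardless of host endianness. le64_decode() is a hypothetical name.

#include <stdint.h>

static uint64_t
le64_decode(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	/* Byte 7 is most significant, byte 0 least significant. */
	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return (v);
}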
Example 6
/* ARGSUSED */
static int
xhci_mdb_print_epctx(uintptr_t addr, uint_t flags, int argc,
    const mdb_arg_t *argv)
{
	uint32_t info, info2, txinfo;
	xhci_endpoint_context_t epctx;

	if (!(flags & DCMD_ADDRSPEC)) {
		mdb_warn("::xhci_epctx requires an address\n");
		return (DCMD_USAGE);
	}

	if (mdb_vread(&epctx, sizeof (epctx), addr) != sizeof (epctx)) {
		mdb_warn("failed to read xhci_endpoint_context_t at %p", addr);
		return (DCMD_ERR);
	}

	info = LE_32(epctx.xec_info);
	info2 = LE_32(epctx.xec_info2);
	txinfo = LE_32(epctx.xec_txinfo);

	mdb_printf("Endpoint State: %s (%d)\n",
	    xhci_mdb_epctx_states[XHCI_EPCTX_STATE(info)],
	    XHCI_EPCTX_STATE(info));

	mdb_printf("Mult: %d\n", XHCI_EPCTX_GET_MULT(info));
	mdb_printf("Max Streams: %d\n", XHCI_EPCTX_GET_MAXP_STREAMS(info));
	mdb_printf("LSA: %d\n", XHCI_EPCTX_GET_LSA(info));
	mdb_printf("Interval: %d\n", XHCI_EPCTX_GET_IVAL(info));
	mdb_printf("Max ESIT Hi: %d\n", XHCI_EPCTX_GET_MAX_ESIT_HI(info));

	mdb_printf("CErr: %d\n", XHCI_EPCTX_GET_CERR(info2));
	mdb_printf("EP Type: %s (%d)\n",
	    xhci_mdb_epctx_eptypes[XHCI_EPCTX_GET_EPTYPE(info2)],
	    XHCI_EPCTX_GET_EPTYPE(info2));
	mdb_printf("Host Initiate Disable: %d\n", XHCI_EPCTX_GET_HID(info2));
	mdb_printf("Max Burst: %d\n", XHCI_EPCTX_GET_MAXB(info2));
	mdb_printf("Max Packet Size: %d\n", XHCI_EPCTX_GET_MPS(info2));

	mdb_printf("Ring DCS: %d\n", LE_64(epctx.xec_dequeue) & 0x1);
	mdb_printf("Ring PA: 0x%lx\n", LE_64(epctx.xec_dequeue) & ~0xf);

	mdb_printf("Average TRB Length: %d\n", XHCI_EPCTX_AVG_TRB_LEN(txinfo));
	mdb_printf("Max ESIT: %d\n", XHCI_EPCTX_GET_MAX_ESIT_PAYLOAD(txinfo));

	return (DCMD_OK);
}
Example 7
/*
 * Validate the PQI mode of adapter.
 */
int pqisrc_check_pqimode(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_FAILURE;
	int tmo = 0;
	uint64_t signature = 0;

	DBG_FUNC("IN\n");

	/* Check the PQI device signature */
	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
	do {
		signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE));
        
		if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE,
				sizeof(uint64_t)) == 0) {
			ret = PQI_STATUS_SUCCESS;
			break;
		}
		OS_SLEEP(PQISRC_MODE_READY_POLL_INTERVAL);
	} while (tmo--);

	PRINT_PQI_SIGNATURE(signature);

	if (tmo <= 0) {
		DBG_ERR("PQI Signature is invalid\n");
		ret = PQI_STATUS_TIMEOUT;
		goto err_out;
	}

	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
	/* Check function and status code for the device */
	COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config,
		PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo);
	if (!tmo) {
		DBG_ERR("PQI device is not in IDLE state\n");
		ret = PQI_STATUS_TIMEOUT;
		goto err_out;
	}


	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
	/* Check the PQI device status register */
	COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) &
				PQI_DEV_STATE_AT_INIT, tmo);
	if (!tmo) {
		DBG_ERR("PQI Registers are not ready\n");
		ret = PQI_STATUS_TIMEOUT;
		goto err_out;
	}

	DBG_FUNC("OUT\n");
	return ret;
err_out:
	DBG_FUNC("OUT failed\n");
	return ret;
}
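The two COND_WAIT() uses and the do/while loop above share one shape: re-read a status value until a predicate holds or a retry budget runs out. A generic sketch of that pattern follows; poll_until(), read_status() and poll_delay() are hypothetical, not part of the smartpqi driver.

#include <stdint.h>

static int
poll_until(uint64_t (*read_status)(void *), void *arg, uint64_t want,
    int retries, void (*poll_delay)(void))
{
	do {
		if (read_status(arg) == want)
			return (0);		/* condition met */
		poll_delay();
	} while (retries-- > 0);

	return (-1);				/* timed out */
}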
Example 8
/* ARGSUSED */
static int
xhci_mdb_print_trb(uintptr_t addr, uint_t flags, int argc,
    const mdb_arg_t *argv)
{
	xhci_trb_t trb;
	uint64_t pa;
	uint32_t status, trbflags, type;

	if (!(flags & DCMD_ADDRSPEC)) {
		mdb_warn("::xhci_trb expects an address\n");
		return (DCMD_USAGE);
	}

	if (mdb_vread(&trb, sizeof (trb), addr) != sizeof (trb)) {
		mdb_warn("failed to read xhci_trb_t at 0x%x", addr);
		return (DCMD_ERR);
	}

	pa = LE_64(trb.trb_addr);
	status = LE_32(trb.trb_status);
	trbflags = LE_32(trb.trb_flags);

	type = XHCI_TRB_GET_TYPE(trbflags);

	if ((flags & DCMD_LOOP) && !(flags & DCMD_LOOPFIRST))
		mdb_printf("\n");

	mdb_set_dot(addr + sizeof (xhci_trb_t));
	mdb_printf("%s TRB (%d)\n", xhci_mdb_trb_type_to_str(type), type);
	mdb_inc_indent(XHCI_MDB_TRB_INDENT);

	switch (XHCI_RING_TYPE_SHIFT(type)) {
	case XHCI_EVT_XFER:
		return (xhci_mdb_print_transfer_event(pa, status, trbflags));
	case XHCI_EVT_CMD_COMPLETE:
		return (xhci_mdb_print_command_event(pa, status, trbflags));
	case XHCI_EVT_PORT_CHANGE:
		return (xhci_mdb_print_psc(pa, status, trbflags));
	case XHCI_TRB_TYPE_NORMAL:
		return (xhci_mdb_print_normal_trb(pa, status, trbflags));
	}

	/*
	 * Just print generic information if we don't have a specific printer
	 * for that TRB type.
	 */
	mdb_printf("TRB Address: 0x%lx\n", pa);
	mdb_printf("TRB Status: 0x%x\n", status);
	mdb_printf("TRB Flags: 0x%x\n", trbflags);
	mdb_dec_indent(XHCI_MDB_TRB_INDENT);

	return (DCMD_OK);
}
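A sketch of what accessors like XHCI_TRB_GET_TYPE() do once the word has been converted with LE_32(): shift and mask a bit field out of the 32-bit flags word. The bit positions below (15:10 for the TRB type) are shown for illustration; rely on the driver's own macros for the authoritative layout.

#include <stdint.h>

/* Hypothetical stand-in for XHCI_TRB_GET_TYPE(): bits 15:10 of the flags. */
#define	EXAMPLE_TRB_TYPE(flags)	(((flags) >> 10) & 0x3f)

static uint32_t
example_trb_type(uint32_t trbflags)
{
	/* trbflags is already in host order, i.e. the result of LE_32(). */
	return (EXAMPLE_TRB_TYPE(trbflags));
}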
Example 9
static int
derive_key(libzfs_handle_t *hdl, zfs_keyformat_t format, uint64_t iters,
    uint8_t *key_material, size_t key_material_len, uint64_t salt,
    uint8_t **key_out)
{
	int ret;
	uint8_t *key;

	*key_out = NULL;

	key = zfs_alloc(hdl, WRAPPING_KEY_LEN);
	if (!key)
		return (ENOMEM);

	switch (format) {
	case ZFS_KEYFORMAT_RAW:
		bcopy(key_material, key, WRAPPING_KEY_LEN);
		break;
	case ZFS_KEYFORMAT_HEX:
		ret = hex_key_to_raw((char *)key_material,
		    WRAPPING_KEY_LEN * 2, key);
		if (ret != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Invalid hex key provided."));
			goto error;
		}
		break;
	case ZFS_KEYFORMAT_PASSPHRASE:
		salt = LE_64(salt);
		ret = pbkdf2(key_material, strlen((char *)key_material),
		    ((uint8_t *)&salt), sizeof (uint64_t), iters,
		    key, WRAPPING_KEY_LEN);
		if (ret != 0) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "Failed to generate key from passphrase."));
			goto error;
		}
		break;
	default:
		ret = EINVAL;
		goto error;
	}

	*key_out = key;
	return (0);

error:
	free(key);

	*key_out = NULL;
	return (ret);
}
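A user-space sketch of the passphrase branch above, with OpenSSL's PKCS5_PBKDF2_HMAC() standing in for the libzfs pbkdf2() helper; the EVP_sha1() digest and the fixed 8-byte salt are assumptions made for illustration. Serializing the salt little-endian, as LE_64() does above, keeps the derived key identical across host endianness.

#include <stdint.h>
#include <string.h>
#include <openssl/evp.h>

static int
derive_from_passphrase(const char *pass, uint64_t salt, uint64_t iters,
    uint8_t *key, size_t keylen)
{
	uint8_t salt_le[8];
	int i;

	for (i = 0; i < 8; i++)		/* least-significant byte first */
		salt_le[i] = (uint8_t)(salt >> (8 * i));

	if (PKCS5_PBKDF2_HMAC(pass, strlen(pass), salt_le, sizeof (salt_le),
	    (int)iters, EVP_sha1(), (int)keylen, key) != 1)
		return (-1);
	return (0);
}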
Example 10
/*ARGSUSED*/
static void
spa_history_log_sync(void *arg1, void *arg2, dmu_tx_t *tx)
{
	spa_t		*spa = arg1;
	history_arg_t	*hap = arg2;
	const char	*history_str = hap->ha_history_str;
	objset_t	*mos = spa->spa_meta_objset;
	dmu_buf_t	*dbp;
	spa_history_phys_t *shpp;
	size_t		reclen;
	uint64_t	le_len;
	nvlist_t	*nvrecord;
	char		*record_packed = NULL;
	int		ret;

	/*
	 * If we have an older pool that doesn't have a command
	 * history object, create it now.
	 */
	mutex_enter(&spa->spa_history_lock);
	if (!spa->spa_history)
		spa_history_create_obj(spa, tx);
	mutex_exit(&spa->spa_history_lock);

	/*
	 * Get the offset of where we need to write via the bonus buffer.
	 * Update the offset when the write completes.
	 */
	VERIFY(0 == dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
	shpp = dbp->db_data;

	dmu_buf_will_dirty(dbp, tx);

#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbp, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
	}
#endif

	VERIFY(nvlist_alloc(&nvrecord, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_TIME,
	    gethrestime_sec()) == 0);
	VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_WHO, hap->ha_uid) == 0);
	if (hap->ha_zone != NULL)
		VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_ZONE,
		    hap->ha_zone) == 0);
#ifdef _KERNEL
	VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_HOST,
	    utsname.nodename) == 0);
#endif
	if (hap->ha_log_type == LOG_CMD_POOL_CREATE ||
	    hap->ha_log_type == LOG_CMD_NORMAL) {
		VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_CMD,
		    history_str) == 0);

		zfs_dbgmsg("command: %s", history_str);
	} else {
		VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_INT_EVENT,
		    hap->ha_event) == 0);
		VERIFY(nvlist_add_uint64(nvrecord, ZPOOL_HIST_TXG,
		    tx->tx_txg) == 0);
		VERIFY(nvlist_add_string(nvrecord, ZPOOL_HIST_INT_STR,
		    history_str) == 0);

		zfs_dbgmsg("internal %s pool:%s txg:%llu %s",
		    zfs_history_event_names[hap->ha_event], spa_name(spa),
		    (longlong_t)tx->tx_txg, history_str);

	}

	VERIFY(nvlist_size(nvrecord, &reclen, NV_ENCODE_XDR) == 0);
	record_packed = kmem_alloc(reclen, KM_SLEEP);

	VERIFY(nvlist_pack(nvrecord, &record_packed, &reclen,
	    NV_ENCODE_XDR, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_history_lock);
	if (hap->ha_log_type == LOG_CMD_POOL_CREATE)
		VERIFY(shpp->sh_eof == shpp->sh_pool_create_len);

	/* write out the packed length as little endian */
	le_len = LE_64((uint64_t)reclen);
	ret = spa_history_write(spa, &le_len, sizeof (le_len), shpp, tx);
	if (!ret)
		ret = spa_history_write(spa, record_packed, reclen, shpp, tx);

	if (!ret && hap->ha_log_type == LOG_CMD_POOL_CREATE) {
		shpp->sh_pool_create_len += sizeof (le_len) + reclen;
		shpp->sh_bof = shpp->sh_pool_create_len;
	}

	mutex_exit(&spa->spa_history_lock);
	nvlist_free(nvrecord);
	kmem_free(record_packed, reclen);
	dmu_buf_rele(dbp, FTAG);

	strfree(hap->ha_history_str);
	if (hap->ha_zone != NULL)
		strfree(hap->ha_zone);
	kmem_free(hap, sizeof (history_arg_t));
}
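For illustration, a user-space analogue of the two spa_history_write() calls above; write_record() and the FILE * stream are hypothetical stand-ins for the on-disk history object. Each record is framed as an 8-byte little-endian length followed by the packed payload, so a reader can walk the log without knowing the payload encoding.

#include <stdint.h>
#include <stdio.h>

static int
write_record(FILE *fp, const void *payload, uint64_t len)
{
	uint8_t le_len[8];
	int i;

	for (i = 0; i < 8; i++)		/* store the length little-endian */
		le_len[i] = (uint8_t)(len >> (8 * i));

	if (fwrite(le_len, sizeof (le_len), 1, fp) != 1)
		return (-1);
	if (len != 0 && fwrite(payload, len, 1, fp) != 1)
		return (-1);
	return (0);
}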
Example 11
/*ARGSUSED*/
static void
spa_history_log_sync(void *arg, dmu_tx_t *tx)
{
	nvlist_t	*nvl = arg;
	spa_t		*spa = dmu_tx_pool(tx)->dp_spa;
	objset_t	*mos = spa->spa_meta_objset;
	dmu_buf_t	*dbp;
	spa_history_phys_t *shpp;
	size_t		reclen;
	uint64_t	le_len;
	char		*record_packed = NULL;
	int		ret;

	/*
	 * If we have an older pool that doesn't have a command
	 * history object, create it now.
	 */
	mutex_enter(&spa->spa_history_lock);
	if (!spa->spa_history)
		spa_history_create_obj(spa, tx);
	mutex_exit(&spa->spa_history_lock);

	/*
	 * Get the offset of where we need to write via the bonus buffer.
	 * Update the offset when the write completes.
	 */
	VERIFY0(dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
	shpp = dbp->db_data;

	dmu_buf_will_dirty(dbp, tx);

#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbp, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
	}
#endif

	fnvlist_add_uint64(nvl, ZPOOL_HIST_TIME, gethrestime_sec());
#ifdef _KERNEL
	fnvlist_add_string(nvl, ZPOOL_HIST_HOST, utsname.nodename);
#endif
	if (nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
		zfs_dbgmsg("command: %s",
		    fnvlist_lookup_string(nvl, ZPOOL_HIST_CMD));
	} else if (nvlist_exists(nvl, ZPOOL_HIST_INT_NAME)) {
		if (nvlist_exists(nvl, ZPOOL_HIST_DSNAME)) {
			zfs_dbgmsg("txg %lld %s %s (id %llu) %s",
			    fnvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_DSNAME),
			    fnvlist_lookup_uint64(nvl, ZPOOL_HIST_DSID),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
		} else {
			zfs_dbgmsg("txg %lld %s %s",
			    fnvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
		}
	} else if (nvlist_exists(nvl, ZPOOL_HIST_IOCTL)) {
		zfs_dbgmsg("ioctl %s",
		    fnvlist_lookup_string(nvl, ZPOOL_HIST_IOCTL));
	}

	record_packed = fnvlist_pack(nvl, &reclen);

	mutex_enter(&spa->spa_history_lock);

	/* write out the packed length as little endian */
	le_len = LE_64((uint64_t)reclen);
	ret = spa_history_write(spa, &le_len, sizeof (le_len), shpp, tx);
	if (!ret)
		ret = spa_history_write(spa, record_packed, reclen, shpp, tx);

	/* The first command is the create, which we keep forever */
	if (ret == 0 && shpp->sh_pool_create_len == 0 &&
	    nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
		shpp->sh_pool_create_len = shpp->sh_bof = shpp->sh_eof;
	}

	mutex_exit(&spa->spa_history_lock);
	fnvlist_pack_free(record_packed, reclen);
	dmu_buf_rele(dbp, FTAG);
	fnvlist_free(nvl);
}
Example 12
/*ARGSUSED*/
static void
spa_history_log_sync(void *arg, dmu_tx_t *tx)
{
	nvlist_t	*nvl = arg;
	spa_t		*spa = dmu_tx_pool(tx)->dp_spa;
	objset_t	*mos = spa->spa_meta_objset;
	dmu_buf_t	*dbp;
	spa_history_phys_t *shpp;
	size_t		reclen;
	uint64_t	le_len;
	char		*record_packed = NULL;
	int		ret;

	/*
	 * If we have an older pool that doesn't have a command
	 * history object, create it now.
	 */
	mutex_enter(&spa->spa_history_lock);
	if (!spa->spa_history)
		spa_history_create_obj(spa, tx);
	mutex_exit(&spa->spa_history_lock);

	/*
	 * Get the offset of where we need to write via the bonus buffer.
	 * Update the offset when the write completes.
	 */
	VERIFY0(dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp));
	shpp = dbp->db_data;

	dmu_buf_will_dirty(dbp, tx);

#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbp, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
	}
#endif

	fnvlist_add_uint64(nvl, ZPOOL_HIST_TIME, gethrestime_sec());
	fnvlist_add_string(nvl, ZPOOL_HIST_HOST, utsname()->nodename);

	if (nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
		zfs_dbgmsg("command: %s",
		    fnvlist_lookup_string(nvl, ZPOOL_HIST_CMD));
	} else if (nvlist_exists(nvl, ZPOOL_HIST_INT_NAME)) {
		if (nvlist_exists(nvl, ZPOOL_HIST_DSNAME)) {
			zfs_dbgmsg("txg %lld %s %s (id %llu) %s",
			    fnvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_DSNAME),
			    fnvlist_lookup_uint64(nvl, ZPOOL_HIST_DSID),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
		} else {
			zfs_dbgmsg("txg %lld %s %s",
			    fnvlist_lookup_uint64(nvl, ZPOOL_HIST_TXG),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_NAME),
			    fnvlist_lookup_string(nvl, ZPOOL_HIST_INT_STR));
		}
		/*
		 * The history sysevent is posted only for internal history
		 * messages to show what has happened, not how it happened. For
		 * example, the following command:
		 *
		 * # zfs destroy -r tank/foo
		 *
		 * will result in one sysevent posted per dataset that is
		 * destroyed as a result of the command - which could be more
		 * than one event in total.  By contrast, if the sysevent was
		 * posted as a result of the ZPOOL_HIST_CMD key being present
		 * it would result in only one sysevent being posted with the
		 * full command line arguments, requiring the consumer to know
		 * how to parse and understand zfs(1M) command invocations.
		 */
		spa_history_log_notify(spa, nvl);
	} else if (nvlist_exists(nvl, ZPOOL_HIST_IOCTL)) {
		zfs_dbgmsg("ioctl %s",
		    fnvlist_lookup_string(nvl, ZPOOL_HIST_IOCTL));
	}

	VERIFY3U(nvlist_pack(nvl, &record_packed, &reclen, NV_ENCODE_NATIVE,
	    KM_SLEEP), ==, 0);

	mutex_enter(&spa->spa_history_lock);

	/* write out the packed length as little endian */
	le_len = LE_64((uint64_t)reclen);
	ret = spa_history_write(spa, &le_len, sizeof (le_len), shpp, tx);
	if (!ret)
		ret = spa_history_write(spa, record_packed, reclen, shpp, tx);

	/* The first command is the create, which we keep forever */
	if (ret == 0 && shpp->sh_pool_create_len == 0 &&
	    nvlist_exists(nvl, ZPOOL_HIST_CMD)) {
		shpp->sh_pool_create_len = shpp->sh_bof = shpp->sh_eof;
	}

	mutex_exit(&spa->spa_history_lock);
	fnvlist_pack_free(record_packed, reclen);
	dmu_buf_rele(dbp, FTAG);
	fnvlist_free(nvl);
}
Example 13
static int
efi_read(int fd, struct dk_gpt *vtoc)
{
	int			i, j;
	int			label_len;
	int			rval = 0;
	int			md_flag = 0;
	int			vdc_flag = 0;
	struct dk_minfo		disk_info;
	dk_efi_t		dk_ioc;
	efi_gpt_t		*efi;
	efi_gpe_t		*efi_parts;
	struct dk_cinfo		dki_info;
	uint32_t		user_length;
	boolean_t		legacy_label = B_FALSE;

	/*
	 * get the partition number for this file descriptor.
	 */
	if (ioctl(fd, DKIOCINFO, (caddr_t)&dki_info) == -1) {
		if (efi_debug) {
			(void) fprintf(stderr, "DKIOCINFO errno 0x%x\n", errno);
		}
		switch (errno) {
		case EIO:
			return (VT_EIO);
		case EINVAL:
			return (VT_EINVAL);
		default:
			return (VT_ERROR);
		}
	}
	if ((strncmp(dki_info.dki_cname, "pseudo", 7) == 0) &&
	    (strncmp(dki_info.dki_dname, "md", 3) == 0)) {
		md_flag++;
	} else if ((strncmp(dki_info.dki_cname, "vdc", 4) == 0) &&
	    (strncmp(dki_info.dki_dname, "vdc", 4) == 0)) {
		/*
		 * The controller and drive name "vdc" (virtual disk client)
		 * indicates a LDoms virtual disk.
		 */
		vdc_flag++;
	}

	/* get the LBA size */
	if (ioctl(fd, DKIOCGMEDIAINFO, (caddr_t)&disk_info) == -1) {
		if (efi_debug) {
			(void) fprintf(stderr,
			    "assuming LBA 512 bytes %d\n",
			    errno);
		}
		disk_info.dki_lbsize = DEV_BSIZE;
	}
	if (disk_info.dki_lbsize == 0) {
		if (efi_debug) {
			(void) fprintf(stderr,
			    "efi_read: assuming LBA 512 bytes\n");
		}
		disk_info.dki_lbsize = DEV_BSIZE;
	}
	/*
	 * Read the EFI GPT to figure out how many partitions we need
	 * to deal with.
	 */
	dk_ioc.dki_lba = 1;
	if (NBLOCKS(vtoc->efi_nparts, disk_info.dki_lbsize) < 34) {
		label_len = EFI_MIN_ARRAY_SIZE + disk_info.dki_lbsize;
	} else {
		label_len = vtoc->efi_nparts * (int) sizeof (efi_gpe_t) +
		    disk_info.dki_lbsize;
		if (label_len % disk_info.dki_lbsize) {
			/* pad to physical sector size */
			label_len += disk_info.dki_lbsize;
			label_len &= ~(disk_info.dki_lbsize - 1);
		}
	}

	if ((dk_ioc.dki_data = calloc(label_len, 1)) == NULL)
		return (VT_ERROR);

	dk_ioc.dki_length = disk_info.dki_lbsize;
	user_length = vtoc->efi_nparts;
	efi = dk_ioc.dki_data;
	if (md_flag) {
		dk_ioc.dki_length = label_len;
		if (efi_ioctl(fd, DKIOCGETEFI, &dk_ioc) == -1) {
			switch (errno) {
			case EIO:
				return (VT_EIO);
			default:
				return (VT_ERROR);
			}
		}
	} else if ((rval = check_label(fd, &dk_ioc)) == VT_EINVAL) {
		/*
		 * No valid label here; try the alternate. Note that here
		 * we just read GPT header and save it into dk_ioc.data,
		 * Later, we will read GUID partition entry array if we
		 * can get valid GPT header.
		 */

		/*
		 * This is a workaround for legacy systems. In the past, the
		 * last sector of a SCSI disk was invisible on the x86 platform,
		 * so the backup label was saved on the next-to-last sector
		 * instead. A user may move a disk from such an older Solaris
		 * system to the present one, so we attempt to find the legacy
		 * backup EFI label first.
		 */
		dk_ioc.dki_lba = disk_info.dki_capacity - 2;
		dk_ioc.dki_length = disk_info.dki_lbsize;
		rval = check_label(fd, &dk_ioc);
		if (rval == VT_EINVAL) {
			/*
			 * we didn't find legacy backup EFI label, try to
			 * search backup EFI label in the last block.
			 */
			dk_ioc.dki_lba = disk_info.dki_capacity - 1;
			dk_ioc.dki_length = disk_info.dki_lbsize;
			rval = check_label(fd, &dk_ioc);
			if (rval == 0) {
				legacy_label = B_TRUE;
				if (efi_debug)
					(void) fprintf(stderr,
					    "efi_read: primary label corrupt; "
					    "using EFI backup label located on"
					    " the last block\n");
			}
		} else {
			if ((efi_debug) && (rval == 0))
				(void) fprintf(stderr, "efi_read: primary label"
				    " corrupt; using legacy EFI backup label "
				    " located on the next to last block\n");
		}

		if (rval == 0) {
			dk_ioc.dki_lba = LE_64(efi->efi_gpt_PartitionEntryLBA);
			vtoc->efi_flags |= EFI_GPT_PRIMARY_CORRUPT;
			vtoc->efi_nparts =
			    LE_32(efi->efi_gpt_NumberOfPartitionEntries);
			/*
			 * Partition tables are between backup GPT header
			 * table and PartitionEntryLBA (the starting LBA of
			 * the GUID partition entries array). Now that we
			 * already got valid GPT header and saved it in
			 * dk_ioc.dki_data, we try to get GUID partition
			 * entry array here.
			 */
			/* LINTED */
			dk_ioc.dki_data = (efi_gpt_t *)((char *)dk_ioc.dki_data
			    + disk_info.dki_lbsize);
			if (legacy_label)
				dk_ioc.dki_length = disk_info.dki_capacity - 1 -
				    dk_ioc.dki_lba;
			else
				dk_ioc.dki_length = disk_info.dki_capacity - 2 -
				    dk_ioc.dki_lba;
			dk_ioc.dki_length *= disk_info.dki_lbsize;
			if (dk_ioc.dki_length >
			    ((len_t)label_len - sizeof (*dk_ioc.dki_data))) {
				rval = VT_EINVAL;
			} else {
				/*
				 * read GUID partition entry array
				 */
				rval = efi_ioctl(fd, DKIOCGETEFI, &dk_ioc);
			}
		}

	} else if (rval == 0) {

		dk_ioc.dki_lba = LE_64(efi->efi_gpt_PartitionEntryLBA);
		/* LINTED */
		dk_ioc.dki_data = (efi_gpt_t *)((char *)dk_ioc.dki_data
		    + disk_info.dki_lbsize);
		dk_ioc.dki_length = label_len - disk_info.dki_lbsize;
		rval = efi_ioctl(fd, DKIOCGETEFI, &dk_ioc);

	} else if (vdc_flag && rval == VT_ERROR && errno == EINVAL) {
		/*
		 * When the device is a LDoms virtual disk, the DKIOCGETEFI
		 * ioctl can fail with EINVAL if the virtual disk backend
		 * is a ZFS volume serviced by a domain running an old version
		 * of Solaris. This is because the DKIOCGETEFI ioctl was
		 * initially incorrectly implemented for a ZFS volume and it
		 * expected the GPT and GPE to be retrieved with a single ioctl.
		 * So we try to read the GPT and the GPE using that old style
		 * ioctl.
		 */
		dk_ioc.dki_lba = 1;
		dk_ioc.dki_length = label_len;
		rval = check_label(fd, &dk_ioc);
	}

	if (rval < 0) {
		free(efi);
		return (rval);
	}

	/* LINTED -- always longlong aligned */
	efi_parts = (efi_gpe_t *)(((char *)efi) + disk_info.dki_lbsize);

	/*
	 * Assemble this into a "dk_gpt" struct for easier
	 * digestibility by applications.
	 */
	vtoc->efi_version = LE_32(efi->efi_gpt_Revision);
	vtoc->efi_nparts = LE_32(efi->efi_gpt_NumberOfPartitionEntries);
	vtoc->efi_part_size = LE_32(efi->efi_gpt_SizeOfPartitionEntry);
	vtoc->efi_lbasize = disk_info.dki_lbsize;
	vtoc->efi_last_lba = disk_info.dki_capacity - 1;
	vtoc->efi_first_u_lba = LE_64(efi->efi_gpt_FirstUsableLBA);
	vtoc->efi_last_u_lba = LE_64(efi->efi_gpt_LastUsableLBA);
	vtoc->efi_altern_lba = LE_64(efi->efi_gpt_AlternateLBA);
	UUID_LE_CONVERT(vtoc->efi_disk_uguid, efi->efi_gpt_DiskGUID);

	/*
	 * If the array the user passed in is too small, set the length
	 * to what it needs to be and return
	 */
	if (user_length < vtoc->efi_nparts) {
		return (VT_EINVAL);
	}

	for (i = 0; i < vtoc->efi_nparts; i++) {

		UUID_LE_CONVERT(vtoc->efi_parts[i].p_guid,
		    efi_parts[i].efi_gpe_PartitionTypeGUID);

		for (j = 0;
		    j < sizeof (conversion_array)
		    / sizeof (struct uuid_to_ptag); j++) {

			if (bcmp(&vtoc->efi_parts[i].p_guid,
			    &conversion_array[j].uuid,
			    sizeof (struct uuid)) == 0) {
				vtoc->efi_parts[i].p_tag = j;
				break;
			}
		}
		if (vtoc->efi_parts[i].p_tag == V_UNASSIGNED)
			continue;
		vtoc->efi_parts[i].p_flag =
		    LE_16(efi_parts[i].efi_gpe_Attributes.PartitionAttrs);
		vtoc->efi_parts[i].p_start =
		    LE_64(efi_parts[i].efi_gpe_StartingLBA);
		vtoc->efi_parts[i].p_size =
		    LE_64(efi_parts[i].efi_gpe_EndingLBA) -
		    vtoc->efi_parts[i].p_start + 1;
		for (j = 0; j < EFI_PART_NAME_LEN; j++) {
			vtoc->efi_parts[i].p_name[j] =
			    (uchar_t)LE_16(
			    efi_parts[i].efi_gpe_PartitionName[j]);
		}

		UUID_LE_CONVERT(vtoc->efi_parts[i].p_uguid,
		    efi_parts[i].efi_gpe_UniquePartitionGUID);
	}
	free(efi);

	return (dki_info.dki_partition);
}
Example 14
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_cinfo dkc;
	struct dk_minfo dkm;
	dk_efi_t efi;
	efi_gpt_t gpt;
	efi_gpe_t gpe;
	struct uuid uuid = EFI_RESERVED;
	uint32_t crc;
	int error = 0;

	mutex_enter(&zvol_state_lock);

	zv = ddi_get_soft_state(zvol_state, getminor(dev));

	if (zv == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	switch (cmd) {

	case DKIOCINFO:
		bzero(&dkc, sizeof (dkc));
		(void) strcpy(dkc.dki_cname, "zvol");
		(void) strcpy(dkc.dki_dname, "zvol");
		dkc.dki_ctype = DKC_UNKNOWN;
		dkc.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zvol_state_lock);
		if (ddi_copyout(&dkc, (void *)arg, sizeof (dkc), flag))
			error = EFAULT;
		return (error);

	case DKIOCGMEDIAINFO:
		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zvol_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = EFAULT;
		return (error);

	case DKIOCGETEFI:
		if (ddi_copyin((void *)arg, &efi, sizeof (dk_efi_t), flag)) {
			mutex_exit(&zvol_state_lock);
			return (EFAULT);
		}

		bzero(&gpt, sizeof (gpt));
		bzero(&gpe, sizeof (gpe));

		efi.dki_data = (void *)(uintptr_t)efi.dki_data_64;

		if (efi.dki_length < sizeof (gpt) + sizeof (gpe)) {
			mutex_exit(&zvol_state_lock);
			return (EINVAL);
		}

		efi.dki_length = sizeof (gpt) + sizeof (gpe);

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_FirstUsableLBA = LE_64(0ULL);
		gpt.efi_gpt_LastUsableLBA =
		    LE_64((zv->zv_volsize >> zv->zv_min_bs) - 1);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry = LE_32(sizeof (gpe));

		UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
		gpe.efi_gpe_StartingLBA = gpt.efi_gpt_FirstUsableLBA;
		gpe.efi_gpe_EndingLBA = gpt.efi_gpt_LastUsableLBA;

		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);

		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);

		mutex_exit(&zvol_state_lock);
		if (ddi_copyout(&gpt, efi.dki_data, sizeof (gpt), flag) ||
		    ddi_copyout(&gpe, efi.dki_data + 1, sizeof (gpe), flag))
			error = EFAULT;
		return (error);

	default:
		error = ENOTSUP;
		break;

	}
	mutex_exit(&zvol_state_lock);
	return (error);
}
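A note on the CRC convention above: seeding the kernel CRC32() macro with -1U and storing the complement (~crc) yields the standard CRC-32 that GPT consumers expect. In user space the same value can be obtained from zlib; a minimal sketch, assuming zlib is acceptable to the caller:

#include <stddef.h>
#include <stdint.h>
#include <zlib.h>

/* Equivalent of CRC32(crc, buf, len, -1U, crc32_table) followed by ~crc. */
static uint32_t
gpt_crc32(const void *buf, size_t len)
{
	return ((uint32_t)crc32(0L, (const Bytef *)buf, (uInt)len));
}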
Example 15
void
arn_recv_mgmt(struct ieee80211com *ic, mblk_t *mp, struct ieee80211_node *in,
    int subtype, int rssi, uint32_t rstamp)
{
	struct arn_softc *sc = (struct arn_softc *)ic;

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	sc->sc_recv_mgmt(ic, mp, in, subtype, rssi, rstamp);

	ARN_LOCK(sc);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/* update rssi statistics */
		if (sc->sc_bsync && in == ic->ic_bss &&
		    ic->ic_state == IEEE80211_S_RUN) {
			/*
			 * Resync beacon timers using the tsf of the beacon
			 * frame we just received.
			 */
			arn_beacon_config(sc);
		}
		/* FALLTHRU */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (ic->ic_opmode == IEEE80211_M_IBSS &&
		    ic->ic_state == IEEE80211_S_RUN &&
		    (in->in_capinfo & IEEE80211_CAPINFO_IBSS)) {
			uint64_t tsf = arn_extend_tsf(sc, rstamp);
			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change its bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.  Note that hardware
			 * reconfiguration happens through callback to
			 * ath_newstate as the state machine will go from
			 * RUN -> RUN when this happens.
			 */
			if (LE_64(in->in_tstamp.tsf) >= tsf) {
				ARN_DBG((ARN_DBG_BEACON, "arn: arn_recv_mgmt:"
				    "ibss merge, rstamp %u tsf %lu "
				    "tstamp %lu\n", rstamp, tsf,
				    in->in_tstamp.tsf));
				ARN_UNLOCK(sc);
				ARN_DBG((ARN_DBG_BEACON, "arn_recv_mgmt():"
				    "ibss_merge: rstamp=%d in_tstamp=%02x %02x"
				    " %02x %02x %02x %02x %02x %02x\n",
				    rstamp, in->in_tstamp.data[0],
				    in->in_tstamp.data[1],
				    in->in_tstamp.data[2],
				    in->in_tstamp.data[3],
				    in->in_tstamp.data[4],
				    in->in_tstamp.data[5],
				    in->in_tstamp.data[6],
				    in->in_tstamp.data[7]));
				(void) ieee80211_ibss_merge(in);
				return;
			}
		}
		break;
	}
	ARN_UNLOCK(sc);
}
Example 16
static int
efi_read(int fd, struct dk_gpt *vtoc)
{
	int			i, j;
	int			label_len;
	int			rval = 0;
	int			md_flag = 0;
	struct dk_minfo		disk_info;
	dk_efi_t		dk_ioc;
	efi_gpt_t		*efi;
	efi_gpe_t		*efi_parts;
	struct dk_cinfo		dki_info;
	uint32_t		user_length;

	/*
	 * get the partition number for this file descriptor.
	 */
	if (ioctl(fd, DKIOCINFO, (caddr_t)&dki_info) == -1) {
		if (efi_debug)
		    (void) fprintf(stderr, "DKIOCINFO errno 0x%x\n", errno);
		switch (errno) {
		case EIO:
			return (VT_EIO);
		case EINVAL:
			return (VT_EINVAL);
		default:
			return (VT_ERROR);
		}
	}
	if ((strncmp(dki_info.dki_cname, "pseudo", 7) == 0) &&
	    (strncmp(dki_info.dki_dname, "md", 3) == 0)) {
		md_flag++;
	}
	/* get the LBA size */
	if (ioctl(fd, DKIOCGMEDIAINFO, (caddr_t)&disk_info) == -1) {
		if (efi_debug) {
			(void) fprintf(stderr,
			    "assuming LBA 512 bytes %d\n",
			    errno);
		}
		disk_info.dki_lbsize = DEV_BSIZE;
	}
	if (disk_info.dki_lbsize == 0) {
		if (efi_debug) {
			(void) fprintf(stderr,
			    "efi_read: assuming LBA 512 bytes\n");
		}
		disk_info.dki_lbsize = DEV_BSIZE;
	}
	/*
	 * Read the EFI GPT to figure out how many partitions we need
	 * to deal with.
	 */
	dk_ioc.dki_lba = 1;
	if (NBLOCKS(vtoc->efi_nparts, disk_info.dki_lbsize) < 34) {
		label_len = EFI_MIN_ARRAY_SIZE + disk_info.dki_lbsize;
	} else {
		label_len = vtoc->efi_nparts * (int) sizeof (efi_gpe_t) +
				    disk_info.dki_lbsize;
		if (label_len % disk_info.dki_lbsize) {
			/* pad to physical sector size */
			label_len += disk_info.dki_lbsize;
			label_len &= ~(disk_info.dki_lbsize - 1);
		}
	}

	if ((dk_ioc.dki_data = calloc(label_len, 1)) == NULL)
		return (VT_ERROR);

	dk_ioc.dki_length = label_len;
	user_length = vtoc->efi_nparts;
	efi = dk_ioc.dki_data;
	if (md_flag) {
		if (efi_ioctl(fd, DKIOCGETEFI, &dk_ioc) == -1) {
			switch (errno) {
			case EIO:
				return (VT_EIO);
			default:
				return (VT_ERROR);
			}
		}
	} else if ((rval = check_label(fd, &dk_ioc)) == VT_EINVAL) {
		/* no valid label here; try the alternate */
		dk_ioc.dki_lba = disk_info.dki_capacity - 1;
		dk_ioc.dki_length = disk_info.dki_lbsize;
		rval = check_label(fd, &dk_ioc);
		if (rval != 0) {
			/*
			 * This is a workaround for legacy systems.
			 *
			 * In the past, the last sector of a SCSI disk was
			 * invisible on the x86 platform, so the backup label
			 * was saved on the next-to-last sector instead. A user
			 * may move a disk from such an older Solaris system
			 * to the present one.
			 */
			dk_ioc.dki_lba = disk_info.dki_capacity - 2;
			dk_ioc.dki_length = disk_info.dki_lbsize;
			rval = check_label(fd, &dk_ioc);
			if (efi_debug && (rval == 0)) {
				(void) fprintf(stderr,
				    "efi_read: primary label corrupt; "
				    "using legacy EFI backup label\n");
			}
		}

		if (rval == 0) {
			if (efi_debug) {
				(void) fprintf(stderr,
				    "efi_read: primary label corrupt; "
				    "using backup\n");
			}
			dk_ioc.dki_lba = LE_64(efi->efi_gpt_PartitionEntryLBA);
			vtoc->efi_flags |= EFI_GPT_PRIMARY_CORRUPT;
			vtoc->efi_nparts =
			    LE_32(efi->efi_gpt_NumberOfPartitionEntries);
			/*
			 * partitions are between last usable LBA and
			 * backup partition header
			 */
			dk_ioc.dki_data++;
			dk_ioc.dki_length = disk_info.dki_capacity -
						    dk_ioc.dki_lba - 1;
			dk_ioc.dki_length *= disk_info.dki_lbsize;
			if (dk_ioc.dki_length > (len_t)label_len) {
				rval = VT_EINVAL;
			} else {
				rval = efi_ioctl(fd, DKIOCGETEFI, &dk_ioc);
			}
		}
	}
	if (rval < 0) {
		free(efi);
		return (rval);
	}

	/* partitions start in the next block */
	/* LINTED -- always longlong aligned */
	efi_parts = (efi_gpe_t *)(((char *)efi) + disk_info.dki_lbsize);

	/*
	 * Assemble this into a "dk_gpt" struct for easier
	 * digestibility by applications.
	 */
	vtoc->efi_version = LE_32(efi->efi_gpt_Revision);
	vtoc->efi_nparts = LE_32(efi->efi_gpt_NumberOfPartitionEntries);
	vtoc->efi_part_size = LE_32(efi->efi_gpt_SizeOfPartitionEntry);
	vtoc->efi_lbasize = disk_info.dki_lbsize;
	vtoc->efi_last_lba = disk_info.dki_capacity - 1;
	vtoc->efi_first_u_lba = LE_64(efi->efi_gpt_FirstUsableLBA);
	vtoc->efi_last_u_lba = LE_64(efi->efi_gpt_LastUsableLBA);
	UUID_LE_CONVERT(vtoc->efi_disk_uguid, efi->efi_gpt_DiskGUID);

	/*
	 * If the array the user passed in is too small, set the length
	 * to what it needs to be and return
	 */
	if (user_length < vtoc->efi_nparts) {
		return (VT_EINVAL);
	}

	for (i = 0; i < vtoc->efi_nparts; i++) {

	    UUID_LE_CONVERT(vtoc->efi_parts[i].p_guid,
		efi_parts[i].efi_gpe_PartitionTypeGUID);

	    for (j = 0;
		j < sizeof (conversion_array) / sizeof (struct uuid_to_ptag);
		j++) {

		    if (bcmp(&vtoc->efi_parts[i].p_guid,
			&conversion_array[j].uuid,
			sizeof (struct uuid)) == 0) {
			    vtoc->efi_parts[i].p_tag = j;
			    break;
		    }
	    }
	    if (vtoc->efi_parts[i].p_tag == V_UNASSIGNED)
		    continue;
	    vtoc->efi_parts[i].p_flag =
		LE_16(efi_parts[i].efi_gpe_Attributes.PartitionAttrs);
	    vtoc->efi_parts[i].p_start =
		LE_64(efi_parts[i].efi_gpe_StartingLBA);
	    vtoc->efi_parts[i].p_size =
		LE_64(efi_parts[i].efi_gpe_EndingLBA) -
		    vtoc->efi_parts[i].p_start + 1;
	    for (j = 0; j < EFI_PART_NAME_LEN; j++) {
		vtoc->efi_parts[i].p_name[j] =
		    (uchar_t)LE_16(efi_parts[i].efi_gpe_PartitionName[j]);
	    }

	    UUID_LE_CONVERT(vtoc->efi_parts[i].p_uguid,
		efi_parts[i].efi_gpe_UniquePartitionGUID);
	}
	free(efi);

	return (dki_info.dki_partition);
}