Example #1
static int
ik_rec_update(struct btr_instance *tins, struct btr_record *rec,
	       daos_iov_t *key, daos_iov_t *val_iov)
{
	struct umem_instance	*umm = &tins->ti_umm;
	struct ik_rec		*irec;
	char			*val;
	TMMID(struct ik_rec)	 irec_mmid;

	irec_mmid = umem_id_u2t(rec->rec_mmid, struct ik_rec);
	irec = umem_id2ptr_typed(umm, irec_mmid);

	if (irec->ir_val_msize >= val_iov->iov_len) {
		umem_tx_add(umm, irec->ir_val_mmid, irec->ir_val_msize);

	} else {
		umem_tx_add_mmid_typed(umm, irec_mmid);
		umem_free(umm, irec->ir_val_mmid);

		irec->ir_val_msize = val_iov->iov_len;
		irec->ir_val_mmid = umem_alloc(umm, val_iov->iov_len);
		D_ASSERT(!UMMID_IS_NULL(irec->ir_val_mmid));
	}
	val = umem_id2ptr(umm, irec->ir_val_mmid);

	memcpy(val, val_iov->iov_buf, val_iov->iov_len);
	irec->ir_val_size = val_iov->iov_len;
	return 0;
}
Example #2
void
rm_str(char *str)
{
	size_t sz = strlen(str) + 1;	/* include the terminating NUL */

	umem_free(str, sz);
}
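rm_str() only works if the string was obtained from umem with exactly strlen() + 1 bytes, since umem_free() must be given the same size that was requested from umem_alloc(). A minimal, hypothetical allocating counterpart under that assumption (dup_str is not part of the examples):

char *
dup_str(const char *src)
{
	size_t sz = strlen(src) + 1;	/* same size rm_str() will free */
	char *dst = umem_alloc(sz, UMEM_NOFAIL);

	(void) memcpy(dst, src, sz);
	return (dst);
}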
Example #3
static void
smu_free(struct storage *s)
{
	struct smu *smu;

	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
	smu = s->priv;
	assert(smu->sz == smu->s.space);
	Lck_Lock(&smu_mtx);
	VSC_C_main->sma_nobj--;
	VSC_C_main->sma_nbytes -= smu->sz;
	VSC_C_main->sma_bfree += smu->sz;
	Lck_Unlock(&smu_mtx);
	umem_free(smu->s.ptr, smu->s.space);
	umem_free(smu, sizeof *smu);
}
Example #4
int main(int argc, char** argv) {
    ZVM_UMEM_INIT;

    int c = 0;
    int i;
    const int size = 1024*1024;

    for (i = 0; i < 100000; i++) {
        c += size + i;

        /* UMEM_NOFAIL allocations either succeed or abort the process,
         * so the failure branch below is purely defensive. */
        void* addr = umem_alloc(size + i, UMEM_NOFAIL);
        if (addr != NULL) {
            printf("OK i=%d alloc size=%d, addr=%p\n", i, size + i, addr);
            umem_free(addr, size + i);
        } else {
            printf("FAIL i=%d alloc size=%d, addr=%p\n", i, size + i, addr);
        }
    }

    /* write to default device (in our case it is stdout) */
    printf("hello, world\n");

    /* write to user log (stderr) */
    fprintf(stderr, "hello, world\n");

    return 0;
}
Example #5
void
fmd_agent_close(fmd_agent_hdl_t *hdl)
{
	(void) close(hdl->agent_devfd);
	nvlist_free(hdl->agent_ioc_versions);
	umem_free(hdl, sizeof (fmd_agent_hdl_t));
}
Example #6
/*
 * Open /dev/fm and return a handle.  ver is the overall interface version.
 */
static fmd_agent_hdl_t *
fmd_agent_open_dev(int ver, int mode)
{
	fmd_agent_hdl_t *hdl;
	int fd, err;
	nvlist_t *nvl;

	if ((fd = open("/dev/fm", mode)) < 0)
		return (NULL); /* errno is set for us */

	if ((hdl = umem_alloc(sizeof (fmd_agent_hdl_t),
	    UMEM_DEFAULT)) == NULL) {
		err = errno;
		(void) close(fd);
		errno = err;
		return (NULL);
	}

	hdl->agent_devfd = fd;
	hdl->agent_version = ver;

	/*
	 * Get the individual interface versions.
	 */
	if ((err = fmd_agent_nvl_ioctl(hdl, FM_IOC_VERSIONS, ver, NULL, &nvl))
	    != 0) {
		(void) close(fd);
		umem_free(hdl, sizeof (fmd_agent_hdl_t));
		errno = err;
		return (NULL);
	}

	hdl->agent_ioc_versions = nvl;
	return (hdl);
}
Example #7
static int32_t
fma_cap_cpu_info(cpu_tbl_t *ci)
{
    nvlist_t **cpus, *nvl;
    uint_t ncpu, i;
    fmd_agent_hdl_t *hdl;
    char *ven;
    int32_t family, model;

    if ((hdl = fmd_agent_open(FMD_AGENT_VERSION)) == NULL)
        return (-1);
    if (fmd_agent_physcpu_info(hdl, &cpus, &ncpu) != 0) {
        fmd_agent_close(hdl);
        return (-1);
    }
    fmd_agent_close(hdl);

    if (cpus == NULL)
        return (-1);

    /*
     * There is no mixed CPU type on x86 systems, it's ok to
     * just pick the first one
     */
    nvl = cpus[0];
    if (nvlist_lookup_string(nvl, FM_PHYSCPU_INFO_VENDOR_ID, &ven) != 0 ||
            nvlist_lookup_int32(nvl, FM_PHYSCPU_INFO_FAMILY, &family) != 0 ||
            nvlist_lookup_int32(nvl, FM_PHYSCPU_INFO_MODEL, &model) != 0) {
        for (i = 0; i < ncpu; i++)
            nvlist_free(cpus[i]);
        umem_free(cpus, sizeof (nvlist_t *) * ncpu);
        return (-1);
    }

    (void) snprintf(ci->vendor, X86_VENDOR_STRLEN, "%s", ven);
    ci->family = family;
    ci->model = model;

    for (i = 0; i < ncpu; i++)
        nvlist_free(cpus[i]);
    umem_free(cpus, sizeof (nvlist_t *) * ncpu);
    return (0);
}
Example #8
static void
handle_comments(char *buf, int len)
{
	if (cind >= 2)
		return;

	if (buf[0] != '#')
		return;

	if (ccmnt[cind] != NULL)
		umem_free(ccmnt[cind], strlen(ccmnt[cind]) + 1);
	ccmnt[cind] = mystrcpy(buf, len);
	cind++;
}
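The strlen(ccmnt[cind]) + 1 passed to umem_free() above implies that mystrcpy() returns a NUL-terminated copy whose umem allocation is exactly the string length plus one. A hypothetical sketch of mystrcpy() under that assumption:

static char *
mystrcpy(const char *buf, int len)
{
	size_t sz = strnlen(buf, len) + 1;	/* matches the umem_free() size above */
	char *s = umem_alloc(sz, UMEM_NOFAIL);

	(void) memcpy(s, buf, sz - 1);
	s[sz - 1] = '\0';
	return (s);
}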
Example #9
void
taskq_destroy(taskq_t *tq)
{
	int t;
	int nthreads = tq->tq_nthreads;

	taskq_wait(tq);

	mxlock(&tq->tq_lock);

	tq->tq_flags &= ~TASKQ_ACTIVE;
	condbcast(&tq->tq_dispatch_cv);

	while (tq->tq_nthreads != 0)
		condwait(&tq->tq_wait_cv, &tq->tq_lock);

	tq->tq_minalloc = 0;
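	/*
	 * With tq_minalloc forced to 0, task_free() (see the task_free()
	 * example below) hands entries back to umem instead of caching them
	 * on the freelist, so this loop drains and frees every cached entry.
	 */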
	while (tq->tq_nalloc != 0) {
		assert(tq->tq_freelist != NULL);
		task_free(tq, task_alloc(tq, UMEM_NOFAIL));
	}

	mxunlock(&tq->tq_lock);

	for (t = 0; t < nthreads; t++)
		pthread_join(tq->tq_threadlist[t], NULL);

	umem_free(tq->tq_threadlist, nthreads * sizeof (pthread_t));

	rwdestroy(&tq->tq_threadlock);
	mxdestroy(&tq->tq_lock);
	conddestroy(&tq->tq_dispatch_cv);
	conddestroy(&tq->tq_wait_cv);
	conddestroy(&tq->tq_maxalloc_cv);

	umem_free(tq, sizeof (taskq_t));
}
Example #10
void
zk_thread_exit(void)
{
	kthread_t *kt = curthread;

	ASSERT(pthread_equal(kt->t_tid, pthread_self()));

	umem_free(kt, sizeof (kthread_t));

	pthread_mutex_lock(&kthread_lock);
	kthread_nr--;
	pthread_mutex_unlock(&kthread_lock);

	pthread_cond_broadcast(&kthread_cond);
	pthread_exit((void *)TS_MAGIC);
}
Example #11
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_next = tq->tq_freelist;
		tq->tq_freelist = t;
	} else {
		tq->tq_nalloc--;
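		/*
		 * Drop tq_lock around the umem_free() call so the taskq
		 * lock is not held while calling into the allocator.
		 */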
		mxunlock(&tq->tq_lock);
		umem_free(t, sizeof (taskq_ent_t));
		mxlock(&tq->tq_lock);
	}

	if (tq->tq_maxalloc_wait)
		condsig(&tq->tq_maxalloc_cv);
}
Example #12
static void
print_list(list_t *seg_list, char *fname, int options)
{
	uint64_t	lz_holes, bs = 0;
	uint64_t	hole_blks_seen = 0, data_blks_seen = 0;
	seg_t		*seg;

	if (zfs_get_hole_count(fname, &lz_holes, &bs) != 0) {
		perror("zfs_get_hole_count");
		exit(1);
	}

	while ((seg = list_remove_head(seg_list)) != NULL) {
		if (options & PRINT_VERBOSE)
			(void) fprintf(stdout, "%c %llu:%llu\n",
			    seg->seg_type == SEEK_HOLE ? 'h' : 'd',
			    seg->seg_offset, seg->seg_len);

		if (seg->seg_type == SEEK_HOLE) {
			hole_blks_seen += seg->seg_len / bs;
		} else {
			data_blks_seen += seg->seg_len / bs;
		}
		umem_free(seg, sizeof (seg_t));
	}

	/* Verify libzfs sees the same number of hole blocks found manually. */
	if (lz_holes != hole_blks_seen) {
		(void) fprintf(stderr, "Counted %llu holes, but libzfs found "
		    "%llu\n", (u_longlong_t)hole_blks_seen,
		    (u_longlong_t)lz_holes);
		exit(1);
	}

	if ((options & PRINT_HOLE) && (options & PRINT_DATA)) {
		(void) fprintf(stdout, "datablks: %llu\n",
		    (u_longlong_t)data_blks_seen);
		(void) fprintf(stdout, "holeblks: %llu\n",
		    (u_longlong_t)hole_blks_seen);
		return;
	}

	if (options & PRINT_DATA)
		(void) fprintf(stdout, "%llu\n",
		    (u_longlong_t)data_blks_seen);
	if (options & PRINT_HOLE)
		(void) fprintf(stdout, "%llu\n",
		    (u_longlong_t)hole_blks_seen);
}
Example #13
static int
pool_active(void *unused, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zfs_cmd_t *zcp;
	nvlist_t *innvl;
	char *packed = NULL;
	size_t size = 0;
	int fd, ret;

	/*
	 * Use ZFS_IOC_POOL_SYNC to confirm if a pool is active
	 */

	fd = open("/dev/zfs", O_RDWR);
	if (fd < 0)
		return (-1);

	zcp = umem_zalloc(sizeof (zfs_cmd_t), UMEM_NOFAIL);

	innvl = fnvlist_alloc();
	fnvlist_add_boolean_value(innvl, "force", B_FALSE);

	(void) strlcpy(zcp->zc_name, name, sizeof (zcp->zc_name));
	packed = fnvlist_pack(innvl, &size);
	zcp->zc_nvlist_src = (uint64_t)(uintptr_t)packed;
	zcp->zc_nvlist_src_size = size;

	ret = ioctl(fd, ZFS_IOC_POOL_SYNC, zcp);

	fnvlist_pack_free(packed, size);
	free((void *)(uintptr_t)zcp->zc_nvlist_dst);
	nvlist_free(innvl);
	umem_free(zcp, sizeof (zfs_cmd_t));

	(void) close(fd);

	*isactive = (ret == 0);

	return (0);
}
Example #14
static void
smu_trim(const struct storage *s, size_t size)
{
	struct smu *smu;
	void *p;

	CHECK_OBJ_NOTNULL(s, STORAGE_MAGIC);
	smu = s->priv;
	assert(smu->sz == smu->s.space);
	if ((p = umem_alloc(size, UMEM_DEFAULT)) != NULL) {
		memcpy(p, smu->s.ptr, size);
		umem_free(smu->s.ptr, smu->s.space);
		Lck_Lock(&smu_mtx);
		VSC_C_main->sma_nbytes -= (smu->sz - size);
		VSC_C_main->sma_bfree += smu->sz - size;
		smu->sz = size;
		Lck_Unlock(&smu_mtx);
		smu->s.ptr = p;
		smu->s.space = size;
	}
}
Example #15
void
thread_fini(void)
{
	kthread_t *kt = curthread;

	ASSERT(pthread_equal(kt->t_tid, pthread_self()));
	ASSERT3P(kt->t_func, ==, NULL);

	umem_free(kt, sizeof (kthread_t));

	/* Wait for all threads to exit via thread_exit() */
	VERIFY3S(pthread_mutex_lock(&kthread_lock), ==, 0);

	kthread_nr--; /* Main thread is exiting */

	while (kthread_nr > 0)
		VERIFY0(pthread_cond_wait(&kthread_cond, &kthread_lock));

	ASSERT3S(kthread_nr, ==, 0);
	VERIFY3S(pthread_mutex_unlock(&kthread_lock), ==, 0);

	VERIFY3S(pthread_key_delete(kthread_key), ==, 0);
}
Example #16
/*ARGSUSED*/
static int
create_chips(topo_mod_t *mod, tnode_t *pnode, const char *name,
    topo_instance_t min, topo_instance_t max, void *arg, nvlist_t *auth,
    int mc_offchip)
{
	fmd_agent_hdl_t *hdl;
	nvlist_t **cpus;
	int nerr = 0;
	uint_t i, ncpu;

	if (strcmp(name, CHIP_NODE_NAME) != 0)
		return (0);

	if ((hdl = fmd_agent_open(FMD_AGENT_VERSION)) == NULL)
		return (-1);
	if (fmd_agent_physcpu_info(hdl, &cpus, &ncpu) != 0) {
		whinge(mod, NULL, "create_chip: fmd_agent_physcpu_info "
		    "failed: %s\n", fmd_agent_errmsg(hdl));
		fmd_agent_close(hdl);
		return (-1);
	}
	fmd_agent_close(hdl);

	for (i = 0; i < ncpu; i++) {
		nerr -= create_chip(mod, pnode, min, max, cpus[i], auth,
		    mc_offchip);
		nvlist_free(cpus[i]);
	}
	umem_free(cpus, sizeof (nvlist_t *) * ncpu);

	if (nerr == 0) {
		return (0);
	} else {
		(void) topo_mod_seterrno(mod, EMOD_PARTIAL_ENUM);
		return (-1);
	}
}
Example #17
void
inj_free(void *buf, size_t sz)
{
	umem_free(buf, sz);
}
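inj_free() is a thin wrapper that keeps umem_free()'s interface, so callers must carry the original allocation size. A hypothetical counterpart and caller (inj_alloc and example_roundtrip are assumptions for illustration, not part of the source):

static void *
inj_alloc(size_t sz)
{
	return (umem_alloc(sz, UMEM_NOFAIL));
}

static void
example_roundtrip(void)
{
	size_t sz = 128;
	void *buf = inj_alloc(sz);

	/* ... use buf ... */
	inj_free(buf, sz);
}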
Example #18
int32_t fork() {
    /* Time to... fork!
     * I had spent a long time preparing for this!
     * now I am done, it's time to start working on fork().
     */
    int32_t i;
    proc_t *newproc;

    /* create a new process structure: */
    newproc = kmalloc(sizeof(proc_t));
    if (newproc == NULL)
        return -1;

    /* set parent */
    newproc->parent = curproc;

    /* initialize descriptors: */
    newproc->plist.proc = newproc;
    newproc->sched.proc = newproc;
    newproc->irqd.proc  = newproc;
    newproc->semad.proc = newproc;

    /* create memory: */
    if (umem_init(&(newproc->umem))) {
        kfree(newproc);
        return -1; /* error. */
    }

    /* inherit parent's memory: */
    if (umem_copy(&(curproc->umem), &(newproc->umem))) {
        umem_free(&(newproc->umem));
        kfree(newproc);
        return -1; /* error. */
    }

    /* create a new kernel stack. */
    newproc->kstack = (unsigned char *) kmalloc(KERNEL_STACK_SIZE);
    if (newproc->kstack == NULL) {
        umem_free(&(newproc->umem));
        kfree(newproc);
        return -1; /* error. */
    }
    initproc->phy_stack_bot = arch_vmpage_getAddr(NULL, newproc->kstack);

    /* initialize kernel stack...
     * this invokes page faults to allocate memory for
     * the stack early.
     */
    for (i = 0; i < KERNEL_STACK_SIZE; i++)
        newproc->kstack[i] = 0;

    /* copy context from parent's stack to child's stack: */
    copy_context(newproc);

    /* copy the set of file descriptors: */
    for (i = 0; i < FD_MAX; i++) {
        if (curproc->file[i] == NULL) {
            newproc->file[i] = NULL;
        } else {
            curproc->file[i]->fcount++;
            newproc->file[i] = curproc->file[i];
        }
    }

    /* inherit the current working directory: */
    curproc->cwd->fcount++;
    newproc->cwd = curproc->cwd;

    /* set pid: */
    newproc->pid = ++last_pid;

    /* inform the scheduler that this is a just-forked process: */
    newproc->after_fork = 1;

    /* initialize inbox */
    newproc->inbox_lock = 0;
    newproc->blocked_for_msg = 0;
    linkedlist_init(&(newproc->inbox));

    /* children */
    newproc->blocked_for_child = 0;

    /* not blocked */
    newproc->blocked = 0;
    newproc->lock_to_unlock = NULL;

    /* exit status: */
    newproc->terminated = 0;
    newproc->status = 0;

    /* add the new process to the list of processes: */
    linkedlist_addlast((linkedlist*)&proclist, (linknode*)&(newproc->plist));

    /* add to scheduler's queue: */
    linkedlist_addlast((linkedlist*)&q_ready, (linknode*)&(newproc->sched));

    /* call the scheduler */
    scheduler();

    /* return */
    if (curproc == newproc) {
        return 0;
    } else {
        return newproc->pid;
    }

}
Example #19
/*
 * Ontario SBL event handler, subscribed to:
 * 	PICLEVENT_SYSEVENT_DEVICE_ADDED
 * 	PICLEVENT_SYSEVENT_DEVICE_REMOVED
 */
static void
piclsbl_handler(const char *ename, const void *earg, size_t size,
		void *cookie)
{
	char		*devfs_path;
	char		hdd_location[PICL_PROPNAMELEN_MAX];
	nvlist_t	*nvlp = NULL;
	pcp_msg_t	send_msg;
	pcp_msg_t	recv_msg;
	pcp_sbl_req_t	*req_ptr = NULL;
	pcp_sbl_resp_t	*resp_ptr = NULL;
	int		status = -1;
	int		target;
	disk_lookup_t	lookup;
	int		channel_fd = -1;

	/*
	 * setup the request data to attach to the libpcp msg
	 */
	if ((req_ptr = (pcp_sbl_req_t *)umem_zalloc(sizeof (pcp_sbl_req_t),
			UMEM_DEFAULT)) == NULL)
		goto sbl_return;

	/*
	 * This plugin serves to enable or disable the blue RAS
	 * 'ok-to-remove' LED that is on each of the 4 disks on the
	 * Ontario.  We catch the event via the picl handler, and
	 * if the event is DEVICE_ADDED for one of our onboard disks,
	 * then we'll be turning off the LED. Otherwise, if the event
	 * is DEVICE_REMOVED, then we turn it on.
	 */
	if (strcmp(ename, PICLEVENT_SYSEVENT_DEVICE_ADDED) == 0)
		req_ptr->sbl_action = PCP_SBL_DISABLE;
	else if (strcmp(ename, PICLEVENT_SYSEVENT_DEVICE_REMOVED) == 0)
		req_ptr->sbl_action = PCP_SBL_ENABLE;
	else
		goto sbl_return;

	/*
	 * retrieve the device's physical path from the event payload
	 */
	if (nvlist_unpack((char *)earg, size, &nvlp, NULL))
		goto sbl_return;
	if (nvlist_lookup_string(nvlp, "devfs-path", &devfs_path))
		goto sbl_return;

	/*
	 * look for this disk in the picl tree, and if its
	 * location indicates that it's one of our internal
	 * disks, then set sbl_id to indicate which one.
	 * otherwise, return as it is not one of our disks.
	 */
	lookup.path = strdup(devfs_path);
	lookup.disk = NULL;
	lookup.result = DISK_NOT_FOUND;

	/* first, find the disk */
	status = ptree_walk_tree_by_class(root_node, "disk", (void *)&lookup,
						cb_find_disk);
	if (status != PICL_SUCCESS)
		goto sbl_return;

	if (lookup.result != DISK_FOUND) {
		/* not one of our disks */
		goto sbl_return;
	}

	/* now, look up its location in the node */
	status = ptree_get_propval_by_name(lookup.disk, "Location",
			(void *)&hdd_location, PICL_PROPNAMELEN_MAX);
	if (status != PICL_SUCCESS) {
		syslog(LOG_ERR, "piclsbl: failed hdd discovery");
		goto sbl_return;
	}

	if (strcmp(hdd_location, HDD0) == 0) {
		req_ptr->sbl_id = PCP_SBL_HDD0;
		target = 0;
	} else if (strcmp(hdd_location, HDD1) == 0) {
		req_ptr->sbl_id = PCP_SBL_HDD1;
		target = 1;
	} else if (strcmp(hdd_location, HDD2) == 0) {
		req_ptr->sbl_id = PCP_SBL_HDD2;
		target = 2;
	} else if (strcmp(hdd_location, HDD3) == 0) {
		req_ptr->sbl_id = PCP_SBL_HDD3;
		target = 3;
	} else {
		/* this is not one of the onboard disks */
		goto sbl_return;
	}

	/*
	 * check the onboard RAID configuration for this disk. if it is
	 * a member of a RAID and is not the RAID itself, ignore the event
	 */
	if (check_raid(target))
		goto sbl_return;

	/*
	 * we have the information we need, init the platform channel.
	 * the platform channel driver will only allow one connection
	 * at a time on this socket. on the offchance that more than
	 * one event comes in, we'll retry to initialize this connection
	 * up to 3 times
	 */
	if ((channel_fd = (*pcp_init_ptr)(LED_CHANNEL)) < 0) {
		/* failed to init; wait and retry up to 3 times */
		int s = PCPINIT_TIMEOUT;
		int retries = 0;
		while (++retries) {
			(void) sleep(s);
			if ((channel_fd = (*pcp_init_ptr)(LED_CHANNEL)) >= 0)
				break;
			else if (retries == 3) {
				syslog(LOG_ERR, "piclsbl: ",
					"SC channel initialization failed");
				goto sbl_return;
			}
			/* continue */
		}
	}

	/*
	 * populate the message for libpcp
	 */
	send_msg.msg_type = PCP_SBL_CONTROL;
	send_msg.sub_type = NULL;
	send_msg.msg_len = sizeof (pcp_sbl_req_t);
	send_msg.msg_data = (uint8_t *)req_ptr;

	/*
	 * send the request, receive the response
	 */
	if ((*pcp_send_recv_ptr)(channel_fd, &send_msg, &recv_msg,
		PCPCOMM_TIMEOUT) < 0) {
		/* we either timed out or erred; either way try again */
		int s = PCPCOMM_TIMEOUT;
		(void) sleep(s);
		if ((*pcp_send_recv_ptr)(channel_fd, &send_msg, &recv_msg,
				PCPCOMM_TIMEOUT) < 0) {
			syslog(LOG_ERR, "piclsbl: communication failure");
			goto sbl_return;
		}
	}

	/*
	 * validate that this data was meant for us
	 */
	if (recv_msg.msg_type != PCP_SBL_CONTROL_R) {
		syslog(LOG_ERR, "piclsbl: unbound packet received");
		goto sbl_return;
	}

	/*
	 * verify that the LED action has taken place
	 */
	resp_ptr = (pcp_sbl_resp_t *)recv_msg.msg_data;
	if (resp_ptr->status == PCP_SBL_ERROR) {
		syslog(LOG_ERR, "piclsbl: OK2RM LED action error");
		goto sbl_return;
	}

	/*
	 * ensure the LED action taken is the one requested
	 */
	if ((req_ptr->sbl_action == PCP_SBL_DISABLE) &&
		(resp_ptr->sbl_state != SBL_STATE_OFF))
		syslog(LOG_ERR, "piclsbl: OK2RM LED not OFF after disk "
				"configuration");
	else if ((req_ptr->sbl_action == PCP_SBL_ENABLE) &&
			(resp_ptr->sbl_state != SBL_STATE_ON))
		syslog(LOG_ERR, "piclsbl: OK2RM LED not ON after disk "
				"unconfiguration");
	else if (resp_ptr->sbl_state == SBL_STATE_UNKNOWN)
		syslog(LOG_ERR, "piclsbl: OK2RM LED set to unknown state");

sbl_return:

	if (channel_fd >= 0)
		(*pcp_close_ptr)(channel_fd);
	if (req_ptr != NULL)
		umem_free(req_ptr, sizeof (pcp_sbl_req_t));
	if (resp_ptr != NULL)
		free(resp_ptr);
	if (nvlp != NULL)
		nvlist_free(nvlp);
}
Example #20
static void
freeentry(pkgentry_t *p)
{
	umem_free(p->line, p->len);
	umem_cache_free(ecache, p);
}
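freeentry() returns the fixed-size pkgentry_t to its umem object cache and releases the variable-length line buffer separately. A sketch of the allocating side, assuming ecache is a umem cache of pkgentry_t objects created elsewhere with umem_cache_create() (allocentry and its arguments are hypothetical):

static pkgentry_t *
allocentry(const char *line, size_t len)
{
	pkgentry_t *p = umem_cache_alloc(ecache, UMEM_NOFAIL);

	p->len = len;
	p->line = umem_alloc(len, UMEM_NOFAIL);
	(void) memcpy(p->line, line, len);
	return (p);
}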
Example #21
static void
cpu_free(void *data, size_t size)
{
	umem_free(data, size);
}
Example #22
/*
 * Perform /dev/fm ioctl.  The input and output data are represented by
 * name-value lists (nvlists).
 */
int
fmd_agent_nvl_ioctl(fmd_agent_hdl_t *hdl, int cmd, uint32_t ver,
    nvlist_t *innvl, nvlist_t **outnvlp)
{
	fm_ioc_data_t fid;
	int err = 0;
	char *inbuf = NULL, *outbuf = NULL;
	size_t insz = 0, outsz = 0;

	if (innvl != NULL) {
		if ((err = nvlist_size(innvl, &insz, NV_ENCODE_NATIVE)) != 0)
			return (err);
		if (insz > FM_IOC_MAXBUFSZ)
			return (ENAMETOOLONG);
		if ((inbuf = umem_alloc(insz, UMEM_DEFAULT)) == NULL)
			return (errno);

		if ((err = nvlist_pack(innvl, &inbuf, &insz,
		    NV_ENCODE_NATIVE, 0)) != 0) {
			umem_free(inbuf, insz);
			return (err);
		}
	}

	if (outnvlp != NULL) {
		outsz = FM_IOC_OUT_BUFSZ;
	}
	for (;;) {
		if (outnvlp != NULL) {
			outbuf = umem_alloc(outsz, UMEM_DEFAULT);
			if (outbuf == NULL) {
				err = errno;
				break;
			}
		}

		fid.fid_version = ver;
		fid.fid_insz = insz;
		fid.fid_inbuf = inbuf;
		fid.fid_outsz = outsz;
		fid.fid_outbuf = outbuf;

		if (ioctl(hdl->agent_devfd, cmd, &fid) < 0) {
			if (errno == ENAMETOOLONG && outsz != 0 &&
			    outsz < (FM_IOC_OUT_MAXBUFSZ / 2)) {
				/*
				 * The output buffer was too small: free it,
				 * double outsz and let the top of the loop
				 * allocate the larger buffer (re-allocating
				 * here would leak, since the loop re-assigns
				 * outbuf on the next iteration).
				 */
				umem_free(outbuf, outsz);
				outbuf = NULL;
				outsz *= 2;
			} else {
				err = errno;
				break;
			}
		} else if (outnvlp != NULL) {
			err = nvlist_unpack(fid.fid_outbuf, fid.fid_outsz,
			    outnvlp, 0);
			break;
		} else {
			break;
		}
	}

	if (inbuf != NULL)
		umem_free(inbuf, insz);
	if (outbuf != NULL)
		umem_free(outbuf, outsz);

	return (err);
}