Example No. 1
int
u_sysctlbyname(int ns,
    const char *name,
    void *oldp,
    size_t *oldlenp,
    const void *newp,
    size_t newlen)
{
	nvlist_t *nvl = NULL;
	int retval = 0;

	/* Create nvlist to populate the request into */
	nvl = nvlist_create(0);
	if (nvl == NULL) {
		warn("nvlist_create");
		retval = -1;
		goto done;
	}

	/* Create nvlist for a sysctl_str request */
	nvlist_add_string(nvl, "type", "sysctl_str");
	nvlist_add_string(nvl, "sysctl_str", name);

	/* XXX this sets errno as appropriate */
	retval = u_sysctl_do_sysctl(nvl, ns, oldp, oldlenp, newp, newlen);

done:
	if (nvl)
		nvlist_destroy(nvl);
	return (retval);
}
Example No. 2
static void
child(int sock)
{
	nvlist_t *nvl;

	nvl = nvlist_create(0);

	nvlist_add_bool(nvl, "nvlist/bool/true", true);
	nvlist_add_bool(nvl, "nvlist/bool/false", false);
	nvlist_add_number(nvl, "nvlist/number/0", 0);
	nvlist_add_number(nvl, "nvlist/number/1", 1);
	nvlist_add_number(nvl, "nvlist/number/-1", -1);
	nvlist_add_number(nvl, "nvlist/number/UINT64_MAX", UINT64_MAX);
	nvlist_add_number(nvl, "nvlist/number/INT64_MIN", INT64_MIN);
	nvlist_add_number(nvl, "nvlist/number/INT64_MAX", INT64_MAX);
	nvlist_add_string(nvl, "nvlist/string/", "");
	nvlist_add_string(nvl, "nvlist/string/x", "x");
	nvlist_add_string(nvl, "nvlist/string/abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz");
	nvlist_add_descriptor(nvl, "nvlist/descriptor/STDERR_FILENO", STDERR_FILENO);
	nvlist_add_binary(nvl, "nvlist/binary/x", "x", 1);
	nvlist_add_binary(nvl, "nvlist/binary/abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz", sizeof("abcdefghijklmnopqrstuvwxyz"));
	nvlist_add_nvlist(nvl, "nvlist/nvlist", nvl);

	nvlist_send(sock, nvl);

	nvlist_destroy(nvl);
}
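For context, a minimal sketch of the receiving side of this socket pair (not part of the original test). It assumes the two-argument form of nvlist_recv() from newer libnv, needs <sys/nv.h>, <err.h> and <stdio.h>, and only prints one of the strings sent above:
static void
parent(int sock)
{
	nvlist_t *nvl;

	/* Receive the nvlist packed and sent by child(). */
	nvl = nvlist_recv(sock, 0);
	if (nvl == NULL)
		err(1, "nvlist_recv");

	/* The nvlist_get_*() accessors abort on missing keys, so check first. */
	if (nvlist_exists_string(nvl, "nvlist/string/x"))
		printf("%s\n", nvlist_get_string(nvl, "nvlist/string/x"));

	nvlist_destroy(nvl);
}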
Example No. 3
int
ipmgmt_persist_if(ipmgmt_if_arg_t *sargp)
{
	ipadm_dbwrite_cbarg_t	cb;
	uint32_t		flags = sargp->ia_flags;
	nvlist_t		*nvl = NULL;
	int			err = 0;
	char			strval[IPMGMT_STRSIZE];

	if (!(flags & IPMGMT_PERSIST) || sargp->ia_family == AF_UNSPEC ||
	    sargp->ia_ifname[0] == '\0') {
		err = EINVAL;
		goto ret;
	}
	if ((err = nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0)) != 0)
		goto ret;
	if ((err = nvlist_add_string(nvl, IPADM_NVP_IFNAME,
	    sargp->ia_ifname)) != 0)
		goto ret;
	(void) snprintf(strval, IPMGMT_STRSIZE, "%d", sargp->ia_family);
	if ((err = nvlist_add_string(nvl, IPADM_NVP_FAMILY, strval)) != 0)
		goto ret;
	cb.dbw_nvl = nvl;
	cb.dbw_flags = 0;
	err = ipmgmt_db_walk(ipmgmt_db_add, &cb, IPADM_DB_WRITE);
ret:
	nvlist_free(nvl);
	return (err);
}
Example No. 4
struct hostent *
cap_gethostbyname2(cap_channel_t *chan, const char *name, int type)
{
	struct hostent *hp;
	nvlist_t *nvl;

	nvl = nvlist_create(0);
	nvlist_add_string(nvl, "cmd", "gethostbyname");
	nvlist_add_number(nvl, "family", (uint64_t)type);
	nvlist_add_string(nvl, "name", name);
	nvl = cap_xfer_nvlist(chan, nvl, 0);
	if (nvl == NULL) {
		h_errno = NO_RECOVERY;
		return (NULL);
	}
	if (nvlist_get_number(nvl, "error") != 0) {
		h_errno = (int)nvlist_get_number(nvl, "error");
		nvlist_destroy(nvl);
		return (NULL);
	}

	hp = hostent_unpack(nvl, &hent);
	nvlist_destroy(nvl);
	return (hp);
}
Example No. 5
/*
 * Enable processor set plugin.
 */
int
pool_pset_enable(void)
{
	int error;
	nvlist_t *props;

	ASSERT(pool_lock_held());
	ASSERT(INGLOBALZONE(curproc));
	/*
	 * Can't enable pools if there are existing cpu partitions.
	 */
	mutex_enter(&cpu_lock);
	if (cp_numparts > 1) {
		mutex_exit(&cpu_lock);
		return (EEXIST);
	}

	/*
	 * We want to switch things such that everything that was tagged with
	 * the special ALL_ZONES token now is explicitly visible to all zones:
	 * first add individual zones to the visibility list then remove the
	 * special "ALL_ZONES" token.  There must only be the default pset
	 * (PS_NONE) active if pools are being enabled, so we only need to
	 * deal with it.
	 *
	 * We want to make pool_pset_enabled() start returning B_TRUE before
	 * we call any of the visibility update functions.
	 */
	global_zone->zone_psetid = PS_NONE;
	/*
	 * We need to explicitly handle the global zone since
	 * zone_pset_set() won't modify it.
	 */
	pool_pset_visibility_add(PS_NONE, global_zone);
	/*
	 * A NULL argument means the ALL_ZONES token.
	 */
	pool_pset_visibility_remove(PS_NONE, NULL);
	error = zone_walk(pool_pset_zone_pset_set, (void *)PS_NONE);
	ASSERT(error == 0);

	/*
	 * It is safe to drop cpu_lock here.  We're still
	 * holding pool_lock so no new cpu partitions can
	 * be created while we're here.
	 */
	mutex_exit(&cpu_lock);
	(void) nvlist_alloc(&pool_pset_default->pset_props,
	    NV_UNIQUE_NAME, KM_SLEEP);
	props = pool_pset_default->pset_props;
	(void) nvlist_add_string(props, "pset.name", "pset_default");
	(void) nvlist_add_string(props, "pset.comment", "");
	(void) nvlist_add_int64(props, "pset.sys_id", PS_NONE);
	(void) nvlist_add_string(props, "pset.units", "population");
	(void) nvlist_add_byte(props, "pset.default", 1);
	(void) nvlist_add_uint64(props, "pset.max", 65536);
	(void) nvlist_add_uint64(props, "pset.min", 1);
	pool_pset_mod = pool_cpu_mod = gethrtime();
	return (0);
}
Example No. 6
int
cmd_breakup_components(char *str, char *sep, nvlist_t **hc_nvl)
{
	char namebuf[64], instbuf[64];
	char *token, *tokbuf;
	int i, j, namelen, instlen;

	i = 0;
	for (token = strtok_r(str, sep, &tokbuf);
	    token != NULL;
	    token = strtok_r(NULL, sep, &tokbuf)) {
		namelen = strcspn(token, "0123456789");
		instlen = strspn(token+namelen, "0123456789");
		(void) strncpy(namebuf, token, namelen);
		namebuf[namelen] = '\0';

		if ((j = map_name(namebuf)) < 0)
			continue; /* skip names that don't map */

		if (instlen == 0) {
			(void) strncpy(instbuf, "0", 2);
		} else {
			(void) strncpy(instbuf, token+namelen, instlen);
			instbuf[instlen] = '\0';
		}
		if (nvlist_add_string(hc_nvl[i], FM_FMRI_HC_NAME,
		    tr_tbl[j].hc_component) != 0 ||
		    nvlist_add_string(hc_nvl[i], FM_FMRI_HC_ID, instbuf) != 0)
			return (-1);
		i++;
	}
	return (1);
}
Example No. 7
/*
 * Encodes (packs) DFS information in 'info' into a flat
 * buffer in a name-value format. This function allocates a buffer
 * large enough to hold all the information, so the caller MUST free
 * the allocated memory by calling free().
 */
static uint32_t
dfs_root_encode(dfs_info_t *info, char **buf, size_t *bufsz)
{
	dfs_target_t *t;
	nvlist_t *nvl;
	int rc;

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (ERROR_NOT_ENOUGH_MEMORY);

	rc = nvlist_add_string(nvl, "comment", info->i_comment);
	rc |= nvlist_add_string(nvl, "guid", info->i_guid);
	rc |= nvlist_add_uint32(nvl, "state", info->i_state);
	rc |= nvlist_add_uint32(nvl, "timeout", info->i_timeout);
	rc |= nvlist_add_uint32(nvl, "propflags", info->i_propflags);
	t = info->i_targets;
	rc |= nvlist_add_string(nvl, "t_server", t->t_server);
	rc |= nvlist_add_string(nvl, "t_share", t->t_share);
	rc |= nvlist_add_uint32(nvl, "t_state", t->t_state);
	rc |= nvlist_add_uint32(nvl, "t_priority_class",
	    t->t_priority.p_class);
	rc |= nvlist_add_uint16(nvl, "t_priority_rank",
	    t->t_priority.p_rank);

	if (rc == 0)
		rc = nvlist_pack(nvl, buf, bufsz, NV_ENCODE_NATIVE, 0);

	nvlist_free(nvl);

	return ((rc == 0) ? ERROR_SUCCESS : ERROR_INTERNAL_ERROR);
}
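Purely as an illustration (this is not the project's code), the flat buffer produced above could be decoded with the standard libnvpair calls nvlist_unpack() and nvlist_lookup_*(); the field handling below, including the assumption that i_comment is a fixed-size array, is hypothetical:
static uint32_t
dfs_root_decode(char *buf, size_t bufsz, dfs_info_t *info)
{
	nvlist_t *nvl;
	char *cmnt;

	/* Rebuild the nvlist from the flat buffer created by dfs_root_encode(). */
	if (nvlist_unpack(buf, bufsz, &nvl, 0) != 0)
		return (ERROR_INTERNAL_ERROR);

	if (nvlist_lookup_string(nvl, "comment", &cmnt) == 0)
		(void) strlcpy(info->i_comment, cmnt, sizeof (info->i_comment));
	(void) nvlist_lookup_uint32(nvl, "state", &info->i_state);
	(void) nvlist_lookup_uint32(nvl, "timeout", &info->i_timeout);

	nvlist_free(nvl);
	return (ERROR_SUCCESS);
}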
Example No. 8
/**
 * Create the vdev leaf for the given path.
 * The function assumes that the path is a block device or a file.
 * Log devices and hot spares are not supported.
 * @param psz_path: path to the device to use
 * @return the new vdev or NULL in case of error.
 */
nvlist_t *lzwu_make_leaf_vdev(const char *psz_path)
{
        struct stat64 statbuf;
        nvlist_t *p_vdev;
        const char *psz_type;

        if(stat64(psz_path, &statbuf) != 0)
                return NULL;

        if(S_ISBLK(statbuf.st_mode))
                psz_type = VDEV_TYPE_DISK;
        else if(S_ISREG(statbuf.st_mode))
                psz_type = VDEV_TYPE_FILE;
        else
                return NULL;

        nvlist_alloc(&p_vdev, NV_UNIQUE_NAME, 0);
        nvlist_add_string(p_vdev, ZPOOL_CONFIG_PATH, psz_path);
        nvlist_add_string(p_vdev, ZPOOL_CONFIG_TYPE, psz_type);
        nvlist_add_uint64(p_vdev, ZPOOL_CONFIG_IS_LOG, 0);
        if(!strcmp(psz_type, VDEV_TYPE_DISK))
                nvlist_add_uint64(p_vdev, ZPOOL_CONFIG_WHOLE_DISK, 0);

        return p_vdev;
}
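A brief, hypothetical caller for the helper above; the device path is only an example:
static void example_use_leaf_vdev(void)
{
        nvlist_t *p_leaf;

        /* Build a leaf-vdev nvlist for a block device and release it. */
        p_leaf = lzwu_make_leaf_vdev("/dev/dsk/c0t0d0s0");
        if(p_leaf == NULL)
                return;

        /* ... hand p_leaf to the pool-construction code here ... */

        nvlist_free(p_leaf);
}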
Example No. 9
int
topo_prop_getpgrp(tnode_t *node, const char *pgname, nvlist_t **pgrp,
    int *err)
{
	int ret;
	topo_hdl_t *thp = node->tn_hdl;
	nvlist_t *nvl, *pvnvl;
	topo_pgroup_t *pg;
	topo_propval_t *pv;
	topo_proplist_t *pvl;

	if (topo_hdl_nvalloc(thp, &nvl, 0) != 0) {
		*err = ETOPO_NOMEM;
		return (-1);
	}

	topo_node_lock(node);
	for (pg = topo_list_next(&node->tn_pgroups); pg != NULL;
	    pg = topo_list_next(pg)) {

		if (strcmp(pgname, pg->tpg_info->tpi_name) != 0)
			continue;

		if (nvlist_add_string(nvl, TOPO_PROP_GROUP_NAME,
		    pg->tpg_info->tpi_name) != 0 ||
		    nvlist_add_string(nvl, TOPO_PROP_GROUP_NSTAB,
		    topo_stability2name(pg->tpg_info->tpi_namestab)) != 0 ||
		    nvlist_add_string(nvl, TOPO_PROP_GROUP_DSTAB,
		    topo_stability2name(pg->tpg_info->tpi_datastab)) != 0 ||
		    nvlist_add_int32(nvl, TOPO_PROP_GROUP_VERSION,
		    pg->tpg_info->tpi_version) != 0)
			return (get_pgrp_seterror(node, nvl, err,
			    ETOPO_PROP_NVL));

		for (pvl = topo_list_next(&pg->tpg_pvals); pvl != NULL;
		    pvl = topo_list_next(pvl)) {

			pv = pvl->tp_pval;
			if (prop_val_add(node, &pvnvl, pv, err) < 0) {
				return (get_pgrp_seterror(node, nvl, err,
				    *err));
			}
			if ((ret = nvlist_add_nvlist(nvl, TOPO_PROP_VAL,
			    pvnvl)) != 0) {
				nvlist_free(pvnvl);
				return (get_pgrp_seterror(node, nvl, err, ret));
			}

			nvlist_free(pvnvl);
		}
		topo_node_unlock(node);
		*pgrp = nvl;
		return (0);
	}

	topo_node_unlock(node);
	*err = ETOPO_PROP_NOENT;
	return (-1);
}
Example No. 10
int
cap_getaddrinfo(cap_channel_t *chan, const char *hostname, const char *servname,
    const struct addrinfo *hints, struct addrinfo **res)
{
	struct addrinfo *firstai, *prevai, *curai;
	unsigned int ii;
	const nvlist_t *nvlai;
	nvlist_t *nvl;
	int error;

	nvl = nvlist_create(0);
	nvlist_add_string(nvl, "cmd", "getaddrinfo");
	nvlist_add_string(nvl, "hostname", hostname);
	nvlist_add_string(nvl, "servname", servname);
	if (hints != NULL) {
		nvlist_add_number(nvl, "hints.ai_flags",
		    (uint64_t)hints->ai_flags);
		nvlist_add_number(nvl, "hints.ai_family",
		    (uint64_t)hints->ai_family);
		nvlist_add_number(nvl, "hints.ai_socktype",
		    (uint64_t)hints->ai_socktype);
		nvlist_add_number(nvl, "hints.ai_protocol",
		    (uint64_t)hints->ai_protocol);
	}
	nvl = cap_xfer_nvlist(chan, nvl);
	if (nvl == NULL)
		return (EAI_MEMORY);
	if (nvlist_get_number(nvl, "error") != 0) {
		error = (int)nvlist_get_number(nvl, "error");
		nvlist_destroy(nvl);
		return (error);
	}

	nvlai = NULL;
	firstai = prevai = curai = NULL;
	for (ii = 0; ; ii++) {
		if (!nvlist_existsf_nvlist(nvl, "res%u", ii))
			break;
		nvlai = nvlist_getf_nvlist(nvl, "res%u", ii);
		curai = addrinfo_unpack(nvlai);
		if (curai == NULL)
			break;
		if (prevai != NULL)
			prevai->ai_next = curai;
		else if (firstai == NULL)
			firstai = curai;
		prevai = curai;
	}
	nvlist_destroy(nvl);
	if (curai == NULL && nvlai != NULL) {
		if (firstai != NULL)
			freeaddrinfo(firstai);
		return (EAI_MEMORY);
	}

	*res = firstai;
	return (0);
}
Example No. 11
static void
inhm_dimmlist(uint32_t node, nvlist_t *nvl)
{
	nvlist_t **dimmlist;
	nvlist_t **newchannel;
	int nchannels = CHANNELS_PER_MEMORY_CONTROLLER;
	int nd;
	uint8_t i, j;
	nhm_dimm_t **dimmpp;
	nhm_dimm_t *dimmp;

	dimmlist =  kmem_zalloc(sizeof (nvlist_t *) * MAX_DIMMS_PER_CHANNEL,
	    KM_SLEEP);
	newchannel = kmem_zalloc(sizeof (nvlist_t *) * nchannels, KM_SLEEP);
	dimmpp = &nhm_dimms[node * CHANNELS_PER_MEMORY_CONTROLLER *
	    MAX_DIMMS_PER_CHANNEL];
	(void) nvlist_add_string(nvl, "memory-policy",
	    closed_page ? "closed-page" : "open-page");
	(void) nvlist_add_string(nvl, "memory-ecc",
	    ecc_enabled ? lockstep[node] ? "x8" : "x4" : "no");
	for (i = 0; i < nchannels; i++) {
		(void) nvlist_alloc(&newchannel[i], NV_UNIQUE_NAME, KM_SLEEP);
		(void) nvlist_add_string(newchannel[i], "channel-mode",
		    CHANNEL_DISABLED(MC_STATUS_RD(node), i) ? "disabled" :
		    i != 2 && lockstep[node] ? "lockstep" :
		    i != 2 && mirror_mode[node] ?
		    REDUNDANCY_LOSS(MC_RAS_STATUS_RD(node)) ?
		    "redundancy-loss" : "mirror" :
		    i == 2 && spare_channel[node] &&
		    !REDUNDANCY_LOSS(MC_RAS_STATUS_RD(node)) ? "spare" :
		    "independent");
		nd = 0;
		for (j = 0; j < MAX_DIMMS_PER_CHANNEL; j++) {
			dimmp = *dimmpp;
			if (dimmp != NULL) {
				dimmlist[nd] = inhm_dimm(dimmp, node, i,
				    (uint32_t)j);
				nd++;
			}
			dimmpp++;
		}
		if (nd) {
			(void) nvlist_add_nvlist_array(newchannel[i],
			    "memory-dimms", dimmlist, nd);
			for (j = 0; j < nd; j++)
				nvlist_free(dimmlist[j]);
		}
	}
	(void) nvlist_add_nvlist_array(nvl, MCINTEL_NVLIST_MC, newchannel,
	    nchannels);
	for (i = 0; i < nchannels; i++)
		nvlist_free(newchannel[i]);
	kmem_free(dimmlist, sizeof (nvlist_t *) * MAX_DIMMS_PER_CHANNEL);
	kmem_free(newchannel, sizeof (nvlist_t *) * nchannels);
}
Example No. 12
static void
signal_devtree(void)
{
	nvlist_t *nvl;
	char *packed_nvl;
	size_t nvl_size;
	int status;

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME_TYPE, NULL) != 0)
		return;

	/*
	 * Right now (Aug. 2007) snowbird is the only other platform
	 * which uses this event.  Since that's a sun4u platform and
	 * this is sun4v we do not have to worry about possible confusion
	 * or interference between the two by grabbing this event for
	 * our own use here.  This event is consumed by the devtree
	 * plug-in.  The event signals the plug-in to re-run its
	 * cpu initialization function, which will cause it to add
	 * additional information to the cpu devtree nodes (particularly,
	 * the administrative state of the cpus.)
	 */
	if (nvlist_add_string(nvl, PICLEVENTARG_EVENT_NAME,
	    PICLEVENT_CPU_STATE_CHANGE) != 0) {
		nvlist_free(nvl);
		return;
	}

	/*
	 * The devtree plug-in needs to see a devfs path argument for
	 * any event it considers.  We supply one here which is essentially
	 * a dummy since it is not processed by the devtree plug-in for
	 * this event.
	 */
	if (nvlist_add_string(nvl, PICLEVENTARG_DEVFS_PATH, "/cpu") != 0) {
		nvlist_free(nvl);
		return;
	}
	packed_nvl = NULL;
	if (nvlist_pack(nvl, &packed_nvl, &nvl_size, NV_ENCODE_NATIVE,
	    0) != 0) {
		nvlist_free(nvl);
		return;
	}
	if ((status = ptree_post_event(PICLEVENT_CPU_STATE_CHANGE,
	    packed_nvl, nvl_size, mdesc_ev_completion_handler)) !=
	    PICL_SUCCESS) {
		nvlist_free(nvl);
		syslog(LOG_WARNING,
		    "signal_devtree: can't post cpu event: %d\n", status);
	}
}
Example No. 13
boolean_t
test_add_progress_handler()
{
	boolean_t	retval = B_FALSE;
	char		*loggername = "mylogger";
	const char	*host = "localhost";
	int		port = 2333;
	nvlist_t	*handler_args = NULL;
	logger_t	*pLogger = NULL;


	printf("Test: test_add_progress_handler\n");
	pLogger = (logger_t *)test_setup();
	if (pLogger == NULL) {
		printf("Failed to get a Logger\n");
		printf("Cannot proceed with test\n");
		return (retval);
	}

	if (nvlist_alloc(&handler_args, NVATTRS, 0) != 0) {
		printf("Cannot allocate space for handler args\n");
		return (retval);
	}

	if (handler_args == NULL) {
		printf("nvlist_alloc failed.\n");
		printf("Cannot proceed with test\n");
		return (retval);
	}

	/* Create a list of arguments for a ProgressHandler */
	if ((nvlist_add_string(handler_args, HANDLER, PROGRESS_HANDLER) != 0) ||
	    (nvlist_add_string(handler_args, HOST, host) != 0) ||
	    (nvlist_add_int32(handler_args, PORT, port) != 0)) {
		nvlist_free(handler_args);
		printf("Cannot create handler args\n");
		return (retval);
	}

	retval = add_handler(pLogger, handler_args, LOGGING_PROGRESS_HDLR);
	nvlist_free(handler_args);
	if (!retval) {
		printf("test_add_progress_handler: Fail\n");
	} else {
		printf("test_add_progress_handler: Pass\n");
		retval = B_TRUE;
	}

	Py_XDECREF(pLogger);
	return (retval);
}
Example No. 14
static tnode_t *
create_node(topo_mod_t *mod, tnode_t *pnode, nvlist_t *auth, char *name,
    topo_instance_t inst, uint16_t smbios_id)
{
	nvlist_t *fmri;
	tnode_t *cnode;

	if (mkrsrc(mod, pnode, name, inst, auth, &fmri) != 0) {
		whinge(mod, NULL, "create_node: mkrsrc failed\n");
		return (NULL);
	}

	if (FM_AWARE_SMBIOS(mod)) {
		id_t phys_cpu_smbid;
		int perr = 0;
		const char *serial = NULL;
		const char *part = NULL;
		const char *rev = NULL;

		phys_cpu_smbid = smbios_id;
		serial = chip_serial_smbios_get(mod, phys_cpu_smbid);
		part = chip_part_smbios_get(mod, phys_cpu_smbid);
		rev = chip_rev_smbios_get(mod, phys_cpu_smbid);

		perr += nvlist_add_string(fmri, FM_FMRI_HC_SERIAL_ID,
		    serial);
		perr += nvlist_add_string(fmri, FM_FMRI_HC_PART,
		    part);
		perr += nvlist_add_string(fmri, FM_FMRI_HC_REVISION,
		    rev);

		if (perr != 0)
			whinge(mod, NULL,
			    "create_node: nvlist_add_string failed\n");

		topo_mod_strfree(mod, (char *)serial);
		topo_mod_strfree(mod, (char *)part);
		topo_mod_strfree(mod, (char *)rev);
	}

	cnode = topo_node_bind(mod, pnode, name, inst, fmri);

	nvlist_free(fmri);
	if (cnode == NULL) {
		whinge(mod, NULL, "create_node: node bind failed"
		    " for %s %d\n", name, (int)inst);
	}

	return (cnode);
}
Example No. 15
nvlist_t *
cmd_fault_add_location(fmd_hdl_t *hdl, nvlist_t *flt, const char *locstr)
{
	char *t, *s;

	if (nvlist_lookup_string(flt, FM_FAULT_LOCATION, &t) == 0)
		return (flt); /* already has location value */

	/* Replace occurrence of ": " with "/" to avoid confusing ILOM. */
	t = fmd_hdl_zalloc(hdl, strlen(locstr) + 1, FMD_SLEEP);
	s = strstr(locstr, ": ");
	if (s != NULL) {
		(void) strncpy(t, locstr, s - locstr);
		(void) strcat(t, "/");
		(void) strcat(t, s + 2);
	} else {
		(void) strcpy(t, locstr);
	}

	/* Also, remove any J number from end of this string. */
	s = strstr(t, "/J");
	if (s != NULL)
		*s = '\0';

	if (nvlist_add_string(flt, FM_FAULT_LOCATION, t) != 0)
		fmd_hdl_error(hdl, "unable to alloc location for fault\n");
	fmd_hdl_free(hdl, t, strlen(locstr) + 1);
	return (flt);
}
Example No. 16
int
cap_getnameinfo(cap_channel_t *chan, const struct sockaddr *sa, socklen_t salen,
    char *host, size_t hostlen, char *serv, size_t servlen, int flags)
{
	nvlist_t *nvl;
	int error;

	nvl = nvlist_create(0);
	nvlist_add_string(nvl, "cmd", "getnameinfo");
	nvlist_add_number(nvl, "hostlen", (uint64_t)hostlen);
	nvlist_add_number(nvl, "servlen", (uint64_t)servlen);
	nvlist_add_binary(nvl, "sa", sa, (size_t)salen);
	nvlist_add_number(nvl, "flags", (uint64_t)flags);
	nvl = cap_xfer_nvlist(chan, nvl, 0);
	if (nvl == NULL)
		return (EAI_MEMORY);
	if (nvlist_get_number(nvl, "error") != 0) {
		error = (int)nvlist_get_number(nvl, "error");
		nvlist_destroy(nvl);
		return (error);
	}

	if (host != NULL)
		strlcpy(host, nvlist_get_string(nvl, "host"), hostlen + 1);
	if (serv != NULL)
		strlcpy(serv, nvlist_get_string(nvl, "serv"), servlen + 1);
	nvlist_destroy(nvl);
	return (0);
}
Example No. 17
struct hostent *
cap_gethostbyaddr(cap_channel_t *chan, const void *addr, socklen_t len,
    int type)
{
	struct hostent *hp;
	nvlist_t *nvl;

	nvl = nvlist_create(0);
	nvlist_add_string(nvl, "cmd", "gethostbyaddr");
	nvlist_add_binary(nvl, "addr", addr, (size_t)len);
	nvlist_add_number(nvl, "family", (uint64_t)type);
	nvl = cap_xfer_nvlist(chan, nvl, 0);
	if (nvl == NULL) {
		h_errno = NO_RECOVERY;
		return (NULL);
	}
	if (nvlist_get_number(nvl, "error") != 0) {
		h_errno = (int)nvlist_get_number(nvl, "error");
		nvlist_destroy(nvl);
		return (NULL);
	}
	hp = hostent_unpack(nvl, &hent);
	nvlist_destroy(nvl);
	return (hp);
}
Example No. 18
/*
 * Set the pool-wide health based on the vdev state of the root vdev.
 */
int
set_pool_health(nvlist_t *config)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;
	char *health;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
	    (uint64_t **)&vs, &vsc) == 0);

	switch (vs->vs_state) {

	case VDEV_STATE_CLOSED:
	case VDEV_STATE_CANT_OPEN:
	case VDEV_STATE_OFFLINE:
		health = dgettext(TEXT_DOMAIN, "FAULTED");
		break;

	case VDEV_STATE_DEGRADED:
		health = dgettext(TEXT_DOMAIN, "DEGRADED");
		break;

	case VDEV_STATE_HEALTHY:
		health = dgettext(TEXT_DOMAIN, "ONLINE");
		break;

	default:
		abort();
	}

	return (nvlist_add_string(config, ZPOOL_CONFIG_POOL_HEALTH, health));
}
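For completeness, a small sketch (not from the original file) of how a consumer could read the value back out of the same config nvlist:
static void
print_pool_health(nvlist_t *config)
{
	char *health;

	/* Read back the string added by set_pool_health(). */
	if (nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_HEALTH,
	    &health) == 0)
		(void) printf("pool health: %s\n", health);
}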
Example No. 19
int
cap_pwd_limit_users(cap_channel_t *chan, const char * const *names,
    size_t nnames, uid_t *uids, size_t nuids)
{
	nvlist_t *limits, *users;
	char nvlname[64];
	unsigned int i;
	int n;

	if (cap_limit_get(chan, &limits) < 0)
		return (-1);
	if (limits == NULL) {
		limits = nvlist_create(0);
	} else {
		if (nvlist_exists_nvlist(limits, "users"))
			nvlist_free_nvlist(limits, "users");
	}
	users = nvlist_create(0);
	for (i = 0; i < nuids; i++) {
		n = snprintf(nvlname, sizeof(nvlname), "uid%u", i);
		assert(n > 0 && n < (int)sizeof(nvlname));
		nvlist_add_number(users, nvlname, (uint64_t)uids[i]);
	}
	for (i = 0; i < nnames; i++) {
		n = snprintf(nvlname, sizeof(nvlname), "name%u", i);
		assert(n > 0 && n < (int)sizeof(nvlname));
		nvlist_add_string(users, nvlname, names[i]);
	}
	nvlist_move_nvlist(limits, "users", users);
	return (cap_limit_set(chan, limits));
}
Example No. 20
void
libdiskmgt_add_str(nvlist_t *attrs, char *name, char *val, int *errp)
{
	if (*errp == 0) {
		*errp = nvlist_add_string(attrs, name, val);
	}
}
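The *errp convention above lets a caller chain several adds and check for failure once at the end. A minimal sketch, with attribute names made up for illustration:
static void
example_fill_attrs(void)
{
	nvlist_t *attrs;
	int err = 0;

	if (nvlist_alloc(&attrs, NV_UNIQUE_NAME, 0) != 0)
		return;

	/* Each call becomes a no-op once err is nonzero. */
	libdiskmgt_add_str(attrs, "ctype", "scsi", &err);
	libdiskmgt_add_str(attrs, "status", "online", &err);

	if (err != 0)
		(void) printf("attribute setup failed: %d\n", err);
	nvlist_free(attrs);
}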
Example No. 21
int
cap_grp_limit_groups(cap_channel_t *chan, const char * const *names,
    size_t nnames, gid_t *gids, size_t ngids)
{
	nvlist_t *limits, *groups;
	unsigned int i;
	char nvlname[64];
	int n;

	if (cap_limit_get(chan, &limits) < 0)
		return (-1);
	if (limits == NULL) {
		limits = nvlist_create(0);
	} else {
		if (nvlist_exists_nvlist(limits, "groups"))
			nvlist_free_nvlist(limits, "groups");
	}
	groups = nvlist_create(0);
	for (i = 0; i < ngids; i++) {
		n = snprintf(nvlname, sizeof(nvlname), "gid%u", i);
		assert(n > 0 && n < (int)sizeof(nvlname));
		nvlist_add_number(groups, nvlname, (uint64_t)gids[i]);
	}
	for (i = 0; i < nnames; i++) {
		n = snprintf(nvlname, sizeof(nvlname), "gid%u", i);
		assert(n > 0 && n < (int)sizeof(nvlname));
		nvlist_add_string(groups, nvlname, names[i]);
	}
	nvlist_move_nvlist(limits, "groups", groups);
	return (cap_limit_set(chan, limits));
}
Example No. 22
File: zfs_mod.c Project: pyavdr/zfs
static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
    dev_data_t *dp = data;
    char *path;
    uint_t c, children;
    nvlist_t **child;

    /*
     * First iterate over any children.
     */
    if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
                                   &child, &children) == 0) {
        for (c = 0; c < children; c++)
            zfs_iter_vdev(zhp, child[c], data);
        return;
    }

    /* once a vdev was matched and processed there is nothing left to do */
    if (dp->dd_found)
        return;

    /*
     * Match by GUID if available, otherwise fall back to devid or physical path
     */
    if (dp->dd_vdev_guid != 0) {
        uint64_t guid;

        if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
                                 &guid) != 0 || guid != dp->dd_vdev_guid) {
            return;
        }
        zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched on %llu", guid);
        dp->dd_found = B_TRUE;

    } else if (dp->dd_compare != NULL) {
        /*
         * NOTE: On Linux there is an event for partition, so unlike
         * illumos, substring matching is not required to accommodate
         * the partition suffix. An exact match will be present in
         * the dp->dd_compare value.
         */
        if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
                strcmp(dp->dd_compare, path) != 0) {
            return;
        }
        zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched %s on %s",
                    dp->dd_prop, path);
        dp->dd_found = B_TRUE;

        /* pass the new devid for use by replacing code */
        if (dp->dd_islabeled && dp->dd_new_devid != NULL) {
            (void) nvlist_add_string(nvl, "new_devid",
                                     dp->dd_new_devid);
        }
    }

    (dp->dd_func)(zhp, nvl, dp->dd_islabeled);
}
Example No. 23
static int
sw_add_optl_string(nvlist_t *nvl, char *name, char *val)
{
	if (val)
		return (nvlist_add_string(nvl, name, val) != 0);
	else
		return (0);
}
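A short, hypothetical use of this helper: because a NULL value counts as success, optional members can be added unconditionally and the results OR-ed together (the key names here are illustrative):
static int
sw_add_payload(nvlist_t *nvl, char *msg, char *stk)
{
	int err = 0;

	/* NULL values are silently skipped by sw_add_optl_string(). */
	err |= sw_add_optl_string(nvl, "message", msg);
	err |= sw_add_optl_string(nvl, "stack", stk);

	return (err != 0 ? -1 : 0);
}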
Example No. 24
int
cap_sysctlbyname(cap_channel_t *chan, const char *name, void *oldp,
    size_t *oldlenp, const void *newp, size_t newlen)
{
	nvlist_t *nvl;
	const uint8_t *retoldp;
	uint8_t operation;
	size_t oldlen;

	operation = 0;
	if (oldp != NULL)
		operation |= CAP_SYSCTL_READ;
	if (newp != NULL)
		operation |= CAP_SYSCTL_WRITE;

	nvl = nvlist_create(0);
	nvlist_add_string(nvl, "cmd", "sysctl");
	nvlist_add_string(nvl, "name", name);
	nvlist_add_number(nvl, "operation", (uint64_t)operation);
	if (oldp == NULL && oldlenp != NULL)
		nvlist_add_null(nvl, "justsize");
	else if (oldlenp != NULL)
		nvlist_add_number(nvl, "oldlen", (uint64_t)*oldlenp);
	if (newp != NULL)
		nvlist_add_binary(nvl, "newp", newp, newlen);
	nvl = cap_xfer_nvlist(chan, nvl, 0);
	if (nvl == NULL)
		return (-1);
	if (nvlist_get_number(nvl, "error") != 0) {
		errno = (int)nvlist_get_number(nvl, "error");
		nvlist_destroy(nvl);
		return (-1);
	}

	if (oldp == NULL && oldlenp != NULL) {
		*oldlenp = (size_t)nvlist_get_number(nvl, "oldlen");
	} else if (oldp != NULL) {
		retoldp = nvlist_get_binary(nvl, "oldp", &oldlen);
		memcpy(oldp, retoldp, oldlen);
		if (oldlenp != NULL)
			*oldlenp = oldlen;
	}
	nvlist_destroy(nvl);

	return (0);
}
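A hedged usage sketch: the channel is assumed to have been opened for the sysctl service elsewhere, and the MIB name is only an example of a read-only query:
static void
print_ostype(cap_channel_t *chan)
{
	char buf[64];
	size_t len = sizeof(buf);

	/* Read-only: oldp/oldlenp are set, no new value is supplied. */
	if (cap_sysctlbyname(chan, "kern.ostype", buf, &len, NULL, 0) == -1)
		err(1, "cap_sysctlbyname");
	printf("%.*s\n", (int)len, buf);
}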
Example No. 25
static int
fmri_prop(topo_hdl_t *thp, nvlist_t *rsrc, const char *pgname,
    const char *pname, nvlist_t *args, nvlist_t **prop,
    int *err)
{
	int rv;
	nvlist_t *in = NULL;
	tnode_t *rnode;
	char *scheme;

	if (nvlist_lookup_string(rsrc, FM_FMRI_SCHEME, &scheme) != 0)
		return (set_error(thp, ETOPO_FMRI_MALFORM, err,
		    TOPO_METH_PROP_GET, in));

	if ((rnode = topo_hdl_root(thp, scheme)) == NULL)
		return (set_error(thp, ETOPO_METHOD_NOTSUP, err,
		    TOPO_METH_PROP_GET, in));

	if (topo_hdl_nvalloc(thp, &in, NV_UNIQUE_NAME) != 0)
		return (set_error(thp, ETOPO_FMRI_NVL, err,
		    TOPO_METH_PROP_GET, in));

	rv = nvlist_add_nvlist(in, TOPO_PROP_RESOURCE, rsrc);
	rv |= nvlist_add_string(in, TOPO_PROP_GROUP, pgname);
	rv |= nvlist_add_string(in, TOPO_PROP_VAL_NAME, pname);
	if (args != NULL)
		rv |= nvlist_add_nvlist(in, TOPO_PROP_PARGS, args);
	if (rv != 0)
		return (set_error(thp, ETOPO_FMRI_NVL, err,
		    TOPO_METH_PROP_GET, in));

	*prop = NULL;
	rv = topo_method_invoke(rnode, TOPO_METH_PROP_GET,
	    TOPO_METH_PROP_GET_VERSION, in, prop, err);

	nvlist_free(in);

	if (rv != 0)
		return (-1); /* *err is set for us */

	if (*prop == NULL)
		return (set_error(thp, ETOPO_PROP_NOENT, err,
		    TOPO_METH_PROP_GET, NULL));
	return (0);
}
Example No. 26
/* Ensure that "None" is an option in the digest list, and select it */
static kv_status_t
iser_handle_digest(nvpair_t *choices, const idm_kv_xlate_t *ikvx,
                   nvlist_t *request_nvl, nvlist_t *response_nvl, nvlist_t *negotiated_nvl)
{
    kv_status_t		kvrc = KV_VALUE_ERROR;
    int			nvrc = 0;
    nvpair_t		*digest_choice;
    char			*digest_choice_string;

    /*
     * Loop through all digest choices.  We need to enforce no
     * digest ("None") for both header and data digest.  If we find our
     * required value, add the value to our negotiated values list
     * and respond with that value in the login response. If not,
     * indicate a value error for the iSCSI layer to work with.
     */
    digest_choice = idm_get_next_listvalue(choices, NULL);
    while (digest_choice != NULL) {
        nvrc = nvpair_value_string(digest_choice,
                                   &digest_choice_string);
        ASSERT(nvrc == 0);

        if (strcasecmp(digest_choice_string, "none") == 0) {

            /* Add to negotiated values list */
            nvrc = nvlist_add_string(negotiated_nvl,
                                     ikvx->ik_key_name, digest_choice_string);
            kvrc = idm_nvstat_to_kvstat(nvrc);
            if (nvrc == 0) {
                /* Add to login response list */
                nvrc = nvlist_add_string(response_nvl,
                                         ikvx->ik_key_name, digest_choice_string);
                kvrc = idm_nvstat_to_kvstat(nvrc);
                /* Remove from the request (we've handled it) */
                (void) nvlist_remove_all(request_nvl,
                                         ikvx->ik_key_name);
            }
            break;
        }
        digest_choice = idm_get_next_listvalue(choices,
                                               digest_choice);
    }

    return (kvrc);
}
Example No. 27
int
fmd_fmri_expand(nvlist_t *nvl)
{
	uint8_t version;
	uint32_t cpuid;
	uint64_t serialid;
	char *serstr, serbuf[21]; /* sizeof (UINT64_MAX) + '\0' */
	int rc, err;
	topo_hdl_t *thp;

	if (nvlist_lookup_uint8(nvl, FM_VERSION, &version) != 0 ||
	    nvlist_lookup_uint32(nvl, FM_FMRI_CPU_ID, &cpuid) != 0)
		return (fmd_fmri_set_errno(EINVAL));

	/*
	 * If the cpu-scheme topology exports this method expand(), invoke it.
	 */
	if ((thp = fmd_fmri_topo_hold(TOPO_VERSION)) == NULL)
		return (fmd_fmri_set_errno(EINVAL));

	rc = topo_fmri_expand(thp, nvl, &err);
	fmd_fmri_topo_rele(thp);
	if (err != ETOPO_METHOD_NOTSUP)
		return (rc);

	if (version == CPU_SCHEME_VERSION0) {
		if ((rc = nvlist_lookup_uint64(nvl, FM_FMRI_CPU_SERIAL_ID,
		    &serialid)) != 0) {
			if (rc != ENOENT)
				return (fmd_fmri_set_errno(rc));

			if (cpu_get_serialid_V0(cpuid, &serialid) != 0)
				return (-1); /* errno is set for us */

			if ((rc = nvlist_add_uint64(nvl, FM_FMRI_CPU_SERIAL_ID,
			    serialid)) != 0)
				return (fmd_fmri_set_errno(rc));
		}
	} else if (version == CPU_SCHEME_VERSION1) {
		if ((rc = nvlist_lookup_string(nvl, FM_FMRI_CPU_SERIAL_ID,
		    &serstr)) != 0) {
			if (rc != ENOENT)
				return (fmd_fmri_set_errno(rc));

			if (cpu_get_serialid_V1(cpuid, serbuf, 21) != 0)
				return (0); /* Serial number is optional */

			if ((rc = nvlist_add_string(nvl, FM_FMRI_CPU_SERIAL_ID,
			    serbuf)) != 0)
				return (fmd_fmri_set_errno(rc));
		}
	} else {
		return (fmd_fmri_set_errno(EINVAL));
	}

	return (0);
}
Example No. 28
int
main(void) {
	nvlist_t *nvl;

	nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0);
	nvlist_add_string(nvl, "mountpoint", "/mnt/altroot");
	TM_perform_transfer(nvl, show_progress);
	nvlist_free(nvl);
	return (0);
}
Example No. 29
void
cap_endgrent(cap_channel_t *chan)
{
	nvlist_t *nvl;

	nvl = nvlist_create(0);
	nvlist_add_string(nvl, "cmd", "endgrent");
	/* Ignore any errors, we have no way to report them. */
	nvlist_destroy(cap_xfer_nvlist(chan, nvl, 0));
}
Example No. 30
static void
cap_set_end_pwent(cap_channel_t *chan, const char *cmd)
{
	nvlist_t *nvl;

	nvl = nvlist_create(0);
	nvlist_add_string(nvl, "cmd", cmd);
	/* Ignore any errors, we have no way to report them. */
	nvlist_destroy(cap_xfer_nvlist(chan, nvl));
}