Example #1
/*
 * Build a height balanced tree of nodes consisting of a device id and
 * an inode number.  Duplicate nodes are not stored.  Return 1 if
 * node was added to the tree, return -1 upon error, otherwise return 0.
 */
int
add_tnode(avl_tree_t **stree, dev_t device, ino_t inode)
{
	tree_node_t	*tnode;
	avl_index_t	where;

	/*
	 * Create an AVL search tree to keep track of inodes
	 * visited/reported.
	 */
	if (*stree == NULL) {
		if ((*stree = calloc(1, sizeof (avl_tree_t)))
		    == NULL) {
			return (-1);
		}
		avl_create(*stree,
		    tnode_compare,
		    sizeof (tree_node_t),
		    OFFSETOF(tree_node_t, avl_link));
	}

	/* Initialize the node */
	if ((tnode = calloc(1, sizeof (*tnode))) == NULL) {
		return (-1);
	}
	tnode->node_dev = device;
	tnode->node_ino = inode;

	/* If the node is not already in the tree, then insert it */
	if (avl_find(*stree, tnode, &where) == NULL) {
		avl_insert(*stree, tnode, where);
		return (1);
	}

	/* The node is already in the tree, so just free it */
	free(tnode);
	return (0);
}
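The tnode_compare callback registered above is not part of the example. Solaris-style AVL comparators take two node pointers and must return exactly -1, 0, or 1. A minimal sketch; the field names come from the example, but the body is an assumption, not the original helper:

static int
tnode_compare(const void *n1, const void *n2)
{
	const tree_node_t *t1 = n1;
	const tree_node_t *t2 = n2;

	/* Order by device id first, then by inode number. */
	if (t1->node_dev < t2->node_dev)
		return (-1);
	if (t1->node_dev > t2->node_dev)
		return (1);
	if (t1->node_ino < t2->node_ino)
		return (-1);
	if (t1->node_ino > t2->node_ino)
		return (1);
	return (0);
}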
Example #2
uu_avl_t *
uu_avl_create(uu_avl_pool_t *pp, void *parent, uint32_t flags)
{
	uu_avl_t *ap, *next, *prev;

	if (flags & ~UU_AVL_DEBUG) {
		uu_set_error(UU_ERROR_UNKNOWN_FLAG);
		return (NULL);
	}

	ap = uu_zalloc(sizeof (*ap));
	if (ap == NULL) {
		uu_set_error(UU_ERROR_NO_MEMORY);
		return (NULL);
	}

	ap->ua_pool = pp;
	ap->ua_parent_enc = UU_PTR_ENCODE(parent);
	ap->ua_debug = pp->uap_debug || (flags & UU_AVL_DEBUG);
	ap->ua_index = (pp->uap_last_index = INDEX_NEXT(pp->uap_last_index));

	avl_create(&ap->ua_tree, &uu_avl_node_compare, pp->uap_objsize,
	    pp->uap_nodeoffset);

	ap->ua_null_walk.uaw_next = &ap->ua_null_walk;
	ap->ua_null_walk.uaw_prev = &ap->ua_null_walk;

	(void) pthread_mutex_lock(&pp->uap_lock);
	next = &pp->uap_null_avl;
	prev = UU_PTR_DECODE(next->ua_prev_enc);
	ap->ua_next_enc = UU_PTR_ENCODE(next);
	ap->ua_prev_enc = UU_PTR_ENCODE(prev);
	next->ua_prev_enc = UU_PTR_ENCODE(ap);
	prev->ua_next_enc = UU_PTR_ENCODE(ap);
	(void) pthread_mutex_unlock(&pp->uap_lock);

	return (ap);
}
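uu_avl_create() takes its object size, node offset, and comparator from the uu_avl_pool_t, which the example receives ready-made. A sketch of setting one up and tearing it down; my_node_t, its fields, and my_compare are hypothetical names for illustration:

typedef struct my_node {
	uu_avl_node_t	mn_link;	/* embedded AVL linkage */
	int		mn_key;
} my_node_t;

static int
my_compare(const void *l, const void *r, void *private)
{
	const my_node_t *ln = l;
	const my_node_t *rn = r;

	if (ln->mn_key < rn->mn_key)
		return (-1);
	return (ln->mn_key > rn->mn_key);
}

/* ... */
uu_avl_pool_t *pool;
uu_avl_t *avl;

pool = uu_avl_pool_create("my_pool", sizeof (my_node_t),
    offsetof(my_node_t, mn_link), my_compare, UU_DEFAULT);
avl = uu_avl_create(pool, NULL, UU_DEFAULT);
/* ... use the list ... */
uu_avl_destroy(avl);
uu_avl_pool_destroy(pool);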
Example #3
int main(void){
    char *line = NULL;
    size_t length = 0;
    ssize_t n;

    struct avl_table* table = avl_create(cmp_fn, NULL, NULL);

    while((n = getline(&line, &length, stdin)) >= 0){
        char* the_string = malloc(n + 1);
        if(the_string == NULL)
            return 1;
        strcpy(the_string, line);
        if(n > 0 && the_string[n-1] == '\n')
            the_string[n-1] = '\0';   /* strip the trailing newline */
        if(avl_insert(table, (void*)(the_string)) == NULL){
            /* avl_insert returns NULL on success, so this is a new line */
            printf("%s", line);
            fflush(stdout);
        } else {
            free(the_string);         /* duplicate: not stored in the tree */
        }
    }
    free(line);

    /* balance_tree(&root, cmp_fn); */
    /* printf("done second\n"); */
    /* write_dot_file("balanced.txt", root); */


    return 0;
}
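The cmp_fn comparator is not shown. In GNU libavl the comparison function takes a third param argument (the NULL passed to avl_create() above); since the tree stores C strings here, a plausible definition simply wraps strcmp:

static int
cmp_fn(const void *a, const void *b, void *param)
{
    (void) param;               /* unused */
    return strcmp((const char *) a, (const char *) b);
}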
Example #4
int
zfs_iter_snapshots_sorted(zfs_handle_t *zhp, zfs_iter_f callback, void *data)
{
	int ret = 0;
	zfs_node_t *node;
	avl_tree_t avl;
	void *cookie = NULL;

	avl_create(&avl, zfs_snapshot_compare,
	    sizeof (zfs_node_t), offsetof(zfs_node_t, zn_avlnode));

	ret = zfs_iter_snapshots(zhp, B_FALSE, zfs_sort_snaps, &avl);

	for (node = avl_first(&avl); node != NULL; node = AVL_NEXT(&avl, node))
		ret |= callback(node->zn_handle, data);

	while ((node = avl_destroy_nodes(&avl, &cookie)) != NULL)
		free(node);

	avl_destroy(&avl);

	return (ret);
}
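Neither zfs_sort_snaps nor zfs_snapshot_compare appears in the example. A hedged sketch of what they plausibly do: the iterator callback wraps each snapshot handle in a zfs_node_t and adds it to the tree, and the comparator orders snapshots by creation transaction group. zfs_prop_get_int and ZFS_PROP_CREATETXG are real libzfs interfaces, but this pairing is an assumption:

static int
zfs_sort_snaps(zfs_handle_t *zhp, void *data)
{
	avl_tree_t *avl = data;
	zfs_node_t *node = calloc(1, sizeof (zfs_node_t));

	if (node == NULL)
		return (ENOMEM);
	node->zn_handle = zhp;
	avl_add(avl, node);
	return (0);
}

static int
zfs_snapshot_compare(const void *larg, const void *rarg)
{
	zfs_handle_t *l = ((zfs_node_t *)larg)->zn_handle;
	zfs_handle_t *r = ((zfs_node_t *)rarg)->zn_handle;
	uint64_t lcreate, rcreate;

	/* Sort according to creation time (TXG). */
	lcreate = zfs_prop_get_int(l, ZFS_PROP_CREATETXG);
	rcreate = zfs_prop_get_int(r, ZFS_PROP_CREATETXG);

	if (lcreate < rcreate)
		return (-1);
	if (lcreate > rcreate)
		return (1);
	return (0);
}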
Example #5
int main(int argc, char *argv[])
{
	int num = 1000000;
	avl_table *root;

	if (argc > 1)
		num = atoi(argv[1]);

	uint64_t seed = time(NULL);
	RAND_NR_INIT(u, v, w, seed);

	root = avl_create(node_cmp, NULL, &avl_allocator_default);

	ulib_timer_t timer;
	timer_start(&timer);
	for (int i = 0; i < num; ++i) {
		node *t = new node;
		t->key = myrand();
		avl_insert(root, t);
	}
	printf("Inserting 1M elems elapsed: %f\n", timer_stop(&timer));

	timer_start(&timer);
	for (int i = 0; i < num; ++i) {
		node t;
		t.key = myrand();
		avl_find(root, &t);
	}
	printf("Searching 10M elems elapsed: %f\n", timer_stop(&timer));

	timer_start(&timer);
	avl_destroy(root, node_destroy);
	printf("Deleting 1M elems elapsed: %f\n", timer_stop(&timer));

	return 0;
}
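node_cmp and node_destroy are left out of the benchmark. Assuming the node type pairs a key with internally managed tree linkage (the key field name is taken from the t->key usage above), plausible definitions for this libavl-style API are:

struct node {
	uint64_t key;
};

static int
node_cmp(const void *a, const void *b, void *param)
{
	const node *x = (const node *) a;
	const node *y = (const node *) b;
	(void) param;

	if (x->key < y->key)
		return -1;
	return x->key > y->key;
}

static void
node_destroy(void *item, void *param)
{
	(void) param;
	delete (node *) item;	/* nodes were allocated with new above */
}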
Example #6
/*ARGSUSED*/
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
    znode_t *zp = buf;

    inode_init_once(ZTOI(zp));
    list_link_init(&zp->z_link_node);

    mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
    rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
    rw_init(&zp->z_name_lock, NULL, RW_DEFAULT, NULL);
    mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
    rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);

    mutex_init(&zp->z_range_lock, NULL, MUTEX_DEFAULT, NULL);
    avl_create(&zp->z_range_avl, zfs_range_compare,
               sizeof (rl_t), offsetof(rl_t, r_node));

    zp->z_dirlocks = NULL;
    zp->z_acl_cached = NULL;
    zp->z_xattr_cached = NULL;
    zp->z_moved = 0;
    return (0);
}
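The range-lock comparator zfs_range_compare is referenced here (and again in Example #9) but not defined. A minimal sketch, assuming rl_t records the starting byte offset of the locked range in an r_off field:

static int
zfs_range_compare(const void *arg1, const void *arg2)
{
	const rl_t *rl1 = arg1;
	const rl_t *rl2 = arg2;

	/* Order range locks by starting offset. */
	if (rl1->r_off > rl2->r_off)
		return (1);
	if (rl1->r_off < rl2->r_off)
		return (-1);
	return (0);
}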
Example #7
/* New session */
pppt_sess_t *
pppt_sess_lookup_create(scsi_devid_desc_t *lport_devid,
    scsi_devid_desc_t *rport_devid, stmf_remote_port_t *rport,
    uint64_t session_id, stmf_status_t *statusp)
{
	pppt_tgt_t		*tgt;
	pppt_sess_t		*ps;
	stmf_scsi_session_t	*ss;
	pppt_sess_t		tmp_ps;
	stmf_scsi_session_t	tmp_ss;
	*statusp = STMF_SUCCESS;

	PPPT_GLOBAL_LOCK();

	/*
	 * Look for existing session for this ID
	 */
	ps = pppt_sess_lookup_locked(session_id, lport_devid, rport);

	if (ps != NULL) {
		PPPT_GLOBAL_UNLOCK();
		return (ps);
	}

	/*
	 * No session with that ID, look for another session corresponding
	 * to the same IT nexus.
	 */
	tgt = pppt_tgt_lookup_locked(lport_devid);
	if (tgt == NULL) {
		*statusp = STMF_NOT_FOUND;
		PPPT_GLOBAL_UNLOCK();
		return (NULL);
	}

	mutex_enter(&tgt->target_mutex);
	if (tgt->target_state != TS_STMF_ONLINE) {
		*statusp = STMF_NOT_FOUND;
		mutex_exit(&tgt->target_mutex);
		PPPT_GLOBAL_UNLOCK();
		/* Can't create session to offline target */
		return (NULL);
	}

	bzero(&tmp_ps, sizeof (tmp_ps));
	bzero(&tmp_ss, sizeof (tmp_ss));
	tmp_ps.ps_stmf_sess = &tmp_ss;
	tmp_ss.ss_rport = rport;

	/*
	 * Look for an existing session on this IT nexus
	 */
	ps = avl_find(&tgt->target_sess_list, &tmp_ps, NULL);

	if (ps != NULL) {
		/*
		 * Now check the session ID.  It should not match because if
		 * it did we would have found it on the global session list.
		 * If the session ID in the command is higher than the existing
		 * session ID then we need to tear down the existing session.
		 */
		mutex_enter(&ps->ps_mutex);
		ASSERT(ps->ps_session_id != session_id);
		if (ps->ps_session_id > session_id) {
			/* Invalid session ID */
			mutex_exit(&ps->ps_mutex);
			mutex_exit(&tgt->target_mutex);
			PPPT_GLOBAL_UNLOCK();
			*statusp = STMF_INVALID_ARG;
			return (NULL);
		} else {
			/* Existing session needs to be invalidated */
			if (!ps->ps_closed) {
				pppt_sess_close_locked(ps);
			}
		}
		mutex_exit(&ps->ps_mutex);

		/* Fallthrough and create new session */
	}

	/*
	 * Allocate and fill in pppt_session_t with the appropriate data
	 * for the protocol.
	 */
	ps = kmem_zalloc(sizeof (*ps), KM_SLEEP);

	/* Fill in session fields */
	ps->ps_target = tgt;
	ps->ps_session_id = session_id;

	ss = stmf_alloc(STMF_STRUCT_SCSI_SESSION, 0,
	    0);
	if (ss == NULL) {
		mutex_exit(&tgt->target_mutex);
		PPPT_GLOBAL_UNLOCK();
		kmem_free(ps, sizeof (*ps));
		*statusp = STMF_ALLOC_FAILURE;
		return (NULL);
	}

	ss->ss_rport_id = kmem_zalloc(sizeof (scsi_devid_desc_t) +
	    rport_devid->ident_length + 1, KM_SLEEP);
	bcopy(rport_devid, ss->ss_rport_id,
	    sizeof (scsi_devid_desc_t) + rport_devid->ident_length + 1);

	ss->ss_lport = tgt->target_stmf_lport;

	ss->ss_rport = stmf_remote_port_alloc(rport->rport_tptid_sz);
	bcopy(rport->rport_tptid, ss->ss_rport->rport_tptid,
	    rport->rport_tptid_sz);

	if (stmf_register_scsi_session(tgt->target_stmf_lport, ss) !=
	    STMF_SUCCESS) {
		mutex_exit(&tgt->target_mutex);
		PPPT_GLOBAL_UNLOCK();
		kmem_free(ss->ss_rport_id,
		    sizeof (scsi_devid_desc_t) + rport_devid->ident_length + 1);
		stmf_remote_port_free(ss->ss_rport);
		stmf_free(ss);
		kmem_free(ps, sizeof (*ps));
		*statusp = STMF_TARGET_FAILURE;
		return (NULL);
	}

	ss->ss_port_private = ps;
	mutex_init(&ps->ps_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ps->ps_cv, NULL, CV_DEFAULT, NULL);
	avl_create(&ps->ps_task_list, pppt_task_avl_compare,
	    sizeof (pppt_task_t), offsetof(pppt_task_t, pt_sess_ln));
	ps->ps_refcnt = 1;
	ps->ps_stmf_sess = ss;
	avl_add(&tgt->target_sess_list, ps);
	avl_add(&pppt_global.global_sess_list, ps);
	mutex_exit(&tgt->target_mutex);
	PPPT_GLOBAL_UNLOCK();
	stmf_trace("pppt", "New session %p", (void *)ps);

	return (ps);
}
Example #8
void deftounicode(strnumber glyph, strnumber unistr)
{
    char buf[SMALL_BUF_SIZE], *p;
    char buf2[SMALL_BUF_SIZE], *q;
    int valid_unistr;           /* 0: invalid; 1: unicode value; 2: string */
    int i, l;
    glyph_unicode_entry *gu, t;
    void **aa;

    p = makecstring(glyph);
    assert(strlen(p) < SMALL_BUF_SIZE);
    strcpy(buf, p);             /* copy the result to buf before next call of makecstring() */
    p = makecstring(unistr);
    while (*p == ' ')
        p++;                    /* ignore leading spaces */
    l = strlen(p);
    while (l > 0 && p[l - 1] == ' ')
        l--;                    /* ignore trailing spaces */
    valid_unistr = 1;           /* a unicode value is the most common case */
    for (i = 0; i < l; i++) {
        if (p[i] == ' ')
            valid_unistr = 2;   /* if a space occurs we treat this entry as a string */
        else if (!isXdigit(p[i])) {
            valid_unistr = 0;
            break;
        }
    }
    if (l == 0 || valid_unistr == 0 || strlen(buf) == 0
        || strcmp(buf, notdef) == 0) {
        pdftex_warn("ToUnicode: invalid parameter(s): `%s' => `%s'", buf, p);
        return;
    }
    if (glyph_unicode_tree == NULL) {
        glyph_unicode_tree =
            avl_create(comp_glyph_unicode_entry, NULL, &avl_xallocator);
        assert(glyph_unicode_tree != NULL);
    }
    t.name = buf;
    /* allow overriding existing entries */
    if ((gu = (glyph_unicode_entry *) avl_find(glyph_unicode_tree, &t)) != NULL) {
        if (gu->code == UNI_STRING) {
            assert(gu->unicode_seq != NULL);
            xfree(gu->unicode_seq);
        }
    } else {                    /* make new entry */
        gu = new_glyph_unicode_entry();
        gu->name = xstrdup(buf);
    }
    if (valid_unistr == 2) {    /* a string with space(s) */
        /* copy p to buf2, ignoring spaces */
        for (q = buf2; *p != 0; p++)
            if (*p != ' ')
                *q++ = *p;
        *q = 0;
        gu->code = UNI_STRING;
        gu->unicode_seq = xstrdup(buf2);
    } else {
        i = sscanf(p, "%lX", &(gu->code));
        assert(i == 1);
    }
    aa = avl_probe(glyph_unicode_tree, gu);
    assert(aa != NULL);
}
Example #9
/*
 * Allocate memory for a new zvol_state_t and setup the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;
	int error = 0;

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	if (zv == NULL)
		goto out;

	zv->zv_queue = blk_init_queue(zvol_request, &zv->zv_lock);
	if (zv->zv_queue == NULL)
		goto out_kmem;

#ifdef HAVE_ELEVATOR_CHANGE
	error = elevator_change(zv->zv_queue, "noop");
#endif /* HAVE_ELEVATOR_CHANGE */
	if (error) {
		printk("ZFS: Unable to set \"%s\" scheduler for zvol %s: %d\n",
		    "noop", name, error);
		goto out_queue;
	}

#ifdef HAVE_BLK_QUEUE_FLUSH
	blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
#else
	blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
#endif /* HAVE_BLK_QUEUE_FLUSH */

	zv->zv_disk = alloc_disk(ZVOL_MINORS);
	if (zv->zv_disk == NULL)
		goto out_queue;

	zv->zv_queue->queuedata = zv;
	zv->zv_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, MAXNAMELEN);

	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	zv->zv_znode.z_is_zvol = TRUE;

	spin_lock_init(&zv->zv_lock);
	list_link_init(&zv->zv_next);

	zv->zv_disk->major = zvol_major;
	zv->zv_disk->first_minor = (dev & MINORMASK);
	zv->zv_disk->fops = &zvol_ops;
	zv->zv_disk->private_data = zv;
	zv->zv_disk->queue = zv->zv_queue;
	snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
	    ZVOL_DEV_NAME, (dev & MINORMASK));

	return zv;

out_queue:
	blk_cleanup_queue(zv->zv_queue);
out_kmem:
	kmem_free(zv, sizeof (zvol_state_t));
out:
	return NULL;
}
Example #10
/*
 * Check if user has requested permission.
 */
int
dsl_deleg_access(const char *dsname, const char *perm, cred_t *cr)
{
	dsl_dataset_t *ds;
	dsl_dir_t *dd;
	dsl_pool_t *dp;
	void *cookie;
	int	error;
	char	checkflag;
	objset_t *mos;
	avl_tree_t permsets;
	perm_set_t *setnode;

	error = dsl_dataset_hold(dsname, FTAG, &ds);
	if (error)
		return (error);

	dp = ds->ds_dir->dd_pool;
	mos = dp->dp_meta_objset;

	if (dsl_delegation_on(mos) == B_FALSE) {
		dsl_dataset_rele(ds, FTAG);
		return (ECANCELED);
	}

	if (spa_version(dmu_objset_spa(dp->dp_meta_objset)) <
	    SPA_VERSION_DELEGATED_PERMS) {
		dsl_dataset_rele(ds, FTAG);
		return (EPERM);
	}

	if (dsl_dataset_is_snapshot(ds)) {
		/*
		 * Snapshots are treated as descendents only,
		 * local permissions do not apply.
		 */
		checkflag = ZFS_DELEG_DESCENDENT;
	} else {
		checkflag = ZFS_DELEG_LOCAL;
	}

	avl_create(&permsets, perm_set_compare, sizeof (perm_set_t),
	    offsetof(perm_set_t, p_node));

	rw_enter(&dp->dp_config_rwlock, RW_READER);
	for (dd = ds->ds_dir; dd != NULL; dd = dd->dd_parent,
	    checkflag = ZFS_DELEG_DESCENDENT) {
		uint64_t zapobj;
		boolean_t expanded;

		/*
		 * If not in global zone then make sure
		 * the zoned property is set
		 */
		if (!INGLOBALZONE(curproc)) {
			uint64_t zoned;

			if (dsl_prop_get_dd(dd,
			    zfs_prop_to_name(ZFS_PROP_ZONED),
			    8, 1, &zoned, NULL) != 0)
				break;
			if (!zoned)
				break;
		}
		zapobj = dd->dd_phys->dd_deleg_zapobj;

		if (zapobj == 0)
			continue;

		dsl_load_user_sets(mos, zapobj, &permsets, checkflag, cr);
again:
		expanded = B_FALSE;
		for (setnode = avl_first(&permsets); setnode;
		    setnode = AVL_NEXT(&permsets, setnode)) {
			if (setnode->p_matched == B_TRUE)
				continue;

			/* See if this set directly grants this permission */
			error = dsl_check_access(mos, zapobj,
			    ZFS_DELEG_NAMED_SET, 0, setnode->p_setname, perm);
			if (error == 0)
				goto success;
			if (error == EPERM)
				setnode->p_matched = B_TRUE;

			/* See if this set includes other sets */
			error = dsl_load_sets(mos, zapobj,
			    ZFS_DELEG_NAMED_SET_SETS, 0,
			    setnode->p_setname, &permsets);
			if (error == 0)
				setnode->p_matched = expanded = B_TRUE;
		}
		/*
		 * If we expanded any sets, that will define more sets,
		 * which we need to check.
		 */
		if (expanded)
			goto again;

		error = dsl_check_user_access(mos, zapobj, perm, checkflag, cr);
		if (error == 0)
			goto success;
	}
	error = EPERM;
success:
	rw_exit(&dp->dp_config_rwlock);
	dsl_dataset_rele(ds, FTAG);

	cookie = NULL;
	while ((setnode = avl_destroy_nodes(&permsets, &cookie)) != NULL)
		kmem_free(setnode, sizeof (perm_set_t));

	return (error);
}
Example #11
/*
 * smb_lucache_do_update
 *
 * This function takes care of updating the AVL tree.
 * If an entry has been updated, it'll be modified in place.
 *
 * New entries will be added to a temporary AVL tree, then the
 * password file is unlocked and all the new entries will
 * be transferred to the main cache from the temporary tree.
 *
 * This function MUST NOT be called directly
 */
static int
smb_lucache_do_update(void)
{
	avl_tree_t tmp_cache;
	smb_pwbuf_t pwbuf;
	smb_passwd_t smbpw;
	smb_ucnode_t uc_node;
	smb_ucnode_t *uc_newnode;
	smb_luser_t *user;
	smb_sid_t *sid;
	idmap_stat idm_stat;
	int rc = SMB_PWE_SUCCESS;
	void *cookie = NULL;
	FILE *fp;

	if ((rc = smb_pwd_lock()) != SMB_PWE_SUCCESS) {
		syslog(LOG_WARNING, "smb_pwdutil: lock failed, err=%d", rc);
		return (rc);
	}

	if ((fp = fopen(SMB_PASSWD, "rF")) == NULL) {
		syslog(LOG_WARNING, "smb_pwdutil: open failed, %m");
		(void) smb_pwd_unlock();
		return (SMB_PWE_OPEN_FAILED);
	}

	avl_create(&tmp_cache, smb_lucache_cmp,
	    sizeof (smb_ucnode_t), offsetof(smb_ucnode_t, cn_link));

	bzero(&pwbuf, sizeof (smb_pwbuf_t));
	pwbuf.pw_pwd = &smbpw;

	(void) rw_rdlock(&smb_uch.uc_cache_lck);

	while (smb_pwd_fgetent(fp, &pwbuf, SMB_PWD_GETF_NOPWD) != NULL) {
		uc_node.cn_user.su_name = smbpw.pw_name;
		uc_newnode = avl_find(&smb_uch.uc_cache, &uc_node, NULL);
		if (uc_newnode) {
			/* update the node info */
			uc_newnode->cn_user.su_ctrl = smbpw.pw_flags;
			continue;
		}

		/* create a new node */
		if ((uc_newnode = malloc(sizeof (smb_ucnode_t))) == NULL) {
			rc = SMB_PWE_NO_MEMORY;
			break;
		}

		bzero(uc_newnode, sizeof (smb_ucnode_t));
		user = &uc_newnode->cn_user;
		user->su_ctrl = smbpw.pw_flags;

		idm_stat = smb_idmap_getsid(smbpw.pw_uid, SMB_IDMAP_USER, &sid);
		if (idm_stat != IDMAP_SUCCESS) {
			syslog(LOG_WARNING, "smb_pwdutil: couldn't obtain SID "
			    "for uid=%u (%d)", smbpw.pw_uid, idm_stat);
			free(uc_newnode);
			continue;
		}
		(void) smb_sid_getrid(sid, &user->su_rid);
		smb_sid_free(sid);

		user->su_name = strdup(smbpw.pw_name);
		if (user->su_name == NULL) {
			rc = SMB_PWE_NO_MEMORY;
			free(uc_newnode);
			break;
		}

		avl_add(&tmp_cache, uc_newnode);
	}

	(void) rw_unlock(&smb_uch.uc_cache_lck);
	(void) fclose(fp);
	(void) smb_pwd_unlock();

	/* Destroy the temporary list */
	(void) rw_wrlock(&smb_uch.uc_cache_lck);
	while ((uc_newnode = avl_destroy_nodes(&tmp_cache, &cookie)) != NULL) {
		avl_add(&smb_uch.uc_cache, uc_newnode);
	}
	(void) rw_unlock(&smb_uch.uc_cache_lck);

	avl_destroy(&tmp_cache);

	return (rc);
}
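smb_lucache_cmp is not shown; since lookups key on cn_user.su_name (see the avl_find() call above), a plausible comparator orders nodes by user name. Whether the real one is case-sensitive is an assumption:

static int
smb_lucache_cmp(const void *p1, const void *p2)
{
	const smb_ucnode_t *u1 = p1;
	const smb_ucnode_t *u2 = p2;
	int rc;

	rc = strcmp(u1->cn_user.su_name, u2->cn_user.su_name);
	if (rc < 0)
		return (-1);
	if (rc > 0)
		return (1);
	return (0);
}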
Example #12
/*
 * Convert a list of ace_t entries to equivalent regular and default
 * aclent_t lists.  Return error (ENOTSUP) when conversion is not possible.
 */
static int
ln_ace_to_aent(ace_t *ace, int n, uid_t owner, gid_t group,
    aclent_t **aclentp, int *aclcnt, aclent_t **dfaclentp, int *dfaclcnt,
    int isdir)
{
	int error = 0;
	ace_t *acep;
	uint32_t bits;
	int i;
	ace_list_t *normacl = NULL, *dfacl = NULL, *acl;
	acevals_t *vals;

	*aclentp = NULL;
	*aclcnt = 0;
	*dfaclentp = NULL;
	*dfaclcnt = 0;

	/* we need at least user_obj, group_obj, and other_obj */
	if (n < 6) {
		error = ENOTSUP;
		goto out;
	}
	if (ace == NULL) {
		error = EINVAL;
		goto out;
	}

	error = cacl_malloc((void **)&normacl, sizeof (ace_list_t));
	if (error != 0)
		goto out;

	avl_create(&normacl->user, acevals_compare, sizeof (acevals_t),
	    offsetof(acevals_t, avl));
	avl_create(&normacl->group, acevals_compare, sizeof (acevals_t),
	    offsetof(acevals_t, avl));

	ace_list_init(normacl, 0);

	error = cacl_malloc((void **)&dfacl, sizeof (ace_list_t));
	if (error != 0)
		goto out;

	avl_create(&dfacl->user, acevals_compare, sizeof (acevals_t),
	    offsetof(acevals_t, avl));
	avl_create(&dfacl->group, acevals_compare, sizeof (acevals_t),
	    offsetof(acevals_t, avl));
	ace_list_init(dfacl, ACL_DEFAULT);

	/* process every ace_t... */
	for (i = 0; i < n; i++) {
		acep = &ace[i];

		/* rule out certain cases quickly */
		error = ace_to_aent_legal(acep);
		if (error != 0)
			goto out;

		/*
		 * Turn off these bits in order to not have to worry about
		 * them when doing the checks for complements.
		 */
		acep->a_access_mask &= ~(ACE_WRITE_OWNER | ACE_DELETE |
		    ACE_SYNCHRONIZE | ACE_WRITE_ATTRIBUTES |
		    ACE_READ_NAMED_ATTRS | ACE_WRITE_NAMED_ATTRS);

		/* see if this should be a regular or default acl */
		bits = acep->a_flags &
		    (ACE_INHERIT_ONLY_ACE |
		    ACE_FILE_INHERIT_ACE |
		    ACE_DIRECTORY_INHERIT_ACE);
		if (bits != 0) {
			/* all or nothing on these inherit bits */
			if (bits != (ACE_INHERIT_ONLY_ACE |
			    ACE_FILE_INHERIT_ACE |
			    ACE_DIRECTORY_INHERIT_ACE)) {
				error = ENOTSUP;
				goto out;
			}
			acl = dfacl;
		} else {
			acl = normacl;
		}

		if ((acep->a_flags & ACE_OWNER)) {
			if (acl->state > ace_user_obj) {
				error = ENOTSUP;
				goto out;
			}
			acl->state = ace_user_obj;
			acl->seen |= USER_OBJ;
			vals = &acl->user_obj;
			vals->aent_type = USER_OBJ | acl->dfacl_flag;
		} else if ((acep->a_flags & ACE_EVERYONE)) {
			acl->state = ace_other_obj;
			acl->seen |= OTHER_OBJ;
			vals = &acl->other_obj;
			vals->aent_type = OTHER_OBJ | acl->dfacl_flag;
		} else if (acep->a_flags & ACE_IDENTIFIER_GROUP) {
			if (acl->state > ace_group) {
				error = ENOTSUP;
				goto out;
			}
			if ((acep->a_flags & ACE_GROUP)) {
				acl->seen |= GROUP_OBJ;
				vals = &acl->group_obj;
				vals->aent_type = GROUP_OBJ | acl->dfacl_flag;
			} else {
				acl->seen |= GROUP;
				vals = acevals_find(acep, &acl->group,
				    &acl->numgroups);
				if (vals == NULL) {
					error = ENOMEM;
					goto out;
				}
				vals->aent_type = GROUP | acl->dfacl_flag;
			}
			acl->state = ace_group;
		} else {
			if (acl->state > ace_user) {
				error = ENOTSUP;
				goto out;
			}
			acl->state = ace_user;
			acl->seen |= USER;
			vals = acevals_find(acep, &acl->user,
			    &acl->numusers);
			if (vals == NULL) {
				error = ENOMEM;
				goto out;
			}
			vals->aent_type = USER | acl->dfacl_flag;
		}

		if (!(acl->state > ace_unused)) {
			error = EINVAL;
			goto out;
		}

		if (acep->a_type == ACE_ACCESS_ALLOWED_ACE_TYPE) {
			/* no more than one allowed per aclent_t */
			if (vals->allowed != ACE_MASK_UNDEFINED) {
				error = ENOTSUP;
				goto out;
			}
			vals->allowed = acep->a_access_mask;
		} else {
			/*
			 * it's a DENY; if there was a previous DENY, it
			 * must have been an ACL_MASK.
			 */
			if (vals->denied != ACE_MASK_UNDEFINED) {
				/* ACL_MASK is for USER and GROUP only */
				if ((acl->state != ace_user) &&
				    (acl->state != ace_group)) {
					error = ENOTSUP;
					goto out;
				}

				if (! acl->hasmask) {
					acl->hasmask = 1;
					acl->acl_mask = vals->denied;
				/* check for mismatched ACL_MASK emulations */
				} else if (acl->acl_mask != vals->denied) {
					error = ENOTSUP;
					goto out;
				}
				vals->mask = vals->denied;
			}
			vals->denied = acep->a_access_mask;
		}
	}

	/* done collating; produce the aclent_t lists */
	if (normacl->state != ace_unused) {
		error = ace_list_to_aent(normacl, aclentp, aclcnt,
		    owner, group, isdir);
		if (error != 0) {
			goto out;
		}
	}
	if (dfacl->state != ace_unused) {
		error = ace_list_to_aent(dfacl, dfaclentp, dfaclcnt,
		    owner, group, isdir);
		if (error != 0) {
			goto out;
		}
	}

out:
	if (normacl != NULL)
		ace_list_free(normacl);
	if (dfacl != NULL)
		ace_list_free(dfacl);

	return (error);
}
Example #13
/* Copy the contents of TREE to a new tree in arena OWNER.  If COPY is
   non-NULL, then each data item is passed to function COPY, and the
   return values are inserted into the new tree; otherwise, the items
   are copied verbatim from the old tree to the new tree.  Returns the
   new tree. */
avl_tree *
avl_copy (MAYBE_ARENA const avl_tree *tree, avl_copy_func copy)
{
  /* This is a combination of Knuth's Algorithm 2.3.1C (copying a
     binary tree) and Algorithm 2.3.1T as modified by exercise 12
     (preorder traversal). */

  avl_tree *new_tree;

  /* PT1. */
  const avl_node *pa[AVL_MAX_HEIGHT];	/* Stack PA: nodes. */
  const avl_node **pp = pa;		/* Stack PA: stack pointer. */
  const avl_node *p = &tree->root;
  
  /* QT1. */
  avl_node *qa[AVL_MAX_HEIGHT];	/* Stack QA: nodes. */
  avl_node **qp = qa;		/* Stack QA: stack pointer. */
  avl_node *q;
  
  assert (tree != NULL);
#if PSPP
  new_tree = avl_create (owner, tree->cmp, tree->param);
#else
  new_tree = avl_create (tree->cmp, tree->param);
#endif
  new_tree->count = tree->count;
  q = &new_tree->root;

  for (;;)
    {
      /* C4. */
      if (p->link[0] != NULL)
	{
	  avl_node *r = new_node (owner);
	  r->link[0] = r->link[1] = NULL;
	  q->link[0] = r;
	}

      /* C5: Find preorder successors of P and Q.  */
      goto start;
      for (;;)
	{
	  /* PT2. */
	  while (p != NULL)
	    {
	      goto escape;
	    start:
	      /* PT3. */
	      *pp++ = p;
	      *qp++ = q;
	      p = p->link[0];
	      q = q->link[0];
	    }
      
	  /* PT4. */
	  if (pp == pa)
	    {
	      assert (qp == qa);
	      return new_tree;
	    }
	      
	  p = *--pp;
	  q = *--qp;

	  /* PT5. */
	  p = p->link[1];
	  q = q->link[1];
	}
    escape:

      /* C2. */
      if (p->link[1])
	{
	  avl_node *r = new_node (owner);
	  r->link[0] = r->link[1] = NULL;
	  q->link[1] = r;
	}

      /* C3. */
      q->bal = p->bal;
      if (copy == NULL)
	q->data = p->data;
      else
	q->data = copy (p->data, tree->param);
    }
}
Example #14
void
init_ctype(void)
{
	avl_create(&ctypes, ctype_compare, sizeof (ctype_node_t),
	    offsetof(ctype_node_t, avl));
}
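ctype_compare is defined elsewhere; a minimal sketch, assuming each ctype_node_t is keyed by a wide-character code point (the wc field name is a guess):

static int
ctype_compare(const void *n1, const void *n2)
{
	const ctype_node_t *c1 = n1;
	const ctype_node_t *c2 = n2;

	if (c1->wc < c2->wc)
		return (-1);
	if (c1->wc > c2->wc)
		return (1);
	return (0);
}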
Example #15
//-----------------------------------------------------------------------------
// class SweepLine
//-----------------------------------------------------------------------------
tedop::SweepLine::SweepLine(const pointlist plist) : _plist(plist) {
   _numv = _plist.size();
   _tree = avl_create(compare_seg, NULL, NULL);
}    
Example #16
iscsit_sess_t *
iscsit_sess_create(iscsit_tgt_t *tgt, iscsit_conn_t *ict,
    uint32_t cmdsn, uint8_t *isid, uint16_t tag,
    char *initiator_name, char *target_name,
    uint8_t *error_class, uint8_t *error_detail)
{
	iscsit_sess_t *result;

	*error_class = ISCSI_STATUS_CLASS_SUCCESS;

	/*
	 * Even if this session create "fails" for some reason we still need
	 * to return a valid session pointer so that we can send the failed
	 * login response.
	 */
	result = kmem_zalloc(sizeof (*result), KM_SLEEP);

	/* Allocate TSIH */
	if ((result->ist_tsih = iscsit_tsih_alloc()) == ISCSI_UNSPEC_TSIH) {
		/* Out of TSIH's */
		*error_class = ISCSI_STATUS_CLASS_TARGET_ERR;
		*error_detail = ISCSI_LOGIN_STATUS_NO_RESOURCES;
		/*
		 * Continue initializing this session so we can use it
		 * to complete the login process.
		 */
	}

	idm_sm_audit_init(&result->ist_state_audit);
	mutex_init(&result->ist_sn_mutex, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&result->ist_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&result->ist_cv, NULL, CV_DEFAULT, NULL);
	list_create(&result->ist_events, sizeof (sess_event_ctx_t),
	    offsetof(sess_event_ctx_t, se_ctx_node));
	list_create(&result->ist_conn_list, sizeof (iscsit_conn_t),
	    offsetof(iscsit_conn_t, ict_sess_ln));
	avl_create(&result->ist_task_list, iscsit_task_itt_compare,
	    sizeof (iscsit_task_t), offsetof(iscsit_task_t, it_sess_ln));
	result->ist_rxpdu_queue = kmem_zalloc(sizeof (iscsit_cbuf_t), KM_SLEEP);
	result->ist_state = SS_Q1_FREE;
	result->ist_last_state = SS_Q1_FREE;
	bcopy(isid, result->ist_isid, ISCSI_ISID_LEN);
	result->ist_tpgt_tag = tag;

	result->ist_tgt = tgt;
	/*
	 * cmdsn/expcmdsn do not advance during login phase.
	 */
	result->ist_expcmdsn = cmdsn;
	result->ist_maxcmdsn = result->ist_expcmdsn + 1;

	result->ist_initiator_name =
	    kmem_alloc(strlen(initiator_name) + 1, KM_SLEEP);
	(void) strcpy(result->ist_initiator_name, initiator_name);
	if (target_name) {
		/* A discovery session might not have a target name */
		result->ist_target_name =
		    kmem_alloc(strlen(target_name) + 1, KM_SLEEP);
		(void) strcpy(result->ist_target_name, target_name);
	}
	idm_refcnt_init(&result->ist_refcnt, result);

	/* Login code will fill in ist_stmf_sess if necessary */

	if (*error_class == ISCSI_STATUS_CLASS_SUCCESS) {
		/*
		 * Make sure the service is still enabled and if so get a global
		 * hold to represent this session.
		 */
		mutex_enter(&iscsit_global.global_state_mutex);
		if (iscsit_global.global_svc_state == ISE_ENABLED) {
			iscsit_global_hold();
			mutex_exit(&iscsit_global.global_state_mutex);

			/*
			 * Kick session state machine (also binds connection
			 * to session)
			 */
			iscsit_sess_sm_event(result, SE_CONN_IN_LOGIN, ict);

			*error_class = ISCSI_STATUS_CLASS_SUCCESS;
		} else {
			mutex_exit(&iscsit_global.global_state_mutex);
			*error_class = ISCSI_STATUS_CLASS_TARGET_ERR;
			*error_detail = ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE;
		}
	}

	/*
	 * As noted above we must return a session pointer even if something
	 * failed.  The resources will get freed later.
	 */
	return (result);
}
Example #17
/*
 * Insert a new string into the Str_tbl.  There are two AVL trees used.
 *
 *  .	The first LenNode AVL tree maintains a tree of nodes based on string
 *	sizes.
 *  .	Each LenNode maintains a StrNode AVL tree for each string.  Large
 *	applications have been known to contribute thousands of strings of
 *	the same size.  Should strings need to be removed (-z ignore), then
 *	the string AVL tree makes this removal efficient and scalable.
 */
int
st_insert(Str_tbl *stp, const char *str)
{
	size_t		len;
	StrNode		*snp, sn = { 0 };
	LenNode		*lnp, ln = { 0 };
	avl_index_t	where;

	/*
	 * String table can't have been cooked
	 */
	assert((stp->st_flags & FLG_STTAB_COOKED) == 0);

	/*
	 * Null strings always point to the head of the string
	 * table - no reason to keep searching.
	 */
	if ((len = strlen(str)) == 0)
		return (0);

	stp->st_fullstrsize += len + 1;
	stp->st_strcnt++;

	if ((stp->st_flags & FLG_STTAB_COMPRESS) == 0)
		return (0);

	/*
	 * From the controlling string table, determine which LenNode AVL node
	 * provides for this string length.  If the node doesn't exist, insert
	 * a new node to represent this string length.
	 */
	ln.ln_strlen = len;
	if ((lnp = avl_find(stp->st_lentree, &ln, &where)) == NULL) {
		if ((lnp = calloc(sizeof (LenNode), 1)) == NULL)
			return (-1);
		lnp->ln_strlen = len;
		avl_insert(stp->st_lentree, lnp, where);

		if ((lnp->ln_strtree = calloc(sizeof (avl_tree_t), 1)) == NULL)
			return (0);

		avl_create(lnp->ln_strtree, &avl_str_compare, sizeof (StrNode),
		    SGSOFFSETOF(StrNode, sn_avlnode));
	}

	/*
	 * From the string length AVL node determine whether a StrNode AVL node
	 * provides this string.  If the node doesn't exist, insert a new node
	 * to represent this string.
	 */
	sn.sn_str = str;
	if ((snp = avl_find(lnp->ln_strtree, &sn, &where)) == NULL) {
		if ((snp = calloc(sizeof (StrNode), 1)) == NULL)
			return (-1);
		snp->sn_str = str;
		avl_insert(lnp->ln_strtree, snp, where);
	}
	snp->sn_refcnt++;

	return (0);
}
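The string comparator is not shown. Because this AVL implementation requires comparators to return exactly -1, 0, or 1, a plausible avl_str_compare clamps the strcmp() result (the sn_str field name is taken from the code above):

static int
avl_str_compare(const void *n1, const void *n2)
{
	const StrNode	*sn1 = n1;
	const StrNode	*sn2 = n2;
	int		rc;

	rc = strcmp(sn1->sn_str, sn2->sn_str);
	if (rc > 0)
		return (1);
	if (rc < 0)
		return (-1);
	return (0);
}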
Example #18
File: spmatrix.c Project: FMX/gsl
gsl_spmatrix *
gsl_spmatrix_alloc_nzmax(const size_t n1, const size_t n2,
                         const size_t nzmax, const size_t sptype)
{
  gsl_spmatrix *m;

  if (n1 == 0)
    {
      GSL_ERROR_VAL ("matrix dimension n1 must be positive integer",
                     GSL_EINVAL, 0);
    }
  else if (n2 == 0)
    {
      GSL_ERROR_VAL ("matrix dimension n2 must be positive integer",
                     GSL_EINVAL, 0);
    }

  m = calloc(1, sizeof(gsl_spmatrix));
  if (!m)
    {
      GSL_ERROR_VAL("failed to allocate space for spmatrix struct",
                    GSL_ENOMEM, 0);
    }

  m->size1 = n1;
  m->size2 = n2;
  m->nz = 0;
  m->nzmax = GSL_MAX(nzmax, 1);
  m->sptype = sptype;

  m->i = malloc(m->nzmax * sizeof(size_t));
  if (!m->i)
    {
      gsl_spmatrix_free(m);
      GSL_ERROR_VAL("failed to allocate space for row indices",
                    GSL_ENOMEM, 0);
    }

  if (sptype == GSL_SPMATRIX_TRIPLET)
    {
      m->tree_data = malloc(sizeof(gsl_spmatrix_tree));
      if (!m->tree_data)
        {
          gsl_spmatrix_free(m);
          GSL_ERROR_VAL("failed to allocate space for AVL tree struct",
                        GSL_ENOMEM, 0);
        }

      m->tree_data->n = 0;

      /* allocate tree data structure */
      m->tree_data->tree = avl_create(compare_triplet, (void *) m,
                                      &avl_allocator_spmatrix);
      if (!m->tree_data->tree)
        {
          gsl_spmatrix_free(m);
          GSL_ERROR_VAL("failed to allocate space for AVL tree",
                        GSL_ENOMEM, 0);
        }

      /* preallocate nzmax tree nodes */
      m->tree_data->node_array = malloc(m->nzmax * sizeof(struct avl_node));
      if (!m->tree_data->node_array)
        {
          gsl_spmatrix_free(m);
          GSL_ERROR_VAL("failed to allocate space for AVL tree nodes",
                        GSL_ENOMEM, 0);
        }

      m->p = malloc(m->nzmax * sizeof(size_t));
      if (!m->p)
        {
          gsl_spmatrix_free(m);
          GSL_ERROR_VAL("failed to allocate space for column indices",
                        GSL_ENOMEM, 0);
        }
    }
  else if (sptype == GSL_SPMATRIX_CCS)
    {
      m->p = malloc((n2 + 1) * sizeof(size_t));
      m->work = malloc(GSL_MAX(n1, n2) *
                       GSL_MAX(sizeof(size_t), sizeof(double)));
      if (!m->p || !m->work)
        {
          gsl_spmatrix_free(m);
          GSL_ERROR_VAL("failed to allocate space for column pointers",
                        GSL_ENOMEM, 0);
        }
    }

  m->data = malloc(m->nzmax * sizeof(double));
  if (!m->data)
    {
      gsl_spmatrix_free(m);
      GSL_ERROR_VAL("failed to allocate space for data",
                    GSL_ENOMEM, 0);
    }

  return m;
} /* gsl_spmatrix_alloc_nzmax() */
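compare_triplet receives the matrix itself as the param argument passed to avl_create() above. A sketch of how such a comparator can work, assuming each tree item points into m->data so its triplet index is recoverable by pointer arithmetic; this is an illustration, not GSL's exact implementation:

static int
compare_triplet(const void *pa, const void *pb, void *param)
{
  const gsl_spmatrix *m = (const gsl_spmatrix *) param;

  /* recover the triplet index of each element from its data pointer */
  size_t a = (const double *) pa - m->data;
  size_t b = (const double *) pb - m->data;

  /* order row-major: by row index i, then by column index p */
  if (m->i[a] != m->i[b])
    return (m->i[a] < m->i[b]) ? -1 : 1;
  if (m->p[a] != m->p[b])
    return (m->p[a] < m->p[b]) ? -1 : 1;

  return 0;
}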
Example #19
int
zfs_sb_create(const char *osname, zfs_sb_t **zsbp)
{
	objset_t *os;
	zfs_sb_t *zsb;
	uint64_t zval;
	int i, error;
	uint64_t sa_obj;

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP | KM_NODEBUG);

	/*
	 * We claim to always be readonly so we can open snapshots;
	 * other ZPL code will prevent us from writing to snapshots.
	 */
	error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os);
	if (error) {
		kmem_free(zsb, sizeof (zfs_sb_t));
		return (error);
	}

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zsb->z_sb = NULL;
	zsb->z_parent = zsb;
	zsb->z_max_blksz = SPA_MAXBLOCKSIZE;
	zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
	zsb->z_os = os;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
	if (error) {
		goto out;
	} else if (zsb->z_version >
	    zfs_zpl_version_map(spa_version(dmu_objset_spa(os)))) {
		(void) printk("Can't mount a version %lld file system "
		    "on a version %lld pool\n. Pool must be upgraded to mount "
		    "this file system.", (u_longlong_t)zsb->z_version,
		    (u_longlong_t)spa_version(dmu_objset_spa(os)));
		error = ENOTSUP;
		goto out;
	}
	if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
		goto out;
	zsb->z_norm = (int)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
		goto out;
	zsb->z_utf8 = (zval != 0);

	if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
		goto out;
	zsb->z_case = (uint_t)zval;

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (zsb->z_case == ZFS_CASE_INSENSITIVE ||
	    zsb->z_case == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);

	if (zsb->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
		    &sa_obj);
		if (error)
			goto out;

		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
		if ((error == 0) && (zval == ZFS_XATTR_SA))
			zsb->z_xattr_sa = B_TRUE;
	} else {
		/*
		 * Pre SA versions file systems should never touch
		 * either the attribute registration or layout objects.
		 */
		sa_obj = 0;
	}

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);
	if (error)
		goto out;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	    &zsb->z_root);
	if (error)
		goto out;
	ASSERT(zsb->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zsb->z_unlinkedobj);
	if (error)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zsb->z_userquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zsb->z_groupquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zsb->z_fuid_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zsb->z_shares_dir);
	if (error && error != ENOENT)
		goto out;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrw_init(&zsb->z_teardown_lock, B_FALSE);
	rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

	avl_create(&zsb->z_ctldir_snaps, snapentry_compare,
	    sizeof (zfs_snapentry_t), offsetof(zfs_snapentry_t, se_node));
	mutex_init(&zsb->z_ctldir_lock, NULL, MUTEX_DEFAULT, NULL);

	*zsbp = zsb;
	return (0);

out:
	dmu_objset_disown(os, zsb);
	*zsbp = NULL;
	kmem_free(zsb, sizeof (zfs_sb_t));
	return (error);
}
Example #20
static zap_t *
mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
{
	zap_t *winner;
	zap_t *zap;
	int i;

	ASSERT3U(MZAP_ENT_LEN, ==, sizeof (mzap_ent_phys_t));

	zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP);
	rw_init(&zap->zap_rwlock, NULL, 0, 0);
	rw_enter(&zap->zap_rwlock, RW_WRITER);
	zap->zap_objset = os;
	zap->zap_object = obj;
	zap->zap_dbuf = db;

	if (((uint64_t *)db->db_data)[0] != ZBT_MICRO) {
		mutex_init(&zap->zap_f.zap_num_entries_mtx, NULL, 0, 0);
		zap->zap_f.zap_block_shift = highbit(db->db_size) - 1;
	} else {
		zap->zap_ismicro = TRUE;
	}

	/*
	 * Make sure that zap_ismicro is set before we let others see
	 * it, because zap_lockdir() checks zap_ismicro without the lock
	 * held.
	 */
	winner = dmu_buf_set_user(db, zap, &zap->zap_m.zap_phys, zap_evict);

	if (winner != NULL) {
#ifdef __APPLE__
		if (!zap->zap_ismicro)
			mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
#endif
		kmem_free(zap, sizeof (zap_t));
		return (winner);
	}

	if (zap->zap_ismicro) {
		zap->zap_salt = zap->zap_m.zap_phys->mz_salt;
		zap->zap_m.zap_num_chunks = db->db_size / MZAP_ENT_LEN - 1;
		avl_create(&zap->zap_m.zap_avl, mze_compare,
		    sizeof (mzap_ent_t), offsetof(mzap_ent_t, mze_node));

		for (i = 0; i < zap->zap_m.zap_num_chunks; i++) {
			mzap_ent_phys_t *mze =
			    &zap->zap_m.zap_phys->mz_chunk[i];
			if (mze->mze_name[0]) {
				zap->zap_m.zap_num_entries++;
				mze_insert(zap, i,
				    zap_hash(zap, mze->mze_name), mze);
			}
		}
	} else {
		zap->zap_salt = zap->zap_f.zap_phys->zap_salt;

		ASSERT3U(sizeof (struct zap_leaf_header), ==,
		    2*ZAP_LEAF_CHUNKSIZE);

		/*
		 * The embedded pointer table should not overlap the
		 * other members.
		 */
		ASSERT3P(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0), >,
		    &zap->zap_f.zap_phys->zap_salt);

		/*
		 * The embedded pointer table should end at the end of
		 * the block
		 */
		ASSERT3U((uintptr_t)&ZAP_EMBEDDED_PTRTBL_ENT(zap,
		    1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)) -
		    (uintptr_t)zap->zap_f.zap_phys, ==,
		    zap->zap_dbuf->db_size);
	}
	rw_exit(&zap->zap_rwlock);
	return (zap);
}
Example #21
static void
server_main(int argc, char **argv)
{
	int did;
	int c;
	struct statvfs vfsbuf;
	int imexit = 0;
	pid_t parent;
	char *root = NULL;
	char *sadmdir = NULL;
	hrtime_t delta;
	int dir = 0;
	int dfd;

	(void) set_prog_name("pkgserv");

	openlog("pkgserv", LOG_PID | LOG_ODELAY, LOG_DAEMON);

	while ((c = getopt(argc, argv, "d:eoN:pP:R:r:")) != EOF) {
		switch (c) {
		case 'e':
			imexit = 1;
			break;
		case 'd':
			sadmdir = optarg;
			if (*sadmdir != '/' || strlen(sadmdir) >= PATH_MAX ||
			    access(sadmdir, X_OK) != 0)
				exit(99);
			break;
		case 'N':
			(void) set_prog_name(optarg);
			break;
		case 'o':
			one_shot = B_TRUE;
			verbose = 0;
			break;
		case 'p':
			/*
			 * We are updating possibly many zones; so we're not
			 * dumping based on a short timeout and we will not
			 * exit.
			 */
			permanent = B_TRUE;
			dumptimeout = 3600;
			break;
		case 'P':
			client_pid = atoi(optarg);
			break;
		case 'R':
			root = optarg;
			if (*root != '/' || strlen(root) >= PATH_MAX ||
			    access(root, X_OK) != 0)
				exit(99);
			break;
		case 'r':
			read_only = B_TRUE;
			one_shot = B_TRUE;
			verbose = 0;
			door = optarg;
			break;
		default:
			exit(99);
		}
	}

	if (one_shot && permanent) {
		progerr(gettext("Incorrect Usage"));
		exit(99);
	}

	umem_nofail_callback(no_memory_abort);

	if (root != NULL && strcmp(root, "/") != 0) {
		if (snprintf(pkgdir, PATH_MAX, "%s%s", root,
		    sadmdir == NULL ? SADM_DIR : sadmdir) >= PATH_MAX) {
			exit(99);
		}
	} else {
		if (sadmdir == NULL)
			(void) strcpy(pkgdir, SADM_DIR);
		else
			(void) strcpy(pkgdir, sadmdir);
	}

	if (chdir(pkgdir) != 0) {
		progerr(gettext("can't chdir to %s"), pkgdir);
		exit(2);
	}

	closefrom(3);

	if (!read_only && establish_lock(LOCK) < 0) {
		progerr(gettext(
		    "couldn't lock in %s (server running?): %s"),
		    pkgdir, strerror(errno));
		exit(1);
	}

	did = door_create(pkg_door_srv, 0, DOOR_REFUSE_DESC);
	if (did == -1) {
		progerr("door_create: %s", strerror(errno));
		exit(2);
	}

	(void) fdetach(door);

	if ((dfd = creat(door, 0644)) < 0 || close(dfd) < 0) {
		progerr("door_create: %s", strerror(errno));
		exit(2);
	}

	(void) mutex_lock(&mtx);

	myuid = geteuid();

	(void) sigset(SIGHUP, signal_handler);
	(void) sigset(SIGTERM, signal_handler);
	(void) sigset(SIGINT, signal_handler);
	(void) sigset(SIGQUIT, signal_handler);

	(void) signal(SIGPIPE, SIG_IGN);

	(void) atexit(finish);

	if (fattach(did, door) != 0) {
		progerr(gettext("attach door: %s"), strerror(errno));
		exit(2);
	}
	(void) close(did);

	ecache = umem_cache_create("entry", sizeof (pkgentry_t),
	    sizeof (char *), NULL, NULL, NULL, NULL, NULL, 0);

	avl_create(list, avlcmp, sizeof (pkgentry_t),
	    offsetof(pkgentry_t, avl));

	IS_ST0['\0'] = 1;
	IS_ST0[' '] = 1;
	IS_ST0['\t'] = 1;

	IS_ST0Q['\0'] = 1;
	IS_ST0Q[' '] = 1;
	IS_ST0Q['\t'] = 1;
	IS_ST0Q['='] = 1;

	parse_contents();
	if (parse_log() > 0)
		pkgdump();

	if (imexit)
		exit(0);

	if (statvfs(".", &vfsbuf) != 0) {
		progerr(gettext("statvfs: %s"), strerror(errno));
		exit(2);
	}

	if (strcmp(vfsbuf.f_basetype, "zfs") == 0)
		flushbeforemark = 0;

	/* We've started, tell the parent */
	parent = getppid();
	if (parent != 1)
		(void) kill(parent, SIGUSR1);

	if (!one_shot) {
		int fd;
		(void) setsid();
		fd = open("/dev/null", O_RDWR, 0);
		if (fd >= 0) {
			(void) dup2(fd, STDIN_FILENO);
			(void) dup2(fd, STDOUT_FILENO);
			(void) dup2(fd, STDERR_FILENO);
			if (fd > 2)
				(void) close(fd);
		}
	}

	lastcall = lastchange = gethrtime();

	/*
	 * Start the main thread, here is where we unlock the mutex.
	 */
	for (;;) {
		if (want_to_quit) {
			pkgdump();
			exit(0);
		}
		/* Wait forever when root or when there's a running filter */
		if (write_locked ||
		    (!one_shot && permanent && dir == changes)) {
			(void) cond_wait(&cv, &mtx);
			continue;
		}
		delta = time_since_(lastchange);
		/* Wait until DUMPTIMEOUT after last change before we pkgdump */
		if (delta < dumptimeout * LLNANOSEC) {
			my_cond_reltimedwait(delta, dumptimeout);
			continue;
		}
		/* Client still around? Just wait then. */
		if (client_pid > 1 && kill(client_pid, 0) == 0) {
			lastchange = lastcall = gethrtime();
			continue;
		}
		/* Wait for another EXITTIMEOUT seconds before we exit */
		if ((one_shot || !permanent) && dir == changes) {
			delta = time_since_(lastcall);
			if (delta < EXITTIMEOUT * LLNANOSEC) {
				my_cond_reltimedwait(delta, EXITTIMEOUT);
				continue;
			}
			exit(0);
		}
		pkgdump();
		dir = changes;
	}

	/*NOTREACHED*/
}
Example #22
		do {
			if (trav->avl_height == 0) {
				trav->avl_node = 0;
				return 0;
			}
			y = x;
			x = trav->avl_stack[--trav->avl_height];
		} while (y == x->avl_link[1]);
	}
	trav->avl_node = x;
	return x;
}

GLOBAL_FUNCTION(New, 0) {
	if (Count == 0) {
		Result->Val = avl_create($COMP, $HASH);
	} else if (Count == 1) {
		Result->Val = avl_create(Args[0].Val, $HASH);
	} else if (Count == 2) {
		Result->Val = avl_create(Args[0].Val, Args[1].Val);
	};
	return SUCCESS;
};

GLOBAL_FUNCTION(Make, 0) {
	struct avl_table *Table = avl_create($COMP, $HASH);
	int I;
	for (I = 0; I < Count; I+=2) {
		Std$Function_result Result1;
		Std$Object_t **Slot;
		Std$Function$call($HASH, 1, &Result1, Args[I].Val, 0);
Example #23
/*ARGSUSED*/
static int
emul64_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct emul64	*emul64;
	emul64_tgt_t	*tgt;
	char		**geo_vidpid = NULL;
	char		*geo, *vidpid;
	uint32_t	*geoip = NULL;
	uint_t		length;
	uint_t		length2;
	lldaddr_t	sector_count;
	char		prop_name[15];
	int		ret = DDI_FAILURE;

	emul64 = TRAN2EMUL64(tran);
	EMUL64_MUTEX_ENTER(emul64);

	/*
	 * We get called for each target driver.conf node, multiple
	 * nodes may map to the same tgt,lun (sd.conf, st.conf, etc).
	 * Check to see if transport to tgt,lun already established.
	 */
	tgt = find_tgt(emul64, sd->sd_address.a_target, sd->sd_address.a_lun);
	if (tgt) {
		ret = DDI_SUCCESS;
		goto out;
	}

	/* see if we have driver.conf specified device for this target,lun */
	(void) snprintf(prop_name, sizeof (prop_name), "targ_%d_%d",
	    sd->sd_address.a_target, sd->sd_address.a_lun);
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba_dip,
	    DDI_PROP_DONTPASS, prop_name,
	    &geo_vidpid, &length) != DDI_PROP_SUCCESS)
		goto out;
	if (length < 2) {
		cmn_err(CE_WARN, "emul64: %s property does not have 2 "
		    "elements", prop_name);
		goto out;
	}

	/* pick geometry name and vidpid string from string array */
	geo = *geo_vidpid;
	vidpid = *(geo_vidpid + 1);

	/* lookup geometry property integer array */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, hba_dip, DDI_PROP_DONTPASS,
	    geo, (int **)&geoip, &length2) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "emul64: didn't get prop '%s'", geo);
		goto out;
	}
	if (length2 < 6) {
		cmn_err(CE_WARN, "emul64: property %s does not have 6 "
		    "elements", *geo_vidpid);
		goto out;
	}

	/* allocate and initialize tgt structure for tgt,lun */
	tgt = kmem_zalloc(sizeof (emul64_tgt_t), KM_SLEEP);
	rw_init(&tgt->emul64_tgt_nw_lock, NULL, RW_DRIVER, NULL);
	mutex_init(&tgt->emul64_tgt_blk_lock, NULL, MUTEX_DRIVER, NULL);

	/* create avl for data block storage */
	avl_create(&tgt->emul64_tgt_data, emul64_bsd_blkcompare,
	    sizeof (blklist_t), offsetof(blklist_t, bl_node));

	/* save scsi_address and vidpid */
	bcopy(sd, &tgt->emul64_tgt_saddr, sizeof (struct scsi_address));
	(void) strncpy(tgt->emul64_tgt_inq, vidpid,
	    sizeof (emul64->emul64_tgt->emul64_tgt_inq));

	/*
	 * The high order 4 bytes of the sector count always come first in
	 * emul64.conf.  They are followed by the low order 4 bytes.  Not
	 * all CPU types want them in this order, but lldaddr_t takes care of
	 * this for us.  We then pick up geometry (ncyl X nheads X nsect).
	 */
	sector_count._p._u	= *(geoip + 0);
	sector_count._p._l	= *(geoip + 1);
	/*
	 * On 32-bit platforms, fix block size if it's greater than the
	 * allowable maximum.
	 */
#if !defined(_LP64)
	if (sector_count._f > DK_MAX_BLOCKS)
		sector_count._f = DK_MAX_BLOCKS;
#endif
	tgt->emul64_tgt_sectors = sector_count._f;
	tgt->emul64_tgt_dtype	= *(geoip + 2);
	tgt->emul64_tgt_ncyls	= *(geoip + 3);
	tgt->emul64_tgt_nheads	= *(geoip + 4);
	tgt->emul64_tgt_nsect	= *(geoip + 5);

	/* insert target structure into list */
	tgt->emul64_tgt_next = emul64->emul64_tgt;
	emul64->emul64_tgt = tgt;
	ret = DDI_SUCCESS;

out:	EMUL64_MUTEX_EXIT(emul64);
	if (geoip)
		ddi_prop_free(geoip);
	if (geo_vidpid)
		ddi_prop_free(geo_vidpid);
	return (ret);
}
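emul64_bsd_blkcompare orders the per-target data blocks; a minimal sketch, assuming blklist_t carries its block number in a bl_blkno field:

static int
emul64_bsd_blkcompare(const void *a1, const void *a2)
{
	const blklist_t	*b1 = a1;
	const blklist_t	*b2 = a2;

	if (b1->bl_blkno < b2->bl_blkno)
		return (-1);
	if (b1->bl_blkno > b2->bl_blkno)
		return (1);
	return (0);
}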
Example #24
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	static char *default_dir = "/dev/dsk";
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	if (dirs == 0) {
		dirs = 1;
		dir = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		tpool_t *t;
		char *rdsk;
		int dfd;
		boolean_t config_failed = B_FALSE;
		DIR *dirp;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/rdsk/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			if (dfd >= 0)
				(void) close(dfd);
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));
		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zfs_strdup(hdl, name);
			slice->rn_avl = &slice_cache;
			slice->rn_dfd = dfd;
			slice->rn_hdl = hdl;
			slice->rn_nozpool = B_FALSE;
			avl_add(&slice_cache, slice);
		}
		/*
		 * create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice can
		 * not hold a zpool, which is benign.  Also choose
		 * double the number of processors; we hold a lot of
		 * locks in the kernel, so going beyond this doesn't
		 * buy us much.
		 */
		t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
		    0, NULL);
		for (slice = avl_first(&slice_cache); slice;
		    (slice = avl_walk(&slice_cache, slice,
		    AVL_AFTER)))
			(void) tpool_dispatch(t, zpool_open_func, slice);
		tpool_wait(t);
		tpool_destroy(t);

		cookie = NULL;
		while ((slice = avl_destroy_nodes(&slice_cache,
		    &cookie)) != NULL) {
			if (slice->rn_config != NULL && !config_failed) {
				nvlist_t *config = slice->rn_config;
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
				} else {
					/*
					 * use the non-raw path for the config
					 */
					(void) strlcpy(end, slice->rn_name,
					    pathleft);
					if (add_config(hdl, &pools, path,
					    config) != 0)
						config_failed = B_TRUE;
				}
			}
			free(slice->rn_name);
			free(slice);
		}
		avl_destroy(&slice_cache);

		(void) closedir(dirp);

		if (config_failed)
			goto error;
	}

	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		free(ne->ne_name);
		free(ne);
	}

	return (ret);
}
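slice_cache_compare orders the device-name cache; since each rdsk_node_t stores its directory entry name in rn_name (set above), a plausible comparator clamps strcmp() to the -1/0/1 range this AVL implementation requires:

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
	const char *nm1 = ((const rdsk_node_t *)arg1)->rn_name;
	const char *nm2 = ((const rdsk_node_t *)arg2)->rn_name;
	int rv = strcmp(nm1, nm2);

	if (rv > 0)
		return (1);
	if (rv < 0)
		return (-1);
	return (0);
}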
Example #25
static sfd_entry *read_sfd (char *sfd_name)
{
    void **aa;
    sfd_entry *sfd, tmp_sfd;
    subfont_entry *sf;
	char *ftemp = NULL;
    char buf[SMALL_BUF_SIZE], *p;
    long int i, j, k;
    int n;
    int callback_id=0;
    int file_opened=0;
    /* check whether this sfd has been read */
    tmp_sfd.name = sfd_name;
    if (sfd_tree == NULL) {
        sfd_tree = avl_create (comp_sfd_entry, NULL, &avl_xallocator);
        assert (sfd_tree != NULL);
    }
    sfd = (sfd_entry *) avl_find (sfd_tree, &tmp_sfd);
    if (sfd != NULL)
        return sfd;
    set_cur_file_name (sfd_name);
    if (sfd_buffer!=NULL) {
      xfree(sfd_buffer);
      sfd_buffer=NULL;
    }
    sfd_curbyte=0;
    sfd_size=0;

	callback_id=callback_defined(find_sfd_file_callback);
	if (callback_id>0) {
	  if(run_callback(callback_id,"S->S",cur_file_name,&ftemp)) {
		if(ftemp!=NULL&&strlen(ftemp)) {
		  if (cur_file_name)
			free(cur_file_name);
		  cur_file_name = xstrdup(ftemp);
		  free(ftemp);
		}
	  }
	}
    callback_id=callback_defined(read_sfd_file_callback);
    if (callback_id>0) {
      if(! (run_callback(callback_id,"S->bSd",cur_file_name,
		       &file_opened, &sfd_buffer,&sfd_size) &&
	    file_opened && 
	    sfd_size>0 ) ) {
	pdftex_warn ("cannot open SFD file for reading");
	cur_file_name = NULL;
	return NULL;      
      }
      sfd_read_file();
      sfd_close();
    }
    tex_printf ("{");
    tex_printf (cur_file_name);
    sfd = new_sfd_entry ();
    sfd->name = xstrdup (sfd_name);
    while (!sfd_eof ()) {
        sfd_getline (true);
        if (*sfd_line == 10)    /* empty line indicating eof */
            break;
        sf = new_subfont_entry ();
        sf->next = sfd->subfont;
        sfd->subfont = sf;
        sscanf (sfd_line, "%s %n", buf, &n);
        sf->infix = xstrdup (buf);
        p = sfd_line + n;       /* skip to the next word */
        k = 0;
      read_ranges:
        for (;;) {
            if (*p == '\\') {   /* continue on next line */
                sfd_getline (false);
                p = sfd_line;
                goto read_ranges;
            } else if (*p == 0) /* end of subfont */
                break;
            if (sscanf (p, " %li %n", &i, &n) == 0)
                pdftex_fail ("invalid token:\n%s", p);
            p += n;
            if (*p == ':') {    /* offset */
                k = i;
                p++;
            } else if (*p == '_') {     /* range */
                if (sscanf (p + 1, " %li %n", &j, &n) == 0)
                    pdftex_fail ("invalid token:\n%s", p);
                if (i > j || k + (j - i) > 255)
                    pdftex_fail ("invalid range:\n%s", p);
                while (i <= j)
                    sf->charcodes[k++] = i++;
                p += n + 1;
            } else              /* codepoint */
                sf->charcodes[k++] = i;
        }
    }
    tex_printf ("}");
    aa = avl_probe (sfd_tree, sfd);
    assert (aa != NULL);
    return sfd;
}
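comp_sfd_entry is not shown; the lookup above keys on the name field, so a plausible comparator for this libavl tree is a strcmp() on subfont-definition names (the third argument is libavl's param pointer, unused here):

static int comp_sfd_entry (const void *pa, const void *pb, void *p)
{
    (void) p;
    return strcmp (((const sfd_entry *) pa)->name,
                   ((const sfd_entry *) pb)->name);
}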
Example #26
/* Tests tree functions.
   |insert[]| and |delete[]| must contain some permutation of values
   |0|@dots{}|n - 1|.
   Uses |allocator| as the allocator for tree and node data.
   Higher values of |verbosity| produce more debug output. */
int
test_correctness (struct libavl_allocator *allocator,
                  int insert[], int delete[], int n, int verbosity)
{
  struct avl_table *tree;
  int okay = 1;
  int i;

  /* Test creating an AVL and inserting into it. */
  tree = avl_create (compare_ints, NULL, allocator);
  if (tree == NULL)
    {
      if (verbosity >= 0)
        printf ("  Out of memory creating tree.\n");
      return 1;
    }

  for (i = 0; i < n; i++)
    {
      if (verbosity >= 2)
        printf ("  Inserting %d...\n", insert[i]);

      /* Add the |i|th element to the tree. */
      {
        void **p = avl_probe (tree, &insert[i]);
Example #27
/*
 * pppt_enable_svc
 *
 * registers all the configured targets and target portals with STMF
 */
static int
pppt_enable_svc(void)
{
	stmf_port_provider_t	*pp;
	stmf_dbuf_store_t	*dbuf_store;
	int			rc = 0;

	ASSERT(pppt_global.global_svc_state == PSS_ENABLING);

	/*
	 * Make sure we can tell whether we have partially allocated state,
	 * in case we need to exit and tear down anything allocated.
	 */
	pppt_global.global_dbuf_store = NULL;
	pp = NULL;
	pppt_global.global_pp = NULL;
	pppt_global.global_dispatch_taskq = NULL;
	pppt_global.global_sess_taskq = NULL;

	avl_create(&pppt_global.global_target_list,
	    pppt_tgt_avl_compare, sizeof (pppt_tgt_t),
	    offsetof(pppt_tgt_t, target_global_ln));

	avl_create(&pppt_global.global_sess_list,
	    pppt_sess_avl_compare_by_id, sizeof (pppt_sess_t),
	    offsetof(pppt_sess_t, ps_global_ln));

	/*
	 * Set up the STMF dbuf store.  If buffers are associated with a
	 * particular lport (FC, SRP), then the dbuf_store should be stored
	 * in the lport context; otherwise (iSCSI) the dbuf_store should
	 * be global.
	 */
	dbuf_store = stmf_alloc(STMF_STRUCT_DBUF_STORE, 0, 0);
	if (dbuf_store == NULL) {
		rc = ENOMEM;
		goto tear_down_and_return;
	}
	dbuf_store->ds_alloc_data_buf = pppt_dbuf_alloc;
	dbuf_store->ds_free_data_buf = pppt_dbuf_free;
	dbuf_store->ds_port_private = NULL;
	pppt_global.global_dbuf_store = dbuf_store;

	/* Register port provider */
	pp = stmf_alloc(STMF_STRUCT_PORT_PROVIDER, 0, 0);
	if (pp == NULL) {
		rc = ENOMEM;
		goto tear_down_and_return;
	}

	pp->pp_portif_rev = PORTIF_REV_1;
	pp->pp_instance = 0;
	pp->pp_name = PPPT_MODNAME;
	pp->pp_cb = NULL;

	pppt_global.global_pp = pp;

	if (stmf_register_port_provider(pp) != STMF_SUCCESS) {
		rc = EIO;
		goto tear_down_and_return;
	}

	pppt_global.global_dispatch_taskq = taskq_create("pppt_dispatch",
	    1, minclsyspri, 1, INT_MAX, TASKQ_PREPOPULATE);

	pppt_global.global_sess_taskq = taskq_create("pppt_session",
	    1, minclsyspri, 1, INT_MAX, TASKQ_PREPOPULATE);

	return (0);

tear_down_and_return:

	if (pppt_global.global_sess_taskq) {
		taskq_destroy(pppt_global.global_sess_taskq);
		pppt_global.global_sess_taskq = NULL;
	}

	if (pppt_global.global_dispatch_taskq) {
		taskq_destroy(pppt_global.global_dispatch_taskq);
		pppt_global.global_dispatch_taskq = NULL;
	}

	if (pppt_global.global_pp)
		pppt_global.global_pp = NULL;

	if (pp)
		stmf_free(pp);

	if (pppt_global.global_dbuf_store) {
		stmf_free(pppt_global.global_dbuf_store);
		pppt_global.global_dbuf_store = NULL;
	}

	avl_destroy(&pppt_global.global_sess_list);
	avl_destroy(&pppt_global.global_target_list);

	return (rc);
}
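The comparators handed to avl_create() above are not part of this excerpt. Unlike libavl, the Solaris/illumos AVL interface requires a comparator to return exactly -1, 0, or 1. A minimal sketch of a session-id comparator of that shape follows; the ps_session_id field name is assumed purely for illustration.

/* Hypothetical comparator sketch: Solaris/illumos AVL comparators must
 * return exactly -1, 0, or 1 -- never a raw subtraction, which can
 * overflow and violates the interface contract. */
static int
pppt_sess_avl_compare_by_id(const void *void_sess1, const void *void_sess2)
{
	const pppt_sess_t	*s1 = void_sess1;
	const pppt_sess_t	*s2 = void_sess2;

	if (s1->ps_session_id < s2->ps_session_id)
		return (-1);
	if (s1->ps_session_id > s2->ps_session_id)
		return (1);
	return (0);
}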
Example #28
0
/*
 * Initialize new entrance and segment descriptors and add them as lists to
 * the output file descriptor.
 */
uintptr_t
ld_ent_setup(Ofl_desc *ofl, Elf64_Xword segalign)
{
	Ent_desc	*enp;
	predef_seg_t	*psegs;
	Sg_desc		*sgp;
	size_t		idx;

	/*
	 * Initialize the elf library.
	 */
	if (elf_version(EV_CURRENT) == EV_NONE) {
		ld_eprintf(ofl, ERR_FATAL, MSG_ELF_LIBELF, EV_CURRENT);
		return (S_ERROR);
	}

	/*
	 * Initialize internal Global Symbol Table AVL tree
	 */
	avl_create(&ofl->ofl_symavl, &ld_sym_avl_comp, sizeof (Sym_avlnode),
	    SGSOFFSETOF(Sym_avlnode, sav_node));

	/* Initialize segment AVL tree */
	avl_create(&ofl->ofl_segs_avl, ofl_segs_avl_cmp,
	    sizeof (Sg_desc), SGSOFFSETOF(Sg_desc, sg_avlnode));

	/* Initialize entrance criteria AVL tree */
	avl_create(&ofl->ofl_ents_avl, ofl_ents_avl_cmp, sizeof (Ent_desc),
	    SGSOFFSETOF(Ent_desc, ec_avlnode));


	/*
	 * Allocate and initialize writable copies of both the entrance and
	 * segment descriptors.
	 *
	 * Note that on non-amd64 targets, this allocates a few more
	 * elements than are needed. For now, we are willing to overallocate
	 * a small amount to simplify the code.
	 */
	if ((psegs = libld_malloc(sizeof (sg_desc))) == NULL)
		return (S_ERROR);
	(void) memcpy(psegs, &sg_desc, sizeof (sg_desc));
	sgp = (Sg_desc *) psegs;

	/*
	 * The data segment and stack permissions can differ:
	 *
	 *	- Architectural/ABI per-platform differences
	 *	- Whether the object is built statically or dynamically
	 *
	 * Those segments so affected have their program header flags
	 * set here at runtime, rather than in the sg_desc templates above.
	 */
	psegs->psg_data.sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	psegs->psg_bss.sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	psegs->psg_dynamic.sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	psegs->psg_sunwdtrace.sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
#if	defined(_ELF64)
	psegs->psg_ldata.sg_phdr.p_flags = ld_targ.t_m.m_dataseg_perm;
	psegs->psg_sunwdtrace.sg_phdr.p_flags |= PF_X;
#endif
	psegs->psg_sunwstack.sg_phdr.p_flags = ld_targ.t_m.m_stack_perm;
	if ((ofl->ofl_flags & FLG_OF_DYNAMIC) == 0)
		psegs->psg_data.sg_phdr.p_flags |= PF_X;

	/*
	 * Traverse the new entrance descriptor list converting the segment
	 * pointer entries to the absolute address within the new segment
	 * descriptor list.  Add each entrance descriptor to the output file
	 * list.
	 */
	if ((enp = libld_malloc(sizeof (ent_desc))) == NULL)
		return (S_ERROR);
	(void) memcpy(enp, ent_desc, sizeof (ent_desc));
	for (idx = 0; idx < (sizeof (ent_desc) / sizeof (ent_desc[0])); idx++,
	    enp++) {

#if	defined(_ELF64)
		/* Don't use the amd64 entry conditions for non-amd64 targets */
		if ((enp->ec_attrmask & SHF_X86_64_LARGE) &&
		    (ld_targ.t_m.m_mach != EM_X86_64))
			continue;
#endif
		if (aplist_append(&ofl->ofl_ents, enp,
		    AL_CNT_OFL_ENTRANCE) == NULL)
			return (S_ERROR);

		/*
		 * The segment pointer is currently pointing at a template
		 * segment descriptor in sg_desc. Compute its array index,
		 * and then use that index to compute the address of the
		 * corresponding descriptor in the writable copy.
		 */
		enp->ec_segment =
		    &sgp[(enp->ec_segment - (Sg_desc *) &sg_desc)];
	}

	/*
	 * Add each segment descriptor to the segment descriptor list. The
	 * ones with non-NULL sg_name are also entered into the AVL tree.
	 * For each loadable segment initialize a default alignment. Note
	 * that ld(1) and ld.so.1 initialize this differently.
	 */
	for (idx = 0; idx < predef_seg_nelts; idx++, sgp++) {
		Elf64_Phdr	*phdr = &(sgp->sg_phdr);

		/* Ignore amd64 segment templates for non-amd64 targets */
		switch (sgp->sg_id) {
		case SGID_LRODATA:
		case SGID_LDATA:
			if ((ld_targ.t_m.m_mach != EM_X86_64))
				continue;
		}

		if (phdr->p_type == PT_LOAD)
			phdr->p_align = segalign;

		if ((aplist_append(&ofl->ofl_segs, sgp,
		    AL_CNT_SEGMENTS)) == NULL)
			return (S_ERROR);

#ifndef NDEBUG			/* assert() is enabled */
		/*
		 * Enforce the segment name rule: Any segment that can
		 * be referenced by an entrance descriptor must have
		 * a name. Any segment that cannot, must have a NULL
		 * name pointer.
		 */
		switch (phdr->p_type) {
		case PT_LOAD:
		case PT_NOTE:
		case PT_NULL:
			assert(sgp->sg_name != NULL);
			break;
		default:
			assert(sgp->sg_name == NULL);
			break;
		}
#endif

		/*
		 * Add named segment descriptors to the AVL tree to
		 * provide O(logN) lookups.
		 */
		if (sgp->sg_name != NULL)
			avl_add(&ofl->ofl_segs_avl, sgp);
	}

	return (1);
}
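With named segments entered into ofl_segs_avl, a lookup by name needs only a stack-allocated key descriptor with sg_name filled in. A sketch, assuming ofl_segs_avl_cmp orders segments by sg_name; the helper below is illustrative, not part of libld.

/* Hedged sketch: O(log N) lookup of a segment descriptor by name. */
static Sg_desc *
find_segment_by_name(Ofl_desc *ofl, const char *name)
{
	Sg_desc	key;

	key.sg_name = name;	/* only the comparison key need be valid */
	return (avl_find(&ofl->ofl_segs_avl, &key, NULL));
}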
Example #29
0
/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no search
 * directories are given (iarg->paths is 0), the default directory
 * (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	struct dirent *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	verify(iarg->poolname == NULL || iarg->guid == 0);

	if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all devices for their type */
		if (zpool_find_import_blkid(hdl, &pools) == 0)
			goto skip_scanning;

		(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "blkid failure falling back "
		    "to manual probing"));
#endif /* HAVE_LIBBLKID */

		dir = zpool_default_import_path;
		dirs = DEFAULT_IMPORT_PATH_SIZE;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		taskq_t *t;
		char rdsk[MAXPATHLEN];
		int dfd;
		boolean_t config_failed = B_FALSE;
		DIR *dirp;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == NULL) {

			/* it is safe to skip missing search paths */
			if (errno == ENOENT)
				continue;

			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, ZFS_DISK_ROOTD) == 0)
			(void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
		else
			(void) strlcpy(rdsk, path, sizeof (rdsk));

		if ((dfd = open(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			if (dfd >= 0)
				(void) close(dfd);
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zfs_strdup(hdl, name);
			slice->rn_avl = &slice_cache;
			slice->rn_dfd = dfd;
			slice->rn_hdl = hdl;
			slice->rn_nozpool = B_FALSE;
			avl_add(&slice_cache, slice);
		}

		/*
		 * create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice cannot
		 * hold a zpool, which is benign.  Also choose
		 * double the number of processors; we hold a lot of
		 * locks in the kernel, so going beyond this doesn't
		 * buy us much.
		 */
		t = taskq_create("z_import", 2 * max_ncpus, defclsyspri,
		    2 * max_ncpus, INT_MAX, TASKQ_PREPOPULATE);
		for (slice = avl_first(&slice_cache); slice;
		    (slice = avl_walk(&slice_cache, slice,
		    AVL_AFTER)))
			(void) taskq_dispatch(t, zpool_open_func, slice,
			    TQ_SLEEP);
		taskq_wait(t);
		taskq_destroy(t);

		cookie = NULL;
		while ((slice = avl_destroy_nodes(&slice_cache,
		    &cookie)) != NULL) {
			if (slice->rn_config != NULL && !config_failed) {
				nvlist_t *config = slice->rn_config;
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
				} else {
					/*
					 * use the non-raw path for the config
					 */
					(void) strlcpy(end, slice->rn_name,
					    pathleft);
					if (add_config(hdl, &pools, path, i+1,
					    slice->rn_num_labels, config) != 0)
						config_failed = B_TRUE;
				}
			}
			free(slice->rn_name);
			free(slice);
		}
		avl_destroy(&slice_cache);

		(void) closedir(dirp);

		if (config_failed)
			goto error;
	}

#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
	ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		free(ne->ne_name);
		free(ne);
	}

	return (ret);
}
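slice_cache_compare is not shown in this excerpt; since slices are keyed by device name, it would plausibly be a strcmp() of rn_name with the result clamped to the -1/0/1 range this AVL interface requires, as in this sketch:

/* Hypothetical sketch of slice_cache_compare: order slices by name,
 * clamping strcmp()'s result to avl_create()'s -1/0/1 contract. */
static int
slice_cache_compare(const void *arg1, const void *arg2)
{
	const char *nm1 = ((const rdsk_node_t *)arg1)->rn_name;
	const char *nm2 = ((const rdsk_node_t *)arg2)->rn_name;
	int rv = strcmp(nm1, nm2);

	if (rv < 0)
		return (-1);
	return (rv > 0 ? 1 : 0);
}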
Example #30
0
int
sa_setup(objset_t *os, uint64_t sa_obj, sa_attr_reg_t *reg_attrs, int count,
    sa_attr_type_t **user_table)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	sa_os_t *sa;
	dmu_objset_type_t ostype = dmu_objset_type(os);
	sa_attr_type_t *tb;
	int error;

	mutex_enter(&os->os_lock);
	if (os->os_sa) {
		mutex_enter(&os->os_sa->sa_lock);
		mutex_exit(&os->os_lock);
		tb = os->os_sa->sa_user_table;
		mutex_exit(&os->os_sa->sa_lock);
		*user_table = tb;
		return (0);
	}

	sa = kmem_zalloc(sizeof (sa_os_t), KM_SLEEP);
	mutex_init(&sa->sa_lock, NULL, MUTEX_DEFAULT, NULL);
	sa->sa_master_obj = sa_obj;

	os->os_sa = sa;
	mutex_enter(&sa->sa_lock);
	mutex_exit(&os->os_lock);
	avl_create(&sa->sa_layout_num_tree, layout_num_compare,
	    sizeof (sa_lot_t), offsetof(sa_lot_t, lot_num_node));
	avl_create(&sa->sa_layout_hash_tree, layout_hash_compare,
	    sizeof (sa_lot_t), offsetof(sa_lot_t, lot_hash_node));

	if (sa_obj) {
		error = zap_lookup(os, sa_obj, SA_LAYOUTS,
		    8, 1, &sa->sa_layout_attr_obj);
		if (error != 0 && error != ENOENT)
			goto fail;
		error = zap_lookup(os, sa_obj, SA_REGISTRY,
		    8, 1, &sa->sa_reg_attr_obj);
		if (error != 0 && error != ENOENT)
			goto fail;
	}

	if ((error = sa_attr_table_setup(os, reg_attrs, count)) != 0)
		goto fail;

	if (sa->sa_layout_attr_obj != 0) {
		uint64_t layout_count;

		error = zap_count(os, sa->sa_layout_attr_obj,
		    &layout_count);

		/*
		 * Layout number count should be > 0
		 */
		if (error != 0 || layout_count == 0) {
			if (error == 0)
				error = EINVAL;
			goto fail;
		}

		for (zap_cursor_init(&zc, os, sa->sa_layout_attr_obj);
		    (error = zap_cursor_retrieve(&zc, &za)) == 0;
		    zap_cursor_advance(&zc)) {
			sa_attr_type_t *lot_attrs;
			uint64_t lot_num;

			lot_attrs = kmem_zalloc(sizeof (sa_attr_type_t) *
			    za.za_num_integers, KM_SLEEP);

			if ((error = (zap_lookup(os, sa->sa_layout_attr_obj,
			    za.za_name, 2, za.za_num_integers,
			    lot_attrs))) != 0) {
				kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
				    za.za_num_integers);
				break;
			}
			VERIFY(ddi_strtoull(za.za_name, NULL, 10,
			    (unsigned long long *)&lot_num) == 0);

			(void) sa_add_layout_entry(os, lot_attrs,
			    za.za_num_integers, lot_num,
			    sa_layout_info_hash(lot_attrs,
			    za.za_num_integers), B_FALSE, NULL);
			kmem_free(lot_attrs, sizeof (sa_attr_type_t) *
			    za.za_num_integers);
		}
		zap_cursor_fini(&zc);

		/*
		 * Make sure layout count matches number of entries added
		 * to AVL tree
		 */
		if (avl_numnodes(&sa->sa_layout_num_tree) != layout_count) {
			ASSERT(error != 0);
			goto fail;
		}
	}

	/* Add special layout number for old ZNODES */
	if (ostype == DMU_OST_ZFS) {
		(void) sa_add_layout_entry(os, sa_legacy_zpl_layout,
		    sa_legacy_attr_count, 0,
		    sa_layout_info_hash(sa_legacy_zpl_layout,
		    sa_legacy_attr_count), B_FALSE, NULL);

		(void) sa_add_layout_entry(os, sa_dummy_zpl_layout, 0, 1,
		    0, B_FALSE, NULL);
	}
	*user_table = os->os_sa->sa_user_table;
	mutex_exit(&sa->sa_lock);
	return (0);
fail:
	os->os_sa = NULL;
	sa_free_attr_table(sa);
	if (sa->sa_user_table)
		kmem_free(sa->sa_user_table, sa->sa_user_table_sz);
	mutex_exit(&sa->sa_lock);
	kmem_free(sa, sizeof (sa_os_t));
	return ((error == ECKSUM) ? EIO : error);
}