Example #1
int
zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
{
	zfs_sb_t	*zsb = sb->s_fs_info;
	znode_t		*zp;
	uint64_t	object = 0;
	uint64_t	fid_gen = 0;
	uint64_t	gen_mask;
	uint64_t	zp_gen;
	int		i, err;

	*ipp = NULL;

	ZFS_ENTER(zsb);

	if (fidp->fid_len == LONG_FID_LEN) {
		zfid_long_t	*zlfid = (zfid_long_t *)fidp;
		uint64_t	objsetid = 0;
		uint64_t	setgen = 0;

		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
			objsetid |= ((uint64_t)zlfid->zf_setid[i]) << (8 * i);

		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
			setgen |= ((uint64_t)zlfid->zf_setgen[i]) << (8 * i);

		ZFS_EXIT(zsb);

		err = zfsctl_lookup_objset(sb, objsetid, &zsb);
		if (err)
			return (SET_ERROR(EINVAL));

		ZFS_ENTER(zsb);
	}

	if (fidp->fid_len == SHORT_FID_LEN || fidp->fid_len == LONG_FID_LEN) {
		zfid_short_t	*zfid = (zfid_short_t *)fidp;

		for (i = 0; i < sizeof (zfid->zf_object); i++)
			object |= ((uint64_t)zfid->zf_object[i]) << (8 * i);

		for (i = 0; i < sizeof (zfid->zf_gen); i++)
			fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
	} else {
		ZFS_EXIT(zsb);
		return (SET_ERROR(EINVAL));
	}

	/* A zero fid_gen means we are in the .zfs control directories */
	if (fid_gen == 0 &&
	    (object == ZFSCTL_INO_ROOT || object == ZFSCTL_INO_SNAPDIR)) {
		*ipp = zsb->z_ctldir;
		ASSERT(*ipp != NULL);
		if (object == ZFSCTL_INO_SNAPDIR) {
			VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
			    0, kcred, NULL, NULL) == 0);
		} else {
			igrab(*ipp);
		}
		ZFS_EXIT(zsb);
		return (0);
	}

	gen_mask = -1ULL >> (64 - 8 * i);

	dprintf("getting %llu [%llu mask %llx]\n", object, fid_gen, gen_mask);
	if ((err = zfs_zget(zsb, object, &zp))) {
		ZFS_EXIT(zsb);
		return (err);
	}
	(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb), &zp_gen,
	    sizeof (uint64_t));
	zp_gen = zp_gen & gen_mask;
	if (zp_gen == 0)
		zp_gen = 1;
	if (zp->z_unlinked || zp_gen != fid_gen) {
		dprintf("znode gen (%llu) != fid gen (%llu)\n", zp_gen,
		    fid_gen);
		iput(ZTOI(zp));
		ZFS_EXIT(zsb);
		return (SET_ERROR(EINVAL));
	}

	*ipp = ZTOI(zp);
	if (*ipp)
		zfs_inode_update(ITOZ(*ipp));

	ZFS_EXIT(zsb);
	return (0);
}
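The two loops above unpack the object number and generation from little-endian byte arrays inside the fid. For reference, a minimal sketch of the inverse packing that the exporting side (zfs_fid()) performs; the zfid_short_t field names are taken from the casts above, the rest is assumption:

/* Sketch: pack object/gen into a short fid -- the inverse of the
 * byte-unpacking loops in zfs_vget() above. */
static void
pack_short_fid(zfid_short_t *zfid, uint64_t object, uint64_t gen)
{
	int i;

	zfid->zf_len = SHORT_FID_LEN;
	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
}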
Example #2
static int
zfs_replay_create(void *arg1, void *arg2, boolean_t byteswap)
{
	zfsvfs_t *zfsvfs = arg1;
	lr_create_t *lr = arg2;
	char *name = NULL;		/* location determined later */
	char *link;			/* symlink content follows name */
	znode_t *dzp;
	vnode_t *vp = NULL;
	xvattr_t xva;
	int vflg = 0;
	size_t lrsize = sizeof (lr_create_t);
	lr_attr_t *lrattr;
	void *start;
	size_t xvatlen;
	uint64_t txtype;
	struct componentname cn;
	int error;

	txtype = (lr->lr_common.lrc_txtype & ~TX_CI);
	if (byteswap) {
		byteswap_uint64_array(lr, sizeof (*lr));
		if (txtype == TX_CREATE_ATTR || txtype == TX_MKDIR_ATTR)
			zfs_replay_swap_attrs((lr_attr_t *)(lr + 1));
	}


	if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
		return (error);

	xva_init(&xva);
	zfs_init_vattr(&xva.xva_vattr, AT_TYPE | AT_MODE | AT_UID | AT_GID,
	    lr->lr_mode, lr->lr_uid, lr->lr_gid, lr->lr_rdev, lr->lr_foid);

	/*
	 * All forms of zfs create (create, mkdir, mkxattrdir, symlink)
	 * eventually end up in zfs_mknode(), which assigns the object's
	 * creation time and generation number.  The generic VOP_CREATE()
	 * doesn't have either concept, so we smuggle the values inside
	 * the vattr's otherwise unused va_ctime and va_nblocks fields.
	 */
	ZFS_TIME_DECODE(&xva.xva_vattr.va_ctime, lr->lr_crtime);
	xva.xva_vattr.va_nblocks = lr->lr_gen;

	error = dmu_object_info(zfsvfs->z_os, lr->lr_foid, NULL);
	if (error != ENOENT)
		goto out;

	if (lr->lr_common.lrc_txtype & TX_CI)
		vflg |= FIGNORECASE;

	/*
	 * Symlinks don't have fuid info, and CIFS never creates
	 * symlinks.
	 *
	 * The _ATTR versions will grab the fuid info in their subcases.
	 */
	if ((int)lr->lr_common.lrc_txtype != TX_SYMLINK &&
	    (int)lr->lr_common.lrc_txtype != TX_MKDIR_ATTR &&
	    (int)lr->lr_common.lrc_txtype != TX_CREATE_ATTR) {
		start = (lr + 1);
		zfsvfs->z_fuid_replay =
		    zfs_replay_fuid_domain(start, &start,
		    lr->lr_uid, lr->lr_gid);
	}

	cn.cn_cred = kcred;
	cn.cn_thread = curthread;
	cn.cn_flags = SAVENAME;

	vn_lock(ZTOV(dzp), LK_EXCLUSIVE | LK_RETRY);
	switch (txtype) {
	case TX_CREATE_ATTR:
		lrattr = (lr_attr_t *)(caddr_t)(lr + 1);
		xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
		zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva);
		start = (caddr_t)(lr + 1) + xvatlen;
		zfsvfs->z_fuid_replay =
		    zfs_replay_fuid_domain(start, &start,
		    lr->lr_uid, lr->lr_gid);
		name = (char *)start;

		/*FALLTHROUGH*/
	case TX_CREATE:
		if (name == NULL)
			name = (char *)start;

		cn.cn_nameptr = name;
		error = VOP_CREATE(ZTOV(dzp), &vp, &cn, &xva.xva_vattr /*,vflg*/);
		break;
	case TX_MKDIR_ATTR:
		lrattr = (lr_attr_t *)(caddr_t)(lr + 1);
		xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
		zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva);
		start = (caddr_t)(lr + 1) + xvatlen;
		zfsvfs->z_fuid_replay =
		    zfs_replay_fuid_domain(start, &start,
		    lr->lr_uid, lr->lr_gid);
		name = (char *)start;

		/*FALLTHROUGH*/
	case TX_MKDIR:
		if (name == NULL)
			name = (char *)(lr + 1);

		cn.cn_nameptr = name;
		error = VOP_MKDIR(ZTOV(dzp), &vp, &cn, &xva.xva_vattr /*,vflg*/);
		break;
	case TX_MKXATTR:
		error = zfs_make_xattrdir(dzp, &xva.xva_vattr, &vp, kcred);
		break;
	case TX_SYMLINK:
		name = (char *)(lr + 1);
		link = name + strlen(name) + 1;
		cn.cn_nameptr = name;
		error = VOP_SYMLINK(ZTOV(dzp), &vp, &cn, &xva.xva_vattr, link /*,vflg*/);
		break;
	default:
		error = SET_ERROR(ENOTSUP);
	}
	VOP_UNLOCK(ZTOV(dzp), 0);

out:
	if (error == 0 && vp != NULL)
		VN_URELE(vp);

	VN_RELE(ZTOV(dzp));

	if (zfsvfs->z_fuid_replay)
		zfs_fuid_info_free(zfsvfs->z_fuid_replay);
	zfsvfs->z_fuid_replay = NULL;
	return (error);
}
Example #3
/*
 * Read out the command history.
 */
int
spa_history_get(spa_t *spa, uint64_t *offp, uint64_t *len, char *buf)
{
	objset_t *mos = spa->spa_meta_objset;
	dmu_buf_t *dbp;
	uint64_t read_len, phys_read_off, phys_eof;
	uint64_t leftover = 0;
	spa_history_phys_t *shpp;
	int err;

	/*
	 * If the command history doesn't exist (older pool),
	 * that's ok, just return ENOENT.
	 */
	if (!spa->spa_history)
		return (SET_ERROR(ENOENT));

	/*
	 * The history is logged asynchronously, so when they request
	 * the first chunk of history, make sure everything has been
	 * synced to disk so that we get it.
	 */
	if (*offp == 0 && spa_writeable(spa))
		txg_wait_synced(spa_get_dsl(spa), 0);

	if ((err = dmu_bonus_hold(mos, spa->spa_history, FTAG, &dbp)) != 0)
		return (err);
	shpp = dbp->db_data;

#ifdef ZFS_DEBUG
	{
		dmu_object_info_t doi;
		dmu_object_info_from_db(dbp, &doi);
		ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_SPA_HISTORY_OFFSETS);
	}
#endif

	mutex_enter(&spa->spa_history_lock);
	phys_eof = spa_history_log_to_phys(shpp->sh_eof, shpp);

	if (*offp < shpp->sh_pool_create_len) {
		/* read in just the zpool create history */
		phys_read_off = *offp;
		read_len = MIN(*len, shpp->sh_pool_create_len -
		    phys_read_off);
	} else {
		/*
		 * Need to reset passed in offset to BOF if the passed in
		 * offset has since been overwritten.
		 */
		*offp = MAX(*offp, shpp->sh_bof);
		phys_read_off = spa_history_log_to_phys(*offp, shpp);

		/*
		 * Read up to the minimum of what the user passed down or
		 * the EOF (physical or logical).  If we hit physical EOF,
		 * use 'leftover' to read from the physical BOF.
		 */
		if (phys_read_off <= phys_eof) {
			read_len = MIN(*len, phys_eof - phys_read_off);
		} else {
			read_len = MIN(*len,
			    shpp->sh_phys_max_off - phys_read_off);
			if (phys_read_off + *len > shpp->sh_phys_max_off) {
				leftover = MIN(*len - read_len,
				    phys_eof - shpp->sh_pool_create_len);
			}
		}
	}

	/* offset for consumer to use next */
	*offp += read_len + leftover;

	/* tell the consumer how much you actually read */
	*len = read_len + leftover;

	if (read_len == 0) {
		mutex_exit(&spa->spa_history_lock);
		dmu_buf_rele(dbp, FTAG);
		return (0);
	}

	err = dmu_read(mos, spa->spa_history, phys_read_off, read_len, buf,
	    DMU_READ_PREFETCH);
	if (leftover && err == 0) {
		err = dmu_read(mos, spa->spa_history, shpp->sh_pool_create_len,
		    leftover, buf + read_len, DMU_READ_PREFETCH);
	}
	mutex_exit(&spa->spa_history_lock);

	dmu_buf_rele(dbp, FTAG);
	return (err);
}
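A minimal consumer sketch for the contract above, assuming an in-kernel caller holding a valid spa_t: *offp is a cursor that spa_history_get() advances, and *len is in/out (buffer capacity in, bytes actually read out), so it must be re-armed on every call. The buffer size and helper name are illustrative only:

/* Sketch: stream the whole history using the offp/len contract above. */
static int
dump_history(spa_t *spa)
{
	char *buf = kmem_alloc(SPA_MAXBLOCKSIZE, KM_SLEEP);
	uint64_t off = 0;
	uint64_t len;
	int err;

	do {
		len = SPA_MAXBLOCKSIZE;	/* in: capacity; out: bytes read */
		err = spa_history_get(spa, &off, &len, buf);
		/* consume 'len' bytes of 'buf' here; 'off' has been advanced */
	} while (err == 0 && len > 0);

	kmem_free(buf, SPA_MAXBLOCKSIZE);
	return (err);
}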
Example #4
/********************************************************************
* FUNCTION starter_starter_edit
* 
* Edit database object callback
* Path: /starter
* Add object instrumentation in COMMIT phase.
* 
* INPUTS:
*     see agt/agt_cb.h for details
* 
* RETURNS:
*     error status
********************************************************************/
static status_t starter_starter_edit (
    ses_cb_t *scb,
    rpc_msg_t *msg,
    agt_cbtyp_t cbtyp,
    op_editop_t editop,
    val_value_t *newval,
    val_value_t *curval)
{
    status_t res = NO_ERR;
    val_value_t *errorval = (curval) ? curval : newval;

    if (LOGDEBUG) {
        log_debug("\nEnter starter_starter_edit callback for %s phase",
            agt_cbtype_name(cbtyp));
    }

    switch (cbtyp) {
    case AGT_CB_VALIDATE:
        /* description-stmt validation here */
        break;
    case AGT_CB_APPLY:
        /* database manipulation done here */
        break;
    case AGT_CB_COMMIT:
        /* device instrumentation done here */
        switch (editop) {
        case OP_EDITOP_LOAD:
            break;
        case OP_EDITOP_MERGE:
            break;
        case OP_EDITOP_REPLACE:
            break;
        case OP_EDITOP_CREATE:
            break;
        case OP_EDITOP_DELETE:
            break;
        default:
            res = SET_ERROR(ERR_INTERNAL_VAL);
        }

        if (res == NO_ERR) {
            res = agt_check_cache(&starter_val, newval, curval, editop);
        }
        
        break;
    case AGT_CB_ROLLBACK:
        /* undo device instrumentation here */
        break;
    default:
        res = SET_ERROR(ERR_INTERNAL_VAL);
    }

    if (res != NO_ERR) {
        agt_record_error(
            scb,
            &msg->mhdr,
            NCX_LAYER_CONTENT,
            res,
            NULL,
            (errorval) ? NCX_NT_VAL : NCX_NT_NONE,
            errorval,
            (errorval) ? NCX_NT_VAL : NCX_NT_NONE,
            errorval);
    }
    return res;

} /* starter_starter_edit */
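For context, an edit callback like this only takes effect once it is registered for its object path during module initialization. A hedged sketch of that step using the Yuma agt_cb API (the module and revision constants follow the generated y_starter naming; treat the exact names as assumptions):

/* Sketch: register the edit callback for /starter at init time. */
res = agt_cb_register_callback(
    y_starter_M_starter,            /* module name */
    (const xmlChar *)"/starter",    /* object path */
    y_starter_R_starter,            /* revision (or NULL for any) */
    starter_starter_edit);          /* the callback defined above */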
Example #5
int
dsl_prop_get_dd(dsl_dir_t *dd, const char *propname,
    int intsz, int numints, void *buf, char *setpoint, boolean_t snapshot)
{
	int err = ENOENT;
	dsl_dir_t *target = dd;
	objset_t *mos = dd->dd_pool->dp_meta_objset;
	zfs_prop_t prop;
	boolean_t inheritable;
	boolean_t inheriting = B_FALSE;
	char *inheritstr;
	char *recvdstr;

	ASSERT(dsl_pool_config_held(dd->dd_pool));

	if (setpoint)
		setpoint[0] = '\0';

	prop = zfs_name_to_prop(propname);
	inheritable = (prop == ZPROP_INVAL || zfs_prop_inheritable(prop));
	inheritstr = kmem_asprintf("%s%s", propname, ZPROP_INHERIT_SUFFIX);
	recvdstr = kmem_asprintf("%s%s", propname, ZPROP_RECVD_SUFFIX);

	/*
	 * Note: dd may become NULL, therefore we shouldn't dereference it
	 * after this loop.
	 */
	for (; dd != NULL; dd = dd->dd_parent) {
		if (dd != target || snapshot) {
			if (!inheritable)
				break;
			inheriting = B_TRUE;
		}

		/* Check for a local value. */
		err = zap_lookup(mos, dd->dd_phys->dd_props_zapobj, propname,
		    intsz, numints, buf);
		if (err != ENOENT) {
			if (setpoint != NULL && err == 0)
				dsl_dir_name(dd, setpoint);
			break;
		}

		/*
		 * Skip the check for a received value if there is an explicit
		 * inheritance entry.
		 */
		err = zap_contains(mos, dd->dd_phys->dd_props_zapobj,
		    inheritstr);
		if (err != 0 && err != ENOENT)
			break;

		if (err == ENOENT) {
			/* Check for a received value. */
			err = zap_lookup(mos, dd->dd_phys->dd_props_zapobj,
			    recvdstr, intsz, numints, buf);
			if (err != ENOENT) {
				if (setpoint != NULL && err == 0) {
					if (inheriting) {
						dsl_dir_name(dd, setpoint);
					} else {
						(void) strcpy(setpoint,
						    ZPROP_SOURCE_VAL_RECVD);
					}
				}
				break;
			}
		}

		/*
		 * If we found an explicit inheritance entry, err is zero even
		 * though we haven't yet found the value, so reinitializing err
		 * at the end of the loop (instead of at the beginning) ensures
		 * that err has a valid post-loop value.
		 */
		err = SET_ERROR(ENOENT);
	}

	if (err == ENOENT)
		err = dodefault(propname, intsz, numints, buf);

	strfree(inheritstr);
	strfree(recvdstr);

	return (err);
}
Example #6
/**
   UTG deactivated event handling function. Is called from interrupt routine.
\param
   pChannel     - handle to TAPI_CONNECTION structure
\return
   none
*/
IFX_void_t Tone_EventUTG_Deactivated (TAPI_CONNECTION *pChannel,
                                      IFX_uint8_t utgNum )
{
   VINETIC_DEVICE  *pDev = (VINETIC_DEVICE *)pChannel->pDevice;
   VINETIC_CHANNEL *pCh  = &pDev->pChannel[pChannel->nChannel];
   TAPI_TONE_DATA* pData;
   FWM_SIG_UTG* pUtgCmd;

   /* check utgNum before using it as an array index;
   please note that resources like "utgNum" are counted from #0 */
   if (utgNum >= pDev->VinCapabl.nUtgPerCh)
   {
      SET_ERROR (ERR_NORESOURCE);
      return;
   }
   pUtgCmd = &pDev->pSigCh[pChannel->nChannel].utg[utgNum];

   /* possible utg resources, #0 and #1 */
   pData = &pChannel->TapiComplexToneData[utgNum];
   /*
   TRACE(VINETIC, DBG_LEVEL_LOW,
        ("Tone_EventUTG_Deactivated: On IRQ -> Deactivate UTG-Resource %d \n\r", utgNum));
   */
   /* reset the cache state */
   pUtgCmd->bit.sm = 0;
   pUtgCmd->bit.en = 0;
   /* Tone generation is in deactivated state */
   pData->nToneState = TAPI_CT_DEACTIVATED;
   /* This is a simple tone so prepare for next repetition (if any)
      of a simple tone sequence to play    */
   if (pData->nType == TAPI_TONE_TYPE_SIMPLE)
   {
      Event_SimpleTone(pChannel, utgNum);
   }

   /* This is a composed tone so retrieve the next (if any) simple tone
      sequence to play    */
   else if (pData->nType == TAPI_TONE_TYPE_COMP)
   {
      /* Prepare for the next repetition (if any) of a simple tone */
      Event_SimpleTone(pChannel, utgNum);

      if (pData->nToneState == TAPI_CT_IDLE)
      {
         if (pData->nPauseTime == 0)
         {
            /* Cadence time zero is not possible for repeating tones */
            return;
         }
         /* Init the simple tone repetition counter again */
         pData->nSimpleCurrReps = 0;
         /* Setup the next simple tone to play within the composed tone */
         pData->nToneCounter++;
         /* There are no more simple tones to play within the composed tone */
         if (pData->nToneCounter >= pData->nMaxToneCount)
         {
            /* Init the tone counter again to play repetitive composed tones */
            pData->nToneCounter = 0;
            /* Prepare for the next repetition (if any) of a composed tone */
            Event_ComposedTone(pChannel, utgNum);
         }

         /* Kick off the next simple tone sequence to play */
         else
         {
            /* Start the voice path teardown timer */
            TAPI_SetTime_Timer (pCh->pToneRes[utgNum].Tone_Timer,
                                pData->nPauseTime,
                                IFX_FALSE, IFX_FALSE);
         }
      }
   }
   return;
}
Example #7
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
    objset_t **osp)
{
	objset_t *os;
	int i, err;

	ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));

	os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
	os->os_dsl_dataset = ds;
	os->os_spa = spa;
	os->os_rootbp = bp;
	if (!BP_IS_HOLE(os->os_rootbp)) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		zbookmark_phys_t zb;
		SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
		    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);

		if (DMU_OS_IS_L2CACHEABLE(os))
			aflags |= ARC_FLAG_L2CACHE;
		if (DMU_OS_IS_L2COMPRESSIBLE(os))
			aflags |= ARC_FLAG_L2COMPRESS;

		dprintf_bp(os->os_rootbp, "reading %s", "");
		err = arc_read(NULL, spa, os->os_rootbp,
		    arc_getbuf_func, &os->os_phys_buf,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
		if (err != 0) {
			kmem_free(os, sizeof (objset_t));
			/* convert checksum errors into IO errors */
			if (err == ECKSUM)
				err = SET_ERROR(EIO);
			return (err);
		}

		/* Increase the blocksize if we are permitted. */
		if (spa_version(spa) >= SPA_VERSION_USERSPACE &&
		    arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) {
			arc_buf_t *buf = arc_buf_alloc(spa,
			    sizeof (objset_phys_t), &os->os_phys_buf,
			    ARC_BUFC_METADATA);
			bzero(buf->b_data, sizeof (objset_phys_t));
			bcopy(os->os_phys_buf->b_data, buf->b_data,
			    arc_buf_size(os->os_phys_buf));
			(void) arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf);
			os->os_phys_buf = buf;
		}

		os->os_phys = os->os_phys_buf->b_data;
		os->os_flags = os->os_phys->os_flags;
	} else {
		int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
		    sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE;
		os->os_phys_buf = arc_buf_alloc(spa, size,
		    &os->os_phys_buf, ARC_BUFC_METADATA);
		os->os_phys = os->os_phys_buf->b_data;
		bzero(os->os_phys, size);
	}

	/*
	 * Note: the changed_cb will be called once before the register
	 * func returns, thus changing the checksum/compression from the
	 * default (fletcher2/off).  Snapshots don't need to know about
	 * checksum/compression/copies.
	 */
	if (ds != NULL) {
		err = dsl_prop_register(ds,
		    zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
		    primary_cache_changed_cb, os);
		if (err == 0) {
			err = dsl_prop_register(ds,
			    zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
			    secondary_cache_changed_cb, os);
		}
		if (!ds->ds_is_snapshot) {
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
				    checksum_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    compression_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_COPIES),
				    copies_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_DEDUP),
				    dedup_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_LOGBIAS),
				    logbias_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_SYNC),
				    sync_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(
				    ZFS_PROP_REDUNDANT_METADATA),
				    redundant_metadata_changed_cb, os);
			}
			if (err == 0) {
				err = dsl_prop_register(ds,
				    zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
				    recordsize_changed_cb, os);
			}
		}
		if (err != 0) {
			VERIFY(arc_buf_remove_ref(os->os_phys_buf,
			    &os->os_phys_buf));
			kmem_free(os, sizeof (objset_t));
			return (err);
		}
	} else {
		/* It's the meta-objset. */
		os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
		os->os_compress = ZIO_COMPRESS_ON;
		os->os_copies = spa_max_replication(spa);
		os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
		os->os_dedup_verify = B_FALSE;
		os->os_logbias = ZFS_LOGBIAS_LATENCY;
		os->os_sync = ZFS_SYNC_STANDARD;
		os->os_primary_cache = ZFS_CACHE_ALL;
		os->os_secondary_cache = ZFS_CACHE_ALL;
	}

	if (ds == NULL || !ds->ds_is_snapshot)
		os->os_zil_header = os->os_phys->os_zil_header;
	os->os_zil = zil_alloc(os, &os->os_zil_header);

	for (i = 0; i < TXG_SIZE; i++) {
		list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
		list_create(&os->os_free_dnodes[i], sizeof (dnode_t),
		    offsetof(dnode_t, dn_dirty_link[i]));
	}
	list_create(&os->os_dnodes, sizeof (dnode_t),
	    offsetof(dnode_t, dn_link));
	list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);

	dnode_special_open(os, &os->os_phys->os_meta_dnode,
	    DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
	if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) {
		dnode_special_open(os, &os->os_phys->os_userused_dnode,
		    DMU_USERUSED_OBJECT, &os->os_userused_dnode);
		dnode_special_open(os, &os->os_phys->os_groupused_dnode,
		    DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
	}

	*osp = os;
	return (0);
}
Example #8
/********************************************************************
* FUNCTION send_lock_pdu_to_server
* 
* Send a <lock> or <unlock> operation to the server
*
* INPUTS:
*   server_cb == server control block to use
*   lockcb == lock control block to use within server_cb
*   islock == TRUE for lock; FALSE for unlock
*
* RETURNS:
*    status
*********************************************************************/
static status_t
    send_lock_pdu_to_server (server_cb_t *server_cb,
                            lock_cb_t *lockcb,
                            boolean islock)
{
    obj_template_t        *rpc, *input;
    mgr_rpc_req_t         *req;
    val_value_t           *reqdata, *targetval, *parmval;
    ses_cb_t              *scb;
    status_t               res;
    xmlns_id_t             obj_nsid;

    req = NULL;
    reqdata = NULL;
    res = NO_ERR;

    if (LOGDEBUG) {
        log_debug("\nSending <%s> request",
                  (islock) ? NCX_EL_LOCK : NCX_EL_UNLOCK);
    }

    if (islock) {
        rpc = ncx_find_object(get_netconf_mod(server_cb), 
                              NCX_EL_LOCK);
    } else {
        rpc = ncx_find_object(get_netconf_mod(server_cb), 
                              NCX_EL_UNLOCK);
    }
    if (!rpc) {
        return SET_ERROR(ERR_NCX_DEF_NOT_FOUND);
    }

    obj_nsid = obj_get_nsid(rpc);

    /* get the 'input' section container */
    input = obj_find_child(rpc, NULL, YANG_K_INPUT);
    if (!input) {
        return SET_ERROR(ERR_NCX_DEF_NOT_FOUND);
    }

    /* construct a method + parameter tree */
    reqdata = xml_val_new_struct(obj_get_name(rpc), obj_nsid);
    if (!reqdata) {
        log_error("\nError allocating a new RPC request");
        return ERR_INTERNAL_MEM;
    }

    /* set the [un]lock/input/target node XML namespace */
    targetval = xml_val_new_struct(NCX_EL_TARGET, obj_nsid);
    if (!targetval) {
        log_error("\nError allocating a new RPC request");
        val_free_value(reqdata);
        return ERR_INTERNAL_MEM;
    } else {
        val_add_child(targetval, reqdata);
    }

    parmval = xml_val_new_flag(lockcb->config_name,
                               obj_nsid);
    if (!parmval) {
        val_free_value(reqdata);
        return ERR_INTERNAL_MEM;
    } else {
        val_add_child(parmval, targetval);
    }

    scb = mgr_ses_get_scb(server_cb->mysid);
    if (!scb) {
        res = SET_ERROR(ERR_INTERNAL_PTR);
    } else {
        req = mgr_rpc_new_request(scb);
        if (!req) {
            res = ERR_INTERNAL_MEM;
            log_error("\nError allocating a new RPC request");
        } else {
            req->data = reqdata;
            req->rpc = rpc;
            req->timeout = server_cb->timeout;
        }
    }
        
    /* if all OK, send the RPC request */
    if (res == NO_ERR) {
        if (LOGDEBUG2) {
            log_debug2("\nabout to send RPC request with reqdata:");
            val_dump_value_max(reqdata, 
                               0,
                               server_cb->defindent,
                               DUMP_VAL_LOG,
                               server_cb->display_mode,
                               FALSE,
                               FALSE);
        }

        /* the request will be stored if this returns NO_ERR */
        res = mgr_rpc_send_request(scb, req, yangcli_reply_handler);
        if (res == NO_ERR) {
            if (islock) {
                lockcb->lock_state = LOCK_STATE_REQUEST_SENT;
            } else {
                lockcb->lock_state = LOCK_STATE_RELEASE_SENT;
            }
            (void)uptime(&lockcb->last_msg_time);
            server_cb->locks_cur_cfg = lockcb->config_id;
        }
    }

    /* cleanup and set next state */
    if (res != NO_ERR) {
        if (req) {
            mgr_rpc_free_request(req);
        } else if (reqdata) {
            val_free_value(reqdata);
        }
    } else {
        server_cb->state = MGR_IO_ST_CONN_RPYWAIT;
    }

    return res;

} /* send_lock_pdu_to_server */
Example #9
/********************************************************************
* FUNCTION handle_get_locks_request_to_server
* 
* Send the first <lock> operation to the server
* in a get-locks command
*
* INPUTS:
*   server_cb == server control block to use
*   first == TRUE if this is the first call; FALSE otherwise
*   done == address of return final done flag
*
* OUTPUTS:
*    server_cb->state may be changed or other action taken
*    *done == TRUE when the return code is NO_ERR and all
*                 the locks are granted
*             FALSE otherwise
* RETURNS:
*    status; if NO_ERR then check *done flag
*            otherwise *done is TRUE on any error
*********************************************************************/
status_t
    handle_get_locks_request_to_server (server_cb_t *server_cb,
                                       boolean first,
                                       boolean *done)
{
    lock_cb_t      *lockcb;
    ncx_cfg_t       cfg_id;
    time_t          timenow;
    double          timediff;
    status_t        res;
    boolean         finddone, stillwaiting;

#ifdef DEBUG
    if (!server_cb || !done) {
        return  SET_ERROR(ERR_INTERNAL_PTR);
    }
#endif

    res = NO_ERR;
    *done = FALSE;
    finddone = FALSE;
    stillwaiting = FALSE;
    lockcb = NULL;
    (void)uptime(&timenow);

    if (first) {
        /* this is the first try, start the overall timer */
        (void)uptime(&server_cb->locks_start_time);
    } else if (check_locks_timeout(server_cb)) {
        log_error("\nError: get-locks timeout");
        handle_locks_cleanup(server_cb);
        return ERR_NCX_TIMEOUT;
    }

    /* find a config that needs a lock PDU sent
     * first check for a config that has not sent
     * any request yet
     */
    for (cfg_id = NCX_CFGID_RUNNING;
         cfg_id <= NCX_CFGID_STARTUP && !finddone;
         cfg_id++) {

        lockcb = &server_cb->lock_cb[cfg_id];

        if (lockcb->lock_used) {
            if (lockcb->lock_state == LOCK_STATE_IDLE) {
                finddone = TRUE;
            } else if (lockcb->lock_state == LOCK_STATE_FATAL_ERROR) {
                log_error("\nError: fatal error getting lock"
                          " on the %s config",
                          lockcb->config_name);
                return ERR_NCX_OPERATION_FAILED;
            }
        }
    }

    if (!finddone) {
        /* all entries that need to send a lock
         * request have tried at least once to do that
         * now go through all the states and see if any
         * retries are needed; exit if a fatal error is found
         */
        stillwaiting = FALSE;
        for (cfg_id = NCX_CFGID_RUNNING;
             cfg_id <= NCX_CFGID_STARTUP && !finddone;
             cfg_id++) {

            lockcb = &server_cb->lock_cb[cfg_id];
            if (lockcb->lock_used) {
                if (lockcb->lock_state == LOCK_STATE_TEMP_ERROR) {
                    timediff = difftime(timenow, lockcb->last_msg_time);
                    if (timediff >= 
                        (double)server_cb->locks_retry_interval) {
                        finddone = TRUE;
                    } else {
                        server_cb->locks_waiting = TRUE;
                        stillwaiting = TRUE;
                    }
                }
            }
        }

        /* check if there is more work to do after this 
         * function exits
         */
        if (!finddone && !stillwaiting) {
            *done = TRUE;
        }
    }

    /* check if a <lock> request needs to be sent */
    if (finddone && lockcb) {
        server_cb->command_mode = CMD_MODE_AUTOLOCK;
        res = send_lock_pdu_to_server(server_cb,
                                     lockcb,
                                     TRUE);
    }

    return res;
    
}  /* handle_get_locks_request_to_server */
Example #10
/*
 * Avoid inlining the function to keep vdev_mirror_io_start(), which
 * is this function's only caller, as small as possible on the stack.
 */
noinline static mirror_map_t *
vdev_mirror_map_alloc(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c, d;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;

		c = BP_GET_NDVAS(zio->io_bp);

		mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]),
		    KM_PUSHPAGE);
		mm->mm_children = c;
		mm->mm_replacing = B_FALSE;
		mm->mm_preferred = spa_get_random(c);
		mm->mm_root = B_TRUE;

		/*
		 * Check the other, lower-index DVAs to see if they're on
		 * the same vdev as the child we picked.  If they are, use
		 * them since they are likely to have been allocated from
		 * the primary metaslab in use at the time, and hence are
		 * more likely to have locality with single-copy data.
		 */
		for (c = mm->mm_preferred, d = c - 1; d >= 0; d--) {
			if (DVA_GET_VDEV(&dva[d]) == DVA_GET_VDEV(&dva[c]))
				mm->mm_preferred = d;
		}

		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];

			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
		}
	} else {
		int lowest_pending = INT_MAX;
		int lowest_nr = 1;

		c = vd->vdev_children;

		mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]),
		    KM_PUSHPAGE);
		mm->mm_children = c;
		mm->mm_replacing = (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops);
		mm->mm_preferred = 0;
		mm->mm_root = B_FALSE;

		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;

			if (mm->mm_replacing)
				continue;

			if (!vdev_readable(mc->mc_vd)) {
				mc->mc_error = SET_ERROR(ENXIO);
				mc->mc_tried = 1;
				mc->mc_skipped = 1;
				mc->mc_pending = INT_MAX;
				continue;
			}

			mc->mc_pending = vdev_mirror_pending(mc->mc_vd);
			if (mc->mc_pending < lowest_pending) {
				lowest_pending = mc->mc_pending;
				lowest_nr = 1;
			} else if (mc->mc_pending == lowest_pending) {
				lowest_nr++;
			}
		}

		d = gethrtime() / (NSEC_PER_USEC * zfs_vdev_mirror_switch_us);
		d = (d % lowest_nr) + 1;

		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];

			if (mm->mm_child[c].mc_pending == lowest_pending) {
				if (--d == 0) {
					mm->mm_preferred = c;
					break;
				}
			}
		}
	}

	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
	return (mm);
}
Example #11
static int
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_disk_t *dvd = vd->vdev_tsd;
	struct buf *bp;
	vfs_context_t context;
	int flags, error = 0;

	if (zio->io_type == ZIO_TYPE_IOCTL) {
		zio_vdev_io_bypass(zio);

		/* XXPOLICY */
		if (vdev_is_dead(vd)) {
			zio->io_error = ENXIO;
			//zio_next_stage_async(zio);
			return (ZIO_PIPELINE_CONTINUE);
            //return;
		}

		switch (zio->io_cmd) {

		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (vd->vdev_nowritecache) {
				zio->io_error = SET_ERROR(ENOTSUP);
				break;
			}

			context = vfs_context_create((vfs_context_t)0);
			error = VNOP_IOCTL(dvd->vd_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context);
			(void) vfs_context_rele(context);

			if (error == 0)
				vdev_disk_ioctl_done(zio, error);
			else
				error = ENOTSUP;

			if (error == 0) {
				/*
				 * The ioctl will be done asynchronously,
				 * and will call vdev_disk_ioctl_done()
				 * upon completion.
				 */
				return ZIO_PIPELINE_STOP;
			} else if (error == ENOTSUP || error == ENOTTY) {
				/*
				 * If we get ENOTSUP or ENOTTY, we know that
				 * no future attempts will ever succeed.
				 * In this case we set a persistent bit so
				 * that we don't bother with the ioctl in the
				 * future.
				 */
				vd->vdev_nowritecache = B_TRUE;
			}
			zio->io_error = error;

			break;

		default:
			zio->io_error = SET_ERROR(ENOTSUP);
		}

		//zio_next_stage_async(zio);
        return (ZIO_PIPELINE_CONTINUE);
	}

	if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio) == 0)
        return (ZIO_PIPELINE_STOP);
    //		return;

	if ((zio = vdev_queue_io(zio)) == NULL)
        return (ZIO_PIPELINE_CONTINUE);
    //		return;

	flags = (zio->io_type == ZIO_TYPE_READ ? B_READ : B_WRITE);
	//flags |= B_NOCACHE;

	if (zio->io_flags & ZIO_FLAG_FAILFAST)
		flags |= B_FAILFAST;

	/*
	 * Check the state of this device to see if it has been offlined or
	 * is in an error state.  If the device was offlined or closed,
	 * dvd will be NULL and buf_alloc below will fail
	 */
	//error = vdev_is_dead(vd) ? ENXIO : vdev_error_inject(vd, zio);
	if (vdev_is_dead(vd)) {
        error = ENXIO;
    }

	if (error) {
		zio->io_error = error;
		//zio_next_stage_async(zio);
		return (ZIO_PIPELINE_CONTINUE);
	}

	bp = buf_alloc(dvd->vd_devvp);

	ASSERT(bp != NULL);
	ASSERT(zio->io_data != NULL);
	ASSERT(zio->io_size != 0);

	buf_setflags(bp, flags);
	buf_setcount(bp, zio->io_size);
	buf_setdataptr(bp, (uintptr_t)zio->io_data);
    if (dvd->vd_ashift) {
        buf_setlblkno(bp, zio->io_offset>>dvd->vd_ashift);
        buf_setblkno(bp,  zio->io_offset>>dvd->vd_ashift);
    } else {
Example #12
SP_PRIV enum sp_return list_ports(struct sp_port ***list)
{
	char name[PATH_MAX], target[PATH_MAX];
	struct dirent entry, *result;
#ifdef HAVE_STRUCT_SERIAL_STRUCT
	struct serial_struct serial_info;
	int ioctl_result;
#endif
	char buf[sizeof(entry.d_name) + 23];
	int len, fd;
	DIR *dir;
	int ret = SP_OK;
	struct stat statbuf;

	DEBUG("Enumerating tty devices");
	if (!(dir = opendir("/sys/class/tty")))
		RETURN_FAIL("Could not open /sys/class/tty");

	DEBUG("Iterating over results");
	while (!readdir_r(dir, &entry, &result) && result) {
		snprintf(buf, sizeof(buf), "/sys/class/tty/%s", entry.d_name);
		if (lstat(buf, &statbuf) == -1)
			continue;
		if (!S_ISLNK(statbuf.st_mode))
			snprintf(buf, sizeof(buf), "/sys/class/tty/%s/device", entry.d_name);
		len = readlink(buf, target, sizeof(target));
		if (len <= 0 || len >= (int)(sizeof(target) - 1))
			continue;
		target[len] = 0;
		if (strstr(target, "virtual"))
			continue;
		snprintf(name, sizeof(name), "/dev/%s", entry.d_name);
		DEBUG_FMT("Found device %s", name);
		if (strstr(target, "serial8250")) {
			/*
			 * The serial8250 driver has a hardcoded number of ports.
			 * The only way to tell which actually exist on a given system
			 * is to try to open them and make an ioctl call.
			 */
			DEBUG("serial8250 device, attempting to open");
			if ((fd = open(name, O_RDWR | O_NONBLOCK | O_NOCTTY)) < 0) {
				DEBUG("Open failed, skipping");
				continue;
			}
#ifdef HAVE_STRUCT_SERIAL_STRUCT
			ioctl_result = ioctl(fd, TIOCGSERIAL, &serial_info);
#endif
			close(fd);
#ifdef HAVE_STRUCT_SERIAL_STRUCT
			if (ioctl_result != 0) {
				DEBUG("ioctl failed, skipping");
				continue;
			}
			if (serial_info.type == PORT_UNKNOWN) {
				DEBUG("Port type is unknown, skipping");
				continue;
			}
#endif
		}
		DEBUG_FMT("Found port %s", name);
		*list = list_append(*list, name);
		if (!*list) {
			SET_ERROR(ret, SP_ERR_MEM, "List append failed");
			break;
		}
	}
	closedir(dir);

	return ret;
}
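The function above is the Linux backend behind libserialport's public enumeration call. A minimal application-side usage sketch against the documented public API (sp_list_ports / sp_get_port_name / sp_free_port_list):

/* Sketch: list serial ports via the public libserialport API. */
#include <stdio.h>
#include <libserialport.h>

int main(void)
{
	struct sp_port **ports;
	int i;

	if (sp_list_ports(&ports) != SP_OK)
		return 1;
	for (i = 0; ports[i] != NULL; i++)
		printf("%s\n", sp_get_port_name(ports[i]));
	sp_free_port_list(ports);
	return 0;
}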
Example #13
int
zfs_sb_create(const char *osname, zfs_mntopts_t *zmo, zfs_sb_t **zsbp)
{
	objset_t *os;
	zfs_sb_t *zsb;
	uint64_t zval;
	int i, error;
	uint64_t sa_obj;

	zsb = kmem_zalloc(sizeof (zfs_sb_t), KM_SLEEP);

	/*
	 * We claim to always be readonly so we can open snapshots;
	 * other ZPL code will prevent us from writing to snapshots.
	 */
	error = dmu_objset_own(osname, DMU_OST_ZFS, B_TRUE, zsb, &os);
	if (error) {
		kmem_free(zsb, sizeof (zfs_sb_t));
		return (error);
	}

	/*
	 * Optional temporary mount options, free'd in zfs_sb_free().
	 */
	zsb->z_mntopts = (zmo ? zmo : zfs_mntopts_alloc());

	/*
	 * Initialize the zfs-specific filesystem structure.
	 * Should probably make this a kmem cache, shuffle fields,
	 * and just bzero up to z_hold_mtx[].
	 */
	zsb->z_sb = NULL;
	zsb->z_parent = zsb;
	zsb->z_max_blksz = SPA_OLD_MAXBLOCKSIZE;
	zsb->z_show_ctldir = ZFS_SNAPDIR_VISIBLE;
	zsb->z_os = os;

	error = zfs_get_zplprop(os, ZFS_PROP_VERSION, &zsb->z_version);
	if (error) {
		goto out;
	} else if (zsb->z_version > ZPL_VERSION) {
		error = SET_ERROR(ENOTSUP);
		goto out;
	}
	if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
		goto out;
	zsb->z_norm = (int)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &zval)) != 0)
		goto out;
	zsb->z_utf8 = (zval != 0);

	if ((error = zfs_get_zplprop(os, ZFS_PROP_CASE, &zval)) != 0)
		goto out;
	zsb->z_case = (uint_t)zval;

	if ((error = zfs_get_zplprop(os, ZFS_PROP_ACLTYPE, &zval)) != 0)
		goto out;
	zsb->z_acl_type = (uint_t)zval;

	/*
	 * Fold case on file systems that are always or sometimes case
	 * insensitive.
	 */
	if (zsb->z_case == ZFS_CASE_INSENSITIVE ||
	    zsb->z_case == ZFS_CASE_MIXED)
		zsb->z_norm |= U8_TEXTPREP_TOUPPER;

	zsb->z_use_fuids = USE_FUIDS(zsb->z_version, zsb->z_os);
	zsb->z_use_sa = USE_SA(zsb->z_version, zsb->z_os);

	if (zsb->z_use_sa) {
		/* should either have both of these objects or none */
		error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1,
		    &sa_obj);
		if (error)
			goto out;

		error = zfs_get_zplprop(os, ZFS_PROP_XATTR, &zval);
		if ((error == 0) && (zval == ZFS_XATTR_SA))
			zsb->z_xattr_sa = B_TRUE;
	} else {
		/*
		 * Pre SA versions file systems should never touch
		 * either the attribute registration or layout objects.
		 */
		sa_obj = 0;
	}

	error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
	    &zsb->z_attr_table);
	if (error)
		goto out;

	if (zsb->z_version >= ZPL_VERSION_SA)
		sa_register_update_callback(os, zfs_sa_upgrade);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1,
	    &zsb->z_root);
	if (error)
		goto out;
	ASSERT(zsb->z_root != 0);

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
	    &zsb->z_unlinkedobj);
	if (error)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA],
	    8, 1, &zsb->z_userquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ,
	    zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA],
	    8, 1, &zsb->z_groupquota_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES, 8, 1,
	    &zsb->z_fuid_obj);
	if (error && error != ENOENT)
		goto out;

	error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_SHARES_DIR, 8, 1,
	    &zsb->z_shares_dir);
	if (error && error != ENOENT)
		goto out;

	mutex_init(&zsb->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&zsb->z_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&zsb->z_all_znodes, sizeof (znode_t),
	    offsetof(znode_t, z_link_node));
	rrm_init(&zsb->z_teardown_lock, B_FALSE);
	rw_init(&zsb->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
	rw_init(&zsb->z_fuid_lock, NULL, RW_DEFAULT, NULL);

	zsb->z_hold_mtx = vmem_zalloc(sizeof (kmutex_t) * ZFS_OBJ_MTX_SZ,
	    KM_SLEEP);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_init(&zsb->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);

	*zsbp = zsb;
	return (0);

out:
	dmu_objset_disown(os, zsb);
	*zsbp = NULL;

	/* z_hold_mtx is still NULL on every error path that reaches here */
	if (zsb->z_hold_mtx != NULL)
		vmem_free(zsb->z_hold_mtx, sizeof (kmutex_t) * ZFS_OBJ_MTX_SZ);
	kmem_free(zsb, sizeof (zfs_sb_t));
	return (error);
}
Example #14
static int
zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
    uint64_t *userp, uint64_t *groupp)
{
	/*
	 * Is it a valid type of object to track?
	 */
	if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
		return (SET_ERROR(ENOENT));

	/*
	 * If we have a NULL data pointer
	 * then assume the id's aren't changing and
	 * return EEXIST to the dmu to let it know to
	 * use the same ids
	 */
	if (data == NULL)
		return (SET_ERROR(EEXIST));

	if (bonustype == DMU_OT_ZNODE) {
		znode_phys_t *znp = data;
		*userp = znp->zp_uid;
		*groupp = znp->zp_gid;
	} else {
		int hdrsize;
		sa_hdr_phys_t *sap = data;
		sa_hdr_phys_t sa = *sap;
		boolean_t swap = B_FALSE;

		ASSERT(bonustype == DMU_OT_SA);

		if (sa.sa_magic == 0) {
			/*
			 * This should only happen for newly created
			 * files that haven't had the znode data filled
			 * in yet.
			 */
			*userp = 0;
			*groupp = 0;
			return (0);
		}
		if (sa.sa_magic == BSWAP_32(SA_MAGIC)) {
			sa.sa_magic = SA_MAGIC;
			sa.sa_layout_info = BSWAP_16(sa.sa_layout_info);
			swap = B_TRUE;
		} else {
			VERIFY3U(sa.sa_magic, ==, SA_MAGIC);
		}

		hdrsize = sa_hdrsize(&sa);
		VERIFY3U(hdrsize, >=, sizeof (sa_hdr_phys_t));
		*userp = *((uint64_t *)((uintptr_t)data + hdrsize +
		    SA_UID_OFFSET));
		*groupp = *((uint64_t *)((uintptr_t)data + hdrsize +
		    SA_GID_OFFSET));
		if (swap) {
			*userp = BSWAP_64(*userp);
			*groupp = BSWAP_64(*groupp);
		}
	}
	return (0);
}
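For reference, a callback with this signature is consumed by the DMU's user/group used-space accounting; in ZFS it is hooked up with a single registration call at module init (a one-line sketch, assuming the standard dmu_objset_register_type() interface):

/* Sketch: register the space-delta callback for all ZFS-type objsets. */
dmu_objset_register_type(DMU_OST_ZFS, zfs_space_delta_cb);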
Example #15
/********************************************************************
* FUNCTION help_program_module
*
* Print the full help text for an entire program module to STDOUT
*
* INPUTS:
*    modname == module name without file suffix
*    cliname == name of CLI parmset within the modname module
*    mode == amount of help detail to print (brief, normal, or full)
*
*********************************************************************/
void
    help_program_module (const xmlChar *modname,
                         const xmlChar *cliname,
                         help_mode_t mode)
{
    ncx_module_t         *mod;
    obj_template_t       *cli;
    uint32                nestlevel;
    help_mode_t           usemode;

#ifdef DEBUG
    if (!modname) {
        SET_ERROR(ERR_INTERNAL_PTR);
        return;
    }
    if (mode == HELP_MODE_NONE || mode > HELP_MODE_FULL) {
        SET_ERROR(ERR_INTERNAL_VAL);
        return;
    }
#endif

    nestlevel = get_nestlevel(mode);

    mod = ncx_find_module(modname, NULL);
    if (!mod) {
        log_error("\nhelp: Module '%s' not found", modname);
        SET_ERROR(ERR_NCX_MOD_NOT_FOUND);
        return;
    }

    log_stdout("\n\n  Program %s", mod->name);
    log_stdout("\n\n  Usage:");
    log_stdout("\n\n    %s [parameters]", mod->name);
    if (mode != HELP_MODE_BRIEF) {
        log_stdout("\n\n  Parameters can be entered in any order, and have ");
        log_stdout("the form:");
        log_stdout("\n\n    [start] name separator [value]");
        log_stdout("\n\n  where:");
        log_stdout("\n\n    start == 0, 1, or 2 dashes (foo, -foo, --foo)");
        log_stdout("\n\n    name == parameter name (foo)"
                   "\n\n  Parameter name completion "
                   "will be attempted "
                   "\n  if a partial name is entered.");
        log_stdout("\n\n    separator == whitespace or equals sign "
                   "(foo=bar, foo bar)");
        log_stdout("\n\n    value == string value for the parameter");
        log_stdout("\n\n Strings with whitespace need to be "
                   "double quoted."
                   "\n    (--foo=\"some string\")");
    }

    if (mode == HELP_MODE_FULL && mod->descr) {
        log_stdout("\n\n  Description:");
        help_write_lines(mod->descr, 4, TRUE);
    }

    if (cliname) {
        cli = ncx_find_object(mod, cliname);
        if (!cli) {
            log_error("\nhelp: CLI Object %s not found", cliname);
            SET_ERROR(ERR_NCX_DEF_NOT_FOUND);
            return;
        } else if (cli->objtype == OBJ_TYP_CONTAINER) {
            log_stdout("\n\n Command Line Parameters");
            log_stdout("\n\n Key:  parm-name [built-in-type] [d:default]\n");


            if (mode == HELP_MODE_BRIEF) {
                usemode = HELP_MODE_NORMAL;
            } else {
                usemode = HELP_MODE_FULL;
            }
                
            obj_dump_datadefQ(obj_get_datadefQ(cli), 
                              usemode, 
                              nestlevel, 
                              4);
            log_stdout("\n");
        }
    }

    if (obj_any_rpcs(&mod->datadefQ) && mode == HELP_MODE_FULL) {
        log_stdout("\n\n  Local Commands\n");
        dump_rpcQ(&mod->datadefQ, mode, 4);
    }

}  /* help_program_module */
Example #16
/********************************************************************
* FUNCTION handle_release_locks_request_to_server
* 
* Send an <unlock> operation to the server
* in a get-locks command teardown or a release-locks
* operation
*
* INPUTS:
*   server_cb == server control block to use
*   first == TRUE if this is the first call; FALSE otherwise
*   done == address of return final done flag
*
* OUTPUTS:
*    server_cb->state may be changed or other action taken
*    *done == TRUE when the return code is NO_ERR and all
*                 the locks are granted
*             FALSE otherwise
* RETURNS:
*    status; if NO_ERR then check *done flag
*            otherwise *done is TRUE on any error
*********************************************************************/
status_t
    handle_release_locks_request_to_server (server_cb_t *server_cb,
                                           boolean first,
                                           boolean *done)
{
    lock_cb_t      *lockcb;
    ncx_cfg_t       cfg_id;
    status_t        res;
    boolean         finddone;

#ifdef DEBUG
    if (!server_cb || !done) {
        return  SET_ERROR(ERR_INTERNAL_PTR);
    }
#endif

    lockcb = NULL;
    res = NO_ERR;
    finddone = FALSE;
    *done = FALSE;

    if (first) {
        server_cb->command_mode = CMD_MODE_AUTOUNLOCK;
        (void)uptime(&server_cb->locks_start_time);
    } else if (check_locks_timeout(server_cb)) {
        log_error("\nError: release-locks timeout");
        clear_lock_cbs(server_cb);
        return ERR_NCX_TIMEOUT;
    }

    /* do these in order because there are no
     * temporary errors for unlock
     */
    for (cfg_id = NCX_CFGID_RUNNING;
         cfg_id <= NCX_CFGID_STARTUP && !finddone;
         cfg_id++) {

        lockcb = &server_cb->lock_cb[cfg_id];
        if (lockcb->lock_used &&
            lockcb->lock_state == LOCK_STATE_ACTIVE) {
            finddone = TRUE;
        }
    }

    if (!finddone) {
        /* nothing to do */
        if (first) {
            log_info("\nNo locks to release");
        }
        server_cb->state = MGR_IO_ST_CONN_IDLE;
        clear_lock_cbs(server_cb);
        *done = TRUE;
    } else {
        res = send_lock_pdu_to_server(server_cb,
                                     lockcb,
                                     FALSE);
    }

    return res;
    
}  /* handle_release_locks_request_to_server */
Example #17
/**
   Do low level UTG (Universal Tone Generator) configuration and activation
\param
   pChannel      - handle to TAPI_CONNECTION structure
\param
   pSimpleTone   - internal simple tone table entry
\return
   IFX_SUCCESS/IFX_ERROR
\remarks
   The selected signalling channel has to be activated before this command can
   be sent.
   For the UTG, the coefficients have to be programmed with a separate command
   before the UTG can be activated. The UTG should be inactive when programming
   the coefficients.
   The DTMF / AT generator is overlaid with the tone generator, which means
   the same resources for the DTMF / AT generator and the UTG must not be
   active simultaneously! DTMF / AT will be deactivated to avoid simultaneous
   activation.

   It is assumed that the UTG is inactive before calling this function, which
   programs the UTG coefficients.
   In case the UTG event interrupt is needed, the calling function should make
   sure that the DTMF / AT generator is deactivated beforehand.
   This function is completely protected against concurrent member access.
*/
IFX_int32_t Tone_UTG_Start (TAPI_CONNECTION *pChannel,
                            IFX_TAPI_TONE_SIMPLE_t const *pSimpleTone,
                            TAPI_TONE_DST dst,
                            IFX_uint8_t utgNum )
{
   IFX_int32_t      ret = IFX_ERROR;
#if (VIN_CFG_FEATURES & VIN_FEAT_VOICE)
   VINETIC_DEVICE  *pDev = (VINETIC_DEVICE *)pChannel->pDevice;
   VINETIC_CHANNEL *pCh  = &pDev->pChannel[pChannel->nChannel];
   FWM_SIG_UTG* pUtgCmd = &pDev->pSigCh[pChannel->nChannel].utg[utgNum];
   IFX_uint16_t     pCmdCoef [27] = {0};
   IFX_uint8_t      utgRes = 0, ch = pCh->nChannel - 1;

   /* calling function ensures valid parameter */
   IFXOS_ASSERT(pSimpleTone != IFX_NULL);

   /* check utgNum,
   please note that resources like "utgNum" are counted from #0 */
   if (utgNum >= pDev->VinCapabl.nUtgPerCh)
   {
      SET_ERROR (ERR_NORESOURCE);
      return IFX_ERROR;
   }

   /* choose utg resource according to the FW capabilities */
   if (utgNum == 0)
   {  /* resources #0..#3 for first utg */
      utgRes = (IFX_uint8_t)ch;
      /* Setup UTG1 interrupt masks.
      Note : Mask rising edge interrupts for VIN_UTG1_ACT_MASK.
             Allow falling edge interrupts for VIN_UTG1_ACT_MASK.
      Note: VIN_UTG1_ACT_MASK is overlaid with V2CPE_EDSP1_INT1_DTMFG_ACT
            For this reason the mask settings are set here within "Tone_UTG_Start"! */
      ret = VINETIC_Host_Set_EdspIntMask (pCh, VIN_EDSP_UTG1, IFX_FALSE, 0,
                                          VIN_UTG1_ACT_MASK);
      if (ret == IFX_ERROR)
      {
         return IFX_ERROR;
      }
   }
   else if (utgNum == 1)
   {
      /* second resource, UTG2, uses different resources as UTG1 */
      utgRes = (IFX_uint8_t)(ch + pDev->nSigCnt); 
      /* check whether CID Generator is active,
      because the status bits for CIS and UTG2 are overlaid
      => UTG could not be used while CIS is active! */
      if (pDev->pSigCh[ch].cid_sender[2] & SIG_CID_EN)
      {
         SET_ERROR (ERR_CID_RUNNING);
         return (IFX_ERROR);
      }
      /* Setup UTG2 interrupt masks.
      Note : Mask rising edge interrupts for VIN_UTG2_ACT_MASK.
             Allow falling edge interrupts for VIN_UTG2_ACT_MASK.
      Note: VIN_UTG2_ACT_MASK is overlaid with V2CPE_EDSP1_INT1_CIS_ACT
            For this reason the mask settings are set here within "Tone_UTG_Start"! */
      ret = VINETIC_Host_Set_EdspIntMask (pCh, VIN_EDSP_UTG2, IFX_FALSE, 0,
                                          VIN_UTG2_ACT_MASK);
      if (ret == IFX_ERROR)
      {
         return IFX_ERROR;
      }
   }
   else
   {
      SET_ERROR (ERR_NORESOURCE);
      return IFX_ERROR;
   }
   memset (pCmdCoef, 0, sizeof(pCmdCoef));
   /* prepare command for coefficients */
   pCmdCoef [0] = CMD1_EOP | (utgRes & CMD1_CH);
   pCmdCoef [1] = ECMD_UTG_COEF;
   /* set the UTG coefficients */
   Utg_SetCoeff (pSimpleTone, pCmdCoef);
   ret = CmdWrite (pDev, pCmdCoef, 25);
   if (ret == IFX_ERROR)
      return IFX_ERROR;
   /* overwrite utg resource number, because here the "overall number" is used */
   pUtgCmd->bit.utgnr = (utgRes & CMD1_CH);
   /* Tone signal injection and muting the voice signal
     into adder 1 and/or adder 2 */
   switch (dst)
   {
      case  TAPI_TONE_DST_NET:
         /* put it to adder 1 to network */
         pUtgCmd->bit.a1 = SIG_UTG_MUTE;
         pUtgCmd->bit.a2 = SIG_UTG_NONE;
         break;
      case  TAPI_TONE_DST_NETLOCAL:
         /* put it to adder 1 to network and adder 2 for local */
         pUtgCmd->bit.a1 = SIG_UTG_MUTE;
         pUtgCmd->bit.a2 = SIG_UTG_MUTE;
         break;
      case  TAPI_TONE_DST_LOCAL:
      case  TAPI_TONE_DST_DEFAULT:
      default:
         /* play it locally: take adder 2 */
         pUtgCmd->bit.a1 = SIG_UTG_NONE;
         pUtgCmd->bit.a2 = SIG_UTG_MUTE;
         break;
   }
   /* activate the UTG with the tone action previously programmed */
   pUtgCmd->bit.sm = 0;
   pUtgCmd->bit.en = 1;
   ret = CmdWrite (pDev, pUtgCmd->value, 1);
   if (ret == IFX_SUCCESS)
   {
      /* tone is playing now, so set the state */
      pChannel->TapiComplexToneData[utgNum].nToneState = TAPI_CT_ACTIVE;

      if (pSimpleTone->loop > 0 || pSimpleTone->pause > 0)
      {
         /* auto stop after loop or after each generation step */
         pUtgCmd->bit.sm = 1;
         pUtgCmd->bit.en = 0;
         ret = CmdWrite (pDev, pUtgCmd->value, 1);
      }
   }
#endif /* (VIN_CFG_FEATURES & VIN_FEAT_VOICE) */

   return ret;
}
Example #18
/********************************************************************
* FUNCTION send_discard_changes_pdu_to_server
* 
* Send a <discard-changes> operation to the server
*
* INPUTS:
*   server_cb == server control block to use
*
* RETURNS:
*    status
*********************************************************************/
status_t
    send_discard_changes_pdu_to_server (server_cb_t *server_cb)
{
    obj_template_t        *rpc;
    mgr_rpc_req_t         *req;
    val_value_t           *reqdata;
    ses_cb_t              *scb;
    status_t               res;
    xmlns_id_t             obj_nsid;

    req = NULL;
    reqdata = NULL;
    res = NO_ERR;

    if (LOGDEBUG) {
        log_debug("\nSending <discard-changes> request");
    }
    
    rpc = ncx_find_object(get_netconf_mod(server_cb), 
                          NCX_EL_DISCARD_CHANGES);
    if (!rpc) {
        return SET_ERROR(ERR_NCX_DEF_NOT_FOUND);
    }

    obj_nsid = obj_get_nsid(rpc);

    /* construct a method node */
    reqdata = xml_val_new_flag(obj_get_name(rpc), 
                               obj_nsid);
    if (!reqdata) {
        log_error("\nError allocating a new RPC request");
        return ERR_INTERNAL_MEM;
    }

    scb = mgr_ses_get_scb(server_cb->mysid);
    if (!scb) {
        res = SET_ERROR(ERR_INTERNAL_PTR);
    } else {
        req = mgr_rpc_new_request(scb);
        if (!req) {
            res = ERR_INTERNAL_MEM;
            log_error("\nError allocating a new RPC request");
        } else {
            req->data = reqdata;
            req->rpc = rpc;
            req->timeout = server_cb->timeout;
        }
    }
        
    /* if all OK, send the RPC request */
    if (res == NO_ERR) {
        if (LOGDEBUG2) {
            log_debug2("\nabout to send RPC request with reqdata:");
            val_dump_value_max(reqdata, 
                               0,
                               server_cb->defindent,
                               DUMP_VAL_LOG,
                               server_cb->display_mode,
                               FALSE,
                               FALSE);
        }

        /* the request will be stored if this returns NO_ERR */
        res = mgr_rpc_send_request(scb, req, yangcli_reply_handler);
        if (res == NO_ERR) {
            server_cb->command_mode = CMD_MODE_AUTODISCARD;
        }
    }

    /* cleanup and set next state */
    if (res != NO_ERR) {
        if (req) {
            mgr_rpc_free_request(req);
        } else if (reqdata) {
            val_free_value(reqdata);
        }
    } else {
        server_cb->state = MGR_IO_ST_CONN_RPYWAIT;
    }

    return res;

} /* send_discard_changes_pdu_to_server */
Example #19
static int
dsl_dataset_user_release_check_one(dsl_dataset_user_release_arg_t *ddura,
    dsl_dataset_t *ds, nvlist_t *holds, const char *snapname)
{
	uint64_t zapobj;
	nvlist_t *holds_found;
	objset_t *mos;
	int numholds;

	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	if (nvlist_empty(holds))
		return (0);

	numholds = 0;
	mos = ds->ds_dir->dd_pool->dp_meta_objset;
	zapobj = dsl_dataset_phys(ds)->ds_userrefs_obj;
	holds_found = fnvlist_alloc();

	for (nvpair_t *pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(holds, pair)) {
		uint64_t tmp;
		int error;
		const char *holdname = nvpair_name(pair);

		if (zapobj != 0)
			error = zap_lookup(mos, zapobj, holdname, 8, 1, &tmp);
		else
			error = SET_ERROR(ENOENT);

		/*
		 * Non-existent holds are put on the errlist, but don't
		 * cause an overall failure.
		 */
		if (error == ENOENT) {
			if (ddura->ddura_errlist != NULL) {
				char *errtag = kmem_asprintf("%s#%s",
				    snapname, holdname);
				fnvlist_add_int32(ddura->ddura_errlist, errtag,
				    ENOENT);
				strfree(errtag);
			}
			continue;
		}

		if (error != 0) {
			fnvlist_free(holds_found);
			return (error);
		}

		fnvlist_add_boolean(holds_found, holdname);
		numholds++;
	}

	if (DS_IS_DEFER_DESTROY(ds) &&
	    dsl_dataset_phys(ds)->ds_num_children == 1 &&
	    ds->ds_userrefs == numholds) {
		/* we need to destroy the snapshot as well */
		if (dsl_dataset_long_held(ds)) {
			fnvlist_free(holds_found);
			return (SET_ERROR(EBUSY));
		}
		fnvlist_add_boolean(ddura->ddura_todelete, snapname);
	}

	if (numholds != 0) {
		fnvlist_add_nvlist(ddura->ddura_chkholds, snapname,
		    holds_found);
	}
	fnvlist_free(holds_found);

	return (0);
}
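The loop above routes errors two ways: a missing hold is recorded per entry and skipped, while any other ZAP failure aborts the whole check. A standalone sketch of that policy with the ZAP replaced by a flat string table (all names hypothetical):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* stand-in for the ZAP lookup: 0 if present, ENOENT otherwise */
static int lookup(const char *const *table, size_t n, const char *name)
{
    size_t i;

    for (i = 0; i < n; i++)
        if (strcmp(table[i], name) == 0)
            return 0;
    return ENOENT;
}

static int check_holds(const char *const *existing, size_t nexisting,
    const char *const *requested, size_t nrequested)
{
    int numfound = 0;
    size_t i;

    for (i = 0; i < nrequested; i++) {
        int error = lookup(existing, nexisting, requested[i]);

        if (error == ENOENT) {
            /* per-entry problem: record it and keep going */
            fprintf(stderr, "no such hold: %s\n", requested[i]);
            continue;
        }
        if (error != 0)
            return error;       /* hard error: abort the whole check */
        numfound++;
    }
    printf("%d of %zu holds found\n", numfound, nrequested);
    return 0;
}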
Beispiel #20
0
/*
 * Return a vnode for the extended attribute directory for zp.
 * ** If the directory does not already exist, it is created **
 *
 *	IN:	zp	- znode to obtain attribute directory from
 *		cr	- credentials of caller
 *		flags	- flags from the VOP_LOOKUP call
 *
 *	OUT:	xvpp	- pointer to extended attribute directory vnode
 *
 *	RETURN:	0 on success
 *		error number on failure
 */
int
zfs_get_xattrdir(znode_t *zp, vnode_t **xvpp, cred_t *cr, int flags)
{
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	znode_t		*xzp;
	zfs_dirlock_t	*dl;
	vattr_t		va;
	int		error;
top:
	error = zfs_dirent_lock(&dl, zp, "", &xzp, ZXATTR, NULL, NULL);
	if (error)
		return (error);

	if (xzp != NULL) {
		*xvpp = ZTOV(xzp);
		zfs_dirent_unlock(dl);
		return (0);
	}
	if (!(flags & CREATE_XATTR_DIR)) {
		zfs_dirent_unlock(dl);
#ifdef illumos
		return (SET_ERROR(ENOENT));
#else
		return (SET_ERROR(ENOATTR));
#endif
	}

	if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
		zfs_dirent_unlock(dl);
		return (SET_ERROR(EROFS));
	}

	/*
	 * The ability to 'create' files in an attribute
	 * directory comes from the write_xattr permission on the base file.
	 *
	 * The ability to 'search' an attribute directory requires
	 * read_xattr permission on the base file.
	 *
	 * Once in a directory the ability to read/write attributes
	 * is controlled by the permissions on the attribute file.
	 */
	va.va_mask = AT_TYPE | AT_MODE | AT_UID | AT_GID;
	va.va_type = VDIR;
	va.va_mode = S_IFDIR | S_ISVTX | 0777;
	zfs_fuid_map_ids(zp, cr, &va.va_uid, &va.va_gid);

	error = zfs_make_xattrdir(zp, &va, xvpp, cr);
	zfs_dirent_unlock(dl);

	if (error == ERESTART) {
		/* NB: we already did dmu_tx_wait() if necessary */
		goto top;
	}
	if (error == 0)
		VOP_UNLOCK(*xvpp, 0);

	return (error);
}
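The control flow above is a lookup-or-create loop: take the name lock, return an existing directory if found, otherwise create one, and retry from the top when the transaction machinery reports ERESTART. A minimal sketch of that shape, with hypothetical lock/lookup/create helpers standing in for the ZFS primitives:

#include <errno.h>

#ifndef ERESTART
#define ERESTART 85     /* platform "retry" code; value is illustrative */
#endif

struct dir;
struct node;
struct lock;

/* hypothetical primitives standing in for zfs_dirent_lock() & friends */
extern int  dir_lock(struct dir *d, struct lock **l, struct node **existing);
extern void dir_unlock(struct lock *l);
extern int  node_create(struct dir *d, struct node **out);

static int get_or_create(struct dir *d, struct node **out)
{
    struct lock *l;
    struct node *n;
    int error;

    for (;;) {
        error = dir_lock(d, &l, &n);    /* also looks up an existing node */
        if (error)
            return error;
        if (n != NULL) {                /* fast path: it already exists */
            *out = n;
            dir_unlock(l);
            return 0;
        }
        error = node_create(d, out);
        dir_unlock(l);                  /* always drop the lock first */
        if (error != ERESTART)
            return error;               /* success or hard failure */
        /* ERESTART: the transaction had to wait; take it from the top */
    }
}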
Beispiel #21
0
/********************************************************************
* FUNCTION y_starter_init
* 
* initialize the starter server instrumentation library
* 
* INPUTS:
*    modname == requested module name
*    revision == requested version (NULL for any)
* 
* RETURNS:
*     error status
********************************************************************/
status_t y_starter_init (
    const xmlChar *modname,
    const xmlChar *revision)
{
    status_t res = NO_ERR;
    agt_profile_t *agt_profile = agt_get_profile();

    y_starter_init_static_vars();

    /* change if custom handling done */
    if (xml_strcmp(modname, y_starter_M_starter)) {
        return ERR_NCX_UNKNOWN_MODULE;
    }

    if (revision && xml_strcmp(revision, y_starter_R_starter)) {
        return ERR_NCX_WRONG_VERSION;
    }
    res = ncxmod_load_module(
        y_starter_M_starter,
        y_starter_R_starter,
        &agt_profile->agt_savedevQ,
        &starter_mod);
    if (res != NO_ERR) {
        return res;
    }

    starter_obj = ncx_find_object(
        starter_mod,
        y_starter_N_starter);
    if (starter_obj == NULL) {
        return SET_ERROR(ERR_NCX_DEF_NOT_FOUND);
    }
    starter_start_vnf_obj = ncx_find_object(
        starter_mod,
        y_starter_N_starter_start_vnf);
    if (starter_start_vnf_obj == NULL) {
        return SET_ERROR(ERR_NCX_DEF_NOT_FOUND);
    }
    starter_kill_vnf_obj = ncx_find_object(
        starter_mod,
        y_starter_N_starter_kill_vnf);
    if (starter_kill_vnf_obj == NULL) {
        return SET_ERROR(ERR_NCX_DEF_NOT_FOUND);
    }
    starter_get_load_obj = ncx_find_object(
        starter_mod,
        y_starter_N_starter_get_load);
    if (starter_get_load_obj == NULL) {
        return SET_ERROR(ERR_NCX_DEF_NOT_FOUND);
    }
    starter_get_processes_obj = ncx_find_object(
        starter_mod,
        y_starter_N_starter_get_processes);
    if (starter_get_processes_obj == NULL) {
        return SET_ERROR(ERR_NCX_DEF_NOT_FOUND);
    }
    processData_obj = ncx_find_object(
        starter_mod,
        y_starter_N_processData);
    if (processData_obj == NULL) {
        return SET_ERROR(ERR_NCX_DEF_NOT_FOUND);
    }
    processDone_obj = ncx_find_object(
        starter_mod,
        y_starter_N_processDone);
    if (processDone_obj == NULL) {
        return SET_ERROR(ERR_NCX_DEF_NOT_FOUND);
    }
    res = agt_rpc_register_method(
        y_starter_M_starter,
        y_starter_N_starter_start_vnf,
        AGT_RPC_PH_VALIDATE,
        y_starter_starter_start_vnf_validate);
    if (res != NO_ERR) {
        return res;
    }

    res = agt_rpc_register_method(
        y_starter_M_starter,
        y_starter_N_starter_start_vnf,
        AGT_RPC_PH_INVOKE,
        y_starter_starter_start_vnf_invoke);
    if (res != NO_ERR) {
        return res;
    }

    res = agt_rpc_register_method(
        y_starter_M_starter,
        y_starter_N_starter_kill_vnf,
        AGT_RPC_PH_VALIDATE,
        y_starter_starter_kill_vnf_validate);
    if (res != NO_ERR) {
        return res;
    }

    res = agt_rpc_register_method(
        y_starter_M_starter,
        y_starter_N_starter_kill_vnf,
        AGT_RPC_PH_INVOKE,
        y_starter_starter_kill_vnf_invoke);
    if (res != NO_ERR) {
        return res;
    }

    res = agt_rpc_register_method(
        y_starter_M_starter,
        y_starter_N_starter_get_load,
        AGT_RPC_PH_VALIDATE,
        y_starter_starter_get_load_validate);
    if (res != NO_ERR) {
        return res;
    }

    res = agt_rpc_register_method(
        y_starter_M_starter,
        y_starter_N_starter_get_load,
        AGT_RPC_PH_INVOKE,
        y_starter_starter_get_load_invoke);
    if (res != NO_ERR) {
        return res;
    }

    res = agt_rpc_register_method(
        y_starter_M_starter,
        y_starter_N_starter_get_processes,
        AGT_RPC_PH_VALIDATE,
        y_starter_starter_get_processes_validate);
    if (res != NO_ERR) {
        return res;
    }

    res = agt_rpc_register_method(
        y_starter_M_starter,
        y_starter_N_starter_get_processes,
        AGT_RPC_PH_INVOKE,
        y_starter_starter_get_processes_invoke);
    if (res != NO_ERR) {
        return res;
    }

    res = agt_cb_register_callback(
        y_starter_M_starter,
        (const xmlChar *)"/starter",
        y_starter_R_starter,
        starter_starter_edit);
    if (res != NO_ERR) {
        return res;
    }

    res = agt_cb_register_callback(
        y_starter_M_starter,
        (const xmlChar *)"/starter/appName",
        y_starter_R_starter,
        starter_starter_appName_edit);
    if (res != NO_ERR) {
        return res;
    }

    res = agt_cb_register_callback(
        y_starter_M_starter,
        (const xmlChar *)"/starter/appParams",
        y_starter_R_starter,
        starter_starter_appParams_edit);
    if (res != NO_ERR) {
        return res;
    }

    res = agt_cb_register_callback(
        y_starter_M_starter,
        (const xmlChar *)"/starter/capabilities",
        y_starter_R_starter,
        starter_starter_capabilities_edit);
    if (res != NO_ERR) {
        return res;
    }

    /* put your module initialization code here */
    
    return res;
} /* y_starter_init */
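The long run of find-then-check and register-then-check blocks above is exactly where copy-paste slips creep in (checking the wrong pointer after a lookup, for instance). A hedged, table-driven alternative for the lookups, assuming the same file-scope object pointers and the ncx_find_object() signature used above:

static status_t find_required_objects (void)
{
    struct {
        obj_template_t **objpp;
        const xmlChar *name;
    } tab[] = {
        { &starter_obj,               y_starter_N_starter },
        { &starter_start_vnf_obj,     y_starter_N_starter_start_vnf },
        { &starter_kill_vnf_obj,      y_starter_N_starter_kill_vnf },
        { &starter_get_load_obj,      y_starter_N_starter_get_load },
        { &starter_get_processes_obj, y_starter_N_starter_get_processes },
        { &processData_obj,           y_starter_N_processData },
        { &processDone_obj,           y_starter_N_processDone },
    };
    size_t i;

    /* each lookup stores into, and then checks, its own pointer */
    for (i = 0; i < sizeof(tab)/sizeof(tab[0]); i++) {
        *tab[i].objpp = ncx_find_object(starter_mod, tab[i].name);
        if (*tab[i].objpp == NULL) {
            return SET_ERROR(ERR_NCX_DEF_NOT_FOUND);
        }
    }
    return NO_ERR;
}

The agt_rpc_register_method() calls could be folded into a similar table if desired.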
Beispiel #22
0
/*
 * Lock a directory entry.  A dirlock on <dzp, name> protects that name
 * in dzp's directory zap object.  As long as you hold a dirlock, you can
 * assume two things: (1) dzp cannot be reaped, and (2) no other thread
 * can change the zap entry for (i.e. link or unlink) this name.
 *
 * Input arguments:
 *	dzp	- znode for directory
 *	name	- name of entry to lock
 *	flag	- ZNEW: if the entry already exists, fail with EEXIST.
 *		  ZEXISTS: if the entry does not exist, fail with ENOENT.
 *		  ZSHARED: allow concurrent access with other ZSHARED callers.
 *		  ZXATTR: we want dzp's xattr directory
 *		  ZCILOOK: On a mixed sensitivity file system,
 *			   this lookup should be case-insensitive.
 *		  ZCIEXACT: On a purely case-insensitive file system,
 *			    this lookup should be case-sensitive.
 *		  ZRENAMING: we are locking for renaming, force narrow locks
 *		  ZHAVELOCK: Don't grab the z_name_lock for this call. The
 *			     current thread already holds it.
 *
 * Output arguments:
 *	zpp	- pointer to the znode for the entry (NULL if there isn't one)
 *	dlpp	- pointer to the dirlock for this entry (NULL on error)
 *      direntflags - (case-insensitive lookup only)
 *		flags if multiple case-sensitive matches exist in directory
 *      realpnp     - (case-insensitive lookup only)
 *		actual name matched within the directory
 *
 * Return value: 0 on success or errno on failure.
 *
 * NOTE: Always checks for, and rejects, '.' and '..'.
 * NOTE: For case-insensitive file systems we take wide locks (see below),
 *	 but return znode pointers to a single match.
 */
int
zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
    int flag, int *direntflags, pathname_t *realpnp)
{
	zfsvfs_t	*zfsvfs = dzp->z_zfsvfs;
	zfs_dirlock_t	*dl;
	boolean_t	update;
	boolean_t	exact;
	uint64_t	zoid;
	vnode_t		*vp = NULL;
	int		error = 0;
	int		cmpflags;

	*zpp = NULL;
	*dlpp = NULL;

	/*
	 * Verify that we are not trying to lock '.', '..', or '.zfs'
	 */
	if ((name[0] == '.' &&
	    (name[1] == '\0' || (name[1] == '.' && name[2] == '\0'))) ||
	    (zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0))
		return (SET_ERROR(EEXIST));

	/*
	 * Case sensitivity and normalization preferences are set when
	 * the file system is created.  These are stored in the
	 * zfsvfs->z_case and zfsvfs->z_norm fields.  These choices
	 * affect what vnodes can be cached in the DNLC, how we
	 * perform zap lookups, and the "width" of our dirlocks.
	 *
	 * A normal dirlock locks a single name.  Note that with
	 * normalization a name can be composed multiple ways, but
	 * when normalized, these names all compare equal.  A wide
	 * dirlock locks multiple names.  We need these when the file
	 * system supports mixed-mode access.  It is sometimes
	 * necessary to lock all case permutations of a file name at
	 * once so that simultaneous case-insensitive and
	 * case-sensitive access behaves as rationally as possible.
	 */

	/*
	 * Decide if exact matches should be requested when performing
	 * a zap lookup on file systems supporting case-insensitive
	 * access.
	 */
	exact =
	    ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE) && (flag & ZCIEXACT)) ||
	    ((zfsvfs->z_case == ZFS_CASE_MIXED) && !(flag & ZCILOOK));

	/*
	 * Only look in or update the DNLC if we are looking for the
	 * name on a file system that does not require normalization
	 * or case folding.  We can also look there if we happen to be
	 * on a non-normalizing, mixed sensitivity file system IF we
	 * are looking for the exact name.
	 *
	 * We could perhaps add a TO-UPPERed version of the name to the
	 * DNLC in the CI-only case as a performance improvement.
	 */
	update = !zfsvfs->z_norm ||
	    ((zfsvfs->z_case == ZFS_CASE_MIXED) &&
	    !(zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER) && !(flag & ZCILOOK));

	/*
	 * ZRENAMING indicates we are in a situation where we should
	 * take narrow locks regardless of the file system's
	 * preferences for normalizing and case folding.  This will
	 * prevent us deadlocking trying to grab the same wide lock
	 * twice if the two names happen to be case-insensitive
	 * matches.
	 */
	if (flag & ZRENAMING)
		cmpflags = 0;
	else
		cmpflags = zfsvfs->z_norm;

	/*
	 * Wait until there are no locks on this name.
	 *
	 * Don't grab the lock if it is already held. However, we cannot
	 * have both ZSHARED and ZHAVELOCK together.
	 */
	ASSERT(!(flag & ZSHARED) || !(flag & ZHAVELOCK));
	if (!(flag & ZHAVELOCK))
		rw_enter(&dzp->z_name_lock, RW_READER);

	mutex_enter(&dzp->z_lock);
	for (;;) {
		if (dzp->z_unlinked) {
			mutex_exit(&dzp->z_lock);
			if (!(flag & ZHAVELOCK))
				rw_exit(&dzp->z_name_lock);
			return (SET_ERROR(ENOENT));
		}
		for (dl = dzp->z_dirlocks; dl != NULL; dl = dl->dl_next) {
			if ((u8_strcmp(name, dl->dl_name, 0, cmpflags,
			    U8_UNICODE_LATEST, &error) == 0) || error != 0)
				break;
		}
		if (error != 0) {
			mutex_exit(&dzp->z_lock);
			if (!(flag & ZHAVELOCK))
				rw_exit(&dzp->z_name_lock);
			return (SET_ERROR(ENOENT));
		}
		if (dl == NULL)	{
			size_t namesize;

			/*
			 * Allocate a new dirlock and add it to the list.
			 */
			namesize = strlen(name) + 1;
			dl = kmem_alloc(sizeof (zfs_dirlock_t) + namesize,
			    KM_SLEEP);
			cv_init(&dl->dl_cv, NULL, CV_DEFAULT, NULL);
			dl->dl_name = (char *)(dl + 1);
			bcopy(name, dl->dl_name, namesize);
			dl->dl_sharecnt = 0;
			dl->dl_namelock = 0;
			dl->dl_namesize = namesize;
			dl->dl_dzp = dzp;
			dl->dl_next = dzp->z_dirlocks;
			dzp->z_dirlocks = dl;
			break;
		}
		if ((flag & ZSHARED) && dl->dl_sharecnt != 0)
			break;
		cv_wait(&dl->dl_cv, &dzp->z_lock);
	}

	/*
	 * If the z_name_lock was NOT held for this dirlock record it.
	 */
	if (flag & ZHAVELOCK)
		dl->dl_namelock = 1;

	if (flag & ZSHARED)
		dl->dl_sharecnt++;

	mutex_exit(&dzp->z_lock);

	/*
	 * We have a dirlock on the name.  (Note that it is the dirlock,
	 * not the dzp's z_lock, that protects the name in the zap object.)
	 * See if there's an object by this name; if so, put a hold on it.
	 */
	if (flag & ZXATTR) {
		error = sa_lookup(dzp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &zoid,
		    sizeof (zoid));
		if (error == 0)
			error = (zoid == 0 ? ENOENT : 0);
	} else {
		if (update)
			vp = dnlc_lookup(ZTOV(dzp), name);
		if (vp == DNLC_NO_VNODE) {
			VN_RELE(vp);
			error = SET_ERROR(ENOENT);
		} else if (vp) {
			if (flag & ZNEW) {
				zfs_dirent_unlock(dl);
				VN_RELE(vp);
				return (SET_ERROR(EEXIST));
			}
			*dlpp = dl;
			*zpp = VTOZ(vp);
			return (0);
		} else {
			error = zfs_match_find(zfsvfs, dzp, name, exact,
			    update, direntflags, realpnp, &zoid);
		}
	}
	if (error) {
		if (error != ENOENT || (flag & ZEXISTS)) {
			zfs_dirent_unlock(dl);
			return (error);
		}
	} else {
		if (flag & ZNEW) {
			zfs_dirent_unlock(dl);
			return (SET_ERROR(EEXIST));
		}
		error = zfs_zget(zfsvfs, zoid, zpp);
		if (error) {
			zfs_dirent_unlock(dl);
			return (error);
		}
		if (!(flag & ZXATTR) && update)
			dnlc_update(ZTOV(dzp), name, ZTOV(*zpp));
	}

	*dlpp = dl;

	return (0);
}
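The dirlock machinery above boils down to: search a per-directory list for the name, insert a new entry if absent, otherwise sleep until the holder releases it. A user-space sketch of that shape with pthreads (hypothetical types, allocation error handling elided):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct namelock {
    char *name;
    struct namelock *next;
};

struct dir {
    pthread_mutex_t lock;
    pthread_cond_t  cv;
    struct namelock *locks;
};

static struct namelock *dir_lock_name(struct dir *d, const char *name)
{
    struct namelock *nl;

    pthread_mutex_lock(&d->lock);
    for (;;) {
        for (nl = d->locks; nl != NULL; nl = nl->next)
            if (strcmp(nl->name, name) == 0)
                break;
        if (nl == NULL)
            break;                              /* name is free: claim it */
        pthread_cond_wait(&d->cv, &d->lock);    /* held: wait and rescan */
    }
    nl = malloc(sizeof(*nl));                   /* error handling elided */
    nl->name = strdup(name);
    nl->next = d->locks;
    d->locks = nl;
    pthread_mutex_unlock(&d->lock);
    return nl;
}

static void dir_unlock_name(struct dir *d, struct namelock *nl)
{
    struct namelock **p;

    pthread_mutex_lock(&d->lock);
    for (p = &d->locks; *p != nl; p = &(*p)->next)
        ;
    *p = nl->next;                      /* unlink and wake all waiters */
    pthread_cond_broadcast(&d->cv);
    pthread_mutex_unlock(&d->lock);
    free(nl->name);
    free(nl);
}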
Beispiel #23
0
/********************************************************************
* FUNCTION mgr_xml_skip_subtree
* 
* Already encountered an error, so advance nodes until the
* end node matching the start node is reached or a terminating
* error occurs
*   - end of input
*   - start depth level reached
*
* INPUTS:
*   reader == XmlReader already initialized from File, Memory,
*             or whatever
*   startnode  == xml_node_t of the start node of the sub-tree to skip
* RETURNS:
*   status of the operation
* SIDE EFFECTS:
*   the xmlreader state is advanced until the current node is the
*   end node of the specified start node or a fatal error occurs
*********************************************************************/
status_t 
    mgr_xml_skip_subtree (xmlTextReaderPtr reader,
                          const xml_node_t *startnode)
{
    xml_node_t       node;
    const xmlChar   *qname, *badns;
    uint32           len;
    int              ret, depth, nodetyp;
    xmlns_id_t       nsid;
    boolean          done, justone;
    status_t         res;

#ifdef DEBUG
    if (!reader || !startnode) {
        return SET_ERROR(ERR_INTERNAL_PTR);
    }
#endif

    justone = FALSE;

    switch (startnode->nodetyp) {
    case XML_NT_START:
        break;
    case XML_NT_EMPTY:
        return NO_ERR;
    case XML_NT_STRING:
        justone = TRUE;
        break;
    case XML_NT_END:
        return NO_ERR;
    default:
        return SET_ERROR(ERR_INTERNAL_VAL);
    }
    xml_init_node(&node);
    res = mgr_xml_consume_node_noadv(reader, &node);
    if (res == NO_ERR) {
        res = xml_endnode_match(startnode, &node);
        if (res == NO_ERR) {
            xml_clean_node(&node);
            return NO_ERR;
        }
    }

    xml_clean_node(&node);
    if (justone) {
        return NO_ERR;
    }

    done = FALSE;
    while (!done) {

        /* advance the node pointer */
        ret = xmlTextReaderRead(reader);
        if (ret != 1) {
            /* fatal error */
            return ERR_XML_READER_EOF;
        }

        /* get the node depth to match the end node correctly */
        depth = xmlTextReaderDepth(reader);
        if (depth == -1) {
            /* not sure if this can happen, treat as fatal error */
            return ERR_XML_READER_INTERNAL;
        } else if (depth <= startnode->depth) {
            /* this depth override will cause errors to be ignored
             *   - wrong namespace in matching end node
             *   - unknown namespace in matching end node
             *   - wrong name in 'matching' end node
             */
            done = TRUE;
        }

        /* get the internal nodetype, check it and convert it */
        nodetyp = xmlTextReaderNodeType(reader);

        /* get the element QName */
        qname = xmlTextReaderConstName(reader);
        if (qname) {
            /* check for namespace prefix in the name 
             * only error is 'unregistered namespace ID'
             * which doesn't matter in this case
             */
            nsid = 0;
            (void)xml_check_ns(reader, qname, &nsid, &len, &badns);
        } else {
            qname = (const xmlChar *)"";
        }

        /* check the normal case to see if the search is done;
         * note that xmlTextReaderNodeType() returns xmlReaderTypes
         * values, where end-element (XML_READER_TYPE_END_ELEMENT)
         * happens to share the value 15 with XML_ELEMENT_DECL
         */
        if (depth == startnode->depth &&
            !xml_strcmp(qname, startnode->qname) &&
            nodetyp == XML_ELEMENT_DECL) {
            done = TRUE;
        }

#ifdef XML_UTIL_DEBUG
        log_debug3("\nxml_skip: %s L:%d T:%s",
               qname, depth, xml_get_node_name(nodetyp));
#endif
    }

    return NO_ERR;

}  /* mgr_xml_skip_subtree */
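For comparison, the same depth-based skip can be written directly against the libxml2 reader API. This standalone sketch is an illustration, not the yuma implementation; it treats "back at the start depth on a matching end tag" as completion, just as the loop above does:

#include <libxml/xmlreader.h>

/* Skip everything below the current element: read until we are back at
 * the element's own depth on a matching end tag, or input runs out. */
static int skip_subtree(xmlTextReaderPtr reader)
{
    int start_depth = xmlTextReaderDepth(reader);
    const xmlChar *name = xmlTextReaderConstName(reader);

    if (name == NULL)
        return -1;
    if (xmlTextReaderIsEmptyElement(reader))
        return 0;                       /* <foo/> has no subtree */

    while (xmlTextReaderRead(reader) == 1) {
        if (xmlTextReaderDepth(reader) == start_depth &&
            xmlTextReaderNodeType(reader) == XML_READER_TYPE_END_ELEMENT &&
            xmlStrcmp(xmlTextReaderConstName(reader), name) == 0) {
            return 0;                   /* matching end tag reached */
        }
    }
    return -1;                          /* premature end of input */
}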
Beispiel #24
0
/*
 * Link zp into dl.  Can only fail if zp has been unlinked.
 */
int
zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
{
	znode_t *dzp = dl->dl_dzp;
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	vnode_t *vp = ZTOV(zp);
	uint64_t value;
	int zp_is_dir = (vp->v_type == VDIR);
	sa_bulk_attr_t bulk[5];
	uint64_t mtime[2], ctime[2];
	int count = 0;
	int error;

	mutex_enter(&zp->z_lock);

	if (!(flag & ZRENAMING)) {
		if (zp->z_unlinked) {	/* no new links to unlinked zp */
			ASSERT(!(flag & (ZNEW | ZEXISTS)));
			mutex_exit(&zp->z_lock);
			return (SET_ERROR(ENOENT));
		}
		zp->z_links++;
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &zp->z_links, sizeof (zp->z_links));
	}
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
	    &dzp->z_id, sizeof (dzp->z_id));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &zp->z_pflags, sizeof (zp->z_pflags));

	if (!(flag & ZNEW)) {
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
		    ctime, sizeof (ctime));
		zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
		    ctime, B_TRUE);
	}
	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);

	mutex_exit(&zp->z_lock);

	mutex_enter(&dzp->z_lock);
	dzp->z_size++;
	dzp->z_links += zp_is_dir;
	count = 0;
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
	    &dzp->z_size, sizeof (dzp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
	    &dzp->z_links, sizeof (dzp->z_links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
	    mtime, sizeof (mtime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
	    ctime, sizeof (ctime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
	    &dzp->z_pflags, sizeof (dzp->z_pflags));
	zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
	error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);
	mutex_exit(&dzp->z_lock);

	value = zfs_dirent(zp, zp->z_mode);
	error = zap_add(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name,
	    8, 1, &value, tx);
	ASSERT(error == 0);

	dnlc_update(ZTOV(dzp), dl->dl_name, vp);

	return (0);
}
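The value stored by zap_add() above packs more than the object number. As suggested by the ZFS_DIRENT_OBJ/ZFS_DIRENT_TYPE macros, the low 48 bits appear to carry the object number and the top 4 bits the entry's d_type; treating those widths as an assumption, a standalone pack/unpack sketch looks like this:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* pack an object number and a 4-bit d_type into one 64-bit dirent value
 * (field widths assumed from the ZFS_DIRENT_* macros) */
static uint64_t dirent_pack(uint64_t obj, unsigned dtype)
{
    return (obj & ((1ULL << 48) - 1)) | ((uint64_t)(dtype & 0xf) << 60);
}

static uint64_t dirent_obj(uint64_t de)  { return de & ((1ULL << 48) - 1); }
static unsigned dirent_type(uint64_t de) { return (unsigned)(de >> 60) & 0xf; }

int main(void)
{
    uint64_t de = dirent_pack(12345, 4 /* DT_DIR */);

    assert(dirent_obj(de) == 12345);
    assert(dirent_type(de) == 4);
    printf("entry=0x%016llx\n", (unsigned long long)de);
    return 0;
}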
Beispiel #25
0
/*
 * Replay file create with optional ACL and xvattr information, as
 * well as optional FUID information.
 */
static int
zfs_replay_create_acl(void *arg1, void *arg2, boolean_t byteswap)
{
	zfsvfs_t *zfsvfs = arg1;
	lr_acl_create_t *lracl = arg2;
	char *name = NULL;		/* location determined later */
	lr_create_t *lr = (lr_create_t *)lracl;
	znode_t *dzp;
	vnode_t *vp = NULL;
	xvattr_t xva;
	int vflg = 0;
	vsecattr_t vsec = { 0 };
	lr_attr_t *lrattr;
	void *aclstart;
	void *fuidstart;
	size_t xvatlen = 0;
	uint64_t txtype;
	int error;

	txtype = (lr->lr_common.lrc_txtype & ~TX_CI);
	if (byteswap) {
		byteswap_uint64_array(lracl, sizeof (*lracl));
		if (txtype == TX_CREATE_ACL_ATTR ||
		    txtype == TX_MKDIR_ACL_ATTR) {
			lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
			zfs_replay_swap_attrs(lrattr);
			xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
		}

		aclstart = (caddr_t)(lracl + 1) + xvatlen;
		zfs_ace_byteswap(aclstart, lracl->lr_acl_bytes, B_FALSE);
		/* swap fuids */
		if (lracl->lr_fuidcnt) {
			byteswap_uint64_array((caddr_t)aclstart +
			    ZIL_ACE_LENGTH(lracl->lr_acl_bytes),
			    lracl->lr_fuidcnt * sizeof (uint64_t));
		}
	}

	if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
		return (error);

	xva_init(&xva);
	zfs_init_vattr(&xva.xva_vattr, AT_TYPE | AT_MODE | AT_UID | AT_GID,
	    lr->lr_mode, lr->lr_uid, lr->lr_gid, lr->lr_rdev, lr->lr_foid);

	/*
	 * All forms of zfs create (create, mkdir, mkxattrdir, symlink)
	 * eventually end up in zfs_mknode(), which assigns the object's
	 * creation time and generation number.  The generic VOP_CREATE()
	 * doesn't have either concept, so we smuggle the values inside
	 * the vattr's otherwise unused va_ctime and va_nblocks fields.
	 */
	ZFS_TIME_DECODE(&xva.xva_vattr.va_ctime, lr->lr_crtime);
	xva.xva_vattr.va_nblocks = lr->lr_gen;

	error = dmu_object_info(zfsvfs->z_os, lr->lr_foid, NULL);
	if (error != ENOENT)
		goto bail;

	if (lr->lr_common.lrc_txtype & TX_CI)
		vflg |= FIGNORECASE;
	switch (txtype) {
	case TX_CREATE_ACL:
		aclstart = (caddr_t)(lracl + 1);
		fuidstart = (caddr_t)aclstart +
		    ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
		zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart,
		    (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
		    lr->lr_uid, lr->lr_gid);
		/*FALLTHROUGH*/
	case TX_CREATE_ACL_ATTR:
		if (name == NULL) {
			lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
			xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
			xva.xva_vattr.va_mask |= AT_XVATTR;
			zfs_replay_xvattr(lrattr, &xva);
		}
		vsec.vsa_mask = VSA_ACE | VSA_ACE_ACLFLAGS;
		vsec.vsa_aclentp = (caddr_t)(lracl + 1) + xvatlen;
		vsec.vsa_aclcnt = lracl->lr_aclcnt;
		vsec.vsa_aclentsz = lracl->lr_acl_bytes;
		vsec.vsa_aclflags = lracl->lr_acl_flags;
		if (zfsvfs->z_fuid_replay == NULL) {
			fuidstart = (caddr_t)(lracl + 1) + xvatlen +
			    ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
			zfsvfs->z_fuid_replay =
			    zfs_replay_fuids(fuidstart,
			    (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
			    lr->lr_uid, lr->lr_gid);
		}

#ifdef TODO
		error = VOP_CREATE(ZTOV(dzp), name, &xva.xva_vattr,
		    0, 0, &vp, kcred, vflg, NULL, &vsec);
#else
		panic("%s:%u: unsupported condition", __func__, __LINE__);
#endif
		break;
	case TX_MKDIR_ACL:
		aclstart = (caddr_t)(lracl + 1);
		fuidstart = (caddr_t)aclstart +
		    ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
		zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart,
		    (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
		    lr->lr_uid, lr->lr_gid);
		/*FALLTHROUGH*/
	case TX_MKDIR_ACL_ATTR:
		if (name == NULL) {
			lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
			xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
			zfs_replay_xvattr(lrattr, &xva);
		}
		vsec.vsa_mask = VSA_ACE | VSA_ACE_ACLFLAGS;
		vsec.vsa_aclentp = (caddr_t)(lracl + 1) + xvatlen;
		vsec.vsa_aclcnt = lracl->lr_aclcnt;
		vsec.vsa_aclentsz = lracl->lr_acl_bytes;
		vsec.vsa_aclflags = lracl->lr_acl_flags;
		if (zfsvfs->z_fuid_replay == NULL) {
			fuidstart = (caddr_t)(lracl + 1) + xvatlen +
			    ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
			zfsvfs->z_fuid_replay =
			    zfs_replay_fuids(fuidstart,
			    (void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
			    lr->lr_uid, lr->lr_gid);
		}
#ifdef TODO
		error = VOP_MKDIR(ZTOV(dzp), name, &xva.xva_vattr,
		    &vp, kcred, NULL, vflg, &vsec);
#else
		panic("%s:%u: unsupported condition", __func__, __LINE__);
#endif
		break;
	default:
		error = SET_ERROR(ENOTSUP);
	}

bail:
	if (error == 0 && vp != NULL)
		VN_RELE(vp);

	VN_RELE(ZTOV(dzp));

	if (zfsvfs->z_fuid_replay)
		zfs_fuid_info_free(zfsvfs->z_fuid_replay);
	zfsvfs->z_fuid_replay = NULL;

	return (error);
}
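The byteswap path above relies on the fixed portion of each log record being an array of 64-bit fields, so one pass over the buffer fixes every field when the record was written by a host of the opposite endianness. A standalone sketch of that step:

#include <stdint.h>
#include <stddef.h>

/* Swap every uint64_t in a buffer in place (gcc/clang builtin used
 * here; a portable version would shuffle the bytes manually). */
static void swap_uint64_array(void *buf, size_t size)
{
    uint64_t *p = buf;
    size_t i, n = size / sizeof(uint64_t);

    for (i = 0; i < n; i++)
        p[i] = __builtin_bswap64(p[i]);
}

/* A replay-style record: the fixed portion is all 64-bit fields, so a
 * single array swap fixes the endianness of every field at once. */
struct record {
    uint64_t txtype;
    uint64_t objid;
};

static void fix_endianness(struct record *r, int byteswap)
{
    if (byteswap)
        swap_uint64_array(r, sizeof(*r));
}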
Beispiel #26
0
/*
 * Unlink zp from dl, and mark zp for deletion if this was the last link.
 * Can fail if zp is a mount point (EBUSY) or a non-empty directory (EEXIST).
 * If 'unlinkedp' is NULL, we put unlinked znodes on the unlinked list.
 * If it's non-NULL, we use it to indicate whether the znode needs deletion,
 * and it's the caller's job to do it.
 */
int
zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
	boolean_t *unlinkedp)
{
	znode_t *dzp = dl->dl_dzp;
	zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
	vnode_t *vp = ZTOV(zp);
	int zp_is_dir = (vp->v_type == VDIR);
	boolean_t unlinked = B_FALSE;
	sa_bulk_attr_t bulk[5];
	uint64_t mtime[2], ctime[2];
	int count = 0;
	int error;

	dnlc_remove(ZTOV(dzp), dl->dl_name);

	if (!(flag & ZRENAMING)) {
		if (vn_vfswlock(vp))		/* prevent new mounts on zp */
			return (SET_ERROR(EBUSY));

		if (vn_ismntpt(vp)) {		/* don't remove mount point */
			vn_vfsunlock(vp);
			return (SET_ERROR(EBUSY));
		}

		mutex_enter(&zp->z_lock);

		if (zp_is_dir && !zfs_dirempty(zp)) {
			mutex_exit(&zp->z_lock);
			vn_vfsunlock(vp);
#ifdef illumos
			return (SET_ERROR(EEXIST));
#else
			return (SET_ERROR(ENOTEMPTY));
#endif
		}

		/*
		 * If we get here, we are going to try to remove the object.
		 * First try removing the name from the directory; if that
		 * fails, return the error.
		 */
		error = zfs_dropname(dl, zp, dzp, tx, flag);
		if (error != 0) {
			mutex_exit(&zp->z_lock);
			vn_vfsunlock(vp);
			return (error);
		}

		if (zp->z_links <= zp_is_dir) {
			zfs_panic_recover("zfs: link count on vnode %p is %u, "
			    "should be at least %u", zp->z_vnode,
			    (int)zp->z_links,
			    zp_is_dir + 1);
			zp->z_links = zp_is_dir + 1;
		}
		if (--zp->z_links == zp_is_dir) {
			zp->z_unlinked = B_TRUE;
			zp->z_links = 0;
			unlinked = B_TRUE;
		} else {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
			    NULL, &ctime, sizeof (ctime));
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
			    NULL, &zp->z_pflags, sizeof (zp->z_pflags));
			zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
			    B_TRUE);
		}
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
		    NULL, &zp->z_links, sizeof (zp->z_links));
		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
		count = 0;
		ASSERT(error == 0);
		mutex_exit(&zp->z_lock);
		vn_vfsunlock(vp);
	} else {
		error = zfs_dropname(dl, zp, dzp, tx, flag);
		if (error != 0)
			return (error);
	}

	mutex_enter(&dzp->z_lock);
	dzp->z_size--;		/* one dirent removed */
	dzp->z_links -= zp_is_dir;	/* ".." link from zp */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
	    NULL, &dzp->z_links, sizeof (dzp->z_links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
	    NULL, &dzp->z_size, sizeof (dzp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
	    NULL, ctime, sizeof (ctime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
	    NULL, mtime, sizeof (mtime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
	    NULL, &dzp->z_pflags, sizeof (dzp->z_pflags));
	zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
	error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);
	mutex_exit(&dzp->z_lock);

	if (unlinkedp != NULL)
		*unlinkedp = unlinked;
	else if (unlinked)
		zfs_unlinked_add(zp, tx);

	return (0);
}
Beispiel #27
0
/* ARGSUSED */
static int
zfs_replay_error(void *arg1, void *arg2, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}
Beispiel #28
0
int
zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
{
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	znode_t *xzp;
	dmu_tx_t *tx;
	int error;
	zfs_acl_ids_t acl_ids;
	boolean_t fuid_dirtied;
	uint64_t parent;

	*xvpp = NULL;

	/*
	 * In FreeBSD, access checking for creating an EA is done
	 * in zfs_setextattr().
	 */
#ifndef __FreeBSD_kernel__
	if ((error = zfs_zaccess(zp, ACE_WRITE_NAMED_ATTRS, 0, B_FALSE, cr)) != 0)
		return (error);
#endif

	if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
	    &acl_ids)) != 0)
		return (error);
	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
		zfs_acl_ids_free(&acl_ids);
		return (SET_ERROR(EDQUOT));
	}

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		return (error);
	}
	zfs_mknode(zp, vap, tx, cr, IS_XATTR, &xzp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

#ifdef DEBUG
	error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (parent));
	ASSERT(error == 0 && parent == zp->z_id);
#endif

	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
	    sizeof (xzp->z_id), tx));

	(void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp,
	    xzp, "", NULL, acl_ids.z_fuidp, vap);

	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);

	*xvpp = ZTOV(xzp);

	return (0);
}
Beispiel #29
0
seL4_Word _utspace_split_alloc(allocman_t *alloc, void *_split, size_t size_bits, seL4_Word type, const cspacepath_t *slot, uintptr_t paddr, bool canBeDev, int *error)
{
    utspace_split_t *split = (utspace_split_t*)_split;
    size_t sel4_size_bits;
    int sel4_error;
    struct utspace_split_node *node;
    /* get the seL4 object size (in bits) for this object type */
    sel4_size_bits = get_sel4_object_size(type, size_bits);
    if (size_bits != vka_get_object_size(type, sel4_size_bits) || size_bits == 0) {
        SET_ERROR(error, 1);
        return 0;
    }
    struct utspace_split_node **head = NULL;
    /* if we're allocating at a particular paddr then we will just trawl through every pool
     * and see if we can find out which one has what we want */
    if (paddr != ALLOCMAN_NO_PADDR) {
        if (canBeDev) {
            head = find_head_for_paddr(split->dev_heads, paddr, size_bits);
            if (!head) {
                head = find_head_for_paddr(split->dev_mem_heads, paddr, size_bits);
            }
        }
        if (!head) {
            head = find_head_for_paddr(split->heads, paddr, size_bits);
        }
        if (!head) {
            SET_ERROR(error, 1);
            ZF_LOGE("Failed to find any untyped capable of creating an object at address %p", (void*)paddr);
            return 0;
        }
        if (_refill_pool(alloc, split, head, size_bits, paddr)) {
            /* out of memory? */
            SET_ERROR(error, 1);
            ZF_LOGV("Failed to refill pool to allocate object of size %zu", size_bits);
            return 0;
        }
        /* search for the node we want to use. We have the advantage of knowing that
         * due to objects being size aligned that the base paddr of the untyped will
         * be exactly the paddr we want */
        for (node = head[size_bits]; node && node->paddr != paddr; node = node->next);
        /* _refill_pool should not have returned if this wasn't possible */
        assert(node);
    } else {
        /* if we can use device memory then preference allocating from there */
        if (canBeDev) {
            if (_refill_pool(alloc, split, split->dev_mem_heads, size_bits, ALLOCMAN_NO_PADDR)) {
                /* out of memory? */
                SET_ERROR(error, 1);
                ZF_LOGV("Failed to refill pool to allocate object of size %zu", size_bits);
                return 0;
            }
            head = split->dev_mem_heads;
        }
        if (!head) {
            head = split->heads;
            if (_refill_pool(alloc, split, head, size_bits, ALLOCMAN_NO_PADDR)) {
                /* out of memory? */
                SET_ERROR(error, 1);
                ZF_LOGV("Failed to refill pool to allocate object of size %zu", size_bits);
                return 0;
            }
        }
        /* use the first node for lack of a better one */
        node = head[size_bits];
    }
    /* Perform the untyped retype */
    sel4_error = seL4_Untyped_Retype(node->ut.capPtr, type, sel4_size_bits, slot->root, slot->dest, slot->destDepth, slot->offset, 1);
    if (sel4_error != seL4_NoError) {
        /* Well this shouldn't happen */
        ZF_LOGE("Failed to retype untyped, error %d\n", sel4_error);
        SET_ERROR(error, 1);
        return 0;
    }
    /* remove the node */
    _remove_node(&head[size_bits], node);
    SET_ERROR(error, 0);
    /* return the node as a cookie */
    return (seL4_Word)node;
}
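The head[size_bits] accesses above index one free list per object size, and the paddr scan works because untypeds are size-aligned: a node that can supply the requested physical address must have exactly that address as its base. A sketch of that structure with hypothetical types:

#include <stdint.h>
#include <stddef.h>

#define MAX_SIZE_BITS 48

struct ut_node {
    uintptr_t paddr;            /* base physical address, size-aligned */
    struct ut_node *next;
};

/* one singly linked free list per power-of-two object size */
static struct ut_node *head[MAX_SIZE_BITS];

static struct ut_node *find_node(size_t size_bits, uintptr_t paddr)
{
    struct ut_node *node;

    /* size-aligned bases mean an exact-match scan is sufficient */
    for (node = head[size_bits]; node != NULL; node = node->next)
        if (node->paddr == paddr)
            return node;
    return NULL;
}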
Beispiel #30
0
int
zfs_domount(struct super_block *sb, zfs_mntopts_t *zmo, int silent)
{
	const char *osname = zmo->z_osname;
	zfs_sb_t *zsb;
	struct inode *root_inode;
	uint64_t recordsize;
	int error;

	error = zfs_sb_create(osname, zmo, &zsb);
	if (error)
		return (error);

	if ((error = dsl_prop_get_integer(osname, "recordsize",
	    &recordsize, NULL)))
		goto out;

	zsb->z_sb = sb;
	sb->s_fs_info = zsb;
	sb->s_magic = ZFS_SUPER_MAGIC;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_time_gran = 1;
	sb->s_blocksize = recordsize;
	sb->s_blocksize_bits = ilog2(recordsize);
	zsb->z_bdi.ra_pages = 0;
	sb->s_bdi = &zsb->z_bdi;

	error = -zpl_bdi_setup_and_register(&zsb->z_bdi, "zfs");
	if (error)
		goto out;

	/* Set callback operations for the file system. */
	sb->s_op = &zpl_super_operations;
	sb->s_xattr = zpl_xattr_handlers;
	sb->s_export_op = &zpl_export_operations;
#ifdef HAVE_S_D_OP
	sb->s_d_op = &zpl_dentry_operations;
#endif /* HAVE_S_D_OP */

	/* Set features for file system. */
	zfs_set_fuid_feature(zsb);

	if (dmu_objset_is_snapshot(zsb->z_os)) {
		uint64_t pval;

		atime_changed_cb(zsb, B_FALSE);
		readonly_changed_cb(zsb, B_TRUE);
		if ((error = dsl_prop_get_integer(osname,
		    "xattr", &pval, NULL)))
			goto out;
		xattr_changed_cb(zsb, pval);
		if ((error = dsl_prop_get_integer(osname,
		    "acltype", &pval, NULL)))
			goto out;
		acltype_changed_cb(zsb, pval);
		zsb->z_issnap = B_TRUE;
		zsb->z_os->os_sync = ZFS_SYNC_DISABLED;
		zsb->z_snap_defer_time = jiffies;

		mutex_enter(&zsb->z_os->os_user_ptr_lock);
		dmu_objset_set_user(zsb->z_os, zsb);
		mutex_exit(&zsb->z_os->os_user_ptr_lock);
	} else {
		error = zfs_sb_setup(zsb, B_TRUE);
		if (error)
			goto out;
	}

	/* Allocate a root inode for the filesystem. */
	error = zfs_root(zsb, &root_inode);
	if (error) {
		(void) zfs_umount(sb);
		zsb = NULL;	/* avoid double free; first free in zfs_umount */
		goto out;
	}

	/* Allocate a root dentry for the filesystem */
	sb->s_root = d_make_root(root_inode);
	if (sb->s_root == NULL) {
		(void) zfs_umount(sb);
		zsb = NULL;	/* avoid double free; first free in zfs_umount */
		error = SET_ERROR(ENOMEM);
		goto out;
	}

	if (!zsb->z_issnap)
		zfsctl_create(zsb);

	zsb->z_arc_prune = arc_add_prune_callback(zpl_prune_sb, sb);
out:
	if (error && zsb != NULL) {
		dmu_objset_disown(zsb->z_os, zsb);
		zfs_sb_free(zsb);
	}

	return (error);
}