Example #1
/*
 * Return the next SLASH2 FID to use.  Note that from ZFS's point of
 * view, it is perfectly okay to use the same SLASH2 FID to refer to
 * different files/directories.  However, doing so can confuse our
 * clients (think identity theft).  So we must make sure that we never
 * reuse a SLASH2 FID, even after a crash.
 *
 * The siteid has already been baked into the initial cursor file.
 */
int
slm_get_next_slashfid(slfid_t *fidp)
{
	uint64_t fid;

	spinlock(&slm_fid_lock);
	/*
	 * This should never happen.  If it does, warn loudly and
	 * refuse to allocate so the sysadmin knows to bump the cycle
	 * bits (assuming there is still room in them); silently
	 * continuing would hide the problem.
	 */
	if (FID_GET_INUM(slm_next_fid) >= FID_MAX_INUM) {
		psclog_warnx("max FID "SLPRI_FID" reached, manual "
		    "intervention needed (bump the cycle bits)",
		    slm_next_fid);
		freelock(&slm_fid_lock);
		return (ENOSPC);
	}
	fid = slm_next_fid++;
	freelock(&slm_fid_lock);

	psclog_diag("most recently allocated FID: "SLPRI_FID, fid);
	*fidp = fid;
	return (0);
}
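
A minimal caller sketch for the allocator above; the wrapper name and the logging are illustrative assumptions, and the relevant SLASH2 headers (plus <string.h>) are assumed to be included.

/*
 * Hypothetical caller: allocate a fresh FID for a new name-space
 * entry and log if the inode-number space is exhausted.
 */
static int
alloc_entry_fid(slfid_t *fidp)
{
	int rc;

	rc = slm_get_next_slashfid(fidp);
	if (rc)
		psclog_warnx("FID allocation failed: %s", strerror(rc));
	return (rc);
}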
Example #2
struct wok_module *
mod_load(const char *path, const char *opts, char *errbuf,
    size_t errlen)
{
	int (*loadf)(struct pscfs *);
	struct wok_module *wm;
	void *h;
	int rc;

	h = dlopen(path, RTLD_NOW);
	if (h == NULL) {
		snprintf(errbuf, errlen, "%s\n", dlerror());
		fprintf(stderr, "%s", errbuf);
		return (NULL);
	}

	loadf = dlsym(h, "pscfs_module_load");
	if (loadf == NULL) {
		dlclose(h);
		snprintf(errbuf, errlen,
		    "symbol pscfs_module_load undefined.\n");
		fprintf(stderr, "%s", errbuf);
		return (NULL);
	}

	wm = PSCALLOC(sizeof(*wm));
	wm->wm_path = pfl_strdup(path);
	wm->wm_handle = h;
	wm->wm_opts = pfl_strdup(opts);
	wm->wm_module.pf_private = wm;
	pflfs_module_init(&wm->wm_module, opts);
	rc = loadf(&wm->wm_module);

	/*
	 * XXX XXX XXX
	 * This is a complete hack but this flush somehow avoids a bunch
	 * of zeroes from ending up in the log...
	 * XXX XXX XXX
	 */
	fflush(stderr);

	if (rc) {
		wm->wm_module.pf_handle_destroy = NULL;
		pflfs_module_destroy(&wm->wm_module);

		dlclose(h);
		PSCFREE(wm->wm_opts);
		PSCFREE(wm->wm_path);
		PSCFREE(wm);
		psclog_warnx("module failed to load: rc=%d module=%s",
		    rc, path);
		strlcpy(errbuf, strerror(rc), errlen);
		return (NULL);
	}
	return (wm);
}
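
A hedged usage sketch for mod_load(); the wrapper and the way the error buffer is sized are assumptions, not part of the wokfs API shown above.

/* Hypothetical wrapper: load one module and report any error text. */
static struct wok_module *
load_one_module(const char *path, const char *opts)
{
	char errbuf[LINE_MAX];		/* LINE_MAX from <limits.h> */
	struct wok_module *wm;

	wm = mod_load(path, opts, errbuf, sizeof(errbuf));
	if (wm == NULL)
		fprintf(stderr, "mod_load %s: %s\n", path, errbuf);
	return (wm);
}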
Example #3
File: walk.c  Project: pscedu/pfl
/*
 * Traverse a file hierarchy and perform an operation on each file
 * system entry.
 * @fn: file root.
 * @flags: behavioral flags.
 * @cmpf: optional dirent comparator for ordering.
 * @cbf: callback to invoke on each file.
 * @arg: optional argument to supply to callback.
 * Notes: the callback will be invoked with a fully resolved absolute
 *	path name unless the file in question is a symbolic link.
 */
int
pfl_filewalk(const char *fn, int flags, void *cmpf, int (*cbf)(FTSENT *,
    void *), void *arg)
{
	char * const pathv[] = { (char *)fn, NULL };
	int rc = 0, f_flags = 0;
	struct stat stb;
	FTSENT *f;
	FTS *fp;

	if (flags & PFL_FILEWALKF_RECURSIVE) {
		if (flags & PFL_FILEWALKF_NOSTAT)
			f_flags |= FTS_NOSTAT;
		if (flags & PFL_FILEWALKF_NOCHDIR)
			f_flags |= FTS_NOCHDIR;
		fp = pfl_fts_open(pathv, f_flags | FTS_COMFOLLOW |
		    FTS_PHYSICAL, cmpf);
		if (fp == NULL)
			psc_fatal("fts_open %s", fn);
		while ((f = pfl_fts_read(fp)) != NULL) {
			switch (f->fts_info) {
			case FTS_NS:
				psclog_warnx("%s: %s", f->fts_path,
				    strerror(f->fts_errno));
				break;
			case FTS_F:
			case FTS_D:
			case FTS_SL:
				if (flags & PFL_FILEWALKF_VERBOSE)
					warnx("processing %s%s",
					    fn, f->fts_info == FTS_D ?
					    "/" : "");
			case FTS_DP:
				rc = cbf(f, arg);
				if (rc) {
					pfl_fts_close(fp);
					return (rc);
				}
				break;
			default:
				if (f->fts_errno == 0)
					f->fts_errno = EOPNOTSUPP;
				psclog_warnx("%s: %s", f->fts_path,
				    strerror(f->fts_errno));
				break;
			}
		}
		pfl_fts_close(fp);
	} else {
		const char *basefn;
		size_t baselen;

		if (lstat(fn, &stb) == -1)
			err(1, "%s", fn);
		basefn = pfl_basename(fn);
		baselen = strlen(basefn);

		f = PSCALLOC(sizeof(*f) + baselen);
		f->fts_accpath = (char *)fn;
		f->fts_path = (char *)fn;
		f->fts_pathlen = strlen(fn);
		strlcpy(f->fts_name, basefn, baselen + 1);
		f->fts_namelen = baselen;
		f->fts_ino = stb.st_ino;
		f->fts_statp = &stb;
		switch (stb.st_mode & S_IFMT) {
		case S_IFDIR: f->fts_info = FTS_D; break;
		case S_IFREG: f->fts_info = FTS_F; break;
		case S_IFLNK: f->fts_info = FTS_SL; break;
		case S_IFBLK: f->fts_info = FTS_DEFAULT; break;
		default:
			psclog_warnx("%s: %s", fn,
			    strerror(EOPNOTSUPP));
			break;
		}
		rc = cbf(f, arg);
		PSCFREE(f);
	}
	return (rc);
}
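
A short sketch of how pfl_filewalk() might be driven; the callback and the flag choice are assumptions for illustration only.

/* Hypothetical callback: print each visited entry's path. */
static int
print_ent(FTSENT *f, void *arg)
{
	(void)arg;
	printf("%s\n", f->fts_path);
	return (0);	/* a nonzero return would abort the walk */
}

/* Walk a tree recursively without ordering the entries. */
static int
walk_tree(const char *root)
{
	return (pfl_filewalk(root, PFL_FILEWALKF_RECURSIVE, NULL,
	    print_ent, NULL));
}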
Example #4
void
slm_repl_upd_write(struct bmap *b, int rel)
{
	struct {
		sl_replica_t	 iosv[SL_MAX_REPLICAS];
		char		*stat[SL_MAX_REPLICAS];
		unsigned	 nios;
	} add, del, chg;

	int off, vold, vnew, sprio, uprio, rc;
	struct sl_mds_iosinfo *si;
	struct bmap_mds_info *bmi;
	struct fidc_membh *f;
	struct sl_resource *r;
	sl_ios_id_t resid;
	unsigned n, nrepls;

	bmi = bmap_2_bmi(b);
	f = b->bcm_fcmh;
	sprio = bmi->bmi_sys_prio;
	uprio = bmi->bmi_usr_prio;

	add.nios = 0;
	del.nios = 0;
	chg.nios = 0;
	nrepls = fcmh_2_nrepls(f);
	for (n = 0, off = 0; n < nrepls; n++, off += SL_BITS_PER_REPLICA) {

		if (n == SL_DEF_REPLICAS)
			mds_inox_ensure_loaded(fcmh_2_inoh(f));

		resid = fcmh_2_repl(f, n);
		vold = SL_REPL_GET_BMAP_IOS_STAT(bmi->bmi_orepls, off);
		vnew = SL_REPL_GET_BMAP_IOS_STAT(bmi->bmi_repls, off);

		r = libsl_id2res(resid);
		si = r ? res2iosinfo(r) : &slm_null_iosinfo;

		if (vold == vnew)
			;

		/* Work was added. */
		else if ((vold != BREPLST_REPL_SCHED &&
		    vold != BREPLST_GARBAGE_QUEUED &&
		    vold != BREPLST_GARBAGE_SCHED &&
		    vnew == BREPLST_REPL_QUEUED) ||
		    (vold != BREPLST_GARBAGE_SCHED &&
		     vnew == BREPLST_GARBAGE_QUEUED &&
		     (si->si_flags & SIF_PRECLAIM_NOTSUP) == 0)) {
			OPSTAT_INCR("repl-work-add");
			PUSH_IOS(b, &add, resid, NULL);
		}

		/* Work has finished. */
		else if ((vold == BREPLST_REPL_QUEUED ||
		     vold == BREPLST_REPL_SCHED ||
		     vold == BREPLST_TRUNC_SCHED ||
		     vold == BREPLST_TRUNC_QUEUED ||
		     vold == BREPLST_GARBAGE_SCHED ||
		     vold == BREPLST_VALID) &&
		    (((si->si_flags & SIF_PRECLAIM_NOTSUP) &&
		      vnew == BREPLST_GARBAGE_QUEUED) ||
		     vnew == BREPLST_VALID ||
		     vnew == BREPLST_INVALID)) {
			OPSTAT_INCR("repl-work-del");
			PUSH_IOS(b, &del, resid, NULL);
		}

		/*
		 * Work that was previously scheduled failed, so
		 * requeue it.
		 */
		else if (vold == BREPLST_REPL_SCHED ||
		    vold == BREPLST_GARBAGE_SCHED ||
		    vold == BREPLST_TRUNC_SCHED)
			PUSH_IOS(b, &chg, resid, "Q");

		/* Work was scheduled. */
		else if (vnew == BREPLST_REPL_SCHED ||
		    vnew == BREPLST_GARBAGE_SCHED ||
		    vnew == BREPLST_TRUNC_SCHED)
			PUSH_IOS(b, &chg, resid, "S");

		/* Work was reprioritized. */
		else if (sprio != -1 || uprio != -1)
			PUSH_IOS(b, &chg, resid, NULL);
	}

	for (n = 0; n < add.nios; n++) {
		rc = slm_upsch_insert(b, add.iosv[n].bs_id, sprio,
		    uprio);
		if (!rc)
			continue;
		psclog_warnx("upsch insert failed: bno = %d, "
		    "fid=%"PRId64", ios= %d, rc = %d",
		    b->bcm_bmapno, bmap_2_fid(b), 
		    add.iosv[n].bs_id, rc);
	}

	for (n = 0; n < del.nios; n++) {
		spinlock(&slm_upsch_lock);
		dbdo(NULL, NULL,
		    " DELETE FROM upsch"
		    " WHERE	resid = ?"
		    "   AND	fid = ?"
		    "   AND	bno = ?",
		    SQLITE_INTEGER, del.iosv[n].bs_id,
		    SQLITE_INTEGER64, bmap_2_fid(b),
		    SQLITE_INTEGER, b->bcm_bmapno);
		freelock(&slm_upsch_lock);
	}

	for (n = 0; n < chg.nios; n++) {
		spinlock(&slm_upsch_lock);
		dbdo(NULL, NULL,
		    " UPDATE	upsch"
		    " SET	status = IFNULL(?, status),"
		    "		sys_prio = IFNULL(?, sys_prio),"
		    "		usr_prio = IFNULL(?, usr_prio)"
		    " WHERE	resid = ?"
		    "	AND	fid = ?"
		    "	AND	bno = ?",
		    chg.stat[n] ? SQLITE_TEXT : SQLITE_NULL,
		    chg.stat[n] ? chg.stat[n] : 0,
		    sprio == -1 ? SQLITE_NULL : SQLITE_INTEGER,
		    sprio == -1 ? 0 : sprio,
		    uprio == -1 ? SQLITE_NULL : SQLITE_INTEGER,
		    uprio == -1 ? 0 : uprio,
		    SQLITE_INTEGER, chg.iosv[n].bs_id,
		    SQLITE_INTEGER64, bmap_2_fid(b),
		    SQLITE_INTEGER, b->bcm_bmapno);
		freelock(&slm_upsch_lock);
	}

	bmap_2_bmi(b)->bmi_sys_prio = -1;
	bmap_2_bmi(b)->bmi_usr_prio = -1;

	if (rel) {
		BMAP_LOCK(b);
		b->bcm_flags &= ~BMAPF_REPLMODWR;
		bmap_wake_locked(b);
		bmap_op_done_type(b, BMAP_OPCNT_WORK);
	}
}
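
PUSH_IOS() is used above but not shown; the following is a hypothetical sketch of such an accumulator, inferred from the add/del/chg struct layout. The real SLASH2 macro may differ (for instance, it presumably also logs against the bmap argument).

/* Hypothetical PUSH_IOS-style accumulator (bmap argument unused here). */
#define PUSH_IOS(b, a, id, st)						\
	do {								\
		(void)(b);						\
		(a)->iosv[(a)->nios].bs_id = (id);			\
		(a)->stat[(a)->nios] = (st);				\
		(a)->nios++;						\
	} while (0)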
Example #5
/**
 * rsx_bulkserver - Setup a source or sink for a server.
 * @rq: RPC request associated with GET.
 * @type: BULK_GET_SINK to receive from a client or BULK_PUT_SOURCE to
 *	push to a client.
 * @ptl: portal to issue bulk xfer across.
 * @iov: iovec array of receive buffer.
 * @n: #iovecs.
 * Returns: 0 or negative errno on error.
 */
int
rsx_bulkserver(struct pscrpc_request *rq, int type, int ptl,
    struct iovec *iov, int n)
{
	int sum, i, rc, comms_error;
	struct pscrpc_bulk_desc *desc;
	struct l_wait_info lwi;
	uint64_t *v8;
	uint8_t *v1;

	psc_assert(type == BULK_GET_SINK || type == BULK_PUT_SOURCE);

	desc = pscrpc_prep_bulk_exp(rq, n, type, ptl);
	if (desc == NULL) {
		psclog_warnx("pscrpc_prep_bulk_exp returned a null desc");
		return (-ENOMEM); // XXX errno
	}
	desc->bd_nob = 0;
	desc->bd_iov_count = n;
	memcpy(desc->bd_iov, iov, n * sizeof(*iov));
	for (i = 0; i < n; i++)
		desc->bd_nob += iov[i].iov_len;

	/* Check for client eviction during previous I/O before proceeding. */
	if (desc->bd_export->exp_failed)
		rc = -ENOTCONN;
	else
		rc = pscrpc_start_bulk_transfer(desc);
	if (rc == 0) {
		lwi = LWI_TIMEOUT_INTERVAL(OBD_TIMEOUT / 2,
		    100, pfl_rsx_timeout, desc);

		rc = pscrpc_svr_wait_event(&desc->bd_waitq,
		    (!pscrpc_bulk_active(desc) || desc->bd_export->exp_failed),
		    &lwi, NULL);

		LASSERT(rc == 0 || rc == -ETIMEDOUT);
		if (rc == -ETIMEDOUT) {
			DEBUG_REQ(PLL_ERROR, rq, "timeout on bulk GET");
			pscrpc_abort_bulk(desc);
		} else if (desc->bd_export->exp_failed) {
			DEBUG_REQ(PLL_ERROR, rq, "eviction on bulk GET");
			rc = -ENOTCONN;
			pscrpc_abort_bulk(desc);
		} else if (!desc->bd_success ||
		    desc->bd_nob_transferred != desc->bd_nob) {
			DEBUG_REQ(PLL_ERROR, rq, "%s bulk GET %d(%d)",
			    desc->bd_success ? "truncated" : "network error on",
			    desc->bd_nob_transferred, desc->bd_nob);
			rc = -ETIMEDOUT;
		}
	} else {
		DEBUG_REQ(PLL_ERROR, rq, "pscrpc I/O bulk get failed: "
		    "rc=%d", rc);
	}
	comms_error = (rc != 0);

	/* count the number of bytes received, and hold for later... */
	if (rc == 0) {
		v1 = desc->bd_iov[0].iov_base;
		v8 = desc->bd_iov[0].iov_base;
		if (v1 == NULL) {
			DEBUG_REQ(PLL_ERROR, rq,
			    "desc->bd_iov[0].iov_base is NULL");
			rc = -ENXIO;
			goto out;
		}

		DEBUG_REQ(PLL_DIAG, rq,
		    "got %u bytes of bulk data across %d IOVs: "
		    "first byte is %#x (%"PRIx64")",
		    desc->bd_nob, desc->bd_iov_count, *v1, *v8);

		sum = 0;
		for (i = 0; i < desc->bd_iov_count; i++)
			sum += desc->bd_iov[i].iov_len;
		if (sum != desc->bd_nob)
			DEBUG_REQ(PLL_WARN, rq,
			    "sum (%d) does not match bd_nob (%d)",
			    sum, desc->bd_nob);
		//rc = pscrpc_reply(rq);
	}

 out:
	if (rc == 0)
		;
	else if (!comms_error) {
		/* Only reply if there were no comm problems with bulk. */
		rq->rq_status = rc;
		pscrpc_error(rq);
	} else {
#if 0
		// For now let's not free the reply state..
		if (rq->rq_reply_state != NULL) {
			/* reply out callback would free */
			pscrpc_rs_decref(rq->rq_reply_state);
			rq->rq_reply_state = NULL;
			rq->rq_repmsg      = NULL;
		}
#endif
		DEBUG_REQ(PLL_WARN, rq,
		    "ignoring bulk I/O comm error; "
		    "id %s - client will retry",
		    libcfs_id2str(rq->rq_peer));
	}
	pscrpc_free_bulk(desc);
	return (rc);
}
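
A hedged server-side usage sketch; the handler shape and the portal constant SRMC_BULK_PORTAL are placeholders, not part of the API shown above.

/* Hypothetical handler fragment: sink a client's bulk data into buf. */
static int
sink_client_bulk(struct pscrpc_request *rq, void *buf, size_t len)
{
	struct iovec iov;		/* <sys/uio.h> */

	iov.iov_base = buf;
	iov.iov_len = len;
	return (rsx_bulkserver(rq, BULK_GET_SINK, SRMC_BULK_PORTAL,
	    &iov, 1));
}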