Example #1
File: fld_cache.c Project: Lezval/lustre
/**
 * create fld cache.
 */
struct fld_cache *fld_cache_init(const char *name,
                                 int cache_size, int cache_threshold)
{
        struct fld_cache *cache;
        ENTRY;

        LASSERT(name != NULL);
        LASSERT(cache_threshold < cache_size);

        OBD_ALLOC_PTR(cache);
        if (cache == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        CFS_INIT_LIST_HEAD(&cache->fci_entries_head);
        CFS_INIT_LIST_HEAD(&cache->fci_lru);

        cache->fci_cache_count = 0;
        rwlock_init(&cache->fci_lock);

        strlcpy(cache->fci_name, name,
                sizeof(cache->fci_name));

        cache->fci_cache_size = cache_size;
        cache->fci_threshold = cache_threshold;

        /* Init fld cache info. */
        memset(&cache->fci_stat, 0, sizeof(cache->fci_stat));

        CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n",
               cache->fci_name, cache_size, cache_threshold);

        RETURN(cache);
}
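Since fld_cache_init() reports allocation failure as ERR_PTR(-ENOMEM) rather than NULL, callers must test the result with IS_ERR()/PTR_ERR(). A minimal caller sketch (the cache name and sizes are illustrative, not from the source; the LASSERT above requires threshold < size):

        struct fld_cache *cache;

        cache = fld_cache_init("srv-fld", 4096, 4096 * 4 / 5);
        if (IS_ERR(cache))
                return PTR_ERR(cache);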
Example #2
static void null_init_internal(void)
{
        static CFS_HLIST_HEAD(__list);

        null_sec.ps_policy = &null_policy;
        cfs_atomic_set(&null_sec.ps_refcount, 1);     /* always busy */
        null_sec.ps_id = -1;
        null_sec.ps_import = NULL;
        null_sec.ps_flvr.sf_rpc = SPTLRPC_FLVR_NULL;
        null_sec.ps_flvr.sf_flags = 0;
        null_sec.ps_part = LUSTRE_SP_ANY;
        null_sec.ps_dying = 0;
        cfs_spin_lock_init(&null_sec.ps_lock);
        cfs_atomic_set(&null_sec.ps_nctx, 1);         /* for "null_cli_ctx" */
        CFS_INIT_LIST_HEAD(&null_sec.ps_gc_list);
        null_sec.ps_gc_interval = 0;
        null_sec.ps_gc_next = 0;

        cfs_hlist_add_head(&null_cli_ctx.cc_cache, &__list);
        cfs_atomic_set(&null_cli_ctx.cc_refcount, 1);    /* for hash */
        null_cli_ctx.cc_sec = &null_sec;
        null_cli_ctx.cc_ops = &null_ctx_ops;
        null_cli_ctx.cc_expire = 0;
        null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
                                PTLRPC_CTX_UPTODATE;
        null_cli_ctx.cc_vcred.vc_uid = 0;
        cfs_spin_lock_init(&null_cli_ctx.cc_lock);
        CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
        CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
}
Example #3
File: workitem.c Project: DCteam/lustre
static void
cfs_wi_sched_init(cfs_wi_sched_t *sched)
{
        sched->ws_shuttingdown = 0;
#ifdef __KERNEL__
        cfs_spin_lock_init(&sched->ws_lock);
        cfs_waitq_init(&sched->ws_waitq);
#endif
        CFS_INIT_LIST_HEAD(&sched->ws_runq);
        CFS_INIT_LIST_HEAD(&sched->ws_rerunq);
}
Example #4
File: tracefile.c Project: DCteam/lustre
static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
{
        int pgcount = tcd->tcd_cur_pages / 10;
        struct page_collection pc;
        struct cfs_trace_page *tage;
        struct cfs_trace_page *tmp;

        /*
         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
         * from here: this will lead to infinite recursion.
         */

        if (printk_ratelimit())
                printk(CFS_KERN_WARNING "debug daemon buffer overflowed; "
                       "discarding 10%% of pages (%d of %ld)\n",
                       pgcount + 1, tcd->tcd_cur_pages);

        CFS_INIT_LIST_HEAD(&pc.pc_pages);
        cfs_spin_lock_init(&pc.pc_lock);

        cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
                                           struct cfs_trace_page, linkage) {
                if (pgcount-- == 0)
                        break;

                cfs_list_move_tail(&tage->linkage, &pc.pc_pages);
                tcd->tcd_cur_pages--;
        }
        put_pages_on_tcd_daemon_list(&pc, tcd);
}
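cfs_tcd_shrink() shows a common batching idiom: walk a live list with a deletion-safe iterator and move up to a budgeted number of entries onto a private list for later processing. The same shape with the stock kernel list API, which the cfs_list_* wrappers mirror (all names here are illustrative):

#include <linux/list.h>

struct item {
        struct list_head link;
};

/* Move at most "budget" items from "live" onto "batch"; the _safe
 * iterator tolerates unlinking the cursor entry while walking. */
static void batch_collect(struct list_head *live,
                          struct list_head *batch, int budget)
{
        struct item *it, *tmp;

        list_for_each_entry_safe(it, tmp, live, link) {
                if (budget-- == 0)
                        break;
                list_move_tail(&it->link, batch);
        }
}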
Example #5
void cfs_timer_init(cfs_timer_t *l, cfs_timer_func_t *func, void *arg)
{
        CFS_INIT_LIST_HEAD(&l->tl_list);
        l->function = func;
        l->data = (ulong_ptr_t)arg;
        return;
}
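cfs_timer_init() only initializes the timer's list head and records the callback and its cookie; arming the timer with an absolute expiry time is a separate step, as Example #16 below does:

        cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
        ...
        cfs_timer_arm(&lcw->lcw_timer, cfs_time_seconds(timeout) +
                      cfs_time_current());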
Example #6
/**
 * Parses \<numaddr_range\> token of the syntax.
 *
 * \retval 1 if \a str parses to \<number\> | \<expr_list\>
 * \retval 0 otherwise
 */
static int
num_parse(char *str, int len,
          cfs_list_t *list, unsigned min, unsigned max)
{
    __u32 num;
    struct lstr src;
    struct numaddr_range *numaddr;

    src.ls_str = str;
    src.ls_len = len;

    LIBCFS_ALLOC(numaddr, sizeof(struct numaddr_range));
    if (numaddr == NULL)
        return 0;
    cfs_list_add_tail(&numaddr->nar_link, list);
    CFS_INIT_LIST_HEAD(&numaddr->nar_range_exprs);

    if (libcfs_str2num_check(src.ls_str, src.ls_len, &num, min, max)) {
        /* <number> */
        struct range_expr *expr;

        LIBCFS_ALLOC(expr, sizeof(struct range_expr));
        if (expr == NULL)
            return 0;

        expr->re_lo = expr->re_hi = num;
        expr->re_stride = 1;
        cfs_list_add_tail(&expr->re_link, &numaddr->nar_range_exprs);
        return 1;
    }

    return parse_expr_list(&src, &numaddr->nar_range_exprs, min, max);
}
Example #7
File: dcache.c Project: DCteam/lustre
int ll_set_dd(struct dentry *de)
{
        ENTRY;
        LASSERT(de != NULL);

        CDEBUG(D_DENTRY, "ldd on dentry %.*s (%p) parent %p inode %p refc %d\n",
               de->d_name.len, de->d_name.name, de, de->d_parent, de->d_inode,
               atomic_read(&de->d_count));

        if (de->d_fsdata == NULL) {
                struct ll_dentry_data *lld;

                OBD_ALLOC_PTR(lld);
                if (likely(lld != NULL)) {
                        CFS_INIT_LIST_HEAD(&lld->lld_sa_alias);
                        lock_dentry(de);
                        if (likely(de->d_fsdata == NULL))
                                de->d_fsdata = lld;
                        else
                                OBD_FREE_PTR(lld);
                        unlock_dentry(de);
                } else {
                        RETURN(-ENOMEM);
                }
        }

        RETURN(0);
}
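ll_set_dd() demonstrates a race-safe lazy-init shape: allocate before taking the lock, re-check the field under the lock, and free the duplicate if another thread won the race. The same idiom in portable C, as a sketch with hypothetical names:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct obj {
        pthread_mutex_t lock;   /* assumed initialized elsewhere */
        void           *priv;
};

static int obj_lazy_init(struct obj *o, size_t size)
{
        void *p = calloc(1, size);      /* allocate outside the lock */

        if (p == NULL)
                return -ENOMEM;
        pthread_mutex_lock(&o->lock);
        if (o->priv == NULL)
                o->priv = p;            /* we won the race */
        else
                free(p);                /* lost the race: discard ours */
        pthread_mutex_unlock(&o->lock);
        return 0;
}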
Example #8
File: module.c Project: DCteam/lustre
static int init_libcfs_module(void)
{
        int rc;

        libcfs_arch_init();
        libcfs_init_nidstrings();
        cfs_init_rwsem(&cfs_tracefile_sem);
        cfs_init_mutex(&cfs_trace_thread_sem);
        cfs_init_rwsem(&ioctl_list_sem);
        CFS_INIT_LIST_HEAD(&ioctl_list);

        rc = libcfs_debug_init(5 * 1024 * 1024);
        if (rc < 0) {
                printk(CFS_KERN_ERR "LustreError: libcfs_debug_init: %d\n", rc);
                return (rc);
        }

#if LWT_SUPPORT
        rc = lwt_init();
        if (rc != 0) {
                CERROR("lwt_init: error %d\n", rc);
                goto cleanup_debug;
        }
#endif
        rc = cfs_psdev_register(&libcfs_dev);
        if (rc) {
                CERROR("misc_register: error %d\n", rc);
                goto cleanup_lwt;
        }

        rc = cfs_wi_startup();
        if (rc) {
                CERROR("startup workitem: error %d\n", rc);
                goto cleanup_deregister;
        }

        rc = insert_proc();
        if (rc) {
                CERROR("insert_proc: error %d\n", rc);
                goto cleanup_wi;
        }

        CDEBUG (D_OTHER, "portals setup OK\n");
        return (0);

 cleanup_wi:
        cfs_wi_shutdown();
 cleanup_deregister:
        cfs_psdev_deregister(&libcfs_dev);
 cleanup_lwt:
#if LWT_SUPPORT
        lwt_fini();
 cleanup_debug:
#endif
        libcfs_debug_cleanup();
        return rc;
}
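init_libcfs_module() uses the kernel's staged-goto unwinding idiom: each setup step that can fail jumps to a label that tears down everything already initialized, in reverse order. Reduced to a skeleton (step_a/step_b are placeholders, not functions from the source):

static int  step_a_init(void);
static void step_a_fini(void);
static int  step_b_init(void);

static int setup(void)
{
        int rc;

        rc = step_a_init();
        if (rc)
                return rc;

        rc = step_b_init();
        if (rc)
                goto cleanup_a;

        return 0;

 cleanup_a:
        step_a_fini();
        return rc;
}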
Example #9
static struct page *llu_get_user_page(int index, void *addr, int offset,
                                      int count)
{
        struct page *page;

        OBD_ALLOC_PTR(page);
        if (!page)
                return NULL;
        page->index = index;
        page->addr = addr;
        page->_offset = offset;
        page->_count = count;

        CFS_INIT_LIST_HEAD(&page->list);
        CFS_INIT_LIST_HEAD(&page->_node);

        return page;
}
Example #10
int
lwt_init ()
{
        int     i;
        int     j;

        for (i = 0; i < cfs_num_online_cpus(); i++)
                if (lwt_cpus[i].lwtc_current_page != NULL)
                        return (-EALREADY);

        LASSERT (!lwt_enabled);

        /* NULL pointers, zero scalars */
        memset (lwt_cpus, 0, sizeof (lwt_cpus));
        lwt_pages_per_cpu =
                LWT_MEMORY / (cfs_num_online_cpus() * PAGE_CACHE_SIZE);

        for (i = 0; i < cfs_num_online_cpus(); i++)
                for (j = 0; j < lwt_pages_per_cpu; j++) {
                        struct page *page = alloc_page(GFP_KERNEL);
                        lwt_page_t  *lwtp;

                        if (page == NULL) {
                                CERROR ("Can't allocate page\n");
                                lwt_fini ();
                                return (-ENOMEM);
                        }

                        LIBCFS_ALLOC(lwtp, sizeof (*lwtp));
                        if (lwtp == NULL) {
                                CERROR ("Can't allocate lwtp\n");
                                __free_page(page);
                                lwt_fini ();
                                return (-ENOMEM);
                        }

                        lwtp->lwtp_page = page;
                        lwtp->lwtp_events = page_address(page);
                        memset(lwtp->lwtp_events, 0, PAGE_CACHE_SIZE);

                        if (j == 0) {
                                CFS_INIT_LIST_HEAD (&lwtp->lwtp_list);
                                lwt_cpus[i].lwtc_current_page = lwtp;
                        } else {
                                cfs_list_add (&lwtp->lwtp_list,
                                    &lwt_cpus[i].lwtc_current_page->lwtp_list);
                        }
                }

        lwt_enabled = 1;
        cfs_mb();

        LWT_EVENT(0,0,0,0);

        return (0);
}
Example #11
File: llog.c Project: Lezval/lustre
/*
 * Allocate a new log or catalog handle
 * Used inside llog_open().
 */
struct llog_handle *llog_alloc_handle(void)
{
	struct llog_handle *loghandle;

	OBD_ALLOC_PTR(loghandle);
	if (loghandle == NULL)
		return NULL;

	init_rwsem(&loghandle->lgh_lock);
	spin_lock_init(&loghandle->lgh_hdr_lock);
	CFS_INIT_LIST_HEAD(&loghandle->u.phd.phd_entry);
	cfs_atomic_set(&loghandle->lgh_refcount, 1);

	return loghandle;
}
Example #12
/**
 * Finds or creates struct nidrange.
 *
 * Checks if \a src is a valid network name, looks for a corresponding
 * nidrange in the list of nidranges (\a nidlist), and creates a new
 * struct nidrange if it is not found.
 *
 * \retval pointer to struct nidrange matching network specified via \a src
 * \retval NULL if \a src does not match any network
 */
static struct nidrange *
add_nidrange(const struct lstr *src,
             cfs_list_t *nidlist)
{
    struct netstrfns *nf;
    struct nidrange *nr;
    int endlen;
    unsigned netnum;

    if (src->ls_len >= LNET_NIDSTR_SIZE)
        return NULL;

    nf = libcfs_namenum2netstrfns(src->ls_str);
    if (nf == NULL)
        return NULL;
    endlen = src->ls_len - strlen(nf->nf_name);
    if (endlen == 0)
        /* network name only, e.g. "elan" or "tcp" */
        netnum = 0;
    else {
        /* e.g. "elan25" or "tcp23", refuse to parse if
         * network name is not appended with decimal or
         * hexadecimal number */
        if (!libcfs_str2num_check(src->ls_str + strlen(nf->nf_name),
                                  endlen, &netnum,
                                  0, MAX_NUMERIC_VALUE))
            return NULL;
    }

    cfs_list_for_each_entry(nr, nidlist, nr_link) {
        if (nr->nr_netstrfns != nf)
            continue;
        if (nr->nr_netnum != netnum)
            continue;
        return nr;
    }

    LIBCFS_ALLOC(nr, sizeof(struct nidrange));
    if (nr == NULL)
        return NULL;
    cfs_list_add_tail(&nr->nr_link, nidlist);
    CFS_INIT_LIST_HEAD(&nr->nr_addrranges);
    nr->nr_netstrfns = nf;
    nr->nr_all = 0;
    nr->nr_netnum = netnum;

    return nr;
}
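add_nidrange() is a find-or-create over a linked list: scan for a matching node and return it, otherwise allocate, link, and initialize a new one. The generic shape with the stock kernel list API (illustrative names):

#include <linux/list.h>
#include <linux/slab.h>

struct node {
        struct list_head link;
        int              key;
};

static struct node *find_or_create(struct list_head *head, int key)
{
        struct node *n;

        list_for_each_entry(n, head, link)
                if (n->key == key)
                        return n;               /* found existing */

        n = kzalloc(sizeof(*n), GFP_KERNEL);
        if (n == NULL)
                return NULL;
        n->key = key;
        list_add_tail(&n->link, head);          /* link the new node */
        return n;
}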
Example #13
File: mgs_handler.c Project: LLNL/lustre
static inline int mgs_init_export(struct obd_export *exp)
{
        struct mgs_export_data *data = &exp->u.eu_mgs_data;

        /* init mgs_export_data for fsc */
        cfs_spin_lock_init(&data->med_lock);
        CFS_INIT_LIST_HEAD(&data->med_clients);

        cfs_spin_lock(&exp->exp_lock);
        exp->exp_connecting = 1;
        cfs_spin_unlock(&exp->exp_lock);

        /* self-export doesn't need client data and ldlm initialization */
        if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid,
                                     &exp->exp_client_uuid)))
                return 0;
        return ldlm_init_export(exp);
}
Example #14
/**
 * Parses \<addrrange\> token of the syntax.
 *
 * Allocates struct addrrange and links to \a nidrange via
 * (nidrange::nr_addrranges)
 *
 * \retval 1 if \a src parses to '*' | \<ipaddr_range\> | \<numaddr_range\>
 * \retval 0 otherwise
 */
static int
parse_addrange(const struct lstr *src, struct nidrange *nidrange)
{
    struct addrrange *addrrange;

    if (src->ls_len == 1 && src->ls_str[0] == '*') {
        nidrange->nr_all = 1;
        return 1;
    }

    LIBCFS_ALLOC(addrrange, sizeof(struct addrrange));
    if (addrrange == NULL)
        return 0;
    cfs_list_add_tail(&addrrange->ar_link, &nidrange->nr_addrranges);
    CFS_INIT_LIST_HEAD(&addrrange->ar_numaddr_ranges);

    return nidrange->nr_netstrfns->nf_parse_addrlist(src->ls_str,
            src->ls_len,
            &addrrange->ar_numaddr_ranges);
}
Example #15
File: tracefile.c Project: DCteam/lustre
static void
panic_collect_pages(struct page_collection *pc)
{
        /* Do the collect_pages job on a single CPU: assumes that all other
         * CPUs have been stopped during a panic.  If this isn't true for some
         * arch, this will have to be implemented separately in each arch.  */
        int                        i;
        int                        j;
        struct cfs_trace_cpu_data *tcd;

        CFS_INIT_LIST_HEAD(&pc->pc_pages);

        cfs_tcd_for_each(tcd, i, j) {
                cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
                tcd->tcd_cur_pages = 0;

                if (pc->pc_want_daemon_pages) {
                        cfs_list_splice_init(&tcd->tcd_daemon_pages,
                                             &pc->pc_pages);
                        tcd->tcd_cur_daemon_pages = 0;
                }
        }
}
Example #16
File: watchdog.c Project: hpc/lustre
struct lc_watchdog *lc_watchdog_add(int timeout,
                                    void (*callback)(pid_t, void *),
                                    void *data)
{
        struct lc_watchdog *lcw = NULL;
        ENTRY;

        LIBCFS_ALLOC(lcw, sizeof(*lcw));
        if (lcw == NULL) {
                CDEBUG(D_INFO, "Could not allocate new lc_watchdog\n");
                RETURN(ERR_PTR(-ENOMEM));
        }

        cfs_spin_lock_init(&lcw->lcw_lock);
        lcw->lcw_refcount = 1; /* refcount for owner */
        lcw->lcw_task     = cfs_current();
        lcw->lcw_pid      = cfs_curproc_pid();
        lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
        lcw->lcw_data     = data;
        lcw->lcw_state    = LC_WATCHDOG_DISABLED;

        CFS_INIT_LIST_HEAD(&lcw->lcw_list);
        cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);

        cfs_down(&lcw_refcount_sem);
        if (++lcw_refcount == 1)
                lcw_dispatch_start();
        cfs_up(&lcw_refcount_sem);

        /* Keep this working in case we enable them by default */
        if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
                lcw->lcw_last_touched = cfs_time_current();
                cfs_timer_arm(&lcw->lcw_timer, cfs_time_seconds(timeout) +
                              cfs_time_current());
        }

        RETURN(lcw);
}
Example #17
/*
 * Concurrency: no concurrent access is possible that early in object
 * life-cycle.
 */
struct lu_object *osd_object_alloc(const struct lu_env *env,
				   const struct lu_object_header *hdr,
				   struct lu_device *d)
{
	struct osd_object *mo;

	OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, CFS_ALLOC_IO);
	if (mo != NULL) {
		struct lu_object *l;

		l = &mo->oo_dt.do_lu;
		dt_object_init(&mo->oo_dt, NULL, d);
		mo->oo_dt.do_ops = &osd_obj_ops;
		l->lo_ops = &osd_lu_obj_ops;
		CFS_INIT_LIST_HEAD(&mo->oo_sa_linkage);
		init_rwsem(&mo->oo_sem);
		sema_init(&mo->oo_guard, 1);
		rwlock_init(&mo->oo_attr_lock);
		return l;
	} else {
		return NULL;
	}
}
Example #18
struct trace_cpu_data *trace_get_tcd(void)
{
	struct trace_cpu_data *tcd;
	int nr_pages;
	struct list_head pages;

	/*
	 * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
	 * from here: this will lead to infinite recursion.
	 */

	/*
	 * debugging check for recursive call to libcfs_debug_msg()
	 */
	if (trace_owner == current_thread()) {
		/*
		 * Cannot assert here.
		 */
		printk(KERN_EMERG "recursive call to %s", __FUNCTION__);
		/*
		 * "The death of God left the angels in a strange position."
		 */
		cfs_enter_debugger();
	}
	tcd = &trace_data[0].tcd;
	CFS_INIT_LIST_HEAD(&pages);
	if (get_preemption_level() == 0)
		nr_pages = trace_refill_stock(tcd, CFS_ALLOC_STD, &pages);
	else
		nr_pages = 0;
	spin_lock(&trace_cpu_serializer);
	trace_owner = current_thread();
	tcd->tcd_cur_stock_pages += nr_pages;
	list_splice(&pages, &tcd->tcd_stock_pages);
	return tcd;
}
Example #19
/*
 * Initialize the quota master target device. This includes connecting to
 * the backend OSD device, initializing the pool configuration and creating the
 * root procfs directory dedicated to this quota target.
 * The rest of the initialization is done when the stack is fully configured
 * (i.e. when ->ldo_start is called across the stack).
 *
 * This function is called on MDT0 setup.
 *
 * \param env - is the environment passed by the caller
 * \param qmt - is the quota master target to be initialized
 * \param ldt - is the device type structure associated with the qmt device
 * \param cfg - is the configuration record used to configure the qmt device
 *
 * \retval - 0 on success, appropriate error on failure
 */
static int qmt_device_init0(const struct lu_env *env, struct qmt_device *qmt,
			    struct lu_device_type *ldt, struct lustre_cfg *cfg)
{
	struct lu_device	*ld = qmt2lu_dev(qmt);
	struct obd_device	*obd, *mdt_obd;
	struct obd_type		*type;
	int			 rc;
	ENTRY;

	/* record who I am; it might be useful ... */
	strncpy(qmt->qmt_svname, lustre_cfg_string(cfg, 0),
		sizeof(qmt->qmt_svname) - 1);

	/* look-up the obd_device associated with the qmt */
	obd = class_name2obd(qmt->qmt_svname);
	if (obd == NULL)
		RETURN(-ENOENT);

	/* reference each other */
	obd->obd_lu_dev = ld;
	ld->ld_obd      = obd;

	/* look-up the parent MDT to steal its ldlm namespace ... */
	mdt_obd = class_name2obd(lustre_cfg_string(cfg, 2));
	if (mdt_obd == NULL)
		RETURN(-ENOENT);

	/* borrow the MDT namespace; kind of a hack until we have our own
	 * namespace & service threads */
	LASSERT(mdt_obd->obd_namespace != NULL);
	obd->obd_namespace = mdt_obd->obd_namespace;
	qmt->qmt_ns = obd->obd_namespace;

	/* connect to backend osd device */
	rc = qmt_connect_to_osd(env, qmt, cfg);
	if (rc)
		GOTO(out, rc);

	/* set up and start rebalance thread */
	thread_set_flags(&qmt->qmt_reba_thread, SVC_STOPPED);
	init_waitqueue_head(&qmt->qmt_reba_thread.t_ctl_waitq);
	CFS_INIT_LIST_HEAD(&qmt->qmt_reba_list);
	spin_lock_init(&qmt->qmt_reba_lock);
	rc = qmt_start_reba_thread(qmt);
	if (rc) {
		CERROR("%s: failed to start rebalance thread (%d)\n",
		       qmt->qmt_svname, rc);
		GOTO(out, rc);
	}

	/* at the moment there is no linkage between lu_type and obd_type, so
	 * we lookup obd_type this way */
	type = class_search_type(LUSTRE_QMT_NAME);
	LASSERT(type != NULL);

	/* register proc directory associated with this qmt */
	qmt->qmt_proc = lprocfs_register(qmt->qmt_svname, type->typ_procroot,
					 NULL, NULL);
	if (IS_ERR(qmt->qmt_proc)) {
		rc = PTR_ERR(qmt->qmt_proc);
		CERROR("%s: failed to create qmt proc entry (%d)\n",
		       qmt->qmt_svname, rc);
		GOTO(out, rc);
	}

	/* initialize pool configuration */
	rc = qmt_pool_init(env, qmt);
	if (rc)
		GOTO(out, rc);
	EXIT;
out:
	if (rc)
		qmt_device_fini(env, ld);
	return rc;
}
Example #20
File: tgt_main.c Project: Lezval/lustre
int tgt_init(const struct lu_env *env, struct lu_target *lut,
	     struct obd_device *obd, struct dt_device *dt,
	     struct tgt_opc_slice *slice, int request_fail_id,
	     int reply_fail_id)
{
	struct dt_object_format	 dof;
	struct lu_attr		 attr;
	struct lu_fid		 fid;
	struct dt_object	*o;
	int			 rc = 0;

	ENTRY;

	LASSERT(lut);
	LASSERT(obd);
	lut->lut_obd = obd;
	lut->lut_bottom = dt;
	lut->lut_last_rcvd = NULL;
	lut->lut_client_bitmap = NULL;
	obd->u.obt.obt_lut = lut;
	obd->u.obt.obt_magic = OBT_MAGIC;

	/* set request handler slice and parameters */
	lut->lut_slice = slice;
	lut->lut_reply_fail_id = reply_fail_id;
	lut->lut_request_fail_id = request_fail_id;

	/* sptlrpc variables init */
	rwlock_init(&lut->lut_sptlrpc_lock);
	sptlrpc_rule_set_init(&lut->lut_sptlrpc_rset);
	lut->lut_mds_capa = 1;
	lut->lut_oss_capa = 1;

	spin_lock_init(&lut->lut_flags_lock);
	lut->lut_sync_lock_cancel = NEVER_SYNC_ON_CANCEL;

	/* last_rcvd initialization is needed by replayable targets only */
	if (!obd->obd_replayable)
		RETURN(0);

	spin_lock_init(&lut->lut_translock);

	OBD_ALLOC(lut->lut_client_bitmap, LR_MAX_CLIENTS >> 3);
	if (lut->lut_client_bitmap == NULL)
		RETURN(-ENOMEM);

	memset(&attr, 0, sizeof(attr));
	attr.la_valid = LA_MODE;
	attr.la_mode = S_IFREG | S_IRUGO | S_IWUSR;
	dof.dof_type = dt_mode_to_dft(S_IFREG);

	lu_local_obj_fid(&fid, LAST_RECV_OID);

	o = dt_find_or_create(env, lut->lut_bottom, &fid, &dof, &attr);
	if (IS_ERR(o)) {
		rc = PTR_ERR(o);
		CERROR("%s: cannot open LAST_RCVD: rc = %d\n", tgt_name(lut),
		       rc);
		GOTO(out_bitmap, rc);
	}

	lut->lut_last_rcvd = o;
	rc = tgt_server_data_init(env, lut);
	if (rc < 0)
		GOTO(out_obj, rc);

	/* prepare transactions callbacks */
	lut->lut_txn_cb.dtc_txn_start = tgt_txn_start_cb;
	lut->lut_txn_cb.dtc_txn_stop = tgt_txn_stop_cb;
	lut->lut_txn_cb.dtc_txn_commit = NULL;
	lut->lut_txn_cb.dtc_cookie = lut;
	lut->lut_txn_cb.dtc_tag = LCT_DT_THREAD | LCT_MD_THREAD;
	CFS_INIT_LIST_HEAD(&lut->lut_txn_cb.dtc_linkage);

	dt_txn_callback_add(lut->lut_bottom, &lut->lut_txn_cb);

	RETURN(0);
out_obj:
	lu_object_put(env, &lut->lut_last_rcvd->do_lu);
	lut->lut_last_rcvd = NULL;
out_bitmap:
	OBD_FREE(lut->lut_client_bitmap, LR_MAX_CLIENTS >> 3);
	lut->lut_client_bitmap = NULL;
	return rc;
}
Example #21
void cfs_init_timer(cfs_timer_t *t)
{
        CFS_INIT_LIST_HEAD(&t->tl_list);
}
Example #22
File: handlers.c Project: Lezval/lustre
/* All actions that we need after sending hello on passive conn:
 * 1) Cope with 1st easy case: conn is already linked to a peer
 * 2) Cope with 2nd easy case: remove zombie conn
 * 3) Resolve race:
 *    a) find the peer
 *    b) link the conn to the peer if conn[idx] is empty
 *    c) if the conn[idx] isn't empty and is in READY state,
 *       remove the conn as duplicated
 *    d) if the conn[idx] isn't empty and isn't in READY state,
 *       override conn[idx] with the conn
 */
int
usocklnd_passiveconn_hellosent(usock_conn_t *conn)
{
        usock_conn_t    *conn2;
        usock_peer_t    *peer;
        cfs_list_t       tx_list;
        cfs_list_t       zcack_list;
        int              idx;
        int              rc = 0;

        /* almost nothing to do if conn is already linked to peer hash table */
        if (conn->uc_peer != NULL)
                goto passive_hellosent_done;

        /* conn->uc_peer == NULL, so the conn isn't accessible via
         * peer hash list, so nobody can touch the conn but us */

        if (conn->uc_ni == NULL) /* remove zombie conn */
                goto passive_hellosent_connkill;

        /* all code below is race resolution, because normally
         * passive conn is linked to peer just after receiving hello */
        CFS_INIT_LIST_HEAD (&tx_list);
        CFS_INIT_LIST_HEAD (&zcack_list);

        /* conn is passive and isn't linked to any peer,
           so its tx and zc_ack lists have to be empty */
        LASSERT (cfs_list_empty(&conn->uc_tx_list) &&
                 cfs_list_empty(&conn->uc_zcack_list) &&
                 conn->uc_sending == 0);

        rc = usocklnd_find_or_create_peer(conn->uc_ni, conn->uc_peerid, &peer);
        if (rc)
                return rc;

        idx = usocklnd_type2idx(conn->uc_type);

        /* try to link conn to peer */
        pthread_mutex_lock(&peer->up_lock);
        if (peer->up_conns[idx] == NULL) {
                usocklnd_link_conn_to_peer(conn, peer, idx);
                usocklnd_conn_addref(conn);
                conn->uc_peer = peer;
                usocklnd_peer_addref(peer);
        } else {
                conn2 = peer->up_conns[idx];
                pthread_mutex_lock(&conn2->uc_lock);

                if (conn2->uc_state == UC_READY) {
                        /* conn2 is in READY state, so conn is "duplicated" */
                        pthread_mutex_unlock(&conn2->uc_lock);
                        pthread_mutex_unlock(&peer->up_lock);
                        usocklnd_peer_decref(peer);
                        goto passive_hellosent_connkill;
                }

                /* uc_state != UC_READY => switch conn and conn2 */
                /* Relink txs and zc_acks from conn2 to conn.
                 * We're sure that nobody but us can access the conn;
                 * nevertheless we take the mutex (if we're wrong, a
                 * deadlock is easier to debug than a corrupted list). */
                cfs_list_add(&tx_list, &conn2->uc_tx_list);
                cfs_list_del_init(&conn2->uc_tx_list);
                cfs_list_add(&zcack_list, &conn2->uc_zcack_list);
                cfs_list_del_init(&conn2->uc_zcack_list);

                pthread_mutex_lock(&conn->uc_lock);
                cfs_list_add_tail(&conn->uc_tx_list, &tx_list);
                cfs_list_del_init(&tx_list);
                cfs_list_add_tail(&conn->uc_zcack_list, &zcack_list);
                cfs_list_del_init(&zcack_list);
                conn->uc_peer = peer;
                pthread_mutex_unlock(&conn->uc_lock);

                conn2->uc_peer = NULL; /* make conn2 zombie */
                pthread_mutex_unlock(&conn2->uc_lock);
                usocklnd_conn_decref(conn2);

                usocklnd_link_conn_to_peer(conn, peer, idx);
                usocklnd_conn_addref(conn);
                conn->uc_peer = peer;
        }

        lnet_ni_decref(conn->uc_ni);
        conn->uc_ni = NULL;
        pthread_mutex_unlock(&peer->up_lock);
        usocklnd_peer_decref(peer);

  passive_hellosent_done:
        /* safely transit to UC_READY state */
        /* rc == 0 */
        pthread_mutex_lock(&conn->uc_lock);
        if (conn->uc_state != UC_DEAD) {
                usocklnd_rx_ksmhdr_state_transition(conn);

                /* we're ready to receive incoming packets and maybe
                   already have something to transmit */
                LASSERT (conn->uc_sending == 0);
                if ( cfs_list_empty(&conn->uc_tx_list) &&
                     cfs_list_empty(&conn->uc_zcack_list) ) {
                        conn->uc_tx_flag = 0;
                        rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST,
                                                 POLLIN);
                } else {
                        conn->uc_tx_deadline =
                                cfs_time_shift(usock_tuns.ut_timeout);
                        conn->uc_tx_flag = 1;
                        rc = usocklnd_add_pollrequest(conn, POLL_SET_REQUEST,
                                                      POLLIN | POLLOUT);
                }

                if (rc == 0)
                        conn->uc_state = UC_READY;
        }
        pthread_mutex_unlock(&conn->uc_lock);
        return rc;

  passive_hellosent_connkill:
        usocklnd_conn_kill(conn);
        return 0;
}
Example #23
File: handlers.c Project: Lezval/lustre
/* All actions that we need after receiving hello on active conn:
 * 1) Schedule removing if we're zombie
 * 2) Restart active conn if we lost the race
 * 3) Else: update RX part to receive KSM header
 */
int
usocklnd_activeconn_hellorecv(usock_conn_t *conn)
{
        int                rc    = 0;
        ksock_hello_msg_t *hello = conn->uc_rx_hello;
        usock_peer_t      *peer  = conn->uc_peer;

        /* Active conn with peer==NULL is zombie.
         * Don't try to link it to peer because the conn
         * has already had a chance to proceed at the beginning */
        if (peer == NULL) {
                LASSERT(cfs_list_empty(&conn->uc_tx_list) &&
                        cfs_list_empty(&conn->uc_zcack_list));

                usocklnd_conn_kill(conn);
                return 0;
        }

        peer->up_last_alive = cfs_time_current();

        /* peer says that we lost the race */
        if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
                /* Start new active conn, relink txs and zc_acks from
                 * the conn to new conn, schedule removing the conn.
                 * Actually, we're expecting that a passive conn will
                 * make us zombie soon and take care of our txs and
                 * zc_acks */

                cfs_list_t tx_list, zcack_list;
                usock_conn_t *conn2;
                int idx = usocklnd_type2idx(conn->uc_type);

                CFS_INIT_LIST_HEAD (&tx_list);
                CFS_INIT_LIST_HEAD (&zcack_list);

                /* Block usocklnd_send() to check peer->up_conns[idx]
                 * and to enqueue more txs */
                pthread_mutex_lock(&peer->up_lock);
                pthread_mutex_lock(&conn->uc_lock);

                /* usocklnd_shutdown() could kill us */
                if (conn->uc_state == UC_DEAD) {
                        pthread_mutex_unlock(&conn->uc_lock);
                        pthread_mutex_unlock(&peer->up_lock);
                        return 0;
                }

                LASSERT (peer == conn->uc_peer);
                LASSERT (peer->up_conns[idx] == conn);

                rc = usocklnd_create_active_conn(peer, conn->uc_type, &conn2);
                if (rc) {
                        conn->uc_errored = 1;
                        pthread_mutex_unlock(&conn->uc_lock);
                        pthread_mutex_unlock(&peer->up_lock);
                        return rc;
                }

                usocklnd_link_conn_to_peer(conn2, peer, idx);
                conn2->uc_peer = peer;

                /* unlink txs and zcack from the conn */
                cfs_list_add(&tx_list, &conn->uc_tx_list);
                cfs_list_del_init(&conn->uc_tx_list);
                cfs_list_add(&zcack_list, &conn->uc_zcack_list);
                cfs_list_del_init(&conn->uc_zcack_list);

                /* link them to conn2 */
                cfs_list_add(&conn2->uc_tx_list, &tx_list);
                cfs_list_del_init(&tx_list);
                cfs_list_add(&conn2->uc_zcack_list, &zcack_list);
                cfs_list_del_init(&zcack_list);

                /* make conn zombie */
                conn->uc_peer = NULL;
                usocklnd_peer_decref(peer);

                /* schedule conn2 for processing */
                rc = usocklnd_add_pollrequest(conn2, POLL_ADD_REQUEST, POLLOUT);
                if (rc) {
                        peer->up_conns[idx] = NULL;
                        usocklnd_conn_decref(conn2); /* should destroy conn */
                } else {
                        usocklnd_conn_kill_locked(conn);
                }

                pthread_mutex_unlock(&conn->uc_lock);
                pthread_mutex_unlock(&peer->up_lock);
                usocklnd_conn_decref(conn);

        } else { /* hello->kshm_ctype != SOCKLND_CONN_NONE */
                if (conn->uc_type != usocklnd_invert_type(hello->kshm_ctype))
                        return -EPROTO;

                pthread_mutex_lock(&peer->up_lock);
                usocklnd_cleanup_stale_conns(peer, hello->kshm_src_incarnation,
                                             conn);
                pthread_mutex_unlock(&peer->up_lock);

                /* safely transit to UC_READY state */
                /* rc == 0 */
                pthread_mutex_lock(&conn->uc_lock);
                if (conn->uc_state != UC_DEAD) {
                        usocklnd_rx_ksmhdr_state_transition(conn);

                        /* POLLIN is already set because we just
                         * received hello, but maybe we have something
                         * to send? */
                        LASSERT (conn->uc_sending == 0);
                        if ( !cfs_list_empty(&conn->uc_tx_list) ||
                             !cfs_list_empty(&conn->uc_zcack_list) ) {

                                conn->uc_tx_deadline =
                                        cfs_time_shift(usock_tuns.ut_timeout);
                                conn->uc_tx_flag = 1;
                                rc = usocklnd_add_pollrequest(conn,
                                                              POLL_SET_REQUEST,
                                                              POLLIN | POLLOUT);
                        }

                        if (rc == 0)
                                conn->uc_state = UC_READY;
                }
                pthread_mutex_unlock(&conn->uc_lock);
        }

        return rc;
}
Example #24
File: llog.c Project: Lezval/lustre
int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
		     int flags, struct obd_uuid *uuid)
{
	struct llog_log_hdr	*llh;
	int			 rc;

	ENTRY;
	LASSERT(handle->lgh_hdr == NULL);

	OBD_ALLOC_PTR(llh);
	if (llh == NULL)
		RETURN(-ENOMEM);
	handle->lgh_hdr = llh;
	/* first assign flags to use llog_client_ops */
	llh->llh_flags = flags;
	rc = llog_read_header(env, handle, uuid);
	if (rc == 0) {
		if (unlikely((llh->llh_flags & LLOG_F_IS_PLAIN &&
			      flags & LLOG_F_IS_CAT) ||
			     (llh->llh_flags & LLOG_F_IS_CAT &&
			      flags & LLOG_F_IS_PLAIN))) {
			CERROR("%s: llog type is %s but initializing %s\n",
			       handle->lgh_ctxt->loc_obd->obd_name,
			       llh->llh_flags & LLOG_F_IS_CAT ?
			       "catalog" : "plain",
			       flags & LLOG_F_IS_CAT ? "catalog" : "plain");
			GOTO(out, rc = -EINVAL);
		} else if (llh->llh_flags &
			   (LLOG_F_IS_PLAIN | LLOG_F_IS_CAT)) {
			/*
			 * it is possible to open llog without specifying llog
			 * type so it is taken from llh_flags
			 */
			flags = llh->llh_flags;
		} else {
			/* for some reason the llh_flags has no type set */
			CERROR("llog type is not specified!\n");
			GOTO(out, rc = -EINVAL);
		}
		if (unlikely(uuid &&
			     !obd_uuid_equals(uuid, &llh->llh_tgtuuid))) {
			CERROR("%s: llog uuid mismatch: %s/%s\n",
			       handle->lgh_ctxt->loc_obd->obd_name,
			       (char *)uuid->uuid,
			       (char *)llh->llh_tgtuuid.uuid);
			GOTO(out, rc = -EEXIST);
		}
	}
	if (flags & LLOG_F_IS_CAT) {
		LASSERT(cfs_list_empty(&handle->u.chd.chd_head));
		CFS_INIT_LIST_HEAD(&handle->u.chd.chd_head);
		llh->llh_size = sizeof(struct llog_logid_rec);
	} else if (!(flags & LLOG_F_IS_PLAIN)) {
		CERROR("%s: unknown flags: %#x (expected %#x or %#x)\n",
		       handle->lgh_ctxt->loc_obd->obd_name,
		       flags, LLOG_F_IS_CAT, LLOG_F_IS_PLAIN);
		rc = -EINVAL;
	}
out:
	if (rc) {
		OBD_FREE_PTR(llh);
		handle->lgh_hdr = NULL;
	}
	RETURN(rc);
}
Example #25
/*
 * Allocate and initialize a qsd_qtype_info structure for quota type \a qtype.
 * This opens the accounting object and initializes the proc file.
 * It's called on OSD start when qsd_prepare() is invoked on the qsd
 * instance.
 *
 * \param env  - the environment passed by the caller
 * \param qsd  - is the qsd instance which will be in charge of the new
 *               qsd_qtype_info instance.
 * \param qtype - is quota type to set up
 *
 * \retval - 0 on success and qsd->qsd_type_array[qtype] is allocated,
 *           appropriate error on failure
 */
static int qsd_qtype_init(const struct lu_env *env, struct qsd_instance *qsd,
			  int qtype)
{
	struct qsd_qtype_info	*qqi;
	int			 rc;
	struct obd_uuid		 uuid;
	ENTRY;

	LASSERT(qsd->qsd_type_array[qtype] == NULL);

	/* allocate structure for this quota type */
	OBD_ALLOC_PTR(qqi);
	if (qqi == NULL)
		RETURN(-ENOMEM);
	qsd->qsd_type_array[qtype] = qqi;
	atomic_set(&qqi->qqi_ref, 1); /* referenced from qsd */

	/* set backpointer and other parameters */
	qqi->qqi_qsd   = qsd;
	qqi->qqi_qtype = qtype;
	lu_ref_init(&qqi->qqi_reference);
	lquota_generate_fid(&qqi->qqi_fid, qsd->qsd_pool_id, QSD_RES_TYPE(qsd),
			    qtype);
	qqi->qqi_glb_uptodate = false;
	qqi->qqi_slv_uptodate = false;
	qqi->qqi_reint        = false;
	init_waitqueue_head(&qqi->qqi_reint_thread.t_ctl_waitq);
	thread_set_flags(&qqi->qqi_reint_thread, SVC_STOPPED);
	CFS_INIT_LIST_HEAD(&qqi->qqi_deferred_glb);
	CFS_INIT_LIST_HEAD(&qqi->qqi_deferred_slv);

	/* open accounting object */
	LASSERT(qqi->qqi_acct_obj == NULL);
	qqi->qqi_acct_obj = acct_obj_lookup(env, qsd->qsd_dev, qtype);
	if (IS_ERR(qqi->qqi_acct_obj)) {
		CDEBUG(D_QUOTA, "%s: no %s space accounting support rc:%ld\n",
		       qsd->qsd_svname, QTYPE_NAME(qtype),
		       PTR_ERR(qqi->qqi_acct_obj));
		qqi->qqi_acct_obj = NULL;
		qsd->qsd_acct_failed = true;
	}

	/* open global index copy */
	LASSERT(qqi->qqi_glb_obj == NULL);
	qqi->qqi_glb_obj = lquota_disk_glb_find_create(env, qsd->qsd_dev,
						       qsd->qsd_root,
						       &qqi->qqi_fid, true);
	if (IS_ERR(qqi->qqi_glb_obj)) {
		CERROR("%s: can't open global index copy "DFID" %ld\n",
		       qsd->qsd_svname, PFID(&qqi->qqi_fid),
		       PTR_ERR(qqi->qqi_glb_obj));
		GOTO(out, rc = PTR_ERR(qqi->qqi_glb_obj));
	}
	qqi->qqi_glb_ver = dt_version_get(env, qqi->qqi_glb_obj);

	/* open slave index copy */
	LASSERT(qqi->qqi_slv_obj == NULL);
	obd_str2uuid(&uuid, qsd->qsd_svname);
	qqi->qqi_slv_obj = lquota_disk_slv_find_create(env, qsd->qsd_dev,
						       qsd->qsd_root,
						       &qqi->qqi_fid, &uuid,
						       true);
	if (IS_ERR(qqi->qqi_slv_obj)) {
		CERROR("%s: can't open slave index copy "DFID" %ld\n",
		       qsd->qsd_svname, PFID(&qqi->qqi_fid),
		       PTR_ERR(qqi->qqi_slv_obj));
		GOTO(out, rc = PTR_ERR(qqi->qqi_slv_obj));
	}
	qqi->qqi_slv_ver = dt_version_get(env, qqi->qqi_slv_obj);

	/* allocate site */
	qqi->qqi_site = lquota_site_alloc(env, qqi, false, qtype, &qsd_lqe_ops);
	if (IS_ERR(qqi->qqi_site)) {
		CERROR("%s: can't allocate site "DFID" %ld\n", qsd->qsd_svname,
		       PFID(&qqi->qqi_fid), PTR_ERR(qqi->qqi_site));
		GOTO(out, rc = PTR_ERR(qqi->qqi_site));
	}

	/* register proc entry for accounting & global index copy objects */
	rc = lprocfs_seq_create(qsd->qsd_proc,
				qtype == USRQUOTA ? "acct_user" : "acct_group",
				0444, &lprocfs_quota_seq_fops,
				qqi->qqi_acct_obj);
	if (rc) {
		CERROR("%s: can't add procfs entry for accounting file %d\n",
		       qsd->qsd_svname, rc);
		GOTO(out, rc);
	}

	rc = lprocfs_seq_create(qsd->qsd_proc,
				qtype == USRQUOTA ? "limit_user" : "limit_group",
				0444, &lprocfs_quota_seq_fops,
				qqi->qqi_glb_obj);
	if (rc) {
		CERROR("%s: can't add procfs entry for global index copy %d\n",
		       qsd->qsd_svname, rc);
		GOTO(out, rc);
	}
	EXIT;
out:
	if (rc)
		qsd_qtype_fini(env, qsd, qtype);
	return rc;
}
Example #26
static int init_libcfs_module(void)
{
	int rc;

	libcfs_arch_init();
	libcfs_init_nidstrings();
	init_rwsem(&cfs_tracefile_sem);
	mutex_init(&cfs_trace_thread_mutex);
	init_rwsem(&ioctl_list_sem);
	CFS_INIT_LIST_HEAD(&ioctl_list);
	init_waitqueue_head(&cfs_race_waitq);

	rc = libcfs_debug_init(5 * 1024 * 1024);
	if (rc < 0) {
		printk(KERN_ERR "LustreError: libcfs_debug_init: %d\n", rc);
		return (rc);
	}

	rc = cfs_cpu_init();
	if (rc != 0)
		goto cleanup_debug;

#if LWT_SUPPORT
	rc = lwt_init();
	if (rc != 0) {
		CERROR("lwt_init: error %d\n", rc);
		goto cleanup_debug;
	}
#endif
	rc = misc_register(&libcfs_dev);
	if (rc) {
		CERROR("misc_register: error %d\n", rc);
		goto cleanup_lwt;
	}

	rc = cfs_wi_startup();
	if (rc) {
		CERROR("initialize workitem: error %d\n", rc);
		goto cleanup_deregister;
	}

	/* max to 4 threads, should be enough for rehash */
	rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4);
	rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY,
				 rc, &cfs_sched_rehash);
	if (rc != 0) {
		CERROR("Startup workitem scheduler: error: %d\n", rc);
		goto cleanup_deregister;
	}

	rc = cfs_crypto_register();
	if (rc) {
		CERROR("cfs_crypto_regster: error %d\n", rc);
		goto cleanup_wi;
	}


	rc = insert_proc();
	if (rc) {
		CERROR("insert_proc: error %d\n", rc);
		goto cleanup_crypto;
	}

	CDEBUG (D_OTHER, "portals setup OK\n");
	return 0;
 cleanup_crypto:
	cfs_crypto_unregister();
 cleanup_wi:
	cfs_wi_shutdown();
 cleanup_deregister:
	misc_deregister(&libcfs_dev);
 cleanup_lwt:
#if LWT_SUPPORT
	lwt_fini();
#endif
 cleanup_debug:
	libcfs_debug_cleanup();
	return rc;
}
Example #27
/*
 * Create a new qsd_instance to be associated with backend osd device
 * identified by \a dev.
 *
 * \param env    - the environment passed by the caller
 * \param svname - is the service name of the OSD device creating this instance
 * \param dev    - is the dt_device where to store quota index files
 * \param osd_proc - is the procfs parent directory where the procfs files
 *                   related to this new qsd instance are created
 *
 * \retval - pointer to new qsd_instance associated with \a dev on success,
 *           appropriate error on failure
 */
struct qsd_instance *qsd_init(const struct lu_env *env, char *svname,
			      struct dt_device *dev,
			      cfs_proc_dir_entry_t *osd_proc)
{
	struct qsd_thread_info	*qti = qsd_info(env);
	struct qsd_instance	*qsd;
	int			 rc, type, idx;
	ENTRY;

	/* only configure qsd for MDT & OST */
	type = server_name2index(svname, &idx, NULL);
	if (type != LDD_F_SV_TYPE_MDT && type != LDD_F_SV_TYPE_OST)
		RETURN(NULL);

	/* allocate qsd instance */
	OBD_ALLOC_PTR(qsd);
	if (qsd == NULL)
		RETURN(ERR_PTR(-ENOMEM));

	/* generic initializations */
	rwlock_init(&qsd->qsd_lock);
	CFS_INIT_LIST_HEAD(&qsd->qsd_link);
	thread_set_flags(&qsd->qsd_upd_thread, SVC_STOPPED);
	init_waitqueue_head(&qsd->qsd_upd_thread.t_ctl_waitq);
	CFS_INIT_LIST_HEAD(&qsd->qsd_upd_list);
	spin_lock_init(&qsd->qsd_adjust_lock);
	CFS_INIT_LIST_HEAD(&qsd->qsd_adjust_list);
	qsd->qsd_prepared = false;
	qsd->qsd_started = false;

	/* copy service name */
	if (strlcpy(qsd->qsd_svname, svname, sizeof(qsd->qsd_svname))
	    >= sizeof(qsd->qsd_svname))
		GOTO(out, rc = -E2BIG);

	/* grab reference on osd device */
	lu_device_get(&dev->dd_lu_dev);
	lu_ref_add(&dev->dd_lu_dev.ld_reference, "qsd", qsd);
	qsd->qsd_dev = dev;

	/* we only support pool ID 0 (default data or metadata pool) for the
	 * time being. A different pool ID could be assigned to this target via
	 * the configuration log in the future */
	qsd->qsd_pool_id  = 0;

	/* get fsname from svname */
	rc = server_name2fsname(svname, qti->qti_buf, NULL);
	if (rc) {
		CERROR("%s: fail to extract filesystem name\n", svname);
		GOTO(out, rc);
	}

	/* look up quota setting for the filesystem the target belongs to */
	qsd->qsd_fsinfo = qsd_get_fsinfo(qti->qti_buf, 1);
	if (qsd->qsd_fsinfo == NULL) {
		CERROR("%s: failed to locate filesystem information\n", svname);
		GOTO(out, rc = -EINVAL);
	}

	/* add in the list of lquota_fsinfo */
	mutex_lock(&qsd->qsd_fsinfo->qfs_mutex);
	list_add_tail(&qsd->qsd_link, &qsd->qsd_fsinfo->qfs_qsd_list);
	mutex_unlock(&qsd->qsd_fsinfo->qfs_mutex);

	/* register procfs directory */
	qsd->qsd_proc = lprocfs_seq_register(QSD_DIR, osd_proc,
						lprocfs_quota_qsd_vars, qsd);
	if (IS_ERR(qsd->qsd_proc)) {
		rc = PTR_ERR(qsd->qsd_proc);
		qsd->qsd_proc = NULL;
		CERROR("%s: fail to create quota slave proc entry (%d)\n",
		       svname, rc);
		GOTO(out, rc);
	}
	EXIT;
out:
	if (rc) {
		qsd_fini(env, qsd);
		return ERR_PTR(rc);
	}
	RETURN(qsd);
}
Example #28
int lfsck_namespace_setup(const struct lu_env *env,
			  struct lfsck_instance *lfsck)
{
	struct lfsck_component	*com;
	struct lfsck_namespace	*ns;
	struct dt_object	*root = NULL;
	struct dt_object	*obj;
	int			 rc;
	ENTRY;

	LASSERT(lfsck->li_master);

	OBD_ALLOC_PTR(com);
	if (com == NULL)
		RETURN(-ENOMEM);

	CFS_INIT_LIST_HEAD(&com->lc_link);
	CFS_INIT_LIST_HEAD(&com->lc_link_dir);
	init_rwsem(&com->lc_sem);
	atomic_set(&com->lc_ref, 1);
	com->lc_lfsck = lfsck;
	com->lc_type = LT_NAMESPACE;
	com->lc_ops = &lfsck_namespace_ops;
	com->lc_file_size = sizeof(struct lfsck_namespace);
	OBD_ALLOC(com->lc_file_ram, com->lc_file_size);
	if (com->lc_file_ram == NULL)
		GOTO(out, rc = -ENOMEM);

	OBD_ALLOC(com->lc_file_disk, com->lc_file_size);
	if (com->lc_file_disk == NULL)
		GOTO(out, rc = -ENOMEM);

	root = dt_locate(env, lfsck->li_bottom, &lfsck->li_local_root_fid);
	if (IS_ERR(root))
		GOTO(out, rc = PTR_ERR(root));

	if (unlikely(!dt_try_as_dir(env, root)))
		GOTO(out, rc = -ENOTDIR);

	obj = local_index_find_or_create(env, lfsck->li_los, root,
					 lfsck_namespace_name,
					 S_IFREG | S_IRUGO | S_IWUSR,
					 &dt_lfsck_features);
	if (IS_ERR(obj))
		GOTO(out, rc = PTR_ERR(obj));

	com->lc_obj = obj;
	rc = obj->do_ops->do_index_try(env, obj, &dt_lfsck_features);
	if (rc != 0)
		GOTO(out, rc);

	rc = lfsck_namespace_load(env, com);
	if (rc > 0)
		rc = lfsck_namespace_reset(env, com, true);
	else if (rc == -ENODATA)
		rc = lfsck_namespace_init(env, com);
	if (rc != 0)
		GOTO(out, rc);

	ns = com->lc_file_ram;
	switch (ns->ln_status) {
	case LS_INIT:
	case LS_COMPLETED:
	case LS_FAILED:
	case LS_STOPPED:
		spin_lock(&lfsck->li_lock);
		cfs_list_add_tail(&com->lc_link, &lfsck->li_list_idle);
		spin_unlock(&lfsck->li_lock);
		break;
	default:
		CERROR("%s: unknown lfsck_namespace status: rc = %u\n",
		       lfsck_lfsck2name(lfsck), ns->ln_status);
		/* fall through */
	case LS_SCANNING_PHASE1:
	case LS_SCANNING_PHASE2:
		/* No need to store the status to disk right now.
		 * If the system crashed before the status was stored,
		 * it will be loaded back next time. */
		ns->ln_status = LS_CRASHED;
		/* fall through */
	case LS_PAUSED:
	case LS_CRASHED:
		spin_lock(&lfsck->li_lock);
		cfs_list_add_tail(&com->lc_link, &lfsck->li_list_scan);
		cfs_list_add_tail(&com->lc_link_dir, &lfsck->li_list_dir);
		spin_unlock(&lfsck->li_lock);
		break;
	}

	GOTO(out, rc = 0);

out:
	if (root != NULL && !IS_ERR(root))
		lu_object_put(env, &root->do_lu);
	if (rc != 0)
		lfsck_component_cleanup(env, com);
	return rc;
}