Esempio n. 1
0
                memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);

                OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
                req->rq_reqbuf = req->rq_reqmsg = newbuf;
                req->rq_reqbuf_len = alloc_size;
        }

        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
        req->rq_reqlen = newmsg_size;

        return 0;
}

/* Shared service-side security context for the null policy; statically
 * allocated with an initial refcount of 1. */
static struct ptlrpc_svc_ctx null_svc_ctx = {
        .sc_refcount    = CFS_ATOMIC_INIT(1),
        .sc_policy      = &null_policy,
};

static
int null_accept(struct ptlrpc_request *req)
{
        LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
                SPTLRPC_POLICY_NULL);

        if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL) {
                CERROR("Invalid rpc flavor 0x%x\n", req->rq_flvr.sf_rpc);
                return SECSVC_DROP;
        }

        req->rq_sp_from = null_decode_sec_part(req->rq_reqbuf);
Esempio n. 2
0
/* for obd_capa.c_list, client capa might stay in three places:
 * 1. ll_capa_list.
 * 2. ll_idle_capas.
 * 3. stand alone: just allocated.
 */

/* capas for oss writeback and those failed to renew */
static CFS_LIST_HEAD(ll_idle_capas);
/* handle for the capa renewal thread (see name; started elsewhere) */
static struct ptlrpc_thread ll_capa_thread;
/* active client capas: aliases the global CAPA_SITE_CLIENT list */
static cfs_list_t *ll_capa_list = &capa_list[CAPA_SITE_CLIENT];

/* llite capa renewal timer */
struct timer_list ll_capa_timer;
/* for debug: indicate whether capa on llite is enabled or not */
static cfs_atomic_t ll_capa_debug = CFS_ATOMIC_INIT(0);
/* renewal statistics -- debug counters, presumably updated by the renewal
 * machinery (not visible in this chunk) */
static unsigned long long ll_capa_renewed = 0;
static unsigned long long ll_capa_renewal_noent = 0;
static unsigned long long ll_capa_renewal_failed = 0;
static unsigned long long ll_capa_renewal_retries = 0;

/* Arm or advance the llite capa timer so it fires no later than @expiry.
 * The timer is (re)programmed when it is not currently pending, or when
 * @expiry comes before the currently programmed expiration. */
static inline void update_capa_timer(struct obd_capa *ocapa, cfs_time_t expiry)
{
        int rearm;

        rearm = !timer_pending(&ll_capa_timer) ||
                cfs_time_before(expiry, ll_capa_timer.expires);
        if (!rearm)
                return;

        mod_timer(&ll_capa_timer, expiry);
        DEBUG_CAPA(D_SEC, &ocapa->c_capa,
                   "ll_capa_timer update: %lu/%lu by", expiry, jiffies);
}
Esempio n. 3
0
/*
 * Evaluate the global fail-point state and decide whether the caller
 * should inject a failure (return 1) or not (return 0).  Mode bits in
 * obd_fail_loc (RAND/SKIP/SOME/ONCE/FAILED) together with the
 * count/probability in obd_fail_val control the decision; @set
 * (OBD_FAIL_LOC_NOSET/ORSET/RESET) selects how @value is folded back
 * into obd_fail_loc afterwards.
 * NOTE(review): @id is only used in the LASSERT here -- matching @id
 * against obd_fail_loc presumably happens in the calling macro.
 */
int __obd_fail_check_set(__u32 id, __u32 value, int set)
{
        static cfs_atomic_t obd_fail_count = CFS_ATOMIC_INIT(0);

        /* callers must pass the id with the ONCE flag already stripped */
        LASSERT(!(id & OBD_FAIL_ONCE));

        /* a one-shot site that has already fired never fires again */
        if ((obd_fail_loc & (OBD_FAILED | OBD_FAIL_ONCE)) ==
            (OBD_FAILED | OBD_FAIL_ONCE)) {
                cfs_atomic_set(&obd_fail_count, 0); /* paranoia */
                return 0;
        }

        /* Fail 1/obd_fail_val times */
        if (obd_fail_loc & OBD_FAIL_RAND) {
                if (obd_fail_val < 2 || cfs_rand() % obd_fail_val > 0)
                        return 0;
        }

        /* Skip the first obd_fail_val, then fail */
        if (obd_fail_loc & OBD_FAIL_SKIP) {
                if (cfs_atomic_inc_return(&obd_fail_count) <= obd_fail_val)
                        return 0;
        }

        /* Fail obd_fail_val times, overridden by FAIL_ONCE */
        if (obd_fail_loc & OBD_FAIL_SOME &&
            (!(obd_fail_loc & OBD_FAIL_ONCE) || obd_fail_val <= 1)) {
                int count = cfs_atomic_inc_return(&obd_fail_count);

                if (count >= obd_fail_val) {
                        cfs_set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);
                        cfs_atomic_set(&obd_fail_count, 0);
                        /* we lost the race to increase obd_fail_count */
                        if (count > obd_fail_val)
                                return 0;
                }
        }

        if ((set == OBD_FAIL_LOC_ORSET || set == OBD_FAIL_LOC_RESET) &&
            (value & OBD_FAIL_ONCE))
                cfs_set_bit(OBD_FAIL_ONCE_BIT, &obd_fail_loc);

        /* Lost race to set OBD_FAILED_BIT. */
        if (cfs_test_and_set_bit(OBD_FAILED_BIT, &obd_fail_loc)) {
                /* If OBD_FAIL_ONCE is valid, only one process can fail,
                 * otherwise multi-process can fail at the same time. */
                if (obd_fail_loc & OBD_FAIL_ONCE)
                        return 0;
        }

        switch (set) {
                case OBD_FAIL_LOC_NOSET:
                        break;
                case OBD_FAIL_LOC_ORSET:
                        obd_fail_loc |= value & ~(OBD_FAILED | OBD_FAIL_ONCE);
                        break;
                case OBD_FAIL_LOC_RESET:
                        obd_fail_loc = value;
                        break;
                default:
                        LASSERTF(0, "called with bad set %u\n", set);
                        break;
        }

        return 1;
}
Esempio n. 4
0
/*
 * Evaluate the global fail-point state and decide whether the caller
 * should inject a failure (return 1) or not (return 0).  Mode bits in
 * cfs_fail_loc (RAND/SKIP/SOME/ONCE/FAILED) together with the
 * count/probability in cfs_fail_val control the decision; @set selects
 * how @value is used: CFS_FAIL_LOC_VALUE gates the check on cfs_fail_val
 * matching @value, ORSET/RESET fold @value back into cfs_fail_loc.
 * NOTE(review): @id is only used in the LASSERT here -- matching @id
 * against cfs_fail_loc presumably happens in the calling macro.
 */
int __cfs_fail_check_set(__u32 id, __u32 value, int set)
{
        static cfs_atomic_t cfs_fail_count = CFS_ATOMIC_INIT(0);

        /* callers must pass the id with the ONCE flag already stripped */
        LASSERT(!(id & CFS_FAIL_ONCE));

        /* a one-shot site that has already fired never fires again */
        if ((cfs_fail_loc & (CFS_FAILED | CFS_FAIL_ONCE)) ==
            (CFS_FAILED | CFS_FAIL_ONCE)) {
                cfs_atomic_set(&cfs_fail_count, 0); /* paranoia */
                return 0;
        }

        /* Fail 1/cfs_fail_val times */
        if (cfs_fail_loc & CFS_FAIL_RAND) {
                if (cfs_fail_val < 2 || cfs_rand() % cfs_fail_val > 0)
                        return 0;
        }

        /* Skip the first cfs_fail_val, then fail */
        if (cfs_fail_loc & CFS_FAIL_SKIP) {
                if (cfs_atomic_inc_return(&cfs_fail_count) <= cfs_fail_val)
                        return 0;
        }

        /* With CFS_FAIL_LOC_VALUE, fail only when cfs_fail_val matches */
        if (set == CFS_FAIL_LOC_VALUE) {
                if (cfs_fail_val != -1 && cfs_fail_val != value)
                        return 0;
        }

        /* Fail cfs_fail_val times, overridden by FAIL_ONCE */
        if (cfs_fail_loc & CFS_FAIL_SOME &&
            (!(cfs_fail_loc & CFS_FAIL_ONCE) || cfs_fail_val <= 1)) {
                int count = cfs_atomic_inc_return(&cfs_fail_count);

                if (count >= cfs_fail_val) {
                        set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
                        cfs_atomic_set(&cfs_fail_count, 0);
                        /* we lost the race to increase cfs_fail_count */
                        if (count > cfs_fail_val)
                                return 0;
                }
        }

        if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
            (value & CFS_FAIL_ONCE))
                set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);

        /* Lost race to set CFS_FAILED_BIT. */
        if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
                /* If CFS_FAIL_ONCE is valid, only one process can fail,
                 * otherwise multi-process can fail at the same time. */
                if (cfs_fail_loc & CFS_FAIL_ONCE)
                        return 0;
        }

        switch (set) {
                case CFS_FAIL_LOC_NOSET:
                case CFS_FAIL_LOC_VALUE:
                        break;
                case CFS_FAIL_LOC_ORSET:
                        cfs_fail_loc |= value & ~(CFS_FAILED | CFS_FAIL_ONCE);
                        break;
                case CFS_FAIL_LOC_RESET:
                        cfs_fail_loc = value;
                        break;
                default:
                        LASSERTF(0, "called with bad set %u\n", set);
                        break;
        }

        return 1;
}
Esempio n. 5
0
#define DEBUG_SUBSYSTEM S_LNET
#define LUSTRE_TRACEFILE_PRIVATE
#include "tracefile.h"

#include <libcfs/libcfs.h>

/* XXX move things up to the top, comment */
/* trace buffers: for each buffer type (TCD_MAX_TYPES), a pointer to a
 * cacheline-aligned array of per-CPU trace-data slots */
union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS] __cacheline_aligned;

/* name of the file trace pages are dumped to */
char cfs_tracefile[TRACEFILE_NAME_SIZE];
/* trace file size limit (by name; defaults to CFS_TRACEFILE_SIZE) */
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
/* control block for the trace-file daemon */
static struct tracefiled_ctl trace_tctl;
cfs_semaphore_t cfs_trace_thread_sem;
/* non-zero while the trace daemon thread is running -- TODO confirm at users */
static int thread_running = 0;

/* count of trace pages currently allocated (by name; updated elsewhere) */
cfs_atomic_t cfs_tage_allocated = CFS_ATOMIC_INIT(0);

static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                                         struct cfs_trace_cpu_data *tcd);

/* Map a linkage list node back to its containing cfs_trace_page. */
static inline struct cfs_trace_page *
cfs_tage_from_list(cfs_list_t *list)
{
        struct cfs_trace_page *tage;

        tage = cfs_list_entry(list, struct cfs_trace_page, linkage);
        return tage;
}

static struct cfs_trace_page *cfs_tage_alloc(int gfp)
{
        cfs_page_t            *page;
        struct cfs_trace_page *tage;
Esempio n. 6
0
unsigned int portal_enter_debugger;
EXPORT_SYMBOL(portal_enter_debugger);

unsigned int libcfs_catastrophe;
EXPORT_SYMBOL(libcfs_catastrophe);

/* NOTE(review): semantics/units not visible here; name suggests a rate
 * limit for watchdog messages -- confirm at call sites */
unsigned int libcfs_watchdog_ratelimit = 300;
EXPORT_SYMBOL(libcfs_watchdog_ratelimit);

/* when non-zero, LBUG triggers a kernel panic (tunable module parameter) */
unsigned int libcfs_panic_on_lbug = 1;
CFS_MODULE_PARM(libcfs_panic_on_lbug, "i", uint, 0644,
                "Lustre kernel panic on LBUG");
EXPORT_SYMBOL(libcfs_panic_on_lbug);

/* memory accounting counter (by name; incremented/decremented elsewhere) */
cfs_atomic_t libcfs_kmemory = CFS_ATOMIC_INIT(0);
EXPORT_SYMBOL(libcfs_kmemory);

static cfs_waitq_t debug_ctlwq;

/* backing storage for the debug-dump path, pre-filled with the default */
char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;

/* We need to pass a pointer here, but elsewhere this must be a const */
char *libcfs_debug_file_path;
CFS_MODULE_PARM(libcfs_debug_file_path, "s", charp, 0644,
                "Path for dumping debug logs, "
                "set 'NONE' to prevent log dumping");

/* presumably set while a panic-time debug dump is in progress -- verify */
int libcfs_panic_in_progress;

/* libcfs_debug_token2mask() expects the returned
Esempio n. 7
0
 */

#define DEBUG_SUBSYSTEM S_LDLM
#ifdef __KERNEL__
# include <lustre_dlm.h>
#else
# include <liblustre.h>
#endif

#include <lustre_fid.h>
#include <obd_class.h>
#include "ldlm_internal.h"

/* slab caches for ldlm resources and locks (created elsewhere) */
cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;

/* counts of server-side / client-side namespaces (by name) */
cfs_atomic_t ldlm_srv_namespace_nr = CFS_ATOMIC_INIT(0);
cfs_atomic_t ldlm_cli_namespace_nr = CFS_ATOMIC_INIT(0);

/* server-side namespace list and the mutex guarding it */
struct mutex ldlm_srv_namespace_lock;
CFS_LIST_HEAD(ldlm_srv_namespace_list);

/* client-side namespace list and the mutex guarding it */
struct mutex ldlm_cli_namespace_lock;
CFS_LIST_HEAD(ldlm_cli_namespace_list);

/* /proc directories for ldlm lock types, namespaces and services */
cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
cfs_proc_dir_entry_t *ldlm_ns_proc_dir = NULL;
cfs_proc_dir_entry_t *ldlm_svc_proc_dir = NULL;

extern unsigned int ldlm_cancel_unused_locks_before_replay;

/* during debug dump certain amount of granted locks for one resource to avoid
Esempio n. 8
0
#include "gss_api.h"

/* /proc entries for GSS statistics (created elsewhere) */
static struct proc_dir_entry *gss_proc_root = NULL;
static struct proc_dir_entry *gss_proc_lk = NULL;

/*
 * statistic of "out-of-sequence-window"
 */
static struct {
        cfs_spinlock_t  oos_lock;            /* protects oos_cli_behind */
        cfs_atomic_t    oos_cli_count;       /* client occurrence */
        int             oos_cli_behind;      /* client max seqs behind */
        cfs_atomic_t    oos_svc_replay[3];   /* server replay detected */
        cfs_atomic_t    oos_svc_pass[3];     /* server verified ok */
} gss_stat_oos = {
        /* NOTE(review): oos_lock gets no static initializer here --
         * presumably cfs_spin_lock_init() runs during module setup; verify */
        .oos_cli_count  = CFS_ATOMIC_INIT(0),
        .oos_cli_behind = 0,
        .oos_svc_replay = { CFS_ATOMIC_INIT(0), },
        .oos_svc_pass   = { CFS_ATOMIC_INIT(0), },
};

/* Record a client out-of-sequence-window event: bump the occurrence
 * counter and track the running maximum of @behind under oos_lock. */
void gss_stat_oos_record_cli(int behind)
{
        cfs_atomic_inc(&gss_stat_oos.oos_cli_count);

        cfs_spin_lock(&gss_stat_oos.oos_lock);
        if (gss_stat_oos.oos_cli_behind < behind)
                gss_stat_oos.oos_cli_behind = behind;
        cfs_spin_unlock(&gss_stat_oos.oos_lock);
}