Example #1
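/*
 * Initialize a resource member: zero the reference count on its
 * MDS-side private structure (resm_mds_info).
 */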
void
slcfg_init_resm(struct sl_resm *m)
{
	struct resm_mds_info *rmmi;

	rmmi = resm2rmmi(m);
	psc_atomic32_set(&rmmi->rmmi_refcnt, 0);
}
Example #2
/*
 * Look up and optionally create a new bmap structure.
 * @f: file whose bmap tree to search.
 * @n: bmap index number to search for.
 * @new_bmap: value-result parameter: on input, whether creation is
 * allowed; on output, whether the bmap was newly created (see the
 * caller sketch after this function).
 */
struct bmap *
bmap_lookup_cache(struct fidc_membh *f, sl_bmapno_t n, int bmaprw,
    int *new_bmap)
{
	struct bmap lb, *b, *bnew = NULL;
	int doalloc;

	doalloc = *new_bmap;
	lb.bcm_bmapno = n;

 restart:
	if (bnew)
		pfl_rwlock_wrlock(&f->fcmh_rwlock);
	else
		pfl_rwlock_rdlock(&f->fcmh_rwlock);
	b = RB_FIND(bmaptree, &f->fcmh_bmaptree, &lb);
	if (b) {
		if (!BMAP_TRYLOCK(b)) {
			pfl_rwlock_unlock(&f->fcmh_rwlock);
			usleep(10);
			goto restart;
		}

		if (b->bcm_flags & BMAPF_TOFREE) {
			/*
			 * This bmap is being torn down; wait for it to
			 * disappear so we can reload a fresh one.
			 */
			DEBUG_BMAP(PLL_DIAG, b, "wait on to-free bmap");
			BMAP_ULOCK(b);
			/*
			 * We don't want to spin if we are waiting for a
			 * flush to clear.
			 */
			psc_waitq_waitrelf_us(&f->fcmh_waitq,
			    PFL_LOCKPRIMT_RWLOCK, &f->fcmh_rwlock, 100);
			goto restart;
		}
		bmap_op_start_type(b, BMAP_OPCNT_LOOKUP);
	}
	if (doalloc == 0 || b) {
		pfl_rwlock_unlock(&f->fcmh_rwlock);
		if (bnew)
			psc_pool_return(bmap_pool, bnew);
		*new_bmap = 0;
		OPSTAT_INCR("bmapcache.hit");
		return (b);
	}
	if (bnew == NULL) {
		pfl_rwlock_unlock(&f->fcmh_rwlock);

		if (sl_bmap_ops.bmo_reapf)
			sl_bmap_ops.bmo_reapf();

		bnew = psc_pool_get(bmap_pool);
		goto restart;
	}
	b = bnew;

	OPSTAT_INCR("bmapcache.miss");

	*new_bmap = 1;
	memset(b, 0, bmap_pool->ppm_master->pms_entsize);
	INIT_PSC_LISTENTRY(&b->bcm_lentry);
	INIT_SPINLOCK(&b->bcm_lock);

	psc_atomic32_set(&b->bcm_opcnt, 0);
	b->bcm_fcmh = f;
	b->bcm_bmapno = n;

	/*
	 * Signify that the bmap is newly initialized and therefore may
	 * not contain certain structures.
	 */
	psc_assert(bmaprw == BMAPF_RD || bmaprw == BMAPF_WR);
	b->bcm_flags = bmaprw;

	bmap_op_start_type(b, BMAP_OPCNT_LOOKUP);

	/*
	 * Perform app-specific substructure initialization, which is
	 * msl_bmap_init(), iod_bmap_init(), or mds_bmap_init().
	 */
	sl_bmap_ops.bmo_init_privatef(b);

	/* Add to the fcmh's bmap cache */
	PSC_RB_XINSERT(bmaptree, &f->fcmh_bmaptree, b);

	pfl_rwlock_unlock(&f->fcmh_rwlock);

	fcmh_op_start_type(f, FCMH_OPCNT_BMAP);

	BMAP_LOCK(b);

	return (b);
}
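The @new_bmap argument implements a small value-result protocol: the caller sets it to indicate whether creation is allowed, and on return it reports whether a bmap was actually created. The caller sketch below is hypothetical; get_bmap_example and its surrounding handling are illustrative only, and just bmap_lookup_cache(), BMAPF_RD, and BMAP_ULOCK() come from the example above.

static struct bmap *
get_bmap_example(struct fidc_membh *f, sl_bmapno_t bno)
{
	struct bmap *b;
	int new_bmap = 1;	/* on input: allow creation on a cache miss */

	b = bmap_lookup_cache(f, bno, BMAPF_RD, &new_bmap);
	/*
	 * With creation allowed, a bmap is always returned, locked and
	 * holding a BMAP_OPCNT_LOOKUP reference that the caller must
	 * eventually release.
	 */
	if (new_bmap) {
		/* Freshly initialized; load or populate its contents here. */
	}
	BMAP_ULOCK(b);
	return (b);
}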
Example #3
int
main(int argc, char *argv[])
{
    struct thr *thr;
    pthread_t pthr;
    int c, rc, i;

    pfl_init();
    progname = argv[0];
    while ((c = getopt(argc, argv, "i:n:")) != -1)
        switch (c) {
        case 'i':
            niter = atoi(optarg);
            break;
        case 'n':
            nthr = atoi(optarg);
            break;
        default:
            usage();
        }
    argc -= optind;
    if (argc)
        usage();

    psc_assert(psc_atomic64_read(&v64) == UINT64_C(100000000000));
    TEST(psc_atomic64, set, &v64, &v64, UINT64_C(2000000000000), UINT64_C(2000000000000));
    TEST(psc_atomic64, add, &v64, &v64, 15, UINT64_C(2000000000015));
    TEST(psc_atomic64, sub, &v64, &v64, 9, UINT64_C(2000000000006));
    TEST1(psc_atomic64, inc, &v64, UINT64_C(2000000000007));
    TEST1(psc_atomic64, dec, &v64, UINT64_C(2000000000006));

    psc_atomic16_set(&v16, 2);
    TEST(psc_atomic16, set, &v16, &v16, 200, 200);
    TEST(psc_atomic16, add, &v16, &v16, 15, 215);
    TEST(psc_atomic16, sub, &v16, &v16, 9, 206);
    TEST1(psc_atomic16, inc, &v16, 207);
    TEST1(psc_atomic16, dec, &v16, 206);
    TEST1V(psc_atomic16, dec_and_test0, &v16, 205, 0);
    TEST(psc_atomic16, set, &v16, &v16, 1, 1);
    TEST1V(psc_atomic16, dec_and_test0, &v16, 0, 1);
    TEST(psc_atomic16, setmask, &v16, &v16, 0x75, 0x75);
    TEST(psc_atomic16, clearmask, &v16, &v16, 0x41, 0x34);
    TEST(psc_atomic16, set, &v16, &v16, 0, 0);

    psc_atomic32_set(&v32, 2);
    TEST(psc_atomic32, set, &v32, &v32, 200, 200);
    TEST(psc_atomic32, add, &v32, &v32, 15, 215);
    TEST(psc_atomic32, sub, &v32, &v32, 9, 206);
    TEST1(psc_atomic32, inc, &v32, 207);
    TEST1(psc_atomic32, dec, &v32, 206);
    TEST1V(psc_atomic32, dec_and_test0, &v32, 205, 0);
    TEST(psc_atomic32, set, &v32, &v32, 1, 1);
    TEST1V(psc_atomic32, dec_and_test0, &v32, 0, 1);
    TEST(psc_atomic32, setmask, &v32, &v32, 0x75, 0x75);
    TEST(psc_atomic32, clearmask, &v32, &v32, 0x41, 0x34);
    TEST(psc_atomic32, set, &v32, &v32, 0, 0);

    psc_atomic64_set(&v64, 2);
    TEST(psc_atomic64, set, &v64, &v64, 200, 200);
    TEST(psc_atomic64, add, &v64, &v64, 15, 215);
    TEST(psc_atomic64, sub, &v64, &v64, 9, 206);
    TEST1(psc_atomic64, inc, &v64, 207);
    TEST1(psc_atomic64, dec, &v64, 206);
    TEST1V(psc_atomic64, dec_and_test0, &v64, 205, 0);
    TEST(psc_atomic64, set, &v64, &v64, 1, 1);
    TEST1V(psc_atomic64, dec_and_test0, &v64, 0, 1);
    TEST(psc_atomic64, setmask, &v64, &v64, 0x75, 0x75);
    TEST(psc_atomic64, clearmask, &v64, &v64, 0x41, 0x34);
    TEST(psc_atomic64, set, &v64, &v64, 0, 0);

    TEST1(psc_atomic16, inc, &v16, 1);
    TEST1V(psc_atomic16, dec_and_test0, &v16, 0, 1);

    rc = pthread_barrier_init(&barrier, NULL, nthr + 1);
    if (rc)
        psc_fatalx("pthread_barrier_init: %s", strerror(rc));
    for (i = 0; i < nthr; i++) {
        thr = PSCALLOC(sizeof(*thr));
        thr->pos = i;
        rc = pthread_create(&pthr, NULL, startf, thr);
        if (rc)
            psc_fatalx("pthread_create: %s", strerror(rc));
    }
    pthread_barrier_wait(&barrier);
    pthread_barrier_wait(&barrier);
    exit(0);
}
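The TEST, TEST1, and TEST1V helpers are not shown above. Judging from their call sites, each applies an atomic operation and asserts the resulting value (TEST1V additionally checks the operation's return value). A hypothetical sketch, inferred from usage and not taken from the actual test suite:

/*
 * Hypothetical definitions inferred from the call sites above; the real
 * macros may differ.
 */
#define TEST(pfx, op, rd, v, arg, expected)                     \
    do {                                                        \
        pfx ## _ ## op((v), (arg));                             \
        psc_assert(pfx ## _read(rd) == (expected));             \
    } while (0)

#define TEST1(pfx, op, v, expected)                             \
    do {                                                        \
        pfx ## _ ## op(v);                                      \
        psc_assert(pfx ## _read(v) == (expected));              \
    } while (0)

#define TEST1V(pfx, op, v, expected, expected_rc)               \
    do {                                                        \
        psc_assert(pfx ## _ ## op(v) == (expected_rc));         \
        psc_assert(pfx ## _read(v) == (expected));              \
    } while (0)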