Example #1
static int __init init_gfs2_fs(void)
{
	int error;

	error = gfs2_sys_init();
	if (error)
		return error;

	error = gfs2_glock_init();
	if (error)
		goto fail;

	error = -ENOMEM;
	gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
					      sizeof(struct gfs2_glock),
					      0, 0,
					      gfs2_init_glock_once);
	if (!gfs2_glock_cachep)
		goto fail;

	gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
					sizeof(struct gfs2_glock) +
					sizeof(struct address_space),
					0, 0, gfs2_init_gl_aspace_once);

	if (!gfs2_glock_aspace_cachep)
		goto fail;

	gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
					      sizeof(struct gfs2_inode),
					      0,  SLAB_RECLAIM_ACCOUNT|
					          SLAB_MEM_SPREAD,
					      gfs2_init_inode_once);
	if (!gfs2_inode_cachep)
		goto fail;

	gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
						sizeof(struct gfs2_bufdata),
					        0, 0, NULL);
	if (!gfs2_bufdata_cachep)
		goto fail;

	gfs2_rgrpd_cachep = kmem_cache_create("gfs2_rgrpd",
					      sizeof(struct gfs2_rgrpd),
					      0, 0, NULL);
	if (!gfs2_rgrpd_cachep)
		goto fail;

	gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
					       sizeof(struct gfs2_quota_data),
					       0, 0, NULL);
	if (!gfs2_quotad_cachep)
		goto fail;

	register_shrinker(&qd_shrinker);

	error = register_filesystem(&gfs2_fs_type);
	if (error)
		goto fail;

	error = register_filesystem(&gfs2meta_fs_type);
	if (error)
		goto fail_unregister;

	error = slow_work_register_user(THIS_MODULE);
	if (error)
		goto fail_slow;

	gfs2_register_debugfs();

	printk("GFS2 (built %s %s) installed\n", __DATE__, __TIME__);

	return 0;

fail_slow:
	unregister_filesystem(&gfs2meta_fs_type);
fail_unregister:
	unregister_filesystem(&gfs2_fs_type);
fail:
	unregister_shrinker(&qd_shrinker);
	gfs2_glock_exit();

	if (gfs2_quotad_cachep)
		kmem_cache_destroy(gfs2_quotad_cachep);

	if (gfs2_rgrpd_cachep)
		kmem_cache_destroy(gfs2_rgrpd_cachep);

	if (gfs2_bufdata_cachep)
		kmem_cache_destroy(gfs2_bufdata_cachep);

	if (gfs2_inode_cachep)
		kmem_cache_destroy(gfs2_inode_cachep);

	if (gfs2_glock_aspace_cachep)
		kmem_cache_destroy(gfs2_glock_aspace_cachep);

	if (gfs2_glock_cachep)
		kmem_cache_destroy(gfs2_glock_cachep);

	gfs2_sys_uninit();
	return error;
}
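
All of the Linux-flavored examples on this page follow the shape above: kmem_cache_create() returns a cache pointer or NULL, every NULL funnels into a shared error path, and kmem_cache_destroy() tears down whatever was already built. A minimal sketch of that idiom, using hypothetical "demo" names rather than GFS2's:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_obj {
	int value;
};

static struct kmem_cache *demo_cachep;

static int __init demo_init(void)
{
	/* name, object size, alignment (0 = default), flags, constructor */
	demo_cachep = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
					0, 0, NULL);
	if (!demo_cachep)
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cachep);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");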
Example #2
static int
cifs_init_request_bufs(void)
{
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size cannot be smaller than 2 * PATH_MAX since the maximum
	   Unicode path name has to fit in any SMB/CIFS path-based frame */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*	cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, "cifs_min_rcv set to maximum (64)");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle-based requests (but not write responses, nor is it
	sufficient for path-based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use the small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to a 17K (5 page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, "cifs_min_small set to maximum (256)");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
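
The size clamp at the top of cifs_init_request_bufs() is worth unpacking: after bounding CIFSMaxBufSize to [8192, 1024*127], the mask 0x1FE00 clears bits 0-8, rounding the value down to a multiple of 512. A standalone sketch of the arithmetic (illustration only, not CIFS code):

/* 0x1FE00 keeps bits 9..16, so the result is always a multiple of 512
 * (bit 9) and never exceeds 0x1FE00.  For example:
 *   16389 (0x4005) & 0x1FE00 == 16384 (0x4000)
 *   65535 (0xFFFF) & 0x1FE00 == 65024 (0xFE00, i.e. 127 * 512)
 */
static unsigned int round_down_to_512(unsigned int size)
{
	return size & 0x1FE00;
}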
Example #3
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
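
No NULL check is needed above: SLAB_PANIC tells the allocator to panic() if the cache cannot be created, so kmem_cache_create() never returns NULL to its caller. Example #7 below enforces the same policy by hand; a hypothetical spelled-out equivalent of this function would be:

static struct kmem_cache *iocontext_cachep_demo;

static int __init blk_ioc_init_spelled_out(void)
{
	/* same effect as passing SLAB_PANIC, made explicit */
	iocontext_cachep_demo = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, 0, NULL);
	if (!iocontext_cachep_demo)
		panic("blk_ioc_init(): cannot create io_context cache");
	return 0;
}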
Example #4
static int __init s3c2410_init_dma(void)
{
	s3c2410_dma_chan_t *cp;
	int channel;
	int ret;

	printk("S3C2410 DMA Driver, (c) 2003-2004 Simtec Electronics\n");

	dma_base = ioremap(S3C2410_PA_DMA, 0x200);
	if (dma_base == NULL) {
		printk(KERN_ERR "dma failed to remap register block\n");
		return -ENOMEM;
	}

	ret = sysdev_class_register(&dma_sysclass);
	if (ret != 0) {
		printk(KERN_ERR "dma sysclass registration failed\n");
		goto err;
	}

	dma_kmem = kmem_cache_create("dma_desc", sizeof(s3c2410_dma_buf_t), 0,
				     SLAB_HWCACHE_ALIGN,
				     s3c2410_dma_cache_ctor, NULL);

	if (dma_kmem == NULL) {
		printk(KERN_ERR "dma failed to make kmem cache\n");
		ret = -ENOMEM;
		goto err;
	}

	for (channel = 0; channel < S3C2410_DMA_CHANNELS; channel++) {
		cp = &s3c2410_chans[channel];

		memset(cp, 0, sizeof(s3c2410_dma_chan_t));

		/* dma channel irqs are in order.. */
		cp->number = channel;
		cp->irq    = channel + IRQ_DMA0;
		cp->regs   = dma_base + (channel*0x40);

		/* point current stats somewhere */
		cp->stats  = &cp->stats_store;
		cp->stats_store.timeout_shortest = LONG_MAX;

		/* basic channel configuration */

		cp->load_timeout = 1<<18;

		/* register system device */

		cp->dev.cls = &dma_sysclass;
		cp->dev.id  = channel;
		ret = sysdev_register(&cp->dev);
		if (ret != 0)
			printk(KERN_ERR "error registering dev for dma %d\n",
			       channel);

		printk("DMA channel %d at %p, irq %d\n",
		       cp->number, cp->regs, cp->irq);
	}

	return 0;

 err:
	kmem_cache_destroy(dma_kmem);
	iounmap(dma_base);
	dma_base = NULL;
	return ret;
}
Example #5
static void
dtrace_load(void *dummy)
{
	dtrace_provider_id_t id;
	CPU_INFO_ITERATOR cpuind;
	struct cpu_info *cinfo;

	dtrace_debug_init(NULL);
	dtrace_gethrtime_init(NULL);

	/* Hook into the trap handler. */
	dtrace_trap_func = dtrace_trap;

	/* Hang our hook for thread switches. */
	dtrace_vtime_switch_func = dtrace_vtime_switch;

	/* Hang our hook for exceptions. */
	dtrace_invop_init();

	/*
	 * XXX This is a short term hack to avoid having to comment
	 * out lots and lots of lock/unlock calls.
	 */
	mutex_init(&mod_lock,"XXX mod_lock hack", MUTEX_DEFAULT, NULL);

	/*
	 * Initialise the mutexes without 'witness' because the dtrace
	 * code is mostly written to wait for memory. To have the
	 * witness code change a malloc() from M_WAITOK to M_NOWAIT
	 * because a lock is held would surely create a panic in a
	 * low memory situation. And that low memory situation might be
	 * the very problem we are trying to trace.
	 */
	mutex_init(&dtrace_lock,"dtrace probe state", MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_provider_lock,"dtrace provider state", MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_meta_lock,"dtrace meta-provider state", MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_errlock,"dtrace error lock", MUTEX_DEFAULT, NULL);

	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);
	mutex_enter(&cpu_lock);

	ASSERT(MUTEX_HELD(&cpu_lock));

	dtrace_arena = vmem_create("dtrace", 1, INT_MAX, 1,
			NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);

	dtrace_state_cache = kmem_cache_create(__UNCONST("dtrace_state_cache"),
	    sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
	    NULL, NULL, NULL, NULL, NULL, 0);

	ASSERT(MUTEX_HELD(&cpu_lock));
	dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
	    offsetof(dtrace_probe_t, dtpr_nextmod),
	    offsetof(dtrace_probe_t, dtpr_prevmod));

	dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
	    offsetof(dtrace_probe_t, dtpr_nextfunc),
	    offsetof(dtrace_probe_t, dtpr_prevfunc));

	dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
	    offsetof(dtrace_probe_t, dtpr_nextname),
	    offsetof(dtrace_probe_t, dtpr_prevname));

	if (dtrace_retain_max < 1) {
		cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
		    "setting to 1", dtrace_retain_max);
		dtrace_retain_max = 1;
	}

	/*
	 * Now discover our toxic ranges.
	 */
	dtrace_toxic_ranges(dtrace_toxrange_add);

	/*
	 * Before we register ourselves as a provider to our own framework,
	 * we would like to assert that dtrace_provider is NULL -- but that's
	 * not true if we were loaded as a dependency of a DTrace provider.
	 * Once we've registered, we can assert that dtrace_provider is our
	 * pseudo provider.
	 */
	(void) dtrace_register("dtrace", &dtrace_provider_attr,
	    DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);

	ASSERT(dtrace_provider != NULL);
	ASSERT((dtrace_provider_id_t)dtrace_provider == id);

	dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
	dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "END", 0, NULL);
	dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "ERROR", 1, NULL);

	mutex_exit(&cpu_lock);

	/*
	 * If DTrace helper tracing is enabled, we need to allocate the
	 * trace buffer and initialize the values.
	 */
	if (dtrace_helptrace_enabled) {
		ASSERT(dtrace_helptrace_buffer == NULL);
		dtrace_helptrace_buffer =
		    kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
		dtrace_helptrace_next = 0;
		dtrace_helptrace_size = dtrace_helptrace_bufsize;
	}

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	mutex_enter(&cpu_lock);

	/* Setup the CPUs */
	for (CPU_INFO_FOREACH(cpuind, cinfo)) {
		(void) dtrace_cpu_setup(CPU_CONFIG, cpu_index(cinfo));
	}

	mutex_exit(&cpu_lock);

	dtrace_anon_init(NULL);
#if 0
	dtrace_dev = make_dev(&dtrace_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "dtrace/dtrace");
#endif

	return;
}
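
Note that this NetBSD DTrace port calls the Solaris-style kmem_cache_create(), not the Linux one from Examples #1-#4: it takes separate constructor, destructor, and reclaim callbacks, a private argument, and an optional vmem source. For comparison, the two prototypes (paraphrased from the headers of this era; treat the exact qualifiers as approximate):

/* Linux, <linux/slab.h>, as used in Examples #1-#4: */
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
				     size_t align, unsigned long flags,
				     void (*ctor)(void *));

/* Solaris/illumos and the SPL, <sys/kmem.h>, as used here and in
 * Examples #8-#11, #13 and #20: */
kmem_cache_t *kmem_cache_create(char *name, size_t bufsize, size_t align,
				int (*constructor)(void *, void *, int),
				void (*destructor)(void *, void *),
				void (*reclaim)(void *),
				void *private, vmem_t *vmp, int cflags);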
Example #6
static struct kmem_cache * __init zfcp_cache_hw_align(const char *name,
						      unsigned long size)
{
	return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
}
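
The helper passes roundup_pow_of_two(size) as the alignment, so each object starts on a power-of-two boundary at least as large as the object itself and therefore never straddles one; the "hw" in the name suggests this satisfies the adapter's addressing requirements. A portable sketch of the rounding (illustration only; the kernel's real version in <linux/log2.h> is an ilog2-based macro):

/* Next power of two >= x, e.g. 96 -> 128, 128 -> 128. */
static unsigned long demo_roundup_pow_of_two(unsigned long x)
{
	unsigned long p = 1;

	while (p < x)
		p <<= 1;
	return p;
}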
Example #7
void __init pgtable_cache_init(void)
{
	pgd_cache = kmem_cache_create("pgd", SIZEOF_PGD, SIZEOF_PGD, 0, NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}
Example #8
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
			     int size, int alloc, int max_time)
{
	kmem_cache_priv_t *kcp;
	kthread_t *thr;
	struct timespec start, stop, delta;
	char cache_name[32];
	int i, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	(void)snprintf(cache_name, 32, "%s-%d-%d",
		       SPLAT_KMEM_CACHE_NAME, size, alloc);
	kcp->kcp_cache =
		kmem_cache_create(cache_name, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
		rc = -ENOMEM;
		goto out_kcp;
	}

	getnstimeofday(&start);

	for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
		thr = thread_create(NULL, 0,
				    splat_kmem_cache_test_thread,
				    kcp, 0, &p0, TS_RUN, defclsyspri);
		if (thr == NULL) {
			rc = -ESRCH;
			goto out_cache;
		}
	}

	/* Sleep until all threads have started, then set the ready
	 * flag and wake them all up for maximum concurrency. */
	wait_event(kcp->kcp_ctl_waitq,
		   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_flags |= KCP_FLAG_READY;
	spin_unlock(&kcp->kcp_lock);
	wake_up_all(&kcp->kcp_thr_waitq);

	/* Sleep until all threads have finished */
	wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

	getnstimeofday(&stop);
	delta = timespec_sub(stop, start);

	splat_vprint(file, name,
		     "%-22s %2ld.%09ld\t"
		     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
		     kcp->kcp_cache->skc_name,
		     delta.tv_sec, delta.tv_nsec,
		     (unsigned long)kcp->kcp_cache->skc_slab_total,
		     (unsigned long)kcp->kcp_cache->skc_slab_max,
		     (unsigned long)(kcp->kcp_alloc *
				    SPLAT_KMEM_THREADS /
				    SPL_KMEM_CACHE_OBJ_PER_SLAB),
		     (unsigned long)kcp->kcp_cache->skc_obj_total,
		     (unsigned long)kcp->kcp_cache->skc_obj_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS));

	if (delta.tv_sec >= max_time)
		rc = -ETIME;

	if (!rc && kcp->kcp_rc)
		rc = kcp->kcp_rc;

out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
	return rc;
}
Example #9
/*
 * Validate kmem_cache_reap() by requesting the slab cache free any objects
 * it can.  For a few reasons this may not immediately result in more free
 * memory even if objects are freed.  First off, due to fragmentation we
 * may not be able to reclaim any slabs.  Secondly, even if we do fully
 * clear some slabs, we will not want to immediately reclaim all of them
 * because we may contend with cache allocations and thrash.  What we want
 * to see is the slab size decrease more gradually as it becomes clear they
 * will not be needed.  This should be achievable in less than a minute.
 * If it takes longer than this something has gone wrong.
 */
static int
splat_kmem_test8(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_thread_t *kct;
	unsigned int spl_kmem_cache_expire_old;
	int i, rc = 0;

	/* Enable cache aging just for this test if it is disabled */
	spl_kmem_cache_expire_old = spl_kmem_cache_expire;
	spl_kmem_cache_expire = KMC_EXPIRE_AGE;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
					      256, 0, 0);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kcp");
		rc = -ENOMEM;
		goto out;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			   "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_kcp;
	}

	kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
	if (!kct) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kct");
		rc = -ENOMEM;
		goto out_cache;
	}

	rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, SPLAT_KMEM_OBJ_COUNT);
	if (rc) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "Unable to "
			     "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
		goto out_kct;
	}

	/* Force reclaim every 1/10 a second for 60 seconds. */
	for (i = 0; i < 600; i++) {
		kmem_cache_reap_now(kcp->kcp_cache);
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);

		if (kcp->kcp_count == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
	}

	if (kcp->kcp_count == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			"Successfully created %d objects "
			"in cache %s and reclaimed them\n",
			SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			"Failed to reclaim %u/%d objects from cache %s\n",
			(unsigned)kcp->kcp_count,
			SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	/* Cleanup our mess (for failure case of time expiring) */
	splat_kmem_cache_test_kcd_free(kcp, kct);
out_kct:
	splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
out:
	spl_kmem_cache_expire = spl_kmem_cache_expire_old;

	return rc;
}
Example #10
void
log_init(void)
{
	int log_maxzones;

	/*
	 * Create a backlog queue to consume console messages during periods
	 * when there is no console reader (e.g. before syslogd(1M) starts).
	 */
	log_backlogq = log_consq = log_makeq(0, LOG_HIWAT, NULL);

	/*
	 * Create a queue to hold free messages of size <= LOG_MSGSIZE.
	 * Calls from high-level interrupt handlers will do a getq_noenab()
	 * from this queue, so its q_lock must be a maximum SPL spin lock.
	 */
	log_freeq = log_makeq(LOG_MINFREE, LOG_MAXFREE, (void *)ipltospl(SPL8));

	/*
	 * Create a queue for messages from high-level interrupt context.
	 * These messages are drained via softcall, or explicitly by panic().
	 */
	log_intrq = log_makeq(0, LOG_HIWAT, (void *)ipltospl(SPL8));

	/*
	 * Create a queue to hold the most recent 8K of console messages.
	 * Useful for debugging.  Required by the "$<msgbuf" adb macro.
	 */
	log_recentq = log_makeq(0, LOG_RECENTSIZE, NULL);

	/*
	 * Create an id space for clone devices opened via /dev/log.
	 * Need to limit the number of zones to avoid exceeding the
	 * available minor number space.
	 */
	log_maxzones = (L_MAXMIN32 - LOG_LOGMIN) / LOG_NUMCLONES - 1;
	if (log_maxzones < maxzones)
		maxzones = log_maxzones;
	log_minorspace = id_space_create("logminor_space", LOG_LOGMIN + 1,
	    L_MAXMIN32);
	/*
	 * Put ourselves on the ZSD list.  Note that zones have not been
	 * initialized yet, but our constructor will be called on the global
	 * zone when they are.
	 */
	zone_key_create(&log_zone_key, log_zoneinit, NULL, log_zonefree);

	/*
	 * Initialize backlog structure.
	 */
	log_backlog.log_zoneid = GLOBAL_ZONEID;
	log_backlog.log_minor = LOG_BACKLOG;

	/* Allocate kmem cache for conslog's log structures */
	log_cons_cache = kmem_cache_create("log_cons_cache",
	    sizeof (struct log), 0, log_cons_constructor, log_cons_destructor,
	    NULL, NULL, NULL, 0);

	/*
	 * Let the logging begin.
	 */
	log_update(&log_backlog, log_backlogq, SL_CONSOLE, log_console);

	/*
	 * Now that logging is enabled, emit the OS banner.
	 */
	printf("\rSunOS Release %s Version %s %u-bit\n",
	    utsname.release, utsname.version, NBBY * (uint_t)sizeof (void *));
	printf("Copyright (c) 1983, 2010, Oracle and/or its affiliates. "
	    "All rights reserved.\n");
	printf("Copyright 2015 Nexenta Systems, Inc.  All rights reserved.\n");
#ifdef DEBUG
	printf("DEBUG enabled\n");
#endif
}
Example #11
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
    int size, int align, int flags)
{
	kmem_cache_priv_t *kcp = NULL;
	kmem_cache_data_t **kcd = NULL;
	int i, rc = 0, objs = 0;

	/* Limit size for low memory machines (1/128 of memory) */
	size = MIN(size, (physmem * PAGE_SIZE) >> 7);

	splat_vprint(file, name,
	    "Testing size=%d, align=%d, flags=0x%04x\n",
	    size, align, flags);

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return (-ENOMEM);
	}

	kcp->kcp_cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
	    kcp->kcp_size, kcp->kcp_align,
	    splat_kmem_cache_test_constructor,
	    splat_kmem_cache_test_destructor,
	    NULL, kcp, NULL, flags);
	if (kcp->kcp_cache == NULL) {
		splat_vprint(file, name, "Unable to create "
		    "name='%s', size=%d, align=%d, flags=0x%x\n",
		    SPLAT_KMEM_CACHE_NAME, size, align, flags);
		rc = -ENOMEM;
		goto out_free;
	}

	/*
	 * Allocate several slabs worth of objects to verify functionality.
	 * However, on 32-bit systems with limited address space constrain
	 * it to a single slab for the purposes of this test.
	 */
#ifdef _LP64
	objs = kcp->kcp_cache->skc_slab_objs * 4;
#else
	objs = 1;
#endif
	kcd = kmem_zalloc(sizeof (kmem_cache_data_t *) * objs, KM_SLEEP);
	if (kcd == NULL) {
		splat_vprint(file, name, "Unable to allocate pointers "
		    "for %d objects\n", objs);
		rc = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < objs; i++) {
		kcd[i] = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		if (kcd[i] == NULL) {
			splat_vprint(file, name, "Unable to allocate "
			    "from '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}

		if (!kcd[i]->kcd_flag) {
			splat_vprint(file, name, "Failed to run constructor "
			    "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}

		if (kcd[i]->kcd_magic != kcp->kcp_magic) {
			splat_vprint(file, name,
			    "Failed to pass private data to constructor "
			    "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}
	}

	for (i = 0; i < objs; i++) {
		kmem_cache_free(kcp->kcp_cache, kcd[i]);

		/* Destructors are run for every kmem_cache_free() */
		if (kcd[i]->kcd_flag) {
			splat_vprint(file, name,
			    "Failed to run destructor for '%s'\n",
			    SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}
	}

	if (kcp->kcp_count) {
		splat_vprint(file, name,
		    "Failed to run destructor on all slab objects for '%s'\n",
		    SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
	}

	kmem_free(kcd, sizeof (kmem_cache_data_t *) * objs);
	kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);
	splat_vprint(file, name,
	    "Success ran alloc'd/free'd %d objects of size %d\n",
	    objs, size);

	return (rc);

out_free:
	if (kcd) {
		for (i = 0; i < objs; i++) {
			if (kcd[i] != NULL)
				kmem_cache_free(kcp->kcp_cache, kcd[i]);
		}

		kmem_free(kcd, sizeof (kmem_cache_data_t *) * objs);
	}

	if (kcp->kcp_cache)
		kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);

	return (rc);
}
Example #12
static int __init lustre_init(void)
{
	struct proc_dir_entry *entry;
	lnet_process_id_t lnet_id;
	struct timeval tv;
	int i, rc, seed[2];

	CLASSERT(sizeof(LUSTRE_VOLATILE_HDR) == LUSTRE_VOLATILE_HDR_LEN + 1);

	/* print an address of _any_ initialized kernel symbol from this
	 * module, to allow debugging with gdb that doesn't support data
	 * symbols from modules.*/
	CDEBUG(D_INFO, "Lustre client module (%p).\n",
	       &lustre_super_operations);

	ll_inode_cachep = kmem_cache_create("lustre_inode_cache",
					    sizeof(struct ll_inode_info),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (ll_inode_cachep == NULL)
		GOTO(out_cache, rc = -ENOMEM);

	ll_file_data_slab = kmem_cache_create("ll_file_data",
						 sizeof(struct ll_file_data), 0,
						 SLAB_HWCACHE_ALIGN, NULL);
	if (ll_file_data_slab == NULL)
		GOTO(out_cache, rc = -ENOMEM);

	ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache",
						  sizeof(struct ll_remote_perm),
						  0, 0, NULL);
	if (ll_remote_perm_cachep == NULL)
		GOTO(out_cache, rc = -ENOMEM);

	ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache",
						   REMOTE_PERM_HASHSIZE *
						   sizeof(struct hlist_head),
						   0, 0, NULL);
	if (ll_rmtperm_hash_cachep == NULL)
		GOTO(out_cache, rc = -ENOMEM);

	entry = lprocfs_register("llite", proc_lustre_root, NULL, NULL);
	if (IS_ERR(entry)) {
		rc = PTR_ERR(entry);
		CERROR("cannot register '/proc/fs/lustre/llite': rc = %d\n",
		       rc);
		GOTO(out_cache, rc);
	}

	proc_lustre_fs_root = entry;

	cfs_get_random_bytes(seed, sizeof(seed));

	/* Nodes with small feet have little entropy. The NID for this
	 * node gives the most entropy in the low bits. */
	for (i = 0;; i++) {
		if (LNetGetId(i, &lnet_id) == -ENOENT)
			break;

		if (LNET_NETTYP(LNET_NIDNET(lnet_id.nid)) != LOLND)
			seed[0] ^= LNET_NIDADDR(lnet_id.nid);
	}

	do_gettimeofday(&tv);
	cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);

	rc = vvp_global_init();
	if (rc != 0)
		GOTO(out_proc, rc);

	cl_inode_fini_env = cl_env_alloc(&cl_inode_fini_refcheck,
					 LCT_REMEMBER | LCT_NOREF);
	if (IS_ERR(cl_inode_fini_env))
		GOTO(out_vvp, rc = PTR_ERR(cl_inode_fini_env));

	cl_inode_fini_env->le_ctx.lc_cookie = 0x4;

	rc = ll_xattr_init();
	if (rc != 0)
		GOTO(out_inode_fini_env, rc);

	lustre_register_client_fill_super(ll_fill_super);
	lustre_register_kill_super_cb(ll_kill_super);
	lustre_register_client_process_config(ll_process_config);

	RETURN(0);

out_inode_fini_env:
	cl_env_put(cl_inode_fini_env, &cl_inode_fini_refcheck);
out_vvp:
	vvp_global_fini();
out_proc:
	lprocfs_remove(&proc_lustre_fs_root);
out_cache:
	if (ll_inode_cachep != NULL)
		kmem_cache_destroy(ll_inode_cachep);

	if (ll_file_data_slab != NULL)
		kmem_cache_destroy(ll_file_data_slab);

	if (ll_remote_perm_cachep != NULL)
		kmem_cache_destroy(ll_remote_perm_cachep);

	if (ll_rmtperm_hash_cachep != NULL)
		kmem_cache_destroy(ll_rmtperm_hash_cachep);

	return rc;
}
Example #13
/*
 * Cache initialization routine.  This routine should only be called
 * once.  It performs the following tasks:
 *	- Initialize all global locks
 * 	- Call sub-initialization routines (localize access to variables)
 */
static int
cachefs_init(int fstyp, char *name)
{
	kstat_t *ksp;
	int error;

	ASSERT(cachefs_up == B_FALSE);

	error = cachefs_init_vfsops(fstyp);
	if (error != 0)
		return (error);

	error = cachefs_init_vnops(name);
	if (error != 0)
		return (error);

	mutex_init(&cachefs_cachelock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cachefs_newnum_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cachefs_kstat_key_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cachefs_kmem_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cachefs_rename_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cachefs_minor_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&cachefs_async_lock, NULL, MUTEX_DEFAULT, NULL);
#ifdef CFSRLDEBUG
	mutex_init(&cachefs_rl_debug_mutex, NULL, MUTEX_DEFAULT, NULL);
#endif /* CFSRLDEBUG */

	/*
	 * set up kmem_cache entities
	 */

	cachefs_cnode_cache = kmem_cache_create("cachefs_cnode_cache",
	    sizeof (struct cnode), 0, NULL, NULL, NULL, NULL, NULL, 0);
	cachefs_req_cache = kmem_cache_create("cachefs_async_request",
	    sizeof (struct cachefs_req), 0,
	    cachefs_req_create, cachefs_req_destroy, NULL, NULL, NULL, 0);
	cachefs_fscache_cache = kmem_cache_create("cachefs_fscache",
	    sizeof (fscache_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	cachefs_filegrp_cache = kmem_cache_create("cachefs_filegrp",
	    sizeof (filegrp_t), 0,
	    filegrp_cache_create, filegrp_cache_destroy, NULL, NULL, NULL, 0);
	cachefs_cache_kmcache = kmem_cache_create("cachefs_cache_t",
	    sizeof (cachefscache_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * set up the cachefs.0.key kstat
	 */

	cachefs_kstat_key = NULL;
	cachefs_kstat_key_n = 0;
	ksp = kstat_create("cachefs", 0, "key", "misc", KSTAT_TYPE_RAW, 1,
	    KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_VAR_SIZE);
	if (ksp != NULL) {
		ksp->ks_data = &cachefs_kstat_key;
		ksp->ks_update = cachefs_kstat_key_update;
		ksp->ks_snapshot = cachefs_kstat_key_snapshot;
		ksp->ks_lock = &cachefs_kstat_key_lock;
		kstat_install(ksp);
	}

	/*
	 * Assign unique major number for all nfs mounts
	 */

	if ((cachefs_major = getudev()) == -1) {
		cmn_err(CE_WARN,
			"cachefs: init: can't get unique device number");
		cachefs_major = 0;
	}
	cachefs_up = B_TRUE;
#ifdef CFSRLDEBUG
	cachefs_dbvalid = time;
#endif /* CFSRLDEBUG */

	return (0);
}
Example #14
void yaffs_InitialiseRawTnodesAndObjects(yaffs_Device *dev)
{
	yaffs_Allocator *allocator;
	unsigned mount_id = yaffs_DeviceToLC(dev)->mount_id;

	T(YAFFS_TRACE_ALLOCATE,(TSTR("Initialising yaffs allocator\n")));

	if(dev->allocator)
		YBUG();
	else if(mount_id >= 10){
		T(YAFFS_TRACE_ALWAYS,(TSTR("Bad mount_id %u\n"),mount_id));
	} else {
		allocator = YMALLOC(sizeof(yaffs_Allocator));
		if(!allocator){
			/* the allocation must be checked before it is
			 * zeroed or published via dev->allocator */
			T(YAFFS_TRACE_ALWAYS,
				(TSTR("yaffs allocator creation failed\n")));
			YBUG();
			return;
		}
		memset(allocator,0,sizeof(yaffs_Allocator));
		dev->allocator = allocator;

		sprintf(allocator->tnode_name,"yaffs_t_%u",mount_id);
		sprintf(allocator->object_name,"yaffs_o_%u",mount_id);

		allocator->tnode_cache =
			kmem_cache_create(allocator->tnode_name,
				dev->tnodeSize,
				0, 0,
				fake_ctor_list[mount_id]);
		if(allocator->tnode_cache)
			T(YAFFS_TRACE_ALLOCATE,
				(TSTR("tnode cache \"%s\" %p\n"),
				allocator->tnode_name,allocator->tnode_cache));
		else {
			T(YAFFS_TRACE_ALWAYS,
				(TSTR("yaffs cache creation failed\n")));
			YBUG();
		}


		allocator->object_cache = 
			kmem_cache_create(allocator->object_name,
				sizeof(yaffs_Object),
				0, 0,
				fake_ctor_list[mount_id]);

		if(allocator->object_cache)
			T(YAFFS_TRACE_ALLOCATE,
				(TSTR("object cache \"%s\" %p\n"),
				allocator->object_name,allocator->object_cache));

		else {
			T(YAFFS_TRACE_ALWAYS,
				(TSTR("yaffs cache creation failed\n")));
			YBUG();
		}
	} 
}
Example #15
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
Example #16
static int __init init_lustre_lite(void)
{
	int i, rc, seed[2];
	struct timeval tv;
	lnet_process_id_t lnet_id;

	CLASSERT(sizeof(LUSTRE_VOLATILE_HDR) == LUSTRE_VOLATILE_HDR_LEN + 1);

	/* print an address of _any_ initialized kernel symbol from this
	 * module, to allow debugging with gdb that doesn't support data
	 * symbols from modules.*/
	CDEBUG(D_INFO, "Lustre client module (%p).\n",
	       &lustre_super_operations);

        rc = ll_init_inodecache();
        if (rc)
                return -ENOMEM;
	ll_file_data_slab = kmem_cache_create("ll_file_data",
						 sizeof(struct ll_file_data), 0,
						 SLAB_HWCACHE_ALIGN, NULL);
	if (ll_file_data_slab == NULL) {
		ll_destroy_inodecache();
		return -ENOMEM;
	}

	ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache",
						  sizeof(struct ll_remote_perm),
						  0, 0, NULL);
	if (ll_remote_perm_cachep == NULL) {
		kmem_cache_destroy(ll_file_data_slab);
		ll_file_data_slab = NULL;
		ll_destroy_inodecache();
		return -ENOMEM;
	}

	ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache",
						   REMOTE_PERM_HASHSIZE *
						   sizeof(cfs_list_t),
						   0, 0, NULL);
	if (ll_rmtperm_hash_cachep == NULL) {
		kmem_cache_destroy(ll_remote_perm_cachep);
		ll_remote_perm_cachep = NULL;
		kmem_cache_destroy(ll_file_data_slab);
		ll_file_data_slab = NULL;
		ll_destroy_inodecache();
		return -ENOMEM;
	}

        proc_lustre_fs_root = proc_lustre_root ?
                              lprocfs_register("llite", proc_lustre_root, NULL, NULL) : NULL;

        lustre_register_client_fill_super(ll_fill_super);
        lustre_register_kill_super_cb(ll_kill_super);

        lustre_register_client_process_config(ll_process_config);

        cfs_get_random_bytes(seed, sizeof(seed));

        /* Nodes with small feet have little entropy.  The NID for this
         * node gives the most entropy in the low bits. */
        for (i=0; ; i++) {
                if (LNetGetId(i, &lnet_id) == -ENOENT) {
                        break;
                }
                if (LNET_NETTYP(LNET_NIDNET(lnet_id.nid)) != LOLND) {
                        seed[0] ^= LNET_NIDADDR(lnet_id.nid);
                }
        }

        cfs_gettimeofday(&tv);
        cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);

        init_timer(&ll_capa_timer);
        ll_capa_timer.function = ll_capa_timer_callback;
        rc = ll_capa_thread_start();
        /*
         * XXX normal cleanup is needed here.
         */
        if (rc == 0)
                rc = vvp_global_init();

        return rc;
}
Example #17
static int __init rtas_flash_init(void)
{
	int rc;

	if (rtas_token("ibm,update-flash-64-and-reboot") ==
		       RTAS_UNKNOWN_SERVICE) {
		pr_info("rtas_flash: no firmware flash support\n");
		return 1;
	}

	firmware_flash_pde = create_flash_pde("powerpc/rtas/"
					      FIRMWARE_FLASH_NAME,
					      &rtas_flash_operations);
	if (firmware_flash_pde == NULL) {
		rc = -ENOMEM;
		goto cleanup;
	}

	rc = initialize_flash_pde_data("ibm,update-flash-64-and-reboot",
			 	       sizeof(struct rtas_update_flash_t), 
				       firmware_flash_pde);
	if (rc != 0)
		goto cleanup;

	firmware_update_pde = create_flash_pde("powerpc/rtas/"
					       FIRMWARE_UPDATE_NAME,
					       &rtas_flash_operations);
	if (firmware_update_pde == NULL) {
		rc = -ENOMEM;
		goto cleanup;
	}

	rc = initialize_flash_pde_data("ibm,update-flash-64-and-reboot",
			 	       sizeof(struct rtas_update_flash_t), 
				       firmware_update_pde);
	if (rc != 0)
		goto cleanup;

	validate_pde = create_flash_pde("powerpc/rtas/" VALIDATE_FLASH_NAME,
			      		&validate_flash_operations);
	if (validate_pde == NULL) {
		rc = -ENOMEM;
		goto cleanup;
	}

	rc = initialize_flash_pde_data("ibm,validate-flash-image",
		                       sizeof(struct rtas_validate_flash_t), 
				       validate_pde);
	if (rc != 0)
		goto cleanup;

	manage_pde = create_flash_pde("powerpc/rtas/" MANAGE_FLASH_NAME,
				      &manage_flash_operations);
	if (manage_pde == NULL) {
		rc = -ENOMEM;
		goto cleanup;
	}

	rc = initialize_flash_pde_data("ibm,manage-flash-image",
			               sizeof(struct rtas_manage_flash_t),
				       manage_pde);
	if (rc != 0)
		goto cleanup;

	rtas_flash_term_hook = rtas_flash_firmware;

	flash_block_cache = kmem_cache_create("rtas_flash_cache",
				RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
				rtas_block_ctor);
	if (!flash_block_cache) {
		printk(KERN_ERR "%s: failed to create block cache\n",
				__func__);
		rc = -ENOMEM;
		goto cleanup;
	}
	return 0;

cleanup:
	remove_flash_pde(firmware_flash_pde);
	remove_flash_pde(firmware_update_pde);
	remove_flash_pde(validate_pde);
	remove_flash_pde(manage_pde);

	return rc;
}
Example #18
/* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */
static int nfs_write_mapping(struct address_space *mapping, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return __nfs_write_mapping(mapping, &wbc, how);
}

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, 0);
}

int nfs_wb_nocommit(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
}

int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret = 0;

	BUG_ON(!PageLocked(page));
	for (;;) {
		req = nfs_page_find_request(page);
		if (req == NULL)
			goto out;
		if (test_bit(PG_CLEAN, &req->wb_flags)) {
			nfs_release_request(req);
			break;
		}
		if (nfs_lock_request_dontget(req)) {
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_request(req);
			break;
		}
		ret = nfs_wait_on_request(req);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
	return ret;
}

static int nfs_wb_page_priority(struct inode *inode, struct page *page,
				int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	do {
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
		} else if (!PagePrivate(page))
			break;
		ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
		if (ret < 0)
			goto out_error;
	} while (PagePrivate(page));
	return 0;
out_error:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page* page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}

#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page)
{
	struct nfs_page *req;
	int ret;

	if (PageFsCache(page))
		nfs_fscache_release_page(page, GFP_KERNEL);

	req = nfs_find_and_lock_request(page);
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	ret = migrate_page(mapping, newpage, page);
	if (!req)
		goto out;
	if (ret)
		goto out_unlock;
	page_cache_get(newpage);
	req->wb_page = newpage;
	SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
out_unlock:
	nfs_clear_page_tag_locked(req);
	nfs_release_request(req);
out:
	return ret;
}
#endif

int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		goto out_destroy_write_cache;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		goto out_destroy_write_mempool;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;

out_destroy_write_mempool:
	mempool_destroy(nfs_wdata_mempool);
out_destroy_write_cache:
	kmem_cache_destroy(nfs_wdata_cachep);
	return -ENOMEM;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}
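
The table in nfs_init_writepagecache()'s comment is simply the formula evaluated at different memory sizes: with 4 KiB pages, a 1 GiB machine has totalram_pages = 262144, int_sqrt(262144) = 512, and (16 * 512) << (12 - 10) = 32768k, matching the table row. A standalone sketch of the computation (assumes PAGE_SHIFT == 12; int_sqrt() is the kernel's integer square root):

static unsigned long demo_nfs_congestion_kb(unsigned long totalram_pages)
{
	unsigned long kb = (16 * int_sqrt(totalram_pages)) << (12 - 10);

	/* same 256 MiB cap as above */
	return kb > 256 * 1024 ? 256 * 1024 : kb;
}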
Example #19
void avtab_cache_init(void)
{
	avtab_node_cachep = kmem_cache_create("avtab_node",
					      sizeof(struct avtab_node),
					      0, SLAB_PANIC, NULL);
}
Example #20
static int
bd_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		inst;
	bd_handle_t	hdl;
	bd_t		*bd;
	bd_drive_t	drive;
	int		rv;
	char		name[16];
	char		kcache[32];

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		/* We don't do anything native for suspend/resume */
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	inst = ddi_get_instance(dip);
	hdl = ddi_get_parent_data(dip);

	(void) snprintf(name, sizeof (name), "%s%d",
	    ddi_driver_name(dip), ddi_get_instance(dip));
	(void) snprintf(kcache, sizeof (kcache), "%s_xfer", name);

	if (hdl == NULL) {
		cmn_err(CE_WARN, "%s: missing parent data!", name);
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(bd_state, inst) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s: unable to zalloc soft state!", name);
		return (DDI_FAILURE);
	}
	bd = ddi_get_soft_state(bd_state, inst);

	if (hdl->h_dma) {
		bd->d_dma = *(hdl->h_dma);
		bd->d_dma.dma_attr_granular =
		    max(DEV_BSIZE, bd->d_dma.dma_attr_granular);
		bd->d_use_dma = B_TRUE;

		if (bd->d_maxxfer &&
		    (bd->d_maxxfer != bd->d_dma.dma_attr_maxxfer)) {
			cmn_err(CE_WARN,
			    "%s: inconsistent maximum transfer size!",
			    name);
			/* We force it */
			bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
		} else {
			bd->d_maxxfer = bd->d_dma.dma_attr_maxxfer;
		}
	} else {
		bd->d_use_dma = B_FALSE;
		if (bd->d_maxxfer == 0) {
			bd->d_maxxfer = 1024 * 1024;
		}
	}
	bd->d_ops = hdl->h_ops;
	bd->d_private = hdl->h_private;
	bd->d_blkshift = 9;	/* 512 bytes, to start */

	if (bd->d_maxxfer % DEV_BSIZE) {
		cmn_err(CE_WARN, "%s: maximum transfer misaligned!", name);
		bd->d_maxxfer &= ~(DEV_BSIZE - 1);
	}
	if (bd->d_maxxfer < DEV_BSIZE) {
		cmn_err(CE_WARN, "%s: maximum transfer size too small!", name);
		ddi_soft_state_free(bd_state, inst);
		return (DDI_FAILURE);
	}

	bd->d_dip = dip;
	bd->d_handle = hdl;
	hdl->h_bd = bd;
	ddi_set_driver_private(dip, bd);

	mutex_init(&bd->d_iomutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&bd->d_ocmutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&bd->d_statemutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&bd->d_statecv, NULL, CV_DRIVER, NULL);

	list_create(&bd->d_waitq, sizeof (bd_xfer_impl_t),
	    offsetof(struct bd_xfer_impl, i_linkage));
	list_create(&bd->d_runq, sizeof (bd_xfer_impl_t),
	    offsetof(struct bd_xfer_impl, i_linkage));

	bd->d_cache = kmem_cache_create(kcache, sizeof (bd_xfer_impl_t), 8,
	    bd_xfer_ctor, bd_xfer_dtor, NULL, bd, NULL, 0);

	bd->d_ksp = kstat_create(ddi_driver_name(dip), inst, NULL, "disk",
	    KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
	if (bd->d_ksp != NULL) {
		bd->d_ksp->ks_lock = &bd->d_iomutex;
		kstat_install(bd->d_ksp);
		bd->d_kiop = bd->d_ksp->ks_data;
	} else {
		/*
		 * Even if we cannot create the kstat, we create a
		 * scratch kstat.  The reason for this is to ensure
		 * that we can update the kstat all of the time,
		 * without adding an extra branch instruction.
		 */
		bd->d_kiop = kmem_zalloc(sizeof (kstat_io_t), KM_SLEEP);
	}

	cmlb_alloc_handle(&bd->d_cmlbh);

	bd->d_state = DKIO_NONE;

	bzero(&drive, sizeof (drive));
	bd->d_ops.o_drive_info(bd->d_private, &drive);
	bd->d_qsize = drive.d_qsize;
	bd->d_removable = drive.d_removable;
	bd->d_hotpluggable = drive.d_hotpluggable;

	if (drive.d_maxxfer && drive.d_maxxfer < bd->d_maxxfer)
		bd->d_maxxfer = drive.d_maxxfer;


	rv = cmlb_attach(dip, &bd_tg_ops, DTYPE_DIRECT,
	    bd->d_removable, bd->d_hotpluggable,
	    drive.d_lun >= 0 ? DDI_NT_BLOCK_CHAN : DDI_NT_BLOCK,
	    CMLB_FAKE_LABEL_ONE_PARTITION, bd->d_cmlbh, 0);
	if (rv != 0) {
		cmlb_free_handle(&bd->d_cmlbh);
		kmem_cache_destroy(bd->d_cache);
		mutex_destroy(&bd->d_iomutex);
		mutex_destroy(&bd->d_ocmutex);
		mutex_destroy(&bd->d_statemutex);
		cv_destroy(&bd->d_statecv);
		list_destroy(&bd->d_waitq);
		list_destroy(&bd->d_runq);
		if (bd->d_ksp != NULL) {
			kstat_delete(bd->d_ksp);
			bd->d_ksp = NULL;
		} else {
			kmem_free(bd->d_kiop, sizeof (kstat_io_t));
		}
		ddi_soft_state_free(bd_state, inst);
		return (DDI_FAILURE);
	}

	if (bd->d_ops.o_devid_init != NULL) {
		rv = bd->d_ops.o_devid_init(bd->d_private, dip, &bd->d_devid);
		if (rv == DDI_SUCCESS) {
			if (ddi_devid_register(dip, bd->d_devid) !=
			    DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "%s: unable to register devid", name);
			}
		}
	}

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers).  Also set up properties
	 * used by HAL to identify removable media.
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);
	if (bd->d_removable) {
		(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    "removable-media", NULL, 0);
	}
	if (bd->d_hotpluggable) {
		(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
		    "hotpluggable", NULL, 0);
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);
}
Example #21
static int dazukofs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct dazukofs_sb_info *sbi;
	struct dentry *root;
	static const struct qstr name = { .name = "/", .len = 1 };
	struct dazukofs_dentry_info *di;

	sbi = kmem_cache_zalloc(dazukofs_sb_info_cachep, GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_op = &dazukofs_sops;

	root = d_alloc(NULL, &name);
	if (!root) {
		kmem_cache_free(dazukofs_sb_info_cachep, sbi);
		return -ENOMEM;
	}

	sb->s_root = root;

	sb->s_root->d_op = &dazukofs_dops;
	sb->s_root->d_sb = sb;
	sb->s_root->d_parent = sb->s_root;

	di = kmem_cache_zalloc(dazukofs_dentry_info_cachep, GFP_KERNEL);
	if (!di) {
		kmem_cache_free(dazukofs_sb_info_cachep, sbi);
		dput(sb->s_root);
		return -ENOMEM;
	}

	set_dentry_private(sb->s_root, di);

	set_sb_private(sb, sbi);

	return 0;
}

static int dazukofs_read_super(struct super_block *sb, const char *dev_name)
{
	struct nameidata nd;
	struct dentry *lower_root;
	struct vfsmount *lower_mnt;
	int err;

	memset(&nd, 0, sizeof(struct nameidata));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
	err = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &nd.path);
#else
	err = path_lookup(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &nd);
#endif
	if (err)
		return err;

	lower_root = dget(nd.path.dentry);
	lower_mnt = mntget(nd.path.mnt);

	if (IS_ERR(lower_root)) {
		err = PTR_ERR(lower_root);
		goto out_put;
	}

	if (!lower_root->d_inode) {
		err = -ENOENT;
		goto out_put;
	}

	if (!S_ISDIR(lower_root->d_inode->i_mode)) {
		err = -EINVAL;
		goto out_put;
	}

	set_lower_sb(sb, lower_root->d_sb);
	sb->s_maxbytes = lower_root->d_sb->s_maxbytes;
	set_lower_dentry(sb->s_root, lower_root, lower_mnt);

	err = dazukofs_interpose(lower_root, sb->s_root, sb, 0);
	if (err)
		goto out_put;
	goto out;

out_put:
	dput(lower_root);
	mntput(lower_mnt);
out:
	path_put(&nd.path);
	return err;
}

// FIXME!
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)

static struct dentry* dazukofs_get_sb(struct file_system_type *fs_type, int flags,
						   const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, dazukofs_fill_super);
}
#else
static int dazukofs_get_sb(struct file_system_type *fs_type, int flags,
			   const char *dev_name, void *data,
			   struct vfsmount *mnt)
{
	struct super_block *sb;
	int err;

	err = get_sb_nodev(fs_type, flags, data, dazukofs_fill_super, mnt);

	if (err)
		goto out;
	
	sb = mnt->mnt_sb;

	err = dazukofs_parse_mount_options(data, sb);
	if (err)
		goto out_abort;

	err = dazukofs_read_super(sb, dev_name);
	if (err)
		goto out_abort;

	goto out;

out_abort:
	dput(sb->s_root);
	up_write(&sb->s_umount);
	deactivate_super(sb);
out:
	return err;
}
#endif

static void init_once(void *data)
{
	struct dazukofs_inode_info *inode_info =
		(struct dazukofs_inode_info *)data;

	memset(inode_info, 0, sizeof(struct dazukofs_inode_info));
	inode_init_once(&(inode_info->vfs_inode));
}

static void destroy_caches(void)
{
	if (dazukofs_inode_info_cachep) {
		kmem_cache_destroy(dazukofs_inode_info_cachep);
		dazukofs_inode_info_cachep = NULL;
	}

	if (dazukofs_sb_info_cachep) {
		kmem_cache_destroy(dazukofs_sb_info_cachep);
		dazukofs_sb_info_cachep = NULL;
	}

	if (dazukofs_dentry_info_cachep) {
		kmem_cache_destroy(dazukofs_dentry_info_cachep);
		dazukofs_dentry_info_cachep = NULL;
	}

	if (dazukofs_file_info_cachep) {
		kmem_cache_destroy(dazukofs_file_info_cachep);
		dazukofs_file_info_cachep = NULL;
	}
}

static int init_caches(void)
{
	dazukofs_inode_info_cachep =
		kmem_cache_create("dazukofs_inode_info_cache",
				  sizeof(struct dazukofs_inode_info), 0,
				  SLAB_HWCACHE_ALIGN,
				  init_once);
	if (!dazukofs_inode_info_cachep)
		goto out_nomem;

	dazukofs_sb_info_cachep =
		kmem_cache_create("dazukofs_sb_info_cache",
				  sizeof(struct dazukofs_sb_info), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	if (!dazukofs_sb_info_cachep)
		goto out_nomem;

	dazukofs_dentry_info_cachep =
		kmem_cache_create("dazukofs_dentry_info_cache",
				  sizeof(struct dazukofs_dentry_info), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	if (!dazukofs_dentry_info_cachep)
		goto out_nomem;

	dazukofs_file_info_cachep =
		kmem_cache_create("dazukofs_file_info_cache",
				  sizeof(struct dazukofs_file_info), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
	if (!dazukofs_file_info_cachep)
		goto out_nomem;

	return 0;

out_nomem:
	destroy_caches();
	return -ENOMEM;
}
Example #22
static int ehca_create_slab_caches(void)
{
	int ret;

	ret = ehca_init_pd_cache();
	if (ret) {
		ehca_gen_err("Cannot create PD SLAB cache.");
		return ret;
	}

	ret = ehca_init_cq_cache();
	if (ret) {
		ehca_gen_err("Cannot create CQ SLAB cache.");
		goto create_slab_caches2;
	}

	ret = ehca_init_qp_cache();
	if (ret) {
		ehca_gen_err("Cannot create QP SLAB cache.");
		goto create_slab_caches3;
	}

	ret = ehca_init_av_cache();
	if (ret) {
		ehca_gen_err("Cannot create AV SLAB cache.");
		goto create_slab_caches4;
	}

	ret = ehca_init_mrmw_cache();
	if (ret) {
		ehca_gen_err("Cannot create MR&MW SLAB cache.");
		goto create_slab_caches5;
	}

	ret = ehca_init_small_qp_cache();
	if (ret) {
		ehca_gen_err("Cannot create small queue SLAB cache.");
		goto create_slab_caches6;
	}

#ifdef CONFIG_PPC_64K_PAGES
	ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
					EHCA_PAGESIZE, H_CB_ALIGNMENT,
					SLAB_HWCACHE_ALIGN,
					NULL);
	if (!ctblk_cache) {
		ehca_gen_err("Cannot create ctblk SLAB cache.");
		ehca_cleanup_small_qp_cache();
		goto create_slab_caches6;
	}
#endif
	return 0;

create_slab_caches6:
	ehca_cleanup_mrmw_cache();

create_slab_caches5:
	ehca_cleanup_av_cache();

create_slab_caches4:
	ehca_cleanup_qp_cache();

create_slab_caches3:
	ehca_cleanup_cq_cache();

create_slab_caches2:
	ehca_cleanup_pd_cache();

	return ret;
}
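
The labels at the bottom form a textbook unwind ladder: each failing step jumps to the label that undoes the step before it, and control falls through the remaining cleanups in reverse order of acquisition. Distilled to a skeleton (step_a/step_b/step_c are placeholders, not ehca functions):

static int demo_init(void)
{
	int ret;

	ret = step_a_init();
	if (ret)
		return ret;

	ret = step_b_init();
	if (ret)
		goto undo_a;

	ret = step_c_init();
	if (ret)
		goto undo_b;

	return 0;

undo_b:
	step_b_cleanup();
undo_a:
	step_a_cleanup();
	return ret;
}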
Example #23
int __init s3c_dma_init(unsigned int channels, unsigned int irq,
			    unsigned int stride)
{
	struct s3c2410_dma_chan *cp;
	s3c_dma_controller_t *dconp;
	int channel, controller;
	int ret;

	printk("S3C PL330-DMA Controller Driver, (c) 2008-2009 Samsung Electronics\n");

	dma_channels = channels;
	printk("Total %d DMA channels will be initialized.\n", channels);

	ret = sysdev_class_register(&dma_sysclass);
	if (ret != 0) {
		printk(KERN_ERR "dma sysclass registration failed.\n");
		goto err;
	}

	dma_kmem = kmem_cache_create("dma_desc",
				     sizeof(struct s3c_dma_buf), 0,
				     SLAB_HWCACHE_ALIGN,
				     s3c_dma_cache_ctor);

	if (dma_kmem == NULL) {
		printk(KERN_ERR "DMA failed to make kmem cache for DMA channel descriptor\n");
		ret = -ENOMEM;
		goto err;
	}

	for (controller = 0; controller < S3C_DMA_CONTROLLERS; controller++) {
		dconp = &s3c_dma_cntlrs[controller];

		memset(dconp, 0, sizeof(s3c_dma_controller_t));

		if(controller == 0) {
			dma_base = ioremap(S3C_PA_DMA, stride);
			if (dma_base == NULL) {
				printk(KERN_ERR "M2M-DMA failed to ioremap register block\n");
				return -ENOMEM;
			}

			/* dma controller's irqs are in order.. */
			dconp->irq = controller + irq;
		} else {
			dma_base = ioremap(((S3C_PA_DMA + 0xF00000) + ((controller-1) * 0x200000)), stride);
			if (dma_base == NULL) {
				printk(KERN_ERR "Peri-DMA failed to ioremap register block\n");
				return -ENOMEM;
			}

			/* dma controller's irqs are in order.. */
			dconp->irq = controller + irq;
		}

		dconp->number = controller;
		dconp->regs = dma_base;
		pr_debug("PL330 DMA controller : %d irq %d regs_base %x\n", dconp->number, dconp->irq,
			  dconp->regs);
	}

	for (channel = 0; channel < channels; channel++) {
		controller = channel / S3C_CHANNELS_PER_DMA;
		cp = &s3c_dma_chans[channel];

		memset(cp, 0, sizeof(struct s3c2410_dma_chan));

		cp->dma_con = &s3c_dma_cntlrs[controller];

		/* dma channel irqs are in order.. */
		cp->index = channel;
		cp->number = channel%S3C_CHANNELS_PER_DMA;

		cp->irq = s3c_dma_cntlrs[controller].irq;

		cp->regs = s3c_dma_cntlrs[controller].regs;

		/* point current stats somewhere */
		cp->stats = &cp->stats_store;
		cp->stats_store.timeout_shortest = LONG_MAX;

		/* basic channel configuration */
		cp->load_timeout = 1 << 18;

		/* register system device */
		cp->dev.cls = &dma_sysclass;
		cp->dev.id = channel;

		pr_debug("DMA channel %d at %p, irq %d\n", cp->number, cp->regs, cp->irq);
	}

	return 0;
err:
	kmem_cache_destroy(dma_kmem);
	iounmap(dma_base);
	dma_base = NULL;
	return ret;
}
Example #24
/*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
 */
static __init int init_posix_timers(void)
{
	static struct k_clock clock_realtime = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_clock_realtime_get,
		.clock_set	= posix_clock_realtime_set,
		.clock_adj	= posix_clock_realtime_adj,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	static struct k_clock clock_monotonic = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_ktime_get_ts,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	static struct k_clock clock_monotonic_raw = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_monotonic_raw,
	};
	static struct k_clock clock_realtime_coarse = {
		.clock_getres	= posix_get_coarse_res,
		.clock_get	= posix_get_realtime_coarse,
	};
	static struct k_clock clock_monotonic_coarse = {
		.clock_getres	= posix_get_coarse_res,
		.clock_get	= posix_get_monotonic_coarse,
	};
	static struct k_clock clock_tai = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_tai,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	static struct k_clock clock_boottime = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_boottime,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};

	posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
	posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
	posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
	posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
	posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
	posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);
	posix_timers_register_clock(CLOCK_TAI, &clock_tai);

	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,
					NULL);
	return 0;
}
Example #25
File: fork.c  Project: 19Dan01/linux
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
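
Both thread_info caches (this one and Example #15) pass THREAD_SIZE as the alignment as well as the size. That is deliberate: when every thread_info/stack block is aligned to its own size, many architectures can recover the current thread by masking the stack pointer. A rough sketch of the trick (not any one architecture's exact code; current_stack_pointer is assumed to be available):

static inline struct thread_info *demo_current_thread_info(void)
{
	/* any address inside a THREAD_SIZE-aligned block maps back to
	 * the block's base once the low bits are cleared */
	return (struct thread_info *)
		(current_stack_pointer & ~(THREAD_SIZE - 1));
}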
Example #26
static int __init fnic_init_module(void)
{
	size_t len;
	int err = 0;

	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	/* Create debugfs entries for fnic */
	err = fnic_debugfs_init();
	if (err < 0) {
		printk(KERN_ERR PFX "Failed to create fnic directory "
				"for tracing and stats logging\n");
		fnic_debugfs_terminate();
	}

	/* Allocate memory for trace buffer */
	err = fnic_trace_buf_init();
	if (err < 0) {
		printk(KERN_ERR PFX "Trace buffer initialization Failed "
				  "Fnic Tracing utility is disabled\n");
		fnic_trace_free();
	}

	/* Create a cache for allocation of default size sgls */
	len = sizeof(struct fnic_dflt_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
		("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		 SLAB_HWCACHE_ALIGN,
		 NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
		printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
		err = -ENOMEM;
		goto err_create_fnic_sgl_slab_dflt;
	}

	/* Create a cache for allocation of max size sgls*/
	len = sizeof(struct fnic_sgl_list);
	fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
		("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
		  SLAB_HWCACHE_ALIGN,
		  NULL);
	if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
		printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
		err = -ENOMEM;
		goto err_create_fnic_sgl_slab_max;
	}

	/* Create a cache of io_req structs for use via mempool */
	fnic_io_req_cache = kmem_cache_create("fnic_io_req",
					      sizeof(struct fnic_io_req),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fnic_io_req_cache) {
		printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
		err = -ENOMEM;
		goto err_create_fnic_ioreq_slab;
	}

	fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
	if (!fnic_event_queue) {
		printk(KERN_ERR PFX "fnic work queue create failed\n");
		err = -ENOMEM;
		goto err_create_fnic_workq;
	}

	spin_lock_init(&fnic_list_lock);
	INIT_LIST_HEAD(&fnic_list);

	fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
	if (!fnic_fip_queue) {
		printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
		err = -ENOMEM;
		goto err_create_fip_workq;
	}

	fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
	if (!fnic_fc_transport) {
		printk(KERN_ERR PFX "fc_attach_transport error\n");
		err = -ENOMEM;
		goto err_fc_transport;
	}

	/* register the driver with PCI system */
	err = pci_register_driver(&fnic_driver);
	if (err < 0) {
		printk(KERN_ERR PFX "pci register error\n");
		goto err_pci_register;
	}
	return err;

err_pci_register:
	fc_release_transport(fnic_fc_transport);
err_fc_transport:
	destroy_workqueue(fnic_fip_queue);
err_create_fip_workq:
	destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
	kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
err_create_fnic_sgl_slab_max:
	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
	fnic_trace_free();
	fnic_debugfs_terminate();
	return err;
}
Example #27
File: pos.c  Project: chl4651/HEAPO
struct kmem_cache *
pos_kmem_cache_create (const char *name, size_t size, size_t align,
	unsigned long flags, void (*ctor)(void *))
{
	return kmem_cache_create(name, size, align, flags, ctor);
}
Example #28
static int __init init_gfs2_fs(void)
{
	int error;

	gfs2_str2qstr(&gfs2_qdot, ".");
	gfs2_str2qstr(&gfs2_qdotdot, "..");

	error = gfs2_sys_init();
	if (error)
		return error;

	error = gfs2_glock_init();
	if (error)
		goto fail;

	error = -ENOMEM;
	gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
					      sizeof(struct gfs2_glock),
					      0, 0,
					      gfs2_init_glock_once);
	if (!gfs2_glock_cachep)
		goto fail;

	gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
					sizeof(struct gfs2_glock) +
					sizeof(struct address_space),
					0, 0, gfs2_init_gl_aspace_once);

	if (!gfs2_glock_aspace_cachep)
		goto fail;

	gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
					      sizeof(struct gfs2_inode),
					      0,  SLAB_RECLAIM_ACCOUNT|
					          SLAB_MEM_SPREAD,
					      gfs2_init_inode_once);
	if (!gfs2_inode_cachep)
		goto fail;

	gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
						sizeof(struct gfs2_bufdata),
					        0, 0, NULL);
	if (!gfs2_bufdata_cachep)
		goto fail;

	gfs2_rgrpd_cachep = kmem_cache_create("gfs2_rgrpd",
					      sizeof(struct gfs2_rgrpd),
					      0, 0, NULL);
	if (!gfs2_rgrpd_cachep)
		goto fail;

	gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
					       sizeof(struct gfs2_quota_data),
					       0, 0, NULL);
	if (!gfs2_quotad_cachep)
		goto fail;

	register_shrinker(&qd_shrinker);

	error = register_filesystem(&gfs2_fs_type);
	if (error)
		goto fail;

	error = register_filesystem(&gfs2meta_fs_type);
	if (error)
		goto fail_unregister;

	error = -ENOMEM;
	gfs_recovery_wq = alloc_workqueue("gfs_recovery",
					  WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
	if (!gfs_recovery_wq)
		goto fail_wq;

	gfs2_control_wq = alloc_workqueue("gfs2_control",
			       WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0);
	if (!gfs2_control_wq)
		goto fail_recovery;

	gfs2_bh_pool = mempool_create(1024, gfs2_bh_alloc, gfs2_bh_free, NULL);
	if (!gfs2_bh_pool)
		goto fail_control;

	gfs2_register_debugfs();

	printk("GFS2 installed\n");

	return 0;

fail_control:
	destroy_workqueue(gfs2_control_wq);
fail_recovery:
	destroy_workqueue(gfs_recovery_wq);
fail_wq:
	unregister_filesystem(&gfs2meta_fs_type);
fail_unregister:
	unregister_filesystem(&gfs2_fs_type);
fail:
	unregister_shrinker(&qd_shrinker);
	gfs2_glock_exit();

	if (gfs2_quotad_cachep)
		kmem_cache_destroy(gfs2_quotad_cachep);

	if (gfs2_rgrpd_cachep)
		kmem_cache_destroy(gfs2_rgrpd_cachep);

	if (gfs2_bufdata_cachep)
		kmem_cache_destroy(gfs2_bufdata_cachep);

	if (gfs2_inode_cachep)
		kmem_cache_destroy(gfs2_inode_cachep);

	if (gfs2_glock_aspace_cachep)
		kmem_cache_destroy(gfs2_glock_aspace_cachep);

	if (gfs2_glock_cachep)
		kmem_cache_destroy(gfs2_glock_cachep);

	gfs2_sys_uninit();
	return error;
}
Example #29
static int __init init_gfs2_fs(void)
{
	int error;

	error = gfs2_sys_init();
	if (error)
		return error;

	error = gfs2_glock_init();
	if (error)
		goto fail;

	error = -ENOMEM;
	gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
					      sizeof(struct gfs2_glock),
					      0, 0,
					      gfs2_init_glock_once, NULL);
	if (!gfs2_glock_cachep)
		goto fail;

	gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
					      sizeof(struct gfs2_inode),
					      0,  SLAB_RECLAIM_ACCOUNT|
					          SLAB_MEM_SPREAD,
					      gfs2_init_inode_once, NULL);
	if (!gfs2_inode_cachep)
		goto fail;

	gfs2_bufdata_cachep = kmem_cache_create("gfs2_bufdata",
						sizeof(struct gfs2_bufdata),
					        0, 0, NULL, NULL);
	if (!gfs2_bufdata_cachep)
		goto fail;

	error = register_filesystem(&gfs2_fs_type);
	if (error)
		goto fail;

	error = register_filesystem(&gfs2meta_fs_type);
	if (error)
		goto fail_unregister;

	printk("GFS2 (built %s %s) installed\n", __DATE__, __TIME__);

	return 0;

fail_unregister:
	unregister_filesystem(&gfs2_fs_type);
fail:
	if (gfs2_bufdata_cachep)
		kmem_cache_destroy(gfs2_bufdata_cachep);

	if (gfs2_inode_cachep)
		kmem_cache_destroy(gfs2_inode_cachep);

	if (gfs2_glock_cachep)
		kmem_cache_destroy(gfs2_glock_cachep);

	gfs2_sys_uninit();
	return error;
}
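
Example #29 is the same GFS2 initializer built against an older kernel whose kmem_cache_create() still took a destructor as a sixth argument, which is why every call carries a trailing NULL that Examples #1 and #28 lack. The pre-2.6.23 prototype, for reference (paraphrased; qualifiers approximate):

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
		size_t align, unsigned long flags,
		void (*ctor)(void *, struct kmem_cache *, unsigned long),
		void (*dtor)(void *, struct kmem_cache *, unsigned long));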
Example #30
void __init ima_iintcache_init(void)
{
	iint_cache =
	    kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
			      SLAB_PANIC, init_once);
}