示例#1
0
/*
 * Initialize the mbuf allocator.
 *
 * Creates the mbuf pool, the packet-tag pool, and one cluster pool
 * per entry in mclsizes[], then applies the configured cluster limit
 * via nmbclust_update().
 */
void
mbinit(void)
{
	int i;

#if DIAGNOSTIC
	/* The cluster size table must span exactly [MCLBYTES, MAXMCLBYTES]. */
	if (mclsizes[0] != MCLBYTES)
		panic("mbinit: the smallest cluster size != MCLBYTES");
	if (mclsizes[nitems(mclsizes) - 1] != MAXMCLBYTES)
		panic("mbinit: the largest cluster size != MAXMCLBYTES");
#endif

	/* mbufs must be DMA-reachable; keep a low-water reserve. */
	pool_init(&mbpool, MSIZE, 0, 0, 0, "mbufpl", NULL);
	pool_setipl(&mbpool, IPL_NET);
	pool_set_constraints(&mbpool, &kp_dma_contig);
	pool_setlowat(&mbpool, mblowat);

	pool_init(&mtagpool, PACKET_TAG_MAXSIZE + sizeof(struct m_tag),
	    0, 0, 0, "mtagpl", NULL);
	pool_setipl(&mtagpool, IPL_NET);

	/* One pool per cluster size, named "mcl<size>k" from the table. */
	for (i = 0; i < nitems(mclsizes); i++) {
		snprintf(mclnames[i], sizeof(mclnames[0]), "mcl%dk",
		    mclsizes[i] >> 10);
		pool_init(&mclpools[i], mclsizes[i], 0, 0, 0,
		    mclnames[i], NULL);
		pool_setipl(&mclpools[i], IPL_NET);
		pool_set_constraints(&mclpools[i], &kp_dma_contig);
		pool_setlowat(&mclpools[i], mcllowat);
	}

	nmbclust_update();
}
示例#2
0
/*
 * dmoverioattach:
 *
 *	Pseudo-device attach routine.
 *
 *	Creates the pools backing dmio state and user-request state
 *	objects.  The `count` argument is unused here.
 */
void
dmoverioattach(int count)
{

	pool_init(&dmio_state_pool, sizeof(struct dmio_state),
	    0, 0, 0, "dmiostate", NULL, IPL_SOFTCLOCK);
	pool_init(&dmio_usrreq_state_pool, sizeof(struct dmio_usrreq_state),
	    0, 0, 0, "dmiourstate", NULL, IPL_SOFTCLOCK);
}
示例#3
0
/*
 * Reinitialize inode hash table.
 *
 * One-time NFS node subsystem setup: registers the M_NFSNODE malloc
 * type and creates the pools backing struct nfsnode and struct vattr
 * allocations.
 */
void
nfs_node_init(void)	/* (void): old-style () prototype removed in C23 */
{
	malloc_type_attach(M_NFSNODE);
	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, 0, 0, "nfsnodepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&nfs_vattr_pool, sizeof(struct vattr), 0, 0, 0, "nfsvapl",
	    &pool_allocator_nointr, IPL_NONE);
}
示例#4
0
/*
 * Initialize timekeeping.
 *
 * Creates the pools that back struct ptimer (single POSIX timer) and
 * struct ptimers (per-process timer set) allocations.
 */
void
time_init(void)
{

	pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&ptimers_pool, sizeof(struct ptimers), 0, 0, 0, "ptimerspl",
	    &pool_allocator_nointr, IPL_NONE);
}
示例#5
0
/*
 * ext2fs module init: create the in-core inode and on-disk dinode
 * pools, then run the shared UFS initialization and return its result.
 */
int
ext2fs_init(struct vfsconf *vfsp)
{
	pool_init(&ext2fs_inode_pool, sizeof(struct inode), 0, 0, 0,
	    "ext2inopl", &pool_allocator_nointr);
	pool_init(&ext2fs_dinode_pool, sizeof(struct ext2fs_dinode), 0, 0, 0,
	    "ext2dinopl", &pool_allocator_nointr);

	return (ufs_init(vfsp));
}
示例#6
0
/*
 * ext2fs module init (NetBSD variant): create the in-core inode and
 * on-disk dinode pools, then run the shared UFS initialization.
 */
void
ext2fs_init(void)
{

	pool_init(&ext2fs_inode_pool, sizeof(struct inode), 0, 0, 0,
	    "ext2fsinopl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&ext2fs_dinode_pool, sizeof(struct ext2fs_dinode), 0, 0, 0,
	    "ext2dinopl", &pool_allocator_nointr, IPL_NONE);
	ufs_init();
}
示例#7
0
/*
 * ext2fs module init (PR_WAITOK variant): create the inode and dinode
 * pools with sleeping allocation allowed, then run UFS init and
 * return its result.
 */
int
ext2fs_init(struct vfsconf *vfsp)
{
	pool_init(&ext2fs_inode_pool, sizeof(struct inode), 0, 0, PR_WAITOK,
	    "ext2inopl", NULL);
	pool_init(&ext2fs_dinode_pool, sizeof(struct ext2fs_dinode), 0, 0,
	    PR_WAITOK, "ext2dinopl", NULL);

	return (ufs_init(vfsp));
}
示例#8
0
/*
 * One-time UDF setup: pools for filename translation buffers, in-core
 * unodes, and directory streams.  The vfsconf argument is unused;
 * always returns 0.
 */
int
udf_init(struct vfsconf *foo)
{
	pool_init(&udf_trans_pool, MAXNAMLEN * sizeof(unicode_t), 0, 0,
	    PR_WAITOK, "udftrpl", NULL);
	pool_init(&unode_pool, sizeof(struct unode), 0, 0,
	    PR_WAITOK, "udfndpl", NULL);
	pool_init(&udf_ds_pool, sizeof(struct udf_dirstream), 0, 0,
	    PR_WAITOK, "udfdspl", NULL);

	return (0);
}
示例#9
0
/*
 * One-time UDF setup (nointr-allocator variant): pools for filename
 * translation buffers, in-core unodes, and directory streams.  The
 * vfsconf argument is unused; always returns 0.
 */
int
udf_init(struct vfsconf *foo)
{
	pool_init(&udf_trans_pool, MAXNAMLEN * sizeof(unicode_t), 0, 0, 0,
	    "udftrpl", &pool_allocator_nointr);
	pool_init(&unode_pool, sizeof(struct unode), 0, 0, 0,
	    "udfndpl", &pool_allocator_nointr);
	pool_init(&udf_ds_pool, sizeof(struct udf_dirstream), 0, 0, 0,
	    "udfdspl", &pool_allocator_nointr);

	return (0);
}
/*
 * Reinitialize inode hash table.
 *
 * Creates the nfsnode and vattr pools and the "silly rename" cleanup
 * workqueue; panics if the workqueue cannot be created, since NFS
 * cannot operate without it.
 */
void
nfs_node_init(void)
{

	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, 0, 0, "nfsnodepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&nfs_vattr_pool, sizeof(struct vattr), 0, 0, 0, "nfsvapl",
	    &pool_allocator_nointr, IPL_NONE);
	if (workqueue_create(&nfs_sillyworkq, "nfssilly", nfs_sillyworker,
	    NULL, PRI_NONE, IPL_NONE, 0) != 0) {
	    	panic("nfs_node_init");
	}
}
示例#11
0
/*
 * Create a task dispatcher running `nthreads` worker threads.
 *
 * On success stores the new dispatcher in *ptd and returns 0; on
 * failure returns -1 and leaves *ptd untouched.  The caller owns the
 * dispatcher returned through *ptd.
 */
int task_dispatcher_init(struct task_dispatcher_t** ptd, uint32_t nthreads)
{
   /* %u matches the unsigned uint32_t argument (%d was a mismatch). */
   LOGI("Starting task dispatcher with %u threads", nthreads);

   /* calloc zero-initializes, replacing malloc+memset, and is checked:
    * the old code dereferenced a potentially NULL pointer. */
   struct task_dispatcher_t* td = calloc(1, sizeof(struct task_dispatcher_t));
   if (td == NULL)
   {
      return -1;
   }
   td->nthreads = nthreads;
   td->running = 1;

   if (pool_init(&td->tasks, MAX_TASKS, sizeof(struct task_t)) != 0)
   {
      free(td);   /* was leaked on this error path before */
      return -1;
   }

   pthread_mutex_init(&td->event_lock, NULL);
   pthread_cond_init(&td->event, NULL);

   /* Recursive lock: presumably dispatcher APIs may be re-entered from
    * callbacks holding it — TODO confirm against working_thread. */
   pthread_mutexattr_init(&td->lock_attr);
   pthread_mutexattr_settype(&td->lock_attr, PTHREAD_MUTEX_RECURSIVE);
   pthread_mutex_init(&td->lock, &td->lock_attr);

   /* NOTE(review): pthread_create results are still unchecked here;
    * a failed spawn leaves fewer workers than requested. */
   uint32_t i = 0;
   for (i = 0; i < td->nthreads; ++i)
   {
      pthread_create(&td->threads[i], NULL, working_thread, td);
   }

   (*ptd) = td;
   return 0;
}
示例#12
0
/*
 * Demo driver: start a thread pool capped at 3 active threads, queue
 * 10 jobs, wait for them, then tear everything down.
 */
int main(int argc,char **argv)
{
	/* At most 3 active threads in the pool. */
	pool_init(3);

	int *workingnum=malloc(sizeof(int)*10);
	if (workingnum == NULL)
	{
		/* Allocation failed: tear down the pool and bail out. */
		pool_destroy();
		return 1;
	}

	/* Submit 10 tasks to the pool in a row. */
	int i;
	for(i=0;i<10;i++)
	{
		workingnum[i]=i;
		pool_add_worker(myprocess,&workingnum[i]);
	}

	/* Wait for all tasks to finish (fixed delay, no join API here). */
	sleep(5);

	/* Destroy the thread pool. */
	pool_destroy();

	free(workingnum);

	return 0;
}
示例#13
0
/*
 * Producer/consumer smoke test: spawn producers and consumers against
 * a shared pool, let them run for one second, then join everything.
 */
int main(void)
{
    go = false;
    finish = false;
    pool_init(&pool);

    for(int i = 0;i < NUM_PRODUCER;i++)
    {
        pthread_create(&producer[i], NULL, &producer_runner, (void*)(size_t)i);
    }

    for(int i = 0;i < NUM_CONSUMER;i++)
    {
        /* BUGFIX: consumer threads were stored in producer[i], so the
         * consumer[i] handles joined below were never created. */
        pthread_create(&consumer[i], NULL, &thread_runner, (void*)(size_t)i);
    }

    go = true;

    sleep(1);

    finish = true;

    for(int i = 0;i < NUM_PRODUCER;i++)
    {
        pthread_join(producer[i], NULL);
    }

    for(int i = 0;i < NUM_CONSUMER;i++)
    {
        pthread_join(consumer[i], NULL);
    }
    return 0;
}
示例#14
0
文件: pool_tests.c 项目: jbrd/libmem
/* Cleaning up a pool twice must be a harmless no-op. */
static void _ensure_pool_cleanup_gracefully_handles_cleaned_up_pool( void )
{
	pool_t p;

	pool_init( &p, 4, 16, allocator_default( ) );
	pool_cleanup( &p );

	/* Second cleanup on the already-cleaned pool must not crash. */
	pool_cleanup( &p );
}
示例#15
0
文件: pool_tests.c 项目: jbrd/libmem
/* pool_is_empty must report non-zero for a pool that was cleaned up. */
static void _ensure_pool_is_empty_gracefully_handles_cleaned_up_pool( void )
{
	pool_t p;

	pool_init( &p, 4, 1, allocator_default( ) );
	pool_cleanup( &p );

	TEST_REQUIRE( pool_is_empty( &p ) != 0 );
}
示例#16
0
/*
 * One-time NFSv4 ACL subsystem setup: create the ACL object pool and
 * the ACL hash table, then run the self-test.  Returns
 * NFS_V4_ACL_SUCCESS, or NFS_V4_ACL_INTERNAL_ERROR if the hash table
 * cannot be created.
 */
int nfs4_acls_init(void)
{
    LogDebug(COMPONENT_NFS_V4_ACL, "Initialize NFSv4 ACLs");
    LogDebug(COMPONENT_NFS_V4_ACL,
             "sizeof(fsal_ace_t)=%zu, sizeof(fsal_acl_t)=%zu",
             sizeof(fsal_ace_t), sizeof(fsal_acl_t));

    /* Initialize memory pool of ACLs. */
    /* NOTE(review): fsal_acl_pool is not NULL-checked here, unlike
     * fsal_acl_hash below — confirm pool_init aborts internally on
     * failure rather than returning NULL. */
    fsal_acl_pool =
        pool_init(NULL, sizeof(fsal_acl_t), pool_basic_substrate, NULL,
                  NULL, NULL);

    /* Create hash table. */
    fsal_acl_hash = hashtable_init(&fsal_acl_hash_config);

    if (!fsal_acl_hash) {
        LogCrit(COMPONENT_NFS_V4_ACL,
                "ERROR creating hash table for NFSv4 ACLs");
        return NFS_V4_ACL_INTERNAL_ERROR;
    }

    nfs4_acls_test();

    return NFS_V4_ACL_SUCCESS;
}
示例#17
0
文件: pool_tests.c 项目: jbrd/libmem
/* A freshly initialized (still live) pool must report "not empty". */
static void _ensure_pool_is_empty_returns_non_zero_on_empty_legitimate_pool( void )
{
	pool_t p;

	pool_init( &p, 4, 1, allocator_default( ) );

	/* Live pool: pool_is_empty must return zero. */
	TEST_REQUIRE( pool_is_empty( &p ) == 0 );

	pool_cleanup( &p );
}
示例#18
0
/*
 * Initialize buffers and hash links for buffers.
 *
 * Creates the buf pool, the free queues and buffer hash table, then
 * carves the `buffers` arena into nbuf fixed MAXBSIZE slots, spreading
 * bufpages pages across them (the first `residual` buffers get one
 * extra page).  Finally derives the dirty/clean page thresholds.
 */
void
bufinit(void)
{
	struct buf *bp;
	struct bqueues *dp;
	int i;
	int base, residual;

	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, M_WAITOK, &bufhash);
	/* Distribute bufpages over nbuf buffers as evenly as possible. */
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero((char *)bp, sizeof *bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		LIST_INIT(&bp->b_dep);
		/* The first `residual` buffers take the leftover pages. */
		if (i < residual)
			bp->b_bufsize = (base + 1) * PAGE_SIZE;
		else
			bp->b_bufsize = base * PAGE_SIZE;
		bp->b_flags = B_INVAL;
		/* Buffers with backing pages go on the clean queue,
		 * page-less ones on the empty queue. */
		if (bp->b_bufsize) {
			dp = &bufqueues[BQ_CLEAN];
			numfreepages += btoc(bp->b_bufsize);
			numcleanpages += btoc(bp->b_bufsize);
		} else {
			dp = &bufqueues[BQ_EMPTY];
			numemptybufs++;
		}
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}

	hidirtypages = bufpages / 4;
	lodirtypages = hidirtypages / 2;

	/*
	 * Reserve 5% of bufpages for syncer's needs,
	 * but not more than 25% and if possible
	 * not less then 2 * MAXBSIZE. locleanpages
	 * value must be not too small, but probably
	 * there are no reason to set it more than 1-2 MB.
	 */
	locleanpages = bufpages / 20;
	if (locleanpages < btoc(2 * MAXBSIZE))
		locleanpages = btoc(2 * MAXBSIZE);
	if (locleanpages > bufpages / 4)
		locleanpages = bufpages / 4;
	if (locleanpages > btoc(2 * 1024 * 1024))
		locleanpages = btoc(2 * 1024 * 1024);

#ifdef DEBUG
	mincleanpages = locleanpages;
#endif
}
示例#19
0
/*
 * allocate anons
 *
 * Creates the vm_anon pool and caps it at 1/16 of currently free
 * pages via pool_sethiwat.
 */
void
uvm_anon_init(void)	/* (void): old-style () prototype removed in C23 */
{
	pool_init(&uvm_anon_pool, sizeof(struct vm_anon), 0, 0, 0, "anonpl",
	    &pool_allocator_nointr);
	pool_sethiwat(&uvm_anon_pool, uvmexp.free / 16);
}
示例#20
0
/* Initialize the vnode structures and initialize each file system type. */
void
vfsinit(void)
{
	int i;
	struct vfsconf *vfsconflist;
	int vfsconflistlen;

	/* Pathname buffers used by namei(). */
	pool_init(&namei_pool, MAXPATHLEN, 0, 0, 0, "namei",
	    &pool_allocator_nointr);

	/* Initialize the vnode table. */
	vntblinit();

	/* Initialize the vnode name cache. */
	nchinit();

#ifdef WAPBL
	wapbl_init();
#endif

	/*
	 * Stop using vfsconf and maxvfsconf as a temporary storage,
	 * set them to their correct values now.
	 */
	vfsconflist = vfsconf;
	vfsconflistlen = maxvfsconf;
	vfsconf = NULL;
	maxvfsconf = 0;

	/* Re-register each compiled-in filesystem through vfs_register(). */
	for (i = 0; i < vfsconflistlen; i++)
		vfs_register(&vfsconflist[i]);
}
示例#21
0
/******************************************************************
 *              macho_load_debug_info_from_map
 *
 * Loads the symbolic information from a Mach-O module.
 * Returns
 *      FALSE if the file doesn't contain symbolic info (or this info
 *              cannot be read or parsed)
 *      TRUE on success
 */
static BOOL macho_load_debug_info_from_map(struct module* module,
                                           struct macho_file_map* fmap)
{
    BOOL                    ret = FALSE;
    struct macho_debug_info mdi;
    int                     result;

    TRACE("(%p, %p/%d)\n", module, fmap, fmap->fd);

    module->module.SymType = SymExport;

    /* Temporary pool and symbol hash table live only for this parse;
     * both are torn down with pool_destroy() below. */
    mdi.fmap = fmap;
    mdi.module = module;
    pool_init(&mdi.pool, 65536);
    hash_table_init(&mdi.pool, &mdi.ht_symtab, 256);
    /* Walk every LC_SYMTAB load command, feeding each into the parser;
     * result > 0 means at least one symtab was parsed successfully. */
    result = macho_enum_load_commands(fmap, LC_SYMTAB, macho_parse_symtab, &mdi);
    if (result > 0)
        ret = TRUE;
    else if (result < 0)
        WARN("Couldn't correctly read stabs\n");

    /* Runs regardless of parse outcome, consuming whatever was collected. */
    macho_finish_stabs(module, &mdi.ht_symtab);

    pool_destroy(&mdi.pool);
    return ret;
}
示例#22
0
// **TODO**: Add an test. This function shows only the usage of `append_cb`.
//
// Checks that append_cb factors out the common prime of a and b into
// the target array without mutating its mpz inputs, in both argument
// orders.  Returns NULL (0) on success or an error message string.
static char * test_single() {

	mpz_t a, b;
	mpz_array array1, array2, array_expect;
	mpz_pool pool;

	pool_init(&pool, 0);
	array_init(&array1, 3);
	array_init(&array2, 3);
	array_init(&array_expect, 3);

	// primes: 577, 727, 863
	mpz_init_set_str(a, "577", 10);
	array_add(&array_expect, a);
	mpz_set_str(a, "727", 10);
	array_add(&array_expect, a);
	mpz_set_str(a, "863", 10);
	array_add(&array_expect, a);

	// `a = 727 * 577 = 419479`
	// `b = 727 * 863 = 627401`
	mpz_set_str(a, "419479", 10);
	mpz_init_set_str(b, "627401", 10);

	// First order: (a, b).  Inputs must come back unchanged.
	append_cb(&pool, &array1, a, b);
	if (mpz_cmp_ui(a, 419479) != 0) {
		return "append_cb has changed the input value a!";
	}
	if (mpz_cmp_ui(b, 627401) != 0) {
		return "append_cb has changed the input value b!";
	}

	// Sort before comparing: array order is not guaranteed.
	array_msort(&array1);
	if (!array_equal(&array_expect, &array1)) {
		return "array1 and array_expect differ!";
	}

	// Second order: (b, a) must behave symmetrically.
	append_cb(&pool, &array2, b, a);
	if (mpz_cmp_ui(a, 419479) != 0) {
		return "append_cb has changed the input value a!";
	}
	if (mpz_cmp_ui(b, 627401) != 0) {
		return "append_cb has changed the input value b!";
	}

	array_msort(&array2);
	if (!array_equal(&array_expect, &array2)) {
		return "array2 and array_expect differ!";
	}

	mpz_clear(a);
	mpz_clear(b);
	array_clear(&array1);
	array_clear(&array2);
	array_clear(&array_expect);
	pool_clear(&pool);

	return 0;
}
/*
 * One-time creation of the IPv6 PCB pool.  Always returns 0 —
 * presumably invoked through a run-once mechanism; confirm at callers.
 */
static int
in6pcb_poolinit(void)
{

	pool_init(&in6pcb_pool, sizeof(struct in6pcb), 0, 0, 0, "in6pcbpl",
	    NULL, IPL_SOFTNET);
	return 0;
}
示例#24
0
/*
 * v7fs module init: create the pool backing struct v7fs_node
 * allocations.
 */
void
v7fs_init(void)
{

    DPRINTF("\n");
    pool_init(&v7fs_node_pool, sizeof(struct v7fs_node), 0, 0, 0,
              "v7fs_node_pool", &pool_allocator_nointr, IPL_NONE);
}
示例#25
0
/*
 * One-time creation of the IPv4 PCB pool.  Always returns 0.
 *
 * NOTE(review): IN_CB_COUNT is passed as the third pool_init argument
 * where the sibling in6pcb_poolinit passes 0 — confirm it is intended
 * for that parameter and not a misplaced prealloc count.
 */
static int
inpcb_poolinit(void)
{

	pool_init(&inpcb_pool, sizeof(struct inpcb), IN_CB_COUNT, 0, 0, "inpcbpl", NULL,
	    IPL_NET);
	return 0;
}
示例#26
0
/*
 * amap module init: create the vm_amap pool (sleeping allocation
 * allowed via PR_WAITOK) and cap it at 4096 items.
 */
void
amap_init(void)
{
	/* Initialize the vm_amap pool. */
	pool_init(&uvm_amap_pool, sizeof(struct vm_amap), 0, 0, PR_WAITOK,
	    "amappl", NULL);
	pool_sethiwat(&uvm_amap_pool, 4096);
}
示例#27
0
/*
 * Initialize the mbuf allocator.
 *
 * Older single-cluster-size variant: one pool for mbufs and one for
 * MCLBYTES clusters, followed by the cluster-limit update and
 * low-water marks.
 */
void
mbinit(void)
{
	pool_init(&mbpool, MSIZE, 0, 0, 0, "mbpl", NULL);
	pool_init(&mclpool, MCLBYTES, 0, 0, 0, "mclpl", NULL);

	nmbclust_update();

	/*
	 * Set a low water mark for both mbufs and clusters.  This should
	 * help ensure that they can be allocated in a memory starvation
	 * situation.  This is important for e.g. diskless systems which
	 * must allocate mbufs in order for the pagedaemon to clean pages.
	 */
	pool_setlowat(&mbpool, mblowat);
	pool_setlowat(&mclpool, mcllowat);
}
示例#28
0
/**
 * @brief Initialize the DRC package.
 *
 * Creates the three object pools (duplicate-request entries, nfs_res_t
 * results, TCP DRC structures), LogFatal-ing on any pool failure, then
 * builds the shared DRC state: its mutex, recycle tree, recycle queue,
 * and the global UDP DRC.
 */
void dupreq2_pkginit(void)
{
	int code __attribute__ ((unused)) = 0;

	dupreq_pool = pool_init("Duplicate Request Pool",
				sizeof(dupreq_entry_t),
				pool_basic_substrate, NULL, NULL, NULL);
	if (unlikely(!(dupreq_pool)))
		LogFatal(COMPONENT_INIT,
			 "Error while allocating duplicate request pool");

	nfs_res_pool = pool_init("nfs_res_t pool", sizeof(nfs_res_t),
				 pool_basic_substrate,
				 NULL, NULL, NULL);
	if (unlikely(!(nfs_res_pool)))
		LogFatal(COMPONENT_INIT,
			 "Error while allocating nfs_res_t pool");

	tcp_drc_pool = pool_init("TCP DRC Pool", sizeof(drc_t),
				 pool_basic_substrate,
				 NULL, NULL, NULL);
	if (unlikely(!(tcp_drc_pool)))
		LogFatal(COMPONENT_INIT,
			 "Error while allocating TCP DRC pool");

	drc_st = gsh_calloc(1, sizeof(struct drc_st));

	/* init shared statics */
	gsh_mutex_init(&drc_st->mtx, NULL);

	/* recycle_t */
	code =
	    rbtx_init(&drc_st->tcp_drc_recycle_t, drc_recycle_cmpf,
		      nfs_param.core_param.drc.tcp.recycle_npart,
		      RBT_X_FLAG_ALLOC);
	/* XXX error? */

	/* init recycle_q */
	TAILQ_INIT(&drc_st->tcp_drc_recycle_q);
	drc_st->tcp_drc_recycle_qlen = 0;
	drc_st->last_expire_check = time(NULL);
	drc_st->expire_delta = nfs_param.core_param.drc.tcp.recycle_expire_s;

	/* UDP DRC is global, shared */
	init_shared_drc();
}
示例#29
0
文件: pf_if.c 项目: ryo/netbsd-src
/*
 * One-time pf interface-layer setup: create the dynamic-address pool,
 * the interface buffer, and the catch-all "all" kif; on NetBSD also
 * attach every existing interface and install the pfil hooks.
 * Idempotent — returns immediately if already initialized.
 */
void
pfi_initialize(void)
{
	int s;
	int bound;

	if (pfi_all != NULL)	/* already initialized */
		return;

	/* The two branches differ only in the pool_init signature of
	 * each OS (NetBSD takes an extra IPL argument). */
#ifdef __NetBSD__
	pool_init(&pfi_addr_pl, sizeof(struct pfi_dynaddr), 0, 0, 0,
	    "pfiaddrpl", &pool_allocator_nointr, IPL_NONE);
#else
	pool_init(&pfi_addr_pl, sizeof(struct pfi_dynaddr), 0, 0, 0,
	    "pfiaddrpl", &pool_allocator_nointr);
#endif /* !__NetBSD__ */
	pfi_buffer_max = 64;
	pfi_buffer = malloc(pfi_buffer_max * sizeof(*pfi_buffer),
	    PFI_MTYPE, M_WAITOK);

	if ((pfi_all = pfi_kif_get(IFG_ALL)) == NULL)
		panic("pfi_kif_get for pfi_all failed");

#ifdef __NetBSD__
	/* Walk the interface list under pserialize, taking a psref on
	 * each ifp so it stays valid while we attach it to pf. */
	ifnet_t *ifp;
	bound = curlwp_bind();
	s = pserialize_read_enter();
	IFNET_READER_FOREACH(ifp) {
		struct psref psref;
		psref_acquire(&psref, &ifp->if_psref, ifnet_psref_class);
		pserialize_read_exit(s);

		pfi_init_groups(ifp);
		pfi_attach_ifnet(ifp);

		s = pserialize_read_enter();
		psref_release(&psref, &ifp->if_psref, ifnet_psref_class);
	}
	pserialize_read_exit(s);
	curlwp_bindx(bound);

	/* Track future interface and address changes via pfil hooks. */
	pfil_add_hook(pfil_ifnet_wrapper, NULL, PFIL_IFNET, if_pfil);
	pfil_add_hook(pfil_ifaddr_wrapper, NULL, PFIL_IFADDR, if_pfil);
#endif /* __NetBSD__ */
}
示例#30
0
/*
 * adosfs module init: register the M_ANODE malloc type, set up the
 * hash lock, and create the pool backing struct anode allocations.
 */
void
adosfs_init(void)
{

	malloc_type_attach(M_ANODE);
	mutex_init(&adosfs_hashlock, MUTEX_DEFAULT, IPL_NONE);
	pool_init(&adosfs_node_pool, sizeof(struct anode), 0, 0, 0, "adosndpl",
	    &pool_allocator_nointr, IPL_NONE);
}