Example #1
/*
 * Name cache initialization, from vfs_init() when we are booting
 */
void
nchinit()
{

	TAILQ_INIT(&nclruhead);
	nchashtbl = hashinit(desiredvnodes, M_CACHE, M_WAITOK, &nchash);
	ncvhashtbl = hashinit(desiredvnodes/8, M_CACHE, M_WAITOK, &ncvhash);
	pool_init(&nch_pool, sizeof(struct namecache), 0, 0, 0, "nchpl",
	    &pool_allocator_nointr);
}
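A minimal sketch, not part of the example above, of how the table/mask pair filled in by hashinit() is normally consumed: a hash of the name is masked with nchash to pick a bucket in nchashtbl[]. The hash32_buf() call and the xor with the directory vnode are assumptions of this sketch, not necessarily the cache's real hash.
static u_long
nc_bucket_sketch(struct vnode *dvp, const char *name, size_t namelen)
{
	u_long hash;

	/* Mix the name and the directory vnode, then mask into a bucket. */
	hash = hash32_buf(name, namelen, HASH32_BUF_INIT) ^ (u_long)dvp;
	return hash & nchash;		/* index into nchashtbl[] */
}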
Example #2
void
udp_init()
{
	LIST_INIT(&udb);
	udbinfo.listhead = &udb;
	udbinfo.hashbase = hashinit(UDBHASHSIZE, M_PCB, &udbinfo.hashmask);
	udbinfo.porthashbase = hashinit(UDBHASHSIZE, M_PCB,
					&udbinfo.porthashmask);
	udbinfo.ipi_zone = zinit("udpcb", sizeof(struct inpcb), maxsockets,
				 ZONE_INTERRUPT, 0);
}
Example #3
/*
 * Initialize ptyfsnode hash table.
 */
void
ptyfs_hashinit(void)
{
	ptyfs_used_tbl = hashinit(desiredvnodes / 4, HASH_LIST, true,
	    &ptyfs_used_mask);
	ptyfs_free_tbl = hashinit(desiredvnodes / 4, HASH_LIST, true,
	    &ptyfs_free_mask);
	mutex_init(&ptyfs_hashlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&ptyfs_used_slock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&ptyfs_free_slock, MUTEX_DEFAULT, IPL_NONE);
}
Example #4
void
udp_init()
{
	INP_INFO_LOCK_INIT(&udbinfo, "udp");
	LIST_INIT(&udb);
	udbinfo.listhead = &udb;
	udbinfo.hashbase = hashinit(UDBHASHSIZE, M_PCB, &udbinfo.hashmask);
	udbinfo.porthashbase = hashinit(UDBHASHSIZE, M_PCB,
					&udbinfo.porthashmask);
	udbinfo.ipi_zone = uma_zcreate("udpcb", sizeof(struct inpcb), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(udbinfo.ipi_zone, maxsockets);
}
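A hypothetical teardown, shown only to illustrate ownership of what udp_init() allocates; the UDP layer above is never actually unloaded. hashdestroy() is hashinit()'s FreeBSD counterpart and returns the bucket arrays to M_PCB, while uma_zdestroy() releases the inpcb zone.
static void
udp_uninit_sketch(void)
{
	/* Free both hash tables allocated with hashinit(..., M_PCB, ...). */
	hashdestroy(udbinfo.hashbase, M_PCB, udbinfo.hashmask);
	hashdestroy(udbinfo.porthashbase, M_PCB, udbinfo.porthashmask);
	/* Tear down the UMA zone created for inpcbs. */
	uma_zdestroy(udbinfo.ipi_zone);
}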
Example #5
/*
 * Initialize global process hashing structures.
 */
void
procinit()
{

	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
#ifdef LCTX
	LIST_INIT(&alllctx);
	alllctx_cnt = 0;
#endif
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash);
}
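A minimal lookup sketch, not from the file above, showing how the maxproc / 4 buckets allocated here are used: the pid is masked with pidhash to pick a bucket, then the chain is walked. Locking is omitted, and the p_hash link name is an assumption of this sketch.
static struct proc *
pfind_sketch(pid_t pid)
{
	struct proc *p;

	LIST_FOREACH(p, &pidhashtbl[pid & pidhash], p_hash)
		if (p->p_pid == pid)
			return p;
	return NULL;
}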
Example #6
/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(void)
{
	const struct protosw *pr;
	int i;

	sysctl_net_inet_ip_setup(NULL);

	pool_init(&inmulti_pool, sizeof(struct in_multi), 0, 0, 0, "inmltpl",
	    NULL, IPL_SOFTNET);

	pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == 0)
		panic("ip_init");
	for (i = 0; i < IPPROTO_MAX; i++)
		ip_protox[i] = pr - inetsw;
	for (pr = inetdomain.dom_protosw;
	    pr < inetdomain.dom_protoswNPROTOSW; pr++)
		if (pr->pr_domain->dom_family == PF_INET &&
		    pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW)
			ip_protox[pr->pr_protocol] = pr - inetsw;

	ip_reass_init();

	ip_ids = ip_id_init();
	ip_id = time_second & 0xfffff;

	ipintrq.ifq_maxlen = IFQ_MAXLEN;

	TAILQ_INIT(&in_ifaddrhead);
	in_ifaddrhashtbl = hashinit(IN_IFADDR_HASH_SIZE, HASH_LIST, true,
	    &in_ifaddrhash);
	in_multihashtbl = hashinit(IN_IFADDR_HASH_SIZE, HASH_LIST, true,
	    &in_multihash);
	ip_mtudisc_timeout_q = rt_timer_queue_create(ip_mtudisc_timeout);
#ifdef GATEWAY
	ipflow_init(ip_hashsize);
#endif

	/* Register our Packet Filter hook. */
	inet_pfil_hook = pfil_head_create(PFIL_TYPE_AF, (void *)AF_INET);
	KASSERT(inet_pfil_hook != NULL);

#ifdef MBUFTRACE
	MOWNER_ATTACH(&ip_tx_mowner);
	MOWNER_ATTACH(&ip_rx_mowner);
#endif /* MBUFTRACE */

	ipstat_percpu = percpu_alloc(sizeof(uint64_t) * IP_NSTATS);
}
Example #7
void
in_pcbgroup_init(struct inpcbinfo *pcbinfo, u_int hashfields,
    int hash_nelements)
{
	struct inpcbgroup *pcbgroup;
	u_int numpcbgroups, pgn;

	/*
	 * Only enable connection groups for a protocol if it has been
	 * specifically requested.
	 */
	if (hashfields == IPI_HASHFIELDS_NONE)
		return;

	/*
	 * Connection groups are about multi-processor load distribution,
	 * lock contention, and connection CPU affinity.  As such, no point
	 * in turning them on for a uniprocessor machine, it only wastes
	 * memory.
	 */
	if (mp_ncpus == 1)
		return;

	/*
	 * Use one group per CPU for now.  If we decide to do dynamic
	 * rebalancing a la RSS, we'll need to shift left by at least 1.
	 */
	numpcbgroups = mp_ncpus;

	pcbinfo->ipi_hashfields = hashfields;
	pcbinfo->ipi_pcbgroups = malloc(numpcbgroups *
	    sizeof(*pcbinfo->ipi_pcbgroups), M_PCB, M_WAITOK | M_ZERO);
	pcbinfo->ipi_npcbgroups = numpcbgroups;
	pcbinfo->ipi_wildbase = hashinit(hash_nelements, M_PCB,
	    &pcbinfo->ipi_wildmask);
	for (pgn = 0; pgn < pcbinfo->ipi_npcbgroups; pgn++) {
		pcbgroup = &pcbinfo->ipi_pcbgroups[pgn];
		pcbgroup->ipg_hashbase = hashinit(hash_nelements, M_PCB,
		    &pcbgroup->ipg_hashmask);
		INP_GROUP_LOCK_INIT(pcbgroup, "pcbgroup");

		/*
		 * Initialise notional affinity of the pcbgroup -- for RSS,
		 * we want the same notion of affinity as NICs to be used.
		 * Just round robin for the time being.
		 */
		pcbgroup->ipg_cpu = (pgn % mp_ncpus);
	}
}
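A hypothetical helper, not part of the function above, sketching how a connection's RSS/flow hash could be mapped onto one of the per-CPU groups once they exist; the modulo keeps the index inside ipi_npcbgroups regardless of the hash width.
static struct inpcbgroup *
in_pcbgroup_byflow_sketch(struct inpcbinfo *pcbinfo, uint32_t flowhash)
{
	/* One group per CPU was allocated above; pick one by flow hash. */
	return (&pcbinfo->ipi_pcbgroups[flowhash % pcbinfo->ipi_npcbgroups]);
}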
Example #8
void
in_init(void)
{
	pool_init(&inmulti_pool, sizeof(struct in_multi), 0, 0, 0, "inmltpl",
	    NULL, IPL_SOFTNET);
	TAILQ_INIT(&in_ifaddrhead);

	in_ifaddrhashtbl = hashinit(IN_IFADDR_HASH_SIZE, HASH_LIST, true,
	    &in_ifaddrhash);
	in_multihashtbl = hashinit(IN_IFADDR_HASH_SIZE, HASH_LIST, true,
	    &in_multihash);
	rw_init(&in_multilock);

	in_sysctl_init(NULL);
}
Example #9
/*
 * Tcp initialization
 */
void
tcp_init()
{
	int hashsize = TCBHASHSIZE;
	
	tcp_ccgen = 1;
	tcp_cleartaocache();

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	LIST_INIT(&tcb);
	tcbinfo.listhead = &tcb;
	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
					&tcbinfo.porthashmask);
	tcbinfo.ipi_zone = zinit("tcpcb", sizeof(struct inp_tp), maxsockets,
				 ZONE_INTERRUPT, 0);

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
	    &tcp_reass_maxseg);

#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR

	syncache_init();
}
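A minimal sketch, not from the file above, of why the power-of-two check matters: the mask returned by hashinit() is (size - 1), so a TCB bucket can be chosen with a single AND. The tuple_hash parameter stands in for whatever hash of the connection 4-tuple the real lookup computes, and the struct inpcbhead return type is assumed from the surrounding inpcb code.
static struct inpcbhead *
tcb_bucket_sketch(u_int32_t tuple_hash)
{
	/* Works only because hashsize is a power of two. */
	return (&tcbinfo.hashbase[tuple_hash & tcbinfo.hashmask]);
}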
Example #10
static void
start_log(struct log_stream *log)
{
  static int ht_initialized = 0;
  FILE *f;

  if (!log->filename || !*log->filename) {
    log->fp = stderr;
  } else {
    if (!ht_initialized) {
      hashinit(&htab_logfiles, 8);
      ht_initialized = 1;
    }
    if ((f = hashfind(strupper(log->filename), &htab_logfiles))) {
      /* We've already opened this file for another log, so just use that pointer */
      log->fp = f;
    } else {
      log->fp = fopen(log->filename, "a+");
      if (log->fp == NULL) {
        fprintf(stderr, "WARNING: cannot open log %s: %s\n", log->filename,
                strerror(errno));
        log->fp = stderr;
      } else {
        hashadd(strupper(log->filename), log->fp, &htab_logfiles);
        fputs("START OF LOG.\n", log->fp);
        fflush(log->fp);
      }
    }
  }
  if (!log->buffer)
    log->buffer = allocate_bufferq(LOG_BUFFER_SIZE);
}
Example #11
void
dqreinit(void)
{
	struct dquot *dq;
	struct dqhashhead *oldhash, *hash;
	struct vnode *dqvp;
	u_long oldmask, mask, hashval;
	int i;

	hash = hashinit(desiredvnodes, HASH_LIST, true, &mask);
	mutex_enter(&dqlock);
	oldhash = dqhashtbl;
	oldmask = dqhash;
	dqhashtbl = hash;
	dqhash = mask;
	for (i = 0; i <= oldmask; i++) {
		while ((dq = LIST_FIRST(&oldhash[i])) != NULL) {
			dqvp = dq->dq_ump->um_quotas[dq->dq_type];
			LIST_REMOVE(dq, dq_hash);
			hashval = DQHASH(dqvp, dq->dq_id);
			LIST_INSERT_HEAD(&dqhashtbl[hashval], dq, dq_hash);
		}
	}
	mutex_exit(&dqlock);
	hashdone(oldhash, HASH_LIST, oldmask);
}
Example #12
module_t * bindmodule( OBJECT * name )
{

    if ( !name )
    {
        return &root;
    }
    else
    {
        PROFILE_ENTER( BINDMODULE );

        module_t m_;
        module_t * m = &m_;

        if ( !module_hash )
            module_hash = hashinit( sizeof( module_t ), "modules" );

        m->name = name;

        if ( hashenter( module_hash, (HASHDATA * *)&m ) )
        {
            m->name = object_copy( name );
            m->variables = 0;
            m->rules = 0;
            m->imported_modules = 0;
            m->class_module = 0;
            m->native_rules = 0;
            m->user_module = 0;
        }

        PROFILE_EXIT( BINDMODULE );

        return m;
    }
}
Example #13
/*
 * Initialize inode hash table.
 */
void
ufs_ihashinit()
{

	ihashtbl = hashinit(desiredvnodes, M_UFSMNT, &ihash);
	simple_lock_init(&ufs_ihash_slock);
}
Example #14
void
vn_initialize_syncerd(void)
{
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, M_WAITOK,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
}
Example #15
void profile_enter( char * rulename, profile_frame * frame )
{
    if ( DEBUG_PROFILE )
    {
        clock_t start = clock();
        profile_info info;
        profile_info * p = &info;

        if ( !rulename ) p = &profile_other;

        if ( !profile_hash && rulename )
            profile_hash = hashinit( sizeof( profile_info ), "profile" );

        info.name = rulename;

        if ( rulename && hashenter( profile_hash, (HASHDATA * *)&p ) )
            p->cumulative = p->net = p->num_entries = p->stack_count = p->memory = 0;

        ++p->num_entries;
        ++p->stack_count;

        frame->info = p;

        frame->caller = profile_stack;
        profile_stack = frame;

        frame->entry_time = clock();
        frame->overhead = 0;
        frame->subrules = 0;

        /* caller pays for the time it takes to play with the hash table */
        if ( frame->caller )
            frame->caller->overhead += frame->entry_time - start;
    }
}
Example #16
void
chfs_ihashreinit(void)
{
	struct chfs_inode *ip;
	struct ihashhead *oldhash, *hash;
	u_long oldmask, mask, val;
	int i;

	dbg("reiniting\n");

	hash = hashinit(desiredvnodes, HASH_LIST, true, &mask);
	mutex_enter(&chfs_ihash_lock);
	oldhash = chfs_ihashtbl;
	oldmask = chfs_ihash;
	chfs_ihashtbl = hash;
	chfs_ihash = mask;
	for (i = 0; i <= oldmask; i++) {
		while ((ip = LIST_FIRST(&oldhash[i])) != NULL) {
			LIST_REMOVE(ip, hash_entry);
			val = INOHASH(ip->dev, ip->ino);
			LIST_INSERT_HEAD(&hash[val], ip, hash_entry);
		}
	}
	mutex_exit(&chfs_ihash_lock);
	hashdone(oldhash, HASH_LIST, oldmask);
}
Example #17
/*
 * Initialize inode hash table.
 */
void
ufs_ihashinit()
{

	ihashtbl = hashinit(desiredvnodes, M_UFSIHASH, &ihash);
	mtx_init(&ufs_ihash_mtx, "ufs ihash", NULL, MTX_DEF);
}
Example #18
void path_add_key( OBJECT * path )
{
    struct path_key_entry * result;
    int found;

    if ( ! path_key_cache )
        path_key_cache = hashinit( sizeof( struct path_key_entry ), "path to key" );

    result = (struct path_key_entry *)hash_insert( path_key_cache, path, &found );
    if ( !found )
    {
        string buf[1];
        OBJECT * normalized;
        struct path_key_entry * nresult;
        result->path = path;
        string_copy( buf, object_str( path ) );
        normalize_path( buf );
        normalized = object_new( buf->value );
        string_free( buf );
        nresult = (struct path_key_entry *)hash_insert( path_key_cache, normalized, &found );
        if ( !found || nresult == result )
        {
            nresult->path = object_copy( normalized );
            nresult->key = object_copy( path );
        }
        object_free( normalized );
        if ( nresult != result )
        {
            result->path = object_copy( path );
            result->key = object_copy( nresult->key );
        }
    }
}
Example #19
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
    struct sf_buf *sf_bufs;
    vm_offset_t sf_base;
    int i;

#ifdef SFBUF_OPTIONAL_DIRECT_MAP
    if (SFBUF_OPTIONAL_DIRECT_MAP)
        return;
#endif

    nsfbufs = NSFBUFS;
    TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

    sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
    TAILQ_INIT(&sf_buf_freelist);
    sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
    sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
                     M_WAITOK | M_ZERO);
    for (i = 0; i < nsfbufs; i++) {
        sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
        TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
    }
    sf_buf_alloc_want = 0;
    mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
Example #20
OBJECT * make_class_module( LIST * xname, LIST * bases, FRAME * frame )
{
    OBJECT     * name = class_module_name( list_front( xname ) );
    OBJECT   * * pp;
    module_t   * class_module = 0;
    module_t   * outer_module = frame->module;
    int found;
    LISTITER iter, end;

    if ( !classes )
        classes = hashinit( sizeof( OBJECT * ), "classes" );

    pp = (OBJECT * *)hash_insert( classes, list_front( xname ), &found );
    if ( !found )
    {
        *pp = object_copy( list_front( xname ) );
    }
    else
    {
        printf( "Class %s already defined\n", object_str( list_front( xname ) ) );
        abort();
    }
    check_defined( bases );

    class_module = bindmodule( name );

    var_set( class_module, constant_name, xname, VAR_SET );
    var_set( class_module, constant_bases, bases, VAR_SET );

    iter = list_begin( bases ), end = list_end( bases );
    for ( ; iter != end; iter = list_next( iter ) )
        import_base_rules( class_module, list_item( iter ) );

    return name;
}
Example #21
/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
    struct sf_buf *sf_bufs;
    vm_offset_t sf_base;
    int i;

    /* Don't bother on systems with a direct map */
    if (hw_direct_map)
        return;

    nsfbufs = NSFBUFS;
    TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

    sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
    TAILQ_INIT(&sf_buf_freelist);
    sf_base = kva_alloc(nsfbufs * PAGE_SIZE);
    sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
                     M_NOWAIT | M_ZERO);

    for (i = 0; i < nsfbufs; i++) {
        sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
        TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
    }
    sf_buf_alloc_want = 0;
    mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
Example #22
void declare_native_rule( const char * module, const char * rule, const char * * args,
                          LIST * (*f)( FRAME *, int ), int version )
{
    OBJECT * module_obj = 0;
    module_t * m;
    if ( module )
    {
        module_obj = object_new( module );
    }
    m = bindmodule( module_obj );
    if ( module_obj )
    {
        object_free( module_obj );
    }
    if (m->native_rules == 0) {
        m->native_rules = hashinit( sizeof( native_rule_t ), "native rules");
    }

    {
        native_rule_t n, *np = &n;
        n.name = object_new( rule );
        if (args)
        {
            n.arguments = args_new();
            lol_build( n.arguments->data, args );
        }
        else
        {
            n.arguments = 0;
        }
        n.procedure = function_builtin( f, 0 );
        n.version = version;
        hashenter(m->native_rules, (HASHDATA**)&np);
    }
}
Example #23
/* Add you own runtime configuration options here, and you can set
 * them in mush.cnf.
 */
void
local_configs(void)
{
#ifdef EXAMPLE
  /* For each config parameter you add, you should initialize it as a
   * static variable here (or a global variable elsewhere in your
   * code)
   */
  static int config_example = 1;
  static char config_string[BUFFER_LEN];
#endif

  /* Initial size of this hashtable should be close to the number of
   * add_config()'s you plan to do.
   */
  hashinit(&local_options, 4);

#ifdef EXAMPLE
  /* Call add_config for each config parameter you want to add.
   * Note the use of &config_example for simple types (bool, int),
   * but just config_string for strings.
   */
  add_config("use_example", cf_bool, &config_example, sizeof config_example,
             "cosmetic");
  add_config("some_string", cf_str, config_string, sizeof config_string,
             "cosmetic");
#endif
}
Example #24
/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
	struct buf *bp;
	struct bqueues *dp;
	int i;
	int base, residual;

	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, M_WAITOK, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero((char *)bp, sizeof *bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		LIST_INIT(&bp->b_dep);
		if (i < residual)
			bp->b_bufsize = (base + 1) * PAGE_SIZE;
		else
			bp->b_bufsize = base * PAGE_SIZE;
		bp->b_flags = B_INVAL;
		if (bp->b_bufsize) {
			dp = &bufqueues[BQ_CLEAN];
			numfreepages += btoc(bp->b_bufsize);
			numcleanpages += btoc(bp->b_bufsize);
		} else {
			dp = &bufqueues[BQ_EMPTY];
			numemptybufs++;
		}
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}

	hidirtypages = bufpages / 4;
	lodirtypages = hidirtypages / 2;

	/*
	 * Reserve 5% of bufpages for the syncer's needs,
	 * but not more than 25% and if possible
	 * not less than 2 * MAXBSIZE.  The locleanpages
	 * value must not be too small, but there is probably
	 * no reason to set it higher than 1-2 MB.
	 */
	locleanpages = bufpages / 20;
	if (locleanpages < btoc(2 * MAXBSIZE))
		locleanpages = btoc(2 * MAXBSIZE);
	if (locleanpages > bufpages / 4)
		locleanpages = bufpages / 4;
	if (locleanpages > btoc(2 * 1024 * 1024))
		locleanpages = btoc(2 * 1024 * 1024);

#ifdef DEBUG
	mincleanpages = locleanpages;
#endif
}
Example #25
file_info_t * file_info( OBJECT * filename )
{
    file_info_t *finfo = &filecache_finfo;
    int found;

    if ( !filecache_hash )
        filecache_hash = hashinit( sizeof( file_info_t ), "file_info" );

    filename = path_as_key( filename );

    finfo = (file_info_t *)hash_insert( filecache_hash, filename, &found );
    if ( !found )
    {
        /* printf( "file_info: %s\n", filename ); */
        finfo->name = object_copy( filename );
        finfo->is_file = 0;
        finfo->is_dir = 0;
        finfo->size = 0;
        finfo->time = 0;
        finfo->files = L0;
    }

    object_free( filename );

    return finfo;
}
Example #26
/*
 * Initialize hash links for nfsnodes
 * and build nfsnode free list.
 */
void
nfs_nhinit(void)
{
	nfsnode_objcache = objcache_create_simple(M_NFSNODE, sizeof(struct nfsnode));
	nfsnodehashtbl = hashinit(desiredvnodes, M_NFSHASH, &nfsnodehash);
	lockinit(&nfsnhash_lock, "nfsnht", 0, 0);
}
Example #27
/*
 * Initialize the server request cache list
 */
void
nfsrv_initcache()
{

	nfsrvhashtbl = hashinit(desirednfsrvcache, M_NFSD, M_WAITOK, &nfsrvhash);
	TAILQ_INIT(&nfsrvlruhead);
}
Example #28
/*
 * Initialize inode hash table.
 */
void
ntfs_nthashinit()
{
	lockinit(&ntfs_hashlock, PINOD, "ntfs_nthashlock", 0, 0);
	ntfs_nthashtbl = hashinit(desiredvnodes, M_NTFSNTHASH, &ntfs_nthash);
	mtx_init(&ntfs_nthash_mtx, "ntfs nthash", NULL, MTX_DEF);
}
Example #29
const char *
newstr( const char *string )
{
	STRING str, *s = &str;

	if( !strhash )
	    strhash = hashinit( sizeof( STRING ), "strings" );

	*s = string;

	if( hashenter( strhash, (HASHDATA **)&s ) )
	{
	    int l = strlen( string );
#if 1
		if (!stralloc)
			stralloc = alloc2_init(4096);
		char *m = alloc2_enter(stralloc, l + 1);
#else
	    char *m = (char *)malloc( l + 1 );
#endif
	    if (DEBUG_MEM)
		    printf("newstr: allocating %d bytes\n", l + 1 );

	    strtotal += l + 1;
	    memcpy( m, string, l + 1 );
	    *s = m;
	}

	return *s;
}
Example #30
/*
 * Initialize global process hashing structures.
 */
void
procinit()
{

	sx_init(&allproc_lock, "allproc");
	sx_init(&proctree_lock, "proctree");
	mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF);
	LIST_INIT(&allproc);
	LIST_INIT(&zombproc);
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
	proc_zone = uma_zcreate("PROC", sched_sizeof_proc(),
	    proc_ctor, proc_dtor, proc_init, proc_fini,
	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uihashinit();
}