/*
 * Set up the minix inode subsystem: carve out a slab for inode objects
 * and reset every bucket of the inode hash table.  Panics if the slab
 * cannot be allocated (called once at boot, before inodes are used).
 */
void minix_inode_init(void)
{
	int bucket;

	/* Slab backing all struct minix_inode allocations. */
	minix_inode_slab = alloc_slab(sizeof(struct minix_inode));
	if (!minix_inode_slab)
		panic("No enough memory for minix inode slab");

	/* Start with every hash chain empty. */
	for (bucket = 0; bucket < MINIX_INODE_HASH_SIZE; bucket++)
		hlist_head_init(&minix_inode_htable[bucket]);
}
/*
 * Set up the block-buffer layer: allocate the slab for struct block,
 * empty the buffer hash table, then bring up the hard disk driver and
 * run the block self-test.  Panics if the slab cannot be allocated.
 */
void block_init(void)
{
	int bucket;

	/* Slab backing all block buffer descriptors. */
	block_slab = alloc_slab(sizeof(struct block));
	if (!block_slab)
		panic("No memory for block slab");

	/* Start with every buffer hash chain empty. */
	for (bucket = 0; bucket < BLOCK_HASH_SIZE; bucket++)
		hlist_head_init(&block_htable[bucket]);

	hd_init();
	block_test();
}
/* called by the current thread to initialize a transaction descriptor */ stm_tx_t *stm_new() { stm_tx_t *newtx; if (unused_tx!=NULL) { pthread_mutex_lock(&unused_tx_mutex); if (unused_tx!=NULL) { tx_block_t *cur = unused_tx; unused_tx = unused_tx->next; newtx = (stm_tx_t*)cur->tx; free(cur); pthread_mutex_unlock(&unused_tx_mutex); return newtx; } pthread_mutex_unlock(&unused_tx_mutex); } if ((newtx = (stm_tx_t*)malloc(sizeof(stm_tx_t)))==NULL) { perror("malloc: no free memory!"); exit(1); } DPRINTF("stm new: %p\n", newtx); #ifdef DEBUG memset(newtx, 0x0, sizeof(stm_tx_t)); #endif int ret = posix_memalign((void**)&(newtx->writehash), 64, sizeof(writeset_t*)*WBUF_MAX_HASH_ARRAY_SIZE); newtx->whashsize = WBUF_HASH_ARRAY_SIZE; newtx->whashmask = WBUF_HASH_ARRAY_SIZE-1; if (newtx->writehash==NULL || ret!=0) { perror("malloc: no free memory!"); exit(1); } newtx->status = TX_IDLE; newtx->freeslabs = NULL; newtx->buffers = NULL; newtx->writeset = alloc_slab(newtx); ret = posix_memalign((void**)&(newtx->lockset), 64, NRRLENTRIESINSET*sizeof(lockset_t)); ret = ret + posix_memalign((void**)&newtx->readset, 64, 4*NRRLENTRIESINSET*sizeof(readset_t)); newtx->maxlocks = NRRLENTRIESINSET; newtx->locksize = NRRLENTRIESINSET*sizeof(lockset_t); newtx->maxreads = 4*NRRLENTRIESINSET; newtx->readsize = 4*NRRLENTRIESINSET*sizeof(readset_t); if (newtx->readset==NULL || newtx->lockset==NULL || ret!=0) { perror("malloc: no free memory!"); exit(1); } #ifdef ADAPTIVENESS // adaptiveness newtx->writethrough=1; #ifdef ADAPTIVEHASH newtx->adaptive_hash=0; #endif #ifdef ADAPTIVEWHASH2 newtx->adaptive_hash=8; #endif newtx->wtotal=0; newtx->nrtx=0; #endif /* clear read and writeset */ newtx->nr_uniq_writes = 0; #ifdef GLOBAL_STATS newtx->retries=0; newtx->aborts=0; newtx->commits=0; xxstm_nr_tx++; #endif #ifdef STATS newtx->nb_reads = 0; newtx->nb_writes = 0; newtx->nb_locks=0; newtx->nb_min_reads=-1; newtx->nb_max_reads=0; newtx->nb_min_writes=-1; newtx->nb_max_writes=0; newtx->nb_tot_reads=0; 
newtx->nb_tot_writes=0; newtx->nb_read_ver_err=0; newtx->nb_read_ver_err_rec=0; newtx->nb_read_ver_change=0; newtx->nb_lock_ver_err=0; newtx->nb_lock_ver_err_rec=0; #endif return newtx; }
static int minit(void) { int err = 0; init_hash_parameters(); if (0 > (err = init_some_parameters())) goto out; if (0 > (err = alloc_percpu_file())) goto err_alloc_file; if (0 > (err = alloc_slab())) goto err_alloc_slab; if (0 > (err = alloc_bitmap())) goto err_bitmap; if (0 > (err = initial_hash_table_cache())) goto err_hash_table_cache; printk(KERN_INFO "Start %s.", THIS_MODULE->name); if (0 > (err = nf_register_hook(&nf_out_ops))) { printk(KERN_ERR "Failed to register nf_out %s.\n", THIS_MODULE->name); goto err_nf_reg_out; } if (0 > (err = nf_register_hook(&nf_in_ops))) { printk(KERN_ERR "Failed to register nf_in %s.\n", THIS_MODULE->name); goto err_nf_reg_in; } if (tcp_alloc_sha1sig_pool() == NULL) { printk(KERN_ERR "Failed to alloc sha1 pool %s.\n", THIS_MODULE->name); goto err_sha1siq_pool; } err = register_jprobe(&jps_netif_receive_skb); if (err < 0) { printk(KERN_ERR "Failed to register jprobe netif_receive_skb %s.\n", THIS_MODULE->name); goto out; } kprobe_in_reged = 1; goto out; err_sha1siq_pool: tcp_free_sha1sig_pool(); err_nf_reg_in: nf_unregister_hook(&nf_in_ops); err_nf_reg_out: nf_unregister_hook(&nf_out_ops); err_hash_table_cache: release_hash_table_cache(); err_bitmap: free_bitmap(); err_alloc_slab: free_slab(); err_alloc_file: free_percpu_file(); out: return err; }