Example #1
File: os.c Project: jiamacs/rhype
struct os *
os_create(void)
{
	struct os *newOS;

	newOS = os_alloc(MAX_CPU);
	if (newOS == NULL)
		return NULL;

	rw_lock_init(&newOS->po_mutex);
	logical_mmap_init(newOS);
	newOS->po_state = PO_STATE_CREATED;
	newOS->po_lpid = atomic_add(&cur_lpid, 1);

	/* add to global OS hash */
	lock_acquire(&hype_mutex);
	newOS->os_hashentry.he_key = newOS->po_lpid;
	ht_insert(&os_hash, &newOS->os_hashentry);
	lock_release(&hype_mutex);

	lock_init(&newOS->po_events.oe_lock);
	dlist_init(&newOS->po_events.oe_list);
	dlist_init(&newOS->po_resources);

	return newOS;
}
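The rhype variant above takes only the lock pointer, unlike the named two-argument Haiku variant in the examples that follow. As a usage sketch, a lookup counterpart to os_create() might search the same global hash under hype_mutex; os_find, ht_lookup and the container-of step are assumed names for illustration, not taken from the rhype source:

/* Hypothetical lookup counterpart to os_create(): find an OS by lpid in the
 * global hash. hype_mutex, os_hash and the he_key layout come from the
 * example above; ht_lookup and CONTAINER_OF are assumed. */
struct os *
os_find(uval lpid)
{
	struct os *os = NULL;
	struct hash_entry *he;

	lock_acquire(&hype_mutex);
	he = ht_lookup(&os_hash, lpid);
	if (he != NULL)
		os = CONTAINER_OF(he, struct os, os_hashentry);
	lock_release(&hype_mutex);

	return os;
}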
Example #2
/*static*/ status_t
VMAddressSpace::Init()
{
	rw_lock_init(&sAddressSpaceTableLock, "address spaces table");

	// create the address space hash table
	{
		new(&sAddressSpaceTable) AddressSpaceTable;
		status_t error = sAddressSpaceTable.Init(ASPACE_HASH_TABLE_SIZE);
		if (error != B_OK)
			panic("vm_init: error creating aspace hash table\n");
	}

	// create the initial kernel address space
	if (Create(B_SYSTEM_TEAM, KERNEL_BASE, KERNEL_SIZE, true,
			&sKernelAddressSpace) != B_OK) {
		panic("vm_init: error creating kernel address space!\n");
	}

	add_debugger_command("aspaces", &_DumpListCommand,
		"Dump a list of all address spaces");
	add_debugger_command("aspace", &_DumpCommand,
		"Dump info about a particular address space");

	return B_OK;
}
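The sAddressSpaceTableLock initialized above guards sAddressSpaceTable on every access. A minimal sketch of a read-side lookup, assuming Haiku's ReadLocker from <util/AutoLock.h> and a BOpenHashTable-style Lookup() on the table (the method name _Lookup is illustrative):

/*static*/ VMAddressSpace*
VMAddressSpace::_Lookup(team_id id)
{
	// ReadLocker takes the rw_lock for reading and releases it on scope exit.
	ReadLocker locker(sAddressSpaceTableLock);
	return sAddressSpaceTable.Lookup(id);
		// Lookup() assumed from the hash table interface used above
}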
Example #3
Inode::Inode(Volume* volume, ino_t id)
	:
	fVolume(volume),
	fID(id),
	fCache(NULL),
	fMap(NULL),
	fCached(false),
	fHasExtraAttributes(false)
{
	rw_lock_init(&fLock, "ext2 inode");
	recursive_lock_init(&fSmallDataLock, "ext2 inode small data");

	TRACE("Inode::Inode(): ext2_inode: %lu, disk inode: %lu\n",
		sizeof(ext2_inode), fVolume->InodeSize());
	fNodeSize = sizeof(ext2_inode) > fVolume->InodeSize()
		? fVolume->InodeSize() : sizeof(ext2_inode);

	fInitStatus = UpdateNodeFromDisk();
	if (fInitStatus == B_OK) {
		fHasExtraAttributes = (fNodeSize == sizeof(ext2_inode)
			&& fNode.ExtraInodeSize() + EXT2_INODE_NORMAL_SIZE
				== sizeof(ext2_inode));

		if (IsDirectory() || (IsSymLink() && Size() < 60)) {
			TRACE("Inode::Inode(): Not creating the file cache\n");
			fCached = false;

			fInitStatus = B_OK;
		} else
			fInitStatus = EnableFileCache();
	} else
		TRACE("Inode: Failed initialization\n");
}
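Both locks initialized in this constructor need matching teardown calls. A plausible destructor, as a sketch assuming Haiku's usual file_cache_delete()/file_map_delete() counterparts (both tolerate NULL, so the uncached path is safe too):

Inode::~Inode()
{
	// Release the cache objects first, then destroy the locks created
	// in the constructor, in reverse order of initialization.
	file_cache_delete(fCache);
	file_map_delete(fMap);

	recursive_lock_destroy(&fSmallDataLock);
	rw_lock_destroy(&fLock);
}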
Example #4
status_t
object_depot_init(object_depot* depot, size_t capacity, size_t maxCount,
	uint32 flags, void* cookie, void (*return_object)(object_depot* depot,
		void* cookie, void* object, uint32 flags))
{
	depot->full = NULL;
	depot->empty = NULL;
	depot->full_count = depot->empty_count = 0;
	depot->max_count = maxCount;
	depot->magazine_capacity = capacity;

	rw_lock_init(&depot->outer_lock, "object depot");
	B_INITIALIZE_SPINLOCK(&depot->inner_lock);

	int cpuCount = smp_get_num_cpus();
	depot->stores = (depot_cpu_store*)slab_internal_alloc(
		sizeof(depot_cpu_store) * cpuCount, flags);
	if (depot->stores == NULL) {
		rw_lock_destroy(&depot->outer_lock);
		return B_NO_MEMORY;
	}

	for (int i = 0; i < cpuCount; i++) {
		depot->stores[i].loaded = NULL;
		depot->stores[i].previous = NULL;
	}

	depot->cookie = cookie;
	depot->return_object = return_object;

	return B_OK;
}
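Since object_depot_init() can fail with B_NO_MEMORY, callers check the returned status and pair a successful init with object_depot_destroy(). A minimal usage sketch; my_return_object and the deallocation it performs are illustrative, the real callback depends on the backing cache:

static object_depot sDepot;

static void
my_return_object(object_depot* depot, void* cookie, void* object, uint32 flags)
{
	// Illustrative: hand surplus objects back to the backing allocator.
	free(object);
}

static status_t
init_depot_example()
{
	// 8 objects per magazine, no cap on the number of full magazines.
	return object_depot_init(&sDepot, 8, 0, 0, NULL, &my_return_object);
}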
Example #5
status_t
init_mutexes()
{
	mtx_init(&Giant, "Banana Giant", NULL, MTX_DEF);
	rw_lock_init(&ifnet_rwlock, "gDevices");
	mtx_init(&gIdStoreLock, "Identity Store", NULL, MTX_DEF);

	return B_OK;
}
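A matching teardown for this FreeBSD-compatibility shim would destroy the locks in reverse order; a sketch assuming the usual mtx_destroy()/rw_lock_destroy() counterparts:

void
uninit_mutexes()
{
	mtx_destroy(&gIdStoreLock);
	rw_lock_destroy(&ifnet_rwlock);
	mtx_destroy(&Giant);
}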
Example #6
PackageFSRoot::PackageFSRoot(dev_t deviceID, ino_t nodeID)
	:
	fDeviceID(deviceID),
	fNodeID(nodeID),
	fSystemVolume(NULL),
	fPackageLinksDirectory(NULL)
{
	rw_lock_init(&fLock, "packagefs root");
}
Example #7
Inode::Inode(const char *name, int32 mode)
	:
	fMode(mode)
{
	rw_lock_init(&fLock, "inode lock");
	fFile.SetTo(name, B_CREATE_FILE | B_READ_WRITE | B_ERASE_FILE);
	fSize = 0;
	fVolume = new Volume(&fFile);
}
Example #8
Inode::Inode(Volume* volume)
	:
	fVolume(volume),
	fID(0),
	fCache(NULL),
	fMap(NULL),
	fInitStatus(B_NO_INIT)
{
	rw_lock_init(&fLock, "btrfs inode");
}
Example #9
/* Initialize the data structures for the file system cache */
void
init_fs_cache(void)
{
    rw_lock_init(&sector_lock);

    fs_cache = (struct cached_block*)malloc(CACHE_BLOCKS * sizeof(struct cached_block));
    if (fs_cache == NULL) {
        PANIC("Couldn't allocate file system cache!");
    }

    int i;
    for (i = 0; i < CACHE_BLOCKS; i++)
    {
        struct cached_block *cb = &fs_cache[i];
        rw_lock_init(&cb->rw_lock);
        cb->state = UNOCCUPIED;
        cb->accessed = false;
        cb->data = malloc(BLOCK_SECTOR_SIZE);
        if (cb->data == NULL) {
            PANIC("Couldn't allocate a file system cache buffer!");
        }
    }

    /* The eviction data structures */
    lock_init(&fs_evict_lock);
    fs_cache_arm = 0;

    /* The asynchronous fetching data structures */
    async_fetch_list = malloc(ASYNC_FETCH_SLOTS * sizeof(block_sector_t));
    if (async_fetch_list == NULL) {
        PANIC("Couldn't allocate the asynchronous fetch list!");
    }
    for (i = 0; i < ASYNC_FETCH_SLOTS; i++) {
        async_fetch_list[i] = ASYNC_FETCH_EMPTY;
    }
    async_fetch_arm = 0;
    lock_init(&async_fetch_lock);
    cond_init(&async_list_nonempty);
}
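Once initialized, each cached_block is protected by its own rw_lock. A sketch of a read through the cache; fs_cache_lookup and the rw_read_acquire()/rw_read_release() names are assumed, since this project's reader/writer primitives are not shown above:

void
fs_cache_read(block_sector_t sector, void *buffer)
{
    /* Hypothetical lookup that pins the block for this sector. */
    struct cached_block *cb = fs_cache_lookup(sector);

    rw_read_acquire(&cb->rw_lock);      /* assumed reader-side acquire */
    memcpy(buffer, cb->data, BLOCK_SECTOR_SIZE);
    cb->accessed = true;
    rw_read_release(&cb->rw_lock);      /* assumed reader-side release */
}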
Example #10
static status_t
tcp_init()
{
	rw_lock_init(&sEndpointManagersLock, "endpoint managers");

	status_t status = gStackModule->register_domain_protocols(AF_INET,
		SOCK_STREAM, 0,
		"network/protocols/tcp/v1",
		"network/protocols/ipv4/v1",
		NULL);
	if (status < B_OK)
		return status;
	status = gStackModule->register_domain_protocols(AF_INET6,
		SOCK_STREAM, 0,
		"network/protocols/tcp/v1",
		"network/protocols/ipv6/v1",
		NULL);
	if (status < B_OK)
		return status;

	status = gStackModule->register_domain_protocols(AF_INET, SOCK_STREAM,
		IPPROTO_TCP,
		"network/protocols/tcp/v1",
		"network/protocols/ipv4/v1",
		NULL);
	if (status < B_OK)
		return status;
	status = gStackModule->register_domain_protocols(AF_INET6, SOCK_STREAM,
		IPPROTO_TCP,
		"network/protocols/tcp/v1",
		"network/protocols/ipv6/v1",
		NULL);
	if (status < B_OK)
		return status;

	status = gStackModule->register_domain_receiving_protocol(AF_INET,
		IPPROTO_TCP, "network/protocols/tcp/v1");
	if (status < B_OK)
		return status;
	status = gStackModule->register_domain_receiving_protocol(AF_INET6,
		IPPROTO_TCP, "network/protocols/tcp/v1");
	if (status < B_OK)
		return status;

	add_debugger_command("tcp_endpoints", dump_endpoints,
		"lists all open TCP endpoints");
	add_debugger_command("tcp_endpoint", dump_endpoint,
		"dumps a TCP endpoint internal state");

	return B_OK;
}
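The four register_domain_protocols() calls above differ only in the address family and protocol number; a behavior-preserving sketch drives them from a table instead (module names and argument order taken from the example, register_tcp_protocols is an illustrative helper name):

static status_t
register_tcp_protocols()
{
	static const struct {
		int family;
		int protocol;
		const char* ipModule;
	} kRegistrations[] = {
		{ AF_INET, 0, "network/protocols/ipv4/v1" },
		{ AF_INET6, 0, "network/protocols/ipv6/v1" },
		{ AF_INET, IPPROTO_TCP, "network/protocols/ipv4/v1" },
		{ AF_INET6, IPPROTO_TCP, "network/protocols/ipv6/v1" },
	};

	for (size_t i = 0;
			i < sizeof(kRegistrations) / sizeof(kRegistrations[0]); i++) {
		status_t status = gStackModule->register_domain_protocols(
			kRegistrations[i].family, SOCK_STREAM, kRegistrations[i].protocol,
			"network/protocols/tcp/v1", kRegistrations[i].ipModule, NULL);
		if (status < B_OK)
			return status;
	}
	return B_OK;
}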
Example #11
VMAddressSpace::VMAddressSpace(team_id id, addr_t base, size_t size,
	const char* name)
	:
	fBase(base),
	fEndAddress(base + (size - 1)),
	fFreeSpace(size),
	fID(id),
	fRefCount(1),
	fFaultCount(0),
	fChangeCount(0),
	fTranslationMap(NULL),
	fDeleting(false)
{
	rw_lock_init(&fLock, name);
//	rw_lock_init(&fLock, kernel ? "kernel address space" : "address space");
}
Example #12
Inode::Inode(Volume* volume)
	:
	fVolume(volume),
	fID(0),
	fCache(NULL),
	fMap(NULL),
	fInitStatus(B_NO_INIT)
{
	rw_lock_init(&fLock, "ext2 inode");
	recursive_lock_init(&fSmallDataLock, "ext2 inode small data");

	TRACE("Inode::Inode(): ext2_inode: %lu, disk inode: %" B_PRIu32 "\n",
		sizeof(ext2_inode), fVolume->InodeSize());
	fNodeSize = sizeof(ext2_inode) > fVolume->InodeSize()
		? fVolume->InodeSize() : sizeof(ext2_inode);
}
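Unlike Example #3, this constructor defers the disk read, so fInitStatus stays B_NO_INIT until the node is loaded. The conventional accessor would be a one-liner; a sketch following the common Haiku InitCheck() idiom rather than this driver's actual source:

status_t
Inode::InitCheck() const
{
	// B_NO_INIT until UpdateNodeFromDisk() has completed successfully.
	return fInitStatus;
}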
Example #13
Inode::Inode(Volume* volume, ino_t id)
	:
	fVolume(volume),
	fID(id),
	fCache(NULL),
	fMap(NULL)
{
	rw_lock_init(&fLock, "btrfs inode");

	fInitStatus = UpdateNodeFromDisk();
	if (fInitStatus == B_OK) {
		if (!IsDirectory() && !IsSymLink()) {
			fCache = file_cache_create(fVolume->ID(), ID(), Size());
			fMap = file_map_create(fVolume->ID(), ID(), Size());
		}
	}
}
Example #14
int fsal_posixdb_cache_init()
{
#ifdef _ENABLE_CACHE_PATH
  unsigned int i;

  memset((char *)cache_array, 0, CACHE_PATH_SIZE * sizeof(cache_path_entry_t));

  for(i = 0; i < CACHE_PATH_SIZE; i++)
    {
      if(rw_lock_init(&cache_array[i].entry_lock))
        return -1;

      cache_array[i].is_set = 0;
      cache_array[i].path_is_set = 0;
      cache_array[i].info_is_set = 0;
    }

#endif
  return 0;
}
Example #15
/* Initialize namespace and create root with the given inode number */
int NamespaceInit(ino_t root_inode, dev_t root_dev, unsigned int *p_root_gen)
{
  hash_buffer_t buffkey;
  hash_buffer_t buffval;
  fsnode_t *root;

  /* Initialize pools.
   */

  STUFF_PREALLOC(peer_pool, POOL_CHUNK_SIZE, lookup_peer_t, p_next);

  STUFF_PREALLOC(node_pool, POOL_CHUNK_SIZE, fsnode_t, p_next);

  /* initialize namespace lock */
  if(rw_lock_init(&ns_lock))
    return ENOMEM;

  /* init the lookup hash table */
  lookup_hash = HashTable_Init(lookup_hash_config);
  nodes_hash = HashTable_Init(nodes_hash_config);

  if(!lookup_hash || !nodes_hash)
    return ENOMEM;

  /* allocate the root entry */
  root = h_insert_new_node(root_inode, root_dev, *p_root_gen, TRUE);

  if(!root)
    return ENOMEM;

  LogFullDebug(COMPONENT_FSAL, "namespace: Root=%lX.%ld (gen:%u)", root_dev, root_inode,
         root->inode.generation);

  *p_root_gen = root->inode.generation;

  /* never remove it */
  root->n_lookup = 1;

  return 0;
}
Example #16
	virtual status_t Setup(TestContext& context)
	{
		rw_lock_init(&fLock, "test r/w lock");
		return B_OK;
	}
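The matching teardown would destroy the lock when the test finishes; a sketch assuming the harness offers a symmetric hook (signature mirrored from Setup):

	virtual void TearDown(TestContext& context)
	{
		rw_lock_destroy(&fLock);
	}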
Example #17
hash_table_t *HashTable_Init(hash_parameter_t hparam)
{
  hash_table_t *ht;
  unsigned int i = 0;

  pthread_mutexattr_t mutexattr;

  /* Allocate the hash table itself */
  if((ht = (hash_table_t *) Mem_Alloc(sizeof(hash_table_t))) == NULL)
    return NULL;

  /* we have to keep the discriminant values */
  ht->parameter = hparam;

  if(pthread_mutexattr_init(&mutexattr) != 0)
    return NULL;

  /* Initialization of the node array */
  if((ht->array_rbt =
      (struct rbt_head *)Mem_Alloc(sizeof(struct rbt_head) * hparam.index_size)) == NULL)
    return NULL;

  /* Initialization of the stat array */
  if((ht->stat_dynamic =
      (hash_stat_dynamic_t *) Mem_Alloc(sizeof(hash_stat_dynamic_t) *
                                        hparam.index_size)) == NULL)
    return NULL;

  /* Init the stats */
  memset((char *)ht->stat_dynamic, 0, sizeof(hash_stat_dynamic_t) * hparam.index_size);

  /* Initialization of the rw-lock array */
  if((ht->array_lock =
      (rw_lock_t *) Mem_Alloc(sizeof(rw_lock_t) * hparam.index_size)) == NULL)
    return NULL;

  /* Initialize the array of pre-allocated node */
  if((ht->node_prealloc =
      (struct rbt_node **)Mem_Alloc(sizeof(struct rbt_node *) * hparam.index_size)) ==
     NULL)
    return NULL;

  if((ht->pdata_prealloc =
      (hash_data_t **) Mem_Alloc(sizeof(hash_data_t *) * hparam.index_size)) == NULL)
    return NULL;

  for(i = 0; i < hparam.index_size; i++)
    {
#ifndef _NO_BLOCK_PREALLOC
      if((ht->node_prealloc[i] = PreAllocNode(hparam.nb_node_prealloc)) == NULL)
        return NULL;

      if((ht->pdata_prealloc[i] = PreAllocPdata(hparam.nb_node_prealloc)) == NULL)
        return NULL;
#else
      ht->node_prealloc[i] = PreAllocNode(hparam.nb_node_prealloc);
      ht->pdata_prealloc[i] = PreAllocPdata(hparam.nb_node_prealloc);
#endif
    }

  /* Initialize each of the RB-Tree, mutexes and stats */
  for(i = 0; i < hparam.index_size; i++)
    {
      /* RBT Init */
      RBT_HEAD_INIT(&(ht->array_rbt[i]));

      /* RW lock init */
      if(rw_lock_init(&(ht->array_lock[i])) != 0)
        return NULL;

      /* Initialization of the stats structure */
      ht->stat_dynamic[i].nb_entries = 0;

      ht->stat_dynamic[i].ok.nb_set = 0;
      ht->stat_dynamic[i].ok.nb_get = 0;
      ht->stat_dynamic[i].ok.nb_del = 0;
      ht->stat_dynamic[i].ok.nb_test = 0;

      ht->stat_dynamic[i].err.nb_set = 0;
      ht->stat_dynamic[i].err.nb_get = 0;
      ht->stat_dynamic[i].err.nb_del = 0;
      ht->stat_dynamic[i].err.nb_test = 0;

      ht->stat_dynamic[i].notfound.nb_set = 0;
      ht->stat_dynamic[i].notfound.nb_get = 0;
      ht->stat_dynamic[i].notfound.nb_del = 0;
      ht->stat_dynamic[i].notfound.nb_test = 0;
    }

  /* final return, if we arrive here, then everything is alright */
  return ht;
}                               /* HashTable_Init */
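Note that every early `return NULL` above leaks whatever was already allocated. A sketch of the same allocation sequence with centralized goto-based unwinding, assuming Mem_Free() as the counterpart of Mem_Alloc():

hash_table_t *HashTable_Init_checked(hash_parameter_t hparam)
{
  hash_table_t *ht;

  if((ht = (hash_table_t *) Mem_Alloc(sizeof(hash_table_t))) == NULL)
    return NULL;

  ht->parameter = hparam;

  if((ht->array_rbt =
      (struct rbt_head *)Mem_Alloc(sizeof(struct rbt_head) * hparam.index_size)) == NULL)
    goto error_free_ht;

  if((ht->stat_dynamic =
      (hash_stat_dynamic_t *) Mem_Alloc(sizeof(hash_stat_dynamic_t) *
                                        hparam.index_size)) == NULL)
    goto error_free_rbt;

  /* ... remaining allocations and per-bucket init as above ... */

  return ht;

error_free_rbt:
  Mem_Free(ht->array_rbt);
error_free_ht:
  Mem_Free(ht);
  return NULL;
}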
Example #18
int main(int argc, char ** argv) {
    pthread_t * threads = NULL;
    pthread_attr_t * attrs = NULL;
    long num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    int x = 0;
    cpu_set_t cpuset;
    

    printf("num_cpus=%d\n", num_cpus);

    num_threads = num_cpus;

    threads = malloc(sizeof(pthread_t) * num_threads);
    attrs = malloc(sizeof(pthread_attr_t) * num_threads);

  
    for (x = 0; x < num_threads; x++) {
	CPU_ZERO(&cpuset);
	CPU_SET(x, &cpuset);

	pthread_attr_init(&attrs[x]);
	pthread_attr_setaffinity_np(&attrs[x], sizeof(cpuset), &cpuset);
    }

    pthread_barrier_init(&barrier, NULL, num_threads);
    pthread_barrier_init(&barrier_less_one, NULL, num_threads - 1);

    /* Exercise 1: Simple atomic operations */
    printf("Exercise 1 (sub):\t");
    fflush(stdout);
    {
        int i = 0;
        global_value = (num_threads * ITERATIONS);

        for (i = 0; i < num_threads; i++) {
            pthread_create(&threads[i], &attrs[i], &ex1_sub_fn, NULL);
            
        }

        for (i = 0; i < num_threads; i++) {
            void * ret = NULL;
            pthread_join(threads[i], &ret);
        }


        if (global_value != 0) {
            printf("ERROR\n");
        } else {
            printf("SUCCESS\n");
        }
    }



    /* Barriers */
    printf("Barrier Test:\t\t");
    fflush(stdout);
    {
        // barrier init
        int i = 0;
        struct barrier test_barrier;
        unsigned long long test_ret = 0;
        global_value = 0;
        
        barrier_init(&test_barrier, num_threads);
        
        for (i = 0; i < num_threads; i++) {
            pthread_create(&threads[i], &attrs[i], &ex2_fn, &test_barrier);
        }
        
        for (i = 0; i < num_threads; i++) {
            void * ret = NULL;
            pthread_join(threads[i], &ret);
            test_ret |= (unsigned long long)ret;
        }
        

        if (test_ret != 0) {
            printf("ERROR\n");
        } else {
            printf("SUCCESS\n");
        }
    }

    /* Spinlocks */
    printf("Spinlocks:\t\t");
    fflush(stdout);
    {
        int i = 0;
        struct spinlock lock;

        global_value = 0;

        spinlock_init(&lock);
        
        for (i = 0; i < num_threads; i++) {
            pthread_create(&threads[i], &attrs[i], &ex3_fn, &lock);
        }
        
        for (i = 0; i < num_threads; i++) {
            void * ret = NULL;
            pthread_join(threads[i], &ret);
        }
        

        if (global_value != ITERATIONS * num_threads) {
            printf("ERROR\n");
        } else {
            printf("SUCCESS\n");
        }
    }


    /* Reader/writer Locks */
    printf("Reader Writer Locks:\t");
    fflush(stdout);
    {
        int i = 0;
        struct read_write_lock lock;
	unsigned long long test_ret = 0;

        global_value = 0;

	rw_lock_init(&lock);
        
        for (i = 0; i < num_threads - 1; i++) {
            pthread_create(&threads[i], &attrs[i], &ex4_read_fn, &lock);
        }
        
	pthread_create(&threads[num_threads - 1], &attrs[num_threads - 1], &ex4_write_fn, &lock);

        for (i = 0; i < num_threads; i++) {
            void * ret = NULL;
            pthread_join(threads[i], &ret);
	    test_ret |= (unsigned long long)ret;
        }
	


        if (test_ret != 0) {
            printf("ERROR\n");
        } else {
            printf("SUCCESS\n");
        }
    }


    /* Lock-free queue */
    printf("Lock Free Queue:\t");
    fflush(stdout);
    {
        int i = 0;
        struct lf_queue queue;
	unsigned long long test_ret = 0;


	lf_queue_init(&queue);
        
        for (i = 0; i < num_threads - 1; i++) {
            pthread_create(&threads[i], &attrs[i], &enqueue_fn, &queue);
        }
        
	pthread_create(&threads[num_threads - 1], &attrs[num_threads - 1], &dequeue_fn, &queue);

        for (i = 0; i < num_threads; i++) {
            void * ret = NULL;
            pthread_join(threads[i], &ret);
	    test_ret |= (unsigned long long)ret;
        }
	

        if (test_ret != 0) {
            printf("ERROR\n");
        } else {
            printf("SUCCESS\n");
        }
    }

    return 0;
}
Example #19
int main(int argc, char *argv[])
{
  SetDefaultLogging("TEST");
  SetNamePgm("test_rw");
  pthread_attr_t attr_thr;
  pthread_t ThrReaders[MAX_READERS];
  pthread_t ThrWritters[MAX_WRITTERS];
  int i;
  int rc;

  pthread_attr_init(&attr_thr);
  pthread_attr_setscope(&attr_thr, PTHREAD_SCOPE_SYSTEM);
  pthread_attr_setdetachstate(&attr_thr, PTHREAD_CREATE_JOINABLE);

  LogTest("Init lock: %d", rw_lock_init(&lock));

  LogTest("ESTIMATED TIME OF TEST: %d s",
         (MAX_WRITTERS + MAX_READERS) * NB_ITER + MARGE_SECURITE);
  fflush(stdout);

  for(i = 0; i < MAX_WRITTERS; i++)
    {
      if((rc =
          pthread_create(&ThrWritters[i], &attr_thr, thread_writter, (void *)NULL)) != 0)
        {
          LogTest("pthread_create: Error %d %d ", rc, errno);
          LogTest("RW_Lock Test FAILED: Bad allocation thread");
          exit(1);
        }
    }

  for(i = 0; i < MAX_READERS; i++)
    {
      if((rc =
          pthread_create(&ThrReaders[i], &attr_thr, thread_reader, (void *)NULL)) != 0)
        {
          LogTest("pthread_create: Error %d %d ", rc, errno);
          LogTest("RW_Lock Test FAILED: Bad allocation thread");
          exit(1);
        }
    }

  LogTest("Main thread sleeping while threads run locking tests");

  sleep((MAX_WRITTERS + MAX_READERS) * NB_ITER + MARGE_SECURITE);

  LogTest("End of sleep( %d ) ",
         (MAX_WRITTERS + MAX_READERS) * NB_ITER + MARGE_SECURITE);
  if(OkWrite == 1 && OkRead == 1)
    {
      LogTest("Test RW_Lock succeeded: no deadlock detected");
      exit(0);
      return 0;                 /* for compiler */
    }
  else
    {
      if(OkWrite == 0)
        LogTest("RW_Lock Test FAILED: deadlock in the writers");
      if(OkRead == 0)
        LogTest("RW_Lock Test FAILED: deadlock in the readers");
      exit(1);
      return 1;                 /* for compiler */

    }

}                               /* main */
Example #20
fsal_acl_t *nfs4_acl_new_entry(fsal_acl_data_t *pacldata, fsal_acl_status_t *pstatus)
{
  fsal_acl_t *pacl = NULL;
  hash_buffer_t buffkey;
  hash_buffer_t buffvalue;
  int rc;

  /* Set the return default to NFS_V4_ACL_SUCCESS */
  *pstatus = NFS_V4_ACL_SUCCESS;

  LogDebug(COMPONENT_NFS_V4_ACL, "nfs4_acl_new_entry: acl hash table size = %u",
           HashTable_GetSize(fsal_acl_hash));

  /* Turn the input to a hash key */
  if(nfs4_acldata_2_key(&buffkey, pacldata))
    {
      *pstatus = NFS_V4_ACL_UNAPPROPRIATED_KEY;

      nfs4_release_acldata_key(&buffkey);

      nfs4_ace_free(pacldata->aces);

      return NULL;
    }

  /* Check if the entry already exists */
  if(HashTable_Get(fsal_acl_hash, &buffkey, &buffvalue) == HASHTABLE_SUCCESS)
    {
      /* Entry is already in the cache, do not add it */
      pacl = (fsal_acl_t *) buffvalue.pdata;
      *pstatus = NFS_V4_ACL_EXISTS;

      nfs4_release_acldata_key(&buffkey);

      nfs4_ace_free(pacldata->aces);

      return pacl;
    }

  /* Adding the entry in the cache */
  pacl = nfs4_acl_alloc();
  if(rw_lock_init(&(pacl->lock)) != 0)
    {
      nfs4_acl_free(pacl);
      LogCrit(COMPONENT_NFS_V4_ACL,
              "nfs4_acl_new_entry: rw_lock_init returned %d (%s)",
              errno, strerror(errno));
      *pstatus = NFS_V4_ACL_INIT_ENTRY_FAILED;

      nfs4_release_acldata_key(&buffkey);

      nfs4_ace_free(pacldata->aces);

      return NULL;
    }

  pacl->naces = pacldata->naces;
  pacl->aces = pacldata->aces;
  pacl->ref = 0;

  /* Build the value */
  buffvalue.pdata = (caddr_t) pacl;
  buffvalue.len = sizeof(fsal_acl_t);

  if((rc =
      HashTable_Test_And_Set(fsal_acl_hash, &buffkey, &buffvalue,
                             HASHTABLE_SET_HOW_SET_NO_OVERWRITE)) != HASHTABLE_SUCCESS)
    {
      /* Put the entry back in its pool */
      nfs4_acl_free(pacl);
      LogWarn(COMPONENT_NFS_V4_ACL,
              "nfs4_acl_new_entry: entry could not be added to hash, rc=%d",
              rc);

      if( rc != HASHTABLE_ERROR_KEY_ALREADY_EXISTS )
       {
         *pstatus = NFS_V4_ACL_HASH_SET_ERROR;

         nfs4_release_acldata_key(&buffkey);

         return NULL;
       }
     else
      {
        LogDebug(COMPONENT_NFS_V4_ACL,
                 "nfs4_acl_new_entry: concurrency detected during acl insertion");

        /* This situation occurs when several threads try to initialize the same
         * uncached entry at the same time. The first thread creates the entry and
         * the others get HASHTABLE_ERROR_KEY_ALREADY_EXISTS; in that case, return
         * the entry created by the first thread. */
        if((rc = HashTable_Get(fsal_acl_hash, &buffkey, &buffvalue)) != HASHTABLE_SUCCESS)
         {
            *pstatus = NFS_V4_ACL_HASH_SET_ERROR;

            nfs4_release_acldata_key(&buffkey);

            return NULL;
         }

        pacl = (fsal_acl_t *) buffvalue.pdata;
        *pstatus = NFS_V4_ACL_SUCCESS;

        nfs4_release_acldata_key(&buffkey);

        return pacl;
      }
    }

  return pacl;
}
Example #21
void
Node::_Init()
{
	rw_lock_init(&fLock, "checkfs node");
}