Example #1
/**
 *
 * nfs_ip_name_remove: Tries to remove an entry from the ip_name cache.
 *
 * @param ipaddr           [IN]    the ip address to be uncached.
 *
 * @return IP_NAME_SUCCESS if the entry was removed, IP_NAME_NOT_FOUND otherwise.
 *
 */
int nfs_ip_name_remove(sockaddr_t *ipaddr)
{
	struct gsh_buffdesc buffkey, old_value;
	nfs_ip_name_t *nfs_ip_name = NULL;
	char ipstring[SOCK_NAME_MAX + 1];

	sprint_sockip(ipaddr, ipstring, sizeof(ipstring));

	buffkey.addr = ipaddr;
	buffkey.len = sizeof(sockaddr_t);

	if (HashTable_Del(ht_ip_name, &buffkey, NULL, &old_value) ==
	    HASHTABLE_SUCCESS) {
		nfs_ip_name = (nfs_ip_name_t *) old_value.addr;

		LogFullDebug(COMPONENT_DISPATCH, "Cache remove hit for %s->%s",
			     ipstring, nfs_ip_name->hostname);

		gsh_free(nfs_ip_name);
		return IP_NAME_SUCCESS;
	}

	LogFullDebug(COMPONENT_DISPATCH, "Cache remove miss for %s", ipstring);

	return IP_NAME_NOT_FOUND;
}				/* nfs_ip_name_remove */
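A minimal caller sketch for the function above (hypothetical code, not part of the original source; it assumes a sockaddr_t filled in elsewhere and the usual Ganesha logging macros):

	/* Hypothetical caller: drop the cached reverse lookup for a client
	 * address, e.g. after the peer disconnects. */
	sockaddr_t client_addr;		/* filled in by the transport layer */

	if (nfs_ip_name_remove(&client_addr) == IP_NAME_NOT_FOUND)
		LogDebug(COMPONENT_DISPATCH, "Address was not cached");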
Example #2
/**
 * @brief Remove a state from the stateid table
 *
 * @param[in] other stateid4.other
 *
 * This really can't fail.
 */
void nfs4_State_Del(char other[OTHERSIZE])
{
	struct gsh_buffdesc buffkey, old_key, old_value;
	hash_error_t err;

	buffkey.addr = other;
	buffkey.len = OTHERSIZE;

	err = HashTable_Del(ht_state_id, &buffkey, &old_key, &old_value);

	if (err == HASHTABLE_SUCCESS) {
		/* free the key that was stored in hash table */
		LogFullDebug(COMPONENT_STATE, "Freeing stateid key %p",
			     old_key.addr);
		gsh_free(old_key.addr);

		/* State is managed in stuff alloc, no free is needed for
		 * old_value.addr
		 */
	} else {
		LogCrit(COMPONENT_STATE,
			"Failure to delete state %s",
			hash_table_err_to_str(err));
	}
}
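A similar hedged sketch for nfs4_State_Del (hypothetical caller; assumes the stateid's "other" bytes are available as a char array of OTHERSIZE bytes):

	char other[OTHERSIZE];		/* stateid4.other, filled in elsewhere */

	nfs4_State_Del(other);		/* logs a critical message if the entry is absent */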
Example #3
/**
 *
 * nfs_ip_stats_remove: Tries to remove an entry from the ip_stats cache.
 *
 * @param ht_ip_stats      [INOUT] the ip_stats hash table
 * @param ipaddr           [IN]    the ip address to be uncached.
 * @param ip_stats_pool    [INOUT] values pool for hash table
 *
 * @return IP_STATS_SUCCESS if the entry was removed (or stats are disabled),
 *         IP_STATS_NOT_FOUND otherwise.
 *
 */
int nfs_ip_stats_remove(hash_table_t * ht_ip_stats,
                        sockaddr_t * ipaddr, pool_t *ip_stats_pool)
{
    hash_buffer_t buffkey, old_key, old_value;
    int status = IP_STATS_SUCCESS;
    nfs_ip_stats_t *g = NULL;

    buffkey.pdata = (caddr_t) ipaddr;
    buffkey.len = sizeof(sockaddr_t);

    /* Do nothing if configuration disables IP_Stats */
    if(nfs_param.core_param.dump_stats_per_client == 0)
        return IP_STATS_SUCCESS;

    if(HashTable_Del(ht_ip_stats, &buffkey, &old_key, &old_value) == HASHTABLE_SUCCESS)
    {
        gsh_free(old_key.pdata);
        g = old_value.pdata;
        pool_free(ip_stats_pool, g);
    }
    else
    {
        status = IP_STATS_NOT_FOUND;
    }

    return status;
}                               /* nfs_ip_stats_remove */
Example #4
/**
 *
 * nfs41_Session_Del
 *
 * This routine removes a session from the sessions hashtable.
 *
 * @param sessionid [IN] sessionid, used as a hash key
 *
 * @return 1 if ok, 0 otherwise.
 *
 */
int nfs41_Session_Del(char sessionid[NFS4_SESSIONID_SIZE])
{
  hash_buffer_t buffkey, old_key, old_value;

  if(isFullDebug(COMPONENT_SESSIONS))
    {
      char str[NFS4_SESSIONID_SIZE *2 + 1];

      sprint_mem(str, (char *)sessionid, NFS4_SESSIONID_SIZE);
      LogFullDebug(COMPONENT_SESSIONS, "         -----  DelSession : %s\n", str);
    }

  buffkey.pdata = (caddr_t) sessionid;
  buffkey.len = NFS4_SESSIONID_SIZE;

  if(HashTable_Del(ht_session_id, &buffkey, &old_key, &old_value) == HASHTABLE_SUCCESS)
    {
      /* free the key that was stored in hash table */
      Mem_Free((void *)old_key.pdata);

      /* State is managed in stuff alloc, no free is needed for old_value.pdata */

      return 1;
    }
  else
    return 0;
}                               /* nfs41_Session_Del */
Example #5
/**
 *
 * nfs_ip_stats_remove: Tries to remove an entry from the ip_stats cache.
 *
 * @param ht_ip_stats       [INOUT] the ip_stats hash table
 * @param ipaddr            [IN]    the ip address to be uncached.
 * @param nfs_ip_stats_pool [INOUT] values pool for hash table
 *
 * @return IP_STATS_SUCCESS if the entry was removed (or stats are disabled),
 *         IP_STATS_NOT_FOUND otherwise.
 *
 */
int nfs_ip_stats_remove(hash_table_t * ht_ip_stats,
                        int ipaddr, nfs_ip_stats_t * nfs_ip_stats_pool)
{
    hash_buffer_t buffkey, old_value;
    int status = IP_STATS_SUCCESS;
    nfs_ip_stats_t *pnfs_ip_stats = NULL;
    unsigned long int long_ipaddr;

    long_ipaddr = (unsigned long int)ipaddr;

    buffkey.pdata = (caddr_t) long_ipaddr;
    buffkey.len = 0;

    /* Do nothing if configuration disables IP_Stats */
    if(nfs_param.core_param.dump_stats_per_client == 0)
        return IP_STATS_SUCCESS;

    if(HashTable_Del(ht_ip_stats, &buffkey, NULL, &old_value) == HASHTABLE_SUCCESS)
    {
        pnfs_ip_stats = (nfs_ip_stats_t *) old_value.pdata;
        RELEASE_PREALLOC(pnfs_ip_stats, nfs_ip_stats_pool, next_alloc);
    }
    else
    {
        status = IP_STATS_NOT_FOUND;
    }

    return status;
}                               /* nfs_ip_stats_remove */
Example #6
/**
 *
 * nfs_ip_stats_remove: Tries to remove an entry from the ip_stats cache.
 *
 * @param ht_ip_stats      [INOUT] the ip_stats hash table
 * @param ipaddr           [IN]    the ip address to be uncached.
 * @param ip_stats_pool    [INOUT] values pool for hash table
 *
 * @return IP_STATS_SUCCESS if the entry was removed (or stats are disabled),
 *         IP_STATS_NOT_FOUND otherwise.
 *
 */
int nfs_ip_stats_remove(hash_table_t * ht_ip_stats,
                        sockaddr_t * ipaddr, struct prealloc_pool *ip_stats_pool)
{
  hash_buffer_t buffkey, old_key, old_value;
  int status = IP_STATS_SUCCESS;
  nfs_ip_stats_t *pnfs_ip_stats = NULL;

  buffkey.pdata = (caddr_t) ipaddr;
  buffkey.len = sizeof(sockaddr_t);

  /* Do nothing if configuration disables IP_Stats */
  if(nfs_param.core_param.dump_stats_per_client == 0)
    return IP_STATS_SUCCESS;

  if(HashTable_Del(ht_ip_stats, &buffkey, &old_key, &old_value) == HASHTABLE_SUCCESS)
    {
      Mem_Free((sockaddr_t *) old_key.pdata);
      pnfs_ip_stats = (nfs_ip_stats_t *) old_value.pdata;
      ReleaseToPool(pnfs_ip_stats, ip_stats_pool);
    }
  else
    {
      status = IP_STATS_NOT_FOUND;
    }

  return status;
}                               /* nfs_ip_stats_remove */
Example #7
/**
 * Remove a handle from the map
 * when it was removed from the filesystem
 * or when it is stale.
 */
int HandleMap_DelFH(nfs23_map_handle_t *p_in_nfs23_digest)
{
	int rc;
	struct gsh_buffdesc buffkey, stored_buffkey;
	struct gsh_buffdesc stored_buffval;

	digest_pool_entry_t digest;

	digest_pool_entry_t *p_stored_digest;
	handle_pool_entry_t *p_stored_handle;

	/* first, delete it from hash table */

	digest.nfs23_digest = *p_in_nfs23_digest;

	buffkey.addr = (caddr_t) &digest;
	buffkey.len = sizeof(digest_pool_entry_t);

	rc = HashTable_Del(handle_map_hash, &buffkey, &stored_buffkey,
			   &stored_buffval);

	if (rc != HASHTABLE_SUCCESS)
		return HANDLEMAP_STALE;

	p_stored_digest = (digest_pool_entry_t *) stored_buffkey.addr;
	p_stored_handle = (handle_pool_entry_t *) stored_buffval.addr;

	digest_free(p_stored_digest);
	handle_free(p_stored_handle);

	/* then, submit the request to the database */

	return handlemap_db_delete(p_in_nfs23_digest);

}
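A hedged usage sketch for HandleMap_DelFH (hypothetical caller, not from the original source; assumes an nfs23_map_handle_t produced earlier by the handle-mapping layer):

	nfs23_map_handle_t digest;	/* built earlier from the NFSv2/v3 handle */

	if (HandleMap_DelFH(&digest) == HANDLEMAP_STALE)
		LogDebug(COMPONENT_FSAL, "Handle was already unmapped");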
Example #8
/* remove the node corresponding to an inode number */
int h_del_node(ino_t inode, dev_t device)
{
  hash_buffer_t buffkey;
  inode_t key;

  /* test if inode is referenced in nodes hashtable */
  key.inum = inode;
  key.dev = device;

  /* note: generation not needed for Hashtable_Get/Del */

  buffkey.pdata = (caddr_t) & key;
  buffkey.len = sizeof(key);

  return HashTable_Del(nodes_hash, &buffkey, NULL, NULL);

}                               /* h_del_node */
Example #9
int nfs41_Session_Del(char sessionid[NFS4_SESSIONID_SIZE])
{
    struct gsh_buffdesc key, old_key, old_value;

    key.addr = sessionid;
    key.len = NFS4_SESSIONID_SIZE;

    if (HashTable_Del(ht_session_id, &key, &old_key, &old_value) ==
            HASHTABLE_SUCCESS) {
        nfs41_session_t *session = old_value.addr;

        /* unref session */
        dec_session_ref(session);

        return true;
    } else {
        return false;
    }
}
Example #10
cache_inode_status_t cache_inode_kill_entry( cache_entry_t          * pentry,
                                             cache_inode_lock_how_t   lock_how,  
                                             hash_table_t           * ht,
                                             cache_inode_client_t   * pclient,
                                             cache_inode_status_t   * pstatus )
{
  fsal_handle_t *pfsal_handle = NULL;
  cache_inode_fsal_data_t fsaldata;
  cache_inode_parent_entry_t *parent_iter = NULL;
  cache_inode_parent_entry_t *parent_iter_next = NULL;
  hash_buffer_t key, old_key;
  hash_buffer_t old_value;
  int rc;
  fsal_status_t fsal_status;

  memset( (char *)&fsaldata, 0, sizeof( fsaldata ) ) ;

  LogInfo(COMPONENT_CACHE_INODE,
          "Using cache_inode_kill_entry for entry %p", pentry);

  /* Invalidation is not for junctions or special files */
  if( ( pentry->internal_md.type == FS_JUNCTION )    ||
      ( pentry->internal_md.type == SOCKET_FILE )    ||
      ( pentry->internal_md.type == FIFO_FILE )      ||
      ( pentry->internal_md.type == CHARACTER_FILE ) ||
      ( pentry->internal_md.type == BLOCK_FILE ) )
   {
     free_lock( pentry, lock_how ) ; 

     *pstatus = CACHE_INODE_SUCCESS;
     return *pstatus;
   }

#if 0
  /** @todo: BUGAZOMEU : directory invalidation seems quite tricky, temporarily avoid it */
  if( pentry->internal_md.type == DIRECTORY )
   {
     free_lock( pentry, lock_how ) ; 

     *pstatus = CACHE_INODE_SUCCESS;
     return *pstatus;
   }

  /** @todo: BUGAZOMEU : file invalidation seems quite tricky, temporarily avoid it */
  /* We need to know how to manage how to deal with "files with states"  */
  if( pentry->internal_md.type == REGULAR_FILE )
   {
     free_lock( pentry, lock_how ) ; 

     *pstatus = CACHE_INODE_SUCCESS;
     return *pstatus;
   }
#endif

  if(pstatus == NULL)
    return CACHE_INODE_INVALID_ARGUMENT;

  if(pentry == NULL || pclient == NULL || ht == NULL)
    {
      free_lock( pentry, lock_how ) ; 

      *pstatus = CACHE_INODE_INVALID_ARGUMENT;
      return *pstatus;
    }

  /* Get the FSAL handle */
  if((pfsal_handle = cache_inode_get_fsal_handle(pentry, pstatus)) == NULL)
    {
      free_lock( pentry, lock_how ) ; 

      LogCrit(COMPONENT_CACHE_INODE,
              "cache_inode_kill_entry: unable to retrieve pentry's specific filesystem info");
      return *pstatus;
    }

  /* Invalidate the related LRU gc entry (no more required) */
  if(pentry->gc_lru_entry != NULL)
    {
      if(LRU_invalidate(pentry->gc_lru, pentry->gc_lru_entry) != LRU_LIST_SUCCESS)
        {
          free_lock( pentry, lock_how ) ; 

          *pstatus = CACHE_INODE_LRU_ERROR;
          return *pstatus;
        }
    }

  fsaldata.handle = *pfsal_handle;
  fsaldata.cookie = DIR_START;

  /* Use the handle to build the key */
  if(cache_inode_fsaldata_2_key(&key, &fsaldata, pclient))
    {
      free_lock( pentry, lock_how ) ; 

      LogCrit(COMPONENT_CACHE_INODE,
              "cache_inode_kill_entry: could not build hashtable key");

      cache_inode_release_fsaldata_key(&key, pclient);
      *pstatus = CACHE_INODE_NOT_FOUND;
      return *pstatus;
    }

  /* use the key to delete the entry */
  if((rc = HashTable_Del(ht, &key, &old_key, &old_value)) != HASHTABLE_SUCCESS)
    {
      if( rc != HASHTABLE_ERROR_NO_SUCH_KEY) /* rc=3 => Entry was previously removed */
        LogCrit( COMPONENT_CACHE_INODE,
                 "cache_inode_kill_entry: entry could not be deleted, status = %d",
                 rc);

      cache_inode_release_fsaldata_key(&key, pclient);

      *pstatus = CACHE_INODE_NOT_FOUND;
      return *pstatus;
    }

  /* Release the hash key data */
  cache_inode_release_fsaldata_key(&old_key, pclient);

  /* Clean up the associated resources in the FSAL */
  if(FSAL_IS_ERROR(fsal_status = FSAL_CleanObjectResources(pfsal_handle)))
    {
      LogCrit(COMPONENT_CACHE_INODE,
              "cache_inode_kill_entry: Couldn't free FSAL resources fsal_status.major=%u",
              fsal_status.major);
    }

  /* Sanity check: old_value.pdata is expected to be equal to pentry,
   * and is released later in this function */
  if((cache_entry_t *) old_value.pdata != pentry)
    {
      LogCrit(COMPONENT_CACHE_INODE,
              "cache_inode_kill_entry: unexpected pdata %p from hash table (pentry=%p)",
              old_value.pdata, pentry);
    }

  /* Release the current key */
  cache_inode_release_fsaldata_key(&key, pclient);

  /* Recover the parent list entries */
  parent_iter = pentry->parent_list;
  while(parent_iter != NULL)
    {
      parent_iter_next = parent_iter->next_parent;

      ReleaseToPool(parent_iter, &pclient->pool_parent);

      parent_iter = parent_iter_next;
    }

  /* If entry is datacached, remove it from the cache */
  if(pentry->internal_md.type == REGULAR_FILE)
    {
      cache_content_status_t cache_content_status;

      if(pentry->object.file.pentry_content != NULL)
        if(cache_content_release_entry
           ((cache_content_entry_t *) pentry->object.file.pentry_content,
            (cache_content_client_t *) pclient->pcontent_client,
            &cache_content_status) != CACHE_CONTENT_SUCCESS)
          LogCrit(COMPONENT_CACHE_INODE,
                  "Could not remove datacached entry for pentry %p", pentry);
    }

  /* If entry is a DIRECTORY, invalidate dirents */
  if(pentry->internal_md.type == DIRECTORY)
    {
	cache_inode_invalidate_related_dirents(pentry, pclient);
    }

  // free_lock( pentry, lock_how ) ; /* Really needed ? The pentry is unaccessible now and will be destroyed */

  /* Destroy the mutex associated with the pentry */
  cache_inode_mutex_destroy(pentry);

  /* Put the pentry back to the pool */
  ReleaseToPool(pentry, &pclient->pool_entry);

  *pstatus = CACHE_INODE_SUCCESS;
  return *pstatus;
}                               /* cache_inode_kill_entry */
Example #11
File: nfs_dupreq.c  Project: ic-hep/emi3
static int _remove_dupreq(hash_buffer_t *buffkey, dupreq_entry_t *pdupreq,
                          struct prealloc_pool *dupreq_pool, int nfs_req_status)
{
  int rc;
  nfs_function_desc_t funcdesc = nfs2_func_desc[0];

  rc = HashTable_Del(ht_dupreq, buffkey, NULL, NULL);

  /* if hashtable no such key => dupreq garbaged by another thread */
  if(rc != HASHTABLE_SUCCESS && rc != HASHTABLE_ERROR_NO_SUCH_KEY)
    return 1;                   /* Error while cleaning */
  else if(rc == HASHTABLE_ERROR_NO_SUCH_KEY)
    return 0;                   /* don't free the dupreq twice */

  /* Locate the function descriptor associated with this cached request */
  if(pdupreq->rq_prog == nfs_param.core_param.program[P_NFS])
    {
      switch (pdupreq->rq_vers)
        {
        case NFS_V2:
          funcdesc = nfs2_func_desc[pdupreq->rq_proc];
          break;

        case NFS_V3:
          funcdesc = nfs3_func_desc[pdupreq->rq_proc];
          break;

        case NFS_V4:
          funcdesc = nfs4_func_desc[pdupreq->rq_proc];
          break;

        default:
          /* We should never go there (this situation is filtered in nfs_rpc_getreq) */
          LogMajor(COMPONENT_DUPREQ,
                   "NFS Protocol version %d unknown in dupreq_gc",
                   (int)pdupreq->rq_vers);
        }
    }
  else if(pdupreq->rq_prog == nfs_param.core_param.program[P_MNT])
    {
      switch (pdupreq->rq_vers)
        {
        case MOUNT_V1:
          funcdesc = mnt1_func_desc[pdupreq->rq_proc];
          break;

        case MOUNT_V3:
          funcdesc = mnt3_func_desc[pdupreq->rq_proc];
          break;

        default:
          /* We should never go there (this situation is filtered in nfs_rpc_getreq) */
          LogMajor(COMPONENT_DUPREQ,
                   "MOUNT Protocol version %d unknown in dupreq_gc",
                   (int)pdupreq->rq_vers);
          break;
        }                       /* switch( pdupreq->vers ) */
    }
#ifdef _USE_NLM
  else if(pdupreq->rq_prog == nfs_param.core_param.program[P_NLM])
    {

      switch (pdupreq->rq_vers)
        {
        case NLM4_VERS:
          funcdesc = nlm4_func_desc[pdupreq->rq_proc];
          break;
        }                       /* switch( pdupreq->vers ) */
    }
#endif                          /* _USE_NLM */
#ifdef _USE_QUOTA
  else if(pdupreq->rq_prog == nfs_param.core_param.program[P_RQUOTA])
    {

      switch (pdupreq->rq_vers)
        {
        case RQUOTAVERS:
          funcdesc = rquota1_func_desc[pdupreq->rq_proc];
          break;

        case EXT_RQUOTAVERS:
          funcdesc = rquota2_func_desc[pdupreq->rq_proc];
          break;
        }                       /* switch( pdupreq->vers ) */
    }
#endif
  else
    {
      /* We should never go there (this situation is filtered in nfs_rpc_getreq) */
      LogMajor(COMPONENT_DUPREQ,
               "protocol %d is not managed",
               (int)pdupreq->rq_prog);
    }

  /* Call the free function */
  if (nfs_req_status == NFS_REQ_OK)
    funcdesc.free_function(&(pdupreq->res_nfs));

  /* Send the entry back to the pool */
  ReleaseToPool(pdupreq, dupreq_pool);

  return DUPREQ_SUCCESS;
}
Example #12
/**
 * cache_inode_clean_internal: remove a pentry from cache and all LRUs,
 *                             and release related resources.
 *
 * @param to_remove_entry [IN]    entry to be deleted from cache
 * @param ht              [IN]    the cache hash table
 * @param pclient         [INOUT] resource allocated by the client for NFS management.
 *
 * @return CACHE_INODE_SUCCESS if successful, an error status otherwise.
 */
cache_inode_status_t cache_inode_clean_internal(cache_entry_t * to_remove_entry,
                                                hash_table_t * ht,
                                                cache_inode_client_t * pclient)
{
  fsal_handle_t *pfsal_handle_remove;
  cache_inode_parent_entry_t *parent_iter = NULL;
  cache_inode_parent_entry_t *parent_iter_next = NULL;
  cache_inode_fsal_data_t fsaldata;
  cache_inode_status_t status;
  hash_buffer_t key, old_key, old_value;
  int rc;
 
  memset( (char *)&fsaldata, 0, sizeof( fsaldata ) ) ;

  if((pfsal_handle_remove =
      cache_inode_get_fsal_handle(to_remove_entry, &status)) == NULL)
    {
      return status;
    }

  /* Invalidate the related LRU gc entry (no more required) */
  if(to_remove_entry->gc_lru_entry != NULL)
    {
      if(LRU_invalidate(to_remove_entry->gc_lru, to_remove_entry->gc_lru_entry)
         != LRU_LIST_SUCCESS)
        {
          return CACHE_INODE_LRU_ERROR;
        }
    }

  /* delete the entry from the cache */
  fsaldata.handle = *pfsal_handle_remove;

  /* XXX always DIR_START */
  fsaldata.cookie = DIR_START;

  if(cache_inode_fsaldata_2_key(&key, &fsaldata, pclient))
    {
      return CACHE_INODE_INCONSISTENT_ENTRY;
    }

  /* use the key to delete the entry */
  rc = HashTable_Del(ht, &key, &old_key, &old_value);

  if(rc)
    LogCrit(COMPONENT_CACHE_INODE,
            "HashTable_Del error %d in cache_inode_clean_internal", rc);

  if((rc != HASHTABLE_SUCCESS) && (rc != HASHTABLE_ERROR_NO_SUCH_KEY))
    {
      cache_inode_release_fsaldata_key(&key, pclient);
      return CACHE_INODE_INCONSISTENT_ENTRY;
    }

  /* release the key that was stored in hash table */
  if(rc != HASHTABLE_ERROR_NO_SUCH_KEY)
    {
      cache_inode_release_fsaldata_key(&old_key, pclient);

      /* Sanity check: old_value.pdata is expected to be equal to pentry,
       * and is released later in this function */
      if((cache_entry_t *) old_value.pdata != to_remove_entry)
        {
          LogCrit(COMPONENT_CACHE_INODE,
                  "cache_inode_remove: unexpected pdata %p from hash table (pentry=%p)",
                  old_value.pdata, to_remove_entry);
        }
    }

  /* release the key used for hash query */
  cache_inode_release_fsaldata_key(&key, pclient);

  /* Free the parent list entries */

  parent_iter = to_remove_entry->parent_list;
  while(parent_iter != NULL)
    {
      parent_iter_next = parent_iter->next_parent;

      ReleaseToPool(parent_iter, &pclient->pool_parent);

      parent_iter = parent_iter_next;
    }

  return CACHE_INODE_SUCCESS;
}                               /* cache_inode_clean_internal */
Example #13
/**
 *
 * cache_inode_gc_clean_entry: cleans an entry in the cache_inode.
 *
 * @param pentry   [INOUT] entry to be cleaned.
 * @param pgcparam [IN]    additional GC parameters used for cleaning.
 *
 * @return LRU_LIST_SET_INVALID if ok, LRU_LIST_DO_NOT_SET_INVALID otherwise
 *
 */
static int cache_inode_gc_clean_entry(cache_entry_t * pentry,
                                      cache_inode_param_gc_t * pgcparam)
{
  fsal_handle_t *pfsal_handle = NULL;
  cache_inode_parent_entry_t *parent_iter = NULL;
  cache_inode_parent_entry_t *parent_iter_next = NULL;
  cache_inode_fsal_data_t fsaldata;
  cache_inode_status_t status;
  fsal_status_t fsal_status;
  hash_buffer_t key, old_key, old_value;
  int rc;

  LogFullDebug(COMPONENT_CACHE_INODE_GC,
               "(pthread_self=%p): About to remove pentry=%p, type=%d",
               (caddr_t)pthread_self(),
               pentry, pentry->internal_md.type);

  /* sanity check */
  if((pentry->gc_lru_entry != NULL) &&
     ((cache_entry_t *) pentry->gc_lru_entry->buffdata.pdata) != pentry)
    {
      LogCrit(COMPONENT_CACHE_INODE_GC,
              "cache_inode_gc_clean_entry: LRU entry pointed by this pentry doesn't match the GC LRU");
    }

  /* Get the FSAL handle */
  if((pfsal_handle = cache_inode_get_fsal_handle(pentry, &status)) == NULL)
    {
      LogCrit(COMPONENT_CACHE_INODE_GC,
              "cache_inode_gc_clean_entry: unable to retrieve pentry's specific filesystem info");
      return LRU_LIST_DO_NOT_SET_INVALID;
    }

  fsaldata.handle = *pfsal_handle;

  if(pentry->internal_md.type != DIR_CONTINUE)
    fsaldata.cookie = DIR_START;
  else
    fsaldata.cookie = pentry->object.dir_cont.dir_cont_pos;

  /* Use the handle to build the key */
  if(cache_inode_fsaldata_2_key(&key, &fsaldata, pgcparam->pclient))
    {
      LogCrit(COMPONENT_CACHE_INODE_GC,
              "cache_inode_gc_clean_entry: could not build hashtable key");

      cache_inode_release_fsaldata_key(&key, pgcparam->pclient);

      return LRU_LIST_DO_NOT_SET_INVALID;
    }

  /* use the key to delete the entry */
  rc = HashTable_Del(pgcparam->ht, &key, &old_key, &old_value);

  if((rc != HASHTABLE_SUCCESS) && (rc != HASHTABLE_ERROR_NO_SUCH_KEY))
    {
      LogCrit(COMPONENT_CACHE_INODE_GC,
              "cache_inode_gc_clean_entry: entry could not be deleted, status = %d",
              rc);

      cache_inode_release_fsaldata_key(&key, pgcparam->pclient);

      return LRU_LIST_DO_NOT_SET_INVALID;
    }
  else if(rc == HASHTABLE_ERROR_NO_SUCH_KEY)
    {
      LogEvent(COMPONENT_CACHE_INODE_GC,
               "cache_inode_gc_clean_entry: entry already deleted, type=%d, status=%d",
               pentry->internal_md.type, rc);

      cache_inode_release_fsaldata_key(&key, pgcparam->pclient);
      return LRU_LIST_SET_INVALID;
    }

  /* Clean up the associated ressources in the FSAL */
  if(FSAL_IS_ERROR(fsal_status = FSAL_CleanObjectResources(pfsal_handle)))
    {
      LogCrit(COMPONENT_CACHE_INODE_GC,
              "cache_inode_gc_clean_entry: Could'nt free FSAL ressources fsal_status.major=%u",
              fsal_status.major);
    }
  LogFullDebug(COMPONENT_CACHE_INODE_GC,
               "++++> pentry %p deleted from HashTable", pentry);

  /* Release the hash key data */
  cache_inode_release_fsaldata_key(&old_key, pgcparam->pclient);

  /* Sanity check: old_value.pdata is expected to be equal to pentry,
   * and is released later in this function */
  if((cache_entry_t *) old_value.pdata != pentry)
    {
      LogCrit(COMPONENT_CACHE_INODE_GC,
              "cache_inode_gc_clean_entry: unexpected pdata %p from hash table (pentry=%p)",
              old_value.pdata, pentry);
    }

  cache_inode_release_fsaldata_key(&key, pgcparam->pclient);

  /* Recover the parent list entries */
  parent_iter = pentry->parent_list;
  while(parent_iter != NULL)
    {
      parent_iter_next = parent_iter->next_parent;

      ReleaseToPool(parent_iter, &pgcparam->pclient->pool_parent);

      parent_iter = parent_iter_next;
    }

  LogFullDebug(COMPONENT_CACHE_INODE_GC,
               "++++> parent directory sent back to pool");

  /* If entry is a DIR_CONTINUE or a DIR_BEGINNING, release pdir_data */
  if(pentry->internal_md.type == DIR_BEGINNING)
    {
      /* Put the pentry back to the pool */
      ReleaseToPool(pentry->object.dir_begin.pdir_data, &pgcparam->pclient->pool_dir_data);
    }

  if(pentry->internal_md.type == DIR_CONTINUE)
    {
      /* Put the pentry back to the pool */
      ReleaseToPool(pentry->object.dir_cont.pdir_data, &pgcparam->pclient->pool_dir_data);
    }
  LogFullDebug(COMPONENT_CACHE_INODE_GC,
               "++++> pdir_data (if needed) sent back to pool");

#ifdef _USE_NFS4_ACL
  /* If entry has NFS4 ACL, release it. */
  cache_inode_gc_acl(pentry);
#endif                          /* _USE_NFS4_ACL */

  /* Free and Destroy the mutex associated with the pentry */
  V_w(&pentry->lock);

  cache_inode_mutex_destroy(pentry);

  /* Put the pentry back to the pool */
  ReleaseToPool(pentry, &pgcparam->pclient->pool_entry);

  /* Regular exit */
  pgcparam->nb_to_be_purged = pgcparam->nb_to_be_purged - 1;

  LogFullDebug(COMPONENT_CACHE_INODE_GC,
               "++++> pentry %p: clean entry is ok", pentry);

  return LRU_LIST_SET_INVALID;  /* Cleaning ok */
}
Example #14
/* remove a lookup entry and return the pointed node */
fsnode_t *h_del_lookup(ino_t parent_inode, dev_t parent_dev, unsigned int parent_gen,
                       char *name, int *p_rc)
{
  lookup_peer_t lpeer;
  lookup_peer_t *p_lpeer;
  lookup_peer_t *p_lpeer_last;
  hash_buffer_t buffkey_in, buffkey_out;
  hash_buffer_t buffdata;
  fsnode_t *p_node;
  int rc;

  /* test if lookup peer is referenced in lookup hashtable */
  lpeer.parent.inum = parent_inode;
  lpeer.parent.dev = parent_dev;
  /* note: generation not needed for Hashtable_Get/Del */

  strncpy(lpeer.name, name, FSAL_MAX_NAME_LEN);

  buffkey_in.pdata = (caddr_t) & lpeer;
  buffkey_in.len = sizeof(lpeer);

  rc = HashTable_Del(lookup_hash, &buffkey_in, &buffkey_out, &buffdata);

  if(p_rc)
    *p_rc = rc;

  if(rc == HASHTABLE_SUCCESS)
    {
      /* Remove lookup from node and decrease n_lookup count */
      p_node = (fsnode_t *) buffdata.pdata;

      assert(p_node->n_lookup > 0);
      p_node->n_lookup--;

      /* find lpeer in node */
      p_lpeer_last = NULL;

      for(p_lpeer = p_node->parent_list; p_lpeer != NULL; p_lpeer = p_lpeer->p_next)
        {
          if(p_lpeer == (lookup_peer_t *) buffkey_out.pdata)
            {
              /* check parent inode and entry name */
              if((p_lpeer->parent.inum != parent_inode)
                 || (p_lpeer->parent.dev != parent_dev)
                 || (p_lpeer->parent.generation != parent_gen)
                 || strncmp(p_lpeer->name, name, FSAL_MAX_NAME_LEN))
                {
                  LogCrit(COMPONENT_FSAL,
                          "NAMESPACE MANAGER: An incompatible direntry was found. In node: %lu.%lu (gen:%u) ,%s  Deleted:%lu.%lu (gen:%u),%s",
                          p_lpeer->parent.inum, p_lpeer->parent.dev,
                          p_lpeer->parent.generation, p_lpeer->name, parent_dev,
                          parent_inode, parent_gen, name);
                  /* remove it anyway... */
                }

              /* remove it from list */

              if(p_lpeer_last)
                p_lpeer_last->p_next = p_lpeer->p_next;
              else
                p_node->parent_list = p_lpeer->p_next;

              /* free it */

              peer_free(p_lpeer);

              /* finished */
              break;

            }
          /* if lpeer found */
          p_lpeer_last = p_lpeer;
        }

      /* return the pointer node */
      return p_node;
    }
  else
    return NULL;

}                               /* h_del_lookup */
Example #15
File: nfs4_acls.c  Project: ic-hep/emi3
void nfs4_acl_release_entry(fsal_acl_t *pacl, fsal_acl_status_t *pstatus)
{
  fsal_acl_data_t acldata;
  hash_buffer_t key, old_key;
  hash_buffer_t old_value;
  int rc;

  /* Set the return default to NFS_V4_ACL_SUCCESS */
  *pstatus = NFS_V4_ACL_SUCCESS;

  P_w(&pacl->lock);
  nfs4_acl_entry_dec_ref(pacl);
  if(pacl->ref)
    {
      V_w(&pacl->lock);
      return;
    }
  else
      LogDebug(COMPONENT_NFS_V4_ACL, "nfs4_acl_release_entry: free acl %p", pacl);

  /* Turn the input to a hash key */
  acldata.naces = pacl->naces;
  acldata.aces = pacl->aces;

  if(nfs4_acldata_2_key(&key, &acldata))
  {
    *pstatus = NFS_V4_ACL_UNAPPROPRIATED_KEY;

    nfs4_release_acldata_key(&key);

    V_w(&pacl->lock);

    return;
  }

  /* use the key to delete the entry */
  if((rc = HashTable_Del(fsal_acl_hash, &key, &old_key, &old_value)) != HASHTABLE_SUCCESS)
    {
      LogCrit(COMPONENT_NFS_V4_ACL,
              "nfs4_acl_release_entry: entry could not be deleted, status = %d",
              rc);

      nfs4_release_acldata_key(&key);

      *pstatus = NFS_V4_ACL_NOT_FOUND;

      V_w(&pacl->lock);

      return;
    }

  /* Release the hash key data */
  nfs4_release_acldata_key(&old_key);

  /* Sanity check: old_value.pdata is expected to be equal to pacl,
   * and is released later in this function */
  if((fsal_acl_t *) old_value.pdata != pacl)
    {
      LogCrit(COMPONENT_NFS_V4_ACL,
              "nfs4_acl_release_entry: unexpected pdata %p from hash table (pacl=%p)",
              old_value.pdata, pacl);
    }

  /* Release the current key */
  nfs4_release_acldata_key(&key);

  V_w(&pacl->lock);

  /* Release acl */
  nfs4_acl_free(pacl);
}
Example #16
int main(int argc, char *argv[])
{
  SetDefaultLogging("TEST");
  SetNamePgm("test_cmchash");

  hash_table_t *ht = NULL;
  hash_parameter_t hparam;
  hash_buffer_t buffval;
  hash_buffer_t buffkey;
  hash_buffer_t buffval2;
  hash_buffer_t buffkey2;
  hash_stat_t statistiques;
  int i;
  int val;
  int rc;
  int res;
  struct Temps debut, fin;
  char tmpstr[10];
  char tmpstr2[10];
  char tmpstr3[10];
  char strtab[MAXTEST][10];
  int critere_recherche = 0;
  int random_val = 0;

  hparam.index_size = PRIME;
  hparam.alphabet_length = 10;
  hparam.nb_node_prealloc = NB_PREALLOC;
  hparam.hash_func_key = simple_hash_func;
  hparam.hash_func_rbt = rbt_hash_func;
  hparam.compare_key = compare_string_buffer;
  hparam.key_to_str = display_buff;
  hparam.val_to_str = display_buff;

  BuddyInit(NULL);

  /* Initialize the table */
  if((ht = HashTable_Init(hparam)) == NULL)
    {
      LogTest("Test FAILED: Bad init");
      exit(1);
    }

  MesureTemps(&debut, NULL);
  LogTest("Created the table");

  for(i = 0; i < MAXTEST; i++)
    {
      sprintf(strtab[i], "%d", i);

      buffkey.len = strlen(strtab[i]);
      buffkey.pdata = strtab[i];

      buffval.len = strlen(strtab[i]);
      buffval.pdata = strtab[i];

      rc = HashTable_Set(ht, &buffkey, &buffval);
      LogFullDebug(COMPONENT_HASHTABLE,"Added %s , %d , return = %d", strtab[i], i, rc);
    }

  MesureTemps(&fin, &debut);
  LogTest("Time to insert %d entries: %s", MAXTEST,
         ConvertiTempsChaine(fin, NULL));

  LogFullDebug(COMPONENT_HASHTABLE, "-----------------------------------------");
  HashTable_Log(COMPONENT_HASHTABLE,ht);

  LogTest("=========================================");

  /* First simple test: check the consistency of the values read */
  critere_recherche = CRITERE;

  sprintf(tmpstr, "%d", critere_recherche);
  buffkey.len = strlen(tmpstr);
  buffkey.pdata = tmpstr;

  MesureTemps(&debut, NULL);
  rc = HashTable_Get(ht, &buffkey, &buffval);
  MesureTemps(&fin, &debut);

  LogTest("Recovery of %d th key ->%d", critere_recherche, rc);

  LogTest("Time to recover = %s", ConvertiTempsChaine(fin, NULL));

  if(rc != HASHTABLE_SUCCESS)
    {
      LogTest("Test FAILED: The key is not found");
      exit(1);
    }

  sprintf(tmpstr, "%d", critere_recherche);
  buffkey.len = strlen(tmpstr);
  buffkey.pdata = tmpstr;

  MesureTemps(&debut, NULL);
  rc = HashTable_Get(ht, &buffkey, &buffval);
  MesureTemps(&fin, &debut);

  LogTest("Recovery of %d th key (test 2) -> %s", critere_recherche, rc);

  LogTest("Time to recover = %s", ConvertiTempsChaine(fin, NULL));

  if(rc != HASHTABLE_SUCCESS)
    {
      LogTest("Test FAILED: The key is not found (test 2)");
      exit(1);
    }

  LogTest("----> retrieved value = len %d ; val = %s", buffval.len, buffval.pdata);
  val = atoi(buffval.pdata);

  if(val != critere_recherche)
    {
      LogTest("Test FAILED: the reading is incorrect");
      exit(1);
    }

  LogTest("Now, I try to retrieve %d entries (taken at random, almost)",
         MAXGET);
  MesureTemps(&debut, NULL);
  for(i = 0; i < MAXGET; i++)
    {
      random_val = random() % MAXTEST;
      sprintf(tmpstr, "%d", random_val);
      buffkey2.len = strlen(tmpstr);
      buffkey2.pdata = tmpstr;

      rc = HashTable_Get(ht, &buffkey2, &buffval2);
      LogFullDebug(COMPONENT_HASHTABLE,"\tPlaying key = %s  --> %s", buffkey2.pdata, buffval2.pdata);
      if(rc != HASHTABLE_SUCCESS)
        {
          LogTest("Error reading %d = %d", i, rc);
          LogTest("Test FAILED: the reading is incorrect");
          exit(1);
        }
    }
  MesureTemps(&fin, &debut);
  LogTest("Time to read %d elements = %s", MAXGET,
         ConvertiTempsChaine(fin, NULL));

  LogTest("-----------------------------------------");

  sprintf(tmpstr, "%d", critere_recherche);
  buffkey.len = strlen(tmpstr);
  buffkey.pdata = tmpstr;

  rc = HashTable_Del(ht, &buffkey, NULL, NULL);
  LogTest("Deleting the key %d --> %d", critere_recherche, rc);

  if(rc != HASHTABLE_SUCCESS)
    {
      LogTest("Test FAILED: delete incorrect");
      exit(1);
    }

  LogTest("=========================================");

  sprintf(tmpstr, "%d", critere_recherche);
  buffkey.len = strlen(tmpstr);
  buffkey.pdata = tmpstr;

  rc = HashTable_Del(ht, &buffkey, NULL, NULL);
  LogTest("Deleting the key %d (2nd try) --> %d", critere_recherche, rc);

  if(rc != HASHTABLE_ERROR_NO_SUCH_KEY)
    {
      printf("Test FAILED: delete incorrect");
      exit(1);
    }

  LogTest("=========================================");

  sprintf(tmpstr, "%d", critere_recherche);
  buffkey.len = strlen(tmpstr);
  buffkey.pdata = tmpstr;

  rc = HashTable_Get(ht, &buffkey, &buffval);
  LogTest
      ("Recovery of the %d key (erased) (must return HASH_ERROR_NO_SUCH_KEY) = %d --> %d",
       critere_recherche, HASHTABLE_ERROR_NO_SUCH_KEY, rc);

  if(rc != HASHTABLE_ERROR_NO_SUCH_KEY)
    {
      LogTest("Test FAILED: the reading is incorrect");
      exit(1);
    }
  LogTest("-----------------------------------------");

  LogTest
      ("Destruction of %d items, taken at random (well if you want ... I use srandom)",
       MAXDESTROY);
  srandom(getpid());
  random_val = random() % MAXTEST;

  MesureTemps(&debut, NULL);
  for(i = 0; i < MAXDESTROY; i++)
    {
      /* 
      it used to be that the random values were chosen with
      repeated calls to random(), but if the same key comes up twice,
      that causes a fail.  This way we start with a random value and
      just linearly delete from it
      */

      random_val = (random_val + 1) % MAXTEST;
      sprintf(tmpstr, "%d", random_val);
      LogTest("\t Delete %d", random_val);
      buffkey.len = strlen(tmpstr);
      buffkey.pdata = tmpstr;

      rc = HashTable_Del(ht, &buffkey, NULL, NULL);
      

      if(rc != HASHTABLE_SUCCESS)
        {
          LogTest("Error on delete %d = %d", i, rc);
          LogTest("Test FAILED: delete incorrect");
          exit(1);
        }
    }
  MesureTemps(&fin, &debut);
  LogTest("Time to delete %d elements = %s", MAXDESTROY,
         ConvertiTempsChaine(fin, NULL));

  LogTest("-----------------------------------------");

  LogTest("Now, I try to retrieve %d entries (if necessary destroyed)",
         MAXGET);
  MesureTemps(&debut, NULL);
  for(i = 0; i < MAXGET; i++)
    {
      random_val = random() % MAXTEST;
      sprintf(tmpstr, "%d", random_val);
      buffkey.len = strlen(tmpstr);
      buffkey.pdata = tmpstr;

      rc = HashTable_Get(ht, &buffkey, &buffval);
    }
  MesureTemps(&fin, &debut);
  LogTest("Tie to read %d elements = %s", MAXGET,
         ConvertiTempsChaine(fin, NULL));

  LogTest("-----------------------------------------");
  LogTest("Writing a duplicate key ");
  sprintf(tmpstr, "%d", CRITERE_2);
  buffkey.len = strlen(tmpstr);
  buffkey.pdata = tmpstr;
  rc = HashTable_Test_And_Set(ht, &buffkey, &buffval, HASHTABLE_SET_HOW_SET_NO_OVERWRITE);
  LogTest("The value should be HASHTABLE_ERROR_KEY_ALREADY_EXISTS  = %d --> %d",
         HASHTABLE_ERROR_KEY_ALREADY_EXISTS, rc);
  if(rc != HASHTABLE_ERROR_KEY_ALREADY_EXISTS)
    {
      LogTest("Test FAILED: duplicate key");
      exit(1);
    }
  LogTest("-----------------------------------------");

  HashTable_Log(COMPONENT_HASHTABLE,ht);
  LogFullDebug(COMPONENT_HASHTABLE,"-----------------------------------------");

  LogTest("Displaying table statistics ");
  HashTable_GetStats(ht, &statistiques);
  LogTest(" Number of entries = %d", statistiques.dynamic.nb_entries);

  LogTest("   Successful operations  : Set = %d,  Get = %d,  Del = %d,  Test = %d",
         statistiques.dynamic.ok.nb_set, statistiques.dynamic.ok.nb_get,
         statistiques.dynamic.ok.nb_del, statistiques.dynamic.ok.nb_test);

  LogTest("   Failed operations : Set = %d,  Get = %d,  Del = %d,  Test = %d",
         statistiques.dynamic.err.nb_set, statistiques.dynamic.err.nb_get,
         statistiques.dynamic.err.nb_del, statistiques.dynamic.err.nb_test);

  LogTest("   Operations 'NotFound': Set = %d,  Get = %d,  Del = %d,  Test = %d",
         statistiques.dynamic.notfound.nb_set, statistiques.dynamic.notfound.nb_get,
         statistiques.dynamic.notfound.nb_del, statistiques.dynamic.notfound.nb_test);

  LogTest
      ("  Calculated statistics: min_rbt_node = %d,  max_rbt_node = %d,  average_rbt_node = %d",
       statistiques.computed.min_rbt_num_node, statistiques.computed.max_rbt_num_node,
       statistiques.computed.average_rbt_num_node);

  /* Check that the statistics values are consistent */
  if(statistiques.dynamic.ok.nb_set != MAXTEST)
    {
      LogTest("Test FAILED: Incorrect statistics: ok.nb_set ");
      exit(1);
    }

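  /* Expected gets: 2 lookups of critere_recherche, MAXGET random gets,
   * 1 get of the deleted key, then MAXGET more gets after the deletions,
   * i.e. 2 * MAXGET + 3 in total across 'ok' and 'notfound'. */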
  if(statistiques.dynamic.ok.nb_get + statistiques.dynamic.notfound.nb_get !=
     2 * MAXGET + 3)
    {
      LogTest("Test FAILED: Incorrect statistics: *.nb_get ");
      exit(1);
    }

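  /* Expected deletes: the explicit delete of critere_recherche plus the
   * MAXDESTROY random deletes succeed; the second delete of the same key
   * accounts for the single 'notfound' delete. */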
  if(statistiques.dynamic.ok.nb_del != MAXDESTROY + 1
     || statistiques.dynamic.notfound.nb_del != 1)
    {
      LogTest("Test ECHOUE : statistiques incorrectes: *.nb_del ");
      exit(1);
    }

  if(statistiques.dynamic.err.nb_test != 1)
    {
      LogTest("Test ECHOUE : statistiques incorrectes: err.nb_test ");
      exit(1);
    }

  /* All tests passed */
  BuddyDumpMem(stdout);

  LogTest("\n-----------------------------------------");
  LogTest("Test succeeded: all tests pass successfully");

  exit(0);
}
Example #17
int main(int argc, char *argv[])
{
  SetDefaultLogging("TEST");
  SetNamePgm("test_libcmc_bugdelete");
  LogTest("Initialized test program");
  
  hash_table_t *ht = NULL;
  hash_parameter_t hparam;
  hash_buffer_t buffval;
  hash_buffer_t buffkey;
  hash_buffer_t buffval2;
  hash_buffer_t buffkey2;
  hash_stat_t statistiques;
  int i;
  int rc;
  struct Temps debut, fin;
  char tmpstr[10];
  char strtab[MAXTEST][10];
  int critere_recherche = 0;
  int random_val = 0;

  hparam.index_size = PRIME;
  hparam.alphabet_length = 10;
  hparam.nb_node_prealloc = NB_PREALLOC;
  hparam.hash_func_key = simple_hash_func;
  hparam.hash_func_rbt = rbt_hash_func;
  hparam.hash_func_both = NULL ; /* BUGAZOMEU */
  hparam.compare_key = compare_string_buffer;
  hparam.key_to_str = display_buff;
  hparam.val_to_str = display_buff;

  BuddyInit(NULL);

  /* Initialize the table */
  if((ht = HashTable_Init(hparam)) == NULL)
    {
      LogTest("Test FAILED: Bad init");
      exit(1);
    }

  MesureTemps(&debut, NULL);
  LogTest("Created hash table");

  for(i = 0; i < MAXTEST; i++)
    {
      sprintf(strtab[i], "%d", i);

      buffkey.len = strlen(strtab[i]);
      buffkey.pdata = strtab[i];

      buffval.len = strlen(strtab[i]);
      buffval.pdata = strtab[i];

      rc = HashTable_Set(ht, &buffkey, &buffval);
      LogFullDebug(COMPONENT_HASHTABLE,
                   "Added %s , %d , return code = %d", strtab[i], i, rc);
    }

  MesureTemps(&fin, &debut);
  LogTest("Time to insert %d entries: %s", MAXTEST,
         ConvertiTempsChaine(fin, NULL));

  LogFullDebug(COMPONENT_HASHTABLE,
               "-----------------------------------------");
  HashTable_Log(COMPONENT_HASHTABLE, ht);
  LogFullDebug(COMPONENT_HASHTABLE,
               "=========================================");

  /* First simple test: check the consistency of the values read */
  critere_recherche = CRITERE;

  sprintf(tmpstr, "%d", critere_recherche);
  buffkey.len = strlen(tmpstr);
  buffkey.pdata = tmpstr;

  MesureTemps(&debut, NULL);
  rc = HashTable_Get(ht, &buffkey, &buffval);
  MesureTemps(&fin, &debut);

  LogTest("Now, I try to retrieve %d entries (taken at random, almost)",
          MAXGET);

  MesureTemps(&debut, NULL);
  for(i = 0; i < MAXGET; i++)
    {
      random_val = random() % MAXTEST;
      sprintf(tmpstr, "%d", random_val);
      buffkey2.len = strlen(tmpstr);
      buffkey2.pdata = tmpstr;

      rc = HashTable_Get(ht, &buffkey2, &buffval2);
      LogTest("\tPlaying key = %s  --> %s", buffkey2.pdata, buffval2.pdata);
      if(rc != HASHTABLE_SUCCESS)
        {
          LogTest("Error reading %d = %d", i, rc);
          LogTest("Test FAILED: the reading is incorrect");
          exit(1);
        }
    }
  MesureTemps(&fin, &debut);
  LogTest("Time to read elements %d = %s", MAXGET,
         ConvertiTempsChaine(fin, NULL));

  LogTest("-----------------------------------------");

  sprintf(tmpstr, "%d", critere_recherche);
  buffkey.len = strlen(tmpstr);
  buffkey.pdata = tmpstr;

  sprintf(tmpstr, "%d", critere_recherche);
  buffkey.len = strlen(tmpstr);
  buffkey.pdata = tmpstr;

  sprintf(tmpstr, "%d", critere_recherche);
  buffkey.len = strlen(tmpstr);
  buffkey.pdata = tmpstr;

  srandom(getpid());

  MesureTemps(&debut, NULL);
  for(i = 0; i < MAXDESTROY; i++)
    {
      random_val = bugdelete_key_array[i];
      sprintf(tmpstr, "%d", random_val);

      buffkey.len = strlen(tmpstr);
      buffkey.pdata = tmpstr;
      LogFullDebug(COMPONENT_HASHTABLE, "\t Erase %u -> %lu | %lu",
                   random_val,
                   simple_hash_func(&hparam, &buffkey),
                   rbt_hash_func(&hparam, &buffkey));

      rc = HashTable_Del(ht, &buffkey, NULL, NULL);
      if(rc != HASHTABLE_SUCCESS)
        {
          LogTest("Erreur lors de la destruction de %d = %d", random_val, rc);
          LogTest("Test FAILED: delete incorrect");
          exit(1);
        }
    }
  MesureTemps(&fin, &debut);
  LogTest("Time to delete %d elements = %s", MAXDESTROY,
          ConvertiTempsChaine(fin, NULL));

  LogTest("-----------------------------------------");

  LogTest("Now, I try to retrieve %d entries (possibly destroyed)",
          MAXGET);

  MesureTemps(&debut, NULL);
  for(i = 0; i < MAXGET; i++)
    {
      random_val = random() % MAXTEST;
      sprintf(tmpstr, "%d", random_val);
      buffkey.len = strlen(tmpstr);
      buffkey.pdata = tmpstr;

      rc = HashTable_Get(ht, &buffkey, &buffval);
    }
  MesureTemps(&fin, &debut);
  LogTest("Time to read %d elements = %s", MAXGET,
          ConvertiTempsChaine(fin, NULL));

  LogTest("-----------------------------------------");
  LogTest("Writing a duplicated key");
  sprintf(tmpstr, "%d", CRITERE_2);
  buffkey.len = strlen(tmpstr);
  buffkey.pdata = tmpstr;
  rc = HashTable_Test_And_Set(ht, &buffkey, &buffval, HASHTABLE_SET_HOW_SET_NO_OVERWRITE);
  LogTest("The value must be HASHTABLE_ERROR_KEY_ALREADY_EXISTS  = %d --> %d",
         HASHTABLE_ERROR_KEY_ALREADY_EXISTS, rc);
  if(rc != HASHTABLE_ERROR_KEY_ALREADY_EXISTS)
    {
      LogTest("Test ECHOUE : Clef redondante");
      exit(1);
    }
  LogTest("-----------------------------------------");

  HashTable_Log(COMPONENT_HASHTABLE,ht);
  LogFullDebug(COMPONENT_HASHTABLE,"-----------------------------------------");

  LogTest("Displaying table statistics");
  HashTable_GetStats(ht, &statistiques);
  LogTest(" Number of Entrees = %d", statistiques.dynamic.nb_entries);

  LogTest(" Successful operations : Set = %d,  Get = %d,  Del = %d,  Test = %d",
          statistiques.dynamic.ok.nb_set, statistiques.dynamic.ok.nb_get,
          statistiques.dynamic.ok.nb_del, statistiques.dynamic.ok.nb_test);

  LogTest("   Failed operations : Set = %d,  Get = %d,  Del = %d,  Test = %d",
          statistiques.dynamic.err.nb_set, statistiques.dynamic.err.nb_get,
          statistiques.dynamic.err.nb_del, statistiques.dynamic.err.nb_test);

  LogTest("   Operations 'NotFound': Set = %d,  Get = %d,  Del = %d,  Test = %d",
          statistiques.dynamic.notfound.nb_set, statistiques.dynamic.notfound.nb_get,
          statistiques.dynamic.notfound.nb_del, statistiques.dynamic.notfound.nb_test);

  LogTest("  Statistics computed: min_rbt_node = %d,  max_rbt_node = %d,  average_rbt_node = %d",
          statistiques.computed.min_rbt_num_node, statistiques.computed.max_rbt_num_node,
          statistiques.computed.average_rbt_num_node);

  /* Check that the statistics values are consistent */
  if(statistiques.dynamic.ok.nb_set != MAXTEST)
    {
      LogTest("Test FAILED: Incorrect statistics: ok.nb_set ");
      exit(1);
    }

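  /* Expected gets: one lookup of critere_recherche plus MAXGET random gets
   * before the deletions and MAXGET after, i.e. 2 * MAXGET + 1 in total. */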
  if(statistiques.dynamic.ok.nb_get + statistiques.dynamic.notfound.nb_get !=
     2 * MAXGET + 1)
    {
      LogTest("Test FAILED: Incorrect statistics: *.nb_get.  Expected %d, got %d",
              2 * MAXGET + 1,
              statistiques.dynamic.ok.nb_get + statistiques.dynamic.notfound.nb_get);
      exit(1);
    }

  if(statistiques.dynamic.ok.nb_del != MAXDESTROY)
    {
      LogTest("Test FAILED: Incorrect statistics: *.nb_del. Expected %d, got %d",
              MAXDESTROY, statistiques.dynamic.ok.nb_del);
      exit(1);
    }

  if(statistiques.dynamic.notfound.nb_del != 0)
    {
      LogTest("Test FAILED: Incorrect statistics: *.nb_del. Expected %d, got %d",
              0, statistiques.dynamic.notfound.nb_del);
      exit(1);
    }



  if(statistiques.dynamic.err.nb_test != 1)
    {
      LogTest("Test FAILED: Incorrect statistics: err.nb_test ");
      exit(1);
    }

  /* All tests passed */
  BuddyDumpMem(stdout);

  LogTest("\n-----------------------------------------");
  LogTest("Test succeeded: all tests pass successfully");

  exit(0);
}