Example #1
static fsal_status_t lustre_readsymlink(struct fsal_obj_handle *obj_hdl,
					struct gsh_buffdesc *link_content,
					bool refresh)
{
	struct lustre_fsal_obj_handle *myself = NULL;
	char mypath[MAXPATHLEN];
	int rc = 0;
	fsal_errors_t fsal_error = ERR_FSAL_NO_ERROR;

	if (obj_hdl->type != SYMBOLIC_LINK) {
		fsal_error = ERR_FSAL_FAULT;
		goto out;
	}
	myself =
	    container_of(obj_hdl, struct lustre_fsal_obj_handle, obj_handle);
	if (refresh) {		/* lazy load or LRU'd storage */
		ssize_t retlink;
		char link_buff[PATH_MAX+1];

		if (myself->u.symlink.link_content != NULL) {
			gsh_free(myself->u.symlink.link_content);
			myself->u.symlink.link_content = NULL;
			myself->u.symlink.link_size = 0;
		}
		lustre_handle_to_path(obj_hdl->fs->path,
				      myself->handle, mypath);
		retlink = readlink(mypath, link_buff, PATH_MAX);
		if (retlink < 0 || retlink == PATH_MAX) {
			rc = errno;
			if (retlink == PATH_MAX)
				rc = ENAMETOOLONG;
			fsal_error = posix2fsal_error(rc);
			goto out;
		}

		myself->u.symlink.link_content = gsh_malloc(retlink + 1);
		if (myself->u.symlink.link_content == NULL) {
			fsal_error = ERR_FSAL_NOMEM;
			goto out;
		}
		memcpy(myself->u.symlink.link_content, link_buff, retlink);
		myself->u.symlink.link_content[retlink] = '\0';
		myself->u.symlink.link_size = retlink + 1;
	}
	if (myself->u.symlink.link_content == NULL) {
		fsal_error = ERR_FSAL_FAULT;	/* probably a better error?? */
		goto out;
	}
	link_content->len = myself->u.symlink.link_size;
	link_content->addr = gsh_malloc(link_content->len);
	if (link_content->addr == NULL) {
		fsal_error = ERR_FSAL_NOMEM;
		goto out;
	}
	memcpy(link_content->addr, myself->u.symlink.link_content,
	       myself->u.symlink.link_size);

 out:
	return fsalstat(fsal_error, rc);
}
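The pattern above (readlink into a bounded stack buffer, then duplicate into a right-sized heap copy) can be shown in isolation. A minimal sketch using only POSIX readlink(2) and plain malloc/free, so neither the gsh_* wrappers nor the FSAL types are assumed:

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Return a heap-allocated, NUL-terminated copy of the symlink target,
 * or NULL on error (errno is set). The caller frees the result. */
static char *dup_symlink_target(const char *path)
{
	char buf[PATH_MAX];
	char *copy;
	ssize_t retlink = readlink(path, buf, sizeof(buf));

	if (retlink < 0)
		return NULL;			/* readlink failed, errno set */
	if (retlink == (ssize_t)sizeof(buf)) {
		errno = ENAMETOOLONG;		/* target may be truncated */
		return NULL;
	}

	copy = malloc(retlink + 1);
	if (copy == NULL)
		return NULL;
	memcpy(copy, buf, retlink);
	copy[retlink] = '\0';			/* readlink does not NUL-terminate */
	return copy;
}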
Example #2
/* Allocate supplementary groups buffer */
static bool my_getgrouplist_alloc(char *user,
				  gid_t gid,
				  struct group_data *gdata)
{
	int ngroups = 0;
	gid_t *groups, *groups2;

	/* We call getgrouplist() with ngroups == 0 first. This should
	 * always fail (return -1), but ngroups is a value-result
	 * argument: the man page says "on return it always contains
	 * the number of groups found for user." The errno value after
	 * this call is unreliable (usually 0, but ERANGE under some
	 * environments), so we ignore it and trust only ngroups.
	 */
	(void)getgrouplist(user, gid, NULL, &ngroups);

	/* Allocate gdata->groups with the right size then call
	 * getgrouplist() a second time to get the actual group list
	 */
	groups = gsh_malloc(ngroups * sizeof(gid_t));
	if (groups == NULL)
		return false;

	if (getgrouplist(user, gid, groups, &ngroups) == -1) {
		LogEvent(COMPONENT_IDMAPPER,
			 "getgrouplist for user: %s failed retrying", user);

		gsh_free(groups);

		/* Try with the largest ngroups we support */
		ngroups = 1000;
		groups2 = gsh_malloc(ngroups * sizeof(gid_t));
		if (groups2 == NULL)
			return false;

		if (getgrouplist(user, gid, groups2, &ngroups) == -1) {
			LogWarn(COMPONENT_IDMAPPER,
				"getgrouplist for user:%s failed, ngroups: %d",
				user, ngroups);
			gsh_free(groups2);
			return false;
		}

		/* Resize the buffer */
		groups = gsh_realloc(groups2, ngroups * sizeof(gid_t));
		if (groups == NULL) /* Use the large buffer! */
			groups = groups2;
	}

	gdata->groups = groups;
	gdata->nbgroups = ngroups;

	return true;
}
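The probe-then-allocate contract described in the comment above can be exercised on its own. A minimal sketch with glibc's getgrouplist(3) and plain malloc; the gsh_* wrappers and the 1000-group retry fallback from the example are intentionally left out:

#include <grp.h>
#include <stdlib.h>
#include <sys/types.h>

/* Return a malloc'd gid array and store its length in *count, or NULL
 * on failure. The caller frees the result. */
static gid_t *fetch_groups(const char *user, gid_t primary, int *count)
{
	int ngroups = 0;
	gid_t *groups;

	/* First call with ngroups == 0: it fails, but reports how many
	 * groups the user belongs to through the value-result argument. */
	(void)getgrouplist(user, primary, NULL, &ngroups);
	if (ngroups <= 0)
		return NULL;

	groups = malloc(ngroups * sizeof(gid_t));
	if (groups == NULL)
		return NULL;

	/* Second call with a buffer of the reported size */
	if (getgrouplist(user, primary, groups, &ngroups) == -1) {
		free(groups);	/* the group list changed between calls */
		return NULL;
	}

	*count = ngroups;
	return groups;
}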
Example #3
static fsal_status_t readsymlink(struct fsal_obj_handle *obj_hdl,
				 struct gsh_buffdesc *link_content,
				 bool refresh)
{
	fsal_errors_t fsal_error = ERR_FSAL_NO_ERROR;
	int retval = 0;
	struct gpfs_fsal_obj_handle *myself = NULL;
	fsal_status_t status;

	if (obj_hdl->type != SYMBOLIC_LINK) {
		fsal_error = ERR_FSAL_FAULT;
		goto out;
	}
	myself = container_of(obj_hdl, struct gpfs_fsal_obj_handle, obj_handle);
	if (refresh) {		/* lazy load or LRU'd storage */
		size_t retlink;
		char link_buff[PATH_MAX];

		retlink = PATH_MAX - 1;

		if (myself->u.symlink.link_content != NULL) {
			gsh_free(myself->u.symlink.link_content);
			myself->u.symlink.link_content = NULL;
			myself->u.symlink.link_size = 0;
		}

		status =
		    GPFSFSAL_readlink(obj_hdl, op_ctx, link_buff, &retlink);
		if (FSAL_IS_ERROR(status))
			return status;

		myself->u.symlink.link_content = gsh_malloc(retlink + 1);

		memcpy(myself->u.symlink.link_content, link_buff, retlink);
		myself->u.symlink.link_content[retlink] = '\0';
		myself->u.symlink.link_size = retlink + 1;
	}
	if (myself->u.symlink.link_content == NULL) {
		fsal_error = ERR_FSAL_FAULT;	/* probably a better error?? */
		goto out;
	}
	link_content->len = myself->u.symlink.link_size;
	link_content->addr = gsh_malloc(link_content->len);

	memcpy(link_content->addr, myself->u.symlink.link_content,
	       link_content->len);

 out:

	return fsalstat(fsal_error, retval);
}
Example #4
bool idmapper_init(void)
{
#ifdef USE_NFSIDMAP
	if (!nfs_param.nfsv4_param.use_getpwnam) {
		if (nfs4_init_name_mapping(nfs_param.nfsv4_param.idmapconf)
		    != 0) {
			return false;
		}
		owner_domain.addr = gsh_malloc(NFS4_MAX_DOMAIN_LEN + 1);
		if (owner_domain.addr == NULL)
			return false;

		if (nfs4_get_default_domain
		    (NULL, owner_domain.addr, NFS4_MAX_DOMAIN_LEN) != 0) {
			gsh_free(owner_domain.addr);
			return false;
		}
		owner_domain.len = strlen(owner_domain.addr);
	}
#endif				/* USE_NFSIDMAP */
	if (nfs_param.nfsv4_param.use_getpwnam) {
		owner_domain.addr = gsh_strdup(nfs_param.nfsv4_param
					       .domainname);
		if (owner_domain.addr == NULL)
			return false;

		owner_domain.len = strlen(nfs_param.nfsv4_param.domainname);
	}

	idmapper_cache_init();
	return true;
}
Example #5
/**
 * @brief Set a state into the stateid hashtable.
 *
 * @param[in] other stateid4.other
 * @param[in] state The state to add
 *
 * @retval 1 if ok.
 * @retval 0 if not ok.
 */
int nfs4_State_Set(char other[OTHERSIZE], state_t *state)
{
	struct gsh_buffdesc buffkey;
	struct gsh_buffdesc buffval;

	buffkey.addr = gsh_malloc(OTHERSIZE);

	if (buffkey.addr == NULL)
		return 0;

	LogFullDebug(COMPONENT_STATE, "Allocating stateid key %p",
		     buffkey.addr);

	memcpy(buffkey.addr, other, OTHERSIZE);
	buffkey.len = OTHERSIZE;

	buffval.addr = state;
	buffval.len = sizeof(state_t);

	if (hashtable_test_and_set
	    (ht_state_id, &buffkey, &buffval,
	     HASHTABLE_SET_HOW_SET_NO_OVERWRITE) != HASHTABLE_SUCCESS) {
		LogCrit(COMPONENT_STATE,
			"hashtable_test_and_set failed for key %p",
			buffkey.addr);
		gsh_free(buffkey.addr);
		return 0;
	}

	return 1;
}
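The example gives the hash table its own heap copy of the fixed-size key and frees it again if the insert fails. The same idea as a tiny generic helper, sketched with plain malloc (memdup is a hypothetical name, not a ganesha API):

#include <stdlib.h>
#include <string.h>

/* Duplicate an arbitrary fixed-size buffer, e.g. a stateid's "other"
 * field. The copy belongs to the caller (here, the hash-table insert
 * path) and must be freed by it on failure. Returns NULL if malloc fails. */
static void *memdup(const void *src, size_t len)
{
	void *copy = malloc(len);

	if (copy != NULL)
		memcpy(copy, src, len);
	return copy;
}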
Example #6
/**
 * @brief Deep copy a LOCK4denied
 *
 * @param[out] denied_dst Target
 * @param[in]  denied_src Source
 */
void Copy_nfs4_denied(LOCK4denied *denied_dst,
		      LOCK4denied *denied_src)
{
  memcpy(denied_dst, denied_src, sizeof(*denied_dst));

  if(denied_src->owner.owner.owner_val != unknown_owner.so_owner_val &&
     denied_src->owner.owner.owner_val != NULL)
    {
      denied_dst->owner.owner.owner_val
        = gsh_malloc(denied_src->owner.owner.owner_len);
      LogFullDebug(COMPONENT_STATE,
                   "denied_dst->owner.owner.owner_val = %p",
                   denied_dst->owner.owner.owner_val);
      if(denied_dst->owner.owner.owner_val)
        memcpy(denied_dst->owner.owner.owner_val,
               denied_src->owner.owner.owner_val,
               denied_src->owner.owner.owner_len);
    }

  if(denied_dst->owner.owner.owner_val == NULL)
    {
      denied_dst->owner.owner.owner_len = unknown_owner.so_owner_len;
      denied_dst->owner.owner.owner_val = unknown_owner.so_owner_val;
    }
}
Example #7
int fsal_internal_proxy_fsal_path_2_utf8(fsal_path_t * ppath, utf8string * utf8str)
{
  char tmpstr[FSAL_MAX_PATH_LEN];
  fsal_status_t fsal_status;

  if(ppath == NULL || utf8str == NULL)
    return FALSE;

  fsal_status = FSAL_path2str(ppath, tmpstr, FSAL_MAX_PATH_LEN);
  if(fsal_status.major != ERR_FSAL_NO_ERROR)
    return FALSE;

  if(utf8str->utf8string_len == 0)
    {
      if((utf8str->utf8string_val = gsh_malloc(ppath->len)) == NULL)
        return FALSE;
      else
        utf8str->utf8string_len = ppath->len;
    }

  if(str2utf8(tmpstr, utf8str) == -1)
    return FALSE;

  return TRUE;
}                               /* fsal_internal_proxy_fsal_path_2_utf8 */
Example #8
int nlm_send_async_res_nlm4(state_nlm_client_t *host, state_async_func_t func,
			    nfs_res_t *pres)
{
	state_async_queue_t *arg = gsh_malloc(sizeof(*arg));
	state_nlm_async_data_t *nlm_arg;
	state_status_t status;

	if (arg != NULL) {
		nlm_arg = &arg->state_async_data.state_nlm_async_data;
		memset(arg, 0, sizeof(*arg));
		arg->state_async_func = func;
		nlm_arg->nlm_async_host = host;
		nlm_arg->nlm_async_args.nlm_async_res = *pres;
		if (!copy_netobj
		    (&nlm_arg->nlm_async_args.nlm_async_res.res_nlm4.cookie,
		     &pres->res_nlm4.cookie)) {
			LogCrit(COMPONENT_NLM,
				"Unable to copy async response file handle");
			gsh_free(arg);
			return NFS_REQ_DROP;
		}
	} else {
		LogCrit(COMPONENT_NLM, "Unable to allocate async response");
		return NFS_REQ_DROP;
	}

	status = state_async_schedule(arg);

	if (status != STATE_SUCCESS) {
		gsh_free(arg);
		return NFS_REQ_DROP;
	}

	return NFS_REQ_OK;
}
Example #9
avl_unit_val_t *avl_unit_new_val(unsigned long intval)
{
	avl_unit_val_t *v = gsh_malloc(sizeof(avl_unit_val_t));
	memset(v, 0, sizeof(avl_unit_val_t));
	v->val = (intval + 1);

	return v;
}
Example #10
avl_unit_val_t *avl_unit_new_val(const char *name)
{
	avl_unit_val_t *v = gsh_malloc(sizeof(avl_unit_val_t));

	memset(v, 0, sizeof(avl_unit_val_t));
	v->name = (char *)name;

	return v;
}
Example #11
/**
 * @brief Add a user entry to the cache
 *
 * @note The caller must hold uid2grp_user_lock for write.
 *
 * @param[in] group_data that has supplementary groups allocated
 *
 * @retval true on success.
 * @retval false if our reach exceeds our grasp.
 */
bool uid2grp_add_user(struct group_data *gdata)
{
	struct avltree_node *name_node;
	struct avltree_node *id_node;
	struct avltree_node *name_node2 = NULL;
	struct avltree_node *id_node2 = NULL;
	struct cache_info *info;
	struct cache_info *tmp;

	info = gsh_malloc(sizeof(struct cache_info));
	if (!info) {
		LogEvent(COMPONENT_IDMAPPER, "memory alloc failed");
		return false;
	}
	info->uid = gdata->uid;
	info->uname.addr = gdata->uname.addr;
	info->uname.len = gdata->uname.len;
	info->gdata = gdata;

	/* The refcount on group_data should be 1 when we put it in
	 * AVL trees.
	 */
	uid2grp_hold_group_data(gdata);

	/* We may have lost the race to insert. We remove existing
	 * entry and insert this new entry if so!
	 */
	name_node = avltree_insert(&info->uname_node, &uname_tree);
	if (unlikely(name_node)) {
		tmp = avltree_container_of(name_node,
					   struct cache_info,
					   uname_node);
		uid2grp_remove_user(tmp);
		name_node2 = avltree_insert(&info->uname_node, &uname_tree);
	}

	id_node = avltree_insert(&info->uid_node, &uid_tree);
	if (unlikely(id_node)) {
		/* We should not come here unless someone changed uid of
		 * a user. Remove old entry and re-insert the new
		 * entry.
		 */
		tmp = avltree_container_of(id_node,
					   struct cache_info,
					   uid_node);
		uid2grp_remove_user(tmp);
		id_node2 = avltree_insert(&info->uid_node, &uid_tree);
	}
	uid_grplist_cache[info->uid % id_cache_size] = &info->uid_node;

	if (name_node && id_node)
		LogWarn(COMPONENT_IDMAPPER, "shouldn't happen, internal error");
	if ((name_node && name_node2) || (id_node && id_node2))
		LogWarn(COMPONENT_IDMAPPER, "shouldn't happen, internal error");

	return true;
}
Example #12
void fill_netobj(netobj *dst, char *data, int len)
{
	dst->n_len = 0;
	dst->n_bytes = NULL;
	if (len != 0) {
		dst->n_bytes = gsh_malloc(len);
		dst->n_len = len;
		memcpy(dst->n_bytes, data, len);
	}
}
Example #13
void copy_netobj(netobj *dst, netobj *src)
{
	if (src->n_len != 0) {
		dst->n_bytes = gsh_malloc(src->n_len);
		memcpy(dst->n_bytes, src->n_bytes, src->n_len);
	} else
		dst->n_bytes = NULL;

	dst->n_len = src->n_len;
}
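Whichever variant of copy_netobj()/fill_netobj() is used (see also Examples #18 and #22), the copy owns its n_bytes buffer and the caller must release it. A possible release helper, sketched under the assumption that gsh_free() pairs with gsh_malloc() the way free() pairs with malloc() (netobj_release is a hypothetical name):

/* Release a netobj filled by fill_netobj()/copy_netobj(); safe to call
 * on an object whose buffer was never allocated. */
static void netobj_release(netobj *obj)
{
	if (obj->n_bytes != NULL)
		gsh_free(obj->n_bytes);
	obj->n_bytes = NULL;
	obj->n_len = 0;
}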
Example #14
int nfs_ip_stats_add(hash_table_t * ht_ip_stats,
                     sockaddr_t * ipaddr, pool_t *ip_stats_pool)
{
    hash_buffer_t buffkey;
    hash_buffer_t buffdata;
    nfs_ip_stats_t *g = NULL;
    sockaddr_t *pipaddr = NULL;

    /* Do nothing if configuration disables IP_Stats */
    if(nfs_param.core_param.dump_stats_per_client == 0)
        return IP_STATS_SUCCESS;

    /* Entry to be cached */
    g = pool_alloc(ip_stats_pool, NULL);

    if(g == NULL)
        return IP_STATS_INSERT_MALLOC_ERROR;

    if((pipaddr = gsh_malloc(sizeof(sockaddr_t))) == NULL)
    {
        pool_free(ip_stats_pool, g);
        return IP_STATS_INSERT_MALLOC_ERROR;
    }

    /* The key is a private heap copy of the client address;
     * buffkey.len is set to sizeof(sockaddr_t) below */
    memcpy(pipaddr, ipaddr, sizeof(sockaddr_t));

    buffkey.pdata = pipaddr;
    buffkey.len = sizeof(sockaddr_t);

    /* Initialize the per-client statistics entry */
    g->nb_call = 0;
    g->nb_req_nfs2 = 0;
    g->nb_req_nfs3 = 0;
    g->nb_req_nfs4 = 0;
    g->nb_req_mnt1 = 0;
    g->nb_req_mnt3 = 0;

    memset(g->req_mnt1, 0, MNT_V1_NB_COMMAND * sizeof(int));
    memset(g->req_mnt3, 0, MNT_V3_NB_COMMAND * sizeof(int));
    memset(g->req_nfs2, 0, NFS_V2_NB_COMMAND * sizeof(int));
    memset(g->req_nfs3, 0, NFS_V3_NB_COMMAND * sizeof(int));

    buffdata.pdata = (caddr_t) g;
    buffdata.len = sizeof(nfs_ip_stats_t);

    if(HashTable_Set(ht_ip_stats, &buffkey, &buffdata) != HASHTABLE_SUCCESS)
    {
        /* Don't leak the entry and the key copy on insert failure */
        gsh_free(pipaddr);
        pool_free(ip_stats_pool, g);
        return IP_STATS_INSERT_MALLOC_ERROR;
    }

    return IP_STATS_SUCCESS;
}                               /* nfs_ip_stats_add */
Example #15
void * fridgethr_freeze( )
{
  fridge_entry_t * pfe = NULL ;
  struct timespec timeout ;
  struct timeval    tp;
  int rc = 0 ;
  void *arg = NULL;

  if( ( rc = gettimeofday( &tp, NULL ) ) != 0 )
    return NULL ;

  /* Be careful: pthread_cond_timedwait takes an *absolute* time as its timeout, not a duration */
  timeout.tv_sec = tp.tv_sec + nfs_param.core_param.tcp_fridge_expiration_delay ;
  timeout.tv_nsec = 0 ; 

  if ((pfe = gsh_malloc(sizeof(fridge_entry_t))) == NULL)
    return NULL ;

  pfe->thrid = pthread_self();
  pthread_mutex_init(&(pfe->condmutex), NULL);
  pthread_cond_init(&(pfe->condvar), NULL);
  pfe->pprev = NULL;
  pfe->pnext = NULL;
  pfe->frozen = TRUE;

  P(fridge_mutex);
  if( fridge_content == NULL )
   {
     pfe->pprev = NULL ;
     pfe->pnext = NULL ;
   }
  else
   {
     pfe->pprev = fridge_content ;
     pfe->pnext = NULL ;
     fridge_content->pnext = pfe ;
   }
  fridge_content = pfe ;
  V( fridge_mutex ) ;

  P( pfe->condmutex ) ;
  while( pfe->frozen == TRUE && rc == 0 ) 
    if( nfs_param.core_param.tcp_fridge_expiration_delay > 0 )
       rc = pthread_cond_timedwait( &pfe->condvar, &pfe->condmutex, &timeout ) ;
    else
       rc = pthread_cond_wait( &pfe->condvar, &pfe->condmutex ) ;

  if( rc != ETIMEDOUT )
    arg = pfe->arg;

  fridgethr_remove( pfe );  
  return arg;
} /* fridgethr_freeze */
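The comment in the example is the key point: pthread_cond_timedwait() expects an absolute deadline, not a duration. A minimal stand-alone sketch of building that deadline, using clock_gettime(CLOCK_REALTIME) since the default condition-variable clock is CLOCK_REALTIME:

#include <pthread.h>
#include <time.h>

/* Wait on cond for at most delay_sec seconds. The caller must hold
 * mutex. Returns 0 if signalled, ETIMEDOUT if the deadline passed. */
static int wait_with_deadline(pthread_cond_t *cond, pthread_mutex_t *mutex,
			      time_t delay_sec)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += delay_sec;	/* absolute time, not a duration */

	return pthread_cond_timedwait(cond, mutex, &deadline);
}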
Example #16
/* config_ParseFile:
 * Reads the content of a configuration file and
 * stores it in a memory structure.
 */
config_file_t config_ParseFile(char *file_path)
{

	FILE *configuration_file;
	config_struct_t *output_struct;

	/* Inits error message */

	extern_errormsg[0] = '\0';

	/* First, opens the file. */

	configuration_file = fopen(file_path, "r");

	if (!configuration_file) {
		strcpy(extern_errormsg, strerror(errno));
		return NULL;
	}

	/* Then, parse the file. */
	program_result = NULL;

	ganesha_yyreset();

	ganesha_yy_set_current_file(file_path);
	ganesha_yyin = configuration_file;

	if (ganesha_yyparse()) {
		fclose(configuration_file);
		return NULL;
	}

  /** @todo : ganesha_yyparse calls exit() on error. This needs to be fixed. */

	/* Finally, build the output struct. */

	output_struct = gsh_malloc(sizeof(config_struct_t));

	if (!output_struct) {
		strcpy(extern_errormsg, strerror(errno));
		fclose(configuration_file);
		return NULL;
	}

	output_struct->syntax_tree = program_result;

	/* converts pointer to pointer */
	fclose(configuration_file);
	return (config_file_t) output_struct;

}
Example #17
int nl_rangelist_init(nl_rangelist_t *array)
{
	int fstatus = -1;
	array->pre_allocated_ranges = DEFAULT_RANGELIST_SIZE;
	array->ranges_nb = 0;
	array->array = gsh_malloc(array->pre_allocated_ranges *
				  sizeof(nl_range_t));
	if (array->array != NULL)
		fstatus = 0;
	else
		array->pre_allocated_ranges = 0;

	return fstatus;
}
Example #18
bool fill_netobj(netobj *dst, char *data, int len)
{
	dst->n_len = 0;
	dst->n_bytes = NULL;
	if (len != 0) {
		dst->n_bytes = gsh_malloc(len);
		if (dst->n_bytes != NULL) {
			dst->n_len = len;
			memcpy(dst->n_bytes, data, len);
		} else
			return false;
	}
	return true;
}
Example #19
int vfs_readlink(struct vfs_fsal_obj_handle *myself,
		 fsal_errors_t *fsal_error)
{
	int retval = 0;
	int fd;
	ssize_t retlink;
	struct stat st;
	int flags = O_PATH | O_NOACCESS | O_NOFOLLOW;

	if (myself->u.symlink.link_content != NULL) {
		gsh_free(myself->u.symlink.link_content);
		myself->u.symlink.link_content = NULL;
		myself->u.symlink.link_size = 0;
	}
	fd = vfs_fsal_open(myself, flags, fsal_error);
	if (fd < 0)
		return fd;

	retval = vfs_stat_by_handle(fd, myself->handle, &st, flags);
	if (retval < 0)
		goto error;

	myself->u.symlink.link_size = st.st_size + 1;
	myself->u.symlink.link_content =
	    gsh_malloc(myself->u.symlink.link_size);
	if (myself->u.symlink.link_content == NULL)
		goto error;

	retlink =
	    vfs_readlink_by_handle(myself->handle, fd, "",
				   myself->u.symlink.link_content,
				   myself->u.symlink.link_size);
	if (retlink < 0)
		goto error;
	myself->u.symlink.link_content[retlink] = '\0';
	close(fd);

	return retval;

 error:
	retval = -errno;
	*fsal_error = posix2fsal_error(errno);
	close(fd);
	if (myself->u.symlink.link_content != NULL) {
		gsh_free(myself->u.symlink.link_content);
		myself->u.symlink.link_content = NULL;
		myself->u.symlink.link_size = 0;
	}
	return retval;
}
Example #20
File: handle.c  Project: JevonQ/nfs-ganesha
static fsal_status_t readsymlink(struct fsal_obj_handle *obj_hdl,
				 struct gsh_buffdesc *link_content,
				 bool refresh)
{
	struct vfs_fsal_obj_handle *myself = NULL;
	int retval = 0;
	fsal_errors_t fsal_error = ERR_FSAL_NO_ERROR;

	if (obj_hdl->type != SYMBOLIC_LINK) {
		fsal_error = ERR_FSAL_INVAL;
		goto out;
	}
	myself = container_of(obj_hdl, struct vfs_fsal_obj_handle, obj_handle);
	if (obj_hdl->fsal != obj_hdl->fs->fsal) {
		LogDebug(COMPONENT_FSAL,
			 "FSAL %s operation for handle belonging to FSAL %s, return EXDEV",
			 obj_hdl->fsal->name,
			 obj_hdl->fs->fsal != NULL
				? obj_hdl->fs->fsal->name
				: "(none)");
		retval = EXDEV;
		goto hdlerr;
	}
	if (refresh) {		/* lazy load or LRU'd storage */
		retval = vfs_readlink(myself, &fsal_error);
		if (retval < 0) {
			retval = -retval;
			goto hdlerr;
		}
	}
	if (myself->u.symlink.link_content == NULL) {
		fsal_error = ERR_FSAL_FAULT;	/* probably a better error?? */
		goto out;
	}

	link_content->len = myself->u.symlink.link_size;
	link_content->addr = gsh_malloc(myself->u.symlink.link_size);
	if (link_content->addr == NULL) {
		fsal_error = ERR_FSAL_NOMEM;
		goto out;
	}
	memcpy(link_content->addr, myself->u.symlink.link_content,
	       link_content->len);

 hdlerr:
	fsal_error = posix2fsal_error(retval);
 out:
	return fsalstat(fsal_error, retval);
}
Example #21
cache_inode_status_t
cache_inode_add_cached_dirent(cache_entry_t *parent,
			      const char *name,
			      cache_entry_t *entry,
			      cache_inode_dir_entry_t **dir_entry)
{
	cache_inode_dir_entry_t *new_dir_entry = NULL;
	size_t namesize = strlen(name) + 1;
	int code = 0;
	cache_inode_status_t status = CACHE_INODE_SUCCESS;

	/* Sanity check */
	if (parent->type != DIRECTORY) {
		status = CACHE_INODE_NOT_A_DIRECTORY;
		return status;
	}

	/* in cache inode avl, we always insert on pentry_parent */
	new_dir_entry = gsh_malloc(sizeof(cache_inode_dir_entry_t) + namesize);
	if (new_dir_entry == NULL) {
		status = CACHE_INODE_MALLOC_ERROR;
		return status;
	}

	new_dir_entry->flags = DIR_ENTRY_FLAG_NONE;

	memcpy(&new_dir_entry->name, name, namesize);
	cache_inode_key_dup(&new_dir_entry->ckey, &entry->fh_hk.key);

	/* add to avl */
	code = cache_inode_avl_qp_insert(parent, new_dir_entry);
	if (code < 0) {
		/* collision, tree not updated--release both pool objects and
		 * return err */
		gsh_free(new_dir_entry->ckey.kv.addr);
		gsh_free(new_dir_entry);
		status = CACHE_INODE_ENTRY_EXISTS;
		return status;
	}

	if (dir_entry)
		*dir_entry = new_dir_entry;

	/* we're going to succeed */
	parent->object.dir.nbactive++;

	return status;
}
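The sizeof(cache_inode_dir_entry_t) + namesize allocation above stores the name directly behind the fixed part of the entry, so a single gsh_free() releases both. The same layout as a stand-alone sketch with a C99 flexible array member and plain malloc (struct named_entry is a hypothetical type):

#include <stdlib.h>
#include <string.h>

struct named_entry {
	int flags;
	char name[];	/* flexible array member, name stored in-line */
};

/* Allocate the entry and its name in one block; a single free()
 * releases both. Returns NULL on allocation failure. */
static struct named_entry *named_entry_new(const char *name)
{
	size_t namesize = strlen(name) + 1;
	struct named_entry *e = malloc(sizeof(*e) + namesize);

	if (e == NULL)
		return NULL;
	e->flags = 0;
	memcpy(e->name, name, namesize);	/* includes the trailing '\0' */
	return e;
}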
Example #22
netobj *copy_netobj(netobj *dst, netobj *src)
{
	if (dst == NULL)
		return NULL;
	dst->n_len = 0;
	if (src->n_len != 0) {
		dst->n_bytes = gsh_malloc(src->n_len);
		if (!dst->n_bytes)
			return NULL;
		memcpy(dst->n_bytes, src->n_bytes, src->n_len);
	} else
		dst->n_bytes = NULL;

	dst->n_len = src->n_len;
	return dst;
}
Example #23
/* Allocate and fill in group_data structure */
static struct group_data *uid2grp_allocate_by_name(
		const struct gsh_buffdesc *name)
{
	struct passwd p;
	struct passwd *pp;
	char *namebuff = alloca(name->len + 1);
	struct group_data *gdata = NULL;
	char *buff;
	long buff_size;

	memcpy(namebuff, name->addr, name->len);
	*(namebuff + name->len) = '\0';

	buff_size = sysconf(_SC_GETPW_R_SIZE_MAX);
	if (buff_size == -1) {
		LogMajor(COMPONENT_IDMAPPER, "sysconf failure: %d", errno);
		return NULL;
	}

	buff = alloca(buff_size);
	if ((getpwnam_r(namebuff, &p, buff, buff_size, &pp) != 0)
	    || (pp == NULL)) {
		LogEvent(COMPONENT_IDMAPPER, "getpwnam_r %s failed", namebuff);
		return gdata;
	}

	gdata = gsh_malloc(sizeof(struct group_data) + strlen(p.pw_name));
	if (gdata == NULL) {
		LogEvent(COMPONENT_IDMAPPER, "failed to allocate group data");
		return gdata;
	}

	gdata->uname.len = strlen(p.pw_name);
	gdata->uname.addr = (char *)gdata + sizeof(struct group_data);
	memcpy(gdata->uname.addr, p.pw_name, gdata->uname.len);
	gdata->uid = p.pw_uid;
	gdata->gid = p.pw_gid;
	if (!my_getgrouplist_alloc(p.pw_name, p.pw_gid, gdata)) {
		gsh_free(gdata);
		return NULL;
	}

	PTHREAD_MUTEX_init(&gdata->lock, NULL);
	gdata->epoch = time(NULL);
	gdata->refcount = 0;
	return gdata;
}
Example #24
File: handle.c  Project: asias/nfs-ganesha
static fsal_status_t readsymlink(struct fsal_obj_handle *obj_hdl,
				 const struct req_op_context *opctx,
				 struct gsh_buffdesc *link_content,
				 bool refresh)
{
	int rc = 0;
	fsal_status_t status = { ERR_FSAL_NO_ERROR, 0 };
	struct glusterfs_export *glfs_export =
	    container_of(obj_hdl->export, struct glusterfs_export, export);
	struct glusterfs_handle *objhandle =
	    container_of(obj_hdl, struct glusterfs_handle, handle);
#ifdef GLTIMING
	struct timespec s_time, e_time;

	now(&s_time);
#endif

	link_content->len = 1024;	// bad bad!!! need to determine size
	link_content->addr = gsh_malloc(link_content->len);
	if (link_content->addr == NULL) {
		status = gluster2fsal_error(rc);
		goto out;
	}

	rc = glfs_h_readlink(glfs_export->gl_fs, objhandle->glhandle,
			     link_content->addr, link_content->len);
	if (rc < 0) {
		status = gluster2fsal_error(errno);
		goto out;
	}

	/* Check if return buffer overflowed, it is still '\0' terminated */
	link_content->len = (strlen(link_content->addr) + 1);

 out:
	if (status.major != ERR_FSAL_NO_ERROR) {
		gsh_free(link_content->addr);
		link_content->addr = NULL;
		link_content->len = 0;
	}
#ifdef GLTIMING
	now(&e_time);
	latency_update(&s_time, &e_time, lat_readsymlink);
#endif

	return status;
}
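The "bad bad!!!" comment flags the real issue in this example: the target size is guessed at 1024. A conventional fix for readlink-style calls is to retry with a growing buffer until the result no longer fills it. A minimal sketch with POSIX readlink(2); substituting glfs_h_readlink() in the FSAL is left as an assumption of this sketch:

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

/* Return a malloc'd, NUL-terminated symlink target of any length, or
 * NULL on error. The caller frees the result. */
static char *readlink_any_size(const char *path)
{
	size_t size = 256;

	for (;;) {
		char *buf = malloc(size);
		ssize_t n;

		if (buf == NULL)
			return NULL;
		n = readlink(path, buf, size);
		if (n < 0) {
			free(buf);
			return NULL;
		}
		if ((size_t)n < size) {
			buf[n] = '\0';	/* it fit, with room for the NUL */
			return buf;
		}
		free(buf);		/* possibly truncated: grow and retry */
		size *= 2;
	}
}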
Example #25
/**
 * fsal_internal_getstats:
 * (For internal use in the FSAL).
 * Retrieve call statistics for current thread.
 *
 * \param output_stats (output):
 *        Pointer to the call statistics structure.
 *
 * \return Nothing.
 */
void fsal_internal_getstats(fsal_statistics_t * output_stats)
{

  fsal_statistics_t *bythread_stat = NULL;

  /* first, we init the keys if this is the first time */
  if(pthread_once(&once_key, init_keys) != 0)
    {
      LogError(COMPONENT_FSAL, ERR_SYS, ERR_PTHREAD_ONCE, errno);
      return;
    }

  /* we get the specific value */
  bythread_stat = (fsal_statistics_t *) pthread_getspecific(key_stats);

  /* we allocate stats if this is the first time */
  if(bythread_stat == NULL)
    {
      int i;

      bythread_stat = gsh_malloc(sizeof(fsal_statistics_t));

      if(bythread_stat == NULL)
        {
          LogError(COMPONENT_FSAL, ERR_SYS, ERR_MALLOC, ENOMEM);
          return;
        }
      /* inits the struct */
      for(i = 0; i < FSAL_NB_FUNC; i++)
        {
          bythread_stat->func_stats.nb_call[i] = 0;
          bythread_stat->func_stats.nb_success[i] = 0;
          bythread_stat->func_stats.nb_err_retryable[i] = 0;
          bythread_stat->func_stats.nb_err_unrecover[i] = 0;
        }

      /* set the specific value */
      pthread_setspecific(key_stats, (void *)bythread_stat);

    }

  if(output_stats)
    (*output_stats) = (*bythread_stat);

  return;

}
Example #26
void Process_nfs4_conflict(LOCK4denied          * denied,    /* NFS v4 LOCK4denied structure to fill in */
                           state_owner_t        * holder,    /* owner that holds conflicting lock */
                           fsal_lock_param_t    * conflict)  /* description of conflicting lock */
{
  /* A conflicting lock from a different lock_owner returns NFS4ERR_DENIED */
  denied->offset = conflict->lock_start;
  denied->length = conflict->lock_length;

  if(conflict->lock_type == FSAL_LOCK_R)
    denied->locktype = READ_LT;
  else
    denied->locktype = WRITE_LT;

  if(holder != NULL && holder->so_owner_len != 0)
    denied->owner.owner.owner_val = gsh_malloc(holder->so_owner_len);
  else
    denied->owner.owner.owner_val = NULL;

  LogFullDebug(COMPONENT_STATE,
               "denied->owner.owner.owner_val = %p",
               denied->owner.owner.owner_val);

  if(denied->owner.owner.owner_val != NULL)
    {
      denied->owner.owner.owner_len = holder->so_owner_len;

      memcpy(denied->owner.owner.owner_val,
             holder->so_owner_val,
             holder->so_owner_len);
    }
  else
    {
      denied->owner.owner.owner_len = unknown_owner.so_owner_len;
      denied->owner.owner.owner_val = unknown_owner.so_owner_val;
    }

  if(holder != NULL && holder->so_type == STATE_LOCK_OWNER_NFSV4)
    denied->owner.clientid = holder->so_owner.so_nfs4_owner.so_clientid;
  else
    denied->owner.clientid = 0;

  /* Release any lock owner reference passed back from SAL */
  if(holder != NULL)
    dec_state_owner_ref(holder);
}
Example #27
/**
 * @brief Return gid given a group name
 *
 * @param[in]  name  group name
 * @param[out] gid   address for gid to be filled in
 *
 * @return 0 on success and errno on failure.
 *
 * NOTE: If a group name doesn't exist, getgrnam_r returns 0 with the
 * result pointer set to NULL. We turn that into ENOENT error! Also,
 * getgrnam_r fails with ERANGE if some group has so many members that
 * they do not fit into the supplied buffer; that group need not even be
 * the one we are asking for. ERANGE is handled here by retrying with a
 * larger buffer, so this function never returns ERANGE to the caller.
 */
static int name_to_gid(const char *name, gid_t *gid)
{
	struct group g;
	struct group *gres = NULL;
	char *buf;
	size_t buflen = sysconf(_SC_GETGR_R_SIZE_MAX);

	/* Upper bound on the buffer length, just to bail out if there is
	 * a bug in getgrnam_r returning ERANGE incorrectly. 64MB
	 * should be good enough for now.
	 */
	size_t maxlen = 64 * 1024 * 1024;
	int err;

	if (buflen == -1)
		buflen = PWENT_BEST_GUESS_LEN;

	do {
		buf = gsh_malloc(buflen);
		if (buf == NULL) {
			LogCrit(COMPONENT_IDMAPPER,
				"gsh_malloc failed, buflen: %zu", buflen);

			return ENOMEM;
		}

		err = getgrnam_r(name, &g, buf, buflen, &gres);
		if (err == ERANGE) {
			buflen *= 16;
			gsh_free(buf);
		}
	} while (buflen <= maxlen && err == ERANGE);

	if (err == 0) {
		if (gres == NULL)
			err = ENOENT;
		else
			*gid = gres->gr_gid;
	}

	if (err != ERANGE)
		gsh_free(buf);

	return err;
}
Example #28
static int ChangeoverExports()
{

#if 0
  exportlist_t *pcurrent = NULL;

  /**
   * @@TODO@@ This is all totally bogus code now that exports are under the
   * control of the export manager. Left as unfinished business.
   */
  if (nfs_param.pexportlist)
    pcurrent = nfs_param.pexportlist->next;

  while(pcurrent != NULL)
    {
      /* Leave the head so that the list may be replaced later without
       * changing the reference pointer in worker threads. */

      if (pcurrent == nfs_param.pexportlist)
        break;

      nfs_param.pexportlist->next = RemoveExportEntry(pcurrent);
      pcurrent = nfs_param.pexportlist->next;
    }

  /* Allocate memory if needed, could have started with NULL exports */
  if (nfs_param.pexportlist == NULL)
    nfs_param.pexportlist = gsh_malloc(sizeof(exportlist_t));

  if (nfs_param.pexportlist == NULL)
    return ENOMEM;

  /* Changed the old export list head to the new export list head.
   * All references to the exports list should be up-to-date now. */
  memcpy(nfs_param.pexportlist, temp_pexportlist, sizeof(exportlist_t));

  /* We no longer need the head that was created for
   * the new list since the export list is built as a linked list. */
  gsh_free(temp_pexportlist);
  temp_pexportlist = NULL;
  return 0;
#else
  return ENOTSUP;
#endif
}
Example #29
/**
 *
 * gid2utf8: converts a gid to a utf8 string descriptor.
 *
 * @param gid     [IN]  the input gid
 * @param utf8str [OUT] computed UTF8 string descriptor
 *
 * @return the length of the utf8 buffer if successful, -1 on failure
 *
 */
int gid2utf8(gid_t gid, utf8string * utf8str)
{
  char buff[NFS4_MAX_DOMAIN_LEN];
  unsigned int len = 0;

  if(gid2str(gid, buff) == -1)
    return -1;

  len = strlen(buff);

  /* A matching gid was found */
  /* Do the conversion to utf8 format */
  if((utf8str->utf8string_val = gsh_malloc(len)) == NULL)
    return -1;
  else
    utf8str->utf8string_len = len;

  return str2utf8(buff, utf8str);
}                               /* gid2utf8 */
Example #30
/**
 * @brief Construct the fs opaque part of a pseudofs nfsv4 handle
 *
 * Given the components of a pseudofs nfsv4 handle, the nfsv4 handle is
 * created by concatenating the components. This is the fs opaque piece
 * of struct file_handle_v4 and what is sent over the wire.
 *
 * @param[in] pseudopath Full path of the pseudofs node
 * @param[in] len length of the pseudopath parameter
 * @param[in] hashkey a 64 bit hash of the pseudopath parameter
 *
 * @return The nfsv4 pseudofs file handle as a char *
 */
char *package_pseudo_handle(char *pseudopath, ushort len, uint64 hashkey)
{
	char *buff = NULL;
	int opaque_bytes_used = 0, pathlen = 0;

	/* This is the size of the v4 file handle opaque area used for pseudofs
	 * or FSAL file handles.
	 */
	buff = gsh_malloc(V4_FH_OPAQUE_SIZE);
	if (buff == NULL) {
		LogCrit(COMPONENT_NFS_V4_PSEUDO,
			"Failed to malloc space for pseudofs handle.");
		return NULL;
	}

	memcpy(buff, &hashkey, sizeof(hashkey));
	opaque_bytes_used += sizeof(hashkey);

	/* include length of the path in the handle.
	 * MAXPATHLEN=4096 ... max path length can be contained in a short int.
	 */
	memcpy(buff + opaque_bytes_used, &len, sizeof(ushort));
	opaque_bytes_used += sizeof(ushort);

	/* Either the nfsv4 fh opaque size or the length of the pseudopath.
	 * Ideally we can include entire pseudofs pathname for guaranteed
	 * uniqueness of pseudofs handles.
	 */
	pathlen = MIN(V4_FH_OPAQUE_SIZE - opaque_bytes_used, len);
	memcpy(buff + opaque_bytes_used, pseudopath, pathlen);
	opaque_bytes_used += pathlen;

	/* If there is more space in the opaque handle due to a short pseudofs
	 * path ... zero it.
	 */
	if (opaque_bytes_used < V4_FH_OPAQUE_SIZE) {
		memset(buff + opaque_bytes_used, 0,
		       V4_FH_OPAQUE_SIZE - opaque_bytes_used);
	}

	return buff;
}
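For reference, the layout packed above (hash key, then path length, then as much of the pseudopath as fits) can be read back in the same order. A sketch with a hypothetical helper name, assuming a buffer produced by package_pseudo_handle():

#include <string.h>

/* Hypothetical reader for the opaque buffer built above; it mirrors the
 * memcpy order used when packing. */
static void unpack_pseudo_handle(const char *buff, uint64 *hashkey,
				 ushort *len)
{
	size_t offset = 0;

	memcpy(hashkey, buff + offset, sizeof(*hashkey));
	offset += sizeof(*hashkey);
	memcpy(len, buff + offset, sizeof(*len));
	/* the (possibly truncated) pseudopath starts at
	 * buff + offset + sizeof(*len) */
}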