Example #1
0
/*!
 * \brief Initialize a bridge node pattern structure by copying another one
 *
 * By default, padding is set to 0, prefix and suffix to NULL,
 * and the node pattern is basic.
 *
 * \param np   pointer to the bridge node pattern structure to initialize
 * \param npin pointer to the bridge node pattern to copy
 *
 * \retval  0 operation successfully completed
 * \retval -1 operation failed
 */
int nl_nodepattern_init_by_copy(nl_nodepattern_t *np,
				      nl_nodepattern_t *npin)
{
	int fstatus = -1;
	np->padding = npin->padding;
	np->basic = npin->basic;
	np->prefix = NULL;
	np->suffix = NULL;
	if (npin->prefix != NULL) {
		np->prefix = gsh_strdup(npin->prefix);
		if (np->prefix == NULL) {
			nl_nodepattern_free_contents(np);
			return fstatus;
		}
	}
	if (npin->suffix != NULL) {
		np->suffix = gsh_strdup(npin->suffix);
		if (np->suffix == NULL) {
			nl_nodepattern_free_contents(np);
			return fstatus;
		}
	}
	fstatus = 0;
	return fstatus;
}
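For illustration, a minimal usage sketch of the copy initializer above; the wrapper function and header name are hypothetical, and only the init/free calls and their failure contract come from the example itself.
/* Hypothetical usage sketch: "nodelist.h" and the wrapper are assumptions;
 * on failure the initializer has already freed the copy's contents. */
#include "nodelist.h"	/* assumed to declare nl_nodepattern_t and the API */

static int clone_and_use_pattern(nl_nodepattern_t *src)
{
	nl_nodepattern_t copy;

	if (nl_nodepattern_init_by_copy(&copy, src) != 0)
		return -1;	/* nothing to free, init cleaned up */

	/* ... work with the private copy here ... */

	nl_nodepattern_free_contents(&copy);	/* release duplicated strings */
	return 0;
}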
Example #2
0
bool idmapper_init(void)
{
#ifdef USE_NFSIDMAP
	if (!nfs_param.nfsv4_param.use_getpwnam) {
		if (nfs4_init_name_mapping(nfs_param.nfsv4_param.idmapconf)
		    != 0) {
			return false;
		}
		owner_domain.addr = gsh_malloc(NFS4_MAX_DOMAIN_LEN + 1);
		if (owner_domain.addr == NULL)
			return false;

		if (nfs4_get_default_domain
		    (NULL, owner_domain.addr, NFS4_MAX_DOMAIN_LEN) != 0) {
			gsh_free(owner_domain.addr);
			return false;
		}
		owner_domain.len = strlen(owner_domain.addr);
	}
#endif				/* USE_NFSIDMAP */
	if (nfs_param.nfsv4_param.use_getpwnam) {
		owner_domain.addr = gsh_strdup(nfs_param.nfsv4_param
					       .domainname);
		if (owner_domain.addr == NULL)
			return false;

		owner_domain.len = strlen(nfs_param.nfsv4_param.domainname);
	}

	idmapper_cache_init();
	return true;
}
Example #3
0
/* fs_specific_has() parses the fs_specific string for a particular key,
 * returns true if found, and optionally returns a val if the string is
 * of the form key=val.
 *
 * The fs_specific string is a comma (,) separated list of options where each
 * option can be of the form key=value or just key. Example:
 *	FS_specific = "foo=baz,enable_A";
 */
bool fs_specific_has(const char *fs_specific, const char *key, char *val,
		     int *max_val_bytes)
{
	char *next_comma, *option;
	bool ret;
	char *fso_dup = NULL;

	if (!fs_specific || !fs_specific[0])
		return false;

	fso_dup = gsh_strdup(fs_specific);

	for (option = strtok_r(fso_dup, ",", &next_comma); option;
	     option = strtok_r(NULL, ",", &next_comma)) {
		char *k = option;
		char *v = k;

		strsep(&v, "=");
		if (strcmp(k, key) == 0) {
			if (val)
				strncpy(val, v, *max_val_bytes);
			if (max_val_bytes)
				*max_val_bytes = strlen(v) + 1;
			ret = true;
			goto cleanup;
		}
	}

	ret = false;
 cleanup:
	gsh_free(fso_dup);
	return ret;
}
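A hedged usage sketch of fs_specific_has(), relying only on the behavior visible above: max_val_bytes carries the capacity of val on input and the bytes needed on output, and an existence-only check can pass NULL for both out parameters. The demo wrapper and stdio output are illustrative, not part of the FSAL API.
/* Hedged usage sketch -- the wrapper is hypothetical; only the
 * fs_specific_has() behavior shown above is relied upon. */
#include <stdio.h>

static void fs_specific_demo(void)
{
	char val[64];
	int val_bytes = sizeof(val);	/* in: capacity of val, out: bytes needed */

	if (fs_specific_has("foo=baz,enable_A", "foo", val, &val_bytes))
		printf("foo = %s (%d bytes needed)\n", val, val_bytes);

	/* Existence check only: pass NULL for val and max_val_bytes. */
	if (fs_specific_has("foo=baz,enable_A", "enable_A", NULL, NULL))
		printf("enable_A is present\n");
}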
Example #4
0
bool nsm_connect(void)
{
	struct utsname utsname;

	if (nsm_clnt != NULL)
		return true;

	if (uname(&utsname) == -1) {
		LogCrit(COMPONENT_NLM,
			"uname failed with errno %d (%s)",
			errno, strerror(errno));
		return false;
	}

	nodename = gsh_strdup(utsname.nodename);
	if (nodename == NULL) {
		LogCrit(COMPONENT_NLM,
			"failed to allocate memory for nodename");
		return false;
	}

	nsm_clnt = gsh_clnt_create("localhost", SM_PROG, SM_VERS, "tcp");

	if (nsm_clnt == NULL) {
		LogCrit(COMPONENT_NLM, "failed to connect to statd");
		gsh_free(nodename);
		nodename = NULL;
	}

	/* split auth (for authnone, idempotent) */
	nsm_auth = authnone_create();

	return nsm_clnt != NULL;
}
Example #5
0
int vfs_readlink(struct vfs_fsal_obj_handle *hdl,
		 fsal_errors_t *ferr)
{
	char ldata[MAXPATHLEN + 1];
	int retval;
	LogXFSHandle(hdl->handle);
	retval = readlink_by_handle(hdl->handle->handle_data,
				    hdl->handle->handle_len,
				    ldata, sizeof(ldata));
	if (retval < 0) {
		retval = -errno;
		*ferr = posix2fsal_error(retval);
		goto out;
	}

	ldata[retval] = '\0';

	hdl->u.symlink.link_content = gsh_strdup(ldata);
	if (hdl->u.symlink.link_content == NULL) {
		*ferr = ERR_FSAL_NOMEM;
		retval = -ENOMEM;
	} else {
		hdl->u.symlink.link_size = retval + 1;
		retval = 0;
	}
 out:
	return retval;
}
Example #6
0
fsal_fs_locations_t *nfs4_fs_locations_new(const char *fs_root,
					   const char *rootpath,
					   const unsigned int count)
{
	fsal_fs_locations_t *fs_locations;

	fs_locations = nfs4_fs_locations_alloc(count);
	if (fs_locations == NULL) {
		LogCrit(COMPONENT_NFS_V4, "Could not allocate fs_locations");
		return NULL;
	}

	fs_locations->fs_root = gsh_strdup(fs_root);
	fs_locations->rootpath = gsh_strdup(rootpath);
	fs_locations->ref = 1;

	return fs_locations;
}
Example #7
0
int nl_nodepattern_set_suffix(nl_nodepattern_t *np, char *suffix)
{
	int fstatus = -1;
	if (np != NULL && suffix != NULL) {
		xfree(np->suffix);
		np->suffix = gsh_strdup(suffix);
		if (np->suffix != NULL)
			fstatus = 0;
	}
	return fstatus;
}
Example #8
0
/*
 * A common routine for getting the Kerberos error message
 */
static char *gssd_k5_err_msg(krb5_context context, krb5_error_code code)
{
#if HAVE_KRB5_GET_ERROR_MESSAGE
	if (context != NULL) {
		const char *origmsg;
		char *msg = NULL;

		origmsg = krb5_get_error_message(context, code);
		msg = gsh_strdup(origmsg);
		krb5_free_error_message(context, origmsg);
		return msg;
	}
#endif
#if HAVE_KRB5
	return gsh_strdup(error_message(code));
#else
	if (context != NULL)
		return gsh_strdup(krb5_get_err_text(context, code));
	else
		return gsh_strdup(error_message(code));
#endif
}
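Worth noting from the example above: every branch returns a string allocated with gsh_strdup(), so the caller owns the result and releases it with gsh_free(). A hypothetical call site, assuming <stdio.h> for the output:
/* Hypothetical call site; only the ownership rule -- gsh_strdup() in,
 * gsh_free() out -- is taken from the code above. */
static void report_krb5_error(krb5_context context, krb5_error_code code)
{
	char *msg = gssd_k5_err_msg(context, code);

	if (msg != NULL) {
		fprintf(stderr, "krb5 error %ld: %s\n", (long)code, msg);
		gsh_free(msg);	/* release the gsh_strdup() copy */
	}
}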
Example #9
0
void inserts_tree_2(void)
{
	avl_unit_val_t *v;
	char s[256];
	int ix, code;

	for (ix = 0; ix < 100000; ++ix) {
		sprintf(s, "file%d", ix);
		v = avl_unit_new_val(gsh_strdup(s));
		code = qp_avl_insert(&avl_tree_1, v);
		if (code == -1)
			abort();
		if (v->hk.p > 0)
			printf("%d positive p %d %s\n", ix, v->hk.p, s);
	}
}
Example #10
0
void inserts_tree_1(void)
{
	avl_unit_val_t *v;
	char *s;
	int ix, code;

	ix = 0;
	while ((s = dir_data[ix].name) != NULL) {
		v = avl_unit_new_val(gsh_strdup(s));
		code = qp_avl_insert(&avl_tree_1, v);
		if (code == -1)
			abort();
		if (v->hk.p > 0)
			printf("%d positive p %d %s\n", ix, v->hk.p, s);
		++ix;
	}
}
Example #11
0
fsal_errors_t nfs3_readdir_callback(void *opaque,
				    struct fsal_obj_handle *obj,
				    const struct attrlist *attr,
				    uint64_t mounted_on_fileid,
				    uint64_t cookie,
				    enum cb_state cb_state)
{
	/* Not-so-opaque pointer to callback data */
	struct fsal_readdir_cb_parms *cb_parms = opaque;
	struct nfs3_readdir_cb_data *tracker = cb_parms->opaque;
	/* Length of the current filename */
	size_t namelen = strlen(cb_parms->name);
	entry3 *e3 = tracker->entries + tracker->count;
	size_t need =
	    sizeof(entry3) + ((namelen + 3) & ~3) + 4 - sizeof(char *) -
	    sizeof(entry3 *);

	if (tracker->count == tracker->total_entries) {
		cb_parms->in_result = false;
		return ERR_FSAL_NO_ERROR;
	}
	if (tracker->mem_left < need) {
		if (tracker->count == 0)
			tracker->error = NFS3ERR_TOOSMALL;

		cb_parms->in_result = false;
		return ERR_FSAL_NO_ERROR;
	}

	e3->fileid = obj->fileid;
	e3->name = gsh_strdup(cb_parms->name);
	e3->cookie = cookie;

	if (tracker->count > 0)
		tracker->entries[tracker->count - 1].nextentry = e3;

	tracker->mem_left -= need;
	++(tracker->count);
	cb_parms->in_result = true;
	return ERR_FSAL_NO_ERROR;
}
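The need computation above estimates the wire footprint of one entry3: the struct itself, plus the XDR encoding of the filename (a 4-byte length word followed by the name padded to a 4-byte boundary), minus the two in-memory pointers (name and nextentry) that are not sent as such. A self-contained sketch of the padding arithmetic, assuming the standard XDR string layout:
/* Standalone sketch of the 4-byte XDR rounding used in the size estimate
 * above; compile and run on its own to see the per-name wire cost. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *names[] = { "a", "file", "filename9" };
	size_t i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		size_t namelen = strlen(names[i]);
		/* 4-byte length word + name padded up to a multiple of 4 */
		size_t wire_bytes = 4 + ((namelen + 3) & ~(size_t)3);

		printf("%-10s namelen=%zu wire_bytes=%zu\n",
		       names[i], namelen, wire_bytes);
	}
	return 0;
}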
Example #12
0
state_status_t nlm_granted_callback(cache_entry_t        * pentry,
                                    state_lock_entry_t   * lock_entry,
                                    state_status_t       * pstatus)
{
  fsal_op_context_t        fsal_context, *pcontext = &fsal_context;
  state_block_data_t     * block_data     = lock_entry->sle_block_data;
  state_nlm_block_data_t * nlm_block_data = &block_data->sbd_block_data.sbd_nlm_block_data;
  state_cookie_entry_t   * cookie_entry = NULL;
  state_async_queue_t    * arg;
  nlm4_testargs          * inarg;
  state_nlm_owner_t      * nlm_grant_owner  = &lock_entry->sle_owner->so_owner.so_nlm_owner;
  state_nlm_client_t     * nlm_grant_client = nlm_grant_owner->so_client;
  granted_cookie_t         nlm_grant_cookie;
  state_status_t           dummy_status;

  if(nlm_block_data_to_fsal_context(block_data, &fsal_context) != TRUE)
    {
      *pstatus = STATE_INCONSISTENT_ENTRY;
      return *pstatus;
    }

  arg = gsh_malloc(sizeof(*arg));
  if(arg == NULL)
    {
      /* If allocation fails, the best we can do is delete the block entry
       * so that the client can try again and get the lock. Maybe by then
       * we will be able to allocate the objects.
       */
      *pstatus = STATE_MALLOC_ERROR;
      return *pstatus;
    }

  memset(arg, 0, sizeof(*arg));

  /* Get a cookie to use for this grant */
  next_granted_cookie(&nlm_grant_cookie);

  /* Add a cookie to the blocked lock pending grant.
   * It will also request lock from FSAL.
   * Could return STATE_LOCK_BLOCKED because FSAL would have had to block.
   */
  if(state_add_grant_cookie(pentry,
                            pcontext,
                            &nlm_grant_cookie,
                            sizeof(nlm_grant_cookie),
                            lock_entry,
                            &cookie_entry,
                            pstatus) != STATE_SUCCESS)
    {
      free_grant_arg(arg);
      return *pstatus;
    }

  /* Fill in the arguments for the NLMPROC4_GRANTED_MSG call */
  inc_nlm_client_ref(nlm_grant_client);
  arg->state_async_func = nlm4_send_grant_msg;
  arg->state_async_data.state_nlm_async_data.nlm_async_host = nlm_grant_client;
  arg->state_async_data.state_nlm_async_data.nlm_async_key  = cookie_entry;
  inarg = &arg->state_async_data.state_nlm_async_data.nlm_async_args.nlm_async_grant;

  if(!copy_netobj(&inarg->alock.fh, &nlm_block_data->sbd_nlm_fh))
    goto grant_fail_malloc;

  if(!fill_netobj(&inarg->alock.oh,
                  lock_entry->sle_owner->so_owner_val,
                  lock_entry->sle_owner->so_owner_len))
    goto grant_fail_malloc;

  if(!fill_netobj(&inarg->cookie,
                  (char *) &nlm_grant_cookie,
                  sizeof(nlm_grant_cookie)))
    goto grant_fail_malloc;

  inarg->alock.caller_name = gsh_strdup(nlm_grant_client->slc_nlm_caller_name);
  if(!inarg->alock.caller_name)
    goto grant_fail_malloc;

  inarg->exclusive      = lock_entry->sle_lock.lock_type == FSAL_LOCK_W;
  inarg->alock.svid     = nlm_grant_owner->so_nlm_svid;
  inarg->alock.l_offset = lock_entry->sle_lock.lock_start;
  inarg->alock.l_len    = lock_entry->sle_lock.lock_length;
  if(isDebug(COMPONENT_NLM))
    {
      char buffer[1024];

      netobj_to_string(&inarg->cookie, buffer, sizeof(buffer));

      LogDebug(COMPONENT_NLM,
               "Sending GRANTED for arg=%p svid=%d start=%llx len=%llx cookie=%s",
               arg, inarg->alock.svid,
               (unsigned long long) inarg->alock.l_offset, (unsigned long long) inarg->alock.l_len,
               buffer);
    }

  /* Now try to schedule NLMPROC4_GRANTED_MSG call */
  *pstatus = state_async_schedule(arg);

  if(*pstatus != STATE_SUCCESS)
    goto grant_fail;

  return *pstatus;

 grant_fail_malloc:
  *pstatus = STATE_MALLOC_ERROR;

 grant_fail:

  /* Something went wrong after we added a grant cookie, need to clean up */

  dec_nlm_client_ref(nlm_grant_client);

  /* Clean up NLMPROC4_GRANTED_MSG arguments */
  free_grant_arg(arg);

  /* Cancel the pending grant to release the cookie */
  if(state_cancel_grant(pcontext,
                        cookie_entry,
                        &dummy_status) != STATE_SUCCESS)
    {
      /* Not much we can do other than log that something bad happened. */
      LogCrit(COMPONENT_NLM,
              "Unable to clean up GRANTED lock after error");
    }

  return *pstatus;
}
Example #13
0
int nfs4_op_setclientid(struct nfs_argop4 *op, compound_data_t *data,
			struct nfs_resop4 *resp)
{
	SETCLIENTID4args * const arg_SETCLIENTID4 =
	    &op->nfs_argop4_u.opsetclientid;
	SETCLIENTID4res * const res_SETCLIENTID4 =
	    &resp->nfs_resop4_u.opsetclientid;
	clientaddr4 * const res_SETCLIENTID4_INUSE =
	    &resp->nfs_resop4_u.opsetclientid.SETCLIENTID4res_u.client_using;
	char str_verifier[NFS4_VERIFIER_SIZE * 2 + 1];
	struct display_buffer dspbuf_verifier = {
			sizeof(str_verifier), str_verifier, str_verifier};
	char str_client[NFS4_OPAQUE_LIMIT * 2 + 1];
	struct display_buffer dspbuf_client = {
			sizeof(str_client), str_client, str_client};
	const char *str_client_addr = "(unknown)";
	/* The clientid4 broken down into fields */
	char str_clientid4[DISPLAY_CLIENTID_SIZE];
	/* Display buffer for clientid4 */
	struct display_buffer dspbuf_clientid4 = {
		sizeof(str_clientid4), str_clientid4, str_clientid4};
	nfs_client_record_t *client_record;
	nfs_client_id_t *conf;
	nfs_client_id_t *unconf;
	clientid4 clientid;
	verifier4 verifier;
	int rc;

	resp->resop = NFS4_OP_SETCLIENTID;

	if (data->minorversion > 0) {
		res_SETCLIENTID4->status = NFS4ERR_NOTSUPP;
		return res_SETCLIENTID4->status;
	}

	if (op_ctx->client != NULL)
		str_client_addr = op_ctx->client->hostaddr_str;

	if (isDebug(COMPONENT_CLIENTID)) {
		display_opaque_value(&dspbuf_client,
				     arg_SETCLIENTID4->client.id.id_val,
				     arg_SETCLIENTID4->client.id.id_len);

		display_opaque_bytes(&dspbuf_verifier,
				     arg_SETCLIENTID4->client.verifier,
				     NFS4_VERIFIER_SIZE);
	} else {
		str_client[0] = '\0';
		str_verifier[0] = '\0';
	}

	LogDebug(COMPONENT_CLIENTID,
		 "SETCLIENTID Client addr=%s id=%s verf=%s callback={program=%u r_addr=%s r_netid=%s} ident=%u",
		 str_client_addr, str_client, str_verifier,
		 arg_SETCLIENTID4->callback.cb_program,
		 arg_SETCLIENTID4->callback.cb_location.r_addr,
		 arg_SETCLIENTID4->callback.cb_location.r_netid,
		 arg_SETCLIENTID4->callback_ident);

	/* Do we already have one or more records for client id (x)? */
	client_record = get_client_record(arg_SETCLIENTID4->client.id.id_val,
					  arg_SETCLIENTID4->client.id.id_len,
					  0,
					  0);

	if (client_record == NULL) {
		/* Some major failure */
		LogCrit(COMPONENT_CLIENTID, "SETCLIENTID failed");

		res_SETCLIENTID4->status = NFS4ERR_SERVERFAULT;
		return res_SETCLIENTID4->status;
	}

	/* The following checks are based on RFC3530bis draft 16
	 *
	 * This attempts to implement the logic described in
	 * 15.35.5. IMPLEMENTATION Consider the major bullets as CASE
	 * 1, CASE 2, CASE 3, CASE 4, and CASE 5.
	 */

	PTHREAD_MUTEX_lock(&client_record->cr_mutex);

	if (isFullDebug(COMPONENT_CLIENTID)) {
		char str[LOG_BUFF_LEN] = "\0";
		struct display_buffer dspbuf = {sizeof(str), str, str};

		display_client_record(&dspbuf, client_record);

		LogFullDebug(COMPONENT_CLIENTID,
			     "Client Record %s cr_confirmed_rec=%p cr_unconfirmed_rec=%p",
			     str,
			     client_record->cr_confirmed_rec,
			     client_record->cr_unconfirmed_rec);
	}

	conf = client_record->cr_confirmed_rec;

	if (conf != NULL) {
		bool credmatch;

		/* Need a reference to the confirmed record for below */
		inc_client_id_ref(conf);

		clientid = conf->cid_clientid;
		display_clientid(&dspbuf_clientid4, clientid);

		credmatch = nfs_compare_clientcred(&conf->cid_credential,
						   &data->credential)
			    && op_ctx->client != NULL
			    && conf->gsh_client != NULL
			    && op_ctx->client == conf->gsh_client;

		/* Only reject if the principal doesn't match and the
		 * clientid has live state associated. Per RFC 7530
		 * Section 9.1.2. Server Release of Client ID.
		 */
		if (!credmatch && clientid_has_state(conf)) {
			/* CASE 1:
			 *
			 * Confirmed record exists and not the same principal
			 */
			if (isDebug(COMPONENT_CLIENTID)) {
				char *confirmed_addr = "(unknown)";

				if (conf->gsh_client != NULL)
					confirmed_addr =
					    conf->gsh_client->hostaddr_str;

				LogDebug(COMPONENT_CLIENTID,
					 "Confirmed ClientId %s->'%s': Principals do not match... confirmed addr=%s Return NFS4ERR_CLID_INUSE",
					 str_clientid4, str_client,
					 confirmed_addr);
			}

			res_SETCLIENTID4->status = NFS4ERR_CLID_INUSE;
			res_SETCLIENTID4_INUSE->r_netid =
			    (char *)netid_nc_table[conf->cid_cb.v40.cb_addr.nc]
			    .netid;
			res_SETCLIENTID4_INUSE->r_addr =
			    gsh_strdup(conf->cid_cb.v40.cb_client_r_addr);

			/* Release our reference to the confirmed clientid. */
			dec_client_id_ref(conf);
			goto out;
		}

		/* Check if confirmed record is for (v, x, c, l, s) */
		if (credmatch && memcmp(arg_SETCLIENTID4->client.verifier,
					conf->cid_incoming_verifier,
					NFS4_VERIFIER_SIZE) == 0) {
			/* CASE 2:
			 *
			 * A confirmed record exists for this long
			 * form client id and verifier.
			 *
			 * Consider this to be a possible update of
			 * the call-back information.
			 *
			 * Remove any pre-existing unconfirmed record
			 * for (v, x, c).
			 *
			 * Return the same short form client id (c),
			 * but a new setclientid_confirm verifier (t).
			 */
			LogFullDebug(COMPONENT_CLIENTID,
				     "Update ClientId %s->%s",
				     str_clientid4, str_client);

			new_clientid_verifier(verifier);
		} else {
			/* Must be CASE 3 or CASE 4
			 *
			 * Confirmed record is for (u, x, c, l, s).
			 *
			 * These are actually the same, doesn't really
			 * matter if an unconfirmed record exists or
			 * not. Any existing unconfirmed record will
			 * be removed and a new unconfirmed record
			 * added.
			 *
			 * Return a new short form clientid (d) and a
			 * new setclientid_confirm verifier (t). (Note
			 * the spec calls the values e and r for CASE
			 * 4).
			 */
			LogFullDebug(COMPONENT_CLIENTID,
				     "Replace ClientId %s->%s",
				     str_clientid4, str_client);

			clientid = new_clientid();
			new_clientid_verifier(verifier);
		}

		/* Release our reference to the confirmed clientid. */
		dec_client_id_ref(conf);
	} else {
		/* CASE 5:
		 *
		 *
		 * Remove any existing unconfirmed record.
		 *
		 * Return a new short form clientid (d) and a new
		 * setclientid_confirm verifier (t).
		 */
		clientid = new_clientid();
		display_clientid(&dspbuf_clientid4, clientid);
		LogFullDebug(COMPONENT_CLIENTID,
			     "New client %s", str_clientid4);
		new_clientid_verifier(verifier);
	}

	/* At this point, no matter what the case was above, we should
	 * remove any pre-existing unconfirmed record.
	 */

	unconf = client_record->cr_unconfirmed_rec;

	if (unconf != NULL) {
		/* Delete the unconfirmed clientid record. Because we
		 * have the cr_mutex, we have won any race to deal
		 * with this clientid record (whether we raced with a
		 * SETCLIENTID_CONFIRM or the reaper thread (if either
		 * of those operations had won the race,
		 * cr_punconfirmed_id would have been NULL).
		 */
		if (isDebug(COMPONENT_CLIENTID)) {
			char str[LOG_BUFF_LEN] = "\0";
			struct display_buffer dspbuf = {sizeof(str), str, str};

			display_client_id_rec(&dspbuf, unconf);

			LogDebug(COMPONENT_CLIENTID, "Replacing %s", str);
		}

		/* unhash the clientid record */
		remove_unconfirmed_client_id(unconf);
		unconf = NULL;
	}

	/* Now we can proceed to build the new unconfirmed record. We
	 * have determined the clientid and setclientid_confirm values
	 * above.
	 */

	unconf = create_client_id(clientid,
				  client_record,
				  &data->credential,
				  0);

	if (unconf == NULL) {
		/* Error already logged, return */
		res_SETCLIENTID4->status = NFS4ERR_RESOURCE;
		goto out;
	}

	if (strmaxcpy(unconf->cid_cb.v40.cb_client_r_addr,
		      arg_SETCLIENTID4->callback.cb_location.r_addr,
		      sizeof(unconf->cid_cb.v40.cb_client_r_addr)) == -1) {
		LogCrit(COMPONENT_CLIENTID, "Callback r_addr %s too long",
			arg_SETCLIENTID4->callback.cb_location.r_addr);
		res_SETCLIENTID4->status = NFS4ERR_INVAL;

		goto out;
	}

	nfs_set_client_location(unconf,
				&arg_SETCLIENTID4->callback.cb_location);

	memcpy(unconf->cid_incoming_verifier,
	       arg_SETCLIENTID4->client.verifier,
	       NFS4_VERIFIER_SIZE);

	memcpy(unconf->cid_verifier, verifier, sizeof(NFS4_write_verifier));

	unconf->cid_cb.v40.cb_program = arg_SETCLIENTID4->callback.cb_program;
	unconf->cid_cb.v40.cb_callback_ident = arg_SETCLIENTID4->callback_ident;

	rc = nfs_client_id_insert(unconf);

	if (rc != CLIENT_ID_SUCCESS) {
		/* Record is already freed, return. */
		res_SETCLIENTID4->status =
					clientid_error_to_nfsstat_no_expire(rc);
		goto out;
	}

	if (isDebug(COMPONENT_CLIENTID)) {
		char str[LOG_BUFF_LEN] = "\0";
		struct display_buffer dspbuf = {sizeof(str), str, str};

		sprint_mem(str_verifier, verifier, NFS4_VERIFIER_SIZE);

		display_client_id_rec(&dspbuf, unconf);
		LogDebug(COMPONENT_CLIENTID, "SETCLIENTID reply Verifier=%s %s",
			 str_verifier, str);
	}

	res_SETCLIENTID4->status = NFS4_OK;
	res_SETCLIENTID4->SETCLIENTID4res_u.resok4.clientid = clientid;
	memcpy(res_SETCLIENTID4->SETCLIENTID4res_u.resok4.setclientid_confirm,
	       &verifier, NFS4_VERIFIER_SIZE);

 out:

	PTHREAD_MUTEX_unlock(&client_record->cr_mutex);

	/* Release our reference to the client record */
	dec_client_record_ref(client_record);

	return res_SETCLIENTID4->status;
}
Example #14
0
	if (retval != 0) {
		/* seriously bad */
		LogMajor(COMPONENT_FSAL,
			 "Could not attach export");
		gsh_free(myself->export_path);
		gsh_free(myself->root_handle);
		free_export_ops(&myself->export);
		gsh_free(myself);	/* elvis has left the building */

		return fsalstat(posix2fsal_error(retval), retval);
	}

	myself->export.fsal = fsal_hdl;

	/* Save the export path. */
	myself->export_path = gsh_strdup(op_ctx->ctx_export->fullpath);
	op_ctx->fsal_export = &myself->export;

	/* Stack MDCACHE on top */
	status = mdcache_export_init(up_ops, &myself->export.up_ops);
	if (FSAL_IS_ERROR(status)) {
		LogDebug(COMPONENT_FSAL, "MDCACHE creation failed for PSEUDO");
		return status;
	}

	LogDebug(COMPONENT_FSAL,
		 "Created exp %p - %s",
		 myself, myself->export_path);

	return fsalstat(ERR_FSAL_NO_ERROR, 0);
}
Example #15
0
state_nsm_client_t *get_nsm_client(care_t    care,
                                   SVCXPRT * xprt,
                                   char    * caller_name)
{
  state_nsm_client_t   key;
  state_nsm_client_t * pclient;
  char                 sock_name[SOCK_NAME_MAX];
  char                 str[HASHTABLE_DISPLAY_STRLEN];
  struct hash_latch    latch;
  hash_error_t         rc;
  hash_buffer_t        buffkey;
  hash_buffer_t        buffval;

  if(caller_name == NULL)
    return NULL;

  memset(&key, 0, sizeof(key));

  if(nfs_param.core_param.nsm_use_caller_name)
    {
      key.ssc_nlm_caller_name_len = strlen(caller_name);

      if(key.ssc_nlm_caller_name_len > LM_MAXSTRLEN)
        {
          return NULL;
        }

      key.ssc_nlm_caller_name = caller_name;
    }
  else if(xprt == NULL)
    {
      int rc = ipstring_to_sockaddr(caller_name, &key.ssc_client_addr);
      if(rc != 0)
        {
          LogEvent(COMPONENT_STATE,
                   "Error %s, converting caller_name %s to an ipaddress",
                   gai_strerror(rc), caller_name);

          return NULL;
        }

      key.ssc_nlm_caller_name_len = strlen(caller_name);

      if(key.ssc_nlm_caller_name_len > LM_MAXSTRLEN)
        {
          return NULL;
        }

      key.ssc_nlm_caller_name = caller_name;
    }
  else
    {
      key.ssc_nlm_caller_name = sock_name;

      if(copy_xprt_addr(&key.ssc_client_addr, xprt) == 0)
        {
          LogCrit(COMPONENT_STATE,
                  "Error converting caller_name %s to an ipaddress",
                  caller_name);

          return NULL;
        }

      if(sprint_sockip(&key.ssc_client_addr,
                       key.ssc_nlm_caller_name,
                       sizeof(sock_name)) == 0)
        {
          LogCrit(COMPONENT_STATE,
                  "Error converting caller_name %s to an ipaddress",
                  caller_name);

          return NULL;
        }

      key.ssc_nlm_caller_name_len = strlen(key.ssc_nlm_caller_name);
    }

  
  if(isFullDebug(COMPONENT_STATE))
    {
      display_nsm_client(&key, str);
      LogFullDebug(COMPONENT_STATE,
                   "Find {%s}", str);
    }

  buffkey.pdata = &key;
  buffkey.len   = sizeof(key);

  rc = HashTable_GetLatch(ht_nsm_client,
                          &buffkey,
                          &buffval,
                          TRUE,
                          &latch);

  /* If we found it, return it */
  if(rc == HASHTABLE_SUCCESS)
    {
      pclient = buffval.pdata;

      /* Return the found NSM Client */
      if(isFullDebug(COMPONENT_STATE))
        {
          display_nsm_client(pclient, str);
          LogFullDebug(COMPONENT_STATE,
                       "Found {%s}",
                       str);
        }

      /* Increment refcount under hash latch.
       * This prevents dec ref from removing this entry from hash if a race
       * occurs.
       */
      inc_nsm_client_ref(pclient);

      HashTable_ReleaseLatched(ht_nsm_client, &latch);

      if(care == CARE_MONITOR && !nsm_monitor(pclient))
          {
            dec_nsm_client_ref(pclient);
            pclient = NULL;
          }

      return pclient;
    }

  /* An error occurred, return NULL */
  if(rc != HASHTABLE_ERROR_NO_SUCH_KEY)
    {
      display_nsm_client(&key, str);

      LogCrit(COMPONENT_STATE,
              "Error %s, could not find {%s}",
              hash_table_err_to_str(rc), str);

      return NULL;
    }

  /* Not found, but we don't care, return NULL */
  if(care == CARE_NOT)
    {
      /* Return the found NSM Client */
      if(isFullDebug(COMPONENT_STATE))
        {
          display_nsm_client(&key, str);
          LogFullDebug(COMPONENT_STATE,
                       "Ignoring {%s}",
                       str);
        }

      HashTable_ReleaseLatched(ht_nsm_client, &latch);

      return NULL;
    }

  pclient = gsh_malloc(sizeof(*pclient));

  if(pclient == NULL)
    {
      display_nsm_client(&key, str);
      LogCrit(COMPONENT_STATE,
              "No memory for {%s}",
              str);

      return NULL;
    }

  /* Copy everything over */
  memcpy(pclient, &key, sizeof(key));

  if(pthread_mutex_init(&pclient->ssc_mutex, NULL) != 0)
    {
      /* Mutex initialization failed, free the created client */
      display_nsm_client(&key, str);
      LogCrit(COMPONENT_STATE,
              "Could not init mutex for {%s}",
              str);

      gsh_free(pclient);
      return NULL;
    }

  pclient->ssc_nlm_caller_name = gsh_strdup(key.ssc_nlm_caller_name);

  if(pclient->ssc_nlm_caller_name == NULL)
    {
      /* Discard the created client */
      free_nsm_client(pclient);
      return NULL;
    }

  init_glist(&pclient->ssc_lock_list);
  init_glist(&pclient->ssc_share_list);
  pclient->ssc_refcount = 1;

  if(isFullDebug(COMPONENT_STATE))
    {
      display_nsm_client(pclient, str);
      LogFullDebug(COMPONENT_STATE,
                   "New {%s}", str);
    }

  buffkey.pdata = pclient;
  buffkey.len   = sizeof(*pclient);
  buffval.pdata = pclient;
  buffval.len   = sizeof(*pclient);

  rc = HashTable_SetLatched(ht_nsm_client,
                            &buffkey,
                            &buffval,
                            &latch,
                            FALSE,
                            NULL,
                            NULL);

  /* An error occurred, return NULL */
  if(rc != HASHTABLE_SUCCESS)
    {
      display_nsm_client(pclient, str);

      LogCrit(COMPONENT_STATE,
              "Error %s, inserting {%s}",
              hash_table_err_to_str(rc), str);

      free_nsm_client(pclient);

      return NULL;
    }

  if(care != CARE_MONITOR || nsm_monitor(pclient))
    return pclient;

  /* Failed to monitor, release client reference
   * and almost certainly remove it from the hash table.
   */
  dec_nsm_client_ref(pclient);

  return NULL;
}
Example #16
0
state_nlm_client_t *get_nlm_client(care_t               care,
                                   SVCXPRT            * xprt,
                                   state_nsm_client_t * pnsm_client,
                                   char               * caller_name)
{
  state_nlm_client_t   key;
  state_nlm_client_t * pclient;
  char                 str[HASHTABLE_DISPLAY_STRLEN];
  struct hash_latch    latch;
  hash_error_t         rc;
  hash_buffer_t        buffkey;
  hash_buffer_t        buffval;

  if(caller_name == NULL)
    return NULL;

  memset(&key, 0, sizeof(key));

  key.slc_nsm_client          = pnsm_client;
  key.slc_nlm_caller_name_len = strlen(caller_name);
  key.slc_client_type         = svc_get_xprt_type(xprt);

  if(key.slc_nlm_caller_name_len > LM_MAXSTRLEN)
    return NULL;

  key.slc_nlm_caller_name = caller_name;
  
  if(isFullDebug(COMPONENT_STATE))
    {
      display_nlm_client(&key, str);
      LogFullDebug(COMPONENT_STATE,
                   "Find {%s}", str);
    }

  buffkey.pdata = &key;
  buffkey.len   = sizeof(key);

  rc = HashTable_GetLatch(ht_nlm_client,
                          &buffkey,
                          &buffval,
                          TRUE,
                          &latch);

  /* If we found it, return it */
  if(rc == HASHTABLE_SUCCESS)
    {
      pclient = buffval.pdata;

      /* Return the found NLM Client */
      if(isFullDebug(COMPONENT_STATE))
        {
          display_nlm_client(pclient, str);
          LogFullDebug(COMPONENT_STATE,
                       "Found {%s}",
                       str);
        }

      /* Increment refcount under hash latch.
       * This prevents dec ref from removing this entry from hash if a race
       * occurs.
       */
      inc_nlm_client_ref(pclient);

      HashTable_ReleaseLatched(ht_nlm_client, &latch);

      if(care == CARE_MONITOR && !nsm_monitor(pnsm_client))
          {
            dec_nlm_client_ref(pclient);
            pclient = NULL;
          }

      return pclient;
    }

  /* An error occurred, return NULL */
  if(rc != HASHTABLE_ERROR_NO_SUCH_KEY)
    {
      display_nlm_client(&key, str);

      LogCrit(COMPONENT_STATE,
              "Error %s, could not find {%s}",
              hash_table_err_to_str(rc), str);

      return NULL;
    }

  /* Not found, but we don't care, return NULL */
  if(care == CARE_NOT)
    {
      /* Return the found NLM Client */
      if(isFullDebug(COMPONENT_STATE))
        {
          display_nlm_client(&key, str);
          LogFullDebug(COMPONENT_STATE,
                       "Ignoring {%s}",
                       str);
        }

      HashTable_ReleaseLatched(ht_nlm_client, &latch);

      return NULL;
    }

  pclient = gsh_malloc(sizeof(*pclient));

  if(pclient == NULL)
    {
      display_nlm_client(&key, str);
      LogCrit(COMPONENT_STATE,
              "No memory for {%s}",
              str);

      return NULL;
    }

  /* Copy everything over */
  memcpy(pclient, &key, sizeof(key));

  pclient->slc_nlm_caller_name = gsh_strdup(key.slc_nlm_caller_name);

  /* Take a reference to the NSM Client */
  inc_nsm_client_ref(pnsm_client);

  if(pclient->slc_nlm_caller_name == NULL)
    {
      /* Discard the created client */
      free_nlm_client(pclient);
      return NULL;
    }

  pclient->slc_refcount = 1;

  if(isFullDebug(COMPONENT_STATE))
    {
      display_nlm_client(pclient, str);
      LogFullDebug(COMPONENT_STATE,
                   "New {%s}", str);
    }

  buffkey.pdata = pclient;
  buffkey.len   = sizeof(*pclient);
  buffval.pdata = pclient;
  buffval.len   = sizeof(*pclient);

  rc = HashTable_SetLatched(ht_nlm_client,
                            &buffkey,
                            &buffval,
                            &latch,
                            FALSE,
                            NULL,
                            NULL);

  /* An error occurred, return NULL */
  if(rc != HASHTABLE_SUCCESS)
    {
      display_nlm_client(pclient, str);

      LogCrit(COMPONENT_STATE,
              "Error %s, inserting {%s}",
              hash_table_err_to_str(rc), str);

      free_nlm_client(pclient);

      return NULL;
    }

  if(care != CARE_MONITOR || nsm_monitor(pnsm_client))
    return pclient;

  /* Failed to monitor, release client reference
   * and almost certainly remove it from the hash table.
   */
  dec_nlm_client_ref(pclient);

  return NULL;
}
Example #17
0
/**
 * @brief Get an NLM client
 *
 * @param[in] care        Care status
 * @param[in] xprt        RPC transport
 * @param[in] nsm_client  Related NSM client
 * @param[in] caller_name Caller name
 *
 * @return NLM client or NULL.
 */
state_nlm_client_t *get_nlm_client(care_t care, SVCXPRT *xprt,
				   state_nsm_client_t *nsm_client,
				   char *caller_name)
{
	state_nlm_client_t key;
	state_nlm_client_t *pclient;
	char str[LOG_BUFF_LEN];
	struct display_buffer dspbuf = {sizeof(str), str, str};
	struct hash_latch latch;
	hash_error_t rc;
	struct gsh_buffdesc buffkey;
	struct gsh_buffdesc buffval;
	struct sockaddr_storage local_addr;
	socklen_t addr_len;

	if (caller_name == NULL)
		return NULL;

	memset(&key, 0, sizeof(key));

	key.slc_nsm_client = nsm_client;
	key.slc_nlm_caller_name_len = strlen(caller_name);
	key.slc_client_type = svc_get_xprt_type(xprt);

	addr_len = sizeof(local_addr);
	if (getsockname(xprt->xp_fd, (struct sockaddr *)&local_addr, &addr_len)
	    == -1) {
		LogEvent(COMPONENT_CLIENTID, "Failed to get local addr.");
	} else {
		memcpy(&(key.slc_server_addr), &local_addr,
		       sizeof(struct sockaddr_storage));
	}

	if (key.slc_nlm_caller_name_len > LM_MAXSTRLEN)
		return NULL;

	key.slc_nlm_caller_name = caller_name;

	if (isFullDebug(COMPONENT_STATE)) {
		display_nlm_client(&dspbuf, &key);
		LogFullDebug(COMPONENT_STATE, "Find {%s}", str);
	}

	buffkey.addr = &key;
	buffkey.len = sizeof(key);

	rc = hashtable_getlatch(ht_nlm_client, &buffkey, &buffval, true,
				&latch);

	/* If we found it, return it */
	if (rc == HASHTABLE_SUCCESS) {
		pclient = buffval.addr;

		/* Return the found NLM Client */
		if (isFullDebug(COMPONENT_STATE)) {
			display_nlm_client(&dspbuf, pclient);
			LogFullDebug(COMPONENT_STATE, "Found {%s}", str);
		}

		/* Increment refcount under hash latch.
		 * This prevents dec ref from removing this entry from hash
		 * if a race occurs.
		 */
		inc_nlm_client_ref(pclient);

		hashtable_releaselatched(ht_nlm_client, &latch);

		if (care == CARE_MONITOR && !nsm_monitor(nsm_client)) {
			dec_nlm_client_ref(pclient);
			pclient = NULL;
		}

		return pclient;
	}

	/* An error occurred, return NULL */
	if (rc != HASHTABLE_ERROR_NO_SUCH_KEY) {
		display_nlm_client(&dspbuf, &key);

		LogCrit(COMPONENT_STATE, "Error %s, could not find {%s}",
			hash_table_err_to_str(rc), str);

		return NULL;
	}

	/* Not found, but we don't care, return NULL */
	if (care == CARE_NOT) {
		/* Return the found NLM Client */
		if (isFullDebug(COMPONENT_STATE)) {
			display_nlm_client(&dspbuf, &key);
			LogFullDebug(COMPONENT_STATE, "Ignoring {%s}", str);
		}

		hashtable_releaselatched(ht_nlm_client, &latch);

		return NULL;
	}

	pclient = gsh_malloc(sizeof(*pclient));

	if (pclient == NULL) {
		display_nlm_client(&dspbuf, &key);
		LogCrit(COMPONENT_STATE, "No memory for {%s}", str);

		return NULL;
	}

	/* Copy everything over */
	memcpy(pclient, &key, sizeof(key));

	pclient->slc_nlm_caller_name = gsh_strdup(key.slc_nlm_caller_name);

	/* Take a reference to the NSM Client */
	inc_nsm_client_ref(nsm_client);

	if (pclient->slc_nlm_caller_name == NULL) {
		/* Discard the created client */
		free_nlm_client(pclient);
		return NULL;
	}

	pclient->slc_refcount = 1;

	if (isFullDebug(COMPONENT_STATE)) {
		display_nlm_client(&dspbuf, pclient);
		LogFullDebug(COMPONENT_STATE, "New {%s}", str);
	}

	buffkey.addr = pclient;
	buffkey.len = sizeof(*pclient);
	buffval.addr = pclient;
	buffval.len = sizeof(*pclient);

	rc = hashtable_setlatched(ht_nlm_client, &buffkey, &buffval, &latch,
				  false, NULL, NULL);

	/* An error occurred, return NULL */
	if (rc != HASHTABLE_SUCCESS) {
		display_nlm_client(&dspbuf, pclient);

		LogCrit(COMPONENT_STATE, "Error %s, inserting {%s}",
			hash_table_err_to_str(rc), str);

		free_nlm_client(pclient);

		return NULL;
	}

	if (care != CARE_MONITOR || nsm_monitor(nsm_client))
		return pclient;

	/* Failed to monitor, release client reference
	 * and almost certainly remove it from the hash table.
	 */
	dec_nlm_client_ref(pclient);

	return NULL;
}
Example #18
0
/**
 * @brief Get an NSM client
 *
 * @param[in] care        Care status
 * @param[in] xprt        RPC transport
 * @param[in] caller_name Caller name
 *
 * @return NSM client or NULL.
 */
state_nsm_client_t *get_nsm_client(care_t care, SVCXPRT *xprt,
				   char *caller_name)
{
	state_nsm_client_t key;
	state_nsm_client_t *pclient;
	char str[LOG_BUFF_LEN];
	struct display_buffer dspbuf = {sizeof(str), str, str};
	struct hash_latch latch;
	hash_error_t rc;
	struct gsh_buffdesc buffkey;
	struct gsh_buffdesc buffval;

	if (caller_name == NULL)
		return NULL;

	memset(&key, 0, sizeof(key));

	if (nfs_param.core_param.nsm_use_caller_name) {
		key.ssc_nlm_caller_name_len = strlen(caller_name);

		if (key.ssc_nlm_caller_name_len > LM_MAXSTRLEN)
			return NULL;

		key.ssc_nlm_caller_name = caller_name;
	} else if (op_ctx->client == NULL) {
		LogCrit(COMPONENT_STATE,
			"No gsh_client for caller_name %s", caller_name);

		return NULL;
	} else {
		key.ssc_nlm_caller_name = op_ctx->client->hostaddr_str;
		key.ssc_nlm_caller_name_len = strlen(key.ssc_nlm_caller_name);
		key.ssc_client = op_ctx->client;
	}

	if (isFullDebug(COMPONENT_STATE)) {
		display_nsm_client(&dspbuf, &key);
		LogFullDebug(COMPONENT_STATE, "Find {%s}", str);
	}

	buffkey.addr = &key;
	buffkey.len = sizeof(key);

	rc = hashtable_getlatch(ht_nsm_client, &buffkey, &buffval, true,
				&latch);

	/* If we found it, return it */
	if (rc == HASHTABLE_SUCCESS) {
		pclient = buffval.addr;

		/* Return the found NSM Client */
		if (isFullDebug(COMPONENT_STATE)) {
			display_nsm_client(&dspbuf, pclient);
			LogFullDebug(COMPONENT_STATE, "Found {%s}", str);
		}

		/* Increment refcount under hash latch.
		 * This prevents dec ref from removing this entry from hash
		 * if a race occurs.
		 */
		inc_nsm_client_ref(pclient);

		hashtable_releaselatched(ht_nsm_client, &latch);

		if (care == CARE_MONITOR && !nsm_monitor(pclient)) {
			dec_nsm_client_ref(pclient);
			pclient = NULL;
		}

		return pclient;
	}

	/* An error occurred, return NULL */
	if (rc != HASHTABLE_ERROR_NO_SUCH_KEY) {
		display_nsm_client(&dspbuf, &key);

		LogCrit(COMPONENT_STATE, "Error %s, could not find {%s}",
			hash_table_err_to_str(rc), str);

		return NULL;
	}

	/* Not found, but we don't care, return NULL */
	if (care == CARE_NOT) {
		/* Return the found NSM Client */
		if (isFullDebug(COMPONENT_STATE)) {
			display_nsm_client(&dspbuf, &key);
			LogFullDebug(COMPONENT_STATE, "Ignoring {%s}", str);
		}

		hashtable_releaselatched(ht_nsm_client, &latch);

		return NULL;
	}

	pclient = gsh_malloc(sizeof(*pclient));

	if (pclient == NULL) {
		display_nsm_client(&dspbuf, &key);
		LogCrit(COMPONENT_STATE, "No memory for {%s}", str);

		return NULL;
	}

	/* Copy everything over */
	memcpy(pclient, &key, sizeof(key));

	PTHREAD_MUTEX_init(&pclient->ssc_mutex, NULL);

	pclient->ssc_nlm_caller_name = gsh_strdup(key.ssc_nlm_caller_name);

	if (pclient->ssc_nlm_caller_name == NULL) {
		/* Discard the created client */
		PTHREAD_MUTEX_destroy(&pclient->ssc_mutex);
		free_nsm_client(pclient);
		return NULL;
	}

	glist_init(&pclient->ssc_lock_list);
	glist_init(&pclient->ssc_share_list);
	pclient->ssc_refcount = 1;

	if (op_ctx->client != NULL) {
		pclient->ssc_client = op_ctx->client;
		inc_gsh_client_refcount(op_ctx->client);
	}

	if (isFullDebug(COMPONENT_STATE)) {
		display_nsm_client(&dspbuf, pclient);
		LogFullDebug(COMPONENT_STATE, "New {%s}", str);
	}

	buffkey.addr = pclient;
	buffkey.len = sizeof(*pclient);
	buffval.addr = pclient;
	buffval.len = sizeof(*pclient);

	rc = hashtable_setlatched(ht_nsm_client, &buffkey, &buffval, &latch,
				  false, NULL, NULL);

	/* An error occurred, return NULL */
	if (rc != HASHTABLE_SUCCESS) {
		display_nsm_client(&dspbuf, pclient);

		LogCrit(COMPONENT_STATE, "Error %s, inserting {%s}",
			hash_table_err_to_str(rc), str);

		PTHREAD_MUTEX_destroy(&pclient->ssc_mutex);
		free_nsm_client(pclient);

		return NULL;
	}

	if (care != CARE_MONITOR || nsm_monitor(pclient))
		return pclient;

	/* Failed to monitor, release client reference
	 * and almost certainly remove it from the hash table.
	 */
	dec_nsm_client_ref(pclient);

	return NULL;
}
Example #19
0
/**
 * build the export entry
 */
fsal_status_t GPFSFSAL_BuildExportContext(fsal_export_context_t *export_context, /* OUT */
                                      fsal_path_t * p_export_path,      /* IN */
                                      char *fs_specific_options /* IN */
    )
{
  int                  rc, fd, mntexists;
  FILE               * fp;
  struct mntent      * p_mnt;
  char               * mnt_dir = NULL;
  struct statfs        stat_buf;
  gpfs_fsal_up_ctx_t * gpfs_fsal_up_ctx;
  bool_t               start_fsal_up_thread = FALSE;

  fsal_status_t status;
  fsal_op_context_t op_context;
  gpfsfsal_export_context_t *p_export_context = (gpfsfsal_export_context_t *)export_context;

  /* Make sure the FSAL UP context list is initialized */
  if(glist_null(&gpfs_fsal_up_ctx_list))
    init_glist(&gpfs_fsal_up_ctx_list);

  /* sanity check */
  if((p_export_context == NULL) || (p_export_path == NULL))
    {
      LogCrit(COMPONENT_FSAL,
              "NULL mandatory argument passed to %s()", __FUNCTION__);
      Return(ERR_FSAL_FAULT, 0, INDEX_FSAL_BuildExportContext);
    }

  /* open mnt file */
  fp = setmntent(MOUNTED, "r");

  if(fp == NULL)
    {
      rc = errno;
      LogCrit(COMPONENT_FSAL, "Error %d in setmntent(%s): %s", rc, MOUNTED,
                      strerror(rc));
      Return(posix2fsal_error(rc), rc, INDEX_FSAL_BuildExportContext);
    }

  /* Check if mount point is really a gpfs share. If not, we can't continue.*/
  mntexists = 0;
  while((p_mnt = getmntent(fp)) != NULL)
    if(p_mnt->mnt_dir != NULL  && p_mnt->mnt_type != NULL)
      /* There is probably a macro for "gpfs" type ... not sure where it is. */
      if (strncmp(p_mnt->mnt_type, "gpfs", 4) == 0)
        {
          LogFullDebug(COMPONENT_FSAL,
                       "Checking Export Path %s against GPFS fs %s",
                       p_export_path->path, p_mnt->mnt_dir);

          /* If export path is shorter than fs path, then this isn't a match */
          if(strlen(p_export_path->path) < strlen(p_mnt->mnt_dir))
            continue;

          /* If export path doesn't have a path separator after mnt_dir, then it
           * isn't a proper sub-directory of mnt_dir.
           */
          if((p_export_path->path[strlen(p_mnt->mnt_dir)] != '/') &&
             (p_export_path->path[strlen(p_mnt->mnt_dir)] != '\0'))
            continue;

          if (strncmp(p_mnt->mnt_dir, p_export_path->path, strlen(p_mnt->mnt_dir)) == 0)
            {
              mnt_dir = gsh_strdup(p_mnt->mnt_dir);
              mntexists = 1;
              break;
            }
        }
  
  endmntent(fp);

  if (mntexists == 0)
    {
      LogMajor(COMPONENT_FSAL,
               "GPFS mount point %s does not exist.",
               p_export_path->path);
      gsh_free(mnt_dir);
      ReturnCode(ERR_FSAL_INVAL, 0);
    }

  /* save file descriptor to root of GPFS share */
  fd = open(p_export_path->path, O_RDONLY | O_DIRECTORY);
  if(fd < 0)
    {
      if(errno == ENOENT)
        LogMajor(COMPONENT_FSAL,
                 "GPFS export path %s does not exist.",
                 p_export_path->path);
      else if (errno == ENOTDIR)
        LogMajor(COMPONENT_FSAL,
                 "GPFS export path %s is not a directory.",
                 p_export_path->path);
      else
        LogMajor(COMPONENT_FSAL,
                 "Could not open GPFS export path %s: rc = %d(%s)",
                 p_export_path->path, errno, strerror(errno));

      if(mnt_dir != NULL)
        gsh_free(mnt_dir);

      ReturnCode(ERR_FSAL_INVAL, 0);
    }

  p_export_context->mount_root_fd = fd;

  LogFullDebug(COMPONENT_FSAL,
               "GPFSFSAL_BuildExportContext: %d",
               p_export_context->mount_root_fd);

  /* Save pointer to fsal_staticfsinfo_t in export context */
  p_export_context->fe_static_fs_info = &global_fs_info;

  /* save filesystem ID */
  rc = statfs(p_export_path->path, &stat_buf);

  if(rc)
    {
      close(fd);
      LogMajor(COMPONENT_FSAL,
               "statfs call failed on file %s: %d(%s)",
               p_export_path->path, errno, strerror(errno));

      if(mnt_dir != NULL)
        gsh_free(mnt_dir);

      ReturnCode(ERR_FSAL_INVAL, 0);
    }

  p_export_context->fsid[0] = stat_buf.f_fsid.__val[0];
  p_export_context->fsid[1] = stat_buf.f_fsid.__val[1];

  /* save file handle to root of GPFS share */
  op_context.export_context = export_context;

  // op_context.credential = ???
  status = fsal_internal_get_handle(&op_context,
                                    p_export_path,
                                    (fsal_handle_t *)(&(p_export_context->mount_root_handle)));

  if(FSAL_IS_ERROR(status))
    {
      close(p_export_context->mount_root_fd);
      LogMajor(COMPONENT_FSAL,
               "FSAL BUILD EXPORT CONTEXT: ERROR: Conversion from gpfs filesystem root path to handle failed : %d",
               status.minor);

      if(mnt_dir != NULL)
        gsh_free(mnt_dir);

      ReturnCode(ERR_FSAL_INVAL, 0);
    }

  gpfs_fsal_up_ctx = gpfsfsal_find_fsal_up_context(p_export_context);

  if(gpfs_fsal_up_ctx == NULL)
    {
      gpfs_fsal_up_ctx = gsh_calloc(1, sizeof(gpfs_fsal_up_ctx_t));

      if(gpfs_fsal_up_ctx == NULL || mnt_dir == NULL)
        {
          LogFatal(COMPONENT_FSAL,
                   "Out of memory can not continue.");
        }

      /* Initialize the gpfs_fsal_up_ctx */
      init_glist(&gpfs_fsal_up_ctx->gf_exports);
      gpfs_fsal_up_ctx->gf_fs = mnt_dir;
      gpfs_fsal_up_ctx->gf_fsid[0] = p_export_context->fsid[0];
      gpfs_fsal_up_ctx->gf_fsid[1] = p_export_context->fsid[1];

      /* Add it to the list of contexts */
      glist_add_tail(&gpfs_fsal_up_ctx_list, &gpfs_fsal_up_ctx->gf_list);

      start_fsal_up_thread = TRUE;
    }
  else
    {
      if(mnt_dir != NULL)
        gsh_free(mnt_dir);
    }

  /* Add this export context to the list for its gpfs_fsal_up_ctx */
  glist_add_tail(&gpfs_fsal_up_ctx->gf_exports, &p_export_context->fe_list);
  p_export_context->fe_fsal_up_ctx = gpfs_fsal_up_ctx;

  if(start_fsal_up_thread)
    {
      pthread_attr_t attr_thr;

      memset(&attr_thr, 0, sizeof(attr_thr));

      /* Initialization of thread attributes borrowed from nfs_init.c */
      if(pthread_attr_init(&attr_thr) != 0)
        LogCrit(COMPONENT_THREAD, "can't init pthread's attributes");

      if(pthread_attr_setscope(&attr_thr, PTHREAD_SCOPE_SYSTEM) != 0)
        LogCrit(COMPONENT_THREAD, "can't set pthread's scope");

      if(pthread_attr_setdetachstate(&attr_thr, PTHREAD_CREATE_JOINABLE) != 0)
        LogCrit(COMPONENT_THREAD, "can't set pthread's join state");

      if(pthread_attr_setstacksize(&attr_thr, 2116488) != 0)
        LogCrit(COMPONENT_THREAD, "can't set pthread's stack size");

      rc = pthread_create(&gpfs_fsal_up_ctx->gf_thread,
                          &attr_thr,
                          GPFSFSAL_UP_Thread,
                          gpfs_fsal_up_ctx);

      if(rc != 0)
        {
          LogFatal(COMPONENT_THREAD,
                   "Could not create GPFSFSAL_UP_Thread, error = %d (%s)",
                   errno, strerror(errno));
        }
    }

  Return(ERR_FSAL_NO_ERROR, 0, INDEX_FSAL_BuildExportContext);
}
Example #20
0
/*
 * Create, initialize, and add a new ple structure to the global list
 */
static struct gssd_k5_kt_princ *new_ple(krb5_context context,
					krb5_principal princ)
{
	struct gssd_k5_kt_princ *ple = NULL, *p;
	krb5_error_code code;
	char *default_realm;
	int is_default_realm = 0;

	ple = gsh_malloc(sizeof(struct gssd_k5_kt_princ));
	if (ple == NULL)
		goto outerr;
	memset(ple, 0, sizeof(*ple));

#ifdef HAVE_KRB5
	ple->realm = gsh_malloc(princ->realm.length + 1);
	if (ple->realm == NULL)
		goto outerr;
	strmaxcpy(ple->realm, princ->realm.data, princ->realm.length);
	ple->realm[princ->realm.length] = '\0';
#else
	ple->realm = gsh_strdup(princ->realm);
	if (ple->realm == NULL)
		goto outerr;
#endif
	code = krb5_copy_principal(context, princ, &ple->princ);
	if (code)
		goto outerr;

	/*
	 * Add new entry onto the list (if this is the default
	 * realm, always add to the front of the list)
	 */

	code = krb5_get_default_realm(context, &default_realm);
	if (code == 0) {
		if (strcmp(ple->realm, default_realm) == 0)
			is_default_realm = 1;
		k5_free_default_realm(context, default_realm);
	}

	if (is_default_realm) {
		ple->next = gssd_k5_kt_princ_list;
		gssd_k5_kt_princ_list = ple;
	} else {
		p = gssd_k5_kt_princ_list;
		while (p != NULL && p->next != NULL)
			p = p->next;
		if (p == NULL)
			gssd_k5_kt_princ_list = ple;
		else
			p->next = ple;
	}

	return ple;
 outerr:
	if (ple) {
		if (ple->realm)
			gsh_free(ple->realm);
		gsh_free(ple);
	}
	return NULL;
}
Example #21
0
static struct vfs_fsal_obj_handle *alloc_handle(int dirfd,
						vfs_file_handle_t *fh,
						struct fsal_filesystem *fs,
						struct stat *stat,
						vfs_file_handle_t *dir_fh,
						const char *path,
						struct fsal_export *exp_hdl)
{
	struct vfs_fsal_export *myself =
	    container_of(exp_hdl, struct vfs_fsal_export, export);
	struct vfs_fsal_obj_handle *hdl;
	fsal_status_t st;

	hdl = gsh_calloc(1,
			 (sizeof(struct vfs_fsal_obj_handle) +
			  sizeof(vfs_file_handle_t)));
	if (hdl == NULL)
		return NULL;
	hdl->handle = (vfs_file_handle_t *) &hdl[1];
	memcpy(hdl->handle, fh, sizeof(vfs_file_handle_t));
	hdl->obj_handle.type = posix2fsal_type(stat->st_mode);
	hdl->dev = posix2fsal_devt(stat->st_dev);
	hdl->up_ops = exp_hdl->up_ops;
	hdl->obj_handle.fs = fs;

	if (hdl->obj_handle.type == REGULAR_FILE) {
		hdl->u.file.fd = -1;	/* no open on this yet */
		hdl->u.file.openflags = FSAL_O_CLOSED;
	} else if (hdl->obj_handle.type == SYMBOLIC_LINK) {
		ssize_t retlink;
		size_t len = stat->st_size + 1;
		char *link_content = gsh_malloc(len);

		if (link_content == NULL)
			goto spcerr;

		retlink =
		    vfs_readlink_by_handle(fh, dirfd, path, link_content, len);
		if (retlink < 0 || retlink == len)
			goto spcerr;
		link_content[retlink] = '\0';
		hdl->u.symlink.link_content = link_content;
		hdl->u.symlink.link_size = len;
	} else if (vfs_unopenable_type(hdl->obj_handle.type)) {
		/* AF_UNIX sockets, character special, and block
		   special files  require craziness */
		if (dir_fh == NULL) {
			int retval;
			vfs_alloc_handle(dir_fh);
			retval =
			    vfs_fd_to_handle(dirfd, hdl->obj_handle.fs, fh);
			if (retval < 0)
				goto spcerr;
		}
		hdl->u.unopenable.dir = gsh_malloc(sizeof(vfs_file_handle_t));
		if (hdl->u.unopenable.dir == NULL)
			goto spcerr;
		memcpy(hdl->u.unopenable.dir, dir_fh,
		       sizeof(vfs_file_handle_t));
		hdl->u.unopenable.name = gsh_strdup(path);
		if (hdl->u.unopenable.name == NULL)
			goto spcerr;
	}
	hdl->obj_handle.attributes.mask =
	    exp_hdl->exp_ops.fs_supported_attrs(exp_hdl);
	st = posix2fsal_attributes(stat, &hdl->obj_handle.attributes);
	if (FSAL_IS_ERROR(st))
		goto spcerr;
	hdl->obj_handle.attributes.fsid = fs->fsid;
	fsal_obj_handle_init(&hdl->obj_handle, exp_hdl,
			     posix2fsal_type(stat->st_mode));
	vfs_handle_ops_init(&hdl->obj_handle.obj_ops);
	vfs_sub_init_handle_ops(myself, &hdl->obj_handle.obj_ops);

	return hdl;

 spcerr:
	if (hdl->obj_handle.type == SYMBOLIC_LINK) {
		if (hdl->u.symlink.link_content != NULL)
			gsh_free(hdl->u.symlink.link_content);
	} else if (vfs_unopenable_type(hdl->obj_handle.type)) {
		if (hdl->u.unopenable.name != NULL)
			gsh_free(hdl->u.unopenable.name);
		if (hdl->u.unopenable.dir != NULL)
			gsh_free(hdl->u.unopenable.dir);
	}
	gsh_free(hdl);		/* elvis has left the building */
	return NULL;
}
Example #22
0
int main(int argc, char *argv[])
{
	char *tempo_exec_name = NULL;
	char localmachine[MAXHOSTNAMELEN + 1];
	int c;
	int pidfile;
#ifndef HAVE_DAEMON
	pid_t son_pid;
#endif
	sigset_t signals_to_block;

	/* Set the server's boot time and epoch */
	now(&ServerBootTime);
	ServerEpoch = (time_t) ServerBootTime.tv_sec;

	tempo_exec_name = strrchr(argv[0], '/');
	if (tempo_exec_name != NULL) {
		exec_name = gsh_strdup(tempo_exec_name + 1);
		if (!exec_name) {
			fprintf(stderr,
				"Unable to allocate memory for exec name, exiting...\n");
			exit(1);
		}
	}

	if (*exec_name == '\0')
		exec_name = argv[0];

	/* get host name */
	if (gethostname(localmachine, sizeof(localmachine)) != 0) {
		fprintf(stderr, "Could not get local host name, exiting...\n");
		exit(1);
	} else {
		host_name = gsh_strdup(localmachine);
		if (!host_name) {
			fprintf(stderr,
				"Unable to allocate memory for hostname, exiting...\n");
			exit(1);
		}
	}

	/* now parsing options with getopt */
	while ((c = getopt(argc, argv, options)) != EOF) {
		switch (c) {
		case '@':
			/* A little backdoor to keep track of binary versions */
			printf("%s compiled on %s at %s\n", exec_name, __DATE__,
			       __TIME__);
			printf("Release = %s\n", VERSION);
			printf("Release comment = %s\n", VERSION_COMMENT);
			printf("Git HEAD = %s\n", _GIT_HEAD_COMMIT);
			printf("Git Describe = %s\n", _GIT_DESCRIBE);
			exit(0);
			break;

		case 'L':
			/* Default Log */
			log_path = gsh_strdup(optarg);
			if (!log_path) {
				fprintf(stderr,
					"Unable to allocate memory for log path.\n");
				exit(1);
			}
			break;

		case 'N':
			/* debug level */
			debug_level = ReturnLevelAscii(optarg);
			if (debug_level == -1) {
				fprintf(stderr,
					"Invalid value for option 'N': NIV_NULL, NIV_MAJ, NIV_CRIT, NIV_EVENT, NIV_DEBUG, NIV_MID_DEBUG or NIV_FULL_DEBUG expected.\n");
				exit(1);
			}
			break;

		case 'f':
			/* config file */

			config_path = gsh_strdup(optarg);
			if (!config_path) {
				fprintf(stderr,
					"Unable to allocate memory for config path.\n");
				exit(1);
			}
			break;

		case 'p':
			/* PID file */
			pidfile_path = gsh_strdup(optarg);
			if (!pidfile_path) {
				fprintf(stderr,
					"Path %s too long for option 'f'.\n",
					optarg);
				exit(1);
			}
			break;

		case 'd':
			/* Detach or not detach ? */
			detach_flag = true;
			break;

		case 'R':
			/* Shall we manage RPCSEC_GSS? */
			fprintf(stderr,
				"\n\nThe -R flag is deprecated, use this syntax in the configuration file instead:\n\n");
			fprintf(stderr, "NFS_KRB5\n");
			fprintf(stderr, "{\n");
			fprintf(stderr,
				"\tPrincipalName = nfs@<your_host> ;\n");
			fprintf(stderr, "\tKeytabPath = /etc/krb5.keytab ;\n");
			fprintf(stderr, "\tActive_krb5 = true ;\n");
			fprintf(stderr, "}\n\n\n");
			exit(1);
			break;

		case 'T':
			/* Dump the default configuration on stdout */
			my_nfs_start_info.dump_default_config = true;
			break;

		case 'E':
			ServerEpoch = (time_t) atoll(optarg);
			break;

		case '?':
		case 'h':
		default:
			/* display the help */
			fprintf(stderr, usage, exec_name);
			exit(0);
			break;
		}
	}

	/* initialize memory and logging */
	nfs_prereq_init(exec_name, host_name, debug_level, log_path);
	LogEvent(COMPONENT_MAIN,
		 "%s Starting: Version %s, built at %s %s on %s",
		 exec_name, GANESHA_VERSION, __DATE__, __TIME__, BUILD_HOST);

	/* Start in background, if wanted */
	if (detach_flag) {
#ifdef HAVE_DAEMON
		/* daemonize the process (fork, close xterm fds,
		 * detach from parent process) */
		if (daemon(0, 0))
			LogFatal(COMPONENT_MAIN,
				 "Error detaching process from parent: %s",
				 strerror(errno));
#else
		/* Step 1: forking a service process */
		switch (son_pid = fork()) {
		case -1:
			/* Fork failed */
			LogFatal(COMPONENT_MAIN,
				 "Could not start nfs daemon (fork error %d (%s)",
				 errno, strerror(errno));
			break;

		case 0:
			/* This code is within the son (that will actually work)
			 * Let's make it the leader of its process group */
			if (setsid() == -1) {
				LogFatal(COMPONENT_MAIN,
					 "Could not start nfs daemon (setsid error %d (%s)",
					 errno, strerror(errno));
			}
			break;

		default:
			/* This code is within the parent process,
			 * it is useless, it must die */
			LogFullDebug(COMPONENT_MAIN,
				     "Starting a child of pid %d",
				     son_pid);
			exit(0);
			break;
		}
#endif
	}

	/* Make sure Linux file i/o will return with error
	 * if file size is exceeded. */
#ifdef _LINUX
	signal(SIGXFSZ, SIG_IGN);
#endif

	/* Echo PID into pidfile */
	pidfile = open(pidfile_path, O_CREAT | O_RDWR, 0644);
	if (pidfile == -1) {
		LogFatal(COMPONENT_MAIN, "Can't open pid file %s for writing",
			 pidfile_path);
	} else {
		char linebuf[1024];
		struct flock lk;

		/* Try to obtain a lock on the file */
		lk.l_type = F_WRLCK;
		lk.l_whence = SEEK_SET;
		lk.l_start = (off_t) 0;
		lk.l_len = (off_t) 0;
		if (fcntl(pidfile, F_SETLK, &lk) == -1)
			LogFatal(COMPONENT_MAIN, "Ganesha already started");

		/* Put pid into file, then close it */
		(void)snprintf(linebuf, sizeof(linebuf), "%u\n",
			       (unsigned int)getpid());
		if (write(pidfile, linebuf, strlen(linebuf)) == -1)
			LogCrit(COMPONENT_MAIN, "Couldn't write pid to file %s",
				pidfile_path);
	}

	/* Set up for the signal handler.
	 * Blocks the signals the signal handler will handle.
	 */
	sigemptyset(&signals_to_block);
	sigaddset(&signals_to_block, SIGTERM);
	sigaddset(&signals_to_block, SIGHUP);
	sigaddset(&signals_to_block, SIGPIPE);
	if (pthread_sigmask(SIG_BLOCK, &signals_to_block, NULL) != 0)
		LogFatal(COMPONENT_MAIN,
			 "Could not start nfs daemon, pthread_sigmask failed");

	/* Parse the configuration file so we all know what is going on. */

	if (config_path == NULL) {
		LogFatal(COMPONENT_INIT,
			 "start_fsals: No configuration file named.");
		return 1;
	}
	config_struct = config_ParseFile(config_path);

	if (!config_struct) {
		LogFatal(COMPONENT_INIT, "Error while parsing %s: %s",
			 config_path, config_GetErrorMsg());
	}

	/* We need all the fsal modules loaded so we can have
	 * the list available at exports parsing time.
	 */
	start_fsals(config_struct);

	/* parse configuration file */

	if (nfs_set_param_from_conf(config_struct, &my_nfs_start_info)) {
		LogFatal(COMPONENT_INIT,
			 "Error setting parameters from configuration file.");
	}

	if (nfs_check_param_consistency()) {
		LogFatal(COMPONENT_INIT,
			 "Inconsistent parameters found. Exiting...");
	}
	if (init_fsals(config_struct)) { /* init the FSALs from the config */
		LogFatal(COMPONENT_INIT,
			 "FSALs could not initialize. Exiting...");
	}

	/* Free the configuration syntax tree. */

	config_Free(config_struct);

	/* Everything seems to be OK! We can now start service threads */
	nfs_start(&my_nfs_start_info);

	return 0;

}
Example #23
0
static fsal_status_t renamefile(struct fsal_obj_handle *obj_hdl,
				struct fsal_obj_handle *olddir_hdl,
				const char *old_name,
				struct fsal_obj_handle *newdir_hdl,
				const char *new_name)
{
	struct vfs_fsal_obj_handle *olddir, *newdir, *obj;
	int oldfd = -1, newfd = -1;
	fsal_errors_t fsal_error = ERR_FSAL_NO_ERROR;
	int retval = 0;

	olddir =
	    container_of(olddir_hdl, struct vfs_fsal_obj_handle, obj_handle);
	if (olddir_hdl->fsal != olddir_hdl->fs->fsal) {
		LogDebug(COMPONENT_FSAL,
			 "FSAL %s operation for handle belonging to FSAL %s, return EXDEV",
			 olddir_hdl->fsal->name,
			 olddir_hdl->fs->fsal != NULL
				? olddir_hdl->fs->fsal->name
				: "(none)");
		retval = EXDEV;
		fsal_error = posix2fsal_error(retval);
		goto out;
	}
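	/* Open the source directory so it can serve as olddirfd for
	 * renameat() below. */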
	oldfd = vfs_fsal_open(olddir, O_PATH | O_NOACCESS, &fsal_error);
	if (oldfd < 0) {
		retval = -oldfd;
		goto out;
	}
	newdir =
	    container_of(newdir_hdl, struct vfs_fsal_obj_handle, obj_handle);
	if (newdir_hdl->fsal != newdir_hdl->fs->fsal) {
		LogDebug(COMPONENT_FSAL,
			 "FSAL %s operation for handle belonging to FSAL %s, return EXDEV",
			 newdir_hdl->fsal->name,
			 newdir_hdl->fs->fsal != NULL
				? newdir_hdl->fs->fsal->name
				: "(none)");
		retval = EXDEV;
		fsal_error = posix2fsal_error(retval);
		goto out;
	}
	obj = container_of(obj_hdl, struct vfs_fsal_obj_handle, obj_handle);
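	/* Open the destination directory so it can serve as newdirfd for
	 * renameat() below. */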
	newfd = vfs_fsal_open(newdir, O_PATH | O_NOACCESS, &fsal_error);
	if (newfd < 0) {
		retval = -newfd;
		goto out;
	}
	/* Become the user because we are creating/removing objects
	 * in these dirs which messes with quotas and perms.
	 */
	fsal_set_credentials(op_ctx->creds);
	retval = renameat(oldfd, old_name, newfd, new_name);
	if (retval < 0) {
		retval = errno;
		fsal_error = posix2fsal_error(retval);
	} else if (vfs_unopenable_type(obj->obj_handle.type)) {
		/* A block, char, or socket special file has been renamed.
		 * Fix up the information in the handle so we can still
		 * stat it. Save the old name in case we have to undo the
		 * update.
		 */
		char *saved_name = obj->u.unopenable.name;
		memcpy(obj->u.unopenable.dir, newdir->handle,
		       sizeof(vfs_file_handle_t));
		obj->u.unopenable.name = gsh_strdup(new_name);
		if (obj->u.unopenable.name != NULL) {
			/* Discard saved_name */
			gsh_free(saved_name);
		} else {
			/* Allocation failed and there is no clean recovery.
			 * Keep the old name so the handle is at least not
			 * left completely inconsistent.
			 */
			LogCrit(COMPONENT_FSAL,
				"Failed to allocate memory to rename special inode from %s to %s",
				old_name, new_name);
			obj->u.unopenable.name = saved_name;
		}
	}
	fsal_restore_ganesha_credentials();
 out:
	if (oldfd >= 0)
		close(oldfd);
	if (newfd >= 0)
		close(newfd);
	return fsalstat(fsal_error, retval);
}
Example #24
0
int nfs4_op_setclientid(struct nfs_argop4 *op, compound_data_t *data,
			struct nfs_resop4 *resp)
{
	SETCLIENTID4args * const arg_SETCLIENTID4 =
	    &op->nfs_argop4_u.opsetclientid;
	SETCLIENTID4res * const res_SETCLIENTID4 =
	    &resp->nfs_resop4_u.opsetclientid;
	clientaddr4 * const res_SETCLIENTID4_INUSE =
	    &resp->nfs_resop4_u.opsetclientid.SETCLIENTID4res_u.client_using;
	char str_verifier[NFS4_VERIFIER_SIZE * 2 + 1];
	char str_client[NFS4_OPAQUE_LIMIT * 2 + 1];
	char str_client_addr[SOCK_NAME_MAX + 1];
	nfs_client_record_t *client_record;
	nfs_client_id_t *conf;
	nfs_client_id_t *unconf;
	clientid4 clientid;
	verifier4 verifier;
	sockaddr_t client_addr;
	int rc;

	resp->resop = NFS4_OP_SETCLIENTID;

	if (data->minorversion > 0) {
		res_SETCLIENTID4->status = NFS4ERR_NOTSUPP;
		return res_SETCLIENTID4->status;
	}

	copy_xprt_addr(&client_addr, data->req->rq_xprt);

	if (isDebug(COMPONENT_CLIENTID)) {
		sprint_sockip(&client_addr, str_client_addr,
			      sizeof(str_client_addr));

		DisplayOpaqueValue(arg_SETCLIENTID4->client.id.id_val,
				   arg_SETCLIENTID4->client.id.id_len,
				   str_client);

		sprint_mem(str_verifier, arg_SETCLIENTID4->client.verifier,
			   NFS4_VERIFIER_SIZE);
	}

	LogDebug(COMPONENT_CLIENTID,
		 "SETCLIENTID Client addr=%s id=%s verf=%s callback={program=%u r_addr=%s r_netid=%s} ident=%u",
		 str_client_addr, str_client, str_verifier,
		 arg_SETCLIENTID4->callback.cb_program,
		 arg_SETCLIENTID4->callback.cb_location.r_addr,
		 arg_SETCLIENTID4->callback.cb_location.r_netid,
		 arg_SETCLIENTID4->callback_ident);

	/* Do we already have one or more records for client id (x)? */
	client_record = get_client_record(arg_SETCLIENTID4->client.id.id_val,
					  arg_SETCLIENTID4->client.id.id_len,
					  0,
					  0);

	if (client_record == NULL) {
		/* Some major failure */
		LogCrit(COMPONENT_CLIENTID, "SETCLIENTID failed");

		res_SETCLIENTID4->status = NFS4ERR_SERVERFAULT;
		return res_SETCLIENTID4->status;
	}

	/* The following checks are based on RFC3530bis draft 16
	 *
	 * This attempts to implement the logic described in
	 * 15.35.5. IMPLEMENTATION Consider the major bullets as CASE
	 * 1, CASE 2, CASE 3, CASE 4, and CASE 5.
	 */

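	/* Serialize handling of this client record against concurrent
	 * SETCLIENTID, SETCLIENTID_CONFIRM, and reaper activity. */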
	pthread_mutex_lock(&client_record->cr_mutex);

	if (isFullDebug(COMPONENT_CLIENTID)) {
		char str[HASHTABLE_DISPLAY_STRLEN];

		display_client_record(client_record, str);

		LogFullDebug(COMPONENT_CLIENTID,
			     "Client Record %s cr_confirmed_rec=%p "
			     "cr_unconfirmed_rec=%p", str,
			     client_record->cr_confirmed_rec,
			     client_record->cr_unconfirmed_rec);
	}

	conf = client_record->cr_confirmed_rec;

	if (conf != NULL) {
		/* Need a reference to the confirmed record for below */
		inc_client_id_ref(conf);

		if (!nfs_compare_clientcred(&conf->cid_credential,
					    &data->credential)
		    || !cmp_sockaddr(&conf->cid_client_addr,
				     &client_addr,
				     true)) {
			/* CASE 1:
			 *
			 * Confirmed record exists and not the same principal
			 */
			if (isDebug(COMPONENT_CLIENTID)) {
				char confirmed_addr[SOCK_NAME_MAX + 1];

				sprint_sockip(&conf->cid_client_addr,
					      confirmed_addr,
					      sizeof(confirmed_addr));

				LogDebug(COMPONENT_CLIENTID,
					 "Confirmed ClientId %" PRIx64
					 "->'%s': Principals do not match... confirmed addr=%s Return NFS4ERR_CLID_INUSE",
					 conf->cid_clientid, str_client,
					 confirmed_addr);
			}

			res_SETCLIENTID4->status = NFS4ERR_CLID_INUSE;
			res_SETCLIENTID4_INUSE->r_netid =
			    (char *)netid_nc_table[conf->cid_cb.v40.cb_addr.nc]
			    .netid;
			res_SETCLIENTID4_INUSE->r_addr =
			    gsh_strdup(conf->cid_cb.v40.cb_client_r_addr);

			/* Release our reference to the confirmed clientid. */
			dec_client_id_ref(conf);
			goto out;
		}

		/* Check if confirmed record is for (v, x, c, l, s) */
		if (memcmp
		    (arg_SETCLIENTID4->client.verifier,
		     conf->cid_incoming_verifier, NFS4_VERIFIER_SIZE) == 0) {
			/* CASE 2:
			 *
			 * A confirmed record exists for this long
			 * form client id and verifier.
			 *
			 * Consider this to be a possible update of
			 * the call-back information.
			 *
			 * Remove any pre-existing unconfirmed record
			 * for (v, x, c).
			 *
			 * Return the same short form client id (c),
			 * but a new setclientid_confirm verifier (t).
			 */
			LogFullDebug(COMPONENT_CLIENTID,
				     "Update ClientId %" PRIx64 "->%s",
				     conf->cid_clientid, str_client);

			clientid = conf->cid_clientid;
			new_clientid_verifier(verifier);
		} else {
			/* Must be CASE 3 or CASE 4
			 *
			 * Confirmed record is for (u, x, c, l, s).
			 *
			 * These two cases are handled identically; it does
			 * not matter whether an unconfirmed record exists.
			 * Any existing unconfirmed record will be removed
			 * and a new unconfirmed record added.
			 *
			 * Return a new short form clientid (d) and a
			 * new setclientid_confirm verifier (t). (Note
			 * the spec calls the values e and r for CASE
			 * 4).
			 */
			LogFullDebug(COMPONENT_CLIENTID,
				     "Replace ClientId %" PRIx64 "->%s",
				     conf->cid_clientid, str_client);

			clientid = new_clientid();
			new_clientid_verifier(verifier);
		}

		/* Release our reference to the confirmed clientid. */
		dec_client_id_ref(conf);
	} else {
		/* CASE 5:
		 *
		 * Remove any existing unconfirmed record.
		 *
		 * Return a new short form clientid (d) and a new
		 * setclientid_confirm verifier (t).
		 */
		LogFullDebug(COMPONENT_CLIENTID, "New client");
		clientid = new_clientid();
		new_clientid_verifier(verifier);
	}

	/* At this point, no matter what the case was above, we should
	 * remove any pre-existing unconfirmed record.
	 */

	unconf = client_record->cr_unconfirmed_rec;

	if (unconf != NULL) {
		/* Delete the unconfirmed clientid record. Because we
		 * hold cr_mutex, we have won any race for this clientid
		 * record: had a SETCLIENTID_CONFIRM or the reaper thread
		 * won instead, cr_unconfirmed_rec would already have been
		 * NULL.
		 */
		if (isDebug(COMPONENT_CLIENTID)) {
			char str[HASHTABLE_DISPLAY_STRLEN];

			display_client_id_rec(unconf, str);

			LogDebug(COMPONENT_CLIENTID, "Replacing %s", str);
		}

		/* unhash the clientid record */
		remove_unconfirmed_client_id(unconf);
		unconf = NULL;
	}

	/* Now we can proceed to build the new unconfirmed record. We
	 * have determined the clientid and setclientid_confirm values
	 * above.
	 */

	unconf = create_client_id(clientid,
				  client_record,
				  &client_addr,
				  &data->credential,
				  0);

	if (unconf == NULL) {
		/* Error already logged, return */
		res_SETCLIENTID4->status = NFS4ERR_RESOURCE;
		goto out;
	}

	if (strmaxcpy(unconf->cid_cb.v40.cb_client_r_addr,
		      arg_SETCLIENTID4->callback.cb_location.r_addr,
		      sizeof(unconf->cid_cb.v40.cb_client_r_addr)) == -1) {
		LogCrit(COMPONENT_CLIENTID, "Callback r_addr %s too long",
			arg_SETCLIENTID4->callback.cb_location.r_addr);
		res_SETCLIENTID4->status = NFS4ERR_INVAL;

		goto out;
	}

	nfs_set_client_location(unconf,
				&arg_SETCLIENTID4->callback.cb_location);

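	/* Save the verifier the client presented so a later SETCLIENTID
	 * for the same client can be compared against it. */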
	memcpy(unconf->cid_incoming_verifier,
	       arg_SETCLIENTID4->client.verifier,
	       NFS4_VERIFIER_SIZE);

	memcpy(unconf->cid_verifier, verifier, sizeof(NFS4_write_verifier));

	unconf->cid_cb.v40.cb_program = arg_SETCLIENTID4->callback.cb_program;
	unconf->cid_cb.v40.cb_callback_ident = arg_SETCLIENTID4->callback_ident;

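	/* Hash the new unconfirmed record into the client-id tables. */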
	rc = nfs_client_id_insert(unconf);

	if (rc != CLIENT_ID_SUCCESS) {
		/* Record is already freed, return. */
		res_SETCLIENTID4->status = clientid_error_to_nfsstat(rc);
		goto out;
	}

	if (isDebug(COMPONENT_CLIENTID)) {
		char str[HASHTABLE_DISPLAY_STRLEN];

		sprint_mem(str_verifier, verifier, NFS4_VERIFIER_SIZE);

		display_client_id_rec(unconf, str);
		LogDebug(COMPONENT_CLIENTID, "SETCLIENTID reply Verifier=%s %s",
			 str_verifier, str);
	}

	res_SETCLIENTID4->status = NFS4_OK;
	res_SETCLIENTID4->SETCLIENTID4res_u.resok4.clientid = clientid;
	memcpy(res_SETCLIENTID4->SETCLIENTID4res_u.resok4.setclientid_confirm,
	       &verifier, NFS4_VERIFIER_SIZE);

 out:

	pthread_mutex_unlock(&client_record->cr_mutex);

	/* Release our reference to the client record */
	dec_client_record_ref(client_record);

	return res_SETCLIENTID4->status;
}
Example #25
0
/*
 * Obtain credentials via a key in the keytab given
 * a keytab handle and a gssd_k5_kt_princ structure.
 * Checks to see if current credentials are expired,
 * if not, uses the keytab to obtain new credentials.
 *
 * Returns:
 *	0 => success (or credentials have not expired)
 *	nonzero => error
 */
static int gssd_get_single_krb5_cred(krb5_context context, krb5_keytab kt,
				     struct gssd_k5_kt_princ *ple, int nocache)
{
#if HAVE_KRB5_GET_INIT_CREDS_OPT_SET_ADDRESSLESS
	krb5_get_init_creds_opt *init_opts = NULL;
#else
	krb5_get_init_creds_opt options;
#endif
	krb5_get_init_creds_opt *opts;
	krb5_creds my_creds;
	krb5_ccache ccache = NULL;
	char kt_name[BUFSIZ];
	char cc_name[BUFSIZ];
	int code;
	time_t now = time(0);
	char *cache_type;
	char *pname = NULL;
	char *k5err = NULL;

	memset(&my_creds, 0, sizeof(my_creds));

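	/* If cached credentials exist and have not expired (and caching
	 * is allowed), simply reuse them. */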
	if (ple->ccname && ple->endtime > now && !nocache) {
		printerr(2, "INFO: Credentials in CC '%s' are good until %d\n",
			 ple->ccname, ple->endtime);
		code = 0;
		goto out;
	}

	code = krb5_kt_get_name(context, kt, kt_name, BUFSIZ);
	if (code != 0) {
		printerr(0,
			 "ERROR: Unable to get keytab name in "
			 "gssd_get_single_krb5_cred\n");
		goto out;
	}

	if ((krb5_unparse_name(context, ple->princ, &pname)))
		pname = NULL;

#if HAVE_KRB5_GET_INIT_CREDS_OPT_SET_ADDRESSLESS
	code = krb5_get_init_creds_opt_alloc(context, &init_opts);
	if (code) {
		k5err = gssd_k5_err_msg(context, code);
		printerr(0, "ERROR: %s allocating gic options\n", k5err);
		goto out;
	}
	if (krb5_get_init_creds_opt_set_addressless(context, init_opts, 1))
		printerr(1,
			 "WARNING: Unable to set option for addressless "
			 "tickets.  May have problems behind a NAT.\n");
#ifdef TEST_SHORT_LIFETIME
	/* set a short lifetime (for debugging only!) */
	printerr(0, "WARNING: Using (debug) short machine cred lifetime!\n");
	krb5_get_init_creds_opt_set_tkt_life(init_opts, 5 * 60);
#endif
	opts = init_opts;

#else /* HAVE_KRB5_GET_INIT_CREDS_OPT_SET_ADDRESSLESS */

	krb5_get_init_creds_opt_init(&options);
	krb5_get_init_creds_opt_set_address_list(&options, NULL);
#ifdef TEST_SHORT_LIFETIME
	/* set a short lifetime (for debugging only!) */
	printerr(0, "WARNING: Using (debug) short machine cred lifetime!\n");
	krb5_get_init_creds_opt_set_tkt_life(&options, 5 * 60);
#endif
	opts = &options;
#endif

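	/* Acquire an initial ticket for the principal using its key in
	 * the keytab. */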
	code = krb5_get_init_creds_keytab(context, &my_creds,
					  ple->princ, kt, 0,
					  NULL, opts);
	if (code != 0) {
		k5err = gssd_k5_err_msg(context, code);
		printerr(1,
			 "WARNING: %s while getting initial ticket for "
			 "principal '%s' using keytab '%s'\n", k5err,
			 pname ? pname : "<unparsable>", kt_name);
		goto out;
	}

	/*
	 * Initialize cache file which we're going to be using
	 */

	if (use_memcache)
		cache_type = "MEMORY";
	else
		cache_type = "FILE";
	snprintf(cc_name, sizeof(cc_name), "%s:%s/%s%s_%s", cache_type,
		 ccachesearch[0], GSSD_DEFAULT_CRED_PREFIX,
		 GSSD_DEFAULT_MACHINE_CRED_SUFFIX, ple->realm);
	ple->endtime = my_creds.times.endtime;
	if (ple->ccname != NULL)
		gsh_free(ple->ccname);
	ple->ccname = gsh_strdup(cc_name);
	if (ple->ccname == NULL) {
		printerr(0,
			 "ERROR: no storage to duplicate credentials "
			 "cache name '%s'\n", cc_name);
		code = ENOMEM;
		goto out;
	}
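	/* Resolve the cache name, (re)initialize the cache, and store the
	 * freshly acquired credentials in it. */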
	code = krb5_cc_resolve(context, cc_name, &ccache);
	if (code != 0) {
		k5err = gssd_k5_err_msg(context, code);
		printerr(0, "ERROR: %s while opening credential cache '%s'\n",
			 k5err, cc_name);
		goto out;
	}
	code = krb5_cc_initialize(context, ccache, ple->princ);
	if (code != 0) {
		k5err = gssd_k5_err_msg(context, code);
		printerr(0,
			 "ERROR: %s while initializing credential "
			 "cache '%s'\n", k5err, cc_name);
	}
	code = krb5_cc_store_cred(context, ccache, &my_creds);
	if (code != 0) {
		gsh_free(k5err);	/* may still hold a cc_initialize error message */
		k5err = gssd_k5_err_msg(context, code);
		printerr(0, "ERROR: %s while storing credentials in '%s'\n",
			 k5err, cc_name);
		goto out;
	}
	/* if we get this far, let gss mech know */
	gssd_set_krb5_ccache_name(cc_name);
	code = 0;
	printerr(2,
		 "Successfully obtained machine credentials for "
		 "principal '%s' stored in ccache '%s'\n", pname, cc_name);
 out:
#if HAVE_KRB5_GET_INIT_CREDS_OPT_SET_ADDRESSLESS
	if (init_opts)
		krb5_get_init_creds_opt_free(context, init_opts);
#endif
	if (pname)
		k5_free_unparsed_name(context, pname);
	if (ccache)
		krb5_cc_close(context, ccache);
	krb5_free_cred_contents(context, &my_creds);
	gsh_free(k5err);
	return code;
}