Example #1
/**
 * @brief Initialize an NFS4 owner object (open or lock owner)
 *
 * @param[in] owner The owner record
 *
 */
static void init_nfs4_owner(state_owner_t *owner)
{
	glist_init(&owner->so_owner.so_nfs4_owner.so_state_list);

	/* Increment refcount on related owner */
	if (owner->so_owner.so_nfs4_owner.so_related_owner != NULL)
		inc_state_owner_ref(owner->so_owner.so_nfs4_owner.
				    so_related_owner);

	/* Increment reference count for clientid record */
	inc_client_id_ref(owner->so_owner.so_nfs4_owner.so_clientrec);

	PTHREAD_MUTEX_lock(&owner->so_owner.so_nfs4_owner.so_clientrec
			   ->cid_mutex);

	if (owner->so_type == STATE_OPEN_OWNER_NFSV4) {
		/* If open owner, add to clientid open owner list */
		glist_add_tail(&owner->so_owner.so_nfs4_owner.so_clientrec->
			       cid_openowners,
			       &owner->so_owner.so_nfs4_owner.so_perclient);
	} else if (owner->so_type == STATE_LOCK_OWNER_NFSV4) {
		/* If lock owner, add to clientid lock owner list */
		glist_add_tail(&owner->so_owner.so_nfs4_owner.so_clientrec->
			       cid_lockowners,
			       &owner->so_owner.so_nfs4_owner.so_perclient);
	}

	PTHREAD_MUTEX_unlock(&owner->so_owner.so_nfs4_owner.so_clientrec
			     ->cid_mutex);
}
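
The glist_add_tail(&head, &entry->member) calls above use the intrusive linked-list pattern found throughout nfs-ganesha: the link node is embedded in the owner record rather than allocated separately. Below is a minimal, self-contained sketch of that pattern, assuming nothing about the real gsh_list.h beyond what the example shows (the names node_t, list_demo, and the container_of macro here are illustrative):

#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive circular list, illustrative only */
struct glist_head {
	struct glist_head *next;
	struct glist_head *prev;
};

static void glist_init(struct glist_head *head)
{
	head->next = head;
	head->prev = head;
}

static void glist_add_tail(struct glist_head *head, struct glist_head *new)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Recover the enclosing record from an embedded glist_head */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* A record embeds the link, as state_owner_t embeds so_perclient above */
struct node_t {
	int id;
	struct glist_head link;
};

int main(void)
{
	struct glist_head list_demo;
	struct node_t a = { .id = 1 }, b = { .id = 2 };
	struct glist_head *it;

	glist_init(&list_demo);
	glist_add_tail(&list_demo, &a.link);
	glist_add_tail(&list_demo, &b.link);

	for (it = list_demo.next; it != &list_demo; it = it->next)
		printf("id=%d\n", container_of(it, struct node_t, link)->id);

	return 0;
}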
Example #2
int
parse_mapping_cfg_params(cfg_t *map, conf_mapping_t *conf_mapping, uint8_t is_local)
{

    int ctr;
    cfg_t *rl;
    conf_loc_t *conf_loc;
    conf_loc_iface_t *conf_loc_iface;
    int afi;

    strcpy(conf_mapping->eid_prefix,cfg_getstr(map, "eid-prefix"));
    conf_mapping->iid = cfg_getint(map, "iid");

    for (ctr = 0; ctr < cfg_size(map, "rloc-address"); ctr++){
        rl = cfg_getnsec(map, "rloc-address", ctr);
        conf_loc = conf_loc_new_init(
                cfg_getstr(rl, "address"),
                cfg_getint(rl, "priority"),
                cfg_getint(rl, "weight"),
                255,0);
        glist_add_tail(conf_loc,conf_mapping->conf_loc_list);
    }

    if (is_local){

        for (ctr = 0; ctr < cfg_size(map, "rloc-iface"); ctr++){
            rl = cfg_getnsec(map, "rloc-iface", ctr);
            afi = cfg_getint(rl, "ip_version");
            if (afi == 4){
                afi = AF_INET;
            }else if (afi == 6){
                afi = AF_INET6;
            }else{
                OOR_LOG(LERR,"Configuration file: The conf_loc_iface->ip_version of the locator should be 4 (IPv4) or 6 (IPv6)");
                return (BAD);
            }
            conf_loc_iface = conf_loc_iface_new_init(
                    cfg_getstr(rl, "interface"),
                    afi,
                    cfg_getint(rl, "priority"),
                    cfg_getint(rl, "weight"),
                    255,0);
            glist_add_tail(conf_loc_iface,conf_mapping->conf_loc_iface_list);
        }
    }

    return (GOOD);
}
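
Example #2 reads its values through libconfuse (cfg_getnsec, cfg_getstr, cfg_getint). A minimal sketch of an option table that would produce such a map section, using only the option names visible in the calls above; the top-level section name database-mapping and the parse_file wrapper are assumptions, not necessarily OOR's actual configuration layout:

#include <confuse.h>

/* Illustrative option table matching the cfg_get* calls in Example #2 */
static cfg_t *parse_file(const char *path)
{
    static cfg_opt_t rloc_address_opts[] = {
        CFG_STR("address", NULL, CFGF_NONE),
        CFG_INT("priority", 1, CFGF_NONE),
        CFG_INT("weight", 100, CFGF_NONE),
        CFG_END()
    };
    static cfg_opt_t rloc_iface_opts[] = {
        CFG_STR("interface", NULL, CFGF_NONE),
        CFG_INT("ip_version", 4, CFGF_NONE),
        CFG_INT("priority", 1, CFGF_NONE),
        CFG_INT("weight", 100, CFGF_NONE),
        CFG_END()
    };
    static cfg_opt_t map_opts[] = {
        CFG_STR("eid-prefix", NULL, CFGF_NONE),
        CFG_INT("iid", 0, CFGF_NONE),
        CFG_SEC("rloc-address", rloc_address_opts, CFGF_MULTI),
        CFG_SEC("rloc-iface", rloc_iface_opts, CFGF_MULTI),
        CFG_END()
    };
    static cfg_opt_t opts[] = {
        CFG_SEC("database-mapping", map_opts, CFGF_MULTI),
        CFG_END()
    };
    cfg_t *cfg = cfg_init(opts, CFGF_NONE);

    if (cfg_parse(cfg, path) != CFG_SUCCESS) {
        cfg_free(cfg);
        return NULL;
    }
    return cfg;
}

Each cfg_getnsec(map, "rloc-address", ctr) call in Example #2 then returns the ctr-th such sub-section of the parsed map.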
Example #3
fsal_status_t  schedule_fsal_up_event_process(fsal_up_event_t *arg)
{
  int rc;
  fsal_status_t ret = {0, 0};

  /* Events that need a quick response, and locking events (which have
     their own queue) get processed here; the rest will be queued. */
  if (arg->event_type == FSAL_UP_EVENT_LOCK_GRANT ||
      arg->event_type == FSAL_UP_EVENT_LOCK_AVAIL)
    {
      arg->event_process_func(&arg->event_data);

      gsh_free(arg->event_data.event_context.fsal_data.fh_desc.start);
      pool_free(fsal_up_event_pool, arg);
      return ret;
    }

  if(arg->event_type == FSAL_UP_EVENT_INVALIDATE)
    {
      arg->event_process_func(&arg->event_data);
      /* Step 2 is where we perform the close, which could be an
         expensive operation, so defer it to a separate thread. */
      arg->event_process_func = dumb_fsal_up_invalidate_step2;
    }
  if(arg->event_type == FSAL_UP_EVENT_UPDATE)
  {

   /* Invalidate first without scheduling */
    arg->event_process_func(&arg->event_data);

    if ((arg->event_data.type.update.upu_flags & FSAL_UP_NLINK) &&
        (arg->event_data.type.update.upu_stat_buf.st_nlink == 0) )
    {
      /* upu_flags must be set or st_nlink is unreliable. */
      /* Step 2 is where we perform the close, which could be an
         expensive operation, so defer it to a separate thread. */
      arg->event_process_func = dumb_fsal_up_invalidate_step2;
    } else
            return ret;
  }

  /* Now queue them for further processing. */
  LogFullDebug(COMPONENT_FSAL_UP, "Schedule %p", arg);

  P(fsal_up_process_tcb.tcb_mutex);
  glist_add_tail(&fsal_up_process_queue, &arg->event_list);
  rc = pthread_cond_signal(&fsal_up_process_tcb.tcb_condvar);
  LogFullDebug(COMPONENT_FSAL_UP,"Signaling tcb_condvar");
  if (rc != 0)
    {
      LogDebug(COMPONENT_FSAL_UP,
                   "Unable to signal FSAL_UP Process Thread");
      glist_del(&arg->event_list);
      ret.major = ERR_FSAL_FAULT;
    }
  V(fsal_up_process_tcb.tcb_mutex);
  return ret;
}
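
Example #3 only enqueues the deferred events; a separate thread is expected to wait on tcb_condvar and drain fsal_up_process_queue under tcb_mutex. A minimal sketch of that consumer pattern, using a simplified singly-linked queue rather than Ganesha's actual TCB machinery (demo_worker and the other demo_* names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* Illustrative consumer for a condvar-guarded event queue */
struct demo_event {
	struct demo_event *next;
	void (*process)(void *arg);
	void *arg;
};

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_cond = PTHREAD_COND_INITIALIZER;
static struct demo_event *demo_queue;
static bool demo_shutdown;

static void *demo_worker(void *unused)
{
	(void)unused;

	pthread_mutex_lock(&demo_mutex);
	while (!demo_shutdown) {
		while (demo_queue == NULL && !demo_shutdown)
			pthread_cond_wait(&demo_cond, &demo_mutex);

		while (demo_queue != NULL) {
			struct demo_event *ev = demo_queue;

			demo_queue = ev->next;
			/* Run the potentially expensive step unlocked */
			pthread_mutex_unlock(&demo_mutex);
			ev->process(ev->arg);
			pthread_mutex_lock(&demo_mutex);
		}
	}
	pthread_mutex_unlock(&demo_mutex);
	return NULL;
}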
Example #4
File: oor_control.c Project: biels/oor
static void
set_rlocs(oor_ctrl_t *ctrl)
{
    iface_t *iface;
    glist_entry_t *iface_it;

    glist_remove_all(ctrl->rlocs);
    glist_remove_all(ctrl->ipv4_rlocs);
    glist_remove_all(ctrl->ipv6_rlocs);
    ctrl->supported_afis = NO_AFI_SUPPOT;

    glist_for_each_entry(iface_it,interface_list){
        iface = (iface_t *)glist_entry_data(iface_it);
        if (iface->ipv4_address && !lisp_addr_is_no_addr(iface->ipv4_address)) {
            glist_add_tail(iface->ipv4_address, ctrl->ipv4_rlocs);
            glist_add_tail(iface->ipv4_address, ctrl->rlocs);
        }
        if (iface->ipv6_address && !lisp_addr_is_no_addr(iface->ipv6_address)) {
            glist_add_tail(iface->ipv6_address, ctrl->ipv6_rlocs);
            glist_add_tail(iface->ipv6_address, ctrl->rlocs);
        }
    }
}
Example #5
static void
parse_elp_list(cfg_t *cfg, shash_t *ht)
{
    elp_node_t *enode;
    elp_t *elp;
    lisp_addr_t *laddr;
    char *name;
    int i, j;

    for(i = 0; i < cfg_size(cfg, "explicit-locator-path"); i++) {
        cfg_t *selp = cfg_getnsec(cfg, "explicit-locator-path", i);
        name = cfg_getstr(selp, "elp-name");

        elp = elp_type_new();

        for (j = 0; j < cfg_size(selp, "elp-node");j++) {
            cfg_t *senode = cfg_getnsec(selp, "elp-node", j);
            enode = xzalloc(sizeof(elp_node_t));
            enode->addr = lisp_addr_new();
            if (lisp_addr_ip_from_char(cfg_getstr(senode, "address"),
                    enode->addr) != GOOD) {
                elp_node_del(enode);
                OOR_LOG(LDBG_1, "parse_elp_list: Couldn't parse ELP node %s",
                        cfg_getstr(senode, "address"));
                continue;
            }
            enode->L = cfg_getbool(senode, "lookup") ? 1 : 0;
            enode->P = cfg_getbool(senode, "probe") ? 1 : 0;
            enode->S = cfg_getbool(senode, "strict") ? 1: 0;

            glist_add_tail(enode, elp->nodes);
        }

        laddr = lisp_addr_new_lafi(LM_AFI_LCAF);
        lisp_addr_lcaf_set_type(laddr, LCAF_EXPL_LOC_PATH);
        lisp_addr_lcaf_set_addr(laddr, elp);
        OOR_LOG(LDBG_1, "Configuration file: parsed explicit-locator-path: %s",
                lisp_addr_to_char(laddr));

        shash_insert(ht, strdup(name), laddr);
    }
}
Example #6
static void
parse_rle_list(cfg_t *cfg, shash_t *ht)
{
    rle_node_t *rnode;
    rle_t *rle;
    lisp_addr_t *laddr;
    char *name;
    int i, j;

    for (i = 0; i < cfg_size(cfg, "replication-list"); i++) {
        cfg_t *selp = cfg_getnsec(cfg, "replication-list", i);
        name = cfg_getstr(selp, "rle-name");

        laddr = lisp_addr_new_lafi(LM_AFI_LCAF);
        lisp_addr_lcaf_set_type(laddr, LCAF_RLE);

        rle = rle_type_new();

        for (j = 0; j < cfg_size(selp, "rle-node"); j++) {
            cfg_t *rlenode = cfg_getnsec(selp, "rle-node", j);
            rnode = rle_node_new();
            if (lisp_addr_ip_from_char(cfg_getstr(rlenode, "address"),
                    rnode->addr) != GOOD) {
                rle_node_del(rnode);
                OOR_LOG(LDBG_1, "parse_rle_list: Couldn't parse RLE node %s",
                        cfg_getstr(rlenode, "address"));
                /* rnode was freed above; skip to the next node */
                continue;
            }
            rnode->level = cfg_getint(rlenode, "level");

            glist_add_tail(rnode, rle->nodes);
        }
        lisp_addr_lcaf_set_addr(laddr, (void *)rle);
        OOR_LOG(LDBG_1, "Configuration file: parsed replication-list: %s",
                lisp_addr_to_char(laddr));

        shash_insert(ht, strdup(name), laddr);
    }

}
Example #7
int nfs4_op_lock(struct nfs_argop4 *op, compound_data_t * data, struct nfs_resop4 *resp)
{
  char __attribute__ ((__unused__)) funcname[] = "nfs4_op_lock";

#ifndef _WITH_NFSV4_LOCKS
  /* Locks are not supported */
  resp->resop = NFS4_OP_LOCK;
  res_LOCK4.status = NFS4ERR_LOCK_NOTSUPP;

  return res_LOCK4.status;
#else

  state_status_t            state_status;
  state_data_t              candidate_data;
  state_type_t              candidate_type;
  int                       rc = 0;
  seqid4                    seqid;
  state_t                 * plock_state;    /* state for the lock */
  state_t                 * pstate_open;    /* state for the open owner */
  state_owner_t           * plock_owner;
  state_owner_t           * popen_owner;
  state_owner_t           * presp_owner;    /* Owner to store response in */
  state_owner_t           * conflict_owner = NULL;
  state_nfs4_owner_name_t   owner_name;
  nfs_client_id_t           nfs_client_id;
  state_lock_desc_t         lock_desc, conflict_desc;
  state_blocking_t          blocking = STATE_NON_BLOCKING;
  const char              * tag = "LOCK";

  LogDebug(COMPONENT_NFS_V4_LOCK,
           "Entering NFS v4 LOCK handler -----------------------------------------------------");

  /* Initialize to sane starting values */
  resp->resop = NFS4_OP_LOCK;

  /* If there is no FH */
  if(nfs4_Is_Fh_Empty(&(data->currentFH)))
    {
      res_LOCK4.status = NFS4ERR_NOFILEHANDLE;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed nfs4_Is_Fh_Empty");
      return res_LOCK4.status;
    }

  /* If the filehandle is invalid */
  if(nfs4_Is_Fh_Invalid(&(data->currentFH)))
    {
      res_LOCK4.status = NFS4ERR_BADHANDLE;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed nfs4_Is_Fh_Invalid");
      return res_LOCK4.status;
    }

  /* Tests if the Filehandle is expired (for volatile filehandle) */
  if(nfs4_Is_Fh_Expired(&(data->currentFH)))
    {
      res_LOCK4.status = NFS4ERR_FHEXPIRED;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed nfs4_Is_Fh_Expired");
      return res_LOCK4.status;
    }

  /* Lock is done only on a file */
  if(data->current_filetype != REGULAR_FILE)
    {
      /* Type of the entry is not correct */
      switch (data->current_filetype)
        {
        case DIRECTORY:
          res_LOCK4.status = NFS4ERR_ISDIR;
          break;
        default:
          res_LOCK4.status = NFS4ERR_INVAL;
          break;
        }
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed wrong file type");
      return res_LOCK4.status;
    }

  /* Convert lock parameters to internal types */
  switch(arg_LOCK4.locktype)
    {
      case READ_LT:
        lock_desc.sld_type = STATE_LOCK_R;
        blocking           = STATE_NON_BLOCKING;
        break;

      case WRITE_LT:
        lock_desc.sld_type = STATE_LOCK_W;
        blocking           = STATE_NON_BLOCKING;
        break;

      case READW_LT:
        lock_desc.sld_type = STATE_LOCK_R;
        blocking           = STATE_NFSV4_BLOCKING;
        break;

      case WRITEW_LT:
        lock_desc.sld_type = STATE_LOCK_W;
        blocking           = STATE_NFSV4_BLOCKING;
        break;
    }

  lock_desc.sld_offset = arg_LOCK4.offset;

  if(arg_LOCK4.length != STATE_LOCK_OFFSET_EOF)
    lock_desc.sld_length = arg_LOCK4.length;
  else
    lock_desc.sld_length = 0;

  if(arg_LOCK4.locker.new_lock_owner)
    {
      /* New lock owner, Find the open owner */
      tag = "LOCK (new owner)";

      /* Check stateid correctness and get pointer to state */
      if((rc = nfs4_Check_Stateid(&arg_LOCK4.locker.locker4_u.open_owner.open_stateid,
                                  data->current_entry,
                                  0LL,
                                  &pstate_open,
                                  data,
                                  STATEID_SPECIAL_FOR_LOCK,
                                  tag)) != NFS4_OK)
        {
          res_LOCK4.status = rc;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed nfs4_Check_Stateid for open owner");
          return res_LOCK4.status;
        }

      popen_owner = pstate_open->state_powner;
      plock_state = NULL;
      plock_owner = NULL;
      presp_owner = popen_owner;
      seqid       = arg_LOCK4.locker.locker4_u.open_owner.open_seqid;

      LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
              "LOCK New lock owner from open owner",
              data->current_entry,
              data->pcontext,
              popen_owner,
              &lock_desc);

      /* Check if the clientid is known or not */
      if(nfs_client_id_get(arg_LOCK4.locker.locker4_u.open_owner.lock_owner.clientid,
                           &nfs_client_id) == CLIENT_ID_NOT_FOUND)
        {
          res_LOCK4.status = NFS4ERR_STALE_CLIENTID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed nfs_client_id_get");
          return res_LOCK4.status;
        }

      /* The related stateid is already stored in pstate_open */

      /* An open state has been found. Check its type */
      if(pstate_open->state_type != STATE_TYPE_SHARE)
        {
          res_LOCK4.status = NFS4ERR_BAD_STATEID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed open stateid is not a SHARE");
          return res_LOCK4.status;
        }

      /* Lock seqid (seqid wanted for new lock) should be 0 (see newpynfs test LOCK8c)  */
      if(arg_LOCK4.locker.locker4_u.open_owner.lock_seqid != 0)
        {
          res_LOCK4.status = NFS4ERR_BAD_SEQID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed new lock stateid is not 0");
          return res_LOCK4.status;
        }

      /* Is this lock_owner known ? */
      convert_nfs4_lock_owner(&arg_LOCK4.locker.locker4_u.open_owner.lock_owner,
                              &owner_name);
    }
  else
    {
      /* Existing lock owner
       * Find the lock stateid
       * From that, get the open_owner
       */
      tag = "LOCK (existing owner)";

      /* There was code here before to handle the all-0 stateid, but that
       * really doesn't apply - when we handle temporary locks for
       * I/O operations (which is where we will see all-0 or all-1
       * stateids), those will not come in through nfs4_op_lock.
       */

      /* Check stateid correctness and get pointer to state */
      if((rc = nfs4_Check_Stateid(&arg_LOCK4.locker.locker4_u.lock_owner.lock_stateid,
                                  data->current_entry,
                                  0LL,
                                  &plock_state,
                                  data,
                                  STATEID_SPECIAL_FOR_LOCK,
                                  tag)) != NFS4_OK)
        {
          res_LOCK4.status = rc;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed nfs4_Check_Stateid for existing lock owner");
          return res_LOCK4.status;
        }

      /* A lock state has been found. Check its type */
      if(plock_state->state_type != STATE_TYPE_LOCK)
        {
          res_LOCK4.status = NFS4ERR_BAD_STATEID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed existing lock owner, state type is not LOCK");
          return res_LOCK4.status;
        }

      /* Get the old lockowner. We can do the following 'cast' because in
       * NFSv4 lock_owner4 and open_owner4 are different types with the
       * same definition. */
      plock_owner = plock_state->state_powner;
      popen_owner = plock_owner->so_owner.so_nfs4_owner.so_related_owner;
      pstate_open = plock_state->state_data.lock.popenstate;
      presp_owner = plock_owner;
      seqid       = arg_LOCK4.locker.locker4_u.lock_owner.lock_seqid;

      LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
              "LOCK Existing lock owner",
              data->current_entry,
              data->pcontext,
              plock_owner,
              &lock_desc);

#ifdef _CONFORM_TO_TEST_LOCK8c
      /* Check validity of the seqid */
      if(arg_LOCK4.locker.locker4_u.lock_owner.lock_seqid != 0)
        {
          res_LOCK4.status = NFS4ERR_BAD_SEQID;
          LogDebug(COMPONENT_NFS_V4_LOCK,
                   "LOCK failed existing lock owner, lock seqid != 0");
          return res_LOCK4.status;
        }
#endif
    }                           /* if( arg_LOCK4.locker.new_lock_owner ) */

  /* Check seqid (lock_seqid or open_seqid) */
  if(!Check_nfs4_seqid(presp_owner, seqid, op, data, resp, tag))
    {
      /* Response is all setup for us and LogDebug told what was wrong */
      return res_LOCK4.status;
    }

  /* Lock length should not be 0 */
  if(arg_LOCK4.length == 0LL)
    {
      res_LOCK4.status = NFS4ERR_INVAL;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed length == 0");

      /* Save the response in the lock or open owner */
      Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

      return res_LOCK4.status;
    }

  /* Check for range overflow.
   * Comparing beyond 2^64 is not possible in 64-bit precision,
   * but off+len > 2^64-1 is equivalent to len > 2^64-1 - off
   */
  if(lock_desc.sld_length > (STATE_LOCK_OFFSET_EOF - lock_desc.sld_offset))
    {
      res_LOCK4.status = NFS4ERR_INVAL;
      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed length overflow");

      /* Save the response in the lock or open owner */
      Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

      return res_LOCK4.status;
    }

  /* check if open state has correct access for type of lock.
   * Don't need to check for conflicting states since this open
   * state assures there are no conflicting states.
   */
  if(((arg_LOCK4.locktype == WRITE_LT || arg_LOCK4.locktype == WRITEW_LT) &&
      ((pstate_open->state_data.share.share_access & OPEN4_SHARE_ACCESS_WRITE) == 0)) ||
     ((arg_LOCK4.locktype == READ_LT || arg_LOCK4.locktype == READW_LT) &&
      ((pstate_open->state_data.share.share_access & OPEN4_SHARE_ACCESS_READ) == 0)))
    {
      /* The open state doesn't allow access based on the type of lock */
      LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
              "LOCK failed, SHARE doesn't allow access",
              data->current_entry,
              data->pcontext,
              plock_owner,
              &lock_desc);

      res_LOCK4.status = NFS4ERR_OPENMODE;

      /* Save the response in the lock or open owner */
      Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

      return res_LOCK4.status;
    }

  if(arg_LOCK4.locker.new_lock_owner)
    {
      /* A lock owner is always associated with a previously made open
       * which has itself a previously made stateid
       */

      /* Get reference to open owner */
      inc_state_owner_ref(popen_owner);

      if(nfs4_owner_Get_Pointer(&owner_name, &plock_owner))
        {
          /* Lock owner already exists, check lock_seqid if it's not 0 */
          if(!Check_nfs4_seqid(plock_owner,
                               arg_LOCK4.locker.locker4_u.open_owner.lock_seqid,
                               op,
                               data,
                               resp,
                               "LOCK (new owner but owner exists)"))
            {
              /* Response is all setup for us and LogDebug told what was wrong */
              return res_LOCK4.status;
            }
        }
      else
        {
          /* This lock owner is not known yet; allocate and set up a new one */
          plock_owner = create_nfs4_owner(data->pclient,
                                          &owner_name,
                                          STATE_LOCK_OWNER_NFSV4,
                                          popen_owner,
                                          0);

          if(plock_owner == NULL)
            {
              res_LOCK4.status = NFS4ERR_RESOURCE;

              LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
                      "LOCK failed to create new lock owner",
                      data->current_entry,
                      data->pcontext,
                      popen_owner,
                      &lock_desc);

              return res_LOCK4.status;
            }
        }

      /* Prepare state management structure */
      memset(&candidate_data, 0, sizeof(candidate_data));
      candidate_type = STATE_TYPE_LOCK;
      candidate_data.lock.popenstate = pstate_open;

      /* Add the lock state to the lock table */
      if(state_add(data->current_entry,
                   candidate_type,
                   &candidate_data,
                   plock_owner,
                   data->pclient,
                   data->pcontext,
                   &plock_state, &state_status) != STATE_SUCCESS)
        {
          res_LOCK4.status = NFS4ERR_RESOURCE;

          LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
                  "LOCK failed to add new stateid",
                  data->current_entry,
                  data->pcontext,
                  plock_owner,
                  &lock_desc);

          dec_state_owner_ref(plock_owner, data->pclient);

          return res_LOCK4.status;
        }

      init_glist(&plock_state->state_data.lock.state_locklist);

      /* Add lock state to the list of lock states belonging to the open state */
      glist_add_tail(&pstate_open->state_data.share.share_lockstates,
                     &plock_state->state_data.lock.state_sharelist);
    }                           /* if( arg_LOCK4.locker.new_lock_owner ) */

  /* Now we have a lock owner and a stateid.
   * Go ahead and push lock into SAL (and FSAL).
   */
  if(state_lock(data->current_entry,
                data->pcontext,
                plock_owner,
                plock_state,
                blocking,
                NULL,     /* No block data for now */
                &lock_desc,
                &conflict_owner,
                &conflict_desc,
                data->pclient,
                &state_status) != STATE_SUCCESS)
    {
      if(state_status == STATE_LOCK_CONFLICT)
        {
          /* A  conflicting lock from a different lock_owner, returns NFS4ERR_DENIED */
          Process_nfs4_conflict(&res_LOCK4.LOCK4res_u.denied,
                                conflict_owner,
                                &conflict_desc,
                                data->pclient);
        }

      LogDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK failed with status %s",
               state_err_str(state_status));

      res_LOCK4.status = nfs4_Errno_state(state_status);

      /* Save the response in the lock or open owner */
      if(res_LOCK4.status != NFS4ERR_RESOURCE &&
         res_LOCK4.status != NFS4ERR_BAD_STATEID)
        Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

      if(arg_LOCK4.locker.new_lock_owner)
        {
          /* Need to destroy lock owner and state */
          if(state_del(plock_state,
                       data->pclient,
                       &state_status) != STATE_SUCCESS)
            LogDebug(COMPONENT_NFS_V4_LOCK,
                     "state_del failed with status %s",
                     state_err_str(state_status));
        }

      return res_LOCK4.status;
    }

  res_LOCK4.status = NFS4_OK;

  /* Handle stateid/seqid for success */
  update_stateid(plock_state,
                 &res_LOCK4.LOCK4res_u.resok4.lock_stateid,
                 data,
                 tag);

  LogFullDebug(COMPONENT_NFS_V4_LOCK,
               "LOCK state_seqid = %u, plock_state = %p",
               plock_state->state_seqid,
               plock_state);

  /* Save the response in the lock or open owner */
  Copy_nfs4_state_req(presp_owner, seqid, op, data, resp, tag);

  LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
          "LOCK applied",
          data->current_entry,
          data->pcontext,
          plock_owner,
          &lock_desc);

  return res_LOCK4.status;
#endif
}                               /* nfs4_op_lock */
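
The range-overflow test above is worth calling out: off + len can wrap around in 64 bits, so the code rearranges off + len > 2^64 - 1 into the overflow-safe len > (2^64 - 1) - off. A standalone sketch of the same check, assuming offsets and lengths are plain uint64_t values:

#include <stdbool.h>
#include <stdint.h>

/* True when offset + length would exceed the maximum 64-bit offset.
 * offset + length itself can wrap around, but UINT64_MAX - offset cannot. */
static bool lock_range_overflows(uint64_t offset, uint64_t length)
{
	return length > UINT64_MAX - offset;
}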
Example #8
state_owner_t *create_nfs4_owner(state_nfs4_owner_name_t * pname,
                                 nfs_client_id_t         * pclientid,
                                 state_owner_type_t        type,
                                 state_owner_t           * related_owner,
                                 unsigned int              init_seqid)
{
  state_owner_t           * powner;
  state_nfs4_owner_name_t * powner_name;

  /* This lock owner is not known yet; allocate and set up a new one */
  powner = pool_alloc(state_owner_pool, NULL);

  if(powner == NULL)
    return NULL;

  powner_name = pool_alloc(state_nfs4_owner_name_pool, NULL);

  if(powner_name == NULL)
    {
      pool_free(state_owner_pool, powner);
      return NULL;
    }

  *powner_name = *pname;

  /* set up the content of the open_owner */
  memset(powner, 0, sizeof(*powner));
  powner->so_type                                 = type;
  powner->so_owner.so_nfs4_owner.so_seqid         = init_seqid;
  powner->so_owner.so_nfs4_owner.so_related_owner = related_owner;
  powner->so_owner.so_nfs4_owner.so_clientid      = pname->son_clientid;
  powner->so_owner.so_nfs4_owner.so_pclientid     = pclientid;
  powner->so_owner_len                            = pname->son_owner_len;
  powner->so_owner.so_nfs4_owner.so_resp.resop    = NFS4_OP_ILLEGAL;
  powner->so_owner.so_nfs4_owner.so_args.argop    = NFS4_OP_ILLEGAL;
  powner->so_refcount                             = 1;
#if 0
  /* WAITING FOR COMMUNITY FIX */
  /* setting lock owner confirmed */
  if ( type == STATE_LOCK_OWNER_NFSV4)
    powner->so_owner.so_nfs4_owner.so_confirmed   = 1;
#endif
  init_glist(&powner->so_lock_list);
  init_glist(&powner->so_owner.so_nfs4_owner.so_state_list);

  memcpy(powner->so_owner_val,
         pname->son_owner_val,
         pname->son_owner_len);

  powner->so_owner_val[powner->so_owner_len] = '\0';

  if(pthread_mutex_init(&powner->so_mutex, NULL) != 0)
    {
      pool_free(state_owner_pool, powner);
      pool_free(state_nfs4_owner_name_pool, powner_name);
      return NULL;
    }

  if(!nfs4_owner_Set(powner_name, powner))
    {
      pool_free(state_owner_pool, powner);
      pool_free(state_nfs4_owner_name_pool, powner_name);
      return NULL;
    }

  if(isFullDebug(COMPONENT_STATE))
    {
      char str[HASHTABLE_DISPLAY_STRLEN];

      DisplayOwner(powner, str);
      LogFullDebug(COMPONENT_STATE,
                   "New Owner %s", str);
    }

  /* Increment refcount on related owner */
  if(related_owner != NULL)
    inc_state_owner_ref(related_owner);

  P(pclientid->cid_mutex);

  if (type == STATE_OPEN_OWNER_NFSV4)
    {
      /* If open owner, add to clientid open owner list */
      powner->so_refcount++;
      glist_add_tail(&pclientid->cid_openowners, &powner->so_owner.so_nfs4_owner.so_perclient);
    }
  else if(type == STATE_LOCK_OWNER_NFSV4)
    {
      /* If lock owner, add to clientid lock owner list */
      powner->so_refcount++;
      glist_add_tail(&pclientid->cid_lockowners, &powner->so_owner.so_nfs4_owner.so_perclient);
    }

  /* Increment reference count for clientid record */
  inc_client_id_ref(pclientid);

  V(pclientid->cid_mutex);

  return powner;
}
Example #9
int nfs4_op_lock(struct nfs_argop4 *op, compound_data_t *data,
		 struct nfs_resop4 *resp)
{
	/* Shorter alias for arguments */
	LOCK4args * const arg_LOCK4 = &op->nfs_argop4_u.oplock;
	/* Shorter alias for response */
	LOCK4res * const res_LOCK4 = &resp->nfs_resop4_u.oplock;
	/* Status code from state calls */
	state_status_t state_status = STATE_SUCCESS;
	/* Data for lock state to be created */
	union state_data candidate_data;
	/* Status code for protocol functions */
	nfsstat4 nfs_status = 0;
	/* Created or found lock state */
	state_t *lock_state = NULL;
	/* Associated open state */
	state_t *state_open = NULL;
	/* The lock owner */
	state_owner_t *lock_owner = NULL;
	/* The open owner */
	state_owner_t *open_owner = NULL;
	/* The owner of a conflicting lock */
	state_owner_t *conflict_owner = NULL;
	/* The owner in which to store the response for NFSv4.0 */
	state_owner_t *resp_owner = NULL;
	/* Sequence ID, for NFSv4.0 */
	seqid4 seqid = 0;
	/* The client performing these operations */
	nfs_client_id_t *clientid = NULL;
	/* Name for the lock owner */
	state_nfs4_owner_name_t owner_name;
	/* Description of requested lock */
	fsal_lock_param_t lock_desc;
	/* Description of conflicting lock */
	fsal_lock_param_t conflict_desc;
	/* Whether to block */
	state_blocking_t blocking = STATE_NON_BLOCKING;
	/* Tracking data for the lock state */
	struct state_refer refer;
	/* Indicate whether we let the FSAL handle requests during grace. */
	bool_t fsal_grace = false;
	int rc;

	LogDebug(COMPONENT_NFS_V4_LOCK,
		 "Entering NFS v4 LOCK handler ----------------------");

	/* Initialize to sane starting values */
	resp->resop = NFS4_OP_LOCK;
	res_LOCK4->status = NFS4_OK;

	/* Record the sequence info */
	if (data->minorversion > 0) {
		memcpy(refer.session,
		       data->session->session_id,
		       sizeof(sessionid4));
		refer.sequence = data->sequence;
		refer.slot = data->slot;
	}

	res_LOCK4->status = nfs4_sanity_check_FH(data, REGULAR_FILE, false);

	if (res_LOCK4->status != NFS4_OK)
		return res_LOCK4->status;

	/* Convert lock parameters to internal types */
	switch (arg_LOCK4->locktype) {
	case READW_LT:
		blocking = STATE_NFSV4_BLOCKING;
		/* Fall through */

	case READ_LT:
		lock_desc.lock_type = FSAL_LOCK_R;
		break;

	case WRITEW_LT:
		blocking = STATE_NFSV4_BLOCKING;
		/* Fall through */

	case WRITE_LT:
		lock_desc.lock_type = FSAL_LOCK_W;
		break;

	default:
		LogDebug(COMPONENT_NFS_V4_LOCK,
			 "Invalid lock type");
		res_LOCK4->status = NFS4ERR_INVAL;
		return res_LOCK4->status;
	}

	lock_desc.lock_start = arg_LOCK4->offset;
	lock_desc.lock_sle_type = FSAL_POSIX_LOCK;
	lock_desc.lock_reclaim = arg_LOCK4->reclaim;

	if (arg_LOCK4->length != STATE_LOCK_OFFSET_EOF)
		lock_desc.lock_length = arg_LOCK4->length;
	else
		lock_desc.lock_length = 0;

	if (arg_LOCK4->locker.new_lock_owner) {
		/* Check stateid correctness and get pointer to state */
		nfs_status = nfs4_Check_Stateid(
			&arg_LOCK4->locker.locker4_u.open_owner.open_stateid,
			data->current_obj,
			&state_open,
			data,
			STATEID_SPECIAL_FOR_LOCK,
			arg_LOCK4->locker.locker4_u.open_owner.open_seqid,
			data->minorversion == 0,
			lock_tag);

		if (nfs_status != NFS4_OK) {
			if (nfs_status == NFS4ERR_REPLAY) {
				open_owner = get_state_owner_ref(state_open);

				LogStateOwner("Open: ", open_owner);

				if (open_owner != NULL) {
					resp_owner = open_owner;
					seqid = arg_LOCK4->locker.locker4_u
						.open_owner.open_seqid;
					goto check_seqid;
				}
			}

			res_LOCK4->status = nfs_status;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid for open owner");
			return res_LOCK4->status;
		}

		open_owner = get_state_owner_ref(state_open);

		LogStateOwner("Open: ", open_owner);

		if (open_owner == NULL) {
			/* State is going stale. */
			res_LOCK4->status = NFS4ERR_STALE;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid, stale open owner");
			goto out2;
		}

		lock_state = NULL;
		lock_owner = NULL;
		resp_owner = open_owner;
		seqid = arg_LOCK4->locker.locker4_u.open_owner.open_seqid;

		LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
			"LOCK New lock owner from open owner",
			data->current_obj, open_owner, &lock_desc);

		/* Check if the clientid is known or not */
		rc = nfs_client_id_get_confirmed(
			data->minorversion == 0 ? arg_LOCK4->locker.
				  locker4_u.open_owner.lock_owner.clientid
				: data->session->clientid,
			&clientid);

		if (rc != CLIENT_ID_SUCCESS) {
			res_LOCK4->status = clientid_error_to_nfsstat(rc);
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs_client_id_get");
			goto out2;
		}

		if (isDebug(COMPONENT_CLIENTID) && (clientid !=
		    open_owner->so_owner.so_nfs4_owner.so_clientrec)) {
			char str_open[LOG_BUFF_LEN / 2];
			struct display_buffer dspbuf_open = {
				sizeof(str_open), str_open, str_open};
			char str_lock[LOG_BUFF_LEN / 2];
			struct display_buffer dspbuf_lock = {
				sizeof(str_lock), str_lock, str_lock};

			display_client_id_rec(&dspbuf_open,
					      open_owner->so_owner
					      .so_nfs4_owner.so_clientrec);
			display_client_id_rec(&dspbuf_lock, clientid);

			LogDebug(COMPONENT_CLIENTID,
				 "Unexpected, new lock owner clientid {%s} doesn't match open owner clientid {%s}",
				 str_lock, str_open);
		}

		/* The related stateid is already stored in state_open */

		/* An open state has been found. Check its type */
		if (state_open->state_type != STATE_TYPE_SHARE) {
			res_LOCK4->status = NFS4ERR_BAD_STATEID;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed open stateid is not a SHARE");
			goto out2;
		}

		/* Is this lock_owner known ? */
		convert_nfs4_lock_owner(&arg_LOCK4->locker.locker4_u.open_owner.
					lock_owner, &owner_name);
		LogStateOwner("Lock: ", lock_owner);
	} else {
		/* Existing lock owner: find the lock stateid and, from
		 * that, get the open_owner.
		 *
		 * There was code here before to handle the all-0 stateid,
		 * but that really doesn't apply - when we handle
		 * temporary locks for I/O operations (which is where
		 * we will see all-0 or all-1 stateids), those will not
		 * come in through nfs4_op_lock.
		 *
		 * Check stateid correctness and get pointer to state
		 */
		nfs_status = nfs4_Check_Stateid(
			&arg_LOCK4->locker.locker4_u.lock_owner.lock_stateid,
			data->current_obj,
			&lock_state,
			data,
			STATEID_SPECIAL_FOR_LOCK,
			arg_LOCK4->locker.locker4_u.lock_owner.lock_seqid,
			data->minorversion == 0,
			lock_tag);

		if (nfs_status != NFS4_OK) {
			if (nfs_status == NFS4ERR_REPLAY) {
				lock_owner = get_state_owner_ref(lock_state);

				LogStateOwner("Lock: ", lock_owner);

				if (lock_owner != NULL) {
					open_owner = lock_owner->so_owner
						.so_nfs4_owner.so_related_owner;
					inc_state_owner_ref(open_owner);
					resp_owner = lock_owner;
					seqid = arg_LOCK4->locker.locker4_u
						.lock_owner.lock_seqid;
					goto check_seqid;
				}
			}

			res_LOCK4->status = nfs_status;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid for existing lock owner");
			return res_LOCK4->status;
		}

		/* Check if lock state belongs to same export */
		if (!state_same_export(lock_state, op_ctx->ctx_export)) {
			LogEvent(COMPONENT_STATE,
				 "Lock Owner Export Conflict, Lock held for export %"
				 PRIu16" request for export %"PRIu16,
				 state_export_id(lock_state),
				 op_ctx->ctx_export->export_id);
			res_LOCK4->status = NFS4ERR_INVAL;
			goto out2;
		}

		/* A lock state has been found. Check its type */
		if (lock_state->state_type != STATE_TYPE_LOCK) {
			res_LOCK4->status = NFS4ERR_BAD_STATEID;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed existing lock owner,  state type is not LOCK");
			goto out2;
		}

		/* Get the old lockowner. We can do the following
		 * 'cast', in NFSv4 lock_owner4 and open_owner4 are
		 * different types but with the same definition
		 */
		lock_owner = get_state_owner_ref(lock_state);

		LogStateOwner("Lock: ", lock_owner);

		if (lock_owner == NULL) {
			/* State is going stale. */
			res_LOCK4->status = NFS4ERR_STALE;
			LogDebug(COMPONENT_NFS_V4_LOCK,
				 "LOCK failed nfs4_Check_Stateid, stale open owner");
			goto out2;
		}

		open_owner =
			lock_owner->so_owner.so_nfs4_owner.so_related_owner;
		LogStateOwner("Open: ", open_owner);
		inc_state_owner_ref(open_owner);
		state_open = lock_state->state_data.lock.openstate;
		inc_state_t_ref(state_open);
		resp_owner = lock_owner;
		seqid = arg_LOCK4->locker.locker4_u.lock_owner.lock_seqid;

		LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG,
			"LOCK Existing lock owner", data->current_obj,
			lock_owner, &lock_desc);

		/* Get the client for this open owner */
		clientid = open_owner->so_owner.so_nfs4_owner.so_clientrec;
		inc_client_id_ref(clientid);
	}

 check_seqid:

	/* Check seqid (lock_seqid or open_seqid) */
	if (data->minorversion == 0) {
		if (!Check_nfs4_seqid(resp_owner,
				      seqid,
				      op,
				      data->current_obj,
				      resp,
				      lock_tag)) {
			/* Response is all setup for us and LogDebug
			 * told what was wrong
			 */
			goto out2;
		}
	}

	/* Lock length should not be 0 */
	if (arg_LOCK4->length == 0LL) {
		res_LOCK4->status = NFS4ERR_INVAL;
		LogDebug(COMPONENT_NFS_V4_LOCK, "LOCK failed length == 0");
		goto out;
	}

	/* Check for range overflow.  Comparing beyond 2^64 is not
	 * possible in 64-bit precision, but off+len > 2^64-1 is
	 * equivalent to len > 2^64-1 - off
	 */

	if (lock_desc.lock_length >
	    (STATE_LOCK_OFFSET_EOF - lock_desc.lock_start)) {
		res_LOCK4->status = NFS4ERR_INVAL;
		LogDebug(COMPONENT_NFS_V4_LOCK, "LOCK failed length overflow");
		goto out;
	}

	/* Check if open state has correct access for type of lock.
	 *
	 * Don't need to check for conflicting states since this open
	 * state assures there are no conflicting states.
	 */
	if (((arg_LOCK4->locktype == WRITE_LT ||
	      arg_LOCK4->locktype == WRITEW_LT)
	      &&
	      ((state_open->state_data.share.share_access &
		OPEN4_SHARE_ACCESS_WRITE) == 0))
	    ||
	    ((arg_LOCK4->locktype == READ_LT ||
	      arg_LOCK4->locktype == READW_LT)
	      &&
	      ((state_open->state_data.share.share_access &
		OPEN4_SHARE_ACCESS_READ) == 0))) {
		/* The open state doesn't allow access based on the
		 * type of lock
		 */
		LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
			"LOCK failed, SHARE doesn't allow access",
			data->current_obj, lock_owner, &lock_desc);

		res_LOCK4->status = NFS4ERR_OPENMODE;

		goto out;
	}

	/* Do grace period checking (use resp_owner below since a new
	 * lock request with a new lock owner doesn't have a lock owner
	 * yet, but does have an open owner - resp_owner is always one or
	 * the other and non-NULL at this point - so makes for a better log).
	 */
	if (nfs_in_grace()) {
		if (op_ctx->fsal_export->exp_ops.
			fs_supports(op_ctx->fsal_export, fso_grace_method))
				fsal_grace = true;

		if (!fsal_grace && !arg_LOCK4->reclaim) {
			LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
			"LOCK failed, non-reclaim while in grace",
				data->current_obj, resp_owner, &lock_desc);
			res_LOCK4->status = NFS4ERR_GRACE;
			goto out;
		}
		if (!fsal_grace && arg_LOCK4->reclaim
		    && !clientid->cid_allow_reclaim) {
			LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
				"LOCK failed, invalid reclaim while in grace",
				data->current_obj, resp_owner, &lock_desc);
			res_LOCK4->status = NFS4ERR_NO_GRACE;
			goto out;
		}
	} else {
		if (arg_LOCK4->reclaim) {
			LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
				"LOCK failed, reclaim while not in grace",
				data->current_obj, resp_owner, &lock_desc);
			res_LOCK4->status = NFS4ERR_NO_GRACE;
			goto out;
		}
	}

	/* Test if this request is attempting to create a new lock owner */
	if (arg_LOCK4->locker.new_lock_owner) {
		bool_t isnew;

		/* A lock owner is always associated with a previously
		   made open which has itself a previously made
		   stateid */

		/* This lock owner is not known yet; allocate and set
		   up a new one */
		lock_owner = create_nfs4_owner(&owner_name,
					       clientid,
					       STATE_LOCK_OWNER_NFSV4,
					       open_owner,
					       0,
					       &isnew,
					       CARE_ALWAYS);

		LogStateOwner("Lock: ", lock_owner);

		if (lock_owner == NULL) {
			res_LOCK4->status = NFS4ERR_RESOURCE;
			LogLock(COMPONENT_NFS_V4_LOCK, NIV_EVENT,
				"LOCK failed to create new lock owner",
				data->current_obj, open_owner, &lock_desc);
			goto out2;
		}

		if (!isnew) {
			PTHREAD_MUTEX_lock(&lock_owner->so_mutex);
			/* Check lock_seqid if it has attached locks. */
			if (!glist_empty(&lock_owner->so_lock_list)
			    && (data->minorversion == 0)
			    && !Check_nfs4_seqid(lock_owner,
						 arg_LOCK4->locker.locker4_u.
						     open_owner.lock_seqid,
						 op,
						 data->current_obj,
						 resp,
						 lock_tag)) {
				LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
					"LOCK failed to create new lock owner, re-use",
					data->current_obj,
					open_owner, &lock_desc);
				dump_all_locks(
					"All locks (re-use of lock owner)");

				PTHREAD_MUTEX_unlock(&lock_owner->so_mutex);
				/* Response is all setup for us and
				 * LogDebug told what was wrong
				 */
				goto out2;
			}

			PTHREAD_MUTEX_unlock(&lock_owner->so_mutex);

			/* Lock owner is known, see if we also already have
			 * a stateid. Do this here since it's impossible for
			 * there to be such a state if the lock owner was
			 * previously unknown.
			 */
			lock_state = nfs4_State_Get_Obj(data->current_obj,
							  lock_owner);
		}

		if (lock_state == NULL) {
			/* Prepare state management structure */
			memset(&candidate_data, 0, sizeof(candidate_data));
			candidate_data.lock.openstate = state_open;

			/* Add the lock state to the lock table */
			state_status = state_add(data->current_obj,
						 STATE_TYPE_LOCK,
						 &candidate_data,
						 lock_owner,
						 &lock_state,
						 data->minorversion > 0 ?
							&refer : NULL);

			if (state_status != STATE_SUCCESS) {
				res_LOCK4->status = NFS4ERR_RESOURCE;

				LogLock(COMPONENT_NFS_V4_LOCK, NIV_DEBUG,
					"LOCK failed to add new stateid",
					data->current_obj, lock_owner,
					&lock_desc);

				goto out2;
			}

			glist_init(&lock_state->state_data.lock.state_locklist);

			/* Add lock state to the list of lock states belonging
			   to the open state */
			glist_add_tail(
				&state_open->state_data.share.share_lockstates,
				&lock_state->state_data.lock.state_sharelist);

		}
	}

	if (data->minorversion == 0) {
		op_ctx->clientid =
		    &lock_owner->so_owner.so_nfs4_owner.so_clientid;
	}

	/* Now we have a lock owner and a stateid.  Go ahead and push
	 * lock into SAL (and FSAL). */
	state_status = state_lock(data->current_obj,
				  lock_owner,
				  lock_state,
				  blocking,
				  NULL,	/* No block data for now */
				  &lock_desc,
				  &conflict_owner,
				  &conflict_desc);

	if (state_status != STATE_SUCCESS) {
		if (state_status == STATE_LOCK_CONFLICT) {
			/* A conflicting lock from a different
			   lock_owner, returns NFS4ERR_DENIED */
			Process_nfs4_conflict(&res_LOCK4->LOCK4res_u.denied,
					      conflict_owner,
					      &conflict_desc);
		}

		LogDebug(COMPONENT_NFS_V4_LOCK, "LOCK failed with status %s",
			 state_err_str(state_status));

		res_LOCK4->status = nfs4_Errno_state(state_status);

		/* Save the response in the lock or open owner */
		if (res_LOCK4->status != NFS4ERR_RESOURCE
		    && res_LOCK4->status != NFS4ERR_BAD_STATEID
		    && data->minorversion == 0) {
			Copy_nfs4_state_req(resp_owner,
					    seqid,
					    op,
					    data->current_obj,
					    resp,
					    lock_tag);
		}

		if (arg_LOCK4->locker.new_lock_owner) {
			/* Need to destroy new state */
			state_del(lock_state);
		}
		goto out2;
	}

	if (data->minorversion == 0)
		op_ctx->clientid = NULL;

	res_LOCK4->status = NFS4_OK;

	/* Handle stateid/seqid for success */
	update_stateid(lock_state,
		       &res_LOCK4->LOCK4res_u.resok4.lock_stateid,
		       data,
		       lock_tag);

	if (arg_LOCK4->locker.new_lock_owner) {
		/* Also save the response in the lock owner */
		Copy_nfs4_state_req(lock_owner,
				    arg_LOCK4->locker.locker4_u.open_owner.
				    lock_seqid,
				    op,
				    data->current_obj,
				    resp,
				    lock_tag);

	}

	if (isFullDebug(COMPONENT_NFS_V4_LOCK)) {
		char str[LOG_BUFF_LEN];
		struct display_buffer dspbuf = {sizeof(str), str, str};

		display_stateid(&dspbuf, lock_state);

		LogFullDebug(COMPONENT_NFS_V4_LOCK, "LOCK stateid %s", str);
	}

	LogLock(COMPONENT_NFS_V4_LOCK, NIV_FULL_DEBUG, "LOCK applied",
		data->current_obj, lock_owner, &lock_desc);

 out:

	if (data->minorversion == 0) {
		/* Save the response in the lock or open owner */
		Copy_nfs4_state_req(resp_owner,
				    seqid,
				    op,
				    data->current_obj,
				    resp,
				    lock_tag);
	}

 out2:

	if (state_open != NULL)
		dec_state_t_ref(state_open);

	if (lock_state != NULL)
		dec_state_t_ref(lock_state);

	LogStateOwner("Open: ", open_owner);
	LogStateOwner("Lock: ", lock_owner);

	if (open_owner != NULL)
		dec_state_owner_ref(open_owner);

	if (lock_owner != NULL)
		dec_state_owner_ref(lock_owner);

	if (clientid != NULL)
		dec_client_id_ref(clientid);

	return res_LOCK4->status;
}				/* nfs4_op_lock */
Example #10
/**
 * @brief adds a new state to a file
 *
 * This version of the function does not take the state lock on the
 * entry.  It exists to allow callers to integrate state into a larger
 * operation.
 *
 * The caller may have already allocated a state, in which case state
 * need not be NULL.
 *
 * @note state_lock MUST be held for write
 *
 * @param[in,out] obj         file to operate on
 * @param[in]     state_type  State to be defined
 * @param[in]     state_data  Data related to this state
 * @param[in]     owner_input Related open_owner
 * @param[in,out] state       The new state
 * @param[in]     refer       Reference to compound creating state
 *
 * @return Operation status
 */
state_status_t state_add_impl(struct fsal_obj_handle *obj,
			      enum state_type state_type,
			      union state_data *state_data,
			      state_owner_t *owner_input, state_t **state,
			      struct state_refer *refer)
{
	state_t *pnew_state = *state;
	struct state_hdl *ostate = obj->state_hdl;
	char str[DISPLAY_STATEID_OTHER_SIZE];
	struct display_buffer dspbuf = {sizeof(str), str, str};
	bool str_valid = false;
	bool got_export_ref = false;
	state_status_t status = 0;
	bool mutex_init = false;
	struct state_t *openstate = NULL;
	struct gsh_buffdesc fh_desc;

	if (isFullDebug(COMPONENT_STATE) && pnew_state != NULL) {
		display_stateid(&dspbuf, pnew_state);
		LogFullDebug(COMPONENT_STATE, "pnew_state=%s", str);
		display_reset_buffer(&dspbuf);
	}

	/* Attempt to get a reference to the export. */
	if (!export_ready(op_ctx->ctx_export)) {
		/* If we could not get a reference, return stale.
		 * Release attr_lock
		 */
		LogDebug(COMPONENT_STATE, "Stale export");
		status = STATE_ESTALE;
		goto errout;
	}

	get_gsh_export_ref(op_ctx->ctx_export);

	got_export_ref = true;

	if (pnew_state == NULL) {
		if (state_type == STATE_TYPE_LOCK)
			openstate = state_data->lock.openstate;

		pnew_state = op_ctx->fsal_export->exp_ops.alloc_state(
							op_ctx->fsal_export,
							state_type,
							openstate);
	}

	PTHREAD_MUTEX_init(&pnew_state->state_mutex, NULL);

	mutex_init = true;

	/* Add the stateid.other, this will increment cid_stateid_counter */
	nfs4_BuildStateId_Other(owner_input->so_owner.so_nfs4_owner.
				so_clientrec, pnew_state->stateid_other);

	/* Set the type and data for this state */
	memcpy(&(pnew_state->state_data), state_data, sizeof(*state_data));
	pnew_state->state_type = state_type;
	pnew_state->state_seqid = 0;	/* will be incremented to 1 later */
	pnew_state->state_refcount = 2; /* sentinel plus returned ref */

	if (refer)
		pnew_state->state_refer = *refer;

	if (isFullDebug(COMPONENT_STATE)) {
		display_stateid_other(&dspbuf, pnew_state->stateid_other);
		str_valid = true;

		LogFullDebug(COMPONENT_STATE,
			     "About to call nfs4_State_Set for %s",
			     str);
	}

	glist_init(&pnew_state->state_list);

	/* We need to initialize state_owner, state_export, and state_obj now so
	 * that the state can be indexed by owner/entry. We don't insert into
	 * lists and take references yet since no one else can see this state
	 * until we are completely done since we hold the state_lock.  Might as
	 * well grab export now also...
	 */
	pnew_state->state_export = op_ctx->ctx_export;
	pnew_state->state_owner = owner_input;
	fh_desc.addr = &pnew_state->state_obj.digest;
	fh_desc.len = sizeof(pnew_state->state_obj.digest);
	obj->obj_ops.handle_digest(obj, FSAL_DIGEST_NFSV4, &fh_desc);
	pnew_state->state_obj.len = fh_desc.len;

	/* Add the state to the related hashtable */
	if (!nfs4_State_Set(pnew_state)) {
		if (!str_valid)
			display_stateid_other(&dspbuf,
					      pnew_state->stateid_other);

		LogCrit(COMPONENT_STATE,
			"Can't create a new state id %s for the obj %p (F)",
			str, obj);

		/* Return STATE_MALLOC_ERROR since most likely the
		 * nfs4_State_Set failed to allocate memory.
		 */
		status = STATE_MALLOC_ERROR;
		goto errout;
	}

	/* Each of the following blocks takes the state_mutex and releases it
	 * because we always want state_mutex to be the last lock taken.
	 *
	 * NOTE: We don't have to worry about state_del/state_del_locked being
	 *       called in the midst of things because the state_lock is held.
	 */

	/* Attach this to an export */
	PTHREAD_RWLOCK_wrlock(&op_ctx->ctx_export->lock);
	PTHREAD_MUTEX_lock(&pnew_state->state_mutex);
	glist_add_tail(&op_ctx->ctx_export->exp_state_list,
		&pnew_state->state_export_list);
	PTHREAD_MUTEX_unlock(&pnew_state->state_mutex);
	PTHREAD_RWLOCK_unlock(&op_ctx->ctx_export->lock);

	/* Add state to list for file */
	PTHREAD_MUTEX_lock(&pnew_state->state_mutex);
	glist_add_tail(&ostate->file.list_of_states, &pnew_state->state_list);
	PTHREAD_MUTEX_unlock(&pnew_state->state_mutex);

	/* Add state to list for owner */
	PTHREAD_MUTEX_lock(&owner_input->so_mutex);
	PTHREAD_MUTEX_lock(&pnew_state->state_mutex);

	inc_state_owner_ref(owner_input);

	glist_add_tail(&owner_input->so_owner.so_nfs4_owner.so_state_list,
		       &pnew_state->state_owner_list);

	PTHREAD_MUTEX_unlock(&pnew_state->state_mutex);
	PTHREAD_MUTEX_unlock(&owner_input->so_mutex);


#ifdef DEBUG_SAL
	PTHREAD_MUTEX_lock(&all_state_v4_mutex);

	glist_add_tail(&state_v4_all, &pnew_state->state_list_all);

	PTHREAD_MUTEX_unlock(&all_state_v4_mutex);
#endif

	if (pnew_state->state_type == STATE_TYPE_DELEG &&
	    pnew_state->state_data.deleg.sd_type == OPEN_DELEGATE_WRITE)
		ostate->file.write_delegated = true;

	/* Copy the result */
	*state = pnew_state;

	if (str_valid)
		LogFullDebug(COMPONENT_STATE, "Add State: %p: %s",
			     pnew_state, str);

	/* Regular exit */
	status = STATE_SUCCESS;
	return status;

errout:

	if (mutex_init)
		PTHREAD_MUTEX_destroy(&pnew_state->state_mutex);

	if (pnew_state != NULL) {
		/* Make sure the new state is closed (may have been passed in
		 * with file open).
		 */
		(void) obj->obj_ops.close2(obj, pnew_state);

		pnew_state->state_exp->exp_ops.free_state(pnew_state);
	}

	if (got_export_ref)
		put_gsh_export(op_ctx->ctx_export);

	*state = NULL;

	return status;
}				/* state_add_impl */
Example #11
/**
 * Set attributes of files.
 *
 * @attrs: array of attributes to set
 * @count: the count of tc_attrs in the preceding array
 * @is_transaction: whether to execute the compound as a transaction
 */
tc_res posix_lsetattrsv(struct tc_attrs *attrs, int count, bool is_transaction)
{
	int i = 0;
	struct tc_attrs *cur_attr = NULL;
	tc_res result = { .index = -1, .err_no = 0 };

	while (i < count) {
		cur_attr = attrs + i;

		/*
		 * Set the attributes if the corresponding mask bit is set
		 */
		if (helper_set_attrs(cur_attr) < 0) {
			result = tc_failure(i, errno);
			POSIX_WARN(
			    "posix_lsetattrsv() failed at index : %d (%s)\n",
			    result.index, strerror(errno));
			break;
		}

		i++;
	}

	return result;
}

/*
 * Rename File(s)
 *
 * @pairs[IN] - tc_file_pair structure containing the
 * old and new path of the file
 * @count[IN]: the count of tc_file_pair in the preceding array
 * @is_transaction[IN]: whether to execute the compound as a transaction
 */

tc_res posix_renamev(struct tc_file_pair *pairs, int count, bool is_transaction)
{
	int i = 0;
	tc_file_pair *cur_pair = NULL;
	tc_res result = { .index = -1, .err_no = 0 };

	while (i < count) {
		cur_pair = pairs + i;

		assert(cur_pair->src_file.type == TC_FILE_PATH &&
		       cur_pair->dst_file.type == TC_FILE_PATH &&
		       cur_pair->src_file.path != NULL &&
		       cur_pair->dst_file.path != NULL);

		if (rename(cur_pair->src_file.path, cur_pair->dst_file.path) <
		    0) {
			result = tc_failure(i, errno);
			POSIX_WARN("posix_renamev() failed at index %d: %s\n",
				   i, strerror(errno));
			break;
		}

		i++;
	}

	return result;
}

/*
 * Remove File(s)
 *
 * @files[IN] - tc_file structures containing the
 * paths of the files to be removed
 * @count[IN]: the count of tc_target_file in the preceding array
 * @is_transaction[IN]: whether to execute the compound as a transaction
 */

tc_res posix_removev(tc_file *files, int count, bool is_transaction)
{
	int i = 0;
	tc_file *cur_file = NULL;
	tc_res result = { .index = -1, .err_no = 0 };

	while (i < count) {
		cur_file = files + i;

		assert(cur_file->path != NULL);

		if (remove(cur_file->path) < 0) {
			result = tc_failure(i, errno);
			POSIX_WARN("posix_removev() failed at index %d for "
				   "path %s: %s\n",
				   result.index, cur_file->path,
				   strerror(errno));
			return result;
		}

		i++;
	}

	return result;
}

/*
 * Remove Directory
 *
 * @dir[IN] - tc_file structure containing the
 * path of the directory to be removed
 * @count: the count of tc_target_file in the preceding array
 * @is_transaction: whether to execute the compound as a transaction
 */

tc_res posix_remove_dirv(tc_file *dir, int count, bool is_transaction)
{
	int i = 0;
	tc_file *cur_dir = NULL;
	tc_res result = { .index = -1, .err_no = 0 };

	while (i < count) {
		cur_dir = dir + i;

		assert(cur_dir->path != NULL);

		if (rmdir(cur_dir->path) < 0) {
			result = tc_failure(i, errno);
			POSIX_WARN("posix_remove_dirv() failed at index : %d\n",
				   result.index);

			return result;
		}

		i++;
	}

	return result;
}

/*
 * Create Directory
 *
 * @dirs[IN] - tc_attrs structures containing the
 * paths and modes of the directories to be created
 * @count: the count of tc_target_file in the preceding array
 * @is_transaction: whether to execute the compound as a transaction
 */

tc_res posix_mkdirv(struct tc_attrs *dirs, int count, bool is_transaction)
{
	int i = 0;
	tc_file *cur_dir = NULL;
	tc_res result = { .index = -1, .err_no = 0 };

	while (i < count) {
		cur_dir = &dirs[i].file;

		assert(cur_dir->path != NULL);

		if (mkdir(cur_dir->path, dirs[i].mode) < 0) {
			result = tc_failure(i, errno);
			POSIX_WARN("posix_mkdirv() failed at index : %d (%s)\n",
				   result.index, strerror(errno));
			return result;
		}

		i++;
	}

	return result;
}

struct tc_posix_dir_to_list {
	struct glist_head list;
	const char *path;
	int origin_index;
	bool need_free_path;
};

static inline struct tc_posix_dir_to_list *
enqueue_dir_to_list(struct glist_head *dir_queue, const char *path,
		    bool own_path, int index)
{
	struct tc_posix_dir_to_list *dle;

	dle = malloc(sizeof(*dle));
	if (!dle) {
		return NULL;
	}
	dle->path = path;
	dle->need_free_path = own_path;
	dle->origin_index = index;
	glist_add_tail(dir_queue, &dle->list);

	return dle;
}

static int posix_listdir(struct glist_head *dir_queue, const char *dir,
			 struct tc_attrs_masks masks, int index, int *limit,
			 bool recursive, tc_listdirv_cb cb, void *cbarg)
{
	DIR *dir_fd;
	struct tc_attrs cur_attr;
	struct dirent *dp;
	int path_len;
	char *path;
	struct stat st;
	struct tc_posix_dir_to_list *dle;
	int ret = 0;

	assert(dir != NULL);

	dir_fd = opendir(dir);
	if (!dir_fd) {
		return -errno;
	}

	while ((dp = readdir(dir_fd)) != NULL && (*limit == -1 || *limit > 0)) {
		POSIX_DEBUG("DirEntry  : %s\n", dp->d_name);
		/* Skip the current and parent directory entries */
		if (strcmp(dp->d_name, ".") == 0 ||
		    strcmp(dp->d_name, "..") == 0)
			continue;

		path_len = strlen(dp->d_name) + strlen(dir) + 2;
		path = malloc(path_len);
		if (!path) {
			ret = -ENOMEM;
			goto exit;
		}
		tc_path_join(dir, dp->d_name, path, path_len);

		cur_attr.file = tc_file_from_path(path);
		cur_attr.masks = masks;

		if (lstat(path, &st) < 0) {
			ret = -errno;
			POSIX_WARN("stat failed for file : %s/%s", dir,
				   dp->d_name);
			free(path);
			goto exit;
		}

		/* copy the attributes */
		tc_stat2attrs(&st, &cur_attr);
		if (!cb(&cur_attr, path, cbarg)) {
			free(path);
			ret = 0;
			goto exit;
		}

		if (recursive && S_ISDIR(st.st_mode)) {
			if (!enqueue_dir_to_list(dir_queue, path, true,
						 index)) {
				free(path);
				ret = -ENOMEM;
				goto exit;
			}
		} else {
			free(path);
		}

		if (*limit != -1) {
			--(*limit);
		}
	}

exit:
	closedir(dir_fd);
	return ret;
}
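
posix_listdir() walks a single directory and enqueues subdirectories on dir_queue; a caller is expected to keep draining that queue to get a breadth-first traversal. A minimal sketch of such a driver, reusing the declarations from the listing above and assuming glist_init/glist_del and container_of behave like the intrusive list in Example #1 (demo_listdirv is an illustrative name, not part of the module):

#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Breadth-first driver: drain dir_queue until empty or an error occurs */
static int demo_listdirv(const char *root, struct tc_attrs_masks masks,
			 int limit, tc_listdirv_cb cb, void *cbarg)
{
	struct glist_head dir_queue;
	int ret;

	glist_init(&dir_queue);
	ret = posix_listdir(&dir_queue, root, masks, 0, &limit, true,
			    cb, cbarg);

	while (ret == 0 && dir_queue.next != &dir_queue) {
		struct tc_posix_dir_to_list *dle =
		    container_of(dir_queue.next,
				 struct tc_posix_dir_to_list, list);

		glist_del(&dle->list);
		ret = posix_listdir(&dir_queue, dle->path, masks,
				    dle->origin_index, &limit, true,
				    cb, cbarg);
		if (dle->need_free_path)
			free((char *)dle->path);
		free(dle);
	}
	return ret;
}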
Example #12
/**
 * build the export entry
 */
fsal_status_t GPFSFSAL_BuildExportContext(fsal_export_context_t *export_context, /* OUT */
                                      fsal_path_t * p_export_path,      /* IN */
                                      char *fs_specific_options /* IN */
    )
{
  int                  rc, fd, mntexists;
  FILE               * fp;
  struct mntent      * p_mnt;
  char               * mnt_dir = NULL;
  struct statfs        stat_buf;
  gpfs_fsal_up_ctx_t * gpfs_fsal_up_ctx;
  bool_t               start_fsal_up_thread = FALSE;

  fsal_status_t status;
  fsal_op_context_t op_context;
  gpfsfsal_export_context_t *p_export_context = (gpfsfsal_export_context_t *)export_context;

  /* Make sure the FSAL UP context list is initialized */
  if(glist_null(&gpfs_fsal_up_ctx_list))
    init_glist(&gpfs_fsal_up_ctx_list);

  /* sanity check */
  if((p_export_context == NULL) || (p_export_path == NULL))
    {
      LogCrit(COMPONENT_FSAL,
              "NULL mandatory argument passed to %s()", __FUNCTION__);
      Return(ERR_FSAL_FAULT, 0, INDEX_FSAL_BuildExportContext);
    }

  /* open mnt file */
  fp = setmntent(MOUNTED, "r");

  if(fp == NULL)
    {
      rc = errno;
      LogCrit(COMPONENT_FSAL, "Error %d in setmntent(%s): %s", rc, MOUNTED,
                      strerror(rc));
      Return(posix2fsal_error(rc), rc, INDEX_FSAL_BuildExportContext);
    }

  /* Check if mount point is really a gpfs share. If not, we can't continue.*/
  mntexists = 0;
  while((p_mnt = getmntent(fp)) != NULL)
    if(p_mnt->mnt_dir != NULL  && p_mnt->mnt_type != NULL)
      /* There is probably a macro for "gpfs" type ... not sure where it is. */
      if (strncmp(p_mnt->mnt_type, "gpfs", 4) == 0)
        {
          LogFullDebug(COMPONENT_FSAL,
                       "Checking Export Path %s against GPFS fs %s",
                       p_export_path->path, p_mnt->mnt_dir);

          /* If export path is shorter than fs path, then this isn't a match */
          if(strlen(p_export_path->path) < strlen(p_mnt->mnt_dir))
            continue;

          /* If export path doesn't have a path separator after mnt_dir, then it
           * isn't a proper sub-directory of mnt_dir.
           */
          if((p_export_path->path[strlen(p_mnt->mnt_dir)] != '/') &&
             (p_export_path->path[strlen(p_mnt->mnt_dir)] != '\0'))
            continue;

          if (strncmp(p_mnt->mnt_dir, p_export_path->path, strlen(p_mnt->mnt_dir)) == 0)
            {
              mnt_dir = gsh_strdup(p_mnt->mnt_dir);
              mntexists = 1;
              break;
            }
        }
  
  endmntent(fp);

  if (mntexists == 0)
    {
      LogMajor(COMPONENT_FSAL,
               "GPFS mount point %s does not exist.",
               p_export_path->path);
      gsh_free(mnt_dir);
      ReturnCode(ERR_FSAL_INVAL, 0);
    }

  /* save file descriptor to root of GPFS share */
  fd = open(p_export_path->path, O_RDONLY | O_DIRECTORY);
  if(fd < 0)
    {
      if(errno == ENOENT)
        LogMajor(COMPONENT_FSAL,
                 "GPFS export path %s does not exist.",
                 p_export_path->path);
      else if (errno == ENOTDIR)
        LogMajor(COMPONENT_FSAL,
                 "GPFS export path %s is not a directory.",
                 p_export_path->path);
      else
        LogMajor(COMPONENT_FSAL,
                 "Could not open GPFS export path %s: rc = %d(%s)",
                 p_export_path->path, errno, strerror(errno));

      if(mnt_dir != NULL)
        gsh_free(mnt_dir);

      ReturnCode(ERR_FSAL_INVAL, 0);
    }

  p_export_context->mount_root_fd = fd;

  LogFullDebug(COMPONENT_FSAL,
               "GPFSFSAL_BuildExportContext: %d",
               p_export_context->mount_root_fd);

  /* Save pointer to fsal_staticfsinfo_t in export context */
  p_export_context->fe_static_fs_info = &global_fs_info;

  /* save filesystem ID */
  rc = statfs(p_export_path->path, &stat_buf);

  if(rc)
    {
      close(fd);
      LogMajor(COMPONENT_FSAL,
               "statfs call failed on file %s: %d(%s)",
               p_export_path->path, errno, strerror(errno));

      if(mnt_dir != NULL)
        gsh_free(mnt_dir);

      ReturnCode(ERR_FSAL_INVAL, 0);
    }

  p_export_context->fsid[0] = stat_buf.f_fsid.__val[0];
  p_export_context->fsid[1] = stat_buf.f_fsid.__val[1];

  /* save file handle to root of GPFS share */
  op_context.export_context = export_context;

  // op_context.credential = ???
  status = fsal_internal_get_handle(&op_context,
                                    p_export_path,
                                    (fsal_handle_t *)(&(p_export_context->mount_root_handle)));

  if(FSAL_IS_ERROR(status))
    {
      close(p_export_context->mount_root_fd);
      LogMajor(COMPONENT_FSAL,
               "FSAL BUILD EXPORT CONTEXT: ERROR: Conversion from gpfs filesystem root path to handle failed : %d",
               status.minor);

      if(mnt_dir != NULL)
        gsh_free(mnt_dir);

      ReturnCode(ERR_FSAL_INVAL, 0);
    }

  gpfs_fsal_up_ctx = gpfsfsal_find_fsal_up_context(p_export_context);

  if(gpfs_fsal_up_ctx == NULL)
    {
      gpfs_fsal_up_ctx = gsh_calloc(1, sizeof(gpfs_fsal_up_ctx_t));

      if(gpfs_fsal_up_ctx == NULL || mnt_dir == NULL)
        {
          LogFatal(COMPONENT_FSAL,
                   "Out of memory, cannot continue.");
        }

      /* Initialize the gpfs_fsal_up_ctx */
      init_glist(&gpfs_fsal_up_ctx->gf_exports);
      gpfs_fsal_up_ctx->gf_fs = mnt_dir;
      gpfs_fsal_up_ctx->gf_fsid[0] = p_export_context->fsid[0];
      gpfs_fsal_up_ctx->gf_fsid[1] = p_export_context->fsid[1];

      /* Add it to the list of contexts */
      glist_add_tail(&gpfs_fsal_up_ctx_list, &gpfs_fsal_up_ctx->gf_list);

      start_fsal_up_thread = TRUE;
    }
  else
    {
      if(mnt_dir != NULL)
        gsh_free(mnt_dir);
    }

  /* Add this export context to the list for its gpfs_fsal_up_ctx */
  glist_add_tail(&gpfs_fsal_up_ctx->gf_exports, &p_export_context->fe_list);
  p_export_context->fe_fsal_up_ctx = gpfs_fsal_up_ctx;

  if(start_fsal_up_thread)
    {
      pthread_attr_t attr_thr;

      memset(&attr_thr, 0, sizeof(attr_thr));

      /* Initialization of thread attributes borrowed from nfs_init.c */
      if(pthread_attr_init(&attr_thr) != 0)
        LogCrit(COMPONENT_THREAD, "can't init pthread's attributes");

      if(pthread_attr_setscope(&attr_thr, PTHREAD_SCOPE_SYSTEM) != 0)
        LogCrit(COMPONENT_THREAD, "can't set pthread's scope");

      if(pthread_attr_setdetachstate(&attr_thr, PTHREAD_CREATE_JOINABLE) != 0)
        LogCrit(COMPONENT_THREAD, "can't set pthread's join state");

      if(pthread_attr_setstacksize(&attr_thr, 2116488) != 0)
        LogCrit(COMPONENT_THREAD, "can't set pthread's stack size");

      rc = pthread_create(&gpfs_fsal_up_ctx->gf_thread,
                          &attr_thr,
                          GPFSFSAL_UP_Thread,
                          gpfs_fsal_up_ctx);

      if(rc != 0)
        {
          LogFatal(COMPONENT_THREAD,
                   "Could not create GPFSFSAL_UP_Thread, error = %d (%s)",
                   rc, strerror(rc));
        }
    }

  Return(ERR_FSAL_NO_ERROR, 0, INDEX_FSAL_BuildExportContext);
}