static nfsstat4 make_ds_handle(struct fsal_pnfs_ds *const pds, const struct gsh_buffdesc *const desc, struct fsal_ds_handle **const handle, int flags) { struct lustre_file_handle *lustre_fh = (struct lustre_file_handle *)desc->addr; struct lustre_ds *ds; /* Handle to be created */ struct fsal_filesystem *fs; struct fsal_fsid__ fsid; enum fsid_type fsid_type; *handle = NULL; if (desc->len != sizeof(struct lustre_file_handle)) return NFS4ERR_BADHANDLE; lustre_extract_fsid(lustre_fh, &fsid_type, &fsid); fs = lookup_fsid(&fsid, fsid_type); if (fs == NULL) { LogInfo(COMPONENT_FSAL, "Could not find filesystem for fsid=0x%016"PRIx64 ".0x%016"PRIx64" from handle", fsid.major, fsid.minor); return NFS4ERR_STALE; } if (fs->fsal != pds->fsal) { LogInfo(COMPONENT_FSAL, "Non LUSTRE filesystem fsid=0x%016"PRIx64 ".0x%016"PRIx64" from handle", fsid.major, fsid.minor); return NFS4ERR_STALE; } ds = gsh_calloc(sizeof(struct lustre_ds), 1); if (ds == NULL) return NFS4ERR_SERVERFAULT; *handle = &ds->ds; fsal_ds_handle_init(*handle, pds); /* Connect lazily when a FILE_SYNC4 write forces us to, not here. */ ds->connected = false; ds->lustre_fs = fs->private; memcpy(&ds->wire, desc->addr, desc->len); return NFS4_OK; }
/** * @brief Allocate a duplicate request cache * * @param[in] dtype Style DRC to allocate (e.g., TCP, by enum drc_type) * @param[in] maxsz Upper bound on requests to cache * @param[in] cachesz Number of entries in the closed hash partition * @param[in] flags DRC flags * * @return the drc, if successfully allocated, else NULL. */ static inline drc_t *alloc_tcp_drc(enum drc_type dtype) { drc_t *drc = pool_alloc(tcp_drc_pool, NULL); int ix, code __attribute__ ((unused)) = 0; if (unlikely(!drc)) { LogCrit(COMPONENT_DUPREQ, "alloc TCP DRC failed"); goto out; } drc->type = dtype; /* DRC_TCP_V3 or DRC_TCP_V4 */ drc->refcnt = 0; drc->retwnd = 0; drc->d_u.tcp.recycle_time = 0; drc->maxsize = nfs_param.core_param.drc.tcp.size; drc->cachesz = nfs_param.core_param.drc.tcp.cachesz; drc->npart = nfs_param.core_param.drc.tcp.npart; drc->hiwat = nfs_param.core_param.drc.udp.hiwat; PTHREAD_MUTEX_init(&drc->mtx, NULL); /* init dict */ code = rbtx_init(&drc->xt, dupreq_tcp_cmpf, drc->npart, RBT_X_FLAG_ALLOC | RBT_X_FLAG_CACHE_WT); assert(!code); /* completed requests */ TAILQ_INIT(&drc->dupreq_q); /* recycling DRC */ TAILQ_INIT_ENTRY(drc, d_u.tcp.recycle_q); /* init "cache" partition */ for (ix = 0; ix < drc->npart; ++ix) { struct rbtree_x_part *xp = &(drc->xt.tree[ix]); drc->xt.cachesz = drc->cachesz; xp->cache = gsh_calloc(drc->cachesz, sizeof(struct opr_rbtree_node *)); if (unlikely(!xp->cache)) { LogCrit(COMPONENT_DUPREQ, "TCP DRC hash partition allocation failed (ix=%d)", ix); drc->cachesz = 0; break; } } out: return drc; }
/**
 * @brief Build the dynamic SNMP get/set entries for MNTv1 statistics.
 *
 * Emits one {total, success, dropped} triple of read-only integer
 * entries per MNTv1 command; opt_arg encodes the flat slot index.
 *
 * @param[out] p_dyn_gs       Newly allocated entry array
 * @param[out] p_dyn_gs_count Number of entries in the array
 */
static void create_dyn_mntv1_stat(register_get_set **p_dyn_gs,
				  int *p_dyn_gs_count)
{
	/* Per-variant label formats and descriptions; fmt[] entries are
	 * trusted literals fed to snprintf. */
	static const char *const fmt[3] = {
		"%sV1_total", "%sV1_success", "%sV1_dropped"
	};
	static char *const desc_tab[3] = {
		"Number of mnt1 commands",
		"Number of success for this mnt1 command",
		"Number of drop for this mnt1 command"
	};
	long cmd, k;

	*p_dyn_gs_count = 3 * MNT_V1_NB_COMMAND;
	*p_dyn_gs = gsh_calloc(3 * MNT_V1_NB_COMMAND,
			       sizeof(register_get_set));

	for (cmd = 0; cmd < MNT_V1_NB_COMMAND; cmd++) {
		for (k = 0; k < 3; k++) {
			register_get_set *slot = &(*p_dyn_gs)[3 * cmd + k];

			slot->label = gsh_calloc(256, sizeof(char));
			snprintf(slot->label, 256, fmt[k],
				 mnt_function_names[cmd]);
			slot->desc = desc_tab[k];
			slot->type = SNMP_ADM_INTEGER;
			slot->access = SNMP_ADM_ACCESS_RO;
			slot->getter = get_mnt1;
			slot->setter = NULL;
			/* flat index, recovered by get_mnt1 */
			slot->opt_arg = (void *)(3 * cmd + k);
		}
	}
}
gweakref_table_t *gweakref_init(uint32_t npart, uint32_t cache_sz) { int ix = 0; pthread_rwlockattr_t rwlock_attr; gweakref_partition_t *wp = NULL; gweakref_table_t *wt = NULL; wt = gsh_calloc(1, sizeof(gweakref_table_t)); if (!wt) goto out; /* prior versions of Linux tirpc are subject to default prefer-reader * behavior (so have potential for writer starvation) */ pthread_rwlockattr_init(&rwlock_attr); #ifdef GLIBC pthread_rwlockattr_setkind_np( &rwlock_attr, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP); #endif /* npart should be a small integer */ wt->npart = npart; wt->partition = gsh_calloc(npart, sizeof(gweakref_partition_t)); for (ix = 0; ix < npart; ++ix) { wp = &wt->partition[ix]; pthread_rwlock_init(&wp->lock, &rwlock_attr); avltree_init(&wp->t, wk_cmpf, 0 /* must be 0 */); if (cache_sz > 0) { wt->cache_sz = cache_sz; wp->cache = gsh_calloc(cache_sz, sizeof(struct avltree_node *)); } wp->genctr = 0; } out: return (wt); }
/** * @brief Initialize the DRC package. */ void dupreq2_pkginit(void) { int code __attribute__ ((unused)) = 0; dupreq_pool = pool_init("Duplicate Request Pool", sizeof(dupreq_entry_t), pool_basic_substrate, NULL, NULL, NULL); if (unlikely(!(dupreq_pool))) LogFatal(COMPONENT_INIT, "Error while allocating duplicate request pool"); nfs_res_pool = pool_init("nfs_res_t pool", sizeof(nfs_res_t), pool_basic_substrate, NULL, NULL, NULL); if (unlikely(!(nfs_res_pool))) LogFatal(COMPONENT_INIT, "Error while allocating nfs_res_t pool"); tcp_drc_pool = pool_init("TCP DRC Pool", sizeof(drc_t), pool_basic_substrate, NULL, NULL, NULL); if (unlikely(!(tcp_drc_pool))) LogFatal(COMPONENT_INIT, "Error while allocating TCP DRC pool"); drc_st = gsh_calloc(1, sizeof(struct drc_st)); /* init shared statics */ gsh_mutex_init(&drc_st->mtx, NULL); /* recycle_t */ code = rbtx_init(&drc_st->tcp_drc_recycle_t, drc_recycle_cmpf, nfs_param.core_param.drc.tcp.recycle_npart, RBT_X_FLAG_ALLOC); /* XXX error? */ /* init recycle_q */ TAILQ_INIT(&drc_st->tcp_drc_recycle_q); drc_st->tcp_drc_recycle_qlen = 0; drc_st->last_expire_check = time(NULL); drc_st->expire_delta = nfs_param.core_param.drc.tcp.recycle_expire_s; /* UDP DRC is global, shared */ init_shared_drc(); }
/**
 * @brief Allocate a VFS state_t with its file descriptor slot.
 *
 * The state and its vfs_fd are carved from one zeroed allocation; the
 * vfs_fd lives immediately after the state_t.  The fd starts closed.
 *
 * @param[in] exp_hdl       Export owning the state
 * @param[in] state_type    Kind of state to create
 * @param[in] related_state Related state, if any
 *
 * @return the initialized state.
 */
struct state_t *vfs_alloc_state(struct fsal_export *exp_hdl,
				enum state_type state_type,
				struct state_t *related_state)
{
	struct state_t *new_state =
	    init_state(gsh_calloc(1, sizeof(struct state_t) +
				     sizeof(struct vfs_fd)),
		       exp_hdl, state_type, related_state);
	struct vfs_fd *fd_slot = (struct vfs_fd *)(new_state + 1);

	fd_slot->fd = -1;	/* not open yet */

	return new_state;
}
/** * @brief Create a FSAL data server handle from a wire handle * * This function creates a FSAL data server handle from a client * supplied "wire" handle. This is also where validation gets done, * since PUTFH is the only operation that can return * NFS4ERR_BADHANDLE. * * @param[in] export_pub The export in which to create the handle * @param[in] desc Buffer from which to create the file * @param[out] ds_pub FSAL data server handle * * @return NFSv4.1 error codes. */ static nfsstat4 make_ds_handle(struct fsal_pnfs_ds *const pds, const struct gsh_buffdesc *const hdl_desc, struct fsal_ds_handle **const handle, int flags) { /* Handle to be created for DS */ struct glfs_ds_handle *ds = NULL; unsigned char globjhdl[GFAPI_HANDLE_LENGTH] = {'\0'}; struct stat sb; struct glusterfs_export *glfs_export = container_of(pds->mds_fsal_export, struct glusterfs_export, export); *handle = NULL; if (hdl_desc->len != sizeof(struct glfs_ds_wire)) return NFS4ERR_BADHANDLE; ds = gsh_calloc(1, sizeof(struct glfs_ds_handle)); *handle = &ds->ds; fsal_ds_handle_init(*handle, pds); memcpy(globjhdl, hdl_desc->addr, GFAPI_HANDLE_LENGTH); /* Create glfs_object for the DS handle */ ds->glhandle = glfs_h_create_from_handle(glfs_export->gl_fs->fs, globjhdl, GFAPI_HANDLE_LENGTH, &sb); if (ds->glhandle == NULL) { LogDebug(COMPONENT_PNFS, "glhandle in ds_handle is NULL"); return NFS4ERR_SERVERFAULT; } /* Connect lazily when a FILE_SYNC4 write forces us to, not here. */ ds->connected = false; return NFS4_OK; }
/** * @brief Initialize a shared duplicate request cache */ static inline void init_shared_drc() { drc_t *drc = &drc_st->udp_drc; int ix, code __attribute__ ((unused)) = 0; drc->type = DRC_UDP_V234; drc->refcnt = 0; drc->retwnd = 0; drc->d_u.tcp.recycle_time = 0; drc->maxsize = nfs_param.core_param.drc.udp.size; drc->cachesz = nfs_param.core_param.drc.udp.cachesz; drc->npart = nfs_param.core_param.drc.udp.npart; drc->hiwat = nfs_param.core_param.drc.udp.hiwat; gsh_mutex_init(&drc->mtx, NULL); /* init dict */ code = rbtx_init(&drc->xt, dupreq_shared_cmpf, drc->npart, RBT_X_FLAG_ALLOC | RBT_X_FLAG_CACHE_WT); assert(!code); /* completed requests */ TAILQ_INIT(&drc->dupreq_q); /* init closed-form "cache" partition */ for (ix = 0; ix < drc->npart; ++ix) { struct rbtree_x_part *xp = &(drc->xt.tree[ix]); drc->xt.cachesz = drc->cachesz; xp->cache = gsh_calloc(drc->cachesz, sizeof(struct opr_rbtree_node *)); if (unlikely(!xp->cache)) { LogCrit(COMPONENT_DUPREQ, "UDP DRC hash partition allocation " "failed (ix=%d)", ix); drc->cachesz = 0; break; } } return; }
/* Here and not static because proxy.c needs this function * but we also need access to pxy_exp_ops - I'd rather * keep the later static then the former */ fsal_status_t pxy_create_export(struct fsal_module *fsal_hdl, void *parse_node, struct config_error_type *err_type, const struct fsal_up_vector *up_ops) { struct pxy_export *exp = gsh_calloc(1, sizeof(*exp)); struct pxy_fsal_module *pxy = container_of(fsal_hdl, struct pxy_fsal_module, module); if (!exp) return fsalstat(ERR_FSAL_NOMEM, ENOMEM); if (fsal_export_init(&exp->exp) != 0) { gsh_free(exp); return fsalstat(ERR_FSAL_NOMEM, ENOMEM); } pxy_export_ops_init(&exp->exp.exp_ops); exp->exp.up_ops = up_ops; exp->info = &pxy->special; exp->exp.fsal = fsal_hdl; op_ctx->fsal_export = &exp->exp; return fsalstat(ERR_FSAL_NO_ERROR, 0); }
void construct_handle(struct glusterfs_export *glexport, const struct stat *st, struct glfs_object *glhandle, unsigned char *globjhdl, struct glusterfs_handle **obj, const char *vol_uuid) { struct glusterfs_handle *constructing = NULL; constructing = gsh_calloc(1, sizeof(struct glusterfs_handle)); constructing->glhandle = glhandle; memcpy(constructing->globjhdl, vol_uuid, GLAPI_UUID_LENGTH); memcpy(constructing->globjhdl+GLAPI_UUID_LENGTH, globjhdl, GFAPI_HANDLE_LENGTH); constructing->globalfd.glfd = NULL; fsal_obj_handle_init(&constructing->handle, &glexport->export, posix2fsal_type(st->st_mode)); constructing->handle.fsid = posix2fsal_fsid(st->st_dev); constructing->handle.fileid = st->st_ino; constructing->handle.obj_ops = &GlusterFS.handle_ops; *obj = constructing; }
struct gpfs_fsal_obj_handle *alloc_handle(struct gpfs_file_handle *fh, struct fsal_filesystem *fs, struct attrlist *attributes, const char *link_content, struct fsal_export *exp_hdl) { struct gpfs_fsal_export *myself = container_of(exp_hdl, struct gpfs_fsal_export, export); struct gpfs_fsal_obj_handle *hdl = gsh_calloc(1, sizeof(struct gpfs_fsal_obj_handle) + sizeof(struct gpfs_file_handle)); hdl->handle = (struct gpfs_file_handle *)&hdl[1]; hdl->obj_handle.fs = fs; memcpy(hdl->handle, fh, sizeof(struct gpfs_file_handle)); hdl->obj_handle.type = attributes->type; if (hdl->obj_handle.type == REGULAR_FILE) { hdl->u.file.fd.fd = -1; /* no open on this yet */ hdl->u.file.fd.openflags = FSAL_O_CLOSED; } else if (hdl->obj_handle.type == SYMBOLIC_LINK && link_content != NULL) { size_t len = strlen(link_content) + 1; hdl->u.symlink.link_content = gsh_malloc(len); memcpy(hdl->u.symlink.link_content, link_content, len); hdl->u.symlink.link_size = len; } fsal_obj_handle_init(&hdl->obj_handle, exp_hdl, attributes->type); hdl->obj_handle.fsid = attributes->fsid; hdl->obj_handle.fileid = attributes->fileid; gpfs_handle_ops_init(&hdl->obj_handle.obj_ops); if (myself->pnfs_mds_enabled) handle_ops_pnfs(&hdl->obj_handle.obj_ops); return hdl; }
/** * * nfs_Add_MountList_Entry: Adds a client to the mount list. * * Adds a client to the mount list. * * @param hostname [IN] the hostname for the client * @param dirpath [IN] the mounted path * * @return 1 if successful, 0 otherwise * */ int nfs_Add_MountList_Entry(char *hostname, char *dirpath) { #ifndef _NO_MOUNT_LIST mountlist pnew_mnt_list_entry; #endif /* Sanity check */ if(hostname == NULL || dirpath == NULL) return 0; #ifndef _NO_MOUNT_LIST /* Allocate the new entry */ if((pnew_mnt_list_entry = gsh_calloc(1, sizeof(struct mountbody))) == NULL) return 0; if((pnew_mnt_list_entry->ml_hostname = gsh_calloc(1, MAXHOSTNAMELEN + 1)) == NULL) { gsh_free(pnew_mnt_list_entry); return 0; } if((pnew_mnt_list_entry->ml_directory = gsh_calloc(1, MAXPATHLEN + 1)) == NULL) { gsh_free(pnew_mnt_list_entry->ml_hostname); gsh_free(pnew_mnt_list_entry); return 0; } /* Copy the data */ if (strmaxcpy(pnew_mnt_list_entry->ml_hostname, hostname, MAXHOSTNAMELEN) == -1) { gsh_free(pnew_mnt_list_entry->ml_directory); gsh_free(pnew_mnt_list_entry->ml_hostname); gsh_free(pnew_mnt_list_entry); return 0; } if (strmaxcpy(pnew_mnt_list_entry->ml_directory, dirpath, MAXPATHLEN) == -1) { gsh_free(pnew_mnt_list_entry->ml_directory); gsh_free(pnew_mnt_list_entry->ml_hostname); gsh_free(pnew_mnt_list_entry); return 0; } /* initialize next pointer */ pnew_mnt_list_entry->ml_next = NULL; /* This should occur only for the first mount */ if(MNT_List_head == NULL) { MNT_List_head = pnew_mnt_list_entry; } /* Append to the tail of the list */ if(MNT_List_tail == NULL) MNT_List_tail = pnew_mnt_list_entry; else { MNT_List_tail->ml_next = pnew_mnt_list_entry; MNT_List_tail = pnew_mnt_list_entry; } if(isFullDebug(COMPONENT_NFSPROTO)) nfs_Print_MountList(); #endif return 1; }
struct hash_table * hashtable_init(struct hash_param *hparam) { /* The hash table being constructed */ struct hash_table *ht = NULL; /* The index for initializing each partition */ uint32_t index = 0; /* Read-Write Lock attributes, to prevent write starvation under GLIBC */ pthread_rwlockattr_t rwlockattr; /* Hash partition */ struct hash_partition *partition = NULL; /* The number of fully initialized partitions */ uint32_t completed = 0; if (pthread_rwlockattr_init(&rwlockattr) != 0) return NULL; /* At some point factor this out into the OS directory. it is necessary to prevent writer starvation under GLIBC. */ #ifdef GLIBC if ((pthread_rwlockattr_setkind_np (&rwlockattrs, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP)) != 0) { LogCrit(COMPONENT_HASHTABLE, "Unable to set writer-preference on lock attribute."); goto deconstruct; } #endif /* GLIBC */ ht = gsh_calloc(1, sizeof(struct hash_table) + (sizeof(struct hash_partition) * hparam->index_size)); /* Fixup entry size */ if (hparam->flags & HT_FLAG_CACHE) { if (!hparam->cache_entry_count) /* works fine with a good hash algo */ hparam->cache_entry_count = 32767; } /* We need to save copy of the parameters in the table. 
*/ ht->parameter = *hparam; for (index = 0; index < hparam->index_size; ++index) { partition = (&ht->partitions[index]); RBT_HEAD_INIT(&(partition->rbt)); if (pthread_rwlock_init(&partition->lock, &rwlockattr) != 0) { LogCrit(COMPONENT_HASHTABLE, "Unable to initialize lock in hash table."); goto deconstruct; } /* Allocate a cache if requested */ if (hparam->flags & HT_FLAG_CACHE) partition->cache = gsh_calloc(1, cache_page_size(ht)); completed++; } ht->node_pool = pool_basic_init(NULL, sizeof(rbt_node_t)); ht->data_pool = pool_basic_init(NULL, sizeof(struct hash_data)); pthread_rwlockattr_destroy(&rwlockattr); return ht; deconstruct: while (completed != 0) { if (hparam->flags & HT_FLAG_CACHE) gsh_free(ht->partitions[completed - 1].cache); PTHREAD_RWLOCK_destroy(&(ht->partitions[completed - 1].lock)); completed--; } if (ht->node_pool) pool_destroy(ht->node_pool); if (ht->data_pool) pool_destroy(ht->data_pool); gsh_free(ht); return ht = NULL; }
int nlm_process_parameters(struct svc_req * preq, bool_t exclusive, nlm4_lock * alock, fsal_lock_param_t * plock, cache_entry_t ** ppentry, fsal_op_context_t * pcontext, care_t care, state_nsm_client_t ** ppnsm_client, state_nlm_client_t ** ppnlm_client, state_owner_t ** ppowner, state_block_data_t ** ppblock_data) { cache_inode_fsal_data_t fsal_data; fsal_attrib_list_t attr; cache_inode_status_t cache_status; SVCXPRT *ptr_svc = preq->rq_xprt; int rc; *ppnsm_client = NULL; *ppnlm_client = NULL; *ppowner = NULL; /* Convert file handle into a cache entry */ if(alock->fh.n_len > MAX_NETOBJ_SZ || !nfs3_FhandleToFSAL((nfs_fh3 *) &alock->fh, &fsal_data.fh_desc, pcontext)) { /* handle is not valid */ return NLM4_STALE_FH; } /* Now get the cached inode attributes */ *ppentry = cache_inode_get(&fsal_data, &attr, pcontext, NULL, &cache_status); if(*ppentry == NULL) { /* handle is not valid */ return NLM4_STALE_FH; } *ppnsm_client = get_nsm_client(care, ptr_svc, alock->caller_name); if(*ppnsm_client == NULL) { /* If NSM Client is not found, and we don't care (such as unlock), * just return GRANTED (the unlock must succeed, there can't be * any locks). */ if(care != CARE_NOT) rc = NLM4_DENIED_NOLOCKS; else rc = NLM4_GRANTED; goto out_put; } *ppnlm_client = get_nlm_client(care, ptr_svc, *ppnsm_client, alock->caller_name); if(*ppnlm_client == NULL) { /* If NLM Client is not found, and we don't care (such as unlock), * just return GRANTED (the unlock must succeed, there can't be * any locks). 
*/ dec_nsm_client_ref(*ppnsm_client); if(care != CARE_NOT) rc = NLM4_DENIED_NOLOCKS; else rc = NLM4_GRANTED; goto out_put; } *ppowner = get_nlm_owner(care, *ppnlm_client, &alock->oh, alock->svid); if(*ppowner == NULL) { LogDebug(COMPONENT_NLM, "Could not get NLM Owner"); dec_nsm_client_ref(*ppnsm_client); dec_nlm_client_ref(*ppnlm_client); *ppnlm_client = NULL; /* If owner is not found, and we don't care (such as unlock), * just return GRANTED (the unlock must succeed, there can't be * any locks). */ if(care != CARE_NOT) rc = NLM4_DENIED_NOLOCKS; else rc = NLM4_GRANTED; goto out_put; } if(ppblock_data != NULL) { *ppblock_data = gsh_calloc(1, sizeof(**ppblock_data)); /* Fill in the block data, if we don't get one, we will just proceed * without (which will mean the lock doesn't block. */ if(*ppblock_data != NULL) { if(copy_xprt_addr(&(*ppblock_data)->sbd_block_data.sbd_nlm_block_data.sbd_nlm_hostaddr, ptr_svc) == 0) { LogFullDebug(COMPONENT_NLM, "copy_xprt_addr failed for Program %d, Version %d, Function %d", (int)preq->rq_prog, (int)preq->rq_vers, (int)preq->rq_proc); gsh_free(*ppblock_data); *ppblock_data = NULL; rc = NLM4_FAILED; goto out_put; } (*ppblock_data)->sbd_granted_callback = nlm_granted_callback; (*ppblock_data)->sbd_block_data.sbd_nlm_block_data.sbd_nlm_fh.n_bytes = (*ppblock_data)->sbd_block_data.sbd_nlm_block_data.sbd_nlm_fh_buf; (*ppblock_data)->sbd_block_data.sbd_nlm_block_data.sbd_nlm_fh.n_len = alock->fh.n_len; memcpy((*ppblock_data)->sbd_block_data.sbd_nlm_block_data.sbd_nlm_fh_buf, alock->fh.n_bytes, alock->fh.n_len); /* FSF TODO: Ultimately I think the following will go away, we won't need the context, just the export */ /* Copy credentials from pcontext */ #ifdef _USE_HPSS /** @todo : PhD: Think about removing hpsscred_t from FSAL */ (*ppblock_data)->sbd_credential.user = pcontext->credential.hpss_usercred.Uid ; (*ppblock_data)->sbd_credential.group = pcontext->credential.hpss_usercred.Gid ; #else (*ppblock_data)->sbd_credential = 
pcontext->credential; /* Copy the alt groups list */ if(pcontext->credential.nbgroups != 0) { (*ppblock_data)->sbd_credential.alt_groups = gsh_malloc(sizeof(gid_t) * pcontext->credential.nbgroups); if((*ppblock_data)->sbd_credential.alt_groups == NULL) { gsh_free(*ppblock_data); *ppblock_data = NULL; rc = NLM4_FAILED; goto out_put; } memcpy((*ppblock_data)->sbd_credential.alt_groups, pcontext->credential.alt_groups, pcontext->credential.nbgroups); } #endif } } /* Fill in plock */ plock->lock_type = exclusive ? FSAL_LOCK_W : FSAL_LOCK_R; plock->lock_start = alock->l_offset; plock->lock_length = alock->l_len; LogFullDebug(COMPONENT_NLM, "Parameters Processed"); return -1; out_put: cache_inode_put(*ppentry); *ppentry = NULL; return rc; }
/**
 * @brief 9P TXATTRWALK handler: open an xattr-access fid.
 *
 * Parses <tag, fid, attrfid, name> from the request.  An empty name is a
 * listxattr request: the names of all (writable-cookie) xattrs are packed
 * NUL-separated into a freshly allocated buffer.  A non-empty name
 * resolves that single extended attribute and reads its value into the
 * buffer.  On success the new fid (a copy of the file's fid carrying the
 * xattr buffer) is installed at attrfid and RXATTRWALK returns the total
 * attribute size.
 *
 * @param[in]  req9p       9P request data (message, connection, fids)
 * @param[in]  worker_data Worker context, passed through to error replies
 * @param[out] plenout     Reply length
 * @param[out] preply      Reply buffer
 *
 * @return 1 on success (reply built); error replies via _9p_rerror.
 */
int _9p_xattrwalk(struct _9p_request_data *req9p, void *worker_data,
		  u32 *plenout, char *preply)
{
	char *cursor = req9p->_9pmsg + _9P_HDR_SIZE + _9P_TYPE_SIZE;
	u16 *msgtag = NULL;
	u32 *fid = NULL;
	u32 *attrfid = NULL;
	u16 *name_len;
	char *name_str;
	u64 attrsize = 0LL;
	fsal_status_t fsal_status;
	char name[MAXNAMLEN];
	/* NOTE(review): 255 slots declared but only 100 requested below —
	 * presumably headroom; confirm before shrinking. */
	fsal_xattrent_t xattrs_tab[255];
	int eod_met = false;
	unsigned int nb_xattrs_read = 0;
	unsigned int i = 0;
	char *xattr_cursor = NULL;
	unsigned int tmplen = 0;
	struct _9p_fid *pfid = NULL;
	struct _9p_fid *pxattrfid = NULL;

	/* Get data */
	_9p_getptr(cursor, msgtag, u16);
	_9p_getptr(cursor, fid, u32);
	_9p_getptr(cursor, attrfid, u32);

	LogDebug(COMPONENT_9P, "TXATTRWALK: tag=%u fid=%u attrfid=%u",
		 (u32) *msgtag, *fid, *attrfid);

	_9p_getstr(cursor, name_len, name_str);

	if (*name_len == 0)
		LogDebug(COMPONENT_9P,
			 "TXATTRWALK (component): tag=%u fid=%u attrfid=%u name=(LIST XATTR)",
			 (u32) *msgtag, *fid, *attrfid);
	else
		LogDebug(COMPONENT_9P,
			 "TXATTRWALK (component): tag=%u fid=%u attrfid=%u name=%.*s",
			 (u32) *msgtag, *fid, *attrfid, *name_len, name_str);

	/* Both fids must fit in the per-connection fid table */
	if (*fid >= _9P_FID_PER_CONN)
		return _9p_rerror(req9p, worker_data, msgtag, ERANGE,
				  plenout, preply);

	if (*attrfid >= _9P_FID_PER_CONN)
		return _9p_rerror(req9p, worker_data, msgtag, ERANGE,
				  plenout, preply);

	pfid = req9p->pconn->fids[*fid];

	/* Check that it is a valid fid */
	if (pfid == NULL || pfid->pentry == NULL) {
		LogDebug(COMPONENT_9P, "request on invalid fid=%u", *fid);
		return _9p_rerror(req9p, worker_data, msgtag, EIO,
				  plenout, preply);
	}

	pxattrfid = gsh_calloc(1, sizeof(struct _9p_fid));
	if (pxattrfid == NULL)
		return _9p_rerror(req9p, worker_data, msgtag, ENOMEM,
				  plenout, preply);

	/* Initiate xattr's fid by copying file's fid in it */
	memcpy((char *)pxattrfid, (char *)pfid, sizeof(struct _9p_fid));

	snprintf(name, MAXNAMLEN, "%.*s", *name_len, name_str);

	pxattrfid->specdata.xattr.xattr_content =
	    gsh_malloc(XATTR_BUFFERSIZE);
	if (pxattrfid->specdata.xattr.xattr_content == NULL) {
		gsh_free(pxattrfid);
		return _9p_rerror(req9p, worker_data, msgtag, ENOMEM,
				  plenout, preply);
	}

	if (*name_len == 0) {
		/* xattrwalk is used with an empty name,
		 * this is a listxattr request */
		fsal_status =
		    pxattrfid->pentry->obj_handle->ops->
		    list_ext_attrs(pxattrfid->pentry->obj_handle,
				   &pfid->op_context,
				   FSAL_XATTR_RW_COOKIE, /* Start with RW
							  * cookie, hiding RO
							  * ones */
				   xattrs_tab,
				   100, /* static array size for now */
				   &nb_xattrs_read, &eod_met);

		if (FSAL_IS_ERROR(fsal_status)) {
			gsh_free(pxattrfid->specdata.xattr.xattr_content);
			gsh_free(pxattrfid);
			return _9p_rerror(req9p, worker_data, msgtag,
					  _9p_tools_errno
					  (cache_inode_error_convert
					   (fsal_status)), plenout, preply);
		}

		/* if all xattrent are not read,
		 * returns ERANGE as listxattr does */
		if (eod_met != TRUE) {
			gsh_free(pxattrfid->specdata.xattr.xattr_content);
			gsh_free(pxattrfid);
			return _9p_rerror(req9p, worker_data, msgtag, ERANGE,
					  plenout, preply);
		}

		/* Pack the xattr names, NUL-separated, into the buffer */
		xattr_cursor = pxattrfid->specdata.xattr.xattr_content;
		attrsize = 0LL;
		for (i = 0; i < nb_xattrs_read; i++) {
			/* NOTE(review): snprintf returns the would-be length;
			 * a name longer than MAXNAMLEN makes tmplen exceed
			 * what was written, and the bound check below runs
			 * AFTER the write — pre-existing; worth auditing. */
			tmplen = snprintf(xattr_cursor, MAXNAMLEN, "%s",
					  xattrs_tab[i].xattr_name);
			xattr_cursor[tmplen] = '\0';	/* Just to be sure */
			/* +1 for trailing '\0' */
			xattr_cursor += tmplen + 1;
			attrsize += tmplen + 1;

			/* Make sure not to go beyond the buffer */
			if (attrsize > XATTR_BUFFERSIZE) {
				gsh_free(pxattrfid->specdata.xattr.
					 xattr_content);
				gsh_free(pxattrfid);
				return _9p_rerror(req9p, worker_data, msgtag,
						  ERANGE, plenout, preply);
			}
		}
	} else {
		/* xattrwalk has a non-empty name: resolve that single
		 * extended attribute (id lookup, then value read) */
		fsal_status =
		    pxattrfid->pentry->obj_handle->ops->
		    getextattr_id_by_name(pxattrfid->pentry->obj_handle,
					  &pfid->op_context, name,
					  &pxattrfid->specdata.xattr.
					  xattr_id);

		if (FSAL_IS_ERROR(fsal_status)) {
			gsh_free(pxattrfid->specdata.xattr.xattr_content);
			gsh_free(pxattrfid);

			/* Hook dedicated to ACL management. When attribute
			 * system.posix_acl_access is used, it can't be
			 * created, but can be written anyway.
			 * To do this, return ENODATA instead of ENOATTR
			 * In this case, we do what's needed so that
			 * setxattr() can target the special xattr */
			if (!strncmp(name, "system.posix_acl_access",
				     MAXNAMLEN))
				return _9p_rerror(req9p, worker_data, msgtag,
						  ENODATA, plenout, preply);

			/* ENOENT for xattr is ENOATTR */
			if (fsal_status.major == ERR_FSAL_NOENT)
				return _9p_rerror(req9p, worker_data, msgtag,
						  ENOATTR, plenout, preply);
			else
				return _9p_rerror(req9p, worker_data, msgtag,
						  _9p_tools_errno
						  (cache_inode_error_convert
						   (fsal_status)), plenout,
						  preply);
		}

		fsal_status =
		    pxattrfid->pentry->obj_handle->ops->
		    getextattr_value_by_name(pxattrfid->pentry->obj_handle,
					     &pfid->op_context, name,
					     pxattrfid->specdata.xattr.
					     xattr_content, XATTR_BUFFERSIZE,
					     &attrsize);

		if (FSAL_IS_ERROR(fsal_status)) {
			gsh_free(pxattrfid->specdata.xattr.xattr_content);
			gsh_free(pxattrfid);

			if (fsal_status.minor == ENODATA) {
				return _9p_rerror(req9p, worker_data, msgtag,
						  ENODATA, plenout, preply);
			}
			return _9p_rerror(req9p, worker_data, msgtag,
					  _9p_tools_errno
					  (cache_inode_error_convert
					   (fsal_status)), plenout, preply);
		}
	}

	/* Publish the new fid in the connection's table */
	req9p->pconn->fids[*attrfid] = pxattrfid;

	/* Increments refcount so it won't fall below 0 when we clunk
	 * later */
	cache_inode_lru_ref(pxattrfid->pentry, LRU_REQ_INITIAL);

	/* Build the reply */
	_9p_setinitptr(cursor, preply, _9P_RXATTRWALK);
	_9p_setptr(cursor, msgtag, u16);

	_9p_setvalue(cursor, attrsize, u64);

	_9p_setendptr(cursor, preply);
	_9p_checkbound(cursor, preply, plenout);

	LogDebug(COMPONENT_9P,
		 "RXATTRWALK: tag=%u fid=%u attrfid=%u name=%.*s size=%llu",
		 (u32) *msgtag, *fid, *attrfid, *name_len, name_str,
		 (unsigned long long)attrsize);

	return 1;
}				/* _9p_xattrwalk */
/**
 * @brief Build the per-export context for a GPFS export.
 *
 * Verifies that the export path lives on a mounted GPFS filesystem (by
 * scanning the system mount table), opens and caches a descriptor on the
 * export root, records the filesystem id, fetches the root handle, and
 * attaches the export to (or creates) the FSAL-UP context for that
 * filesystem — starting the GPFS UP event thread the first time a
 * filesystem is seen.
 *
 * @param[out] export_context      Export context to fill in
 * @param[in]  p_export_path       Path being exported
 * @param[in]  fs_specific_options FS-specific option string (unused here)
 */
fsal_status_t GPFSFSAL_BuildExportContext(fsal_export_context_t *export_context,	/* OUT */
					  fsal_path_t *p_export_path,	/* IN */
					  char *fs_specific_options	/* IN */
    )
{
	int rc, fd, mntexists;
	FILE *fp;
	struct mntent *p_mnt;
	char *mnt_dir = NULL;
	struct statfs stat_buf;
	gpfs_fsal_up_ctx_t *gpfs_fsal_up_ctx;
	bool_t start_fsal_up_thread = FALSE;
	fsal_status_t status;
	fsal_op_context_t op_context;
	gpfsfsal_export_context_t *p_export_context =
	    (gpfsfsal_export_context_t *) export_context;

	/* Make sure the FSAL UP context list is initialized */
	if (glist_null(&gpfs_fsal_up_ctx_list))
		init_glist(&gpfs_fsal_up_ctx_list);

	/* sanity check */
	if ((p_export_context == NULL) || (p_export_path == NULL)) {
		LogCrit(COMPONENT_FSAL,
			"NULL mandatory argument passed to %s()",
			__FUNCTION__);
		Return(ERR_FSAL_FAULT, 0, INDEX_FSAL_BuildExportContext);
	}

	/* open mnt file */
	fp = setmntent(MOUNTED, "r");

	if (fp == NULL) {
		rc = errno;
		LogCrit(COMPONENT_FSAL, "Error %d in setmntent(%s): %s", rc,
			MOUNTED, strerror(rc));
		Return(posix2fsal_error(rc), rc,
		       INDEX_FSAL_BuildExportContext);
	}

	/* Check if mount point is really a gpfs share. If not, we can't
	 * continue. */
	mntexists = 0;
	while ((p_mnt = getmntent(fp)) != NULL)
		if (p_mnt->mnt_dir != NULL && p_mnt->mnt_type != NULL)
			/* There is probably a macro for "gpfs" type ... not
			 * sure where it is. */
			if (strncmp(p_mnt->mnt_type, "gpfs", 4) == 0) {
				LogFullDebug(COMPONENT_FSAL,
					     "Checking Export Path %s against GPFS fs %s",
					     p_export_path->path,
					     p_mnt->mnt_dir);

				/* If export path is shorter than fs path,
				 * then this isn't a match */
				if (strlen(p_export_path->path) <
				    strlen(p_mnt->mnt_dir))
					continue;

				/* If export path doesn't have a path
				 * separator after mnt_dir, then it isn't a
				 * proper sub-directory of mnt_dir. */
				if ((p_export_path->
				     path[strlen(p_mnt->mnt_dir)] != '/') &&
				    (p_export_path->
				     path[strlen(p_mnt->mnt_dir)] != '\0'))
					continue;

				if (strncmp(p_mnt->mnt_dir,
					    p_export_path->path,
					    strlen(p_mnt->mnt_dir)) == 0) {
					/* Keep our own copy; ownership may
					 * pass to the UP context below */
					mnt_dir = gsh_strdup(p_mnt->mnt_dir);
					mntexists = 1;
					break;
				}
			}

	endmntent(fp);

	if (mntexists == 0) {
		LogMajor(COMPONENT_FSAL,
			 "GPFS mount point %s does not exist.",
			 p_export_path->path);
		/* mnt_dir is still NULL here; freeing NULL is a no-op */
		gsh_free(mnt_dir);
		ReturnCode(ERR_FSAL_INVAL, 0);
	}

	/* save file descriptor to root of GPFS share */
	fd = open(p_export_path->path, O_RDONLY | O_DIRECTORY);
	if (fd < 0) {
		if (errno == ENOENT)
			LogMajor(COMPONENT_FSAL,
				 "GPFS export path %s does not exist.",
				 p_export_path->path);
		else if (errno == ENOTDIR)
			LogMajor(COMPONENT_FSAL,
				 "GPFS export path %s is not a directory.",
				 p_export_path->path);
		else
			LogMajor(COMPONENT_FSAL,
				 "Could not open GPFS export path %s: rc = %d(%s)",
				 p_export_path->path, errno,
				 strerror(errno));

		if (mnt_dir != NULL)
			gsh_free(mnt_dir);

		ReturnCode(ERR_FSAL_INVAL, 0);
	}

	p_export_context->mount_root_fd = fd;

	LogFullDebug(COMPONENT_FSAL, "GPFSFSAL_BuildExportContext: %d",
		     p_export_context->mount_root_fd);

	/* Save pointer to fsal_staticfsinfo_t in export context */
	p_export_context->fe_static_fs_info = &global_fs_info;

	/* save filesystem ID */
	rc = statfs(p_export_path->path, &stat_buf);

	if (rc) {
		close(fd);
		LogMajor(COMPONENT_FSAL,
			 "statfs call failed on file %s: %d(%s)",
			 p_export_path->path, errno, strerror(errno));

		if (mnt_dir != NULL)
			gsh_free(mnt_dir);

		ReturnCode(ERR_FSAL_INVAL, 0);
	}

	p_export_context->fsid[0] = stat_buf.f_fsid.__val[0];
	p_export_context->fsid[1] = stat_buf.f_fsid.__val[1];

	/* save file handle to root of GPFS share */
	op_context.export_context = export_context;
	/* NOTE(review): op_context.credential is left uninitialized
	 * (original comment: "op_context.credential = ???"); presumably
	 * fsal_internal_get_handle does not consult it — confirm. */

	status = fsal_internal_get_handle(&op_context, p_export_path,
					  (fsal_handle_t *)
					  (&(p_export_context->
					     mount_root_handle)));

	if (FSAL_IS_ERROR(status)) {
		close(p_export_context->mount_root_fd);
		LogMajor(COMPONENT_FSAL,
			 "FSAL BUILD EXPORT CONTEXT: ERROR: Conversion from gpfs filesystem root path to handle failed : %d",
			 status.minor);

		if (mnt_dir != NULL)
			gsh_free(mnt_dir);

		ReturnCode(ERR_FSAL_INVAL, 0);
	}

	gpfs_fsal_up_ctx = gpfsfsal_find_fsal_up_context(p_export_context);

	if (gpfs_fsal_up_ctx == NULL) {
		/* First export on this filesystem: build a new UP context,
		 * which takes ownership of mnt_dir */
		gpfs_fsal_up_ctx = gsh_calloc(1, sizeof(gpfs_fsal_up_ctx_t));

		if (gpfs_fsal_up_ctx == NULL || mnt_dir == NULL) {
			LogFatal(COMPONENT_FSAL,
				 "Out of memory can not continue.");
		}

		/* Initialize the gpfs_fsal_up_ctx */
		init_glist(&gpfs_fsal_up_ctx->gf_exports);
		gpfs_fsal_up_ctx->gf_fs = mnt_dir;
		gpfs_fsal_up_ctx->gf_fsid[0] = p_export_context->fsid[0];
		gpfs_fsal_up_ctx->gf_fsid[1] = p_export_context->fsid[1];

		/* Add it to the list of contexts */
		glist_add_tail(&gpfs_fsal_up_ctx_list,
			       &gpfs_fsal_up_ctx->gf_list);

		start_fsal_up_thread = TRUE;
	} else {
		/* Existing context already owns its own copy of the
		 * filesystem path */
		if (mnt_dir != NULL)
			gsh_free(mnt_dir);
	}

	/* Add this export context to the list for it's gpfs_fsal_up_ctx */
	glist_add_tail(&gpfs_fsal_up_ctx->gf_exports,
		       &p_export_context->fe_list);
	p_export_context->fe_fsal_up_ctx = gpfs_fsal_up_ctx;

	if (start_fsal_up_thread) {
		pthread_attr_t attr_thr;

		memset(&attr_thr, 0, sizeof(attr_thr));

		/* Initialization of thread attributes borrowed from
		 * nfs_init.c */
		if (pthread_attr_init(&attr_thr) != 0)
			LogCrit(COMPONENT_THREAD,
				"can't init pthread's attributes");

		if (pthread_attr_setscope(&attr_thr,
					  PTHREAD_SCOPE_SYSTEM) != 0)
			LogCrit(COMPONENT_THREAD, "can't set pthread's scope");

		if (pthread_attr_setdetachstate(&attr_thr,
						PTHREAD_CREATE_JOINABLE) != 0)
			LogCrit(COMPONENT_THREAD,
				"can't set pthread's join state");

		/* NOTE(review): 2116488 is a magic stack size carried over
		 * from nfs_init.c — confirm before changing */
		if (pthread_attr_setstacksize(&attr_thr, 2116488) != 0)
			LogCrit(COMPONENT_THREAD,
				"can't set pthread's stack size");

		rc = pthread_create(&gpfs_fsal_up_ctx->gf_thread, &attr_thr,
				    GPFSFSAL_UP_Thread, gpfs_fsal_up_ctx);

		if (rc != 0) {
			LogFatal(COMPONENT_THREAD,
				 "Could not create GPFSFSAL_UP_Thread, error = %d (%s)",
				 errno, strerror(errno));
		}
	}

	Return(ERR_FSAL_NO_ERROR, 0, INDEX_FSAL_BuildExportContext);
}
char *cidr_to_str(const CIDR * block, int flags) { int i; int zst, zcur, zlen, zmax; short pflen; short lzer; /* Last zero */ char *toret; char tmpbuf[128]; /* We shouldn't need more than ~5 anywhere */ CIDR *nmtmp; char *nmstr; int nmflags; uint8_t moct; uint16_t v6sect; /* Just in case */ if (block->proto == CIDR_NOPROTO) { errno = EINVAL; return (NULL); } /* * Sanity: If we have both ONLYADDR and ONLYPFLEN, we really don't * have anything to *DO*... */ if ((flags & CIDR_ONLYADDR) && (flags & CIDR_ONLYPFLEN)) { errno = EINVAL; return (NULL); } /* * Now, in any case, there's a maximum length for any address, which * is the completely expanded form of a v6-{mapped,compat} address * with a netmask instead of a prefix. That's 8 pieces of 4 * characters each (32), separated by :'s (+7=39), plus the slash * (+1=40), plus another separated-8*4 (+39=79), plus the trailing * null (+1=80). We'll just allocate 128 for kicks. * * I'm not, at this time anyway, going to try and allocate only and * exactly as much as we need for any given address. Whether * consumers of the library can count on this behavior... well, I * haven't decided yet. Lemme alone. */ toret = gsh_calloc(1, 128); /* * If it's a v4 address, we mask off everything but the last 4 * octets, and just proceed from there. */ if ((block->proto == CIDR_IPV4 && !(flags & CIDR_FORCEV6)) || (flags & CIDR_FORCEV4)) { /* First off, creating the in-addr.arpa form is special */ if (flags & CIDR_REVERSE) { /* * Build the d.c.b.a.in-addr.arpa form. Note that we ignore * flags like CIDR_VERBOSE and the like here, since they lead * to non-valid reverse paths (or at least, paths that no DNS * implementation will look for). So it pretty much always * looks exactly the same. Also, we don't mess with dealing * with netmaks or anything here; we just assume it's a * host address, and treat it as such. 
*/ sprintf(toret, "%d.%d.%d.%d.in-addr.arpa", block->addr[15], block->addr[14], block->addr[13], block->addr[12]); return (toret); } /* Are we bothering to show the address? */ if (!(flags & CIDR_ONLYPFLEN)) { /* If we're USEV6'ing, add whatever prefixes we need */ if (flags & CIDR_USEV6) { if (flags & CIDR_NOCOMPACT) { if (flags & CIDR_VERBOSE) strcat(toret, "0000:0000:0000:0000:0000:"); else strcat(toret, "0:0:0:0:0:"); } else strcat(toret, "::"); if (flags & CIDR_USEV4COMPAT) { if (flags & CIDR_NOCOMPACT) { if (flags & CIDR_VERBOSE) strcat(toret, "0000:"); else strcat(toret, "0:"); } } else strcat(toret, "ffff:"); } /* USEV6 */ /* Now, slap on the v4 address */ for (i = 12; i <= 15; i++) { sprintf(tmpbuf, "%u", (block->addr)[i]); strcat(toret, tmpbuf); if (i < 15) strcat(toret, "."); } } /* ! ONLYPFLEN */ /* Are we bothering to show the pf/mask? */ if (!(flags & CIDR_ONLYADDR)) { /* * And the prefix/netmask. Don't show the '/' if we're only * showing the pflen/mask. */ if (!(flags & CIDR_ONLYPFLEN)) strcat(toret, "/"); /* Which are we showing? */ if (flags & CIDR_NETMASK) { /* * In this case, we can just print out like the address * above. */ for (i = 12; i <= 15; i++) { moct = (block->mask)[i]; if (flags & CIDR_WILDCARD) moct = ~(moct); sprintf(tmpbuf, "%u", moct); strcat(toret, tmpbuf); if (i < 15) strcat(toret, "."); } } else { /* * For this, iterate over each octet, * then each bit within the octet. */ pflen = cidr_get_pflen(block); if (pflen == -1) { gsh_free(toret); return (NULL); /* Preserve errno */ } /* Special handling for forced modes */ if (block->proto == CIDR_IPV6 && (flags & CIDR_FORCEV4)) pflen -= 96; sprintf(tmpbuf, "%u", (flags & CIDR_USEV6) ? pflen + 96 : pflen); strcat(toret, tmpbuf); } } /* ! 
ONLYADDR */ /* That's it for a v4 address, in any of our forms */ } else if ((block->proto == CIDR_IPV6 && !(flags & CIDR_FORCEV4)) || (flags & CIDR_FORCEV6)) { /* First off, creating the .ip6.arpa form is special */ if (flags & CIDR_REVERSE) { /* * Build the ...ip6.arpa form. See notes in the CIDR_REVERSE * section of PROTO_IPV4 above for various notes. */ sprintf(toret, "%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x." "%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x." "%x.%x.%x.%x.%x.ip6.arpa", block->addr[15] & 0x0f, block->addr[15] >> 4, block->addr[14] & 0x0f, block->addr[14] >> 4, block->addr[13] & 0x0f, block->addr[13] >> 4, block->addr[12] & 0x0f, block->addr[12] >> 4, block->addr[11] & 0x0f, block->addr[11] >> 4, block->addr[10] & 0x0f, block->addr[10] >> 4, block->addr[9] & 0x0f, block->addr[9] >> 4, block->addr[8] & 0x0f, block->addr[8] >> 4, block->addr[7] & 0x0f, block->addr[7] >> 4, block->addr[6] & 0x0f, block->addr[6] >> 4, block->addr[5] & 0x0f, block->addr[5] >> 4, block->addr[4] & 0x0f, block->addr[4] >> 4, block->addr[3] & 0x0f, block->addr[3] >> 4, block->addr[2] & 0x0f, block->addr[2] >> 4, block->addr[1] & 0x0f, block->addr[1] >> 4, block->addr[0] & 0x0f, block->addr[0] >> 4); return (toret); } /* Are we showing the address part? */ if (!(flags & CIDR_ONLYPFLEN)) { /* It's a simple, boring, normal v6 address */ /* First, find the longest string of 0's, if there is one */ zst = zcur = -1; zlen = zmax = 0; for (i = 0; i <= 15; i += 2) { if (block->addr[i] == 0 && block->addr[i + 1] == 0) { /* This section is zero */ if (zcur != -1) { /* We're already in a block of 0's */ zlen++; } else { /* Starting a new block */ zcur = i; zlen = 1; } } else { /* This section is non-zero */ if (zcur != -1) { /* * We were in 0's. See if we set a new record, * and if we did, note it and move on. */ if (zlen > zmax) { zst = zcur; zmax = zlen; } /* We're out of 0's, so reset start */ zcur = -1; } } } /* * If zcur is !=-1, we were in 0's when the loop ended. 
Redo * the "if we have a record, update" logic. */ if (zcur != -1 && zlen > zmax) { zst = zcur; zmax = zlen; } /* * Now, what makes it HARD is the options we have. To make * some things simpler, we'll take two octets at a time for * our run through. */ lzer = 0; for (i = 0; i <= 15; i += 2) { /* * Start with a cheat; if this begins our already-found * longest block of 0's, and we're not NOCOMPACT'ing, * stick in a ::, increment past them, and keep on * playing. */ if (i == zst && !(flags & CIDR_NOCOMPACT)) { strcat(toret, "::"); i += (zmax * 2) - 2; lzer = 1; continue; } /* * First, if we're not the first set, we may need a : * before us. If we're not compacting, we always want * it. If we ARE compacting, we want it unless the * previous octet was a 0 that we're minimizing. */ if (i != 0 && ((flags & CIDR_NOCOMPACT) || lzer == 0)) strcat(toret, ":"); lzer = 0; /* Reset */ /* * From here on, we no longer have to worry about * CIDR_NOCOMPACT. */ /* Combine the pair of octets into one number */ v6sect = 0; v6sect |= (block->addr)[i] << 8; v6sect |= (block->addr)[i + 1]; /* * If we're being VERBOSE, use leading 0's. Otherwise, * only use as many digits as we need. */ if (flags & CIDR_VERBOSE) sprintf(tmpbuf, "%.4x", v6sect); else sprintf(tmpbuf, "%x", v6sect); strcat(toret, tmpbuf); /* And loop back around to the next 2-octet set */ } /* for(each 16-bit set) */ } /* ! ONLYPFLEN */ /* Prefix/netmask */ if (!(flags & CIDR_ONLYADDR)) { /* Only show the / if we're not showing just the prefix */ if (!(flags & CIDR_ONLYPFLEN)) strcat(toret, "/"); if (flags & CIDR_NETMASK) { /* * We already wrote how to build the whole v6 form, so * just call ourselves recurively for this. */ nmtmp = cidr_alloc(); nmtmp->proto = block->proto; for (i = 0; i <= 15; i++) if (flags & CIDR_WILDCARD) nmtmp->addr[i] = ~(block->mask[i]); else nmtmp->addr[i] = block->mask[i]; /* * Strip flags: * - CIDR_NETMASK would make us recurse forever. 
* - CIDR_ONLYPFLEN would not show the address bit, which * is the part we want here. * Add flag CIDR_ONLYADDR because that's the bit we care * about. */ nmflags = flags; nmflags &= ~(CIDR_NETMASK) & ~(CIDR_ONLYPFLEN); nmflags |= CIDR_ONLYADDR; nmstr = cidr_to_str(nmtmp, nmflags); cidr_free(nmtmp); if (nmstr == NULL) { gsh_free(toret); return (NULL); /* Preserve errno */ } /* No need to strip the prefix, it doesn't have it */ /* Just add it on */ strcat(toret, nmstr); gsh_free(nmstr); } else { /* Just figure the and show prefix length */ pflen = cidr_get_pflen(block); if (pflen == -1) { gsh_free(toret); return (NULL); /* Preserve errno */ } /* Special handling for forced modes */ if (block->proto == CIDR_IPV4 && (flags & CIDR_FORCEV6)) pflen += 96; sprintf(tmpbuf, "%u", pflen); strcat(toret, tmpbuf); } } /* ! ONLYADDR */ } else {
/**
 * @brief 9P TWALK handler: clone a fid (nwname == 0) or walk a sequence of
 *        path components from an existing fid to create a new fid.
 *
 * On success the new fid is installed in the connection's fid table and an
 * RWALK reply carrying one qid per walked component is built into preply.
 *
 * @param[in]  req9p       Request data (message buffer and connection)
 * @param[in]  worker_data Worker thread context, passed through to error reply
 * @param[out] plenout     Length of the built reply
 * @param[out] preply      Reply buffer
 *
 * @return 1 on success (reply built), or the result of _9p_rerror on failure.
 */
int _9p_walk(struct _9p_request_data *req9p, void *worker_data,
	     u32 *plenout, char *preply)
{
	char *cursor = req9p->_9pmsg + _9P_HDR_SIZE + _9P_TYPE_SIZE;
	unsigned int i = 0;
	u16 *msgtag = NULL;
	u32 *fid = NULL;
	u32 *newfid = NULL;
	u16 *nwname = NULL;
	u16 *wnames_len;
	char *wnames_str;
	uint64_t fileid;
	cache_inode_status_t cache_status;
	cache_entry_t *pentry = NULL;
	char name[MAXNAMLEN];
	u16 *nwqid;
	struct _9p_fid *pfid = NULL;
	struct _9p_fid *pnewfid = NULL;

	/* Now Get data: tag, source fid, destination fid, component count */
	_9p_getptr(cursor, msgtag, u16);
	_9p_getptr(cursor, fid, u32);
	_9p_getptr(cursor, newfid, u32);
	_9p_getptr(cursor, nwname, u16);

	LogDebug(COMPONENT_9P, "TWALK: tag=%u fid=%u newfid=%u nwname=%u",
		 (u32) *msgtag, *fid, *newfid, *nwname);

	/* Both fids must fit in the per-connection fid table */
	if (*fid >= _9P_FID_PER_CONN)
		return _9p_rerror(req9p, worker_data, msgtag, ERANGE, plenout,
				  preply);

	if (*newfid >= _9P_FID_PER_CONN)
		return _9p_rerror(req9p, worker_data, msgtag, ERANGE, plenout,
				  preply);

	pfid = req9p->pconn->fids[*fid];

	/* Check that it is a valid fid */
	if (pfid == NULL || pfid->pentry == NULL) {
		LogDebug(COMPONENT_9P, "request on invalid fid=%u", *fid);
		return _9p_rerror(req9p, worker_data, msgtag, EIO, plenout,
				  preply);
	}

	op_ctx = &pfid->op_context;

	pnewfid = gsh_calloc(1, sizeof(struct _9p_fid));
	if (pnewfid == NULL)
		/* BUGFIX: allocation failure is ENOMEM, not ERANGE
		 * (consistent with _9p_xattrwalk's handling) */
		return _9p_rerror(req9p, worker_data, msgtag, ENOMEM, plenout,
				  preply);

	/* Is this a lookup or a fid cloning operation ? */
	if (*nwname == 0) {
		/* Cloning operation */
		memcpy((char *)pnewfid, (char *)pfid,
		       sizeof(struct _9p_fid));

		/* Set the new fid id */
		pnewfid->fid = *newfid;

		/* This is not a TATTACH fid */
		pnewfid->from_attach = false;

		/* Increments refcount */
		(void) cache_inode_lru_ref(pnewfid->pentry, LRU_REQ_STALE_OK);
	} else {
		/* the walk is in fact a lookup */
		pentry = pfid->pentry;

		for (i = 0; i < *nwname; i++) {
			_9p_getstr(cursor, wnames_len, wnames_str);
			snprintf(name, MAXNAMLEN, "%.*s", *wnames_len,
				 wnames_str);

			LogDebug(COMPONENT_9P,
				 "TWALK (lookup): tag=%u fid=%u newfid=%u (component %u/%u :%s)",
				 (u32) *msgtag, *fid, *newfid, i + 1, *nwname,
				 name);

			/* On iterations after the first, pnewfid->pentry is
			 * the intermediate entry we are about to replace;
			 * clear it so cache_inode_lookup fills it afresh. */
			if (pnewfid->pentry == pentry)
				pnewfid->pentry = NULL;

			/* refcount +1 */
			cache_status = cache_inode_lookup(pentry, name,
							  &pnewfid->pentry);

			if (pnewfid->pentry == NULL) {
				gsh_free(pnewfid);
				return _9p_rerror(req9p, worker_data, msgtag,
						  _9p_tools_errno(cache_status),
						  plenout, preply);
			}

			/* Drop the ref on the intermediate entry (but never
			 * on the original fid's entry, which we don't own) */
			if (pentry != pfid->pentry)
				cache_inode_put(pentry);
			pentry = pnewfid->pentry;
		}

		pnewfid->fid = *newfid;
		pnewfid->op_context = pfid->op_context;
		pnewfid->ppentry = pfid->pentry;
		strncpy(pnewfid->name, name, MAXNAMLEN-1);

		/* gdata ref is not hold : the pfid, which use same gdata */
		/* will be clunked after pnewfid */
		/* This clunk release the gdata */
		pnewfid->gdata = pfid->gdata;

		/* This is not a TATTACH fid */
		pnewfid->from_attach = false;

		cache_status = cache_inode_fileid(pnewfid->pentry, &fileid);
		if (cache_status != CACHE_INODE_SUCCESS) {
			gsh_free(pnewfid);
			return _9p_rerror(req9p, worker_data, msgtag,
					  _9p_tools_errno(cache_status),
					  plenout, preply);
		}

		/* Build the qid */
		/* No cache, we want the client to stay synchronous
		 * with the server */
		pnewfid->qid.version = 0;
		pnewfid->qid.path = fileid;

		pnewfid->specdata.xattr.xattr_id = 0;
		pnewfid->specdata.xattr.xattr_content = NULL;

		/* Map the cache entry type to the 9P qid type */
		switch (pnewfid->pentry->type) {
		case REGULAR_FILE:
		case CHARACTER_FILE:
		case BLOCK_FILE:
		case SOCKET_FILE:
		case FIFO_FILE:
			pnewfid->qid.type = _9P_QTFILE;
			break;

		case SYMBOLIC_LINK:
			pnewfid->qid.type = _9P_QTSYMLINK;
			break;

		case DIRECTORY:
			pnewfid->qid.type = _9P_QTDIR;
			break;

		default:
			LogMajor(COMPONENT_9P,
				 "implementation error, you should not see this message !!!!!!");
			gsh_free(pnewfid);
			return _9p_rerror(req9p, worker_data, msgtag,
					  EINVAL, plenout, preply);
			break;
		}
	}

	/* keep info on new fid */
	req9p->pconn->fids[*newfid] = pnewfid;

	/* As much qid as requested fid */
	nwqid = nwname;

	/* Hold refcount on gdata */
	uid2grp_hold_group_data(pnewfid->gdata);

	/* Build the reply */
	_9p_setinitptr(cursor, preply, _9P_RWALK);
	_9p_setptr(cursor, msgtag, u16);

	_9p_setptr(cursor, nwqid, u16);
	for (i = 0; i < *nwqid; i++) {
		/** @todo: should be different qids
		 * for each directory walked through */
		_9p_setqid(cursor, pnewfid->qid);
	}

	_9p_setendptr(cursor, preply);
	_9p_checkbound(cursor, preply, plenout);

	LogDebug(COMPONENT_9P,
		 "RWALK: tag=%u fid=%u newfid=%u nwqid=%u fileid=%llu pentry=%p refcount=%i",
		 (u32) *msgtag, *fid, *newfid, *nwqid,
		 (unsigned long long)pnewfid->qid.path, pnewfid->pentry,
		 pnewfid->pentry->lru.refcnt);

	return 1;
}
/**
 * @brief NFSv3 READDIR procedure.
 *
 * Validates the directory filehandle, handles the cookie verifier (only when
 * the export enables EXPORT_OPTION_USE_COOKIE_VERIFIER), synthesizes the "."
 * and ".." entries for initial cookies, then fills the reply via
 * fsal_readdir() with nfs3_readdir_callback accumulating entries in tracker.
 *
 * @param[in]  arg  NFS argument union (arg_readdir3 member used)
 * @param[in]  req  SVC request (only used for filehandle debug logging)
 * @param[out] res  NFS result union (res_readdir3 member filled)
 *
 * @return NFS_REQ_OK on a complete reply (including NFS3 error statuses),
 *         NFS_REQ_DROP when the FSAL error is retryable.
 */
int nfs3_readdir(nfs_arg_t *arg, struct svc_req *req, nfs_res_t *res)
{
	struct fsal_obj_handle *dir_obj = NULL;
	struct fsal_obj_handle *parent_dir_obj = NULL;
	unsigned long count = 0;
	uint64_t cookie = 0;
	uint64_t fsal_cookie = 0;
	cookieverf3 cookie_verifier;
	unsigned int num_entries = 0;
	unsigned long estimated_num_entries = 0;
	object_file_type_t dir_filetype = 0;
	bool eod_met = false;
	fsal_status_t fsal_status = {0, 0};
	fsal_status_t fsal_status_gethandle = {0, 0};
	int rc = NFS_REQ_OK;
	struct nfs3_readdir_cb_data tracker = { NULL };
	bool use_cookie_verifier = op_ctx_export_has_option(
					EXPORT_OPTION_USE_COOKIE_VERIFIER);

	if (isDebug(COMPONENT_NFSPROTO) || isDebug(COMPONENT_NFS_READDIR)) {
		char str[LEN_FH_STR];
		log_components_t component;

		nfs_FhandleToStr(req->rq_vers, &(arg->arg_readdir3.dir),
				 NULL, str);

		if (isDebug(COMPONENT_NFSPROTO))
			component = COMPONENT_NFSPROTO;
		else
			component = COMPONENT_NFS_READDIR;

		LogDebug(component,
			 "REQUEST PROCESSING: Calling nfs_Readdir handle: %s",
			 str);
	}

	READDIR3resok * const RES_READDIR3_OK =
	    &res->res_readdir3.READDIR3res_u.resok;

	/* to avoid setting it on each error case */
	res->res_readdir3.READDIR3res_u.resfail.dir_attributes.
	    attributes_follow = FALSE;

	/* Look up object for filehandle */
	dir_obj = nfs3_FhandleToCache(&(arg->arg_readdir3.dir),
				      &(res->res_readdir3.status), &rc);
	if (dir_obj == NULL) {
		/* Status and rc have been set by nfs3_FhandleToCache */
		goto out;
	}

	/* Extract the filetype */
	dir_filetype = dir_obj->type;

	/* Sanity checks -- must be a directory */
	if (dir_filetype != DIRECTORY) {
		res->res_readdir3.status = NFS3ERR_NOTDIR;
		rc = NFS_REQ_OK;
		goto out;
	}

	/* Parse out request arguments and decide how many entries we
	 * want. For NFSv3, deal with the cookie verifier.
	 */
	count = arg->arg_readdir3.count;
	cookie = arg->arg_readdir3.cookie;
	/* Cap the entry estimate at 120; the divisor approximates the
	 * serialized size of one entry3 (minus the nextentry pointer). */
	estimated_num_entries =
	    MIN(count / (sizeof(entry3) - sizeof(char *)), 120);

	LogFullDebug(COMPONENT_NFS_READDIR,
		     "---> nfs3_readdir: count=%lu cookie=%" PRIu64
		     " estimated_num_entries=%lu",
		     count, cookie, estimated_num_entries);

	if (estimated_num_entries == 0) {
		res->res_readdir3.status = NFS3ERR_TOOSMALL;
		rc = NFS_REQ_OK;
		goto out;
	}

	/* To make or check the cookie verifier */
	memset(cookie_verifier, 0, sizeof(cookieverf3));

	/* If cookie verifier is used, then a
	 * non-trivial value is returned to the
	 * client.
	 *
	 * This value is the ctime of the directory. If verifier is
	 * unused (as in many NFS Servers) then only a set of zeros
	 * is returned (trivial value).
	 */
	if (use_cookie_verifier) {
		struct attrlist attrs;

		fsal_prepare_attrs(&attrs, ATTR_CTIME);

		fsal_status = dir_obj->obj_ops.getattrs(dir_obj, &attrs);

		if (FSAL_IS_ERROR(fsal_status)) {
			res->res_readdir3.status =
					nfs3_Errno_status(fsal_status);
			LogFullDebug(COMPONENT_NFS_READDIR,
				     "getattrs returned %s",
				     msg_fsal_err(fsal_status.major));
			/* NOTE(review): fsal_release_attrs is not called on
			 * this error path — confirm whether getattrs failure
			 * can leave acquired attr resources behind. */
			goto out;
		}

		memcpy(cookie_verifier, &attrs.ctime.tv_sec,
		       sizeof(attrs.ctime.tv_sec));

		/* Done with the attrs */
		fsal_release_attrs(&attrs);
	}

	if (cookie != 0 && use_cookie_verifier) {
		/* Not the first call, so we have to check the cookie
		 * verifier */
		if (memcmp(cookie_verifier, arg->arg_readdir3.cookieverf,
			   NFS3_COOKIEVERFSIZE) != 0) {
			res->res_readdir3.status = NFS3ERR_BAD_COOKIE;
			rc = NFS_REQ_OK;
			goto out;
		}
	}

	tracker.entries = gsh_calloc(estimated_num_entries, sizeof(entry3));
	tracker.total_entries = estimated_num_entries;
	tracker.mem_left = count - sizeof(READDIR3resok);
	tracker.count = 0;
	tracker.error = NFS3_OK;

	/* Adjust the cookie we supply to fsal: cookies 0..2 are reserved
	 * for "." and "..", which the server synthesizes itself. */
	if (cookie > 2) {
		/* it is not the cookie for "." nor ".." */
		fsal_cookie = cookie;
	} else {
		fsal_cookie = 0;
	}

	/* Fills "." */
	if (cookie == 0) {
		res->res_readdir3.status = nfs_readdir_dot_entry(dir_obj,
					".", 1, nfs3_readdir_callback,
					&tracker);

		if (res->res_readdir3.status != NFS3_OK) {
			rc = NFS_REQ_OK;
			goto out;
		}
	}

	/* Fills ".." */
	if ((cookie <= 1) && (estimated_num_entries > 1)) {
		/* Get parent pentry */
		fsal_status_gethandle = fsal_lookupp(dir_obj,
						     &parent_dir_obj,
						     NULL);

		if (parent_dir_obj == NULL) {
			res->res_readdir3.status =
			    nfs3_Errno_status(fsal_status_gethandle);
			rc = NFS_REQ_OK;
			goto out;
		}

		res->res_readdir3.status = nfs_readdir_dot_entry(
						parent_dir_obj, "..", 2,
						nfs3_readdir_callback,
						&tracker);

		if (res->res_readdir3.status != NFS3_OK) {
			rc = NFS_REQ_OK;
			goto out;
		}

		parent_dir_obj->obj_ops.put_ref(parent_dir_obj);
		parent_dir_obj = NULL;
	}

	/* Call readdir */
	fsal_status = fsal_readdir(dir_obj, fsal_cookie, &num_entries,
				   &eod_met,
				   0,	/* no attr */
				   nfs3_readdir_callback, &tracker);

	if (FSAL_IS_ERROR(fsal_status)) {
		if (nfs_RetryableError(fsal_status.major)) {
			/* Drop the request so the client retries */
			rc = NFS_REQ_DROP;
			goto out;
		}

		res->res_readdir3.status = nfs3_Errno_status(fsal_status);
		nfs_SetPostOpAttr(dir_obj,
				  &res->res_readdir3.READDIR3res_u.resfail.
					dir_attributes,
				  NULL);
		goto out;
	}

	if (tracker.error != NFS3_OK) {
		res->res_readdir3.status = tracker.error;

		nfs_SetPostOpAttr(dir_obj,
				  &res->res_readdir3.READDIR3res_u.resfail.
					dir_attributes,
				  NULL);
		goto out;
	}

	LogFullDebug(COMPONENT_NFS_READDIR,
		     "-- Readdir -> Call to fsal_readdir(cookie=%" PRIu64 ")",
		     fsal_cookie);

	/* An empty result past the dot entries means end-of-directory */
	if ((num_entries == 0) && (cookie > 1)) {
		RES_READDIR3_OK->reply.entries = NULL;
		RES_READDIR3_OK->reply.eof = TRUE;
	} else {
		RES_READDIR3_OK->reply.entries = tracker.entries;
		RES_READDIR3_OK->reply.eof = eod_met;
	}

	nfs_SetPostOpAttr(dir_obj, &RES_READDIR3_OK->dir_attributes, NULL);
	memcpy(RES_READDIR3_OK->cookieverf, cookie_verifier,
	       sizeof(cookieverf3));
	res->res_readdir3.status = NFS3_OK;

	rc = NFS_REQ_OK;

 out:
	/* return references */
	if (dir_obj)
		dir_obj->obj_ops.put_ref(dir_obj);

	if (parent_dir_obj)
		parent_dir_obj->obj_ops.put_ref(parent_dir_obj);

	/* Deallocate memory in the event of an error (or of an empty
	 * reply that doesn't hand tracker.entries to the XDR layer) */
	if (((res->res_readdir3.status != NFS3_OK) || (rc != NFS_REQ_OK) ||
	     ((num_entries == 0) && (cookie > 1))) &&
	    (tracker.entries != NULL)) {
		free_entry3s(tracker.entries);
		RES_READDIR3_OK->reply.entries = NULL;
	}

	return rc;
}				/* nfs3_readdir */
static bool pseudo_node(char *token, void *arg) { struct node_state *state = (struct node_state *)arg; pseudofs_entry_t *node = NULL; pseudofs_entry_t *new_node = NULL; struct gsh_buffdesc key; char fullpseudopath[MAXPATHLEN + 2]; int j = 0; state->retval = 0; /* start off with no errors */ LogFullDebug(COMPONENT_NFS_V4_PSEUDO, "token %s", token); for (node = state->this_node->sons; node != NULL; node = node->next) { /* Looking for a matching entry */ if (!strcmp(node->name, token)) { /* matched entry is new parent node */ state->this_node = node; return true; } j++; } /* not found so create a new entry */ if (gPseudoFs.last_pseudo_id == (MAX_PSEUDO_ENTRY - 1)) { LogMajor(COMPONENT_NFS_V4_PSEUDO, "Too many nodes in Export_Id %d Path=\"%s\" Pseudo=\"%s\"", state->entry->id, state->entry->fullpath, state->entry->pseudopath); state->retval = ENOMEM; return false; } new_node = gsh_calloc(1, sizeof(pseudofs_entry_t)); if (new_node == NULL) { LogMajor(COMPONENT_NFS_V4_PSEUDO, "Insufficient memory to create pseudo fs node"); state->retval = ENOMEM; return false; } strcpy(new_node->name, token); gPseudoFs.last_pseudo_id++; /** @todo: need to fix this ... 
*/ fullpath(fullpseudopath, new_node, state->this_node, MAXPATHLEN); key = create_pseudo_handle_key(fullpseudopath, (ushort) strlen(fullpseudopath)); new_node->fsopaque = (uint8_t *) key.addr; new_node->pseudo_id = *(uint64_t *) new_node->fsopaque; gPseudoFs.reverse_tab[gPseudoFs.last_pseudo_id] = new_node; new_node->last = new_node; if (isMidDebug(COMPONENT_NFS_V4_PSEUDO)) { char str[256]; sprint_mem(str, new_node->fsopaque, V4_FH_OPAQUE_SIZE); LogMidDebug(COMPONENT_NFS_V4_PSEUDO, "Built pseudofs entry " "index:%u name:%s path:%s handle:%s", gPseudoFs.last_pseudo_id, new_node->name, fullpseudopath, str); } /* Step into the new entry and attach it to the tree */ if (state->this_node->sons == NULL) { state->this_node->sons = new_node; } else { state->this_node->sons->last->next = new_node; state->this_node->sons->last = new_node; } new_node->parent = state->this_node; state->this_node = new_node; return true; }
* @param[out] obj Object created * * @return 0 on success, negative error codes on failure. */ int construct_handle(struct rgw_export *export, struct rgw_file_handle *rgw_fh, struct stat *st, struct rgw_handle **obj) { /* Poitner to the handle under construction */ struct rgw_handle *constructing = NULL; *obj = NULL; constructing = gsh_calloc(1, sizeof(struct rgw_handle)); if (constructing == NULL) return -ENOMEM; constructing->rgw_fh = rgw_fh; constructing->up_ops = export->export.up_ops; /* XXXX going away */ constructing->handle.attrs = &constructing->attributes; rgw2fsal_attributes(st, &constructing->attributes); fsal_obj_handle_init(&constructing->handle, &export->export, constructing->attributes.type); handle_ops_init(&constructing->handle.obj_ops); constructing->export = export; *obj = constructing;
/**
 * @brief Initialize the MFSL async layer.
 *
 * Allocates per-synclet data and thread-id arrays, initializes each
 * synclet's condvar/mutexes/LRU, then starts the dispatcher thread and one
 * thread per synclet, and finally initializes the async hash.
 *
 * @param[in] init_info MFSL parameters (number of synclets, LRU parameters)
 *
 * @return ERR_FSAL_NO_ERROR on success; ERR_FSAL_NOMEM, ERR_FSAL_INVAL or
 *         ERR_FSAL_SERVERFAULT on failure (via MFSL_return).
 */
fsal_status_t MFSL_Init(mfsl_parameter_t * init_info    /* IN */
    )
{
  unsigned long i = 0;
  unsigned int rc = 0;
  pthread_attr_t attr_thr;
  LRU_status_t lru_status;

  /* Keep the parameter in mind */
  mfsl_param = *init_info;

  /* Init for thread parameter (mostly for scheduling) */
  pthread_attr_init(&attr_thr);
  pthread_attr_setscope(&attr_thr, PTHREAD_SCOPE_SYSTEM);
  pthread_attr_setdetachstate(&attr_thr, PTHREAD_CREATE_JOINABLE);

  /* Allocate the synclet related structure */
  if((mfsl_async_synclet_thrid =
      gsh_malloc(init_info->nb_synclet * sizeof(pthread_t))) == NULL)
    MFSL_return(ERR_FSAL_NOMEM, errno);

  if((synclet_data =
      gsh_calloc(init_info->nb_synclet, sizeof(mfsl_synclet_data_t))) == NULL)
    {
      /* BUGFIX: release the thread-id array allocated just above;
       * it was leaked on this error path */
      gsh_free(mfsl_async_synclet_thrid);
      mfsl_async_synclet_thrid = NULL;
      MFSL_return(ERR_FSAL_NOMEM, errno);
    }

  /* Initialize per-synclet synchronization objects and LRU */
  for(i = 0; i < init_info->nb_synclet; i++)
    {
      synclet_data[i].my_index = i;
      if(pthread_cond_init(&synclet_data[i].op_condvar, NULL) != 0)
        MFSL_return(ERR_FSAL_INVAL, 0);

      if(pthread_mutex_init(&synclet_data[i].mutex_op_condvar, NULL) != 0)
        MFSL_return(ERR_FSAL_INVAL, 0);

      if(pthread_mutex_init(&synclet_data[i].mutex_op_lru, NULL) != 0)
        MFSL_return(ERR_FSAL_INVAL, 0);

      if((synclet_data[i].op_lru =
          LRU_Init(mfsl_param.lru_param, &lru_status)) == NULL)
        MFSL_return(ERR_FSAL_INVAL, 0);

      synclet_data[i].passcounter = 0;
    }                           /* for */

  /* Now start the threads */
  if((rc = pthread_create(&mfsl_async_adt_thrid,
                          &attr_thr,
                          mfsl_async_asynchronous_dispatcher_thread,
                          (void *)NULL)) != 0)
    MFSL_return(ERR_FSAL_SERVERFAULT, -rc);

  for(i = 0; i < init_info->nb_synclet; i++)
    {
      /* The synclet index is smuggled through the void* argument */
      if((rc = pthread_create(&mfsl_async_synclet_thrid[i],
                              &attr_thr,
                              mfsl_async_synclet_thread, (void *)i)) != 0)
        MFSL_return(ERR_FSAL_SERVERFAULT, -rc);
    }

  if(!mfsl_async_hash_init())
    MFSL_return(ERR_FSAL_SERVERFAULT, 0);

  /* Regular Exit */
  MFSL_return(ERR_FSAL_NO_ERROR, 0);
}
/**
 * @brief Allocate and initialize a VFS FSAL object handle.
 *
 * Copies the kernel file handle into trailing storage, fills in
 * type-specific state (fd for regular files, link content for symlinks,
 * directory handle + name for unopenable types), then converts POSIX
 * attributes and wires up the operation vectors.
 *
 * @param[in] dirfd   fd of the containing directory
 * @param[in] fh      kernel handle of the object
 * @param[in] fs      filesystem the object belongs to
 * @param[in] stat    POSIX attributes of the object
 * @param[in] dir_fh  kernel handle of the directory (may be NULL)
 * @param[in] path    name of the object within the directory
 * @param[in] exp_hdl export this object belongs to
 *
 * @return the new handle, or NULL on any failure (nothing leaked).
 */
static struct vfs_fsal_obj_handle *alloc_handle(int dirfd,
						vfs_file_handle_t *fh,
						struct fsal_filesystem *fs,
						struct stat *stat,
						vfs_file_handle_t *dir_fh,
						const char *path,
						struct fsal_export *exp_hdl)
{
	struct vfs_fsal_export *myself =
	    container_of(exp_hdl, struct vfs_fsal_export, export);
	struct vfs_fsal_obj_handle *hdl;
	fsal_status_t st;

	/* One allocation: the vfs_file_handle_t lives right after hdl */
	hdl = gsh_calloc(1, (sizeof(struct vfs_fsal_obj_handle) +
			     sizeof(vfs_file_handle_t)));

	if (hdl == NULL)
		return NULL;

	hdl->handle = (vfs_file_handle_t *) &hdl[1];
	memcpy(hdl->handle, fh, sizeof(vfs_file_handle_t));
	hdl->obj_handle.type = posix2fsal_type(stat->st_mode);
	hdl->dev = posix2fsal_devt(stat->st_dev);
	hdl->up_ops = exp_hdl->up_ops;
	hdl->obj_handle.fs = fs;

	if (hdl->obj_handle.type == REGULAR_FILE) {
		hdl->u.file.fd = -1;	/* no open on this yet */
		hdl->u.file.openflags = FSAL_O_CLOSED;
	} else if (hdl->obj_handle.type == SYMBOLIC_LINK) {
		ssize_t retlink;
		size_t len = stat->st_size + 1;
		char *link_content = gsh_malloc(len);

		if (link_content == NULL)
			goto spcerr;

		retlink = vfs_readlink_by_handle(fh, dirfd, path,
						 link_content, len);
		if (retlink < 0 || retlink == len) {
			/* BUGFIX: free link_content here; it is not yet
			 * attached to hdl, so the spcerr path could not
			 * release it and it leaked */
			gsh_free(link_content);
			goto spcerr;
		}
		link_content[retlink] = '\0';
		hdl->u.symlink.link_content = link_content;
		hdl->u.symlink.link_size = len;
	} else if (vfs_unopenable_type(hdl->obj_handle.type)) {
		/* AF_UNIX sockets, character special, and block
		   special files require craziness */
		if (dir_fh == NULL) {
			int retval;

			vfs_alloc_handle(dir_fh);
			/* BUGFIX: derive the handle of the *directory* into
			 * dir_fh; the original passed fh, leaving dir_fh
			 * uninitialized before the memcpy below */
			retval = vfs_fd_to_handle(dirfd, hdl->obj_handle.fs,
						  dir_fh);
			if (retval < 0)
				goto spcerr;
		}
		hdl->u.unopenable.dir = gsh_malloc(sizeof(vfs_file_handle_t));
		if (hdl->u.unopenable.dir == NULL)
			goto spcerr;
		memcpy(hdl->u.unopenable.dir, dir_fh,
		       sizeof(vfs_file_handle_t));
		hdl->u.unopenable.name = gsh_strdup(path);
		if (hdl->u.unopenable.name == NULL)
			goto spcerr;
	}

	hdl->obj_handle.attributes.mask =
	    exp_hdl->exp_ops.fs_supported_attrs(exp_hdl);
	st = posix2fsal_attributes(stat, &hdl->obj_handle.attributes);
	if (FSAL_IS_ERROR(st))
		goto spcerr;
	hdl->obj_handle.attributes.fsid = fs->fsid;

	fsal_obj_handle_init(&hdl->obj_handle, exp_hdl,
			     posix2fsal_type(stat->st_mode));
	vfs_handle_ops_init(&hdl->obj_handle.obj_ops);
	vfs_sub_init_handle_ops(myself, &hdl->obj_handle.obj_ops);
	return hdl;

 spcerr:
	/* Release whatever type-specific state was attached before failing */
	if (hdl->obj_handle.type == SYMBOLIC_LINK) {
		if (hdl->u.symlink.link_content != NULL)
			gsh_free(hdl->u.symlink.link_content);
	} else if (vfs_unopenable_type(hdl->obj_handle.type)) {
		if (hdl->u.unopenable.name != NULL)
			gsh_free(hdl->u.unopenable.name);
		if (hdl->u.unopenable.dir != NULL)
			gsh_free(hdl->u.unopenable.dir);
	}
	gsh_free(hdl);		/* elvis has left the building */
	return NULL;
}
/**
 * @brief Read entries from an opened HPSS directory.
 *
 * Repeatedly calls HPSSFSAL_ReadRawAttrsHandle, converting each raw HPSS
 * directory entry into an fsal_dirent_t, until either the caller-supplied
 * buffer is full or the end of the directory is reached.
 *
 * @param[in]  dir_descriptor Opened directory (handle + credentials)
 * @param[in]  p_context      Operation context (unused here; kept for API)
 * @param[in]  start_position Cookie to resume reading from
 * @param[in]  get_attr_mask  Attributes the caller wants for each entry
 * @param[in]  buffersize     Size in bytes of the pdirent array
 * @param[out] pdirent        Array of entries, chained via nextentry
 * @param[out] end_position   Cookie of the last entry returned
 * @param[out] nb_entries     Number of entries filled in pdirent
 * @param[out] end_of_dir     TRUE when the directory end was reached
 *
 * @return ERR_FSAL_NO_ERROR on success, ERR_FSAL_FAULT on bad arguments or
 *         allocation failure, or the converted HPSS error.
 */
fsal_status_t HPSSFSAL_readdir(hpssfsal_dir_t * dir_descriptor, /* IN */
                               fsal_op_context_t * p_context,   /* IN */
                               hpssfsal_cookie_t start_position,        /* IN */
                               fsal_attrib_mask_t get_attr_mask,        /* IN */
                               fsal_mdsize_t buffersize,        /* IN */
                               fsal_dirent_t * pdirent, /* OUT */
                               hpssfsal_cookie_t * end_position,        /* OUT */
                               fsal_count_t * nb_entries,       /* OUT */
                               fsal_boolean_t * end_of_dir      /* OUT */
    )
{
  int rc, returned, i;
  fsal_status_t st;
  fsal_attrib_mask_t handle_attr_mask;
  fsal_count_t current_nb_entries, missing_entries, max_dir_entries;

  /* hpss_ReadRawAttrsHandle arguments. */
  u_signed64 curr_start_position;
  unsigned32 buff_size_in;
  unsigned32 bool_getattr_in;
  unsigned32 bool_eod_out;
  u_signed64 last_offset_out;
  /* Heap-allocated scratch buffer for raw HPSS entries */
  ns_DirEntry_t * outbuff = NULL ;

  /* sanity checks */
  if(!dir_descriptor || !pdirent || !end_position || !nb_entries || !end_of_dir)
    Return(ERR_FSAL_FAULT, 0, INDEX_FSAL_readdir);

  /* NOTE(review): allocation failure is reported as ERR_FSAL_FAULT rather
   * than a NOMEM-style error — confirm this matches callers' expectations */
  if((outbuff = gsh_calloc(FSAL_READDIR_SIZE, sizeof(ns_DirEntry_t))) == NULL)
    Return(ERR_FSAL_FAULT, 0, INDEX_FSAL_readdir);

  /* handle provides : suppattr, type, fileid */
  /** @todo : does handle provide mounted_on_fileid ? */
  handle_attr_mask = FSAL_ATTR_SUPPATTR | FSAL_ATTR_TYPE | FSAL_ATTR_FILEID;

  /* if the handle cannot provide the requested attributes,
   * we have to retrieve file attributes. */
  if(get_attr_mask & (~handle_attr_mask))
    bool_getattr_in = TRUE;
  else
    bool_getattr_in = FALSE;

  /* init values */
  curr_start_position = start_position.data;
  bool_eod_out = 0;
  current_nb_entries = 0;
  max_dir_entries = (buffersize / sizeof(fsal_dirent_t));

  /* while we haven't filled the output buffer
   * and the end of dir has not been reached :
   */
  while((current_nb_entries < max_dir_entries) && (!bool_eod_out))
    {
      missing_entries = max_dir_entries - current_nb_entries;

      /* If the requested count is smaller than the default FSAL_READDIR_SIZE,
       * we use a smaller output buffer.
       */
      if(missing_entries < FSAL_READDIR_SIZE)
        buff_size_in = missing_entries * sizeof(ns_DirEntry_t);
      else
        buff_size_in = FSAL_READDIR_SIZE * sizeof(ns_DirEntry_t);

      /* call to hpss clapi */
      TakeTokenFSCall();

      rc = HPSSFSAL_ReadRawAttrsHandle(&(dir_descriptor->dir_handle.data.ns_handle),
                                       curr_start_position,
                                       &dir_descriptor->context.credential.hpss_usercred,
                                       buff_size_in,
                                       bool_getattr_in,
                                       ReturnInconsistentDirent,
                                       &bool_eod_out, &last_offset_out, outbuff);

      ReleaseTokenFSCall();

      if(rc < 0)
        {
          /* error: free the scratch buffer before returning */
          gsh_free( outbuff ) ;
          Return(hpss2fsal_error(rc), -rc, INDEX_FSAL_readdir);
        }
      else
        /* a non-negative rc is the number of entries read */
        returned = rc;

      /* Fills the fsal dirent list. */
      for(i = 0; i < returned; i++)
        {
          memset( (char *)&(pdirent[current_nb_entries].handle), 0,
                  sizeof( hpssfsal_handle_t ) ) ;

          pdirent[current_nb_entries].handle.data.ns_handle =
              outbuff[i].ObjHandle;

          pdirent[current_nb_entries].handle.data.obj_type =
              hpss2fsal_type(outbuff[i].ObjHandle.Type);

          st = FSAL_str2name((char *)outbuff[i].Name, HPSS_MAX_FILE_NAME,
                             &pdirent[current_nb_entries].name);
          /** @todo : test returned status */
          /* NOTE(review): st from FSAL_str2name is ignored; a conversion
           * failure would leave the entry name unset — confirm intent */

          pdirent[current_nb_entries].cookie.data = outbuff[i].ObjOffset;

          /* set asked attributes */
          pdirent[current_nb_entries].attributes.asked_attributes =
              get_attr_mask;

          if(bool_getattr_in)
            {
              /* convert HPSS attributes to fsal attributes */
              st = hpss2fsal_attributes(&outbuff[i].ObjHandle,
                                        &outbuff[i].Attrs,
                                        &pdirent[current_nb_entries].attributes);

              /* on error, we set a special bit in the mask. */
              if(FSAL_IS_ERROR(st))
                {
                  FSAL_CLEAR_MASK(pdirent[current_nb_entries].attributes.
                                  asked_attributes);
                  FSAL_SET_MASK(pdirent[current_nb_entries].attributes.asked_attributes,
                                FSAL_ATTR_RDATTR_ERR);
                }
            }
          else if(get_attr_mask)
            {
              /* extract asked attributes from file handle */
              st = hpssHandle2fsalAttributes(&outbuff[i].ObjHandle,
                                             &pdirent[current_nb_entries].attributes);

              /* on error, we set a special bit in the mask. */
              if(FSAL_IS_ERROR(st))
                {
                  FSAL_CLEAR_MASK(pdirent[current_nb_entries].attributes.
                                  asked_attributes);
                  FSAL_SET_MASK(pdirent[current_nb_entries].attributes.asked_attributes,
                                FSAL_ATTR_RDATTR_ERR);
                }
            }

          /* set the previous' next */
          if(current_nb_entries)
            pdirent[current_nb_entries - 1].nextentry =
                &(pdirent[current_nb_entries]);

          /* current's next */
          pdirent[current_nb_entries].nextentry = NULL;

          /* increment entries count */
          current_nb_entries++;

          curr_start_position = last_offset_out;
        }
    }

  /* At this point, 2 cases :
   * - the requested count is reached
   * - the end of dir is reached.
   * However, the treatment is the same.
   */

  /* setting output vars. */

  /* if no item was read, the offset keeps the same. */
  end_position->data =
      (current_nb_entries == 0 ? start_position.data : last_offset_out);

  *nb_entries = current_nb_entries;

  *end_of_dir = (bool_eod_out ? TRUE : FALSE);

  LogDebug(COMPONENT_FSAL, "%s() returned %u entries, end_of_dir=%d", __func__,
           *nb_entries, *end_of_dir);

  gsh_free( outbuff ) ;

  Return(ERR_FSAL_NO_ERROR, 0, INDEX_FSAL_readdir);

  /* @todo badly set fsal_log ? */
}
/**
 * @brief 9P TXATTRWALK handler: attach an xattr fid to a file fid.
 *
 * With an empty name this is a listxattr request: all xattr names are
 * gathered into the new fid's xattr_content buffer.  With a non-empty
 * name it resolves that single xattr and caches its value in the buffer.
 * Either way the new fid is installed at *attrfid and the reply carries
 * the total size of the data a subsequent TREAD will return.
 *
 * @param[in]  req9p   Incoming 9P request (message, connection, fids)
 * @param[out] plenout Length of the constructed reply
 * @param[out] preply  Reply buffer
 *
 * @return 1 on success (reply built), or the _9p_rerror() result on error.
 */
int _9p_xattrwalk(struct _9p_request_data *req9p, u32 *plenout, char *preply)
{
	char *cursor = req9p->_9pmsg + _9P_HDR_SIZE + _9P_TYPE_SIZE;
	u16 *msgtag = NULL;
	u32 *fid = NULL;
	u32 *attrfid = NULL;
	u16 *name_len;
	char *name_str;
	size_t attrsize = 0;
	fsal_status_t fsal_status;
	char name[MAXNAMLEN];
	fsal_xattrent_t xattrs_arr[XATTRS_ARRAY_LEN];
	int eod_met = false;
	unsigned int nb_xattrs_read = 0;
	unsigned int i = 0;
	char *xattr_cursor = NULL;
	struct _9p_fid *pfid = NULL;
	struct _9p_fid *pxattrfid = NULL;

	/* Get data */
	_9p_getptr(cursor, msgtag, u16);
	_9p_getptr(cursor, fid, u32);
	_9p_getptr(cursor, attrfid, u32);

	LogDebug(COMPONENT_9P, "TXATTRWALK: tag=%u fid=%u attrfid=%u",
		 (u32) *msgtag, *fid, *attrfid);

	_9p_getstr(cursor, name_len, name_str);

	if (*name_len == 0)
		LogDebug(COMPONENT_9P,
			 "TXATTRWALK (component): tag=%u fid=%u attrfid=%u name=(LIST XATTR)",
			 (u32) *msgtag, *fid, *attrfid);
	else
		LogDebug(COMPONENT_9P,
			 "TXATTRWALK (component): tag=%u fid=%u attrfid=%u name=%.*s",
			 (u32) *msgtag, *fid, *attrfid, *name_len, name_str);

	if (*fid >= _9P_FID_PER_CONN)
		return _9p_rerror(req9p, msgtag, ERANGE, plenout, preply);

	if (*attrfid >= _9P_FID_PER_CONN)
		return _9p_rerror(req9p, msgtag, ERANGE, plenout, preply);

	pfid = req9p->pconn->fids[*fid];

	/* Check that it is a valid fid */
	if (pfid == NULL || pfid->pentry == NULL) {
		LogDebug(COMPONENT_9P, "request on invalid fid=%u", *fid);
		return _9p_rerror(req9p, msgtag, EIO, plenout, preply);
	}

	/* BUGFIX: a name longer than the local buffer used to be silently
	 * truncated by snprintf() below, so the request would then operate
	 * on a *different* xattr.  Refuse it outright instead. */
	if (*name_len >= sizeof(name))
		return _9p_rerror(req9p, msgtag, ENAMETOOLONG, plenout,
				  preply);

	pxattrfid = gsh_calloc(1, sizeof(struct _9p_fid));
	if (pxattrfid == NULL)
		return _9p_rerror(req9p, msgtag, ENOMEM, plenout, preply);

	/* set op_ctx, it will be useful if FSAL is later called */
	_9p_init_opctx(pfid, req9p);

	/* Initiate xattr's fid by copying file's fid in it */
	memcpy((char *)pxattrfid, (char *)pfid, sizeof(struct _9p_fid));

	snprintf(name, MAXNAMLEN, "%.*s", *name_len, name_str);

	pxattrfid->specdata.xattr.xattr_content = gsh_malloc(XATTR_BUFFERSIZE);
	if (pxattrfid->specdata.xattr.xattr_content == NULL) {
		gsh_free(pxattrfid);
		return _9p_rerror(req9p, msgtag, ENOMEM, plenout, preply);
	}

	if (*name_len == 0) {
		/* xattrwalk is used with an empty name,
		 * this is a listxattr request */
		fsal_status =
		    pxattrfid->pentry->obj_handle->obj_ops.list_ext_attrs(
			pxattrfid->pentry->obj_handle,
			FSAL_XATTR_RW_COOKIE,	/* Start with RW cookie,
						 * hiding RO ones */
			xattrs_arr,
			XATTRS_ARRAY_LEN, /** @todo fix static length */
			&nb_xattrs_read,
			&eod_met);

		if (FSAL_IS_ERROR(fsal_status)) {
			gsh_free(pxattrfid->specdata.xattr.xattr_content);
			gsh_free(pxattrfid);
			return _9p_rerror(req9p, msgtag,
					  _9p_tools_errno(
					    cache_inode_error_convert(
						fsal_status)),
					  plenout, preply);
		}

		/* if all xattrent are not read,
		 * returns ERANGE as listxattr does */
		if (eod_met != true) {
			gsh_free(pxattrfid->specdata.xattr.xattr_content);
			gsh_free(pxattrfid);
			return _9p_rerror(req9p, msgtag, ERANGE,
					  plenout, preply);
		}

		/* Concatenate the NUL-terminated names into xattr_content,
		 * the same layout listxattr(2) produces. */
		xattr_cursor = pxattrfid->specdata.xattr.xattr_content;
		attrsize = 0;
		for (i = 0; i < nb_xattrs_read; i++) {
			size_t namelen = strlen(xattrs_arr[i].xattr_name);

			/* BUGFIX: bound-check BEFORE writing.  The old code
			 * copied the name first and only then compared
			 * attrsize against XATTR_BUFFERSIZE, so up to
			 * MAXNAMLEN bytes could be written past the end of
			 * xattr_content before the overflow was noticed. */
			if (attrsize + namelen + 1 > XATTR_BUFFERSIZE) {
				gsh_free(pxattrfid->specdata.xattr.
					 xattr_content);
				gsh_free(pxattrfid);
				return _9p_rerror(req9p, msgtag, ERANGE,
						  plenout, preply);
			}

			memcpy(xattr_cursor, xattrs_arr[i].xattr_name,
			       namelen);
			xattr_cursor[namelen] = '\0';

			/* +1 for trailing '\0' */
			xattr_cursor += namelen + 1;
			attrsize += namelen + 1;
		}
	} else {
		/* xattrwalk has a non-empty name, use regular getxattr */
		fsal_status =
		    pxattrfid->pentry->obj_handle->obj_ops.
		    getextattr_id_by_name(pxattrfid->pentry->obj_handle,
					  name,
					  &pxattrfid->specdata.xattr.
					  xattr_id);
		if (FSAL_IS_ERROR(fsal_status)) {
			gsh_free(pxattrfid->specdata.xattr.xattr_content);
			gsh_free(pxattrfid);

			/* ENOENT for xattr is ENOATTR */
			if (fsal_status.major == ERR_FSAL_NOENT)
				return _9p_rerror(req9p, msgtag, ENOATTR,
						  plenout, preply);
			else
				return _9p_rerror(req9p, msgtag,
						  _9p_tools_errno(
						    cache_inode_error_convert(
							fsal_status)),
						  plenout, preply);
		}

		/* Cache the value now so a later TREAD on attrfid can be
		 * served straight from xattr_content. */
		fsal_status =
		    pxattrfid->pentry->obj_handle->obj_ops.
		    getextattr_value_by_name(pxattrfid->pentry->obj_handle,
					     name,
					     pxattrfid->specdata.xattr.
					     xattr_content,
					     XATTR_BUFFERSIZE,
					     &attrsize);

		if (FSAL_IS_ERROR(fsal_status)) {
			gsh_free(pxattrfid->specdata.xattr.xattr_content);
			gsh_free(pxattrfid);

			/* fsal_status.minor is a valid errno code */
			return _9p_rerror(req9p, msgtag,
					  fsal_status.minor, plenout,
					  preply);
		}
	}

	req9p->pconn->fids[*attrfid] = pxattrfid;

	/* Increments refcount as we're manually making a new copy */
	(void) cache_inode_lru_ref(pfid->pentry, LRU_REQ_STALE_OK);

	/* hold reference on gdata */
	uid2grp_hold_group_data(pxattrfid->gdata);

	get_gsh_export_ref(pfid->export);
	get_9p_user_cred_ref(pfid->ucred);

	/* Build the reply */
	_9p_setinitptr(cursor, preply, _9P_RXATTRWALK);
	_9p_setptr(cursor, msgtag, u16);

	_9p_setvalue(cursor, attrsize, u64);

	_9p_setendptr(cursor, preply);
	_9p_checkbound(cursor, preply, plenout);

	LogDebug(COMPONENT_9P,
		 "RXATTRWALK: tag=%u fid=%u attrfid=%u name=%.*s size=%llu",
		 (u32) *msgtag, *fid, *attrfid, *name_len, name_str,
		 (unsigned long long)attrsize);

	return 1;
}				/* _9p_xattrwalk */
static nfsstat4 make_ds_handle(struct fsal_pnfs_ds *const pds, const struct gsh_buffdesc *const desc, struct fsal_ds_handle **const handle, int flags) { struct gpfs_file_handle *fh = (struct gpfs_file_handle *)desc->addr; struct gpfs_ds *ds; /* Handle to be created */ struct fsal_filesystem *fs; struct fsal_fsid__ fsid; *handle = NULL; if (desc->len != sizeof(struct gpfs_file_handle)) return NFS4ERR_BADHANDLE; if (flags & FH_FSAL_BIG_ENDIAN) { #if (BYTE_ORDER != BIG_ENDIAN) fh->handle_size = bswap_16(fh->handle_size); fh->handle_type = bswap_16(fh->handle_type); fh->handle_version = bswap_16(fh->handle_version); fh->handle_key_size = bswap_16(fh->handle_key_size); #endif } else { #if (BYTE_ORDER == BIG_ENDIAN) fh->handle_size = bswap_16(fh->handle_size); fh->handle_type = bswap_16(fh->handle_type); fh->handle_version = bswap_16(fh->handle_version); fh->handle_key_size = bswap_16(fh->handle_key_size); #endif } LogFullDebug(COMPONENT_FSAL, "flags 0x%X size %d type %d ver %d key_size %d FSID 0x%X:%X", flags, fh->handle_size, fh->handle_type, fh->handle_version, fh->handle_key_size, fh->handle_fsid[0], fh->handle_fsid[1]); gpfs_extract_fsid(fh, &fsid); fs = lookup_fsid(&fsid, GPFS_FSID_TYPE); if (fs == NULL) { LogInfo(COMPONENT_FSAL, "Could not find filesystem for fsid=0x%016"PRIx64 ".0x%016"PRIx64" from handle", fsid.major, fsid.minor); return NFS4ERR_STALE; } if (fs->fsal != pds->fsal) { LogInfo(COMPONENT_FSAL, "Non GPFS filesystem fsid=0x%016"PRIx64".0x%016"PRIx64 " from handle", fsid.major, fsid.minor); return NFS4ERR_STALE; } ds = gsh_calloc(1, sizeof(struct gpfs_ds)); *handle = &ds->ds; fsal_ds_handle_init(*handle, pds); /* Connect lazily when a FILE_SYNC4 write forces us to, not here. */ ds->connected = false; ds->gpfs_fs = fs->private_data; memcpy(&ds->wire, desc->addr, desc->len); return NFS4_OK; }