static void workspace_nfs_open(fuse_req_t req, struct workspace_fh_struct *fh)
{
    struct resource_struct *resource=fh->object->resource;
    struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data;
    char *path=fh->pathinfo.path + fh->relpath;
    struct nfsfh *nfsfh=NULL;
    int result=0;

    if (strlen(path)==0) path=(char *) rootpath;

    logoutput("workspace_nfs_open, path %s", path);

    pthread_mutex_lock(&nfs_export->mutex);

    result=nfs_open(nfs_ctx, path, fh->flags, &nfsfh);

    pthread_mutex_unlock(&nfs_export->mutex);

    if (result==0) {

	fh->handle.data=(void *) nfsfh;
	fuse_reply_open(req, fh->fi);

    } else {

	fuse_reply_err(req, abs(result));

    }

    free_path_pathinfo(&fh->pathinfo);

}
static void workspace_nfs_opendir(fuse_req_t req, struct workspace_dh_struct *dh)
{
    struct resource_struct *resource=dh->object->resource;
    struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data;
    char *path=dh->pathinfo.path + dh->relpath;
    unsigned int error=0;
    struct directory_struct *directory=dh->directory;
    struct nfsdir *dir=NULL;
    int result=0;

    if (strlen(path)==0) path=(char *) rootpath;

    logoutput("workspace_nfs_opendir: path %s", path);

    pthread_mutex_lock(&nfs_export->mutex);

    result=nfs_opendir(nfs_ctx, path, &dir);

    pthread_mutex_unlock(&nfs_export->mutex);

    if (result==0) {

	dh->handle.data = (void *) dir;

        fuse_reply_open(req, dh->fi);
	free_path_pathinfo(&dh->pathinfo);

	return;

    } else {

	error=abs(result);

    }

    logoutput("workspace_opendir, error %i", error);

    fuse_reply_err(req, error);
    free_path_pathinfo(&dh->pathinfo);

}
static void workspace_nfs_readlink(fuse_req_t req, struct entry_struct *entry, struct call_info_struct *call_info)
{
    struct resource_struct *resource=call_info->object->resource;
    struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data;
    char *path=call_info->pathinfo.path + call_info->relpath;
    int result=0;
    int len=512;
    char buffer[len];

    if (strlen(path)==0) path=(char *) rootpath;

    logoutput("workspace_nfs_readlink, path %s", path);

    /*
	TODO: make this buffer size dynamic, but how?
	what error does nfs_readlink return when the buffer is too small?
	(see the sketch after this function)
    */

    pthread_mutex_lock(&nfs_export->mutex);

    result=nfs_readlink(nfs_ctx, path, buffer, len);

    pthread_mutex_unlock(&nfs_export->mutex);

    if (result<0) {

	logoutput("workspace_nfs_readlink, error reading readlink of %s, error %i:%s", path, abs(result), nfs_get_error(nfs_ctx));

	fuse_reply_err(req, abs(result));

    } else {

	fuse_reply_readlink(req, buffer);

    }

    free_path_pathinfo(&call_info->pathinfo);

}
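/*
    Hedged sketch for the TODO in workspace_nfs_readlink above: retry with a
    growing buffer instead of a fixed 512 bytes. The helper name is made up for
    illustration and it assumes, as the code above does, that nfs_readlink
    returns a negative errno on failure and copies at most the given number of
    bytes; <stdlib.h>, <string.h>, <limits.h> and <errno.h> are assumed to be
    included. The caller would hold nfs_export->mutex around the call, reply
    with fuse_reply_readlink and free the returned buffer.
*/

static int workspace_nfs_readlink_alloc(struct nfs_context *nfs_ctx, const char *path, char **p_buffer)
{
    size_t size=512;
    char *buffer=NULL;
    int result=0;

    while (size<=PATH_MAX) {
	char *tmp=realloc(buffer, size);

	if (! tmp) {

	    free(buffer);
	    return -ENOMEM;

	}

	buffer=tmp;
	memset(buffer, 0, size);

	result=nfs_readlink(nfs_ctx, path, buffer, size);

	if (result<0) {

	    free(buffer);
	    return result;

	}

	if (memchr(buffer, '\0', size)) {

	    /* a terminating null is present, so the target fitted in the buffer */

	    *p_buffer=buffer;
	    return 0;

	}

	/* possibly truncated: grow the buffer and try again */

	size+=512;

    }

    free(buffer);
    return -ENAMETOOLONG;
}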
static void overlay_opendir(fuse_req_t req, struct workspace_dh_struct *dh)
{
    struct resource_struct *resource=dh->object->resource;
    struct localfile_struct *localfile=(struct localfile_struct *) resource->data;
    struct pathinfo_struct *pathinfo=&dh->pathinfo;
    unsigned int len0=pathinfo->len - dh->relpath, len1=localfile->pathinfo.len;
    char path[len0 + len1 + 1];
    struct overlay_readdir_struct *overlay_readdir=NULL;
    unsigned int error=0;
    int fd=-1;
    struct directory_struct *directory=dh->directory;
    struct statfs stfs;

    memcpy(path, localfile->pathinfo.path, len1);

    if (len0>0) {

	memcpy(path+len1, pathinfo->path + dh->relpath, len0);
	len1+=len0;

    }

    path[len1]='\0';

    logoutput("overlayfs_opendir: path %s", path);

    fd=open(path, O_RDONLY | O_DIRECTORY);

    if (fd==-1) {

	error=errno;
	goto error;

    }

    if (fstatfs(fd, &stfs)==-1) {

	error=errno;
	goto error;

    }

    overlay_readdir = malloc(sizeof(struct overlay_readdir_struct));

    if ( ! overlay_readdir ) {

	error=ENOMEM;
	goto error;

    }

    memset(overlay_readdir, 0, sizeof(struct overlay_readdir_struct));

    overlay_readdir->fd=(unsigned int) fd;
    overlay_readdir->data=NULL;
    overlay_readdir->mode=0;

    dh->handle.data = (void *) overlay_readdir;

    /*
	determine the type of filesystem
	use a portable generic function here?
	(see the sketch after this function)
    */

    if (stfs.f_bfree==0) {

	/*
	    dealing with a system fs: use readdir
	    and a full or simple synchronize
	*/

	overlay_readdir->data=(void *) init_readdir_readdir(path, fd, &error);

	if (! overlay_readdir->data) {

	    if (error==0) error=EIO;
	    goto error;

	}

	if (directory->synctime.tv_sec==0 && directory->synctime.tv_nsec==0) {

	    overlay_readdir->mode |= _FW_READDIR_MODE_FULL;

	} else {

	    overlay_readdir->mode |= _FW_READDIR_MODE_SIMPLE;

	}

    } else {


	if (directory->synctime.tv_sec==0 && directory->synctime.tv_nsec==0) {

	    /* never synced before, a normal fs: use getdents and full sync*/

	    overlay_readdir->data=(void *) init_readdir_getdents(path, fd, &error);

	    if (! overlay_readdir->data) {

		if (error==0) error=EIO;
		goto error;

	    }

	    overlay_readdir->mode |= _FW_READDIR_MODE_FULL;

	} else {
	    struct stat st;

	    if (fstat(fd, &st)==-1) {

		error=errno;
		goto error;

	    }

	    logoutput("overlayfs_opendir: compare modifytime %li:%li with synctime %li:%li", st.st_mtim.tv_sec, st.st_mtim.tv_nsec, directory->synctime.tv_sec, directory->synctime.tv_nsec);

	    if (st.st_mtim.tv_sec>directory->synctime.tv_sec ||
		(st.st_mtim.tv_sec==directory->synctime.tv_sec && st.st_mtim.tv_nsec>directory->synctime.tv_nsec)) {

		/*
		    directory modification time is changed since last check
		    this means entries are added or removed
		*/

		overlay_readdir->data=(void *) init_readdir_getdents(path, fd, &error);

		if (! overlay_readdir->data) {

		    if (error==0) error=EIO;
		    goto error;

		}

		overlay_readdir->mode |= _FW_READDIR_MODE_SIMPLE;

	    } else {

		overlay_readdir->data=(void *) directory->first;
		overlay_readdir->mode |= _FW_READDIR_MODE_VIRTUAL;

	    }

	}

    }

    fuse_reply_open(req, dh->fi);
    add_pathcache(&dh->pathinfo, dh->parent, dh->object, dh->relpath);
    free_path_pathinfo(&dh->pathinfo);

    return;

    error:

    fuse_reply_err(req, error);

    if (fd>=0) {

	close(fd);
	fd=-1;

    }

    if (overlay_readdir) {

	if (overlay_readdir->data && (overlay_readdir->mode & (_FW_READDIR_MODE_SIMPLE | _FW_READDIR_MODE_FULL))) {
	    struct readdir_struct *readdir=(struct readdir_struct *) overlay_readdir->data;

	    if (readdir->close) {

		(* readdir->close) (readdir);

	    } else {

		free(readdir);

	    }

	}

	overlay_readdir->data=NULL;
	free(overlay_readdir);
	overlay_readdir=NULL;

    }

    logoutput("overlayfs_opendir, error %i", error);
    free_path_pathinfo(&dh->pathinfo);

}
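/*
    Hedged sketch for the "determine the type of filesystem" question in
    overlay_opendir above: on Linux the f_type field filled in by fstatfs can
    be compared against the magic numbers from <linux/magic.h>, which is
    assumed to be included. This is not the portable generic function the
    comment asks for, the helper name is made up, and the set of filesystems
    treated as "system" ones is only an example. overlay_opendir could use
    such a check instead of the f_bfree==0 heuristic to choose between the
    readdir and getdents paths.
*/

static int overlay_fs_is_system(struct statfs *stfs)
{

    switch (stfs->f_type) {

	case PROC_SUPER_MAGIC:
	case SYSFS_MAGIC:
	case DEVPTS_SUPER_MAGIC:

	    return 1;

    }

    return 0;

}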
static void overlay_readlink(fuse_req_t req, struct entry_struct *entry, struct call_info_struct *call_info)
{
    struct resource_struct *resource=call_info->object->resource;
    struct localfile_struct *localfile=(struct localfile_struct *) resource->data;
    struct pathinfo_struct *pathinfo=&call_info->pathinfo;
    unsigned int len0=pathinfo->len - call_info->relpath, len1=localfile->pathinfo.len;
    char path[len0 + len1 + 1];
    char *buff=NULL;
    size_t size=512;
    unsigned int error=0;

    memcpy(path, localfile->pathinfo.path, len1);

    if (len0>0) {

	memcpy(path+len1, pathinfo->path + call_info->relpath, len0);
	len1+=len0;

    }

    path[len1]='\0';

    logoutput("overlayfs_readlink: path %s", call_info->pathinfo.path);

    while(size<=PATH_MAX) {
	ssize_t lenread=0;

	if (buff) {
	    char *tmp=realloc(buff, size);

	    /* avoid leaking the old buffer when realloc fails */

	    if (! tmp) free(buff);
	    buff=tmp;

	} else {

	    buff = malloc(size);

	}

	if ( buff ) {

    	    if ((lenread=readlink(path, buff, size))==-1) {

		error=errno;

		free(buff);
		goto out;

	    }

	    if (lenread < size) {

		/* success */

		buff[lenread] = '\0';
		fuse_reply_readlink(req, buff);

		free(buff);
		free_path_pathinfo(&call_info->pathinfo);

		return;

	    }

	    size+=512;

	    if (size>PATH_MAX) {

		error=ENAMETOOLONG;
		break;

	    }

	} else {

	    error=ENOMEM;
	    break;

	}

    }

    out:

    logoutput("overlayfs_readlink: error %i", error);

    fuse_reply_err(req, error);

    free_path_pathinfo(&call_info->pathinfo);

}
static void overlay_getattr(fuse_req_t req, struct entry_struct *entry, struct call_info_struct *call_info)
{
    struct resource_struct *resource=call_info->object->resource;
    struct localfile_struct *localfile=(struct localfile_struct *) resource->data;
    struct pathinfo_struct *pathinfo=&call_info->pathinfo;
    unsigned int len0=pathinfo->len - call_info->relpath, len1=localfile->pathinfo.len;
    char path[len0 + len1 + 1];
    struct stat st;

    memcpy(path, localfile->pathinfo.path, len1);

    if (len0>0) {

	memcpy(path+len1, pathinfo->path + call_info->relpath, len0);
	len1+=len0;

    }

    path[len1]='\0';

    memset(&st, 0, sizeof(struct stat));

    logoutput("overlayfs_getattr, path %s", path);

    if (lstat(path, &st)==-1) {

	fuse_reply_err(req, ENOENT);

    } else {
	struct inode_struct *inode=entry->inode;

	inode->mode=st.st_mode;
	inode->nlink=st.st_nlink;
	inode->uid=st.st_uid;
	inode->gid=st.st_gid;
	inode->rdev=st.st_rdev;

	if (S_ISDIR(st.st_mode)) {

	    st.st_size=0;

	} else {

	    inode->size=st.st_size;

	}

	inode->mtim.tv_sec=st.st_mtim.tv_sec;
	inode->mtim.tv_nsec=st.st_mtim.tv_nsec;

	inode->ctim.tv_sec=st.st_ctim.tv_sec;
	inode->ctim.tv_nsec=st.st_ctim.tv_nsec;

	st.st_ino=inode->ino;
	st.st_dev=0;

	fuse_reply_attr(req, &st, fs_options.attr_timeout);

    }

    free_path_pathinfo(&call_info->pathinfo);

}
static void overlay_lookup_noncached(fuse_req_t req, struct inode_struct *pinode, struct name_struct *xname, struct call_info_struct *call_info)
{
    struct resource_struct *resource=call_info->object->resource;
    struct localfile_struct *localfile=(struct localfile_struct *) resource->data;
    struct pathinfo_struct *pathinfo=&call_info->pathinfo;
    unsigned int len0=pathinfo->len - call_info->relpath, len1=localfile->pathinfo.len;
    char path[len0 + len1 + 1];
    struct stat st;

    memcpy(path, localfile->pathinfo.path, len1);

    if (len0>0) {

	memcpy(path+len1, pathinfo->path + call_info->relpath, len0);
	len1+=len0;

    }

    path[len1]='\0';

    memset(&st, 0, sizeof(struct stat));

    logoutput("overlayfs_lookup_cached, path %s", path);

    if (lstat(path, &st)==-1) {

	fuse_reply_err(req, ENOENT);

    } else {
	struct entry_struct *entry=NULL, *parent=pinode->alias;
	struct inode_struct *inode;

	entry=create_entry(parent, xname);
	inode=create_inode();

	if (entry && inode) {
	    struct fuse_entry_param e;
	    unsigned int error=0;

	    add_inode_hashtable(inode, increase_inodes_workspace, (void *) call_info->workspace_mount);
	    insert_entry(entry, &error, 0);

	    adjust_pathmax(call_info->pathinfo.len);

	    e.ino = inode->ino;
	    e.generation = 1;
	    e.attr_timeout = fs_options.attr_timeout;
	    e.entry_timeout = fs_options.entry_timeout;

	    e.attr.st_ino = e.ino;
	    e.attr.st_mode = st.st_mode;
	    e.attr.st_nlink = st.st_nlink;
	    e.attr.st_uid = st.st_uid;
	    e.attr.st_gid = st.st_gid;
	    e.attr.st_rdev = st.st_rdev;
	    e.attr.st_atim.tv_sec = st.st_atim.tv_sec;
	    e.attr.st_atim.tv_nsec = st.st_atim.tv_nsec;
	    e.attr.st_mtim.tv_sec = st.st_mtim.tv_sec;
	    e.attr.st_mtim.tv_nsec = st.st_mtim.tv_nsec;
	    e.attr.st_ctim.tv_sec = st.st_ctim.tv_sec;
	    e.attr.st_ctim.tv_nsec = st.st_ctim.tv_nsec;

	    e.attr.st_blksize=4096;
	    e.attr.st_blocks=0;

	    inode->mode=st.st_mode;
	    inode->nlink=st.st_nlink;
	    inode->uid=st.st_uid;
	    inode->gid=st.st_gid;

	    inode->rdev=st.st_rdev;

	    if (S_ISDIR(st.st_mode)) {

		e.attr.st_size = 0;

	    } else {

		inode->size=st.st_size;
		e.attr.st_size = st.st_size;

	    }

	    inode->mtim.tv_sec=st.st_mtim.tv_sec;
	    inode->mtim.tv_nsec=st.st_mtim.tv_nsec;

	    inode->ctim.tv_sec=st.st_ctim.tv_sec;
	    inode->ctim.tv_nsec=st.st_ctim.tv_nsec;

	    fuse_reply_entry(req, &e);

	} else {

	    /* not enough memory to allocate entry and/or inode */

	    if (entry) {

		destroy_entry(entry);
		entry=NULL;

	    }

	    if (inode) {

		free(inode);
		inode=NULL;

	    }

	    fuse_reply_err(req, ENOMEM);

	}

    }

    free_path_pathinfo(&call_info->pathinfo);

}
static void overlay_lookup_cached(fuse_req_t req, struct entry_struct *entry, struct call_info_struct *call_info)
{
    struct resource_struct *resource=call_info->object->resource;
    struct localfile_struct *localfile=(struct localfile_struct *) resource->data;
    struct pathinfo_struct *pathinfo=&call_info->pathinfo;
    unsigned int len0=pathinfo->len - call_info->relpath, len1=localfile->pathinfo.len;
    char path[len0 + len1 + 1];
    struct stat st;

    memcpy(path, localfile->pathinfo.path, len1);

    if (len0>0) {

	memcpy(path+len1, pathinfo->path + call_info->relpath, len0);
	len1+=len0;

    }

    path[len1]='\0';

    memset(&st, 0, sizeof(struct stat));

    logoutput("overlayfs_lookup_cached, path %s", path);

    if (lstat(path, &st)==-1) {
	struct inode_struct *inode=entry->inode;
	unsigned int error=0;

	inode=entry->inode;
	inode->alias=NULL;

	remove_entry(entry, &error);
	queue_remove(call_info->object, entry, &error);
	entry=NULL;

	fuse_reply_err(req, ENOENT);

    } else {
	struct fuse_entry_param e;
	struct inode_struct *inode=entry->inode;

	e.ino = inode->ino;
	e.generation = 1;
	e.attr_timeout = fs_options.attr_timeout;
	e.entry_timeout = fs_options.entry_timeout;

	e.attr.st_ino = e.ino;
	e.attr.st_mode = st.st_mode;
	e.attr.st_nlink = st.st_nlink;
	e.attr.st_uid = st.st_uid;
	e.attr.st_gid = st.st_gid;
	e.attr.st_rdev = st.st_rdev;
	e.attr.st_atim.tv_sec = st.st_atim.tv_sec;
	e.attr.st_atim.tv_nsec = st.st_atim.tv_nsec;
	e.attr.st_mtim.tv_sec = st.st_mtim.tv_sec;
	e.attr.st_mtim.tv_nsec = st.st_mtim.tv_nsec;
	e.attr.st_ctim.tv_sec = st.st_ctim.tv_sec;
	e.attr.st_ctim.tv_nsec = st.st_ctim.tv_nsec;

	e.attr.st_blksize=4096;
	e.attr.st_blocks=0;

	inode->mode=st.st_mode;
	inode->nlink=st.st_nlink;
	inode->uid=st.st_uid;
	inode->gid=st.st_gid;

	inode->rdev=st.st_rdev;

	if (S_ISDIR(st.st_mode)) {

	    e.attr.st_size = 0;

	} else {

	    inode->size=st.st_size;
	    e.attr.st_size = st.st_size;

	}

	inode->mtim.tv_sec=st.st_mtim.tv_sec;
	inode->mtim.tv_nsec=st.st_mtim.tv_nsec;

	inode->ctim.tv_sec=st.st_ctim.tv_sec;
	inode->ctim.tv_nsec=st.st_ctim.tv_nsec;

	fuse_reply_entry(req, &e);

    }

    free_path_pathinfo(&call_info->pathinfo);

}
static void workspace_nfs_mknod(fuse_req_t req, struct inode_struct *pinode, struct name_struct *xname, struct call_info_struct *call_info, mode_t mode, dev_t rdev)
{
    struct resource_struct *resource=call_info->object->resource;
    struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data;
    char *path=call_info->pathinfo.path + call_info->relpath;
    struct entry_struct *entry=NULL, *parent=pinode->alias;
    struct inode_struct *inode;

    logoutput("workspace_nfs_mknod, path %s", path);

    entry=create_entry(parent, xname);
    inode=create_inode();

    if (entry && inode) {
	int result=0;

	inode->alias=entry;
	entry->inode=inode;

        pthread_mutex_lock(&nfs_export->mutex);

	result=nfs_mknod(nfs_ctx, path, mode, rdev);

	pthread_mutex_unlock(&nfs_export->mutex);

	if (result==0) {
	    struct fuse_entry_param e;
	    unsigned int error=0;
	    struct stat st;

	    memset(&st, 0, sizeof(struct stat));

	    add_inode_hashtable(inode, increase_inodes_workspace, (void *) call_info->object->workspace_mount);
	    insert_entry(entry, &error, 0);

	    adjust_pathmax(call_info->pathinfo.len);

	    pthread_mutex_lock(&nfs_export->mutex);

	    nfs_chmod(nfs_ctx, path, mode);
	    nfs_stat(nfs_ctx, path, &st);

	    pthread_mutex_unlock(&nfs_export->mutex);

	    inode->nlookup=1;
	    inode->mode=st.st_mode;
	    inode->nlink=st.st_nlink;
	    inode->uid=st.st_uid;
	    inode->gid=st.st_gid;

	    inode->rdev=st.st_rdev;
	    inode->size=st.st_size;

	    inode->mtim.tv_sec=st.st_mtim.tv_sec;
	    inode->mtim.tv_nsec=st.st_mtim.tv_nsec;

	    inode->ctim.tv_sec=st.st_ctim.tv_sec;
	    inode->ctim.tv_nsec=st.st_ctim.tv_nsec;

	    e.ino = inode->ino;
	    e.generation = 1;
	    e.attr_timeout = fs_options.attr_timeout;
	    e.entry_timeout = fs_options.entry_timeout;

	    e.attr.st_ino = e.ino;
	    e.attr.st_mode = st.st_mode;
	    e.attr.st_nlink = st.st_nlink;
	    e.attr.st_uid = st.st_uid;
	    e.attr.st_gid = st.st_gid;
	    e.attr.st_rdev = st.st_rdev;
	    e.attr.st_atim.tv_sec = st.st_atim.tv_sec;
	    e.attr.st_atim.tv_nsec = st.st_atim.tv_nsec;
	    e.attr.st_mtim.tv_sec = st.st_mtim.tv_sec;
	    e.attr.st_mtim.tv_nsec = st.st_mtim.tv_nsec;
	    e.attr.st_ctim.tv_sec = st.st_ctim.tv_sec;
	    e.attr.st_ctim.tv_nsec = st.st_ctim.tv_nsec;

	    e.attr.st_blksize=_DEFAULT_BLOCKSIZE;

	    if (inode->size % e.attr.st_blksize == 0) {

		e.attr.st_blocks=inode->size / e.attr.st_blksize;

	    } else {

		e.attr.st_blocks=1 + inode->size / e.attr.st_blksize;

	    }

	    fuse_reply_entry(req, &e);

	} else {

	    /* error nfs create */

	    destroy_entry(entry);
	    free(inode);

	    fuse_reply_err(req, abs(result));

	}

    } else {

	/* not enough memory to allocate entry and/or inode */

	if (entry) {

	    destroy_entry(entry);
	    entry=NULL;

	}

	if (inode) {

	    free(inode);
	    inode=NULL;

	}

	fuse_reply_err(req, ENOMEM);

    }

    free_path_pathinfo(&call_info->pathinfo);

}
static void workspace_nfs_getattr(fuse_req_t req, struct entry_struct *entry, struct call_info_struct *call_info)
{
    struct resource_struct *resource=call_info->object->resource;
    struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data;
    char *path=call_info->pathinfo.path + call_info->relpath;
    struct stat st;
    int result=0;

    if (strlen(path)==0) path=(char *) rootpath;

    memset(&st, 0, sizeof(struct stat));

    logoutput("workspace_nfs_getattr, path %s", path);

    pthread_mutex_lock(&nfs_export->mutex);

    result=nfs_stat(nfs_ctx, path, &st);

    pthread_mutex_unlock(&nfs_export->mutex);

    if (result<0) {

	fuse_reply_err(req, abs(result));

    } else {
	struct inode_struct *inode=entry->inode;

	inode->mode=st.st_mode;
	inode->nlink=st.st_nlink;
	inode->uid=st.st_uid;
	inode->gid=st.st_gid;
	inode->rdev=st.st_rdev;
	inode->size=st.st_size;

	st.st_blksize=_DEFAULT_BLOCKSIZE;

	if (inode->size % st.st_blksize == 0) {

	    st.st_blocks=inode->size / st.st_blksize;

	} else {

	    st.st_blocks=1 + inode->size / st.st_blksize;

	}

	inode->mtim.tv_sec=st.st_mtim.tv_sec;
	inode->mtim.tv_nsec=st.st_mtim.tv_nsec;

	inode->ctim.tv_sec=st.st_ctim.tv_sec;
	inode->ctim.tv_nsec=st.st_ctim.tv_nsec;

	st.st_ino=inode->ino;
	st.st_dev=0;

	fuse_reply_attr(req, &st, fs_options.attr_timeout);

    }

    free_path_pathinfo(&call_info->pathinfo);

}
static void workspace_nfs_setattr(fuse_req_t req, struct entry_struct *entry, struct call_info_struct *call_info, struct stat *st, int fuse_set)
{
    struct resource_struct *resource=call_info->object->resource;
    struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data;
    char *path=call_info->pathinfo.path + call_info->relpath;
    int result=0;
    struct inode_struct *inode=entry->inode;

    if (strlen(path)==0) path=(char *) rootpath;

    logoutput("workspace_nfs_setattr, path %s", path);

    if (fuse_set & FUSE_SET_ATTR_MODE) {

	pthread_mutex_lock(&nfs_export->mutex);

	result=nfs_chmod(nfs_ctx, path, st->st_mode);

	pthread_mutex_unlock(&nfs_export->mutex);

	if (result<0) {

	    fuse_reply_err(req, -result);
	    free_path_pathinfo(&call_info->pathinfo);
	    return;

	} else {

	    inode->mode=st->st_mode;

	}

    }

    if (fuse_set & (FUSE_SET_ATTR_UID | FUSE_SET_ATTR_GID)) {
	uid_t uid=inode->uid;
	gid_t gid=inode->gid;

	if (fuse_set & FUSE_SET_ATTR_UID) uid=st->st_uid;
	if (fuse_set & FUSE_SET_ATTR_GID) gid=st->st_gid;

	pthread_mutex_lock(&nfs_export->mutex);

	result=nfs_chown(nfs_ctx, path, uid, gid);

	pthread_mutex_unlock(&nfs_export->mutex);

	if (result<0) {

	    fuse_reply_err(req, -result);
	    free_path_pathinfo(&call_info->pathinfo);
	    return;

	} else {

	    if (fuse_set & FUSE_SET_ATTR_UID) {

		inode->uid=st->st_uid;

	    } else {

		st->st_uid=inode->uid;

	    }

	    if (fuse_set & FUSE_SET_ATTR_GID) {

		inode->gid=st->st_gid;

	    } else {

		st->st_gid=inode->gid;

	    }

	}

    }

    if (fuse_set & FUSE_SET_ATTR_SIZE) {


	pthread_mutex_lock(&nfs_export->mutex);

	result=nfs_truncate(nfs_ctx, path, st->st_size);

	pthread_mutex_unlock(&nfs_export->mutex);

	if (result<0) {

	    fuse_reply_err(req, -result);
	    free_path_pathinfo(&call_info->pathinfo);
	    return;

	} else {

	    inode->size=st->st_size;

	}

    }

    if (fuse_set & (FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME)) {
	struct timespec rightnow;
	struct timeval newtimes[2];

	newtimes[0].tv_sec=0;
	newtimes[0].tv_usec=0;

	newtimes[1].tv_sec=inode->mtim.tv_sec;
	newtimes[1].tv_usec=inode->mtim.tv_nsec / 1000;

	if (fuse_set & (FUSE_SET_ATTR_ATIME_NOW | FUSE_SET_ATTR_MTIME_NOW)) get_current_time(&rightnow);

	if (fuse_set & FUSE_SET_ATTR_ATIME) {

	    if (fuse_set & FUSE_SET_ATTR_ATIME_NOW) {

		st->st_atim.tv_sec = rightnow.tv_sec;
		st->st_atim.tv_nsec = rightnow.tv_nsec;

	    }

	    newtimes[0].tv_sec=st->st_atim.tv_sec;
	    newtimes[0].tv_usec = st->st_atim.tv_nsec / 1000;


	}

	if (fuse_set & FUSE_SET_ATTR_MTIME) {

	    if (fuse_set & FUSE_SET_ATTR_MTIME_NOW) {

		st->st_mtim.tv_sec = rightnow.tv_sec;
		st->st_mtim.tv_nsec = rightnow.tv_nsec;

	    }

	    newtimes[1].tv_sec = st->st_mtim.tv_sec;
	    newtimes[1].tv_usec = st->st_mtim.tv_nsec / 1000;

	}

	pthread_mutex_lock(&nfs_export->mutex);

	result=nfs_utimes(nfs_ctx, path, newtimes);

	pthread_mutex_unlock(&nfs_export->mutex);

	if (result<0) {

	    fuse_reply_err(req, -result);
	    free_path_pathinfo(&call_info->pathinfo);
	    return;

	} else {

	    if (fuse_set & FUSE_SET_ATTR_MTIME) {

		inode->mtim.tv_sec=newtimes[1].tv_sec;
		inode->mtim.tv_nsec=newtimes[1].tv_usec * 1000;

	    }

	}

    }

    out:

    st->st_dev=0;
    st->st_ino=inode->ino;
    st->st_mode=inode->mode;
    st->st_nlink=inode->nlink;
    st->st_uid=inode->uid;
    st->st_gid=inode->gid;
    st->st_rdev=inode->rdev;
    st->st_size=inode->size;

    st->st_blksize=_DEFAULT_BLOCKSIZE;

    if (inode->size % st->st_blksize == 0) {

	st->st_blocks = inode->size / st->st_blksize;

    } else {

	st->st_blocks = 1 + inode->size / st->st_blksize;

    }

    memcpy(&st->st_mtim, &inode->mtim, sizeof(struct timespec));
    memcpy(&st->st_ctim, &inode->ctim, sizeof(struct timespec));

    st->st_atim.tv_sec=0;
    st->st_atim.tv_nsec=0;

    fuse_reply_attr(req, st, fs_options.attr_timeout);

    free_path_pathinfo(&call_info->pathinfo);

}
static void workspace_nfs_lookup_cached(fuse_req_t req, struct entry_struct *entry, struct call_info_struct *call_info)
{
    struct resource_struct *resource=call_info->object->resource;
    struct net_nfs_export_struct *nfs_export=(struct net_nfs_export_struct *) resource->data;
    struct nfs_context *nfs_ctx=(struct nfs_context *) nfs_export->data;
    char *path=call_info->pathinfo.path + call_info->relpath;
    struct stat st;
    int result=0;

    memset(&st, 0, sizeof(struct stat));

    if (strlen(path)==0) path=(char *) rootpath;

    logoutput("workspace_nfs_lookup_cached, path %s", path);

    pthread_mutex_lock(&nfs_export->mutex);

    result=nfs_stat(nfs_ctx, path, &st);

    pthread_mutex_unlock(&nfs_export->mutex);

    if (result<0) {

	result=abs(result);

	if (result==ENOENT) {
	    struct inode_struct *inode=entry->inode;
	    unsigned int error=0;

	    inode=entry->inode;
	    inode->alias=NULL;

	    remove_entry(entry, &error);
	    queue_remove(call_info->object, entry, &error);
	    entry=NULL;

	}

	fuse_reply_err(req, result);

    } else {
	struct fuse_entry_param e;
	struct inode_struct *inode=entry->inode;

	inode->mode=st.st_mode;
	inode->nlink=st.st_nlink;
	inode->uid=st.st_uid;
	inode->gid=st.st_gid;

	inode->rdev=st.st_rdev;

	inode->mtim.tv_sec=st.st_mtim.tv_sec;
	inode->mtim.tv_nsec=st.st_mtim.tv_nsec;

	inode->ctim.tv_sec=st.st_ctim.tv_sec;
	inode->ctim.tv_nsec=st.st_ctim.tv_nsec;

	inode->size=st.st_size;

	e.ino = inode->ino;
	e.generation = 1;
	e.attr_timeout = fs_options.attr_timeout;
	e.entry_timeout = fs_options.entry_timeout;

	get_current_time(&entry->synctime);

	e.attr.st_dev = 0;
	e.attr.st_ino = e.ino;
	e.attr.st_mode = st.st_mode;
	e.attr.st_nlink = st.st_nlink;
	e.attr.st_uid = st.st_uid;
	e.attr.st_gid = st.st_gid;
	e.attr.st_rdev = st.st_rdev;
	e.attr.st_size = st.st_size;
	e.attr.st_atim.tv_sec = st.st_atim.tv_sec;
	e.attr.st_atim.tv_nsec = st.st_atim.tv_nsec;
	e.attr.st_mtim.tv_sec = st.st_mtim.tv_sec;
	e.attr.st_mtim.tv_nsec = st.st_mtim.tv_nsec;
	e.attr.st_ctim.tv_sec = st.st_ctim.tv_sec;
	e.attr.st_ctim.tv_nsec = st.st_ctim.tv_nsec;

	e.attr.st_blksize=_DEFAULT_BLOCKSIZE;

	if (inode->size % e.attr.st_blksize == 0) {

	    e.attr.st_blocks = inode->size / e.attr.st_blksize;

	} else {

	    e.attr.st_blocks = 1 + inode->size / e.attr.st_blksize;

	}

	fuse_reply_entry(req, &e);

    }

    free_path_pathinfo(&call_info->pathinfo);

}
void evaluate_fsevent_inotify_indir(struct inotify_watch_struct *inotify_watch, struct inotify_event *i_event)
{
    struct notifywatch_struct *watch=inotify_watch->watch;
    struct pathinfo_struct pathinfo={NULL, 0, 0};
    uint32_t fsnotify_mask=0;
    struct inode_struct *inode=watch->inode;
    struct entry_struct *parent=inode->alias;

    logoutput("evaluate_fsevent_inotify_indir: %s changed (%i)", i_event->name, i_event->mask);

    if ( !(i_event->mask & IN_DELETE) && !(i_event->mask & IN_MOVED_FROM)){
	struct stat st;
	char *path=NULL;
	struct entry_struct *entry=NULL;

	path=malloc(watch->pathinfo.len + i_event->len + 1);

	if (! path) goto out;

	memcpy(path, watch->pathinfo.path, watch->pathinfo.len);
	*(path + watch->pathinfo.len) = '/';
	memcpy(path + watch->pathinfo.len + 1, i_event->name, i_event->len); /* i_event->len includes the terminating null byte */

	pathinfo.path = path;
	pathinfo.len = watch->pathinfo.len + i_event->len;
	pathinfo.flags = PATHINFOFLAGS_ALLOCATED;

	/* this should not give an error */

	if (lstat(pathinfo.path, &st)==0) {
	    unsigned int row=0;
	    unsigned int error=0;

	    entry=find_entry_by_name_sl(parent, i_event->name, &row, &error);

	    if (entry) {

		inode=entry->inode;

		/*
		    compare stat with cached values
		*/

		if (inode->mode != st.st_mode) {

		    fsnotify_mask |= IN_ATTRIB;
		    inode->mode = st.st_mode;

		}

		if (inode->uid != st.st_uid) {

		    fsnotify_mask |= IN_ATTRIB;
		    inode->uid = st.st_uid;

		}

		if (inode->gid != st.st_gid) {

		    fsnotify_mask |= IN_ATTRIB;
		    inode->gid = st.st_gid;

		}

		if (! S_ISDIR(st.st_mode) && inode->type.size != st.st_size) {

		    fsnotify_mask |= IN_MODIFY;
		    inode->type.size = st.st_size;

		}

		if (!(inode->mtim.tv_sec==st.st_mtim.tv_sec) || !(inode->mtim.tv_nsec==st.st_mtim.tv_nsec)) {

		    fsnotify_mask |= IN_ATTRIB;
		    inode->mtim.tv_sec=st.st_mtim.tv_sec;
		    inode->mtim.tv_nsec=st.st_mtim.tv_nsec;

		}

		if (!(inode->ctim.tv_sec==st.st_ctim.tv_sec) || !(inode->ctim.tv_nsec==st.st_ctim.tv_nsec)) {

		    fsnotify_mask |= IN_ATTRIB;
		    inode->ctim.tv_sec=st.st_ctim.tv_sec;
		    inode->ctim.tv_nsec=st.st_ctim.tv_nsec;

		}

		get_current_time(&entry->synctime);

		/* pass through other values */

		if (i_event->mask & IN_MODIFY) fsnotify_mask |= IN_MODIFY;
		if (i_event->mask & IN_ACCESS) fsnotify_mask |= IN_ACCESS;
		if (i_event->mask & IN_CLOSE_WRITE) fsnotify_mask |= IN_CLOSE_WRITE;
		if (i_event->mask & IN_CLOSE_NOWRITE) fsnotify_mask |= IN_CLOSE_NOWRITE;
		if (i_event->mask & IN_OPEN) fsnotify_mask |= IN_OPEN;

		logoutput("evaluate_fsevent_inotify_indir: fsnotify mask %i on existing %s", fsnotify_mask, i_event->name);

		/*
		    here call the right fuse_lowlevel_notify_* function, but with which parameters?
		    and then what? what should the kernel do with that information?
		    (see the sketch after this function)
		*/

	    } else {

		entry=insert_entry_sl(parent, i_event->name, &row, &error, create_entry_cb, NULL);

		if (entry) {

		    inode=entry->inode;

		    if (i_event->mask & IN_MOVED_TO) {

			fsnotify_mask |= IN_MOVED_TO;

		    } else {

			fsnotify_mask |= IN_CREATE;

		    }

		    inode->mode = st.st_mode;
		    inode->uid = st.st_uid;
		    inode->gid = st.st_gid;
		    inode->nlink = st.st_nlink;

		    if (! S_ISDIR(st.st_mode)) inode->type.size = st.st_size;

		    inode->mtim.tv_sec=st.st_mtim.tv_sec;
		    inode->mtim.tv_nsec=st.st_mtim.tv_nsec;

		    inode->ctim.tv_sec=st.st_ctim.tv_sec;
		    inode->ctim.tv_nsec=st.st_ctim.tv_nsec;

		    inode->rdev=st.st_rdev;

		    get_current_time(&entry->synctime);

		    logoutput("evaluate_fsevent_inotify_indir: fsnotify mask %i on new %s", fsnotify_mask, i_event->name);

		    /*
			here call a new fuse_lowlevel_add function

			something like:

			fuse_lowlevel_add(struct fuse_chan *chan, fuse_ino_t parentino, char *name, size_t len);

			and what would this do?
			the kernel should send a lookup request for this entry for example, and when that is successful,
			notify fsnotify about the new entry
		    */

		    notify_kernel_create(parent->inode->ino, i_event->name);

		} else {

		    goto out;

		}

	    }


	} else {


	    if (errno==ENOENT) {

		/* inotify does not report delete, but stat does, handle it as a delete */

		i_event->mask|=IN_DELETE;

	    }

	}

    }

    if ((i_event->mask & IN_DELETE) || (i_event->mask & IN_MOVED_FROM)) {
	unsigned int row=0;
	unsigned int error=0;
	struct entry_struct *entry=NULL;

	logoutput("evaluate_fsevent_inotify_indir: %s deleted", i_event->name);

	entry=find_entry_by_name_sl(parent, i_event->name, &row, &error);

	if (entry) {

	    if (S_ISDIR(entry->inode->mode)) remove_directory_recursive(entry);

	    delete_entry_sl(entry, &row, &error);

	    if (error==0) {

		logoutput("evaluate_fsevent_inotify_indir: %s is found at row %i and deleted", i_event->name, row);

		notify_kernel_delete(parent->inode->ino, entry->inode->ino, i_event->name);

		remove_entry(entry, &error);

	    } else {

		logoutput("evaluate_fsevent_inotify_indir: %s is found at row %i and not deleted (error=%i)", i_event->name, row, error);

	    }

	} else {

	    logoutput("evaluate_fsevent_inotify_indir: %s reported deleted, but not found", i_event->name);

	}

    }

    logoutput("evaluate_fsevent_inotify_indir: ready for %s (%i)", i_event->name, i_event->mask);

    out:

    free_path_pathinfo(&pathinfo);

}
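/*
    Hedged sketch for the notify questions in evaluate_fsevent_inotify_indir
    above: libfuse 2.x offers fuse_lowlevel_notify_inval_entry and
    fuse_lowlevel_notify_delete for this. The helpers below only illustrate how
    notify_kernel_create and notify_kernel_delete could be built on them; they
    are not the project's actual implementation. The channel pointer is an
    assumption (it would come from fuse_mount), <fuse/fuse_lowlevel.h> and
    <string.h> are assumed to be included, and a fuse_lowlevel_add call as
    speculated above does not exist: the closest approach is to invalidate the
    entry so the kernel issues a fresh LOOKUP the next time the name is used.
*/

static struct fuse_chan *notify_chan;	/* assumed to be set right after fuse_mount */

static void notify_kernel_create_sketch(fuse_ino_t parentino, const char *name)
{

    /* drop any cached (negative) dentry for this name, forcing a new LOOKUP on next access */

    if (notify_chan) fuse_lowlevel_notify_inval_entry(notify_chan, parentino, name, strlen(name));

}

static void notify_kernel_delete_sketch(fuse_ino_t parentino, fuse_ino_t ino, const char *name)
{

    /* tell the kernel the entry was removed so it can drop the matching dentry */

    if (notify_chan) fuse_lowlevel_notify_delete(notify_chan, parentino, ino, name, strlen(name));

}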