Example #1
/*
 * Find an empty file descriptor entry, and mark it busy.
 */
int get_unused_fd(void)
{
	struct files_struct * files = current->files;
	int fd, error;
	struct fdtable *fdt;

	if (need_files_checkpoint())
		checkpoint_files();

  	error = -EMFILE;
	spin_lock(&files->file_lock);

repeat:
	fdt = files_fdtable(files);
	fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
				files->next_fd);
	
	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	if (fd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
		goto out;

	/* Do we need to expand the fd array or fd set?  */
	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	if (error) {
		/*
	 	 * If we needed to expand the fd array we
		 * might have blocked - try again.
		 */
		error = -EMFILE;
		goto repeat;
	}

	FD_SET(fd, fdt->open_fds);
	FD_CLR(fd, fdt->close_on_exec);
	files->next_fd = fd + 1;
#if 1
	/* Sanity check */
	if (fdt->fd[fd] != NULL) {
		printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
		fdt->fd[fd] = NULL;
	}
#endif
	error = fd;
out:
	spin_unlock(&files->file_lock);
	return error;
}
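
The allocation above only reserves the slot; nothing is published in fdt->fd[fd] until a later fd_install(). A minimal caller-side sketch of that pairing (assumed helper code, not taken from the examples; filp_open(), put_unused_fd() and fd_install() are real kernel APIs of this era):

static int install_filp_sketch(const char *name, int flags, int mode)
{
	struct file *filp;
	int fd;

	fd = get_unused_fd();		/* reserves a slot, may return -EMFILE */
	if (fd < 0)
		return fd;

	filp = filp_open(name, flags, mode);
	if (IS_ERR(filp)) {
		put_unused_fd(fd);	/* give the reserved slot back */
		return PTR_ERR(filp);
	}

	fd_install(fd, filp);		/* publish filp in fdt->fd[fd] */
	return fd;
}

On the error path the reserved descriptor must be released with put_unused_fd(); dropping it silently would leak the bitmap bit that get_unused_fd() set.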
Example #2
static void autofs_dev_ioctl_fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}
Example #3
void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	fdt->user[fd].installer = current->pid;
	getnstimeofday(&fdt->user[fd].open_time);
	spin_unlock(&files->file_lock);
}
Example #4
static int seq_show(struct seq_file *m, void *v)
{
	struct files_struct *files = NULL;
	int f_flags = 0, ret = -ENOENT;
	struct file *file = NULL;
	struct task_struct *task;

	task = get_proc_task(m->private);
	if (!task)
		return -ENOENT;

	if (!gr_acl_handle_procpidmem(task))
		files = get_files_struct(task);
	put_task_struct(task);

	if (files) {
		int fd = proc_fd(m->private);

		spin_lock(&files->file_lock);
		file = fcheck_files(files, fd);
		if (file) {
			struct fdtable *fdt = files_fdtable(files);

			f_flags = file->f_flags;
			if (close_on_exec(fd, fdt))
				f_flags |= O_CLOEXEC;

			get_file(file);
			ret = 0;
		}
		spin_unlock(&files->file_lock);
		put_files_struct(files);
	}

	if (ret)
		return ret;

	seq_printf(m, "pos:\t%lli\nflags:\t0%o\nmnt_id:\t%i\n",
		   (long long)file->f_pos, f_flags,
		   real_mount(file->f_path.mnt)->mnt_id);

	show_fd_locks(m, file, files);
	if (seq_has_overflowed(m))
		goto out;

	if (file->f_op->show_fdinfo)
		file->f_op->show_fdinfo(m, file);

out:
	fput(file);
	return 0;
}
Example #5
static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
				struct pid *pid, struct task_struct *p)
{
	struct group_info *group_info;
	int g;
	struct fdtable *fdt = NULL;
	const struct cred *cred;
	pid_t ppid, tpid;

	rcu_read_lock();
	ppid = pid_alive(p) ?
		task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
	tpid = 0;
	if (pid_alive(p)) {
		struct task_struct *tracer = ptrace_parent(p);
		if (tracer)
			tpid = task_pid_nr_ns(tracer, ns);
	}
	cred = get_task_cred(p);
	seq_printf(m,
		"State:\t%s\n"
		"Tgid:\t%d\n"
		"Pid:\t%d\n"
		"PPid:\t%d\n"
		"TracerPid:\t%d\n"
		"Uid:\t%d\t%d\t%d\t%d\n"
		"Gid:\t%d\t%d\t%d\t%d\n",
		get_task_state(p),
		task_tgid_nr_ns(p, ns),
		pid_nr_ns(pid, ns),
		ppid, tpid,
		cred->uid, cred->euid, cred->suid, cred->fsuid,
		cred->gid, cred->egid, cred->sgid, cred->fsgid);

	task_lock(p);
	if (p->files)
		fdt = files_fdtable(p->files);
	seq_printf(m,
		"FDSize:\t%d\n"
		"Groups:\t",
		fdt ? fdt->max_fds : 0);
	rcu_read_unlock();

	group_info = cred->group_info;
	task_unlock(p);

	for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++)
		seq_printf(m, "%d ", GROUP_AT(group_info, g));
	put_cred(cred);

	seq_putc(m, '\n');
}
Example #6
void fd_install(unsigned int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	file->record_pid = current->pid;
	strncpy(file->record_comm, current->comm, 15);
	file->record_comm[15] = '\0';
	spin_unlock(&files->file_lock);
}
Example #7
void fastcall fd_install(unsigned int fd, struct file * file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	if (need_files_checkpoint())
		checkpoint_files();
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);

	rcu_assign_pointer(fdt->fd[fd], file);

	spin_unlock(&files->file_lock);
}
Example #8
static void task_fd_install(
	struct shfile_proc *proc, unsigned int fd, struct file *file)
{
	struct files_struct *files = proc->files;
	struct fdtable *fdt;

	if (files == NULL)
		return;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
}
Example #9
static void timod_queue(unsigned int fd, struct T_primsg *it)
{
	struct sol_socket_struct *sock;
	struct fdtable *fdt;

	SOLD("queuing primsg");
	fdt = files_fdtable(current->files);
	sock = (struct sol_socket_struct *)fdt->fd[fd]->private_data;
	it->next = sock->pfirst;
	sock->pfirst = it;
	if (!sock->plast)
		sock->plast = it;
	timod_wake_socket(fd);
	SOLD("done");
}
Example #10
void fdleak_debug_print(struct files_struct *files)
{
	struct fdtable *fdt;
	unsigned int n;

	fdt = files_fdtable(files);

	for (n = 0; n < fdt->max_fds; n++) {
		if (rcu_dereference_raw(fdt->fd[n]) != NULL) {
			printk("[TOO MANY OPEN FILES] (%d)/(%d) = %s\n",
			       n, fdt->max_fds,
			       fdt->fd[n]->f_path.dentry->d_name.name);
		}
	}
}
Example #11
static void timod_wake_socket(unsigned int fd)
{
	struct socket *sock;
	struct fdtable *fdt;

	SOLD("wakeing socket");
	fdt = files_fdtable(current->files);
	sock = SOCKET_I(fdt->fd[fd]->f_dentry->d_inode);
	wake_up_interruptible(&sock->wait);
	read_lock(&sock->sk->sk_callback_lock);
	if (sock->fasync_list && !test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
		__kill_fasync(sock->fasync_list, SIGIO, POLL_IN);
	read_unlock(&sock->sk->sk_callback_lock);
	SOLD("done");
}
Example #12
int mali_stream_create_fence(mali_sync_pt *pt)
{
	struct sync_fence *fence;
	struct fdtable * fdt;
	struct files_struct * files;
	int fd = -1;

	fence = sync_fence_create("mali_fence", pt);
	if (!fence)
	{
		sync_pt_free(pt);
		fd = -EFAULT;
		goto out;
	}

	/* create a fd representing the fence */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
	{
		sync_fence_put(fence);
		goto out;
	}
#else
	fd = get_unused_fd();
	if (fd < 0)
	{
		sync_fence_put(fence);
		goto out;
	}

	files = current->files;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
	__set_close_on_exec(fd, fdt);
#else
	FD_SET(fd, fdt->close_on_exec);
#endif
	spin_unlock(&files->file_lock);
#endif /* Linux > 3.6 */

	/* bind fence to the new fd */
	sync_fence_install(fence, fd);

out:
	return fd;
}
Example #13
static void timod_queue_end(unsigned int fd, struct T_primsg *it)
{
	struct sol_socket_struct *sock;
	struct fdtable *fdt;

	SOLD("queuing primsg at end");
	fdt = files_fdtable(current->files);
	sock = (struct sol_socket_struct *)fdt->fd[fd]->private_data;
	it->next = NULL;
	if (sock->plast)
		sock->plast->next = it;
	else
		sock->pfirst = it;
	sock->plast = it;
	SOLD("done");
}
Example #14
static inline char * task_state(struct task_struct *p, char *buffer)
{
	struct group_info *group_info;
	int g;
	struct fdtable *fdt = NULL;

	read_lock(&tasklist_lock);
	buffer += sprintf(buffer,
		"State:\t%s\n"
		"SleepAVG:\t%lu%%\n"
		"Tgid:\t%d\n"
		"Pid:\t%d\n"
		"PPid:\t%d\n"
		"TracerPid:\t%d\n"
		"Uid:\t%d\t%d\t%d\t%d\n"
		"Gid:\t%d\t%d\t%d\t%d\n",
		get_task_state(p),
		(p->sleep_avg/1024)*100/(1020000000/1024),
	       	p->tgid,
		p->pid, pid_alive(p) ? p->group_leader->real_parent->tgid : 0,
		pid_alive(p) && p->ptrace ? p->parent->pid : 0,
		p->uid, p->euid, p->suid, p->fsuid,
		p->gid, p->egid, p->sgid, p->fsgid);
	read_unlock(&tasklist_lock);
	task_lock(p);
	rcu_read_lock();
	if (p->files)
		fdt = files_fdtable(p->files);
	buffer += sprintf(buffer,
		"FDSize:\t%d\n"
		"Groups:\t",
		fdt ? fdt->max_fds : 0);
	rcu_read_unlock();

	group_info = p->group_info;
	get_group_info(group_info);
	task_unlock(p);

	for (g = 0; g < min(group_info->ngroups,NGROUPS_SMALL); g++)
		buffer += sprintf(buffer, "%d ", GROUP_AT(group_info,g));
	put_group_info(group_info);

	buffer += sprintf(buffer, "\n");
	return buffer;
}
Example #15
static struct hone_event *__add_files(struct hone_reader *reader,
		struct hone_event *event, struct task_struct *task)
{
	struct hone_event *sk_event;
	struct files_struct *files;
	struct file *file;
	struct fdtable *fdt;
	struct socket *sock;
	struct sock *sk;
	unsigned long flags, set;
	int i, fd;
	
	if (!(files = get_files_struct(task)))
		return event;
	spin_lock_irqsave(&files->file_lock, flags);
	if (!(fdt = files_fdtable(files)))
		goto out;
	for (i = 0; (fd = i * BITS_PER_LONG) < fdt->max_fds; i++) {
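		/* OPEN_FDS is presumably a compatibility macro for the open-fd
		 * bitmap (fdt->open_fds vs. open_fds->fds_bits, depending on
		 * kernel version); it is defined outside this excerpt. */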
		for (set = fdt->OPEN_FDS[i]; set; set >>= 1, fd++) {
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file || file->f_op != &socket_file_ops || !file->private_data)
				continue;
			sock = file->private_data;
			sk = sock->sk;
			if (!sk || (sk->sk_family != PF_INET && sk->sk_family != PF_INET6))
				continue;

			if ((sk_event = __alloc_socket_event((unsigned long) sk,
							0, task, GFP_ATOMIC))) {
				sk_event->next = event;
				event = sk_event;
				memcpy(&event->ts, &task->start_time, sizeof(event->ts));
			} else {
				atomic64_inc(&reader->info.dropped.socket);
			}
		}
	}
out:
	spin_unlock_irqrestore(&files->file_lock, flags);
	put_files_struct(files);
	return event;
}
Example #16
static inline void task_state(struct seq_file *m, struct pid *pid,
				struct task_struct *p)
{
	struct group_info *group_info;
	int g;
	struct fdtable *fdt = NULL;

	rcu_read_lock();
	seq_printf(m,
		"State:\t%s\n"
		"SleepAVG:\t%lu%%\n"
		"Tgid:\t%d\n"
		"Pid:\t%d\n"
		"PPid:\t%d\n"
		"TracerPid:\t%d\n"
		"Uid:\t%d\t%d\t%d\t%d\n"
		"Gid:\t%d\t%d\t%d\t%d\n",
		get_task_state(p),
		(p->sleep_avg/1024)*100/(1020000000/1024),
	       	p->tgid, p->pid,
	       	pid_alive(p) ? rcu_dereference(p->real_parent)->tgid : 0,
		pid_alive(p) && p->ptrace ? rcu_dereference(p->parent)->pid : 0,
		p->uid, p->euid, p->suid, p->fsuid,
		p->gid, p->egid, p->sgid, p->fsgid);

	task_lock(p);
	if (p->files)
		fdt = files_fdtable(p->files);
	seq_printf(m,
		"FDSize:\t%d\n"
		"Groups:\t",
		fdt ? fdt->max_fds : 0);
	rcu_read_unlock();

	group_info = p->group_info;
	get_group_info(group_info);
	task_unlock(p);

	for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++)
		seq_printf(m, "%d ", GROUP_AT(group_info, g));
	put_group_info(group_info);

	seq_printf(m, "\n");
}
Example #17
static int seq_show(struct seq_file *m, void *v)
{
	struct files_struct *files = NULL;
	int f_flags = 0, ret = -ENOENT;
	struct file *file = NULL;
	struct task_struct *task;

	task = get_proc_task(m->private);
	if (!task)
		return -ENOENT;

	files = get_files_struct(task);
	put_task_struct(task);

	if (files) {
		int fd = proc_fd(m->private);

		spin_lock(&files->file_lock);
		file = fcheck_files(files, fd);
		if (file) {
			struct fdtable *fdt = files_fdtable(files);

			f_flags = file->f_flags;
			if (close_on_exec(fd, fdt))
				f_flags |= O_CLOEXEC;

			get_file(file);
			ret = 0;
		}
		spin_unlock(&files->file_lock);
		put_files_struct(files);
	}

	if (!ret) {
                seq_printf(m, "pos:\t%lli\nflags:\t0%o\n",
			   (long long)file->f_pos, f_flags);
		if (file->f_op->show_fdinfo)
			ret = file->f_op->show_fdinfo(m, file);
		fput(file);
	}

	return ret;
}
Example #18
static int proc_readfd_common(struct file *file, struct dir_context *ctx,
			      instantiate_t instantiate)
{
	struct task_struct *p = get_proc_task(file_inode(file));
	struct files_struct *files;
	unsigned int fd;

	if (!p)
		return -ENOENT;

	if (!dir_emit_dots(file, ctx))
		goto out;
	files = get_files_struct(p);
	if (!files)
		goto out;

	rcu_read_lock();
	for (fd = ctx->pos - 2;
	     fd < files_fdtable(files)->max_fds;
	     fd++, ctx->pos++) {
		char name[PROC_NUMBUF];
		int len;

		if (!fcheck_files(files, fd))
			continue;
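		/* proc_fill_cache() may sleep while instantiating the dentry,
		 * so the RCU read lock is dropped here and retaken below. */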
		rcu_read_unlock();

		len = snprintf(name, sizeof(name), "%u", fd);
		if (!proc_fill_cache(file, ctx,
				     name, len, instantiate, p,
				     (void *)(unsigned long)fd))
			goto out_fd_loop;
		cond_resched();
		rcu_read_lock();
	}
	rcu_read_unlock();
out_fd_loop:
	put_files_struct(files);
out:
	put_task_struct(p);
	return 0;
}
Example #19
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
asmlinkage long sys_close(unsigned int fd)
{
	struct file * filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int retval;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	FD_CLR(fd, fdt->close_on_exec);
	__put_unused_fd(files, fd);

	/* A USB storage device may be unplugged after the write completes,
	 * so we have to flush the cache to disk after sys_close().
	 * -- by Steven
	 */
	if (filp->f_mode == (FMODE_WRITE | FMODE_LSEEK | FMODE_PREAD)) {
	    sys_sync();
	}

	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
Example #20
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
SYSCALL_DEFINE1(close, unsigned int, fd)
{
	struct file * filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int retval;

#ifdef CONFIG_SEC_DEBUG_ZERO_FD_CLOSE
	if (fd == 0 && strcmp(current->group_leader->comm,"mediaserver") == 0)
		panic("trying to close fd=0");
#endif

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
Example #21
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
asmlinkage long sys_close(unsigned int fd)
{
	struct file * filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	FD_CLR(fd, fdt->close_on_exec);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
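	/* Note: unlike the two later versions above, this variant returns the
	 * filp_close() result directly, without mapping the -ERESTART* codes
	 * to -EINTR. */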
	return filp_close(filp, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
Example #22
/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
asmlinkage long sys_close(unsigned int fd)
{
	struct file * filp;
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	int retval;
	if (need_files_checkpoint())
		checkpoint_files();

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;

	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	tx_cache_get_file(filp); //get tx refcount on file

	rcu_assign_pointer(fdt->fd[fd], NULL);
	FD_CLR(fd, fdt->close_on_exec); //??check later
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
Example #23
/*
 * The additional architecture-specific notes for Cell are various
 * context files in the spu context.
 *
 * This function iterates over all open file descriptors and sees
 * if they are a directory in spufs.  In that case we use spufs
 * internal functionality to dump them without needing to actually
 * open the files.
 */
static struct spu_context *coredump_next_context(int *fd)
{
	struct fdtable *fdt = files_fdtable(current->files);
	struct file *file;
	struct spu_context *ctx = NULL;

	for (; *fd < fdt->max_fds; (*fd)++) {
		if (!FD_ISSET(*fd, fdt->open_fds))
			continue;

		file = fcheck(*fd);

		if (!file || file->f_op != &spufs_context_fops)
			continue;

		ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx;
		if (ctx->flags & SPU_CREATE_NOSCHED)
			continue;

		break;
	}

	return ctx;
}
Example #24
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i, expand;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = alloc_files();
	if (!newf)
		goto out;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	new_fdt = files_fdtable(newf);
	size = old_fdt->max_fdset;
	open_files = count_open_files(old_fdt);
	expand = 0;

	/*
	 * Check whether we need to allocate a larger fd array or fd set.
	 * Note: we're not a clone task, so the open count won't  change.
	 */
	if (open_files > new_fdt->max_fdset) {
		new_fdt->max_fdset = 0;
		expand = 1;
	}
	if (open_files > new_fdt->max_fds) {
		new_fdt->max_fds = 0;
		expand = 1;
	}

	/* if the old fdset gets grown now, we'll only copy up to "size" fds */
	if (expand) {
		spin_unlock(&oldf->file_lock);
		spin_lock(&newf->file_lock);
		*errorp = expand_files(newf, open_files-1);
		spin_unlock(&newf->file_lock);
		if (*errorp < 0)
			goto out_release;
		new_fdt = files_fdtable(newf);
		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * it may have grown a bigger fd table in the meantime,
		 * so we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits, old_fdt->open_fds->fds_bits, open_files/8);
	memcpy(new_fdt->close_on_exec->fds_bits, old_fdt->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long-word aligned, thus could use an optimized version */
	memset(new_fds, 0, size); 

	if (new_fdt->max_fdset > open_files) {
		int left = (new_fdt->max_fdset-open_files)/8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

out:
	return newf;

out_release:
	free_fdset (new_fdt->close_on_exec, new_fdt->max_fdset);
	free_fdset (new_fdt->open_fds, new_fdt->max_fdset);
	free_fd_array(new_fdt->fd, new_fdt->max_fds);
	kmem_cache_free(files_cachep, newf);
	return NULL;
}
Example #25
static inline int solaris_sockmod(unsigned int fd, unsigned int cmd, u32 arg)
{
	struct inode *ino;
	struct fdtable *fdt;
	/* I wonder which of these tests are superfluous... --patrik */
	rcu_read_lock();
	fdt = files_fdtable(current->files);
	if (! fdt->fd[fd] ||
	    ! fdt->fd[fd]->f_path.dentry ||
	    ! (ino = fdt->fd[fd]->f_path.dentry->d_inode) ||
	    ! S_ISSOCK(ino->i_mode)) {
		rcu_read_unlock();
		return TBADF;
	}
	rcu_read_unlock();
	
	switch (cmd & 0xff) {
	case 109: /* SI_SOCKPARAMS */
	{
		struct solaris_si_sockparams si;
		if (copy_from_user (&si, A(arg), sizeof(si)))
			return (EFAULT << 8) | TSYSERR;

		/* Should we modify socket ino->socket_i.ops and type? */
		return 0;
	}
	case 110: /* SI_GETUDATA */
	{
		int etsdusize, servtype;
		struct solaris_si_udata __user *p = A(arg);
		switch (SOCKET_I(ino)->type) {
		case SOCK_STREAM:
			etsdusize = 1;
			servtype = 2;
			break;
		default:
			etsdusize = -2;
			servtype = 3;
			break;
		}
		if (put_user(16384, &p->tidusize) ||
		    __put_user(sizeof(struct sockaddr), &p->addrsize) ||
		    __put_user(-1, &p->optsize) ||
		    __put_user(etsdusize, &p->etsdusize) ||
		    __put_user(servtype, &p->servtype) ||
		    __put_user(0, &p->so_state) ||
		    __put_user(0, &p->so_options) ||
		    __put_user(16384, &p->tsdusize) ||
		    __put_user(SOCKET_I(ino)->ops->family, &p->sockparams.sp_family) ||
		    __put_user(SOCKET_I(ino)->type, &p->sockparams.sp_type) ||
		    __put_user(SOCKET_I(ino)->ops->family, &p->sockparams.sp_protocol))
			return (EFAULT << 8) | TSYSERR;
		return 0;
	}
	case 101: /* O_SI_GETUDATA */
	{
		int etsdusize, servtype;
		struct solaris_o_si_udata __user *p = A(arg);
		switch (SOCKET_I(ino)->type) {
		case SOCK_STREAM:
			etsdusize = 1;
			servtype = 2;
			break;
		default:
			etsdusize = -2;
			servtype = 3;
			break;
		}
		if (put_user(16384, &p->tidusize) ||
		    __put_user(sizeof(struct sockaddr), &p->addrsize) ||
		    __put_user(-1, &p->optsize) ||
		    __put_user(etsdusize, &p->etsdusize) ||
		    __put_user(servtype, &p->servtype) ||
		    __put_user(0, &p->so_state) ||
		    __put_user(0, &p->so_options) ||
		    __put_user(16384, &p->tsdusize))
			return (EFAULT << 8) | TSYSERR;
		return 0;
	}
	case 102: /* SI_SHUTDOWN */
	case 103: /* SI_LISTEN */
	case 104: /* SI_SETMYNAME */
	case 105: /* SI_SETPEERNAME */
	case 106: /* SI_GETINTRANSIT */
	case 107: /* SI_TCL_LINK */
	case 108: /* SI_TCL_UNLINK */
		;
	}
	return TNOTSUPPORT;
}
Example #26
static int task_get_unused_fd_flags(struct shfile_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	int fd, error;
	struct fdtable *fdt;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	error = -EMFILE;
	spin_lock(&files->file_lock);

repeat:
	fdt = files_fdtable(files);
	fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, files->next_fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	rlim_cur = 0;
	if (lock_task_sighand(proc->tsk, &irqs)) {
		rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
		unlock_task_sighand(proc->tsk, &irqs);
	}
	if (fd >= rlim_cur)
		goto out;

	/* Do we need to expand the fd array or fd set?  */
	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	if (error) {
		/*
		 * If we needed to expand the fd array we
		 * might have blocked - try again.
		 */
		error = -EMFILE;
		goto repeat;
	}

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	files->next_fd = fd + 1;
#if 1
	/* Sanity check */
	if (fdt->fd[fd] != NULL) {
		printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
		fdt->fd[fd] = NULL;
	}
#endif
	error = fd;

out:
	spin_unlock(&files->file_lock);
	return error;
}
Example #27
static int aio_event_thread(void *data)
{
	struct aio_threadinfo *tinfo = data;
	struct aio_output *output = tinfo->output;
	struct aio_threadinfo *other = &output->tinfo[2];
	int err = -ENOMEM;
	
	MARS_DBG("event thread has started.\n");
	//set_user_nice(current, -20);

	use_fake_mm();
	if (!current->mm)
		goto err;

	err = aio_start_thread(output, &output->tinfo[2], aio_sync_thread, 'y');
	if (unlikely(err < 0))
		goto err;

	while (!brick_thread_should_stop() || atomic_read(&tinfo->queued_sum) > 0) {
		mm_segment_t oldfs;
		int count;
		int i;
		struct timespec timeout = {
			.tv_sec = 1,
		};
		struct io_event events[MARS_MAX_AIO_READ];

		oldfs = get_fs();
		set_fs(get_ds());
		/* TODO: don't time out upon termination.
		 * Probably we should submit a dummy request.
		 */
		count = sys_io_getevents(output->ctxp, 1, MARS_MAX_AIO_READ, events, &timeout);
		set_fs(oldfs);

		if (likely(count > 0)) {
			atomic_sub(count, &output->submit_count);
		}

		for (i = 0; i < count; i++) {
			struct aio_mref_aspect *mref_a = (void*)events[i].data;
			struct mref_object *mref;
			int err = events[i].res;

			if (!mref_a) {
				continue; // this was a dummy request
			}
			mref = mref_a->object;

			MARS_IO("AIO done %p pos = %lld len = %d rw = %d\n", mref, mref->ref_pos, mref->ref_len, mref->ref_rw);

			mapfree_set(output->mf, mref->ref_pos, mref->ref_pos + mref->ref_len);

			if (output->brick->o_fdsync
			   && err >= 0 
			   && mref->ref_rw != READ
			   && !mref->ref_skip_sync
			   && !mref_a->resubmit++) {
				// workaround for non-implemented AIO FSYNC operation
				if (output->mf &&
				    output->mf->mf_filp &&
				    output->mf->mf_filp->f_op &&
				    !output->mf->mf_filp->f_op->aio_fsync) {
					mars_trace(mref, "aio_fsync");
					_enqueue(other, mref_a, mref->ref_prio, true);
					continue;
				}
				err = aio_submit(output, mref_a, true);
				if (likely(err >= 0))
					continue;
			}

			_complete(output, mref_a, err);

		}
	}
	err = 0;

 err:
	MARS_DBG("event thread has stopped, err = %d\n", err);

	aio_stop_thread(output, 2, false);

	unuse_fake_mm();

	tinfo->terminated = true;
	wake_up_interruptible_all(&tinfo->terminate_event);
	return err;
}

#if 1
/* This should go to fs/open.c (as long as vfs_submit() is not implemented)
 */
#include <linux/fdtable.h>
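/* Unlike sys_close(), this only clears the table slot: the caller keeps
 * its reference on the struct file and must release the descriptor
 * number separately (see the fd_uninstall()/put_unused_fd() pair in
 * _destroy_ioctx() below). */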
void fd_uninstall(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	MARS_DBG("fd = %d\n", fd);
	if (unlikely((int)fd < 0)) {	/* fd is unsigned, cast before the sign check */
		MARS_ERR("bad fd = %d\n", fd);
		return;
	}
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	rcu_assign_pointer(fdt->fd[fd], NULL);
	spin_unlock(&files->file_lock);
}
EXPORT_SYMBOL(fd_uninstall);
#endif

static
atomic_t ioctx_count = ATOMIC_INIT(0);

static
void _destroy_ioctx(struct aio_output *output)
{
	if (unlikely(!output))
		goto done;

	aio_stop_thread(output, 1, true);

	use_fake_mm();

	if (likely(output->ctxp)) {
		mm_segment_t oldfs;
		int err;

		MARS_DBG("ioctx count = %d destroying %p\n", atomic_read(&ioctx_count), (void*)output->ctxp);
		oldfs = get_fs();
		set_fs(get_ds());
		err = sys_io_destroy(output->ctxp);
		set_fs(oldfs);
		atomic_dec(&ioctx_count);
		MARS_DBG("ioctx count = %d status = %d\n", atomic_read(&ioctx_count), err);
		output->ctxp = 0;
	}

	if (likely(output->fd >= 0)) {
		MARS_DBG("destroying fd %d\n", output->fd);
		fd_uninstall(output->fd);
		put_unused_fd(output->fd);
		output->fd = -1;
	}

 done:
	if (likely(current->mm)) {
		unuse_fake_mm();
	}
}

static
int _create_ioctx(struct aio_output *output)
{
	struct file *file;
	mm_segment_t oldfs;
	int err = -EINVAL;

	CHECK_PTR_NULL(output, done);
	CHECK_PTR_NULL(output->mf, done);
	file = output->mf->mf_filp;
	CHECK_PTR_NULL(file, done);

	/* TODO: this is provisional. We only need it for sys_io_submit()
	 * which uses userspace concepts like file handles.
	 * This should be accompanied by a future kernelspace vfs_submit() or
	 * do_submit() which currently does not exist :(
	 */
	err = get_unused_fd();
	MARS_DBG("file %p '%s' new fd = %d\n", file, output->mf->mf_name, err);
	if (unlikely(err < 0)) {
		MARS_ERR("cannot get fd, err=%d\n", err);
		goto done;
	}
	output->fd = err;
	fd_install(err, file);

	use_fake_mm();

	err = -ENOMEM;
	if (unlikely(!current->mm)) {
		MARS_ERR("cannot fake mm\n");
		goto done;
	}

	MARS_DBG("ioctx count = %d old = %p\n", atomic_read(&ioctx_count), (void*)output->ctxp);
	output->ctxp = 0;

	oldfs = get_fs();
	set_fs(get_ds());
	err = sys_io_setup(MARS_MAX_AIO, &output->ctxp);
	set_fs(oldfs);
	if (likely(output->ctxp))
		atomic_inc(&ioctx_count);
	MARS_DBG("ioctx count = %d new = %p status = %d\n", atomic_read(&ioctx_count), (void*)output->ctxp, err);
	if (unlikely(err < 0)) {
		MARS_ERR("io_setup failed, err=%d\n", err);
		goto done;
	}
	
	err = aio_start_thread(output, &output->tinfo[1], aio_event_thread, 'e');
	if (unlikely(err < 0)) {
		MARS_ERR("could not start event thread\n");
		goto done;
	}

 done:
	if (likely(current->mm)) {
		unuse_fake_mm();
	}
	return err;
}

static int aio_submit_thread(void *data)
{
	struct aio_threadinfo *tinfo = data;
	struct aio_output *output = tinfo->output;
	struct file *file;
	int err = -EINVAL;

	MARS_DBG("submit thread has started.\n");

	file = output->mf->mf_filp;

	use_fake_mm();

	while (!brick_thread_should_stop() || atomic_read(&output->read_count) + atomic_read(&output->write_count) + atomic_read(&tinfo->queued_sum) > 0) {
		struct aio_mref_aspect *mref_a;
		struct mref_object *mref;
		int sleeptime;
		int status;

		wait_event_interruptible_timeout(
			tinfo->event,
			atomic_read(&tinfo->queued_sum) > 0,
			HZ / 4);

		mref_a = _dequeue(tinfo);
		if (!mref_a) {
			continue;
		}

		mref = mref_a->object;
		status = -EINVAL;
		CHECK_PTR(mref, error);

		mapfree_set(output->mf, mref->ref_pos, -1);

		if (mref->ref_rw) {
			insert_dirty(output, mref_a);
		}

		// check for reads exactly at EOF (special case)
		if (mref->ref_pos == mref->ref_total_size &&
		   !mref->ref_rw &&
		   mref->ref_timeout > 0) {
			loff_t total_size = i_size_read(file->f_mapping->host);
			loff_t len = total_size - mref->ref_pos;
			if (len > 0) {
				mref->ref_total_size = total_size;
				mref->ref_len = len;
			} else {
				if (!mref_a->start_jiffies) {
					mref_a->start_jiffies = jiffies;
				}
				if ((long long)jiffies - mref_a->start_jiffies <= mref->ref_timeout) {
					if (atomic_read(&tinfo->queued_sum) <= 0) {
						atomic_inc(&output->total_msleep_count);
						brick_msleep(1000 * 4 / HZ);
					}
					_enqueue(tinfo, mref_a, MARS_PRIO_LOW, true);
					continue;
				}
				MARS_DBG("ENODATA %lld\n", len);
				_complete(output, mref_a, -ENODATA);
				continue;
			}
		}

		sleeptime = 1;
		for (;;) {
			status = aio_submit(output, mref_a, false);

			if (likely(status != -EAGAIN)) {
				break;
			}
			atomic_inc(&output->total_delay_count);
			brick_msleep(sleeptime);
			if (sleeptime < 100) {
				sleeptime++;
			}
		}
	error:
		if (unlikely(status < 0)) {
			MARS_IO("submit_count = %d status = %d\n", atomic_read(&output->submit_count), status);
			_complete_mref(output, mref, status);
		}
	}

	MARS_DBG("submit thread has stopped, status = %d.\n", err);

	if (likely(current->mm)) {
		unuse_fake_mm();
	}

	tinfo->terminated = true;
	wake_up_interruptible_all(&tinfo->terminate_event);
	return err;
}

static int aio_get_info(struct aio_output *output, struct mars_info *info)
{
	struct file *file;
	loff_t min;
	loff_t max;

	if (unlikely(!output ||
		     !output->mf ||
		     !(file = output->mf->mf_filp) ||
		     !file->f_mapping ||
		     !file->f_mapping->host))
		return -EINVAL;

	info->tf_align = 1;
	info->tf_min_size = 1;

	/* Workaround for races in the page cache.
	 *
	 * It appears that concurrent reads and writes seem to
	 * result in inconsistent reads in some very rare cases, due to
	 * races. Sometimes, the inode claims that the file has been already
	 * appended by a write operation, but the data has not actually hit
	 * the page cache, such that a concurrent read gets NULL blocks.
	 */
	min = i_size_read(file->f_mapping->host);
	max = 0;

	if (!output->brick->is_static_device) {
		get_dirty(output, &min, &max);
	}

	info->current_size = min;
	MARS_DBG("determined file size = %lld\n", info->current_size);

	return 0;
}

//////////////// informational / statistics ///////////////

static noinline
char *aio_statistics(struct aio_brick *brick, int verbose)
{
	struct aio_output *output = brick->outputs[0];
	char *res = brick_string_alloc(4096);
	char *sync = NULL;
	int pos = 0;
	if (!res)
		return NULL;

	pos += report_timing(&timings[0], res + pos, 4096 - pos);
	pos += report_timing(&timings[1], res + pos, 4096 - pos);
	pos += report_timing(&timings[2], res + pos, 4096 - pos);

	snprintf(res + pos, 4096 - pos,
		 "total "
		 "reads = %d "
		 "writes = %d "
		 "allocs = %d "
		 "submits = %d "
		 "again = %d "
		 "delays = %d "
		 "msleeps = %d "
		 "fdsyncs = %d "
		 "fdsync_waits = %d "
		 "map_free = %d | "
		 "flying reads = %d "
		 "writes = %d "
		 "allocs = %d "
		 "submits = %d "
		 "q0 = %d "
		 "q1 = %d "
		 "q2 = %d "
		 "| total "
		 "q0 = %d "
		 "q1 = %d "
		 "q2 = %d "
		 "%s\n",
		 atomic_read(&output->total_read_count),
		 atomic_read(&output->total_write_count),
		 atomic_read(&output->total_alloc_count),
		 atomic_read(&output->total_submit_count),
		 atomic_read(&output->total_again_count),
		 atomic_read(&output->total_delay_count),
		 atomic_read(&output->total_msleep_count),
		 atomic_read(&output->total_fdsync_count),
		 atomic_read(&output->total_fdsync_wait_count),
		 atomic_read(&output->total_mapfree_count),
		 atomic_read(&output->read_count),
		 atomic_read(&output->write_count),
		 atomic_read(&output->alloc_count),
		 atomic_read(&output->submit_count),
		 atomic_read(&output->tinfo[0].queued_sum),
		 atomic_read(&output->tinfo[1].queued_sum),
		 atomic_read(&output->tinfo[2].queued_sum),
		 atomic_read(&output->tinfo[0].total_enqueue_count),
		 atomic_read(&output->tinfo[1].total_enqueue_count),
		 atomic_read(&output->tinfo[2].total_enqueue_count),
		 sync ? sync : "");
	
	if (sync)
		brick_string_free(sync);

	return res;
}

static noinline
void aio_reset_statistics(struct aio_brick *brick)
{
	struct aio_output *output = brick->outputs[0];
	int i;
	atomic_set(&output->total_read_count, 0);
	atomic_set(&output->total_write_count, 0);
	atomic_set(&output->total_alloc_count, 0);
	atomic_set(&output->total_submit_count, 0);
	atomic_set(&output->total_again_count, 0);
	atomic_set(&output->total_delay_count, 0);
	atomic_set(&output->total_msleep_count, 0);
	atomic_set(&output->total_fdsync_count, 0);
	atomic_set(&output->total_fdsync_wait_count, 0);
	atomic_set(&output->total_mapfree_count, 0);
	for (i = 0; i < 3; i++) {
		struct aio_threadinfo *tinfo = &output->tinfo[i];
		atomic_set(&tinfo->total_enqueue_count, 0);
	}
}


//////////////// object / aspect constructors / destructors ///////////////

static int aio_mref_aspect_init_fn(struct generic_aspect *_ini)
{
	struct aio_mref_aspect *ini = (void*)_ini;
	INIT_LIST_HEAD(&ini->io_head);
	INIT_LIST_HEAD(&ini->dirty_head);
	return 0;
}

static void aio_mref_aspect_exit_fn(struct generic_aspect *_ini)
{
	struct aio_mref_aspect *ini = (void*)_ini;
	CHECK_HEAD_EMPTY(&ini->dirty_head);
	CHECK_HEAD_EMPTY(&ini->io_head);
}

MARS_MAKE_STATICS(aio);

////////////////////// brick constructors / destructors ////////////////////

static int aio_brick_construct(struct aio_brick *brick)
{
	return 0;
}

static int aio_switch(struct aio_brick *brick)
{
	static int index;
	struct aio_output *output = brick->outputs[0];
	const char *path = output->brick->brick_path;
	int flags = O_RDWR | O_LARGEFILE;
	int status = 0;

	MARS_DBG("power.button = %d\n", brick->power.button);
	if (!brick->power.button)
		goto cleanup;

	if (brick->power.led_on || output->mf)
		goto done;

	mars_power_led_off((void*)brick, false);

	if (brick->o_creat) {
		flags |= O_CREAT;
		MARS_DBG("using O_CREAT on %s\n", path);
	}
	if (brick->o_direct) {
		flags |= O_DIRECT;
		MARS_DBG("using O_DIRECT on %s\n", path);
	}

	output->mf = mapfree_get(path, flags);
	if (unlikely(!output->mf)) {
		MARS_ERR("could not open file = '%s' flags = %d\n", path, flags);
		status = -ENOENT;
		goto err;
	} 

	output->index = ++index;

	status = _create_ioctx(output);
	if (unlikely(status < 0)) {
		MARS_ERR("could not create ioctx, status = %d\n", status);
		goto err;
	}

	status = aio_start_thread(output, &output->tinfo[0], aio_submit_thread, 's');
	if (unlikely(status < 0)) {
		MARS_ERR("could not start theads, status = %d\n", status);
		goto err;
	}

	MARS_DBG("opened file '%s'\n", path);
	mars_power_led_on((void*)brick, true);

done:
	return 0;

err:
	MARS_ERR("status = %d\n", status);
cleanup:
	if (brick->power.led_off) {
		goto done;
	}

	mars_power_led_on((void*)brick, false);

	aio_stop_thread(output, 0, false);

	_destroy_ioctx(output);

	mars_power_led_off((void*)brick,
			  (output->tinfo[0].thread == NULL &&
			   output->tinfo[1].thread == NULL &&
			   output->tinfo[2].thread == NULL));

	MARS_DBG("switch off led_off = %d status = %d\n", brick->power.led_off, status);
	if (brick->power.led_off) {
		if (output->mf) {
			MARS_DBG("closing file = '%s'\n", output->mf->mf_name);
			mapfree_put(output->mf);
			output->mf = NULL;
		}
	}
	return status;
}

static int aio_output_construct(struct aio_output *output)
{
	INIT_LIST_HEAD(&output->dirty_anchor);
	spin_lock_init(&output->dirty_lock);
	init_waitqueue_head(&output->fdsync_event);
	output->fd = -1;
	return 0;
}
Example #28
/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file * f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
#ifdef FILE_OVER_MAX
		static int fd_dump_all_files = 0;

		if (!fd_dump_all_files) {
			struct task_struct *p;

			xlog_printk(ANDROID_LOG_INFO, FS_TAG,
				    "(PID:%d)files %ld over old_max:%ld",
				    current->pid, get_nr_files(), old_max);
			for_each_process(p) {
				pid_t pid = p->pid;
				struct files_struct *files = p->files;
				/* check files before dereferencing it in files_fdtable() */
				struct fdtable *fdt = files ? files_fdtable(files) : NULL;

				if (files && fdt)
					fd_show_open_files(pid, files, fdt);
			}
			fd_dump_all_files = 0x1;
		}
#endif
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}
Example #29
int kbase_stream_create_fence(int tl_fd)
{
	struct sync_timeline *tl;
	struct sync_pt *pt;
	struct sync_fence *fence;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)
	struct files_struct *files;
	struct fdtable *fdt;
#endif
	int fd;
	struct file *tl_file;

	tl_file = fget(tl_fd);
	if (tl_file == NULL)
		return -EBADF;

	if (tl_file->f_op != &stream_fops) {
		fd = -EBADF;
		goto out;
	}

	tl = tl_file->private_data;

	pt = kbase_sync_pt_alloc(tl);
	if (!pt) {
		fd = -EFAULT;
		goto out;
	}

	fence = sync_fence_create("mali_fence", pt);
	if (!fence) {
		sync_pt_free(pt);
		fd = -EFAULT;
		goto out;
	}

	/* from here the fence owns the sync_pt */

	/* create a fd representing the fence */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		sync_fence_put(fence);
		goto out;
	}
#else
	fd = get_unused_fd();
	if (fd < 0) {
		sync_fence_put(fence);
		goto out;
	}

	files = current->files;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
	__set_close_on_exec(fd, fdt);
#else
	FD_SET(fd, fdt->close_on_exec);
#endif
	spin_unlock(&files->file_lock);
#endif  /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0) */

	/* bind fence to the new fd */
	sync_fence_install(fence, fd);

 out:
	fput(tl_file);

	return fd;
}
Example #30
asmlinkage int efab_linux_sys_epoll_create1(int flags)
{
    asmlinkage int (*sys_epoll_create_fn)(int);
    int rc;

#ifdef __NR_epoll_create1
    if (state.no_replace_epoll_create1)
    {
        sys_epoll_create_fn = (int (*)(int))THUNKPTR(state.no_replace_epoll_create1->original_entry64);
        TRAMP_DEBUG("epoll_create1 via %p .. ", sys_epoll_create_fn);
        rc = sys_epoll_create_fn(flags);
        if (rc != -ENOSYS)
            goto out;
    }
#endif
    if (!state.no_replace_epoll_create)
    {
        ci_log("Unexpected epoll_ctl() request before full init");
        return -EFAULT;
    }
    sys_epoll_create_fn = (int (*)(int))THUNKPTR(state.no_replace_epoll_create->original_entry64);
    TRAMP_DEBUG("epoll_create via %p .. ", sys_epoll_create_fn);
    rc = sys_epoll_create_fn(1);
    ci_assert_equal(flags & ~EPOLL_CLOEXEC, 0);
    if (rc >= 0 && (flags & EPOLL_CLOEXEC))
    {
        struct files_struct *files = current->files;
        struct fdtable *fdt;
        spin_lock(&files->file_lock);
        fdt = files_fdtable(files);
        efx_set_close_on_exec(rc, fdt);
        spin_unlock(&files->file_lock);
    }
    
    goto out;
out:
    TRAMP_DEBUG(" ... = %d ", rc);
    return rc;
}