/*
 * Unshare the filesystem structure if it is being shared.
 *
 * On success *new_fsp holds the private copy (or is untouched when no
 * unsharing was needed).  Returns 0 or -ENOMEM.
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *cur_fs = current->fs;

	/* Only act when CLONE_FS unsharing was requested and we actually
	 * have an fs_struct to unshare. */
	if (!(unshare_flags & CLONE_FS) || !cur_fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (cur_fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(cur_fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}
/*
 * Give the caller a private fs_struct when CLONE_FS unsharing is requested
 * and current's fs_struct is shared.  Returns 0, or -ENOMEM if the copy
 * could not be allocated.
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	/* Nothing to do: not requested, no fs_struct, or already private. */
	if (!(unshare_flags & CLONE_FS) || !fs || fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	return *new_fsp ? 0 : -ENOMEM;
}
int cfs_daemonize_ctxt(char *str) { cfs_daemonize(str); #ifndef HAVE_UNSHARE_FS_STRUCT { struct task_struct *tsk = current; struct fs_struct *fs = NULL; fs = copy_fs_struct(tsk->fs); if (fs == NULL) return -ENOMEM; exit_fs(tsk); tsk->fs = fs; } #else unshare_fs_struct(); #endif return 0; }
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) { struct fs_struct *fs = current->fs; if (clone_flags & CLONE_FS) { /* tsk->fs is already what we want */ write_lock(&fs->lock); if (fs->in_exec) { write_unlock(&fs->lock); return -EAGAIN; } fs->users++; write_unlock(&fs->lock); return 0; } tsk->fs = copy_fs_struct(fs); if (!tsk->fs) return -ENOMEM; return 0; }
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) { struct fs_struct *fs = current->fs; if (clone_flags & CLONE_FS) { spin_lock(&fs->lock); if (fs->in_exec) { spin_unlock(&fs->lock); return -EAGAIN; } fs->users++; spin_unlock(&fs->lock); return 0; } tsk->fs = copy_fs_struct(fs); if (!tsk->fs) return -ENOMEM; return 0; }
// XXX: need comment here // XXX: currently only check perms to create here. // XXX: other checks needed? static int do_init_dir(cr_location_t *loc, struct file *dirp) { int result = 0; result = cr_permission(dirp->f_dentry->d_inode, (loc->is_write ? MAY_WRITE : MAY_READ) | MAY_EXEC, NULL); if (!result) { loc->fs = copy_fs_struct(current->fs); if (loc->fs) { // replace the pwd with that of 'dirp' cr_set_pwd_file(loc->fs, dirp); } else { result = -EINVAL; } } // Error and normal paths exit here fput(dirp); // We don't hold the filp for a directory return result; }
/*
 * Switch the calling task to the execution domain for 'personality'.
 *
 * If the task's fs_struct is shared, a private copy is installed first so
 * that set_fs_altroot() only affects this task.  Returns 0 on success or
 * -ENOMEM if the fs_struct copy fails.
 *
 * Fix: the source text had mojibake "&curren;t" (rendered as a currency
 * sign) where "&current" was intended — restored to compile.
 */
int __set_personality(u_long personality)
{
	struct exec_domain *ep, *oep;

	ep = lookup_exec_domain(personality);
	if (ep == current_thread_info()->exec_domain) {
		current->personality = personality;
		/* NOTE(review): this early return keeps the reference taken
		 * by lookup_exec_domain(); confirm whether a
		 * module_put(ep->module) is required here. */
		return 0;
	}

	if (atomic_read(&current->fs->count) != 1) {
		/* fs_struct is shared: take a private copy so the altroot
		 * change below does not leak into other tasks. */
		struct fs_struct *fsp, *ofsp;

		fsp = copy_fs_struct(current->fs);
		if (fsp == NULL) {
			module_put(ep->module);
			return -ENOMEM;
		}

		task_lock(current);
		ofsp = current->fs;
		current->fs = fsp;
		task_unlock(current);

		put_fs_struct(ofsp);
	}

	/*
	 * At that point we are guaranteed to be the sole owner of
	 * current->fs.
	 */
	current->personality = personality;
	oep = current_thread_info()->exec_domain;
	current_thread_info()->exec_domain = ep;
	set_fs_altroot();

	module_put(oep->module);
	return 0;
}
/* * This is the NFS server kernel thread */ static void nfsd(struct svc_rqst *rqstp) { struct svc_serv *serv = rqstp->rq_server; struct fs_struct *fsp; int err; struct nfsd_list me; sigset_t shutdown_mask, allowed_mask; /* Lock module and set up kernel thread */ lock_kernel(); daemonize("nfsd"); current->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; /* After daemonize() this kernel thread shares current->fs * with the init process. We need to create files with a * umask of 0 instead of init's umask. */ fsp = copy_fs_struct(current->fs); if (!fsp) { printk("Unable to start nfsd thread: out of memory\n"); goto out; } exit_fs(current); current->fs = fsp; current->fs->umask = 0; siginitsetinv(&shutdown_mask, SHUTDOWN_SIGS); siginitsetinv(&allowed_mask, ALLOWED_SIGS); nfsdstats.th_cnt++; lockd_up(); /* start lockd */ me.task = current; list_add(&me.list, &nfsd_list); unlock_kernel(); /* * We want less throttling in balance_dirty_pages() so that nfs to * localhost doesn't cause nfsd to lock up due to all the client's * dirty pages. */ current->flags |= PF_LESS_THROTTLE; /* * The main request loop */ for (;;) { /* Block all but the shutdown signals */ sigprocmask(SIG_SETMASK, &shutdown_mask, NULL); /* * Find a socket with data available and call its * recvfrom routine. */ while ((err = svc_recv(serv, rqstp, 60*60*HZ)) == -EAGAIN) ; if (err < 0) break; update_thread_usage(atomic_read(&nfsd_busy)); atomic_inc(&nfsd_busy); /* Lock the export hash tables for reading. */ exp_readlock(); /* Process request with signals blocked. 
*/ sigprocmask(SIG_SETMASK, &allowed_mask, NULL); svc_process(serv, rqstp); /* Unlock export hash tables */ exp_readunlock(); update_thread_usage(atomic_read(&nfsd_busy)); atomic_dec(&nfsd_busy); } if (err != -EINTR) { printk(KERN_WARNING "nfsd: terminating on error %d\n", -err); } else { unsigned int signo; for (signo = 1; signo <= _NSIG; signo++) if (sigismember(¤t->pending.signal, signo) && !sigismember(¤t->blocked, signo)) break; err = signo; } lock_kernel(); /* Release lockd */ lockd_down(); /* Check if this is last thread */ if (serv->sv_nrthreads==1) { printk(KERN_WARNING "nfsd: last server has exited\n"); if (err != SIG_NOCLEAN) { printk(KERN_WARNING "nfsd: unexporting all filesystems\n"); nfsd_export_flush(); } nfsd_serv = NULL; nfsd_racache_shutdown(); /* release read-ahead cache */ nfs4_state_shutdown(); } list_del(&me.list); nfsdstats.th_cnt --; out: /* Release the thread */ svc_exit_thread(rqstp); /* Release module */ module_put_and_exit(0); }