/*
 * Interactive "write" command: collect console input into `file`,
 * buffering it and flushing in chunks, until the user types the
 * character with value 1 (Ctrl-A).  CR/LF input echoes a newline and
 * is stored as '\n'.  Creates the file when the open fails because it
 * does not exist.  Returns 0 on success, 1 on failure.
 */
int do_wri(u8 * file)
{
	s8 ret, fd;
	u32 retlba;
	u8 i = 0;		/* number of pending bytes in buff */
	u32 ch;
	u8 buff[256] = {0};

	if (!check_for_args(WRI, 1))
		return 1;

	fd = openf(file, O_RW);
	if (fd == -1) {
		retlba = NO_LBA;
		/* Create the file only when the open failed because it
		 * does not exist yet. */
		if (ERROR_NO == NO_DIR_FILE)
			retlba = creatf(file, 064);
		if (retlba == NO_LBA) {
			vd_puts("COULD NOT OPEN OR CREATE THE DESTINATION FILE\n");
			errormessage(geterror());
			/* BUGFIX: the old code called closef(fd) here with
			 * fd == -1 -- no descriptor was ever opened. */
			fl_clean();
			/* NOTE(review): returns 0 on this failure path --
			 * preserved as-is for compatibility, but it looks
			 * inconsistent with the other error returns. */
			return 0;
		}
		fd = openf(file, O_RW);
	}
	if (fd == -1) {
		vd_puts("CANNOT OPEN THE FILE\n");
		errormessage(geterror());
		fl_clean();
		return 1;
	}
	while (1) {
		ch = getche();
		if (ch == 1)		/* Ctrl-A terminates input */
			break;
		if (ch == 13 || ch == 10) {
			printf("\n");
			ch = '\n';
		}
		if (i == 255) {
			/* BUGFIX: flush only the i bytes actually stored;
			 * the old code wrote 256 bytes, which included one
			 * stale byte at buff[255] in every flushed chunk. */
			ret = writef(fd, buff, i);
			i = 0;
			if (ret == -1) {
				vd_puts("SORRY SORRY CANNOT WRITE TO FILE SO REMOVING IT\n");
				errormessage(geterror());
				closef(fd);	/* BUGFIX: fd was leaked here */
				unlinkf(file);
				fl_clean();
				return 1;
			}
		}
		buff[i++] = (u8)ch;
	}
	/* Flush whatever is left in the buffer. */
	ret = writef(fd, buff, i);
	if (ret == -1) {
		vd_puts("SORRY SORRY CANNOT WRITE TO FILE SO REMOVING IT\n");
		errormessage(geterror());
		closef(fd);	/* BUGFIX: fd was leaked here */
		unlinkf(file);
		fl_clean();
		return 1;
	}
	closef(fd);
	vd_puts(" \n");
	fl_clean();
	return 0;
}
/*
 * NOTE(review): the ordering here is deliberate -- fp->f_count is bumped
 * under f_lock *before* fd_putfile(from) releases the descriptor-table
 * hold, so fp stays live across the fd_dup2() call; the trailing closef()
 * then drops that extra reference whether or not the dup succeeded.
 * *retval is set to `to` even on error; callers are expected to check the
 * returned error code first.  The `from == to` case is a no-op success.
 */
/* * Duplicate a file descriptor to a particular value. */ int dodup(struct lwp *l, int from, int to, int flags, register_t *retval) { int error; file_t *fp; if ((fp = fd_getfile(from)) == NULL) return EBADF; mutex_enter(&fp->f_lock); fp->f_count++; mutex_exit(&fp->f_lock); fd_putfile(from); if ((u_int)to >= curproc->p_rlimit[RLIMIT_NOFILE].rlim_cur || (u_int)to >= maxfiles) error = EBADF; else if (from == to) error = 0; else error = fd_dup2(fp, to, flags); closef(fp); *retval = to; return error; }
/*
 * NOTE(review): error unwinding is ordered carefully.  At label free3 the
 * read side already has a descriptor, so it is torn down via fdremove() +
 * closef(rf); rpipe is then set to NULL so that free1 does not call
 * pipeclose(rpipe) a second time (closef(rf) presumably tears rpipe down
 * through rf->f_data/pipeops -- confirm against pipeops).  free2 handles
 * the case where only the raw pipes exist (no file yet).  FILE_SET_MATURE
 * is called on both files only once everything succeeded.
 */
/* ARGSUSED */ int sys_opipe(struct proc *p, void *v, register_t *retval) { struct filedesc *fdp = p->p_fd; struct file *rf, *wf; struct pipe *rpipe, *wpipe; int fd, error; fdplock(fdp); rpipe = pool_get(&pipe_pool, PR_WAITOK); error = pipe_create(rpipe); if (error != 0) goto free1; wpipe = pool_get(&pipe_pool, PR_WAITOK); error = pipe_create(wpipe); if (error != 0) goto free2; error = falloc(p, &rf, &fd); if (error != 0) goto free2; rf->f_flag = FREAD | FWRITE; rf->f_type = DTYPE_PIPE; rf->f_data = rpipe; rf->f_ops = &pipeops; retval[0] = fd; error = falloc(p, &wf, &fd); if (error != 0) goto free3; wf->f_flag = FREAD | FWRITE; wf->f_type = DTYPE_PIPE; wf->f_data = wpipe; wf->f_ops = &pipeops; retval[1] = fd; rpipe->pipe_peer = wpipe; wpipe->pipe_peer = rpipe; FILE_SET_MATURE(rf); FILE_SET_MATURE(wf); fdpunlock(fdp); return (0); free3: fdremove(fdp, retval[0]); closef(rf, p); rpipe = NULL; free2: (void)pipeclose(wpipe); free1: if (rpipe != NULL) (void)pipeclose(rpipe); fdpunlock(fdp); return (error); }
/*
 * Discard the edit buffer: delete any surviving lines, then close and
 * remove the scratch file that backs the buffer.
 */
void clrbuf (void)
{
	if (Lastln > 0)
		svdel (1, Lastln);

	closef (Scr);
	unlink (Scrname);
}
/*
 * NOTE(review): the `thisvp->v_count-- == 1` branch detects the last
 * reference to the namenode's vnode; only then is nm_filep captured into
 * `fp` and the vnode/namenode freed here -- otherwise VROOT is cleared
 * and nm_inactive is left to clean up later (per the header comment).
 * The closef(fp) is deliberately deferred until after ntable_lock is
 * dropped.  The namefind() check turns off STRMOUNT only when this was
 * the last name mount of the underlying stream.
 */
/* * Unmount a file descriptor from a node in the file system. * If the user is not the owner of the file and is not privileged, * the request is denied. * Otherwise, remove the namenode from the hash list. * If the mounted file descriptor was that of a stream and this * was the last mount of the stream, turn off the STRMOUNT flag. * If the rootvp is referenced other than through the mount, * nm_inactive will clean up. */ static int nm_unmount(vfs_t *vfsp, int flag, cred_t *crp) { struct namenode *nodep = (struct namenode *)vfsp->vfs_data; vnode_t *vp, *thisvp; struct file *fp = NULL; ASSERT((nodep->nm_flag & NMNMNT) == 0); /* * forced unmount is not supported by this file system * and thus, ENOTSUP, is being returned. */ if (flag & MS_FORCE) { return (ENOTSUP); } vp = nodep->nm_filevp; mutex_enter(&nodep->nm_lock); if (secpolicy_vnode_owner(crp, nodep->nm_vattr.va_uid) != 0) { mutex_exit(&nodep->nm_lock); return (EPERM); } mutex_exit(&nodep->nm_lock); mutex_enter(&ntable_lock); nameremove(nodep); thisvp = NMTOV(nodep); mutex_enter(&thisvp->v_lock); if (thisvp->v_count-- == 1) { fp = nodep->nm_filep; mutex_exit(&thisvp->v_lock); vn_invalid(thisvp); vn_free(thisvp); VFS_RELE(vfsp); namenodeno_free(nodep->nm_vattr.va_nodeid); kmem_free(nodep, sizeof (struct namenode)); } else { thisvp->v_flag &= ~VROOT; mutex_exit(&thisvp->v_lock); } if (namefind(vp, NULLVP) == NULL && vp->v_stream) { struct stdata *stp = vp->v_stream; mutex_enter(&stp->sd_lock); stp->sd_flag &= ~STRMOUNT; mutex_exit(&stp->sd_lock); } mutex_exit(&ntable_lock); if (fp != NULL) (void) closef(fp); return (0); }
/*
 * Interactive "append" command: seek to the end of `file` and append
 * console input, buffered in chunks, until the user types the character
 * with value 1 (Ctrl-A).  CR/LF input echoes a newline and is stored as
 * '\n'.  Returns 0 on success, 1 on failure.
 */
int do_app(u8 * file)
{
	s8 ret, fd;
	u8 i = 0;		/* number of pending bytes in buff */
	u32 ch;
	u8 buff[256];
	struct fnode fn;
	u32 size;

	if (!check_for_args(APP, 1))
		return 1;

	fd = openf(file, O_RW);
	if (fd == -1) {
		vd_puts("CANNOT OPEN THE FILE\n");
		errormessage(geterror());
		fl_clean();
		return 1;
	}
	/* BUGFIX: finfo() result was previously ignored. */
	ret = finfo(fd, &fn);
	if (ret == -1) {
		vd_puts("CANNOT GET FILE INFO\n");
		errormessage(geterror());
		closef(fd);
		fl_clean();
		return 1;
	}
	size = fn.size;
	/* Position at end-of-file so the new input is appended.
	 * BUGFIX: seekf() result was previously ignored. */
	ret = seekf(fd, size, SEEK_BEG);
	if (ret == -1) {
		vd_puts("CANNOT SEEK TO END OF FILE\n");
		errormessage(geterror());
		closef(fd);
		fl_clean();
		return 1;
	}
	while (1) {
		ch = getche();
		if (ch == 1)		/* Ctrl-A terminates input */
			break;
		if (ch == 13 || ch == 10) {
			printf("\n");
			ch = '\n';
		}
		if (i == 255) {
			/* BUGFIX: flush only the i bytes actually stored;
			 * the old code wrote 256 bytes, which included one
			 * stale byte at buff[255] in every flushed chunk. */
			ret = writef(fd, buff, i);
			i = 0;
			if (ret == -1) {
				vd_puts("SORRY SORRY CANNOT WRITE TO FILE SO REMOVING IT\n");
				errormessage(geterror());
				closef(fd);	/* BUGFIX: fd was leaked here */
				unlinkf(file);
				fl_clean();
				return 1;
			}
		}
		buff[i++] = (u8)ch;
	}
	/* Flush whatever is left in the buffer. */
	ret = writef(fd, buff, i);
	if (ret == -1) {
		vd_puts("SORRY SORRY CANNOT WRITE TO FILE SO REMOVING IT\n");
		errormessage(geterror());
		closef(fd);	/* BUGFIX: fd was leaked here */
		unlinkf(file);
		fl_clean();
		return 1;
	}
	puts(" ");
	closef(fd);
	fl_clean();
	return 0;
}
/* ARGSUSED */
/*
 * Release a kernel TLI handle.  The underlying file is closed only if
 * t_kopen() manufactured it (MADE_FP flag); a caller-supplied file is
 * left alone.  NOTE(review): `callclosef` is not consulted in this
 * implementation -- confirm against callers whether that is intended.
 */
int t_kclose(TIUSER *tiptr, int callclosef)
{
	file_t *held;

	/* Capture the file pointer before the handle memory is freed. */
	held = (tiptr->flags & MADE_FP) ? tiptr->fp : NULL;
	kmem_free(tiptr, TIUSERSZ);
	if (held != NULL)
		(void) closef(held);
	return (0);
}
int close(int des) { struct openposixfile* of = ofposix_find(posix_files, fd_of, &des); if(of == NULL) { TRACE(posix, "don't know FD %d; skipping 'close' instrumentation.", des); return closef(des); } TRACE(posix, "closing %s (FD %d [%d])", of->name, des, of->fd); assert(of->fd == des); if(of->name == NULL) { /* don't bother, not a real file? */ WARN(posix, "null filename!?"); return closef(des); } finish(transferlibs, of->name); { /* free up rid of table entry */ free(of->name); of->name = NULL; of->fd = 0; } return closef(des); }
/*
 * Emit the assembly output for a parsed program.  Opens "output.s",
 * writes the format strings, string literals, header, a `main`
 * declaration and its prologue, then the body (which must be a BLOCK
 * node) and the epilogue, and finally closes the file and releases the
 * symbol table.
 */
void write_asm(Syntax* complete_syntax, Stack* string_stack)
{
	tablevar = list_new();

	openf("output.s");			/* open the output file */

	emit_format();
	emit_str(string_stack);
	emit_header();				/* .text */
	function_declare("main");		/* .globl main */
	emit_init();				/* allocate main's frame */

	if (complete_syntax->type == BLOCK)
		emitblock(complete_syntax);
	else
		printf("invalid syntax");

	emit_footer();				/* release main's frame */

	closef(fp);				/* fp: handle set up by openf() */
	free(tablevar);
}
/*
 * "show" command: print the contents of `file` to the console, reading
 * it in chunks of up to 1024 bytes.  Carriage returns are mapped to
 * newlines for display.  Returns 0 on success, 1 on failure.
 */
int do_show(u8 * file)
{
	s8 fd;
	/* NOTE(review): ret was previously s8; if readf() returns the byte
	 * count, a 255-byte final chunk would have truncated to -1 and been
	 * reported as a read error.  int is safe under either convention. */
	int ret;
	u32 i;
	struct fnode fn;
	u32 size;
	u8 buff[1025];		/* one extra byte for the NUL terminator */

	if (!check_for_args(SHOW, 1))
		return 1;

	fd = openf(file, O_READ);
	if (fd == -1) {
		vd_puts("CANNOT OPEN THE FILE\n");
		errormessage(geterror());
		fl_clean();
		return 1;
	}
	ret = finfo(fd, &fn);
	if (ret == -1) {
		vd_puts("CANNOT OPEN THE FILE\n");
		errormessage(geterror());
		closef(fd);	/* BUGFIX: fd was leaked on this path */
		fl_clean();
		return 1;
	}
	size = fn.size;
	while (size) {
		u32 chunk = (size < 1024 ? size : 1024);

		ret = readf(fd, buff, chunk);
		if (ret == -1) {
			vd_puts("CANNOT READ THE FILE\n");
			errormessage(geterror());
			closef(fd);	/* BUGFIX: fd was leaked on this path */
			fl_clean();
			return 1;
		}
		buff[chunk] = 0;	/* NUL-terminate for vd_puts() */
		/* Map CR to newline for display; LF (10) is already '\n'. */
		for (i = 0; i < chunk; ++i)
			if (buff[i] == 13)
				buff[i] = '\n';
		vd_puts(buff);
		vd_puts("\n");
		if (size >= 1024)
			size -= 1024;
		else
			size = 0;
	}
	closef(fd);
	fl_clean();
	return 0;
}
void garbage_collect (void) { char new_name [MAXLINE]; int i, new_scrend; int new_fd; LINEDESC *p; makscr (&new_fd, new_name, MAXLINE); remark ("collecting garbage"); new_scrend = 0; for (p = Limbo, i = 1; i <= Limcnt; p++, i++) { gtxt (p); seekf ((long) new_scrend * 8, new_fd); writef (Txt, (int) p->Lineleng, new_fd); p->Seekaddr = new_scrend; new_scrend += (p->Lineleng + 7) / 8; } for (p = Line0, i = 0; i <= Lastln; p++, i++) { gtxt (p); seekf ((long) new_scrend * 8, new_fd); writef (Txt, (int) p->Lineleng, new_fd); p->Seekaddr = new_scrend; new_scrend += (p->Lineleng + 7) / 8; } closef (Scr); unlink (Scrname); Scr = new_fd; memset (Scrname, '\0', MAXLINE); snprintf (Scrname, MAXLINE-1, "%s", new_name); Scrend = new_scrend; Lost_lines = 0; remark (""); }
/*
 * NOTE(review): the file is allocated (falloc) and partially initialized
 * *before* socreate(); on socreate failure the descriptor and file are
 * unwound under fdplock via fdremove() + closef().  f_data and
 * FILE_SET_MATURE are only set on success, so the immature file is never
 * visible as a usable socket.
 */
int sys_socket(struct proc *p, void *v, register_t *retval) { struct sys_socket_args /* { syscallarg(int) domain; syscallarg(int) type; syscallarg(int) protocol; } */ *uap = v; struct filedesc *fdp = p->p_fd; struct socket *so; struct file *fp; int fd, error; fdplock(fdp); error = falloc(p, &fp, &fd); fdpunlock(fdp); if (error != 0) goto out; fp->f_flag = FREAD|FWRITE; fp->f_type = DTYPE_SOCKET; fp->f_ops = &socketops; error = socreate(SCARG(uap, domain), &so, SCARG(uap, type), SCARG(uap, protocol)); if (error) { fdplock(fdp); fdremove(fdp, fd); closef(fp, p); fdpunlock(fdp); } else { fp->f_data = so; FILE_SET_MATURE(fp, p); *retval = fd; } out: return (error); }
/*
 * NOTE(review): last-reference teardown for a namefs vnode.  Returns
 * early (under v_lock) unless this drop takes v_count to zero.  The
 * mounted file is closed only for real mount nodes (NMNMNT clear);
 * the ASSERT documents the nm_filep/nm_filevp invariant.  The vfs
 * reference is released unless this is the shared `namevfs`.
 */
/* ARGSUSED */ static void nm_inactive(vnode_t *vp, cred_t *crp, caller_context_t *ct) { struct namenode *nodep = VTONM(vp); vfs_t *vfsp = vp->v_vfsp; mutex_enter(&vp->v_lock); ASSERT(vp->v_count >= 1); if (--vp->v_count != 0) { mutex_exit(&vp->v_lock); return; } mutex_exit(&vp->v_lock); if (!(nodep->nm_flag & NMNMNT)) { ASSERT(nodep->nm_filep->f_vnode == nodep->nm_filevp); (void) closef(nodep->nm_filep); } vn_invalid(vp); vn_free(vp); if (vfsp != &namevfs) VFS_RELE(vfsp); namenodeno_free(nodep->nm_vattr.va_nodeid); kmem_free(nodep, sizeof (struct namenode)); }
/*
 * "cp" command: copy `src` to `dst`, creating `dst` if it does not
 * exist.  Copies in chunks of up to 1024 bytes.  On a write failure the
 * partial destination file is removed.  Returns 0 on success, 1 on
 * failure.
 */
int do_cp(u8 * src, u8 * dst)
{
	s8 sfd, dfd;
	/* NOTE(review): ret was previously s8; if readf()/writef() return
	 * the byte count, values in [128, 1024] would truncate.  int is
	 * safe under either convention. */
	int ret;
	u32 retlba;
	struct fnode fn;
	u32 size;
	u8 buff[1024];

	if (!check_for_args(CP, 2))
		return 1;

	sfd = openf(src, O_READ);
	if (sfd == -1) {
		vd_puts("COULD NOT OPEN SOURCE FILE\n");
		errormessage(geterror());
		fl_clean();
		return 1;
	}
	dfd = openf(dst, O_RW);
	if (dfd == -1) {
		retlba = creatf(dst, 064);
		if (retlba == NO_LBA) {
			vd_puts("COULD NOT OPEN OR CREATE THE DESTINATION FILE\n");
			errormessage(geterror());
			closef(sfd);	/* BUGFIX: source fd was leaked here */
			fl_clean();
			/* NOTE(review): returns 0 on this failure path --
			 * preserved as-is, but it looks like it should be 1. */
			return 0;
		}
		dfd = openf(dst, O_RW);
		/* BUGFIX: the reopen result was previously never checked. */
		if (dfd == -1) {
			vd_puts("COULD NOT OPEN OR CREATE THE DESTINATION FILE\n");
			errormessage(geterror());
			closef(sfd);
			fl_clean();
			return 1;
		}
	}
	ret = finfo(sfd, &fn);
	if (ret == -1) {
		vd_puts("HAD PROBLEMS WITH GETTING INFO OF SOURCE FILE\n");
		errormessage(geterror());
		closef(sfd);
		closef(dfd);
		fl_clean();
		return 1;
	}
	size = fn.size;
	while (size) {
		u32 chunk = (size < 1024 ? size : 1024);

		ret = readf(sfd, buff, chunk);
		if (ret == -1) {
			vd_puts("ERRROR OCCURED IN READING SOURCE FILE\n");
			errormessage(geterror());
			closef(sfd);
			closef(dfd);
			fl_clean();
			return 1;
		}
		ret = writef(dfd, buff, chunk);
		if (ret == -1) {
			vd_puts("ERROR IN WRITE DELETING THE FILE\n");
			errormessage(geterror());
			closef(sfd);
			closef(dfd);
			unlinkf(dst);
			fl_clean();
			return 1;
		}
		if (size >= 1024)
			size -= 1024;
		else
			size = 0;
	}
	/* BUGFIX: both descriptors were previously leaked on success. */
	closef(sfd);
	closef(dfd);
	fl_clean();
	return 0;
}
/*
 * NOTE(review): falloc() and m_get(M_WAIT, ...) may both sleep, so after
 * they return the code re-checks so_qlen and, if the queue drained while
 * asleep, unwinds the new descriptor (fdremove + closef) and jumps back
 * to `redo` -- this is the race the inline comments describe.  The
 * listening socket's FNONBLOCK flag is captured into `nflag` before
 * sleeping and propagated to the accepted file.  The whole accept runs
 * under splsoftnet(), released at `bad`; FRELE drops the hold on the
 * listening file taken by getsock().
 */
int sys_accept(struct proc *p, void *v, register_t *retval) { struct sys_accept_args /* { syscallarg(int) s; syscallarg(struct sockaddr *) name; syscallarg(socklen_t *) anamelen; } */ *uap = v; struct file *fp, *headfp; struct mbuf *nam; socklen_t namelen; int error, s, tmpfd; struct socket *head, *so; int nflag; if (SCARG(uap, name) && (error = copyin(SCARG(uap, anamelen), &namelen, sizeof (namelen)))) return (error); if ((error = getsock(p->p_fd, SCARG(uap, s), &fp)) != 0) return (error); headfp = fp; s = splsoftnet(); head = fp->f_data; redo: if ((head->so_options & SO_ACCEPTCONN) == 0) { error = EINVAL; goto bad; } if ((head->so_state & SS_NBIO) && head->so_qlen == 0) { if (head->so_state & SS_CANTRCVMORE) error = ECONNABORTED; else error = EWOULDBLOCK; goto bad; } while (head->so_qlen == 0 && head->so_error == 0) { if (head->so_state & SS_CANTRCVMORE) { head->so_error = ECONNABORTED; break; } error = tsleep(&head->so_timeo, PSOCK | PCATCH, "netcon", 0); if (error) { goto bad; } } if (head->so_error) { error = head->so_error; head->so_error = 0; goto bad; } /* Take note if socket was non-blocking. */ nflag = (headfp->f_flag & FNONBLOCK); fdplock(p->p_fd); error = falloc(p, &fp, &tmpfd); fdpunlock(p->p_fd); if (error != 0) { /* * Probably ran out of file descriptors. Wakeup * so some other process might have a chance at it. */ wakeup_one(&head->so_timeo); goto bad; } nam = m_get(M_WAIT, MT_SONAME); /* * Check whether the queue emptied while we slept: falloc() or * m_get() may have blocked, allowing the connection to be reset * or another thread or process to accept it. If so, start over. */ if (head->so_qlen == 0) { m_freem(nam); fdplock(p->p_fd); fdremove(p->p_fd, tmpfd); closef(fp, p); fdpunlock(p->p_fd); goto redo; } /* * Do not sleep after we have taken the socket out of the queue. 
*/ so = TAILQ_FIRST(&head->so_q); if (soqremque(so, 1) == 0) panic("accept"); /* connection has been removed from the listen queue */ KNOTE(&head->so_rcv.sb_sel.si_note, 0); fp->f_type = DTYPE_SOCKET; fp->f_flag = FREAD | FWRITE | nflag; fp->f_ops = &socketops; fp->f_data = so; error = soaccept(so, nam); if (!error && SCARG(uap, name)) { error = copyaddrout(p, nam, SCARG(uap, name), namelen, SCARG(uap, anamelen)); } if (error) { /* if an error occurred, free the file descriptor */ fdplock(p->p_fd); fdremove(p->p_fd, tmpfd); closef(fp, p); fdpunlock(p->p_fd); } else { FILE_SET_MATURE(fp, p); *retval = tmpfd; } m_freem(nam); bad: splx(s); FRELE(headfp, p); return (error); }
/*
 * NOTE(review): unwinding is staged by how far setup got.  free4/free3
 * tear down file+descriptor pairs via fdremove() + closef() and set
 * so2/so1 to NULL so the later free2/free1 labels do not soclose() a
 * socket whose teardown closef() already owns.  For SOCK_DGRAM the
 * connection is made in both directions (soconnect2 twice) because
 * datagram socket connection is asymmetric, as the inline comment says.
 * FILE_SET_MATURE is applied to both files only after copyout succeeds.
 */
int sys_socketpair(struct proc *p, void *v, register_t *retval) { struct sys_socketpair_args /* { syscallarg(int) domain; syscallarg(int) type; syscallarg(int) protocol; syscallarg(int *) rsv; } */ *uap = v; struct filedesc *fdp = p->p_fd; struct file *fp1, *fp2; struct socket *so1, *so2; int fd, error, sv[2]; error = socreate(SCARG(uap, domain), &so1, SCARG(uap, type), SCARG(uap, protocol)); if (error) return (error); error = socreate(SCARG(uap, domain), &so2, SCARG(uap, type), SCARG(uap, protocol)); if (error) goto free1; fdplock(fdp); if ((error = falloc(p, &fp1, &fd)) != 0) goto free2; sv[0] = fd; fp1->f_flag = FREAD|FWRITE; fp1->f_type = DTYPE_SOCKET; fp1->f_ops = &socketops; fp1->f_data = so1; if ((error = falloc(p, &fp2, &fd)) != 0) goto free3; fp2->f_flag = FREAD|FWRITE; fp2->f_type = DTYPE_SOCKET; fp2->f_ops = &socketops; fp2->f_data = so2; sv[1] = fd; if ((error = soconnect2(so1, so2)) != 0) goto free4; if (SCARG(uap, type) == SOCK_DGRAM) { /* * Datagram socket connection is asymmetric. */ if ((error = soconnect2(so2, so1)) != 0) goto free4; } error = copyout(sv, SCARG(uap, rsv), 2 * sizeof (int)); if (error == 0) { FILE_SET_MATURE(fp1, p); FILE_SET_MATURE(fp2, p); fdpunlock(fdp); return (0); } free4: fdremove(fdp, sv[1]); closef(fp2, p); so2 = NULL; free3: fdremove(fdp, sv[0]); closef(fp1, p); so1 = NULL; free2: if (so2 != NULL) (void)soclose(so2); fdpunlock(fdp); free1: if (so1 != NULL) (void)soclose(so1); return (error); }
int t_kopen(file_t *fp, dev_t rdev, int flags, TIUSER **tiptr, cred_t *cr) { int madefp = 0; struct T_info_ack inforeq; int retval; vnode_t *vp; struct strioctl strioc; int error; TIUSER *ntiptr; int rtries = 0; /* * Special case for install: miniroot needs to be able to access files * via NFS as though it were always in the global zone. */ if (nfs_global_client_only != 0) cr = kcred; KTLILOG(2, "t_kopen: fp %x, ", fp); KTLILOG(2, "rdev %x, ", rdev); KTLILOG(2, "flags %x\n", flags); *tiptr = NULL; error = 0; retval = 0; if (fp == NULL) { if (rdev == 0 || rdev == NODEV) { KTLILOG(1, "t_kopen: null device\n", 0); return (EINVAL); } /* * allocate a file pointer, but * no file descripter. */ if ((error = falloc(NULL, flags, &fp, NULL)) != 0) { KTLILOG(1, "t_kopen: falloc: %d\n", error); return (error); } /* Install proper cred in file */ if (cr != fp->f_cred) { crhold(cr); crfree(fp->f_cred); fp->f_cred = cr; } vp = makespecvp(rdev, VCHR); /* * this will call the streams open for us. * Want to retry if error is EAGAIN, the streams open routine * might fail due to temporarely out of memory. */ do { if ((error = VOP_OPEN(&vp, flags, cr)) == EAGAIN) { (void) delay(hz); } } while (error == EAGAIN && ++rtries < 5); if (error) { KTLILOG(1, "t_kopen: VOP_OPEN: %d\n", error); unfalloc(fp); VN_RELE(vp); return (error); } /* * fp is completely initialized so drop the write lock. * I actually don't need any locking on fp in here since * there is no fd pointing at it. However, since I could * call closef if there is an error and closef requires * the fp read locked, I will acquire the read lock here * and make sure I release it before I leave this routine. 
*/ fp->f_vnode = vp; mutex_exit(&fp->f_tlock); madefp = 1; } else { vp = fp->f_vnode; } if (vp->v_stream == NULL) { if (madefp) (void) closef(fp); KTLILOG(1, "t_kopen: not a streams device\n", 0); return (ENOSTR); } /* * allocate a new transport structure */ ntiptr = kmem_alloc(TIUSERSZ, KM_SLEEP); ntiptr->fp = fp; ntiptr->flags = madefp ? MADE_FP : 0; KTLILOG(2, "t_kopen: vp %x, ", vp); KTLILOG(2, "stp %x\n", vp->v_stream); /* * see if TIMOD is already pushed */ error = strioctl(vp, I_FIND, (intptr_t)"timod", 0, K_TO_K, cr, &retval); if (error) { kmem_free(ntiptr, TIUSERSZ); if (madefp) (void) closef(fp); KTLILOG(1, "t_kopen: strioctl(I_FIND, timod): %d\n", error); return (error); } if (retval == 0) { tryagain: error = strioctl(vp, I_PUSH, (intptr_t)"timod", 0, K_TO_K, cr, &retval); if (error) { switch (error) { case ENOSPC: case EAGAIN: case ENOSR: /* * This probably means the master file * should be tuned. */ cmn_err(CE_WARN, "t_kopen: I_PUSH of timod failed, error %d\n", error); (void) delay(hz); error = 0; goto tryagain; default: kmem_free(ntiptr, TIUSERSZ); if (madefp) (void) closef(fp); KTLILOG(1, "t_kopen: I_PUSH (timod): %d", error); return (error); } } } inforeq.PRIM_type = T_INFO_REQ; strioc.ic_cmd = TI_GETINFO; strioc.ic_timout = 0; strioc.ic_dp = (char *)&inforeq; strioc.ic_len = (int)sizeof (struct T_info_req); error = strdoioctl(vp->v_stream, &strioc, FNATIVE, K_TO_K, cr, &retval); if (error) { kmem_free(ntiptr, TIUSERSZ); if (madefp) (void) closef(fp); KTLILOG(1, "t_kopen: strdoioctl(T_INFO_REQ): %d\n", error); return (error); } if (retval) { if ((retval & 0xff) == TSYSERR) error = (retval >> 8) & 0xff; else error = t_tlitosyserr(retval & 0xff); kmem_free(ntiptr, TIUSERSZ); if (madefp) (void) closef(fp); KTLILOG(1, "t_kopen: strdoioctl(T_INFO_REQ): retval: 0x%x\n", retval); return (error); }
/*
 * NOTE(review): the new file (fp2) and descriptor are allocated *before*
 * sleeping on the listening socket; on the error path after soaccept(),
 * fp2's reference count is bumped so the closef() actually destroys it,
 * and fd_abort(curproc, NULL, fd) releases just the descriptor slot
 * (versus fd_abort(curproc, fp2, fd) at `bad`, which releases both).
 * `wakeup_state & SS_RESTARTSYS` converts a restart request seen during
 * the accept sleep into ERESTART.  The SOCK_NONBLOCK/SOCK_NOSIGPIPE
 * flags and `clrflags` shape the new file's f_flag; SS_NBIO on the new
 * socket is kept in sync with FNONBLOCK.  sigsuspendsetup/teardown
 * bracket the whole call when a signal mask was supplied (paccept).
 */
int do_sys_accept(struct lwp *l, int sock, struct mbuf **name, register_t *new_sock, const sigset_t *mask, int flags, int clrflags) { file_t *fp, *fp2; struct mbuf *nam; int error, fd; struct socket *so, *so2; short wakeup_state = 0; if ((fp = fd_getfile(sock)) == NULL) return EBADF; if (fp->f_type != DTYPE_SOCKET) { fd_putfile(sock); return ENOTSOCK; } if ((error = fd_allocfile(&fp2, &fd)) != 0) { fd_putfile(sock); return error; } nam = m_get(M_WAIT, MT_SONAME); *new_sock = fd; so = fp->f_socket; solock(so); if (__predict_false(mask)) sigsuspendsetup(l, mask); if (!(so->so_proto->pr_flags & PR_LISTEN)) { error = EOPNOTSUPP; goto bad; } if ((so->so_options & SO_ACCEPTCONN) == 0) { error = EINVAL; goto bad; } if ((so->so_state & SS_NBIO) && so->so_qlen == 0) { error = EWOULDBLOCK; goto bad; } while (so->so_qlen == 0 && so->so_error == 0) { if (so->so_state & SS_CANTRCVMORE) { so->so_error = ECONNABORTED; break; } if (wakeup_state & SS_RESTARTSYS) { error = ERESTART; goto bad; } error = sowait(so, true, 0); if (error) { goto bad; } wakeup_state = so->so_state; } if (so->so_error) { error = so->so_error; so->so_error = 0; goto bad; } /* connection has been removed from the listen queue */ KNOTE(&so->so_rcv.sb_sel.sel_klist, NOTE_SUBMIT); so2 = TAILQ_FIRST(&so->so_q); if (soqremque(so2, 1) == 0) panic("accept"); fp2->f_type = DTYPE_SOCKET; fp2->f_flag = (fp->f_flag & ~clrflags) | ((flags & SOCK_NONBLOCK) ? FNONBLOCK : 0)| ((flags & SOCK_NOSIGPIPE) ? 
FNOSIGPIPE : 0); fp2->f_ops = &socketops; fp2->f_socket = so2; if (fp2->f_flag & FNONBLOCK) so2->so_state |= SS_NBIO; else so2->so_state &= ~SS_NBIO; error = soaccept(so2, nam); so2->so_cred = kauth_cred_dup(so->so_cred); sounlock(so); if (error) { /* an error occurred, free the file descriptor and mbuf */ m_freem(nam); mutex_enter(&fp2->f_lock); fp2->f_count++; mutex_exit(&fp2->f_lock); closef(fp2); fd_abort(curproc, NULL, fd); } else { fd_set_exclose(l, fd, (flags & SOCK_CLOEXEC) != 0); fd_affix(curproc, fp2, fd); *name = nam; } fd_putfile(sock); if (__predict_false(mask)) sigsuspendteardown(l); return error; bad: sounlock(so); m_freem(nam); fd_putfile(sock); fd_abort(curproc, fp2, fd); if (__predict_false(mask)) sigsuspendteardown(l); return error; }
/*
 * NOTE(review): for the PFSfd-with-descriptor case, the target process's
 * file is looked up with fd_getfile2() and must be released with closef()
 * on *every* exit from the switch -- both the success paths and the
 * EOPNOTSUPP default arm do so.  Directories found behind a descriptor
 * are deliberately exposed as symlinks (the `symlink:` label) to avoid
 * cycles, as the inline comment explains.  The `bad:` label fully
 * de-initializes the vnode and frees the pfsnode so a failed load leaves
 * no half-built node behind.
 */
int procfs_loadvnode(struct mount *mp, struct vnode *vp, const void *key, size_t key_len, const void **new_key) { int error; struct pfskey pfskey; struct pfsnode *pfs; KASSERT(key_len == sizeof(pfskey)); memcpy(&pfskey, key, key_len); pfs = kmem_alloc(sizeof(*pfs), KM_SLEEP); pfs->pfs_pid = pfskey.pk_pid; pfs->pfs_type = pfskey.pk_type; pfs->pfs_fd = pfskey.pk_fd; pfs->pfs_vnode = vp; pfs->pfs_flags = 0; pfs->pfs_fileno = PROCFS_FILENO(pfs->pfs_pid, pfs->pfs_type, pfs->pfs_fd); vp->v_tag = VT_PROCFS; vp->v_op = procfs_vnodeop_p; vp->v_data = pfs; switch (pfs->pfs_type) { case PFSroot: /* /proc = dr-xr-xr-x */ vp->v_vflag |= VV_ROOT; /*FALLTHROUGH*/ case PFSproc: /* /proc/N = dr-xr-xr-x */ pfs->pfs_mode = S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH; vp->v_type = VDIR; break; case PFStask: /* /proc/N/task = dr-xr-xr-x */ if (pfs->pfs_fd == -1) { pfs->pfs_mode = S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP| S_IROTH|S_IXOTH; vp->v_type = VDIR; break; } /*FALLTHROUGH*/ case PFScurproc: /* /proc/curproc = lr-xr-xr-x */ case PFSself: /* /proc/self = lr-xr-xr-x */ case PFScwd: /* /proc/N/cwd = lr-xr-xr-x */ case PFSchroot: /* /proc/N/chroot = lr-xr-xr-x */ case PFSexe: /* /proc/N/exe = lr-xr-xr-x */ pfs->pfs_mode = S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH; vp->v_type = VLNK; break; case PFSfd: if (pfs->pfs_fd == -1) { /* /proc/N/fd = dr-x------ */ pfs->pfs_mode = S_IRUSR|S_IXUSR; vp->v_type = VDIR; } else { /* /proc/N/fd/M = [ps-]rw------- */ file_t *fp; vnode_t *vxp; struct proc *p; mutex_enter(proc_lock); p = proc_find(pfs->pfs_pid); mutex_exit(proc_lock); if (p == NULL) { error = ENOENT; goto bad; } KASSERT(rw_read_held(&p->p_reflock)); if ((fp = fd_getfile2(p, pfs->pfs_fd)) == NULL) { error = EBADF; goto bad; } pfs->pfs_mode = S_IRUSR|S_IWUSR; switch (fp->f_type) { case DTYPE_VNODE: vxp = fp->f_vnode; /* * We make symlinks for directories * to avoid cycles. 
*/ if (vxp->v_type == VDIR) goto symlink; vp->v_type = vxp->v_type; break; case DTYPE_PIPE: vp->v_type = VFIFO; break; case DTYPE_SOCKET: vp->v_type = VSOCK; break; case DTYPE_KQUEUE: case DTYPE_MISC: case DTYPE_SEM: symlink: pfs->pfs_mode = S_IRUSR|S_IXUSR|S_IRGRP| S_IXGRP|S_IROTH|S_IXOTH; vp->v_type = VLNK; break; default: error = EOPNOTSUPP; closef(fp); goto bad; } closef(fp); } break; case PFSfile: /* /proc/N/file = -rw------- */ case PFSmem: /* /proc/N/mem = -rw------- */ case PFSregs: /* /proc/N/regs = -rw------- */ case PFSfpregs: /* /proc/N/fpregs = -rw------- */ pfs->pfs_mode = S_IRUSR|S_IWUSR; vp->v_type = VREG; break; case PFSctl: /* /proc/N/ctl = --w------ */ case PFSnote: /* /proc/N/note = --w------ */ case PFSnotepg: /* /proc/N/notepg = --w------ */ pfs->pfs_mode = S_IWUSR; vp->v_type = VREG; break; case PFSmap: /* /proc/N/map = -r--r--r-- */ case PFSmaps: /* /proc/N/maps = -r--r--r-- */ case PFSstatus: /* /proc/N/status = -r--r--r-- */ case PFSstat: /* /proc/N/stat = -r--r--r-- */ case PFScmdline: /* /proc/N/cmdline = -r--r--r-- */ case PFSemul: /* /proc/N/emul = -r--r--r-- */ case PFSmeminfo: /* /proc/meminfo = -r--r--r-- */ case PFScpustat: /* /proc/stat = -r--r--r-- */ case PFSdevices: /* /proc/devices = -r--r--r-- */ case PFScpuinfo: /* /proc/cpuinfo = -r--r--r-- */ case PFSuptime: /* /proc/uptime = -r--r--r-- */ case PFSmounts: /* /proc/mounts = -r--r--r-- */ case PFSloadavg: /* /proc/loadavg = -r--r--r-- */ case PFSstatm: /* /proc/N/statm = -r--r--r-- */ case PFSversion: /* /proc/version = -r--r--r-- */ pfs->pfs_mode = S_IRUSR|S_IRGRP|S_IROTH; vp->v_type = VREG; break; #ifdef __HAVE_PROCFS_MACHDEP PROCFS_MACHDEP_NODETYPE_CASES procfs_machdep_allocvp(vp); break; #endif default: panic("procfs_allocvp"); } uvm_vnp_setsize(vp, 0); *new_key = &pfs->pfs_key; return 0; bad: vp->v_tag =VT_NON; vp->v_type = VNON; vp->v_op = NULL; vp->v_data = NULL; kmem_free(pfs, sizeof(*pfs)); return error; }
/* * File control. */ int fcntl(int fdes, int cmd, intptr_t arg) { int iarg; int error = 0; int retval; proc_t *p; file_t *fp; vnode_t *vp; u_offset_t offset; u_offset_t start; struct vattr vattr; int in_crit; int flag; struct flock sbf; struct flock64 bf; struct o_flock obf; struct flock64_32 bf64_32; struct fshare fsh; struct shrlock shr; struct shr_locowner shr_own; offset_t maxoffset; model_t datamodel; int fdres; #if defined(_ILP32) && !defined(lint) && defined(_SYSCALL32) ASSERT(sizeof (struct flock) == sizeof (struct flock32)); ASSERT(sizeof (struct flock64) == sizeof (struct flock64_32)); #endif #if defined(_LP64) && !defined(lint) && defined(_SYSCALL32) ASSERT(sizeof (struct flock) == sizeof (struct flock64_64)); ASSERT(sizeof (struct flock64) == sizeof (struct flock64_64)); #endif /* * First, for speed, deal with the subset of cases * that do not require getf() / releasef(). */ switch (cmd) { case F_GETFD: if ((error = f_getfd_error(fdes, &flag)) == 0) retval = flag; goto out; case F_SETFD: error = f_setfd_error(fdes, (int)arg); retval = 0; goto out; case F_GETFL: if ((error = f_getfl(fdes, &flag)) == 0) retval = (flag & (FMASK | FASYNC)) + FOPEN; goto out; case F_GETXFL: if ((error = f_getfl(fdes, &flag)) == 0) retval = flag + FOPEN; goto out; case F_BADFD: if ((error = f_badfd(fdes, &fdres, (int)arg)) == 0) retval = fdres; goto out; } /* * Second, for speed, deal with the subset of cases that * require getf() / releasef() but do not require copyin. 
*/ if ((fp = getf(fdes)) == NULL) { error = EBADF; goto out; } iarg = (int)arg; switch (cmd) { /* ONC_PLUS EXTRACT END */ case F_DUPFD: p = curproc; if ((uint_t)iarg >= p->p_fno_ctl) { if (iarg >= 0) fd_too_big(p); error = EINVAL; } else if ((retval = ufalloc_file(iarg, fp)) == -1) { error = EMFILE; } else { mutex_enter(&fp->f_tlock); fp->f_count++; mutex_exit(&fp->f_tlock); } goto done; case F_DUP2FD: p = curproc; if (fdes == iarg) { retval = iarg; } else if ((uint_t)iarg >= p->p_fno_ctl) { if (iarg >= 0) fd_too_big(p); error = EBADF; } else { /* * We can't hold our getf(fdes) across the call to * closeandsetf() because it creates a window for * deadlock: if one thread is doing dup2(a, b) while * another is doing dup2(b, a), each one will block * waiting for the other to call releasef(). The * solution is to increment the file reference count * (which we have to do anyway), then releasef(fdes), * then closeandsetf(). Incrementing f_count ensures * that fp won't disappear after we call releasef(). * When closeandsetf() fails, we try avoid calling * closef() because of all the side effects. */ mutex_enter(&fp->f_tlock); fp->f_count++; mutex_exit(&fp->f_tlock); releasef(fdes); if ((error = closeandsetf(iarg, fp)) == 0) { retval = iarg; } else { mutex_enter(&fp->f_tlock); if (fp->f_count > 1) { fp->f_count--; mutex_exit(&fp->f_tlock); } else { mutex_exit(&fp->f_tlock); (void) closef(fp); } } goto out; } goto done; case F_SETFL: vp = fp->f_vnode; flag = fp->f_flag; if ((iarg & (FNONBLOCK|FNDELAY)) == (FNONBLOCK|FNDELAY)) iarg &= ~FNDELAY; if ((error = VOP_SETFL(vp, flag, iarg, fp->f_cred)) == 0) { iarg &= FMASK; mutex_enter(&fp->f_tlock); fp->f_flag &= ~FMASK | (FREAD|FWRITE); fp->f_flag |= (iarg - FOPEN) & ~(FREAD|FWRITE); mutex_exit(&fp->f_tlock); } retval = 0; goto done; } /* * Finally, deal with the expensive cases. 
*/ retval = 0; in_crit = 0; maxoffset = MAXOFF_T; datamodel = DATAMODEL_NATIVE; #if defined(_SYSCALL32_IMPL) if ((datamodel = get_udatamodel()) == DATAMODEL_ILP32) maxoffset = MAXOFF32_T; #endif vp = fp->f_vnode; flag = fp->f_flag; offset = fp->f_offset; switch (cmd) { /* ONC_PLUS EXTRACT START */ /* * The file system and vnode layers understand and implement * locking with flock64 structures. So here once we pass through * the test for compatibility as defined by LFS API, (for F_SETLK, * F_SETLKW, F_GETLK, F_GETLKW, F_FREESP) we transform * the flock structure to a flock64 structure and send it to the * lower layers. Similarly in case of GETLK the returned flock64 * structure is transformed to a flock structure if everything fits * in nicely, otherwise we return EOVERFLOW. */ case F_GETLK: case F_O_GETLK: case F_SETLK: case F_SETLKW: case F_SETLK_NBMAND: /* * Copy in input fields only. */ if (cmd == F_O_GETLK) { if (datamodel != DATAMODEL_ILP32) { error = EINVAL; break; } if (copyin((void *)arg, &obf, sizeof (obf))) { error = EFAULT; break; } bf.l_type = obf.l_type; bf.l_whence = obf.l_whence; bf.l_start = (off64_t)obf.l_start; bf.l_len = (off64_t)obf.l_len; bf.l_sysid = (int)obf.l_sysid; bf.l_pid = obf.l_pid; } else if (datamodel == DATAMODEL_NATIVE) { if (copyin((void *)arg, &sbf, sizeof (sbf))) { error = EFAULT; break; } /* * XXX In an LP64 kernel with an LP64 application * there's no need to do a structure copy here * struct flock == struct flock64. However, * we did it this way to avoid more conditional * compilation. 
*/ bf.l_type = sbf.l_type; bf.l_whence = sbf.l_whence; bf.l_start = (off64_t)sbf.l_start; bf.l_len = (off64_t)sbf.l_len; bf.l_sysid = sbf.l_sysid; bf.l_pid = sbf.l_pid; } #if defined(_SYSCALL32_IMPL) else { struct flock32 sbf32; if (copyin((void *)arg, &sbf32, sizeof (sbf32))) { error = EFAULT; break; } bf.l_type = sbf32.l_type; bf.l_whence = sbf32.l_whence; bf.l_start = (off64_t)sbf32.l_start; bf.l_len = (off64_t)sbf32.l_len; bf.l_sysid = sbf32.l_sysid; bf.l_pid = sbf32.l_pid; } #endif /* _SYSCALL32_IMPL */ /* * 64-bit support: check for overflow for 32-bit lock ops */ if ((error = flock_check(vp, &bf, offset, maxoffset)) != 0) break; /* * Not all of the filesystems understand F_O_GETLK, and * there's no need for them to know. Map it to F_GETLK. */ if ((error = VOP_FRLOCK(vp, (cmd == F_O_GETLK) ? F_GETLK : cmd, &bf, flag, offset, NULL, fp->f_cred)) != 0) break; /* * If command is GETLK and no lock is found, only * the type field is changed. */ if ((cmd == F_O_GETLK || cmd == F_GETLK) && bf.l_type == F_UNLCK) { /* l_type always first entry, always a short */ if (copyout(&bf.l_type, &((struct flock *)arg)->l_type, sizeof (bf.l_type))) error = EFAULT; break; } if (cmd == F_O_GETLK) { /* * Return an SVR3 flock structure to the user. */ obf.l_type = (int16_t)bf.l_type; obf.l_whence = (int16_t)bf.l_whence; obf.l_start = (int32_t)bf.l_start; obf.l_len = (int32_t)bf.l_len; if (bf.l_sysid > SHRT_MAX || bf.l_pid > SHRT_MAX) { /* * One or both values for the above fields * is too large to store in an SVR3 flock * structure. */ error = EOVERFLOW; break; } obf.l_sysid = (int16_t)bf.l_sysid; obf.l_pid = (int16_t)bf.l_pid; if (copyout(&obf, (void *)arg, sizeof (obf))) error = EFAULT; } else if (cmd == F_GETLK) { /* * Copy out SVR4 flock. 
*/ int i; if (bf.l_start > maxoffset || bf.l_len > maxoffset) { error = EOVERFLOW; break; } if (datamodel == DATAMODEL_NATIVE) { for (i = 0; i < 4; i++) sbf.l_pad[i] = 0; /* * XXX In an LP64 kernel with an LP64 * application there's no need to do a * structure copy here as currently * struct flock == struct flock64. * We did it this way to avoid more * conditional compilation. */ sbf.l_type = bf.l_type; sbf.l_whence = bf.l_whence; sbf.l_start = (off_t)bf.l_start; sbf.l_len = (off_t)bf.l_len; sbf.l_sysid = bf.l_sysid; sbf.l_pid = bf.l_pid; if (copyout(&sbf, (void *)arg, sizeof (sbf))) error = EFAULT; } #if defined(_SYSCALL32_IMPL) else { struct flock32 sbf32; if (bf.l_start > MAXOFF32_T || bf.l_len > MAXOFF32_T) { error = EOVERFLOW; break; } for (i = 0; i < 4; i++) sbf32.l_pad[i] = 0; sbf32.l_type = (int16_t)bf.l_type; sbf32.l_whence = (int16_t)bf.l_whence; sbf32.l_start = (off32_t)bf.l_start; sbf32.l_len = (off32_t)bf.l_len; sbf32.l_sysid = (int32_t)bf.l_sysid; sbf32.l_pid = (pid32_t)bf.l_pid; if (copyout(&sbf32, (void *)arg, sizeof (sbf32))) error = EFAULT; } #endif } break; /* ONC_PLUS EXTRACT END */ case F_CHKFL: /* * This is for internal use only, to allow the vnode layer * to validate a flags setting before applying it. User * programs can't issue it. */ error = EINVAL; break; case F_ALLOCSP: case F_FREESP: case F_ALLOCSP64: case F_FREESP64: if ((flag & FWRITE) == 0) { error = EBADF; break; } if (vp->v_type != VREG) { error = EINVAL; break; } if (datamodel != DATAMODEL_ILP32 && (cmd == F_ALLOCSP64 || cmd == F_FREESP64)) { error = EINVAL; break; } #if defined(_ILP32) || defined(_SYSCALL32_IMPL) if (datamodel == DATAMODEL_ILP32 && (cmd == F_ALLOCSP || cmd == F_FREESP)) { struct flock32 sbf32; /* * For compatibility we overlay an SVR3 flock on an SVR4 * flock. This works because the input field offsets * in "struct flock" were preserved. 
*/ if (copyin((void *)arg, &sbf32, sizeof (sbf32))) { error = EFAULT; break; } else { bf.l_type = sbf32.l_type; bf.l_whence = sbf32.l_whence; bf.l_start = (off64_t)sbf32.l_start; bf.l_len = (off64_t)sbf32.l_len; bf.l_sysid = sbf32.l_sysid; bf.l_pid = sbf32.l_pid; } } #endif /* _ILP32 || _SYSCALL32_IMPL */ #if defined(_LP64) if (datamodel == DATAMODEL_LP64 && (cmd == F_ALLOCSP || cmd == F_FREESP)) { if (copyin((void *)arg, &bf, sizeof (bf))) { error = EFAULT; break; } } #endif /* defined(_LP64) */ #if !defined(_LP64) || defined(_SYSCALL32_IMPL) if (datamodel == DATAMODEL_ILP32 && (cmd == F_ALLOCSP64 || cmd == F_FREESP64)) { if (copyin((void *)arg, &bf64_32, sizeof (bf64_32))) { error = EFAULT; break; } else { /* * Note that the size of flock64 is different in * the ILP32 and LP64 models, due to the l_pad * field. We do not want to assume that the * flock64 structure is laid out the same in * ILP32 and LP64 environments, so we will * copy in the ILP32 version of flock64 * explicitly and copy it to the native * flock64 structure. */ bf.l_type = (short)bf64_32.l_type; bf.l_whence = (short)bf64_32.l_whence; bf.l_start = bf64_32.l_start; bf.l_len = bf64_32.l_len; bf.l_sysid = (int)bf64_32.l_sysid; bf.l_pid = (pid_t)bf64_32.l_pid; } } #endif /* !defined(_LP64) || defined(_SYSCALL32_IMPL) */ if (cmd == F_ALLOCSP || cmd == F_FREESP) error = flock_check(vp, &bf, offset, maxoffset); else if (cmd == F_ALLOCSP64 || cmd == F_FREESP64) error = flock_check(vp, &bf, offset, MAXOFFSET_T); if (error) break; if (vp->v_type == VREG && bf.l_len == 0 && bf.l_start > OFFSET_MAX(fp)) { error = EFBIG; break; } /* * Make sure that there are no conflicting non-blocking * mandatory locks in the region being manipulated. If * there are such locks then return EACCES. 
*/ if ((error = flock_get_start(vp, &bf, offset, &start)) != 0) break; if (nbl_need_check(vp)) { u_offset_t begin; ssize_t length; nbl_start_crit(vp, RW_READER); in_crit = 1; vattr.va_mask = AT_SIZE; if ((error = VOP_GETATTR(vp, &vattr, 0, CRED())) != 0) break; begin = start > vattr.va_size ? vattr.va_size : start; length = vattr.va_size > start ? vattr.va_size - start : start - vattr.va_size; if (nbl_conflict(vp, NBL_WRITE, begin, length, 0)) { error = EACCES; break; } } if (cmd == F_ALLOCSP64) cmd = F_ALLOCSP; else if (cmd == F_FREESP64) cmd = F_FREESP; error = VOP_SPACE(vp, cmd, &bf, flag, offset, fp->f_cred, NULL); break; #if !defined(_LP64) || defined(_SYSCALL32_IMPL) /* ONC_PLUS EXTRACT START */ case F_GETLK64: case F_SETLK64: case F_SETLKW64: case F_SETLK64_NBMAND: /* * Large Files: Here we set cmd as *LK and send it to * lower layers. *LK64 is only for the user land. * Most of the comments described above for F_SETLK * applies here too. * Large File support is only needed for ILP32 apps! */ if (datamodel != DATAMODEL_ILP32) { error = EINVAL; break; } if (cmd == F_GETLK64) cmd = F_GETLK; else if (cmd == F_SETLK64) cmd = F_SETLK; else if (cmd == F_SETLKW64) cmd = F_SETLKW; else if (cmd == F_SETLK64_NBMAND) cmd = F_SETLK_NBMAND; /* * Note that the size of flock64 is different in the ILP32 * and LP64 models, due to the sucking l_pad field. * We do not want to assume that the flock64 structure is * laid out in the same in ILP32 and LP64 environments, so * we will copy in the ILP32 version of flock64 explicitly * and copy it to the native flock64 structure. 
*/ if (copyin((void *)arg, &bf64_32, sizeof (bf64_32))) { error = EFAULT; break; } bf.l_type = (short)bf64_32.l_type; bf.l_whence = (short)bf64_32.l_whence; bf.l_start = bf64_32.l_start; bf.l_len = bf64_32.l_len; bf.l_sysid = (int)bf64_32.l_sysid; bf.l_pid = (pid_t)bf64_32.l_pid; if ((error = flock_check(vp, &bf, offset, MAXOFFSET_T)) != 0) break; if ((error = VOP_FRLOCK(vp, cmd, &bf, flag, offset, NULL, fp->f_cred)) != 0) break; if ((cmd == F_GETLK) && bf.l_type == F_UNLCK) { if (copyout(&bf.l_type, &((struct flock *)arg)->l_type, sizeof (bf.l_type))) error = EFAULT; break; } if (cmd == F_GETLK) { int i; /* * We do not want to assume that the flock64 structure * is laid out in the same in ILP32 and LP64 * environments, so we will copy out the ILP32 version * of flock64 explicitly after copying the native * flock64 structure to it. */ for (i = 0; i < 4; i++) bf64_32.l_pad[i] = 0; bf64_32.l_type = (int16_t)bf.l_type; bf64_32.l_whence = (int16_t)bf.l_whence; bf64_32.l_start = bf.l_start; bf64_32.l_len = bf.l_len; bf64_32.l_sysid = (int32_t)bf.l_sysid; bf64_32.l_pid = (pid32_t)bf.l_pid; if (copyout(&bf64_32, (void *)arg, sizeof (bf64_32))) error = EFAULT; } break; /* ONC_PLUS EXTRACT END */ #endif /* !defined(_LP64) || defined(_SYSCALL32_IMPL) */ /* ONC_PLUS EXTRACT START */ case F_SHARE: case F_SHARE_NBMAND: case F_UNSHARE: /* * Copy in input fields only. */ if (copyin((void *)arg, &fsh, sizeof (fsh))) { error = EFAULT; break; } /* * Local share reservations always have this simple form */ shr.s_access = fsh.f_access; shr.s_deny = fsh.f_deny; shr.s_sysid = 0; shr.s_pid = ttoproc(curthread)->p_pid; shr_own.sl_pid = shr.s_pid; shr_own.sl_id = fsh.f_id; shr.s_own_len = sizeof (shr_own); shr.s_owner = (caddr_t)&shr_own; error = VOP_SHRLOCK(vp, cmd, &shr, flag, fp->f_cred); /* ONC_PLUS EXTRACT END */ break; default: error = EINVAL; break; } if (in_crit) nbl_end_crit(vp); done: releasef(fdes); out: if (error) return (set_errno(error)); return (retval); }
/*
 * exec_clear() -- reset the process image for a freshly exec'd program:
 * zero the new BSS/heap/stack, apply the exec-time credentials (unless
 * traced), record the new segment sizes, clear the saved register
 * frame, reset signal dispositions, and close close-on-exec files.
 * Operates on the current process via the global user area `u'.
 */
void exec_clear(struct exec_params *epp)
{
	char *cp;
	int cc;

	/* clear BSS */
	if (epp->bss.len > 0)
		bzero((void *)epp->bss.vaddr, epp->bss.len);
	if (epp->heap.len > 0)
		bzero((void *)epp->heap.vaddr, epp->heap.len);

	/* Clear stack */
	bzero((void *)epp->stack.vaddr, epp->stack.len);

	/*
	 * set SUID/SGID protections, if no tracing
	 * (a traced process keeps its old ids and is stopped with SIGTRAP
	 * instead, so a debugger cannot ride a set-id exec)
	 */
	if ((u.u_procp->p_flag & P_TRACED) == 0) {
		u.u_uid = epp->uid;
		u.u_procp->p_uid = epp->uid;
		u.u_groups[0] = epp->gid;
	} else
		psignal (u.u_procp, SIGTRAP);
	/* saved ids track whatever the effective ids are now */
	u.u_svuid = u.u_uid;
	u.u_svgid = u.u_groups[0];

	/* record new segment sizes (data includes BSS) */
	u.u_tsize = epp->text.len;
	u.u_dsize = epp->data.len + epp->bss.len;
	u.u_ssize = epp->stack.len;

	/*
	 * Clear registers.
	 * NOTE(review): $a0-$a2 (FRAME_R4..R6) are deliberately not
	 * cleared -- presumably they carry argc/argv/environ to the new
	 * image; confirm against the exec entry code.
	 */
	u.u_frame [FRAME_R1] = 0;		/* $at */
	u.u_frame [FRAME_R2] = 0;		/* $v0 */
	u.u_frame [FRAME_R3] = 0;		/* $v1 */
	u.u_frame [FRAME_R7] = 0;		/* $a3 */
	u.u_frame [FRAME_R8] = 0;		/* $t0 */
	u.u_frame [FRAME_R9] = 0;		/* $t1 */
	u.u_frame [FRAME_R10] = 0;		/* $t2 */
	u.u_frame [FRAME_R11] = 0;		/* $t3 */
	u.u_frame [FRAME_R12] = 0;		/* $t4 */
	u.u_frame [FRAME_R13] = 0;		/* $t5 */
	u.u_frame [FRAME_R14] = 0;		/* $t6 */
	u.u_frame [FRAME_R15] = 0;		/* $t7 */
	u.u_frame [FRAME_R16] = 0;		/* $s0 */
	u.u_frame [FRAME_R17] = 0;		/* $s1 */
	u.u_frame [FRAME_R18] = 0;		/* $s2 */
	u.u_frame [FRAME_R19] = 0;		/* $s3 */
	u.u_frame [FRAME_R20] = 0;		/* $s4 */
	u.u_frame [FRAME_R21] = 0;		/* $s5 */
	u.u_frame [FRAME_R22] = 0;		/* $s6 */
	u.u_frame [FRAME_R23] = 0;		/* $s7 */
	u.u_frame [FRAME_R24] = 0;		/* $t8 */
	u.u_frame [FRAME_R25] = 0;		/* $t9 */
	u.u_frame [FRAME_FP] = 0;
	u.u_frame [FRAME_RA] = 0;
	u.u_frame [FRAME_LO] = 0;
	u.u_frame [FRAME_HI] = 0;
	u.u_frame [FRAME_GP] = 0;

	/* reset caught signals to default for the new image */
	execsigs (u.u_procp);

	/*
	 * Clear (close) files
	 * (only descriptors marked close-on-exec; slot and flag byte are
	 * both cleared)
	 */
	for (cp = u.u_pofile, cc = 0; cc <= u.u_lastfile; cc++, cp++) {
		if (*cp & UF_EXCLOSE) {
			(void) closef (u.u_ofile [cc]);
			u.u_ofile [cc] = NULL;
			*cp = 0;
		}
	}
	/* shrink the high-water mark past any trailing empty slots */
	while (u.u_lastfile >= 0 && u.u_ofile [u.u_lastfile] == NULL)
		u.u_lastfile--;
}
/*
 * dopipe() -- common back end for pipe(2)/pipe2(2).  Creates a
 * connected pair of pipe endpoints, wraps them in two new open files /
 * descriptors, and copies the descriptor numbers out to userland at
 * `ufds'.  `flags' may carry FNONBLOCK (applied to both ends' f_flag)
 * and O_CLOEXEC (sets UF_EXCLOSE on both fds).  Returns 0 or an errno.
 *
 * Locking: the pipe structures are created before taking the file
 * descriptor table lock; the error labels unwind in reverse order.
 */
int dopipe(struct proc *p, int *ufds, int flags)
{
	struct filedesc *fdp = p->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe = NULL;
	int fds[2], error;

	rpipe = pool_get(&pipe_pool, PR_WAITOK);
	error = pipe_create(rpipe);
	if (error != 0)
		goto free1;
	wpipe = pool_get(&pipe_pool, PR_WAITOK);
	error = pipe_create(wpipe);
	if (error != 0)
		goto free1;

	fdplock(fdp);

	/* read side */
	error = falloc(p, &rf, &fds[0]);
	if (error != 0)
		goto free2;
	rf->f_flag = FREAD | FWRITE | (flags & FNONBLOCK);
	rf->f_type = DTYPE_PIPE;
	rf->f_data = rpipe;
	rf->f_ops = &pipeops;

	/* write side */
	error = falloc(p, &wf, &fds[1]);
	if (error != 0)
		goto free3;
	wf->f_flag = FREAD | FWRITE | (flags & FNONBLOCK);
	wf->f_type = DTYPE_PIPE;
	wf->f_data = wpipe;
	wf->f_ops = &pipeops;

	if (flags & O_CLOEXEC) {
		fdp->fd_ofileflags[fds[0]] |= UF_EXCLOSE;
		fdp->fd_ofileflags[fds[1]] |= UF_EXCLOSE;
	}

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	FILE_SET_MATURE(rf, p);
	FILE_SET_MATURE(wf, p);

	error = copyout(fds, ufds, sizeof(fds));
	if (error != 0) {
		/* could not report the fds to the caller: take both back */
		fdrelease(p, fds[0]);
		fdrelease(p, fds[1]);
	}
	fdpunlock(fdp);
	return (error);

free3:
	fdremove(fdp, fds[0]);
	/* closef() on the mature-enough read file drops rpipe with it */
	closef(rf, p);
	rpipe = NULL;
free2:
	fdpunlock(fdp);
free1:
	/* NOTE(review): wpipe/rpipe may be NULL here -- pipeclose() is
	 * presumably NULL-tolerant; confirm in its definition. */
	pipeclose(wpipe);
	pipeclose(rpipe);
	return (error);
}
/*
 * sys_execve() -- execve(2).  Phases, in order: stop sibling threads,
 * optionally snapshot the path for systrace, check the executable
 * (check_exec), copy in argv/environ, size and build the new address
 * space via vmcmds, copy out ps_strings, reset fds/signals/credentials
 * (including set[ug]id handling and forcing fds 0-2 open for suid
 * processes), reset timers and CPU accounting, fix up registers and map
 * the signal trampoline.  After the "we're committed" point any error
 * aborts the process via exec_abort/free_pack_abort instead of
 * returning.  Code below is unchanged; only review notes are inserted
 * inside existing comments.
 */
/* ARGSUSED */ int sys_execve(struct proc *p, void *v, register_t *retval) { struct sys_execve_args /* { syscallarg(const char *) path; syscallarg(char *const *) argp; syscallarg(char *const *) envp; } */ *uap = v; int error; struct exec_package pack; struct nameidata nid; struct vattr attr; struct ucred *cred = p->p_ucred; char *argp; char * const *cpp, *dp, *sp; #ifdef KTRACE char *env_start; #endif struct process *pr = p->p_p; long argc, envc; size_t len, sgap; #ifdef MACHINE_STACK_GROWS_UP size_t slen; #endif char *stack; struct ps_strings arginfo; struct vmspace *vm = pr->ps_vmspace; char **tmpfap; extern struct emul emul_native; #if NSYSTRACE > 0 int wassugid = ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC); size_t pathbuflen; #endif char *pathbuf = NULL; struct vnode *otvp; /* get other threads to stop */ if ((error = single_thread_set(p, SINGLE_UNWIND, 1))) return (error); /* * Cheap solution to complicated problems. * Mark this process as "leave me alone, I'm execing". */ atomic_setbits_int(&pr->ps_flags, PS_INEXEC); #if NSYSTRACE > 0 if (ISSET(p->p_flag, P_SYSTRACE)) { systrace_execve0(p); pathbuf = pool_get(&namei_pool, PR_WAITOK); error = copyinstr(SCARG(uap, path), pathbuf, MAXPATHLEN, &pathbuflen); if (error != 0) goto clrflag; } #endif if (pathbuf != NULL) { NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_SYSSPACE, pathbuf, p); } else { NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), p); } /* * initialize the fields of the exec package. */ if (pathbuf != NULL) pack.ep_name = pathbuf; else pack.ep_name = (char *)SCARG(uap, path); pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK); pack.ep_hdrlen = exec_maxhdrsz; pack.ep_hdrvalid = 0; pack.ep_ndp = &nid; pack.ep_interp = NULL; pack.ep_emul_arg = NULL; VMCMDSET_INIT(&pack.ep_vmcmds); pack.ep_vap = &attr; pack.ep_emul = &emul_native; pack.ep_flags = 0; /* see if we can run it.
 * NOTE(review): on failure below, only the header buffer has been
 * allocated so far -- we unwind through freehdr, not bad.
*/ if ((error = check_exec(p, &pack)) != 0) { goto freehdr; } /* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */ /* allocate an argument buffer */ argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok); #ifdef DIAGNOSTIC if (argp == NULL) panic("execve: argp == NULL"); #endif dp = argp; argc = 0; /* copy the fake args list, if there's one, freeing it as we go */ if (pack.ep_flags & EXEC_HASARGL) { tmpfap = pack.ep_fa; while (*tmpfap != NULL) { char *cp; cp = *tmpfap; while (*cp) *dp++ = *cp++; *dp++ = '\0'; free(*tmpfap, M_EXEC, 0); tmpfap++; argc++; } free(pack.ep_fa, M_EXEC, 0); pack.ep_flags &= ~EXEC_HASARGL; } /* Now get argv & environment */ if (!(cpp = SCARG(uap, argp))) { error = EFAULT; goto bad; } if (pack.ep_flags & EXEC_SKIPARG) cpp++; while (1) { len = argp + ARG_MAX - dp; if ((error = copyin(cpp, &sp, sizeof(sp))) != 0) goto bad; if (!sp) break; if ((error = copyinstr(sp, dp, len, &len)) != 0) { if (error == ENAMETOOLONG) error = E2BIG; goto bad; } dp += len; cpp++; argc++; } /* must have at least one argument */ if (argc == 0) { error = EINVAL; goto bad; } #ifdef KTRACE if (KTRPOINT(p, KTR_EXECARGS)) ktrexec(p, KTR_EXECARGS, argp, dp - argp); #endif envc = 0; /* environment does not need to be there */ if ((cpp = SCARG(uap, envp)) != NULL ) { #ifdef KTRACE env_start = dp; #endif while (1) { len = argp + ARG_MAX - dp; if ((error = copyin(cpp, &sp, sizeof(sp))) != 0) goto bad; if (!sp) break; if ((error = copyinstr(sp, dp, len, &len)) != 0) { if (error == ENAMETOOLONG) error = E2BIG; goto bad; } dp += len; cpp++; envc++; } #ifdef KTRACE if (KTRPOINT(p, KTR_EXECENV)) ktrexec(p, KTR_EXECENV, env_start, dp - env_start); #endif } dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES); sgap = STACKGAPLEN; /* * If we have enabled random stackgap, the stack itself has already * been moved from a random location, but is still aligned to a page * boundary. Provide the lower bits of random placement now.
 * (sgap is folded into the strings-area size check just below)
*/ if (stackgap_random != 0) { sgap += arc4random() & PAGE_MASK; sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES; } /* Now check if args & environ fit into new stack */ len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) + sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp; len = (len + _STACKALIGNBYTES) &~ _STACKALIGNBYTES; if (len > pack.ep_ssize) { /* in effect, compare to initial limit */ error = ENOMEM; goto bad; } /* adjust "active stack depth" for process VSZ */ pack.ep_ssize = len; /* maybe should go elsewhere, but... */ /* * we're committed: any further errors will kill the process, so * kill the other threads now. */ single_thread_set(p, SINGLE_EXIT, 0); /* * Prepare vmspace for remapping. Note that uvmspace_exec can replace * pr_vmspace! */ uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS); vm = pr->ps_vmspace; /* Now map address space */ vm->vm_taddr = (char *)trunc_page(pack.ep_taddr); vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) - trunc_page(pack.ep_taddr)); vm->vm_daddr = (char *)trunc_page(pack.ep_daddr); vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) - trunc_page(pack.ep_daddr)); vm->vm_dused = 0; vm->vm_ssize = atop(round_page(pack.ep_ssize)); vm->vm_maxsaddr = (char *)pack.ep_maxsaddr; vm->vm_minsaddr = (char *)pack.ep_minsaddr; /* create the new process's VM space by running the vmcmds */ #ifdef DIAGNOSTIC if (pack.ep_vmcmds.evs_used == 0) panic("execve: no vmcmds"); #endif error = exec_process_vmcmds(p, &pack); /* if an error happened, deallocate and punt */ if (error) goto exec_abort; /* old "stackgap" is gone now */ pr->ps_stackgap = 0; #ifdef MACHINE_STACK_GROWS_UP pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap; if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr, trunc_page(pr->ps_strings), PROT_NONE, TRUE)) goto exec_abort; #else pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap; if (uvm_map_protect(&vm->vm_map, round_page(pr->ps_strings + 
sizeof(arginfo)), (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE)) goto exec_abort; #endif /* remember information about the process */ arginfo.ps_nargvstr = argc; arginfo.ps_nenvstr = envc; #ifdef MACHINE_STACK_GROWS_UP stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap; slen = len - sizeof(arginfo) - sgap; #else stack = (char *)(vm->vm_minsaddr - len); #endif /* Now copy argc, args & environ to new stack */ if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp)) goto exec_abort; /* copy out the process's ps_strings structure */ if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo))) goto exec_abort; stopprofclock(pr); /* stop profiling */ fdcloseexec(p); /* handle close on exec */ execsigs(p); /* reset caught signals */ TCB_SET(p, NULL); /* reset the TCB address */ pr->ps_kbind_addr = 0; /* reset the kbind bits */ pr->ps_kbind_cookie = 0; /* set command name & other accounting info */ memset(p->p_comm, 0, sizeof(p->p_comm)); len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN); memcpy(p->p_comm, nid.ni_cnd.cn_nameptr, len); pr->ps_acflag &= ~AFORK; /* record proc's vnode, for use by sysctl */ otvp = pr->ps_textvp; vref(pack.ep_vp); pr->ps_textvp = pack.ep_vp; if (otvp) vrele(otvp); atomic_setbits_int(&pr->ps_flags, PS_EXEC); if (pr->ps_flags & PS_PPWAIT) { atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT); atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT); wakeup(pr->ps_pptr); } /* * If process does execve() while it has a mismatched real, * effective, or saved uid/gid, we set PS_SUGIDEXEC. */ if (cred->cr_uid != cred->cr_ruid || cred->cr_uid != cred->cr_svuid || cred->cr_gid != cred->cr_rgid || cred->cr_gid != cred->cr_svgid) atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC); else atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC); atomic_clearbits_int(&pr->ps_flags, PS_TAMED); tame_dropwpaths(pr); /* * deal with set[ug]id. * MNT_NOEXEC has already been used to disable s[ug]id.
 * NOTE(review): for set[ug]id images, fds 0..2 are forced open on the
 * null device below so libc's implied descriptors cannot be hijacked.
*/ if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) { int i; atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC); #ifdef KTRACE /* * If process is being ktraced, turn off - unless * root set it. */ if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT)) ktrcleartrace(pr); #endif p->p_ucred = cred = crcopy(cred); if (attr.va_mode & VSUID) cred->cr_uid = attr.va_uid; if (attr.va_mode & VSGID) cred->cr_gid = attr.va_gid; /* * For set[ug]id processes, a few caveats apply to * stdin, stdout, and stderr. */ error = 0; fdplock(p->p_fd); for (i = 0; i < 3; i++) { struct file *fp = NULL; /* * NOTE - This will never return NULL because of * immature fds. The file descriptor table is not * shared because we're suid. */ fp = fd_getfile(p->p_fd, i); /* * Ensure that stdin, stdout, and stderr are already * allocated. We do not want userland to accidentally * allocate descriptors in this range which has implied * meaning to libc. */ if (fp == NULL) { short flags = FREAD | (i == 0 ? 0 : FWRITE); struct vnode *vp; int indx; if ((error = falloc(p, &fp, &indx)) != 0) break; #ifdef DIAGNOSTIC if (indx != i) panic("sys_execve: falloc indx != i"); #endif if ((error = cdevvp(getnulldev(), &vp)) != 0) { fdremove(p->p_fd, indx); closef(fp, p); break; } if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) { fdremove(p->p_fd, indx); closef(fp, p); vrele(vp); break; } if (flags & FWRITE) vp->v_writecount++; fp->f_flag = flags; fp->f_type = DTYPE_VNODE; fp->f_ops = &vnops; fp->f_data = (caddr_t)vp; FILE_SET_MATURE(fp, p); } } fdpunlock(p->p_fd); if (error) goto exec_abort; } else atomic_clearbits_int(&pr->ps_flags, PS_SUGID); /* * Reset the saved ugids and update the process's copy of the * creds if the creds have been changed */ if (cred->cr_uid != cred->cr_svuid || cred->cr_gid != cred->cr_svgid) { /* make sure we have unshared ucreds */ p->p_ucred = cred = crcopy(cred); cred->cr_svuid = cred->cr_uid; cred->cr_svgid = cred->cr_gid; } if (pr->ps_ucred != cred) { struct ucred 
*ocred; ocred = pr->ps_ucred; crhold(cred); pr->ps_ucred = cred; crfree(ocred); } if (pr->ps_flags & PS_SUGIDEXEC) { int i, s = splclock(); timeout_del(&pr->ps_realit_to); for (i = 0; i < nitems(pr->ps_timer); i++) { timerclear(&pr->ps_timer[i].it_interval); timerclear(&pr->ps_timer[i].it_value); } splx(s); } /* reset CPU time usage for the thread, but not the process */ timespecclear(&p->p_tu.tu_runtime); p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0; km_free(argp, NCARGS, &kv_exec, &kp_pageable); pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf); vn_close(pack.ep_vp, FREAD, cred, p); /* * notify others that we exec'd */ KNOTE(&pr->ps_klist, NOTE_EXEC); /* setup new registers and do misc. setup. */ if (pack.ep_emul->e_fixup != NULL) { if ((*pack.ep_emul->e_fixup)(p, &pack) != 0) goto free_pack_abort; } #ifdef MACHINE_STACK_GROWS_UP (*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval); #else (*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval); #endif /* map the process's signal trampoline code */ if (exec_sigcode_map(pr, pack.ep_emul)) goto free_pack_abort; #ifdef __HAVE_EXEC_MD_MAP /* perform md specific mappings that process might need */ if (exec_md_map(p, &pack)) goto free_pack_abort; #endif if (pr->ps_flags & PS_TRACED) psignal(p, SIGTRAP); free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen); /* * Call emulation specific exec hook. This can setup per-process * p->p_emuldata or do any other per-process stuff an emulation needs. * * If we are executing process of different emulation than the * original forked process, call e_proc_exit() of the old emulation * first, then e_proc_exec() of new emulation. If the emulation is * same, the exec hook code should deallocate any old emulation * resources held previously by this process.
 * (success path continues; the labels below are the error unwinds)
*/ if (pr->ps_emul && pr->ps_emul->e_proc_exit && pr->ps_emul != pack.ep_emul) (*pr->ps_emul->e_proc_exit)(p); p->p_descfd = 255; if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255) p->p_descfd = pack.ep_fd; /* * Call exec hook. Emulation code may NOT store reference to anything * from &pack. */ if (pack.ep_emul->e_proc_exec) (*pack.ep_emul->e_proc_exec)(p, &pack); #if defined(KTRACE) && defined(COMPAT_LINUX) /* update ps_emul, but don't ktrace it if native-execing-native */ if (pr->ps_emul != pack.ep_emul || pack.ep_emul != &emul_native) { pr->ps_emul = pack.ep_emul; if (KTRPOINT(p, KTR_EMUL)) ktremul(p); } #else /* update ps_emul, the old value is no longer needed */ pr->ps_emul = pack.ep_emul; #endif atomic_clearbits_int(&pr->ps_flags, PS_INEXEC); single_thread_clear(p, P_SUSPSIG); #if NSYSTRACE > 0 if (ISSET(p->p_flag, P_SYSTRACE) && wassugid && !ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC)) systrace_execve1(pathbuf, p); #endif if (pathbuf != NULL) pool_put(&namei_pool, pathbuf); return (0); bad: /* free the vmspace-creation commands, and release their references */ kill_vmcmds(&pack.ep_vmcmds); /* kill any opened file descriptor, if necessary */ if (pack.ep_flags & EXEC_HASFD) { pack.ep_flags &= ~EXEC_HASFD; fdplock(p->p_fd); (void) fdrelease(p, pack.ep_fd); fdpunlock(p->p_fd); } if (pack.ep_interp != NULL) pool_put(&namei_pool, pack.ep_interp); if (pack.ep_emul_arg != NULL) free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize); /* close and put the exec'd file */ vn_close(pack.ep_vp, FREAD, cred, p); pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf); km_free(argp, NCARGS, &kv_exec, &kp_pageable); freehdr: free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen); #if NSYSTRACE > 0 clrflag: #endif atomic_clearbits_int(&pr->ps_flags, PS_INEXEC); single_thread_clear(p, P_SUSPSIG); if (pathbuf != NULL) pool_put(&namei_pool, pathbuf); return (error); exec_abort: /* * the old process doesn't exist anymore. exit gracefully.
 * (exit1() below does not return; the trailing statements are dead
 * code kept for symmetry)
 * get rid of the (new) address space we have created, if any, get rid * of our namei data and vnode, and exit noting failure */ uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS); if (pack.ep_interp != NULL) pool_put(&namei_pool, pack.ep_interp); if (pack.ep_emul_arg != NULL) free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize); pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf); vn_close(pack.ep_vp, FREAD, cred, p); km_free(argp, NCARGS, &kv_exec, &kp_pageable); free_pack_abort: free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen); if (pathbuf != NULL) pool_put(&namei_pool, pathbuf); exit1(p, W_EXITCODE(0, SIGABRT), EXIT_NORMAL); /* NOTREACHED */ atomic_clearbits_int(&pr->ps_flags, PS_INEXEC); return (0); }
/*
 * Debug shim for close(2): report which descriptor the process is
 * closing on stderr, then delegate the actual close to closef().
 */
int
close(int d)
{
	int pid = (int)getpid();

	fprintf(stderr, "[%d] closing %d\n", pid, d);
	return closef(d);
}
int ptmioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) { dev_t newdev, error; struct pt_softc * pti; struct nameidata cnd, snd; struct filedesc *fdp = p->p_fd; struct file *cfp = NULL, *sfp = NULL; int cindx, sindx; uid_t uid; gid_t gid; struct vattr vattr; struct ucred *cred; struct ptmget *ptm = (struct ptmget *)data; error = 0; switch (cmd) { case PTMGET: fdplock(fdp); /* Grab two filedescriptors. */ if ((error = falloc(p, &cfp, &cindx)) != 0) { fdpunlock(fdp); break; } if ((error = falloc(p, &sfp, &sindx)) != 0) { fdremove(fdp, cindx); closef(cfp, p); fdpunlock(fdp); break; } retry: /* Find and open a free master pty. */ newdev = pty_getfree(); if ((error = check_pty(minor(newdev)))) goto bad; pti = pt_softc[minor(newdev)]; NDINIT(&cnd, LOOKUP, NOFOLLOW|LOCKLEAF, UIO_SYSSPACE, pti->pty_pn, p); if ((error = ptm_vn_open(&cnd)) != 0) { /* * Check if the master open failed because we lost * the race to grab it. */ if (error == EIO && !pty_isfree(minor(newdev))) goto retry; goto bad; } cfp->f_flag = FREAD|FWRITE; cfp->f_type = DTYPE_VNODE; cfp->f_ops = &vnops; cfp->f_data = (caddr_t) cnd.ni_vp; VOP_UNLOCK(cnd.ni_vp, 0, p); /* * Open the slave. * namei -> setattr -> unlock -> revoke -> vrele -> * namei -> open -> unlock * Three stage rocket: * 1. Change the owner and permissions on the slave. * 2. Revoke all the users of the slave. * 3. open the slave. */ NDINIT(&snd, LOOKUP, NOFOLLOW|LOCKLEAF, UIO_SYSSPACE, pti->pty_sn, p); if ((error = namei(&snd)) != 0) goto bad; if ((snd.ni_vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { gid = tty_gid; /* get real uid */ uid = p->p_cred->p_ruid; VATTR_NULL(&vattr); vattr.va_uid = uid; vattr.va_gid = gid; vattr.va_mode = (S_IRUSR|S_IWUSR|S_IWGRP) & ALLPERMS; /* Get a fake cred to pretend we're root. 
*/ cred = crget(); error = VOP_SETATTR(snd.ni_vp, &vattr, cred, p); crfree(cred); if (error) { vput(snd.ni_vp); goto bad; } } VOP_UNLOCK(snd.ni_vp, 0, p); if (snd.ni_vp->v_usecount > 1 || (snd.ni_vp->v_flag & (VALIASED))) VOP_REVOKE(snd.ni_vp, REVOKEALL); /* * The vnode is useless after the revoke, we need to * namei again. */ vrele(snd.ni_vp); NDINIT(&snd, LOOKUP, NOFOLLOW|LOCKLEAF, UIO_SYSSPACE, pti->pty_sn, p); /* now open it */ if ((error = ptm_vn_open(&snd)) != 0) goto bad; sfp->f_flag = FREAD|FWRITE; sfp->f_type = DTYPE_VNODE; sfp->f_ops = &vnops; sfp->f_data = (caddr_t) snd.ni_vp; VOP_UNLOCK(snd.ni_vp, 0, p); /* now, put the indexen and names into struct ptmget */ ptm->cfd = cindx; ptm->sfd = sindx; memcpy(ptm->cn, pti->pty_pn, sizeof(pti->pty_pn)); memcpy(ptm->sn, pti->pty_sn, sizeof(pti->pty_sn)); /* mark the files mature now that we've passed all errors */ FILE_SET_MATURE(cfp); FILE_SET_MATURE(sfp); fdpunlock(fdp); break; default: error = EINVAL; break; } return (error); bad: fdremove(fdp, cindx); closef(cfp, p); fdremove(fdp, sindx); closef(sfp, p); fdpunlock(fdp); return (error); }
/* * Note: This routine is single threaded * Protected by FIFOOPEN flag (i.e. flk_lock is not held) * Upon successful completion, the original fifo is unlocked * and FIFOOPEN is cleared for the original vpp. * The new fifo returned has FIFOOPEN set. */ static int fifo_connld(struct vnode **vpp, int flag, cred_t *crp) { struct vnode *vp1; struct vnode *vp2; struct fifonode *oldfnp; struct fifonode *fn_dest; int error; struct file *filep; struct fifolock *fn_lock; cred_t *c; /* * Get two vnodes that will represent the pipe ends for the new pipe. */ makepipe(&vp1, &vp2); /* * Allocate a file descriptor and file pointer for one of the pipe * ends. The file descriptor will be used to send that pipe end to * the process on the other end of this stream. Note that we get * the file structure only, there is no file list entry allocated. */ if (error = falloc(vp1, FWRITE|FREAD, &filep, NULL)) { VN_RELE(vp1); VN_RELE(vp2); return (error); } mutex_exit(&filep->f_tlock); oldfnp = VTOF(*vpp); fn_lock = oldfnp->fn_lock; fn_dest = oldfnp->fn_dest; /* * Create two new stream heads and attach them to the two vnodes for * the new pipe.
 * NOTE(review): from here on, closef(filep) is the tear-down for vp1
 * (it runs fifo_close and VN_RELE on vp1); vp2 must be released
 * separately on every error path.
*/ if ((error = fifo_stropen(&vp1, FREAD|FWRITE, filep->f_cred, 0, 0)) != 0 || (error = fifo_stropen(&vp2, flag, filep->f_cred, 0, 0)) != 0) { #if DEBUG cmn_err(CE_NOTE, "fifo stropen failed error 0x%x", error); #endif /* * this will call fifo_close and VN_RELE on vp1 */ (void) closef(filep); VN_RELE(vp2); return (error); } /* * twist the ends of the pipe together */ strmate(vp1, vp2); /* * Set our end to busy in open * Note: Don't need lock around this because we're the only * one who knows about it */ VTOF(vp2)->fn_flag |= FIFOOPEN; mutex_enter(&fn_lock->flk_lock); fn_dest->fn_flag |= FIFOSEND; /* * check to make sure neither end of pipe has gone away */ if (!(fn_dest->fn_flag & FIFOISOPEN)) { error = ENXIO; fn_dest->fn_flag &= ~FIFOSEND; mutex_exit(&fn_lock->flk_lock); /* * this will call fifo_close and VN_RELE on vp1 */ goto out; } mutex_exit(&fn_lock->flk_lock); /* * Tag the sender's credential on the pipe descriptor. */ crhold(VTOF(vp1)->fn_pcredp = crp); VTOF(vp1)->fn_cpid = curproc->p_pid; /* * send the file descriptor to other end of pipe */ if (error = do_sendfp((*vpp)->v_stream, filep, crp)) { mutex_enter(&fn_lock->flk_lock); fn_dest->fn_flag &= ~FIFOSEND; mutex_exit(&fn_lock->flk_lock); /* * this will call fifo_close and VN_RELE on vp1 */ goto out; } mutex_enter(&fn_lock->flk_lock); /* * Wait for other end to receive file descriptor * FIFOCLOSE indicates that one or both sides of the pipe * have gone away.
 * (FIFOSEND is cleared by the receiver; a wakeup with FIFOSEND still
 * set therefore means the transfer did not complete)
*/ while ((fn_dest->fn_flag & (FIFOCLOSE | FIFOSEND)) == FIFOSEND) { if (!cv_wait_sig(&oldfnp->fn_wait_cv, &fn_lock->flk_lock)) { error = EINTR; fn_dest->fn_flag &= ~FIFOSEND; mutex_exit(&fn_lock->flk_lock); goto out; } } /* * If either end of pipe has gone away and the other end did not * receive pipe, reject the connld open */ if ((fn_dest->fn_flag & FIFOSEND)) { error = ENXIO; fn_dest->fn_flag &= ~FIFOSEND; mutex_exit(&fn_lock->flk_lock); goto out; } oldfnp->fn_flag &= ~FIFOOPEN; cv_broadcast(&oldfnp->fn_wait_cv); mutex_exit(&fn_lock->flk_lock); VN_RELE(*vpp); *vpp = vp2; (void) closef(filep); return (0); out: c = filep->f_cred; crhold(c); (void) closef(filep); VTOF(vp2)->fn_flag &= ~FIFOOPEN; (void) fifo_close(vp2, flag, 1, (offset_t)0, c, NULL); crfree(c); VN_RELE(vp2); return (error); }
/*
 * sys_pipe() -- pipe(2).  Creates a connected pair of pipe endpoints,
 * installs them in two new file descriptors, and copies the descriptor
 * numbers out to the user pointer in the syscall args.  Returns 0 or
 * an errno.  The pipe structures are created before taking the fd
 * table lock; the error labels unwind in reverse order.
 */
/* ARGSUSED */
int sys_pipe(struct proc *p, void *v, register_t *retval)
{
	struct sys_pipe_args /* { syscallarg(int *) fdp; } */ *uap = v;
	struct filedesc *fdp = p->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe = NULL;
	int fds[2], error;

	rpipe = pool_get(&pipe_pool, PR_WAITOK);
	error = pipe_create(rpipe);
	if (error != 0)
		goto free1;
	wpipe = pool_get(&pipe_pool, PR_WAITOK);
	error = pipe_create(wpipe);
	if (error != 0)
		goto free1;

	fdplock(fdp);

	/* read side */
	error = falloc(p, &rf, &fds[0]);
	if (error != 0)
		goto free2;
	rf->f_flag = FREAD | FWRITE;
	rf->f_type = DTYPE_PIPE;
	rf->f_data = rpipe;
	rf->f_ops = &pipeops;

	/* write side */
	error = falloc(p, &wf, &fds[1]);
	if (error != 0)
		goto free3;
	wf->f_flag = FREAD | FWRITE;
	wf->f_type = DTYPE_PIPE;
	wf->f_data = wpipe;
	wf->f_ops = &pipeops;

	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;

	FILE_SET_MATURE(rf, p);
	FILE_SET_MATURE(wf, p);

	error = copyout(fds, SCARG(uap, fdp), sizeof(fds));
	if (error != 0) {
		/* could not report the fds to the caller: take both back */
		fdrelease(p, fds[0]);
		fdrelease(p, fds[1]);
	}
	fdpunlock(fdp);
	return (error);

free3:
	fdremove(fdp, fds[0]);
	/* closef() on the read file drops rpipe with it */
	closef(rf, p);
	rpipe = NULL;
free2:
	fdpunlock(fdp);
free1:
	/* NOTE(review): wpipe/rpipe may be NULL here -- pipeclose() is
	 * presumably NULL-tolerant; confirm in its definition. */
	pipeclose(wpipe);
	pipeclose(rpipe);
	return (error);
}
static int init(struct selabel_handle *rec, const struct selinux_opt *opts, unsigned n) { struct saved_data *data = (struct saved_data *)rec->data; const char *path = NULL; const char *prefix = NULL; char subs_file[PATH_MAX + 1]; int status = -1, baseonly = 0; /* Process arguments */ while (n--) switch(opts[n].type) { case SELABEL_OPT_PATH: path = opts[n].value; break; case SELABEL_OPT_SUBSET: prefix = opts[n].value; break; case SELABEL_OPT_BASEONLY: baseonly = !!opts[n].value; break; } /* Process local and distribution substitution files */ if (!path) { rec->dist_subs = selabel_subs_init(selinux_file_context_subs_dist_path(), rec->dist_subs, rec->digest); rec->subs = selabel_subs_init(selinux_file_context_subs_path(), rec->subs, rec->digest); path = selinux_file_context_path(); } else { snprintf(subs_file, sizeof(subs_file), "%s.subs_dist", path); rec->dist_subs = selabel_subs_init(subs_file, rec->dist_subs, rec->digest); snprintf(subs_file, sizeof(subs_file), "%s.subs", path); rec->subs = selabel_subs_init(subs_file, rec->subs, rec->digest); } rec->spec_file = strdup(path); /* * The do detailed validation of the input and fill the spec array */ status = process_file(path, NULL, rec, prefix, rec->digest); if (status) goto finish; if (rec->validating) { status = nodups_specs(data, path); if (status) goto finish; } if (!baseonly) { status = process_file(path, "homedirs", rec, prefix, rec->digest); if (status && errno != ENOENT) goto finish; status = process_file(path, "local", rec, prefix, rec->digest); if (status && errno != ENOENT) goto finish; } digest_gen_hash(rec->digest); status = sort_specs(data); finish: if (status) closef(rec); return status; }