static int do_pipe(register int *fd) { register struct inode *inode; struct file *f; int error = -ENOMEM; if(!(inode = new_inode(NULL, S_IFIFO | S_IRUSR | S_IWUSR))) /* Create inode */ goto no_inodes; /* read file */ if((error = open_fd(O_RDONLY, inode)) < 0) goto no_files; *fd = error; /* write file */ if((error = open_fd(O_WRONLY, inode)) < 0) { f = current->files.fd[*fd]; current->files.fd[*fd] = NULL; close_filp(inode, f); no_files: iput(inode); no_inodes: return error; } (inode->i_count)++; /* Increase inode usage count */ fd[1] = error; return 0; }
/*
 * mmap_exec - on exec, tear down every mapping that is not marked
 * MAP_INHERIT.  The MAP_INHERIT path is unimplemented (asserts).
 * Parameters k and envid are unused in this body — presumably kept for
 * the caller's exec-handler signature; TODO confirm.
 * Returns 0 (also returns 0 immediately when !execonly).
 */
int mmap_exec(u_int k, int envid, int execonly) {
  /* munmap all non-MAP_INHERIT regions */
  struct Mmap *m2, *m = mmap_list.lh_first;
  int r;

  if (!execonly) return 0;

  while (m) {
    if (m->mmap_flags & MAP_INHERIT) {
      /* not implemented - need to set up a region of vm to mmap data */
      assert(0);
      m = m->mmap_link.le_next;
    } else {
      /* grab the successor first: this node's list linkage may go away */
      m2 = m->mmap_link.le_next;
      /* BUG FIX: msync() must not live inside assert() — with NDEBUG the
       * whole expression is compiled away and the flush never happens. */
      r = msync(m->mmap_addr, m->mmap_len, 0);
      assert(r == 0);
      if (m->mmap_filp) {
        /* drop our reference; close the filp when we held the last one */
        lock_filp(m->mmap_filp);
        filp_refcount_dec(m->mmap_filp);
        if (filp_refcount_get(m->mmap_filp) == 0) {
          unlock_filp(m->mmap_filp);
          close_filp(m->mmap_filp);
        } else
          unlock_filp(m->mmap_filp);
      }
      m = m2;
    }
  }
  return 0;
}
/*
 * do_pipe - create a pipe on a fresh FIFO inode: open a read-side filp
 * (f1) and a write-side filp (f2) on the same inode and install each in
 * an unused file descriptor slot, returned in fd[0] / fd[1].
 *
 * Returns 0 on success, a negative errno on failure.  The labels below
 * form an unwind chain that releases resources in reverse order of
 * acquisition; 'i' remembers the read fd so its table slot can be
 * cleared if the write side fails.
 */
static int do_pipe(int *fd)
{
    register struct inode *inode;
    struct file *f1;    /* read-side filp */
    struct file *f2;    /* write-side filp */
    int error = -ENOMEM;
    int i;              /* read fd, saved for the unwind path */

    if(!(inode = new_inode(NULL, S_IFIFO | S_IRUSR | S_IWUSR)))    /* Create inode */
        goto no_inodes;

    /* read file */
    if((error = open_filp(O_RDONLY, inode, &f1)))
        goto no_files;
    if ((error = get_unused_fd(f1)) < 0)
        goto close_f1;
    fd[0] = error;
    i = error;
    (inode->i_count)++;    /* Increase inode usage count */

    /* write file */
    if((error = open_filp(O_WRONLY, inode, &f2)))
        goto close_f1_i;
    if ((error = get_unused_fd(f2)) < 0)
        goto close_f12;
    fd[1] = error;
    return 0;

    /* --- error unwind, reverse order of acquisition --- */
close_f12:
    close_filp(inode, f2);
close_f1_i:
    current->files.fd[i] = NULL;    /* free the read fd slot */
    inode->i_count--;               /* drop the extra reference taken above */
close_f1:
    close_filp(inode, f1);
no_files:
    iput(inode);
no_inodes:
    return error;
}
/*
 * open_fd - open a filp on 'inode' with the given flags and bind it to
 * an unused file descriptor.  Returns the descriptor number on success;
 * on failure returns the error from open_filp() or the negative result
 * of get_unused_fd() (closing the filp in the latter case).
 */
int open_fd(int flags, register struct inode *inode)
{
    struct file *filp;
    int fd;

    fd = open_filp(flags, inode, &filp);
    if (fd)
        return fd;

    fd = get_unused_fd(filp);
    if (fd < 0)
        close_filp(inode, filp);    /* no free slot: undo the open */
    return fd;
}
/*===========================================================================*
 *				do_filp_gc				     *
 *===========================================================================*/
/*
 * Garbage-collect filp slots flagged FS_INVALIDATED.  For each such
 * slot: if it is already closed, just clear the flag; otherwise
 * synchronize with any worker holding the vnode lock, re-check whether
 * another path (common_open / close_filp) cleaned up in the meantime,
 * and only then force-close the filp.  Runs as a worker thread body.
 */
void *do_filp_gc(void *UNUSED(arg))
{
  struct filp *f;
  struct vnode *vp;

  for (f = &filp[0]; f < &filp[NR_FILPS]; f++) {
	if (!(f->filp_state & FS_INVALIDATED)) continue;

	if (f->filp_mode == FILP_CLOSED || f->filp_vno == NULL) {
		/* File was already closed before gc could kick in */
		assert(f->filp_count <= 0);
		f->filp_state &= ~FS_INVALIDATED;
		f->filp_count = 0;
		continue;
	}

	assert(f->filp_vno != NULL);
	vp = f->filp_vno;

	/* Synchronize with worker thread that might hold a lock on the vp */
	lock_vnode(vp, VNODE_OPCL);
	unlock_vnode(vp);

	/* If garbage collection was invoked due to a failed device open
	 * request, then common_open has already cleaned up and we have
	 * nothing to do.
	 */
	if (!(f->filp_state & FS_INVALIDATED)) {
		continue;
	}

	/* If garbage collection was invoked due to a failed device close
	 * request, the close_filp has already cleaned up and we have nothing
	 * to do.
	 */
	if (f->filp_mode != FILP_CLOSED) {
		assert(f->filp_count == 0);
		f->filp_count = 1;	/* So lock_filp and close_filp will do
					 * their job */
		lock_filp(f, VNODE_READ);
		close_filp(f);
	}

	f->filp_state &= ~FS_INVALIDATED;
  }

  thread_cleanup(NULL);
  return(NULL);
}
/*
 * close_fp - drop one reference on 'filp'.  A bogus zero count is
 * reported but otherwise ignored; when the last reference goes away the
 * filp is closed and its inode released.  Always returns 0.
 */
static int close_fp(register struct file *filp)
{
    register struct inode *inode;

    if (filp->f_count < 1) {
        printk("VFS: Close: file count is 0\n");
        return 0;
    }

    if (filp->f_count > 1) {
        /* other users remain: just drop our reference */
        filp->f_count--;
        return 0;
    }

    /* last reference: tear the filp down and release the inode */
    inode = filp->f_inode;
    close_filp(inode, filp);
    filp->f_inode = NULL;
    iput(inode);
    return 0;
}
/*
 * open_fd - open a filp on 'inode' and install it in an unused file
 * descriptor slot.  One-shot flags (O_CREAT/O_EXCL/O_NOCTTY/O_TRUNC)
 * are cleared from the filp before it becomes visible.  Returns the fd
 * on success or a negative error; the filp is closed if no fd slot is
 * available.
 */
int open_fd(int flags, struct inode *inode)
{
    struct file *f;
    int fd;

    fd = open_filp(flags, inode, &f);
    if (fd)
        return fd;

    /* these flags only matter at open time; don't leave them visible */
    f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);

    /*
     * We have to do this last, because we mustn't export
     * an incomplete fd to other processes which may share
     * the same file table with us.
     */
    fd = get_unused_fd(f);
    if (fd < 0)
        close_filp(inode, f);
    return fd;
}
/*
 * mmap_exit - process-exit hook: walk the mmap list, flush every region
 * with msync(), and drop the filp reference held by each file-backed
 * mapping (closing the filp when ours was the last reference).
 * 'arg' is unused.
 */
void mmap_exit(void *arg) {
  /* munmap all regions */
  struct Mmap *m2, *m = mmap_list.lh_first;
  int r;

  while (m) {
    /* grab the successor first: this node may be torn down below */
    m2 = m->mmap_link.le_next;
    /* BUG FIX: msync() must not live inside assert() — with NDEBUG the
     * whole expression is compiled away and the flush never happens. */
    r = msync(m->mmap_addr, m->mmap_len, 0);
    assert(r == 0);
    if (m->mmap_filp) {
      /* drop our reference; close the filp when we held the last one */
      lock_filp(m->mmap_filp);
      filp_refcount_dec(m->mmap_filp);
      if (filp_refcount_get(m->mmap_filp) == 0) {
        unlock_filp(m->mmap_filp);
        close_filp(m->mmap_filp);
      } else
        unlock_filp(m->mmap_filp);
    }
    m = m2;
  }
}
/*===========================================================================*
 *				close_fd				     *
 *===========================================================================*/
/*
 * Perform the close(fd) system call for process 'rfp'.  The fd slot is
 * cleared before close_filp() runs so concurrent threads cannot resolve
 * the same descriptor, then any file locks held by this process on the
 * underlying vnode are released.  'may_suspend' is passed through to
 * close_filp().  Returns close_filp()'s result, or err_code if the
 * descriptor could not be resolved.
 */
int close_fd(struct fproc * rfp, int fd_nr, int may_suspend)
{
/* Perform the close(fd) system call. */
  register struct filp *rfilp;
  register struct vnode *vp;
  struct file_lock *flp;
  int r, lock_count;

  /* First locate the vnode that belongs to the file descriptor. */
  if ( (rfilp = get_filp2(rfp, fd_nr, VNODE_OPCL)) == NULL) return(err_code);

  vp = rfilp->filp_vno;

  /* first, make all future get_filp2()'s fail; otherwise
   * we might try to close the same fd in different threads
   */
  rfp->fp_filp[fd_nr] = NULL;

  r = close_filp(rfilp, may_suspend);

  /* the descriptor is gone, so its close-on-exec bit must go too */
  FD_CLR(fd_nr, &rfp->fp_cloexec_set);

  /* Check to see if the file is locked.  If so, release all locks. */
  if (nr_locks > 0) {
	lock_count = nr_locks;	/* save count of locks */
	for (flp = &file_lock[0]; flp < &file_lock[NR_LOCKS]; flp++) {
		if (flp->lock_type == 0) continue;	/* slot not in use */
		if (flp->lock_vnode == vp && flp->lock_pid == rfp->fp_pid) {
			flp->lock_type = 0;
			nr_locks--;
		}
	}
	if (nr_locks < lock_count)
		lock_revive();	/* one or more locks released */
  }
  return(r);
}
/*
 * sys_open - the open(2) system call: resolve 'filename', open a filp
 * on the resulting inode and install it in an unused fd slot.
 *
 * The 'flag' computation converts the O_ACCMODE value (0/1/2 for
 * RDONLY/WRONLY/RDWR) into internal mode bits by incrementing when the
 * access mode is valid; O_TRUNC/O_CREAT additionally force FMODE_WRITE
 * so open_namei checks write permission.  (Classic Linux-style trick —
 * NOTE(review): exact FMODE semantics depend on headers not visible
 * here.)
 *
 * Returns the new fd on success, a negative errno on failure; the
 * error path releases the filp and inode in reverse order.
 */
int sys_open(char *filename, int flags, int mode)
{
    struct inode *inode;
    register struct inode *pinode;
    struct file *f;
    int error, flag;

    flag = flags;
    /* valid access mode (RDONLY/WRONLY/RDWR) -> shift to FMODE bits */
    if ((mode_t)((flags + 1) & O_ACCMODE))
        flag++;
    /* truncating or creating implies write permission is needed */
    if (flag & (O_TRUNC | O_CREAT))
        flag |= FMODE_WRITE;

    error = open_namei(filename, flag, mode, &inode, NULL);
    if(error)
        goto exit_open;

    pinode = inode;
    error = open_filp(flags, pinode, &f);
    if(error)
        goto cleanup_inode;

    /* these flags only matter at open time; don't leave them visible */
    f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC);

    /*
     * We have to do this last, because we mustn't export
     * an incomplete fd to other processes which may share
     * the same file table with us.
     */
    if ((error = get_unused_fd(f)) > -1)
        goto exit_open;          /* success: 'error' holds the fd */

    close_filp(pinode, f);
cleanup_inode:
    iput(pinode);
exit_open:
    return error;
}
/*
 * munmap - unmap [addr, addr+len).  The range is rounded out to whole
 * pages, then processed mapping-by-mapping: each iteration clips 'len'
 * to the current mapping and handles one of four cases (whole mapping
 * freed, tail freed, head freed, middle hole — the last is
 * unimplemented and asserts).  Returns 0 on success, -1 with errno set
 * to EINVAL on failure.
 */
int munmap(void *addr, size_t len)
{
  struct Mmap *m;
  struct mmap_ustruct *mus;
  void *nextaddr;
  size_t nextlen;
  int r;

  OSCALLENTER(OSCALL_munmap);

  /* page-ify */
  len += (((u_int)addr) & PGMASK);
  addr = (void*)PGROUNDDOWN((u_int)addr);
  len = PGROUNDUP(len);

  /* impossible to do what man page says! */
#if 0
  if ((((u_int)addr) & PGMASK) || len < 0) {
    errno = EINVAL;
    OSCALLEXIT(OSCALL_munmap);
    return -1;
  }
#endif

  if (len == 0) {
    OSCALLEXIT(OSCALL_munmap);
    return 0;
  }

  nextlen = len;
  do {
    /* get info on the to-be-freed region */
    mus = (struct mmap_ustruct *)mregion_get_ustruct(addr);
    if (!mus) {
      errno = EINVAL;
      OSCALLEXIT(OSCALL_munmap);
      return -1;
    }
    m = &(mus->m);

    /* clip len so this pass stays inside the current mapping */
    if (addr+len > m->mmap_addr+m->mmap_len)
      len -= addr+len - (m->mmap_addr+m->mmap_len);

    /* something strange, shouldn't happen */
    if (addr >= m->mmap_addr+m->mmap_len || addr+len <= m->mmap_addr) {
      OSCALLEXIT(OSCALL_munmap);
      return 0;
    }

    if (addr <= m->mmap_addr && len >= m->mmap_len) {
      /* the mapping is freed completely */
      __vm_free_region((u_int)m->mmap_addr, m->mmap_len, CAP_ROOT); /* XXX - error check */
      __free(m->mmap_addr); /* if wasn't __malloc'd then this will do nothing */
      LIST_REMOVE(m, mmap_link);
      if (m->mmap_filp) {
        /* drop our reference; close the filp when we held the last one */
        lock_filp(m->mmap_filp);
        filp_refcount_dec(m->mmap_filp);
        if (filp_refcount_get(m->mmap_filp) == 0) {
          unlock_filp(m->mmap_filp);
          close_filp(m->mmap_filp);
        } else
          unlock_filp(m->mmap_filp);
      }
      /* restore original handler to region.
       * BUG FIX: read mus->oldmru BEFORE exos_pinned_free(mus) — the
       * previous code freed mus first and then dereferenced it
       * (use-after-free). */
      r = mregion_alloc(addr, len, mus->oldmru);
      exos_pinned_free(mus);
      if (r < 0) {
        errno = EINVAL;
        OSCALLEXIT(OSCALL_munmap);
        return -1;
      }
    }
    /* if the end is freed */
    else if (addr > m->mmap_addr && addr+len >= m->mmap_addr+m->mmap_len) {
      m->mmap_len = addr-m->mmap_addr;
      __vm_free_region((u_int)addr, len, CAP_ROOT); /* XXX - error check */
      __free2(addr, 0); /* if wasn't __malloc'd then this will do nothing */
      /* restore original handler to region */
      if (mregion_alloc(addr, len, mus->oldmru) < 0) {
        errno = EINVAL;
        OSCALLEXIT(OSCALL_munmap);
        return -1;
      }
    }
    /* if the beginning is freed */
    else if (addr <= m->mmap_addr && addr+len < m->mmap_addr+m->mmap_len) {
      __vm_free_region((u_int)addr, len, CAP_ROOT); /* XXX - error check */
      __free2(m->mmap_addr, addr+len - m->mmap_addr);
      m->mmap_len = m->mmap_addr+m->mmap_len - (addr+len);
      m->mmap_addr = addr+len;
      /* restore original handler to region */
      if (mregion_alloc(addr, len, mus->oldmru) < 0) {
        errno = EINVAL;
        OSCALLEXIT(OSCALL_munmap);
        return -1;
      }
    }
    /* if the middle is freed */
    else {
      __vm_free_region((u_int)addr, len, CAP_ROOT); /* XXX - error check */
      /* restore original handler to region */
      if (mregion_alloc(addr, len, mus->oldmru) < 0) {
        errno = EINVAL;
        OSCALLEXIT(OSCALL_munmap);
        return -1;
      }
      assert(0); /* XXX - too much trouble right now */
    }

    /* advance to the next mapping covered by the original range */
    nextaddr = addr+len;
    nextlen -= len;
    addr = nextaddr;
    len = nextlen;
  } while (len > 0);

  OSCALLEXIT(OSCALL_munmap);
  return 0;
}