/*
 * Resolve the (possibly symlinked) user-supplied name 'aname' to an
 * absolute path rendered into 'buf' (buflen bytes).  On success,
 * *pathp points at the path string inside buf and 0 is returned;
 * otherwise a positive error code (note the negated PTR_ERR, per the
 * AFS positive-errno convention).
 */
int osi_abspath(char *aname, char *buf, int buflen, int followlink,
                char **pathp)
{
    struct dentry *dp = NULL;
    struct vfsmount *mnt = NULL;
    char *name, *path;
    int code;

    /* Copy the pathname in from userspace. */
    name = afs_getname(aname);
    if (IS_ERR(name))
        return -PTR_ERR(name);
    code = osi_lookupname_internal(name, followlink, &mnt, &dp);
    if (!code) {
#if defined(D_PATH_TAKES_STRUCT_PATH)
        /* Newer kernels: d_path() takes a struct path. */
        afs_linux_path_t p = { mnt, dp };
        path = d_path(&p, buf, buflen);
#else
        path = d_path(dp, mnt, buf, buflen);
#endif
        if (IS_ERR(path)) {
            code = -PTR_ERR(path);
        } else {
            *pathp = path;
        }
        /* Drop the dentry/mount references taken by the lookup. */
        dput(dp);
        mntput(mnt);
    }
    afs_putname(name);
    return code;
}
/*
 * Build the path of h_parent as seen through the aufs mount, by
 * stitching together three d_path() renderings into the shared buffer
 * 'buf'.  Uses the old two-argument d_path(dentry, mnt, ...) API.
 * Returns a pointer into 'buf' on success or an ERR_PTR on failure.
 */
static char *au_build_path(struct dentry *h_parent, struct path *h_rootpath,
                           char *buf, int len, struct super_block *sb)
{
    char *p;
    int n;

    AuTraceEnter();

    /* Path of the branch root; its length is the prefix to skip. */
    p = d_path(h_rootpath->dentry, h_rootpath->mnt, buf, len);
    if (IS_ERR(p))
        goto out;
    n = strlen(p);
    /* Path of the parent dentry on the same branch mount. */
    p = d_path(h_parent, h_rootpath->mnt, buf, len);
    if (IS_ERR(p))
        goto out;
    LKTRTrace("%s\n", p);
    if (n != 1)
        p += n;  /* skip the branch-root prefix (n==1 means root is "/") */
    LKTRTrace("%p, %s, %ld\n", p, p, (long)(p - buf));
    /* Finally prepend the path of the aufs mount point itself. */
    p = d_path(sb->s_root, au_sbi(sb)->si_mnt, buf, len - strlen(p));
    if (IS_ERR(p))
        goto out;
    if (n != 1)
        p[strlen(p)] = '/';  /* rejoin the two halves in the buffer */
    LKTRTrace("%s\n", p);

 out:
    AuTraceErrPtr(p);
    return p;
}
/* Caller is responsible for path_get()/path_put() */
/*
 * Resolve 'path' to a pathname string rendered into 'buf' (size
 * bytes).  Returns the string pointer on success, NULL when the path
 * is incomplete, or an ERR_PTR propagated from d_path().
 */
static char *
cr_getpath(struct path *path, char *buf, int size)
{
    char *name = NULL;

    if (path->dentry == NULL) {
        CR_WARN("path->dentry is NULL!");
        goto out;
    }
    if (path->mnt == NULL) {
        CR_WARN("path->vfsmnt is NULL!");
        goto out;
    }

    /* Pick the d_path() signature matching this kernel version. */
#if HAVE_NAMEIDATA_DENTRY
    name = d_path(path->dentry, path->mnt, buf, size);
#elif HAVE_NAMEIDATA_PATH
    name = d_path(path, buf, size);
#else
#error
#endif

out:
    return name;
}
/*
 * Build the path of h_parent as seen through the aufs mount, by
 * stitching together three d_path() renderings into the shared buffer
 * 'buf' (struct-path d_path() API).  Returns a pointer into 'buf' on
 * success or an ERR_PTR on failure.
 */
static char *au_build_path(struct dentry *h_parent, struct path *h_rootpath,
                           char *buf, int len, struct super_block *sb)
{
    char *p;
    int n;
    struct path path;

    /* Path of the branch root; its length is the prefix to skip. */
    p = d_path(h_rootpath, buf, len);
    if (IS_ERR(p))
        goto out;
    n = strlen(p);

    /* Path of the parent dentry on the same branch mount. */
    path.mnt = h_rootpath->mnt;
    path.dentry = h_parent;
    p = d_path(&path, buf, len);
    if (IS_ERR(p))
        goto out;
    if (n != 1)
        p += n;  /* skip the branch-root prefix (n==1 means root is "/") */

    /* Finally prepend the path of the aufs mount point itself. */
    path.mnt = au_mnt_get(sb);
    path.dentry = sb->s_root;
    p = d_path(&path, buf, len - strlen(p));
    mntput(path.mnt);
    if (IS_ERR(p))
        goto out;
    if (n != 1)
        p[strlen(p)] = '/';  /* rejoin the two halves in the buffer */

out:
    AuTraceErrPtr(p);
    return p;
}
char *file_path(struct file *filp, char *buf, int buflen) { struct path path; f_covering_path(filp, &path); return d_path(&path, buf, buflen); }
/*
 * Emit one LTT trace event for every open file descriptor of task 't'.
 * 'tmp' is a caller-supplied PAGE_SIZE scratch buffer used by d_path().
 */
static inline void ltt_enumerate_task_fd(struct ltt_probe_private_data *call_data,
                                         struct task_struct *t, char *tmp)
{
    struct fdtable *fdt;
    struct file *filp;
    unsigned int i;
    const unsigned char *path;

    if (!t->files)
        return;

    /* Hold file_lock so the fd table cannot change while we walk it. */
    spin_lock(&t->files->file_lock);
    fdt = files_fdtable(t->files);
    for (i = 0; i < fdt->max_fds; i++) {
        filp = fcheck_files(t->files, i);
        if (!filp)
            continue;
        path = d_path(filp->f_dentry, filp->f_vfsmnt, tmp, PAGE_SIZE);
        /* Make sure we give at least some info */
        __trace_mark(0, list_file_descriptor, call_data,
                     "filename %s pid %d fd %u",
                     (IS_ERR(path)) ? (filp->f_dentry->d_name.name) : (path),
                     t->pid, i);
    }
    spin_unlock(&t->files->file_lock);
}
static int tpe_file_mmap(struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags, unsigned long addr, unsigned long addr_only) #endif { int retval; char *fptmp, *exepath; if((unlikely(current->uid == 0)) || (unlikely(reqprot != PROT_EXEC)) || (unlikely(file == NULL))) return 0; retval = tpe_acl_check(file, 1); if(retval) { fptmp = (char*)__get_free_page(GFP_KERNEL); if(unlikely(fptmp == NULL)) return -ENOMEM; exepath = d_path(file->f_dentry, file->f_vfsmnt, fptmp, PAGE_SIZE); TPE_INFO("Denied mmap of %s by uid: %d gid: %d pid: %d", exepath, current->uid, current->gid, current->pid); free_page((unsigned long)fptmp); } return retval; }
/*
 * Render the pathname of 'path' into the caller's buffer 'zpath'
 * (capacity is assumed >= 'len' -- TODO confirm against callers).
 * Returns 0 on success or a positive error code (SPL convention;
 * note -PTR_ERR() flips d_path()'s negative errno positive).
 */
static int
zfsctl_snapshot_zpath(struct path *path, int len, char *zpath)
{
    char *path_buffer, *path_ptr;
    int path_len, error = 0;

    path_buffer = kmem_alloc(len, KM_SLEEP);

    /* d_path() builds the string at the END of the buffer. */
    path_ptr = d_path(path, path_buffer, len);
    if (IS_ERR(path_ptr)) {
        error = -PTR_ERR(path_ptr);
        goto out;
    }

    /* String length = distance from path_ptr to the trailing NUL. */
    path_len = path_buffer + len - 1 - path_ptr;
    if (path_len > len) {  /* defensive; cannot exceed len - 1 above */
        error = EFAULT;
        goto out;
    }

    memcpy(zpath, path_ptr, path_len);
    zpath[path_len] = '\0';
out:
    kmem_free(path_buffer, len);

    return (error);
}
/*
 * Emit one statedump trace event for every open file descriptor of
 * task 't'.  'tmp' is a caller-supplied PAGE_SIZE scratch buffer for
 * d_path().
 */
static inline void ltt_enumerate_task_fd(struct task_struct *t, char *tmp)
{
    struct fdtable *fdt;
    struct file * filp;
    unsigned int i;
    char *path;

    if (!t->files)
        return;

    /* Hold file_lock so the fd table stays stable while we walk it. */
    spin_lock(&t->files->file_lock);
    fdt = files_fdtable(t->files);
    for (i = 0; i < fdt->max_fds; i++) {
        filp = fcheck_files(t->files, i);
        if (!filp)
            continue;
        path = d_path(filp->f_dentry, filp->f_vfsmnt, tmp, PAGE_SIZE);
        /* Make sure we give at least some info */
        if (IS_ERR(path))
            /* Fall back to the bare dentry name on d_path() failure. */
            trace_statedump_enumerate_file_descriptors(
                filp->f_dentry->d_name.name, t->pid, i);
        else
            trace_statedump_enumerate_file_descriptors(
                path, t->pid, i);
    }
    spin_unlock(&t->files->file_lock);
}
/*
 * Append the current task's executable pathname to the core name being
 * built in @cn, or a "(path unknown)" note when no exe file is
 * attached.  Returns the cn_esc_printf() result or a negative errno.
 */
static int cn_print_exe_file(struct core_name *cn)
{
    struct file *exe;
    char *page;
    char *name;
    int rc;

    exe = get_mm_exe_file(current->mm);
    if (!exe)
        return cn_esc_printf(cn, "%s (path unknown)", current->comm);

    page = kmalloc(PATH_MAX, GFP_TEMPORARY);
    if (!page) {
        rc = -ENOMEM;
        goto out_fput;
    }

    name = d_path(&exe->f_path, page, PATH_MAX);
    if (IS_ERR(name)) {
        rc = PTR_ERR(name);
        goto out_free;
    }

    rc = cn_esc_printf(cn, "%s", name);

out_free:
    kfree(page);
out_fput:
    fput(exe);
    return rc;
}
/*
 * fsync(fd) worker: performs vfs_fsync() and logs a warning, including
 * the file's pathname, when the sync takes 5 seconds or longer.
 * Returns vfs_fsync()'s result, or -EBADF for an invalid fd.
 */
static int do_fsync(unsigned int fd, int datasync)
{
    struct file *file;
    int ret = -EBADF;
    ktime_t fsync_t, fsync_diff;
    char pathname[256], *path;

    file = fget(fd);
    if (file) {
        /* Resolve the pathname up front, purely for the log line. */
        path = d_path(&(file->f_path), pathname, sizeof(pathname));
        if (IS_ERR(path))
            path = "(unknown)";
        fsync_t = ktime_get();
        ret = vfs_fsync(file, datasync);
        fput(file);
        fsync_diff = ktime_sub(ktime_get(), fsync_t);
        /* Flag pathologically slow fsyncs (>= 5 s). */
        if (ktime_to_ms(fsync_diff) >= 5000) {
            pr_info("VFS: %s pid:%d(%s)(parent:%d/%s) takes %lld ms to fsync %s.\n",
                    __func__, current->pid, current->comm,
                    current->parent->pid, current->parent->comm,
                    ktime_to_ms(fsync_diff), path);
        }
    }
    return ret;
}
/*
 * Log a module load/unload event: the operation, the module's
 * (sanitized) pathname and the invoking process's command line.
 * 'path' may be NULL when the module's origin is unknown.
 */
static void report_load_module(struct path *path, char *operation)
{
    char *alloced = NULL, *cmdline;
    char *pathname; /* Pointer to either static string or "alloced". */

    if (!path)
        pathname = "<unknown>";
    else {
        /* We will allow 11 spaces for ' (deleted)' to be appended */
        alloced = pathname = kmalloc(PATH_MAX+11, GFP_KERNEL);
        if (!pathname)
            pathname = "<no_memory>";
        else {
            pathname = d_path(path, pathname, PATH_MAX+11);
            if (IS_ERR(pathname))
                pathname = "<too_long>";
            else {
                /* printable() returns a sanitized copy; swap buffers
                 * so 'alloced' always tracks what must be freed. */
                pathname = printable(pathname);
                kfree(alloced);
                alloced = pathname;
            }
        }
    }

    cmdline = printable_cmdline(current);

    pr_notice("init_module %s module=%s pid=%d cmdline=%s\n",
              operation, pathname, task_pid_nr(current), cmdline);

    kfree(cmdline);
    kfree(alloced);
}
static int ext4_file_open(struct inode * inode, struct file * filp) { struct super_block *sb = inode->i_sb; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct vfsmount *mnt = filp->f_path.mnt; struct path path; char buf[64], *cp; if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) && !(sb->s_flags & MS_RDONLY))) { sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED; /* * Sample where the filesystem has been mounted and * store it in the superblock for sysadmin convenience * when trying to sort through large numbers of block * devices or filesystem images. */ memset(buf, 0, sizeof(buf)); path.mnt = mnt->mnt_parent; path.dentry = mnt->mnt_mountpoint; path_get(&path); cp = d_path(&path, buf, sizeof(buf)); path_put(&path); if (!IS_ERR(cp)) { memcpy(sbi->s_es->s_last_mounted, cp, sizeof(sbi->s_es->s_last_mounted)); sb->s_dirt = 1; } } return generic_file_open(inode, filp); }
char *npm_getcwd(char *buf, unsigned long bufsize) { struct path pwd; char *res; ASSERT(bufsize >= PAGE_SIZE - 1); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) || defined CONFIG_VE get_fs_pwd(current->fs, &pwd); #else read_lock(¤t->fs->lock); pwd = current->fs->pwd; path_get(&pwd); read_unlock(¤t->fs->lock); #endif res = d_path(&pwd, buf, bufsize); if (IS_ERR(res)) res = NULL; path_put(&pwd); return res; }
static int do_fsync(unsigned int fd, int datasync) { struct file *file; int ret = -EBADF; int fput_needed; #ifdef CONFIG_ASYNC_FSYNC struct fsync_work *fwork; #endif if (!fsync_enabled) return 0; file = fget_light(fd, &fput_needed); if (file) { #ifdef CONFIG_ASYNC_FSYNC ktime_t fsync_t, fsync_diff; char pathname[256], *path; path = d_path(&(file->f_path), pathname, sizeof(pathname)); if (IS_ERR(path)) path = "(unknown)"; else if (async_fsync(file, fd)) { if (!fsync_workqueue) fsync_workqueue = create_singlethread_workqueue("fsync"); if (!fsync_workqueue) goto no_async; if (IS_ERR(path)) goto no_async; fwork = kmalloc(sizeof(*fwork), GFP_KERNEL); if (fwork) { strncpy(fwork->pathname, path, sizeof(fwork->pathname) - 1); INIT_WORK(&fwork->work, do_afsync_work); queue_work(fsync_workqueue, &fwork->work); fput_light(file, fput_needed); return 0; } } no_async: fsync_t = ktime_get(); #endif ret = vfs_fsync(file, datasync); fput_light(file, fput_needed); #ifdef CONFIG_ASYNC_FSYNC fsync_diff = ktime_sub(ktime_get(), fsync_t); if (ktime_to_ms(fsync_diff) >= 5000) { pr_info("VFS: %s pid:%d(%s)(parent:%d/%s)\ takes %lld ms to fsync %s.\n", __func__, current->pid, current->comm, current->parent->pid, current->parent->comm, ktime_to_ms(fsync_diff), path); } #endif } return ret; }
inline char * fd2path(long fd,char *buffer,int pathmax){ struct files_struct * files = 0; struct file * f_ptr = 0; //----- need to convert inode to dentry. files = current->files; //----- get file pointer associated with file descriptor if(files) f_ptr = fcheck_files(files,fd); #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) return d_path(f_ptr->f_dentry,f_ptr->f_vfsmnt,buffer,pathmax); #else return d_path(&f_ptr->f_path,buffer,pathmax); #endif }
/* copy the pathname of a file to a buffer */
char *file_path(struct file *file, char *buf, int count)
{
    char *name;

    if (buf == NULL)
        return NULL;

    name = d_path(&file->f_path, buf, count);
    if (IS_ERR(name))
        return NULL;
    return name;
}
static char* miyabi_guess_binary(struct task_struct* t, char* buf, int len) { struct vm_area_struct* vm_area; struct file* file; char tmp[LOCAL_MIYABI_GUESS_PATH_MAX_LEN + 1]; char* p = NULL; char* q = NULL; int i; read_lock(&tasklist_lock); if(t && t->mm && t->mm->mmap) { vm_area = t->mm->mmap; for(i = 0; i < t->mm->map_count; i++) { if(!vm_area) { break; } file = vm_area->vm_file; if(file) { memset(tmp, 0, LOCAL_MIYABI_GUESS_PATH_MAX_LEN + 1); p = d_path(&file->f_path, tmp, LOCAL_MIYABI_GUESS_PATH_MAX_LEN); if(p == NULL || (long)p == ENAMETOOLONG) { vm_area = vm_area->vm_next; continue; } if(p[0] == '/') { strncpy(buf, p, len); q = buf; break; } } vm_area = vm_area->vm_next; } } read_unlock(&tasklist_lock); return q; }
/*
 * Under the Tilera simulator, notify it of the exec'd binary's
 * pathname (and, for ET_DYN objects, its load address via the
 * SIM_CONTROL_DLOPEN channel).  Returns 1 on success / not-simulated,
 * 0 when notification could not be performed.
 */
static int notify_exec(struct mm_struct *mm)
{
    char *buf, *path;
    struct vm_area_struct *vma;

#ifndef CONFIG_KVM_GUEST /* see notify_sim_task_change() */
    if (!sim_is_simulator())
#endif
        return 1;

    if (mm->exe_file == NULL)
        return 0;

    /* Find the VMA backed by the exe file to learn its load address. */
    for (vma = current->mm->mmap; ; vma = vma->vm_next) {
        if (vma == NULL)
            return 0;
        if (vma->vm_file == mm->exe_file)
            break;
    }

    buf = (char *) __get_free_page(GFP_KERNEL);
    if (buf == NULL)
        return 0;

    path = d_path(&mm->exe_file->f_path, buf, PAGE_SIZE);
    if (IS_ERR(path)) {
        free_page((unsigned long)buf);
        return 0;
    }

    /*
     * Notify simulator of an ET_DYN object so we know the load address.
     * The somewhat cryptic overuse of SIM_CONTROL_DLOPEN allows us
     * to be backward-compatible with older simulator releases.
     */
    if (vma->vm_start == (ELF_ET_DYN_BASE & PAGE_MASK)) {
        /* NOTE: this buf intentionally shadows the page buffer above. */
        char buf[64];
        int i;

        snprintf(buf, sizeof(buf), "0x%lx:@", vma->vm_start);
        /* Stream the string one character at a time, NUL included. */
        for (i = 0; ; ++i) {
            char c = buf[i];
            __insn_mtspr(SPR_SIM_CONTROL,
                         (SIM_CONTROL_DLOPEN
                          | (c << _SIM_CONTROL_OPERATOR_BITS)));
            if (c == '\0')
                break;
        }
    }

    sim_notify_exec(path);
    free_page((unsigned long)buf);
    return 1;
}
/*
 * ext4 open(): on the first open since a writable mount, record the
 * mount point in the superblock under a journal handle; then set up
 * the inode's jbd2_inode when opening for write with a journal
 * present, and finally delegate to dquot_file_open().
 */
static int ext4_file_open(struct inode * inode, struct file * filp)
{
    struct super_block *sb = inode->i_sb;
    struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    struct ext4_inode_info *ei = EXT4_I(inode);
    struct vfsmount *mnt = filp->f_path.mnt;
    struct path path;
    char buf[64], *cp;

    if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                 !(sb->s_flags & MS_RDONLY))) {
        sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
        /* Sample the mount point once, for sysadmin convenience. */
        memset(buf, 0, sizeof(buf));
        path.mnt = mnt;
        path.dentry = mnt->mnt_root;
        cp = d_path(&path, buf, sizeof(buf));
        if (!IS_ERR(cp)) {
            handle_t *handle;
            int err;

            /* The superblock update must go through the journal. */
            handle = ext4_journal_start_sb(sb, 1);
            if (IS_ERR(handle))
                return PTR_ERR(handle);
            err = ext4_journal_get_write_access(handle, sbi->s_sbh);
            if (err) {
                ext4_journal_stop(handle);
                return err;
            }
            strlcpy(sbi->s_es->s_last_mounted, cp,
                    sizeof(sbi->s_es->s_last_mounted));
            ext4_handle_dirty_super(handle, sb);
            ext4_journal_stop(handle);
        }
    }
    if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
        struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);

        /* Recheck under i_lock; another opener may have won the race. */
        spin_lock(&inode->i_lock);
        if (!ei->jinode) {
            if (!jinode) {
                spin_unlock(&inode->i_lock);
                return -ENOMEM;
            }
            ei->jinode = jinode;
            jbd2_journal_init_jbd_inode(ei->jinode, inode);
            jinode = NULL;  /* ownership transferred to the inode */
        }
        spin_unlock(&inode->i_lock);
        if (unlikely(jinode != NULL))
            jbd2_free_inode(jinode);  /* lost the race; free the spare */
    }
    return dquot_file_open(inode, filp);
}
/*
 * Dump the current task's VM areas to the kernel log, one line per
 * VMA in /proc/<pid>/maps style (2.4-era kdev_t/old d_path API).
 */
static inline void read_maps (void)
{
    struct vm_area_struct * map, * next;
    char * buffer;
    ssize_t i;

    buffer = (char*)__get_free_page(GFP_KERNEL);
    if (!buffer)
        return;

    for (map = current->mm->mmap ; map ; map = next ) {
        /* produce the next line */
        char *line;
        char str[5], *cp = str;
        int flags;
        kdev_t dev;
        unsigned long ino;

        /*
         * Get the next vma now (but it won't be used if we sleep).
         */
        next = map->vm_next;
        flags = map->vm_flags;

        /* Render the "rwxp"-style permission string. */
        *cp++ = flags & VM_READ ? 'r' : '-';
        *cp++ = flags & VM_WRITE ? 'w' : '-';
        *cp++ = flags & VM_EXEC ? 'x' : '-';
        *cp++ = flags & VM_MAYSHARE ? 's' : 'p';
        *cp++ = 0;

        dev = 0;
        ino = 0;
        if (map->vm_file != NULL) {
            dev = map->vm_file->f_dentry->d_inode->i_dev;
            ino = map->vm_file->f_dentry->d_inode->i_ino;
            /* 'line' is only read below when vm_file != NULL. */
            line = d_path(map->vm_file->f_dentry,
                          map->vm_file->f_vfsmnt,
                          buffer, PAGE_SIZE);
            if (IS_ERR(line))
                break;  /* abandon the dump on d_path() failure */
        }
        printk(MAPS_LINE_FORMAT, map->vm_start, map->vm_end, str,
               map->vm_pgoff << PAGE_SHIFT, kdevname(dev), ino);
        if (map->vm_file != NULL)
            printk("%s\n", line);
        else
            printk("\n");
    }
    free_page((unsigned long)buffer);
    return;
}
/*
 * ext4 open(): on the first open since a writable mount, sample the
 * mount point into the superblock; then set up the inode's jbd2_inode
 * when opening for write with a journal present, and finally delegate
 * to dquot_file_open().
 */
static int ext4_file_open(struct inode * inode, struct file * filp)
{
    struct super_block *sb = inode->i_sb;
    struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
    struct ext4_inode_info *ei = EXT4_I(inode);
    struct vfsmount *mnt = filp->f_path.mnt;
    struct path path;
    char buf[64], *cp;

    if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                 !(sb->s_flags & MS_RDONLY))) {
        sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
        /*
         * Sample where the filesystem has been mounted and
         * store it in the superblock for sysadmin convenience
         * when trying to sort through large numbers of block
         * devices or filesystem images.
         */
        memset(buf, 0, sizeof(buf));
        path.mnt = mnt;
        path.dentry = mnt->mnt_root;
        cp = d_path(&path, buf, sizeof(buf));
        if (!IS_ERR(cp)) {
            strlcpy(sbi->s_es->s_last_mounted, cp,
                    sizeof(sbi->s_es->s_last_mounted));
            ext4_mark_super_dirty(sb);
        }
    }
    /*
     * Set up the jbd2_inode if we are opening the inode for
     * writing and the journal is present
     */
    if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
        struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);

        /* Recheck under i_lock; another opener may have won the race. */
        spin_lock(&inode->i_lock);
        if (!ei->jinode) {
            if (!jinode) {
                spin_unlock(&inode->i_lock);
                return -ENOMEM;
            }
            ei->jinode = jinode;
            jbd2_journal_init_jbd_inode(ei->jinode, inode);
            jinode = NULL;  /* ownership transferred to the inode */
        }
        spin_unlock(&inode->i_lock);
        if (unlikely(jinode != NULL))
            jbd2_free_inode(jinode);  /* lost the race; free the spare */
    }
    return dquot_file_open(inode, filp);
}
/** create a copy of a regular file in overlay
@param nd specifies underlay - source to copy
@param nnew specifies new entry - destination
@return 0 on success, -ECODE otherwise */
/*
 * NOTE(review): this function appears truncated in this view -- the
 * out_free/out_close labels it jumps to are not visible here.
 */
int translucent_copy(struct nameidata *nd, struct nameidata *nnew, int lookup_flags)
{
    char *p, *buf, *pathbuf;
    /* Syscall-table entry points used to perform the raw data copy. */
    ssize_t (*sys_write)(int fd, const void *buf, size_t count)=sys_call_table[__NR_write];
    ssize_t (*sys_read)(int fd, void *buf, size_t count)=sys_call_table[__NR_read];
    int (*sys_close)(int fd)=sys_call_table[__NR_close];
    int result,inphandle,outphandle;
    int i_Bufsize=4096;
    umode_t mode=nd->dentry->d_inode->i_mode;
    struct utimbuf timebuf={
        actime:nd->dentry->d_inode->i_atime,
        modtime:nd->dentry->d_inode->i_mtime
    };

    // exclude device/pipe/socket/dir and proc entries from COW
    if(is_special(nd)) return -ENODEV;

    mode &= S_IRWXUGO;
    buf=malloc(i_Bufsize);
    pathbuf=malloc(REDIR_BUFSIZE+1);
    /* Resolve the source pathname (old two-argument d_path()).
     * NOTE(review): d_path()/malloc() results are not checked here. */
    p = d_path(nd->dentry, nd->mnt, pathbuf, REDIR_BUFSIZE);
    // printk(KERN_DEBUG SYSLOGID ": copy-on-write %s %o\n",p,mode);
    BEGIN_KMEM
        result=orig_sys_open(p,O_RDONLY,0666);
    END_KMEM
    if(result<0) goto out_free;
    inphandle=result;

    /* Resolve the destination pathname into the same buffer. */
    p=d_path(nnew->dentry, nnew->mnt, pathbuf, REDIR_BUFSIZE);
    BEGIN_KMEM
        result=orig_sys_open(p,O_WRONLY|O_CREAT,mode);
    END_KMEM
    if(result<0) goto out_close;
    outphandle=result;

    /* Copy the data unless the caller asked for truncation only. */
    if(!(lookup_flags&LOOKUP_TRUNCATE)) {
        BEGIN_KMEM
            while((result=sys_read(inphandle,buf,i_Bufsize))>0&&sys_write(outphandle,buf,result)>0);
        END_KMEM
    } else result=0;
char *getfullPath(const char *pathname, char *fullpath) { //char *fullpath = NULL; char *path = NULL; char *start = NULL; //struct dentry *pwd; //struct vfsmount *vfsmount; struct fs_struct *fs = current->fs; struct path pwd; /*fullpath = kmalloc(PATH_MAX, GFP_KERNEL); if (!fullpath) { // kmalloc error return fullpath; } memset(fullpath, 0, PATH_MAX);*/ path = kmalloc(PATH_MAX, GFP_KERNEL); if (!path) { return NULL; } // 2.4 // get dentry and vfsmnt //read_lock(&(fs->lock)); //pwd = dget(fs->pwd); //vfsmount = mntget(fs->pwdmnt); //read_unlock(&(fs->lock)); // get path //start = d_path(pwd, vfsmount, path, PATH_MAX); //strcat(fullpath, start); // 2.6.32 read_lock(&fs->lock); pwd = fs->pwd; path_get(&pwd); read_unlock(&fs->lock); //set_fs_pwd(fs, &pwd); start = d_path(&pwd, path, PATH_MAX); strcat(fullpath, start); strcat(fullpath, "/"); strcat(fullpath, pathname); // 2.6.35 // use spinlock kfree(path); return fullpath; }
inline char * fd2path(long fd,char *buffer,int pathmax){ struct files_struct * files = 0; struct file * f_ptr = 0; //----- need to convert inode to dentry. files = current->files; //----- get file pointer associated with file descriptor if(files) f_ptr = fcheck_files(files,fd); return d_path(f_ptr->f_dentry,f_ptr->f_vfsmnt,buffer,pathmax); }
/* copy the pathname of a file to a buffer */
char *file_path(struct file *file, char *buf, int count)
{
    char *name;

    if (buf == NULL)
        return NULL;

    name = d_path(file->f_dentry, file->f_vfsmnt, buf, count);
    if (IS_ERR(name))
        return NULL;
    return name;
}
/* * Create path from root for given inode. * Path is formed as set of stuctures, containing name of the object * and its inode data (mode, permissions and so on). */ int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int len) { struct path path; struct dentry *d; char *ptr; int err = 0, strlen, reduce = 0; d = d_find_alias(&pi->vfs_inode); if (!d) { ; return -ENOENT; } spin_lock(¤t->fs->lock); path.mnt = mntget(current->fs->root.mnt); spin_unlock(¤t->fs->lock); path.dentry = d; if (!IS_ROOT(d) && d_unhashed(d)) reduce = 1; ptr = d_path(&path, data, len); if (IS_ERR(ptr)) { err = PTR_ERR(ptr); goto out; } if (reduce && len >= UNHASHED_OBSCURE_STRING_SIZE) { char *end = data + len - UNHASHED_OBSCURE_STRING_SIZE; *end = '\0'; } strlen = len - (ptr - (char *)data); memmove(data, ptr, strlen); ptr = data; err = strlen; // dprintk("%s: dname: '%s', len: %u, maxlen: %u, name: '%s', strlen: %d.\n", ; out: dput(d); mntput(path.mnt); return err; }
/* sec_check_execpath
return value : give task's exec path is matched or not */
/*
 * Compare the exec pathname of 'mm' against 'denypath'.  Returns 1 on
 * a prefix match (bounded by the shorter of the two strings), 0 on
 * mismatch or any failure.
 */
int sec_check_execpath(struct mm_struct *mm, char *denypath)
{
    struct file *exe_file;
    char *path, *pathbuf = NULL;
    unsigned int path_length = 0, denypath_length = 0;
    int ret = 0;

    if(mm == NULL)
        return 0;

    if(!(exe_file = get_mm_exe_file(mm))) {
        PRINT_LOG("Cannot get exe from task->mm.\n");
        goto out_nofile;
    }

    if(!(pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY))) {
        PRINT_LOG("failed to kmalloc for pathbuf\n");
        goto out;
    }

    path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
    if (IS_ERR(path)) {
        PRINT_LOG("Error get path..\n");
        goto out;
    }

    path_length = strlen(path);
    denypath_length = strlen(denypath);

    /* Prefix comparison, bounded by the shorter of the two strings. */
    if(!strncmp(path, denypath, (path_length < denypath_length) ?
                path_length : denypath_length)) {
        ret = 1;
    }
out :
    fput(exe_file);  /* drop the reference taken by get_mm_exe_file() */
out_nofile:
    if(pathbuf)
        kfree(pathbuf);
    return ret;
}
static int appcl_lsm_bprm_set_creds(struct linux_binprm *bprm) { struct task_audit_data *newtd; /* cred security label */ struct inode *inode = file_inode(bprm->file); char *fpath_name; /* temp path name */ char *cred_path; /* saved path name */ char *tmp; struct path *fpath; if (bprm->cred_prepared) return 0; newtd = bprm->cred->security; spin_lock(&bprm->file->f_lock); fpath = &bprm->file->f_path; path_get(fpath); spin_unlock(&bprm->file->f_lock); tmp = (char *)__get_free_page(GFP_TEMPORARY); if (!tmp) { path_put(fpath); return -ENOMEM; } fpath_name = d_path(fpath, tmp, PAGE_SIZE); path_put(fpath); if (IS_ERR(fpath_name)) fpath_name = (char*)bprm->filename; if (fpath_name == NULL || strlen(fpath_name) < 1) cred_path = APPCL_VALUE_UNLABELLED; else cred_path = fpath_name; newtd->bprm_pathname = cred_path; newtd->u.inode = inode; bprm->cred->security = newtd; free_page((unsigned long) tmp); return 0; }
/**
 * \<\<private\>\> Writes a specified memory area into the checkpoint
 * file - light version.  It fills out the rest of the header,
 * extracts the mapped file pathname and stores the header, pathname
 * size and the actual pathname into the checkpoint.
 *
 * @param *ckpt - checkpoint file where the area is to be stored
 * @param *vma - the actual VM area that is being processed
 * @param *hdr - partially filled VM area header
 * @return 0 upon success, -EINVAL on any failure.
 */
static int tcmi_ckpt_vm_area_write_l(struct tcmi_ckpt *ckpt,
                                     struct vm_area_struct *vma,
                                     struct tcmi_ckpt_vm_area_hdr *hdr)
{
    /* page for the filepathname */
    unsigned long page;
    char *pathname;

    /* finish the header */
    hdr->type = TCMI_CKPT_VM_AREA_LIGHT;

    /* resolve the path name. */
    if (!(page = __get_free_page(GFP_KERNEL))) {
        mdbg(ERR3, "Can't allocate page for file pathname!");
        goto exit0;
    }
    if (IS_ERR(pathname = d_path(&vma->vm_file->f_path,
                                 (char *)page, PAGE_SIZE))) {
        mdbg(ERR3, "Can't resolve pathname for '%s'",
             vma->vm_file->f_dentry->d_name.name);
        goto exit1;
    }
    /* size includes the terminating NUL. */
    hdr->pathname_size = strlen(pathname) + 1;

    /* write the header and the pathname into the checkpoint */
    if (tcmi_ckpt_write(ckpt, hdr, sizeof(*hdr)) < 0) {
        mdbg(ERR3, "Error writing VM_area header chunk");
        goto exit1;
    }
    /* write the header and the pathname into the checkpoint */
    if (tcmi_ckpt_write(ckpt, pathname, hdr->pathname_size) < 0) {
        mdbg(ERR3, "Error writing pathname chunk");
        goto exit1;
    }
    free_page(page);
    return 0;

    /* error handling */
exit1:
    free_page(page);
exit0:
    return -EINVAL;
}