/*
 * f_permission - permission hook for the firm_vm /proc files.
 * @inode:  inode being checked
 * @mask:   requested access (MAY_READ / MAY_WRITE / ...)
 * @unused: flags argument required by the prototype (ignored)
 *
 * Only root (euid 0) may touch any of these files.  The "control" file
 * is write-only; each rule file is always readable but writable only
 * while its flow has not been started.  Returns 0 to grant access,
 * -EACCES to deny.
 */
static int f_permission(struct inode *inode, int mask, unsigned int unused)
{
	int i;

	/* If not root, deny access */
	if (current_euid() != 0)
		return -EACCES;

	/* "control" file can only be written to */
	if (inode->i_ino == proc_net_firm_action->low_ino) {
		if (mask & MAY_WRITE)
			return 0;
		else
			return -EACCES;
	}

	/*
	 * Test if it was any of the rule files. Always allows reading,
	 * but writing only if the flow is not started.
	 */
	for (i = 0; f_flowtable[i].name != NULL; i++) {
		if (inode->i_ino == f_flowtable[i].pdir->low_ino) {
			if (mask & MAY_READ)
				return 0;
			if ((mask & MAY_WRITE) && !f_flowtable[i].started)
				return 0;
			else
				return -EACCES;
		}
	}

	/* For any other file. Should not reach this point. */
	printk(KERN_DEBUG "firm_vm: f_permission called for an invalid file.\n");
	return -EACCES;
}
/*
 * seq_file "show" callback for the per-user crypto overview: emits one
 * tab-separated line per active context (index, algorithm name,
 * added_time, encoded_count, decoded_count).  Inactive slots print
 * nothing.  The element pointer @v comes from the seq start/next ops.
 */
static int proc_overview_seq_show(struct seq_file *s, void *v)
{
	struct crypto_db *db;
	struct crypto_context *context;
	size_t ix;

	if(v == NULL) {
		return -EINVAL;
	}
	/* The db list is shared; serialize the per-euid lookup. */
	if(mutex_lock_interruptible(&get_cryptodev()->crypto_dbs_mutex)) {
		return -ERESTARTSYS;
	}
	db = get_or_create_crypto_db(&get_cryptodev()->crypto_dbs, current_euid());
	mutex_unlock(&get_cryptodev()->crypto_dbs_mutex);
	if(NULL == db) {
		return -ENOMEM;
	}
	context = v;
	/* Index of this element within db->contexts[] (pointer arithmetic). */
	ix = context - db->contexts;
	if(context->is_active) {
		seq_printf(s, "%zd\tdes\t%ld\t%ld\t%ld\n", ix,
			   context->added_time, context->encoded_count,
			   context->decoded_count);
	}
	return 0;
}
/*
 * set_gps_location - syscall: update the kernel's GPS fix (root only).
 * @loc: userspace pointer to the new location data
 *
 * Returns 0 on success, -EACCES if the caller is not root, -EINVAL if
 * the supplied location fails validation, -EFAULT on a bad user pointer.
 */
SYSCALL_DEFINE1(set_gps_location, struct gps_location __user *, loc)
{
	struct gps_location tmp;

	/* Only root can update the gps information */
	if (current_uid() != 0 && current_euid() != 0)
		return -EACCES;

	if (valid_gps(loc) != 0)
		return -EINVAL;

	/*
	 * Copy into a local buffer first.  The original code copied
	 * straight into kernel_gps.loc before taking gps_lock (readers
	 * could observe a torn update) and then memcpy()'d directly from
	 * the __user pointer while holding the lock, dereferencing
	 * userspace memory from kernel context.
	 */
	if (copy_from_user(&tmp, loc, sizeof(struct gps_location)) != 0)
		return -EFAULT;

	write_lock(&gps_lock);
	kernel_gps.timestamp = CURRENT_TIME;
	memcpy(&kernel_gps.loc, &tmp, sizeof(struct gps_location));
	write_unlock(&gps_lock);

	return 0;
}
/**
 * ecryptfs_miscdev_release
 * @inode: inode of fs/ecryptfs/euid handle (ignored)
 * @file: file for fs/ecryptfs/euid handle (ignored)
 *
 * This keeps the daemon registered until the daemon sends another
 * ioctl to fs/ecryptfs/ctl or until the kernel module unregisters.
 *
 * Returns zero on success; non-zero otherwise
 */
static int ecryptfs_miscdev_release(struct inode *inode, struct file *file)
{
	struct ecryptfs_daemon *daemon = NULL;
	uid_t euid = current_euid();
	int rc;

	mutex_lock(&ecryptfs_daemon_hash_mux);
	rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
	/* Fall back to the daemon stashed at open() time if the euid
	 * lookup fails (e.g. the task changed credentials since open). */
	if (rc || !daemon)
		daemon = file->private_data;
	mutex_lock(&daemon->mux);
	/* release without a prior open would be a kernel logic error */
	BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
	daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
	atomic_dec(&ecryptfs_num_miscdev_opens);
	mutex_unlock(&daemon->mux);
	rc = ecryptfs_exorcise_daemon(daemon);
	if (rc) {
		printk(KERN_CRIT "%s: Fatal error whilst attempting to "
		       "shut down daemon; rc = [%d]. Please report this "
		       "bug.\n", __func__, rc);
		BUG();
	}
	module_put(THIS_MODULE);
	mutex_unlock(&ecryptfs_daemon_hash_mux);
	return rc;
}
/*
 * scull_u_open - open for the single-user scull device.
 *
 * The device may be held by only one uid at a time: the first opener's
 * real uid becomes the owner, and further opens are allowed only for
 * the same real/effective uid or a CAP_DAC_OVERRIDE-capable task.
 * Returns 0 on success, -EBUSY if another user currently owns it.
 */
static int scull_u_open(struct inode *inode, struct file *filp)
{
	struct scull_dev *dev = &scull_u_device; /* device information */

	spin_lock(&scull_u_lock);
	if (scull_u_count &&
	    (scull_u_owner != current_uid()) &&  /* allow user */
	    (scull_u_owner != current_euid()) && /* allow whoever did su */
	    !capable(CAP_DAC_OVERRIDE)) { /* still allow root */
		spin_unlock(&scull_u_lock);
		return -EBUSY; /* -EPERM would confuse the user */
	}

	if (scull_u_count == 0)
		scull_u_owner = current_uid(); /* grab it */

	scull_u_count++;
	spin_unlock(&scull_u_lock);

	/* then, everything else is copied from the bare scull device */
	if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
		scull_trim(dev);
	filp->private_data = dev;
	return 0; /* success */
}
/*
 * check_quotactl_permission - authorize a quotactl command.
 * @sb:   filesystem the command targets
 * @type: quota type (USRQUOTA/GRPQUOTA)
 * @cmd:  quotactl command
 * @id:   uid/gid the command refers to
 *
 * Read-only status commands need no privilege; GETQUOTA is allowed for
 * the caller's own uid/gid; everything else requires CAP_SYS_ADMIN.
 * Finishes with the LSM hook, which may still deny.
 */
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
				     qid_t id)
{
	switch (cmd) {
	/* these commands do not require any special privilegues */
	case Q_GETFMT:
	case Q_SYNC:
	case Q_GETINFO:
	case Q_XGETQSTAT:
	case Q_XQUOTASYNC:
		break;
	/* allow to query information for dquots we "own" */
	case Q_GETQUOTA:
	case Q_XGETQUOTA:
		if ((type == USRQUOTA && current_euid() == id) ||
		    (type == GRPQUOTA && in_egroup_p(id)))
			break;
		/*FALLTHROUGH*/
	default:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return security_quotactl(cmd, type, id, sb);
}
/**
 * ecryptfs_miscdev_poll
 * @file: dev file (ignored)
 * @pt: dev poll table (ignored)
 *
 * Returns the poll mask
 */
static unsigned int
ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
{
	struct ecryptfs_daemon *daemon;
	unsigned int mask = 0;
	uid_t euid = current_euid();
	int rc;

	mutex_lock(&ecryptfs_daemon_hash_mux);
	/* TODO: Just use file->private_data? */
	rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
	/* poll before a successful open would be a kernel logic error */
	BUG_ON(rc || !daemon);
	mutex_lock(&daemon->mux);
	mutex_unlock(&ecryptfs_daemon_hash_mux);
	if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
		printk(KERN_WARNING "%s: Attempt to poll on zombified "
		       "daemon\n", __func__);
		goto out_unlock_daemon;
	}
	/* Only one read or poll may be in flight per daemon. */
	if (daemon->flags & ECRYPTFS_DAEMON_IN_READ)
		goto out_unlock_daemon;
	if (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)
		goto out_unlock_daemon;
	daemon->flags |= ECRYPTFS_DAEMON_IN_POLL;
	/* Drop the mutex across poll_wait(); it may block. */
	mutex_unlock(&daemon->mux);
	poll_wait(file, &daemon->wait, pt);
	mutex_lock(&daemon->mux);
	if (!list_empty(&daemon->msg_ctx_out_queue))
		mask |= POLLIN | POLLRDNORM;
out_unlock_daemon:
	daemon->flags &= ~ECRYPTFS_DAEMON_IN_POLL;
	mutex_unlock(&daemon->mux);
	return mask;
}
static inline bool kbasep_am_i_root(void) { #if KBASE_HWCNT_DUMP_BYPASS_ROOT return true; #else /* Check if root */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) if (uid_eq(current_euid(), GLOBAL_ROOT_UID)) return true; #else if (current_euid() == 0) return true; #endif /*LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)*/ return false; #endif /*KBASE_HWCNT_DUMP_BYPASS_ROOT*/ }
/*
 * write_dek_packet - serialize a DEK packet into @dest.
 * @dest:       destination buffer (caller guarantees sufficient space)
 * @crypt_stat: crypt state supplying flags and the DEK payload
 * @written:    out parameter, advanced past every byte emitted
 *
 * Layout: type octet, zero-padded process name (PKG_NAME_SIZE),
 * big-endian euid, then a zeroed DEK_MAXLEN region that is overwritten
 * with type/len/payload when the file is flagged sensitive.
 * Always returns 0.
 */
int write_dek_packet(char *dest, struct ecryptfs_crypt_stat *crypt_stat,
		     size_t *written)
{
	*written = 0;
	dest[(*written)++] = ECRYPTFS_DEK_PACKET_TYPE;

	/* Process name, zero-padded to its fixed field width. */
	memset(dest + *written, 0, PKG_NAME_SIZE);
	memcpy(dest + *written, current->comm, PKG_NAME_SIZE);
	(*written) += PKG_NAME_SIZE;

	put_unaligned_be32(current_euid(), dest + *written);
	(*written) += 4;

	/* Clear the whole DEK area even when no key material follows. */
	memset(dest + *written, 0, DEK_MAXLEN);
	if (crypt_stat->flags & ECRYPTFS_DEK_IS_SENSITIVE) {
		put_unaligned_be32(crypt_stat->sdp_dek.type, dest + *written);
		(*written) += 4;
		put_unaligned_be32(crypt_stat->sdp_dek.len, dest + *written);
		(*written) += 4;
		memcpy(dest + *written, crypt_stat->sdp_dek.buf,
		       crypt_stat->sdp_dek.len);
		(*written) += crypt_stat->sdp_dek.len;
	}
	return 0;
}
/*
 * Grant access (return 1) when the caller is effective-uid root or a
 * member of the AID_NET_RAW group; otherwise return 0.
 */
static inline int check_permissions(void)
{
	if (!current_euid())
		return 1;		/* effective root */
	if (in_egroup_p(AID_NET_RAW))
		return 1;		/* raw-socket group member */
	return 0;
}
/*
 * /proc entry permission hook.  Execute is never allowed; read/write is
 * granted only when the caller's effective uid matches the configured
 * ssh_procfs_uid.  The conditional extra parameters exist solely to
 * match the inode_operations->permission prototype of the running
 * kernel version; they are unused.
 */
static int interceptor_proc_entry_iop_permission(struct inode *inode, int op
#ifdef LINUX_INODE_OPERATION_PERMISSION_HAS_NAMEIDATA
						 , struct nameidata *nd
#endif /* LINUX_INODE_OPERATION_PERMISSION_HAS_NAMEIDATA */
#ifdef LINUX_INODE_OPERATION_PERMISSION_HAS_UINT
						 , unsigned int ed
#endif /* LINUX_INODE_OPERATION_PERMISSION_HAS_UINT */
						 )
{
	if (op & MAY_EXEC)
		return -EACCES;

	if ((op & (MAY_READ | MAY_WRITE)) &&
	    /* euid lives in different places depending on kernel version */
#ifdef LINUX_HAS_TASK_CRED_STRUCT
	    current_euid() == (uid_t) ssh_procfs_uid
#else /* LINUX_HAS_TASK_CRED_STRUCT */
	    current->euid == (uid_t) ssh_procfs_uid
#endif /* LINUX_HAS_TASK_CRED_STRUCT */
	    ) {
		return 0;
	}
	return -EACCES;
}
/*
 * A process may claim the write-once scull device when the device is
 * free, when its real or effective uid already owns it, or when it
 * holds CAP_DAC_OVERRIDE.  Nonzero means "available to this caller".
 */
static inline int scull_w_available(void)
{
	if (scull_w_count == 0)
		return 1;	/* nobody holds the device */
	if (scull_w_owner == current_uid() ||
	    scull_w_owner == current_euid())
		return 1;	/* same user (possibly via su) */
	return capable(CAP_DAC_OVERRIDE);	/* root override */
}
/*
 * refcount_error_report - rate-limited warning for refcount_t misuse.
 * @regs: register state at the point of the error
 * @err:  description of the refcount violation
 *
 * Logs the error string, the faulting instruction, the current task's
 * comm/pid, and its real/effective uids translated into the initial
 * user namespace.
 */
void refcount_error_report(struct pt_regs *regs, const char *err)
{
	WARN_RATELIMIT(1, "refcount_t %s at %pB in %s[%d], uid/euid: %u/%u\n",
		       err, (void *)instruction_pointer(regs),
		       current->comm, task_pid_nr(current),
		       from_kuid_munged(&init_user_ns, current_uid()),
		       from_kuid_munged(&init_user_ns, current_euid()));
}
/*
 * Permission callback: reads (op == 4) are allowed for everybody;
 * writes (op == 2) only for effective-uid root.  Everything else is
 * denied with -EACCES.
 */
static int module_permission(struct inode *inode, int op)
{
	if (op == 4)
		return 0;	/* read: always permitted */
	if (op == 2 && current_euid().val == 0)
		return 0;	/* write: root only */
	return -EACCES;
}
/*
 * Grant access (return 1) to effective-uid root, AID_NET_RAW members,
 * or — as a vendor extension — AID_RADIO members (telephony/RIL).
 */
static inline int check_permissions(void)
{
	int rc = 0;
	if (!current_euid() || in_egroup_p(AID_NET_RAW))
		rc = 1;

	/* Bright Lee, 20121009, allow engineer mode of modem info to create ipc socket { */
	if (in_egroup_p( 1001 /* AID_RADIO */ )) /* telephony subsystem, RIL, reference android_filesystem_config.h */
		rc = 1;
	/* } Bright Lee, 20121009 */

	return rc;
}
/**
 * ecryptfs_miscdev_open
 * @inode: inode of miscdev handle (ignored)
 * @file: file for miscdev handle (ignored)
 *
 * Registers (or re-finds) the calling process as the userspace daemon
 * for its euid and pins the module while the handle is open.  Only one
 * open per daemon is allowed, and it must come from the pid that
 * originally registered.
 *
 * Returns zero on success; non-zero otherwise
 */
static int ecryptfs_miscdev_open(struct inode *inode, struct file *file)
{
	struct ecryptfs_daemon *daemon = NULL;
	uid_t euid = current_euid();
	int rc;

	mutex_lock(&ecryptfs_daemon_hash_mux);
	rc = try_module_get(THIS_MODULE);
	if (rc == 0) {
		rc = -EIO;
		printk(KERN_ERR "%s: Error attempting to increment module use "
		       "count; rc = [%d]\n", __func__, rc);
		goto out_unlock_daemon_list;
	}
	rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
	if (rc || !daemon) {
		/* First open for this euid: create the daemon record. */
		rc = ecryptfs_spawn_daemon(&daemon, euid, current_user_ns(),
					   task_pid(current));
		if (rc) {
			printk(KERN_ERR "%s: Error attempting to spawn daemon; "
			       "rc = [%d]\n", __func__, rc);
			goto out_module_put_unlock_daemon_list;
		}
	}
	mutex_lock(&daemon->mux);
	if (daemon->pid != task_pid(current)) {
		rc = -EINVAL;
		printk(KERN_ERR "%s: pid [0x%p] has registered with euid [%d], "
		       "but pid [0x%p] has attempted to open the handle "
		       "instead\n", __func__, daemon->pid, daemon->euid,
		       task_pid(current));
		goto out_unlock_daemon;
	}
	if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) {
		rc = -EBUSY;
		printk(KERN_ERR "%s: Miscellaneous device handle may only be "
		       "opened once per daemon; pid [0x%p] already has this "
		       "handle open\n", __func__, daemon->pid);
		goto out_unlock_daemon;
	}
	daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
	file->private_data = daemon;
	atomic_inc(&ecryptfs_num_miscdev_opens);
out_unlock_daemon:
	mutex_unlock(&daemon->mux);
out_module_put_unlock_daemon_list:
	/* Drop the module pin we took above on any failure path. */
	if (rc)
		module_put(THIS_MODULE);
out_unlock_daemon_list:
	mutex_unlock(&ecryptfs_daemon_hash_mux);
	return rc;
}
/*
 * rootfctrl_dentry_open - LSM dentry_open hook guarding FeliCa/NFC files.
 *
 * For FeliCa RWP / NFC files, denies access (outside tracking mode) to
 * non-FeliCa root processes and to processes whose real and effective
 * uids differ.  For FeliCa WP files, denies access when the opener is
 * the tracked adbd process.  Everything else is allowed.
 */
static int rootfctrl_dentry_open(struct file *file, const struct cred *cred)
{
	pid_t pid;
	char tcomm[sizeof(current->comm)], name_buf[MAX_NAME_BUF_LEN];
	char *full_path = get_full_path(&file->f_path, NULL, name_buf);

	if (is_felica_RWP_file(full_path) || is_nfc_file(full_path)) {
		pid = task_tgid_vnr(current);
		get_task_comm(tcomm, current);

		RTFCTL_MSG("########## %s (felica/nfc) ##########\n", __FUNCTION__);
		RTFCTL_MSG("pid: %d (%s)\n", pid, tcomm);
		RTFCTL_MSG("uid: %d\n", current_uid());
		RTFCTL_MSG("euid: %d, suid: %d\n", current_euid(), current_suid());
#if (RTFCTL_RUN_MODE != RTFCTL_TRACKING_MODE)
		/* Reject unexpected root or uid/euid mismatch (su-like). */
		if (is_non_felica_root(current_uid(), pid) ||
		    (current_uid() != current_euid())) {
			printk("[RTFCTL] RType-1-1 <%s-%s (%d, %d, %d, %d)>\n",
			       full_path, tcomm, pid, current_uid(),
			       current_euid(), current_suid());
			return -EACCES;
		}
#endif
	} else if (is_felica_WP_file(full_path)) {
		pid = task_tgid_vnr(current);
		get_task_comm(tcomm, current);

		RTFCTL_MSG("########## %s (WP) ##########\n", __FUNCTION__);
		RTFCTL_MSG("pid: %d (%s)\n", pid, tcomm);
#if (RTFCTL_RUN_MODE != RTFCTL_TRACKING_MODE)
		/* adbd may not touch write-protect files. */
		if (pid == adbd_pid) {
			printk("[RTFCTL] RType-1-2 <%s-%s (%d)>\n",
			       full_path, tcomm, pid);
			return -EACCES;
		}
#endif
	}
	return 0;
}
/*
 * MksckCreate - create an mksck socket (AF family create callback).
 *
 * Only root or the configured Mvpkm_vmwareUid may create these sockets.
 * Only SOCK_DGRAM with protocol 0 is supported.  Allocates and
 * initializes the struct sock; returns 0 or a -E* error code.
 */
static int
MksckCreate(struct net *net, struct socket *sock, int protocol, int kern)
{
	struct sock *sk;
	uid_t currentUid = current_euid();

	/* Restrict socket creation to root and the vmware service uid. */
	if (!(currentUid == 0 || currentUid == Mvpkm_vmwareUid)) {
		pr_warn("MksckCreate: rejected from process %s " \
			"tgid=%d, pid=%d euid:%d.\n",
			current->comm,
			task_tgid_vnr(current),
			task_pid_vnr(current),
			currentUid);
		return -EPERM;
	}

	if (!sock)
		return -EINVAL;

	if (protocol)
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &mksckDgramOps;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, mksckFamilyOps.family, GFP_KERNEL, &mksckProto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sk->sk_type = SOCK_DGRAM;
	sk->sk_destruct = MksckSkDestruct;
	sk->sk_backlog_rcv = MksckBacklogRcv;
	/* No protocol-private state attached yet. */
	sk->sk_protinfo = NULL;
	sock_reset_flag(sk, SOCK_DONE);
	return 0;
}
/*
 * proc_des_read - old-style procfs read: hand out the next new-context
 * index for the caller's crypto db.
 *
 * Blocks (interruptibly) until a new context is queued, then writes its
 * index into @buffer as decimal text.  Single-shot: any nonzero offset
 * returns EOF; reads smaller than 11 bytes are rejected up front so the
 * sprintf below cannot overrun the caller's buffer.
 */
static int proc_des_read(char *buffer, char **start, off_t offset, int count,
			 int *eof, void *data)
{
	int result, written;
	struct crypto_db *db;
	struct new_context_info *info;

	if(offset > 0) {
		*eof = 1;
		return 0;
	}
	if(count <= 10) { // We do not support small reads
		return -EINVAL;
	}

	if(mutex_lock_interruptible(&get_cryptodev()->crypto_dbs_mutex)) {
		return -ERESTARTSYS;
	}
	db = get_or_create_crypto_db(&get_cryptodev()->crypto_dbs, current_euid());
	mutex_unlock(&get_cryptodev()->crypto_dbs_mutex);
	if(NULL == db) {
		result = -ENOMEM;
		goto out;
	}

	/* Serialize waiters so each queued context goes to one reader. */
	if(mutex_lock_interruptible(&db->new_context_wait_mutex)) {
		result = -ERESTARTSYS;
		goto out;
	}
	if(wait_event_interruptible(db->new_context_created_waitqueue,
				    !list_empty(&db->new_contexts_queue))) {
		result = -ERESTARTSYS;
		goto mutex_unlock;
	}

	/* Pop the oldest queued context under the list spinlock. */
	spin_lock(&db->new_contexts_list_lock);
	info = list_first_entry(&db->new_contexts_queue,
				struct new_context_info, contexts);
	list_del(&info->contexts);
	spin_unlock(&db->new_contexts_list_lock);

	written = sprintf(buffer, "%d", info->ix);
	kfree(info);
	result = min(written, count);

mutex_unlock:
	mutex_unlock(&db->new_context_wait_mutex);
out:
	return result;
}
/* Return standard mode bits for table entry. */ static int net_ctl_permissions(struct ctl_table_header *head, struct ctl_table *table) { struct net *net = container_of(head->set, struct net, sysctls); kuid_t root_uid = make_kuid(net->user_ns, 0); kgid_t root_gid = make_kgid(net->user_ns, 0); /* Allow network administrator to have same access as root. */ if (ns_capable(net->user_ns, CAP_NET_ADMIN) || uid_eq(root_uid, current_euid())) { int mode = (table->mode >> 6) & 7; return (mode << 6) | (mode << 3) | mode; }
/*
 * Find which interface to use.
 *
 * Resolves a route for @addr, binds @ax25 to the resulting device, and
 * chooses a source callsign: the caller's registered uid association if
 * one exists, otherwise the device address (which requires either a
 * permissive uid policy or CAP_NET_BIND_SERVICE).  Copies the route's
 * digipeater path if present.  Returns 0 or a -E* error code.
 */
int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
{
	ax25_uid_assoc *user;
	ax25_route *ax25_rt;
	int err = 0;

	ax25_route_lock_use();
	ax25_rt = ax25_get_route(addr, NULL);
	if (!ax25_rt) {
		ax25_route_lock_unuse();
		return -EHOSTUNREACH;
	}
	if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
		err = -EHOSTUNREACH;
		goto put;
	}

	/* Prefer the callsign registered for the caller's euid. */
	user = ax25_findbyuid(current_euid());
	if (user) {
		ax25->source_addr = user->call;
		ax25_uid_put(user);
	} else {
		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
			err = -EPERM;
			goto put;
		}
		ax25->source_addr = *(ax25_address *)ax25->ax25_dev->dev->dev_addr;
	}

	if (ax25_rt->digipeat != NULL) {
		ax25->digipeat = kmemdup(ax25_rt->digipeat, sizeof(ax25_digi),
					 GFP_ATOMIC);
		if (ax25->digipeat == NULL) {
			err = -ENOMEM;
			goto put;
		}
		ax25_adjust_path(addr, ax25->digipeat);
	}

	if (ax25->sk != NULL) {
		bh_lock_sock(ax25->sk);
		sock_reset_flag(ax25->sk, SOCK_ZAPPED);
		bh_unlock_sock(ax25->sk);
	}

put:
	ax25_route_lock_unuse();
	return err;
}
/*
 * Permission callback for the module's /proc entry.  The magic numbers
 * are the mask values observed on this kernel: 36 for a read request,
 * 34 for a write request.
 */
static int module_permission(struct inode *inode, int op) //, struct nameidata *foo)
{
	/*
	 * We allow everybody to read (op==36) from our module, but
	 * only root (uid 0) may write to it (op==34)
	 */
	printk(KERN_INFO "op %d - euid %d \n", op, current_cred()->euid);
	if (op == 36 || (op == 34 && current_euid()== 0))
		// was if (op == 4 || (op == 2 && current->euid== 0))current->euid
		return 0;

	/*
	 * If it's anything else, access is denied
	 */
	return -EACCES;
}
/*
 * /proc entry permission hook (2-argument prototype variant).  Execute
 * is never allowed; read/write is granted only when the caller's
 * effective uid matches the configured ssh_procfs_uid.
 */
static int interceptor_proc_entry_iop_permission(struct inode *inode, int mask)
{
	if (mask & MAY_EXEC)
		return -EACCES;

	if ((mask & (MAY_READ | MAY_WRITE)) &&
	    /* euid lives in different places depending on kernel version */
#ifdef LINUX_HAS_TASK_CRED_STRUCT
	    current_euid() == (uid_t) ssh_procfs_uid
#else /* LINUX_HAS_TASK_CRED_STRUCT */
	    current->euid == (uid_t) ssh_procfs_uid
#endif /* LINUX_HAS_TASK_CRED_STRUCT */
	    ) {
		return 0;
	}
	return -EACCES;
}
/*
 * seq_file "start" callback for the per-user crypto overview: returns a
 * pointer to the context slot at *pos in the caller's crypto db, or
 * NULL at end-of-table / on lookup failure (seq_file treats NULL as
 * end of iteration).
 */
static void* proc_overview_seq_start(struct seq_file *s, loff_t *pos)
{
	struct crypto_db *db;

	if(*pos >= CRYPTO_MAX_CONTEXT_COUNT) {
		return NULL;
	}
	/* The db list is shared; serialize the per-euid lookup. */
	if(mutex_lock_interruptible(&get_cryptodev()->crypto_dbs_mutex)) {
		return NULL;
	}
	db = get_or_create_crypto_db(&get_cryptodev()->crypto_dbs, current_euid());
	mutex_unlock(&get_cryptodev()->crypto_dbs_mutex);
	if(NULL == db) {
		return NULL;
	}
	return &db->contexts[*pos];
}
/*
 * dek_ioctl_kek - ioctl entry point for the KEK device.
 *
 * Only the container app or root may issue KEK ioctls; everyone else
 * is logged and refused.  Dispatches to dek_do_ioctl_kek() with the
 * device minor number.
 */
static long dek_ioctl_kek(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	unsigned int minor;

	if(!is_container_app() && !is_root()) {
		DEK_LOGE("Current process can't access kek device\n");
		/* Dump the full credential set for the audit trail. */
		DEK_LOGE("Current process info :: "
			 "uid=%u gid=%u euid=%u egid=%u suid=%u sgid=%u "
			 "fsuid=%u fsgid=%u\n",
			 current_uid(), current_gid(),
			 current_euid(), current_egid(),
			 current_suid(), current_sgid(),
			 current_fsuid(), current_fsgid());
		dek_add_to_log(000, "Access denied to kek device");
		return -EACCES;
	}

	minor = iminor(file->f_path.dentry->d_inode);
	return dek_do_ioctl_kek(minor, cmd, arg);
}
/*
 * Get the persistent keyring for a specific UID and link it to the nominated
 * keyring.
 *
 * @_uid:   target uid, or (uid_t)-1 for the caller's own uid
 * @destid: keyring to link the persistent keyring into
 *
 * Accessing another user's persistent keyring requires CAP_SETUID
 * unless the target matches the caller's real or effective uid.
 * Returns a key serial on success or a negative error code.
 */
long keyctl_get_persistent(uid_t _uid, key_serial_t destid)
{
	struct user_namespace *ns = current_user_ns();
	key_ref_t dest_ref;
	kuid_t uid;
	long ret;

	/* -1 indicates the current user */
	if (_uid == (uid_t)-1) {
		uid = current_uid();
	} else {
		uid = make_kuid(ns, _uid);
		if (!uid_valid(uid))
			return -EINVAL;

		/* You can only see your own persistent cache if you're not
		 * sufficiently privileged.
		 */
		if (!uid_eq(uid, current_uid()) &&
		    !uid_eq(uid, current_euid()) &&
		    !ns_capable(ns, CAP_SETUID))
			return -EPERM;
	}

	/* There must be a destination keyring */
	dest_ref = lookup_user_key(destid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
	if (IS_ERR(dest_ref))
		return PTR_ERR(dest_ref);
	if (key_ref_to_ptr(dest_ref)->type != &key_type_keyring) {
		ret = -ENOTDIR;
		goto out_put_dest;
	}

	ret = key_get_persistent(ns, uid, dest_ref);

out_put_dest:
	key_ref_put(dest_ref);
	return ret;
}
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 * @pid:           target process (0 = current)
 * @len:           size of the user-supplied cpu mask
 * @user_mask_ptr: userspace pointer to the new mask
 *
 * Permission: the caller must share a uid/euid with the target or hold
 * CAP_SYS_NICE, and must pass the LSM setscheduler hook.  FPU-bound
 * tasks are additionally restricted to the FPU-capable cpu set.
 * Returns 0 or a negative error code.
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
					     unsigned long __user *user_mask_ptr)
{
	cpumask_t new_mask;
	cpumask_t effective_mask;
	int retval;
	struct task_struct *p;
	struct thread_info *ti;
	uid_t euid;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	get_online_cpus();
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		put_online_cpus();
		return -ESRCH;
	}

	/*
	 * It is not safe to call set_cpus_allowed with the
	 * tasklist_lock held. We will bump the task_struct's
	 * usage count and drop tasklist_lock before invoking
	 * set_cpus_allowed.
	 */
	get_task_struct(p);

	euid = current_euid();
	retval = -EPERM;
	if (euid != p->cred->euid && euid != p->cred->uid &&
	    !capable(CAP_SYS_NICE)) {
		read_unlock(&tasklist_lock);
		goto out_unlock;
	}

	retval = security_task_setscheduler(p, 0, NULL);
	if (retval) {
		/*
		 * FIX: the original jumped to out_unlock with
		 * tasklist_lock still read-held on this path, leaking
		 * the lock.  Release it before bailing out.
		 */
		read_unlock(&tasklist_lock);
		goto out_unlock;
	}

	/* Record new user-specified CPU set for future reference */
	p->thread.user_cpus_allowed = new_mask;

	/* Unlock the task list */
	read_unlock(&tasklist_lock);

	/* Compute new global allowed CPU set if necessary */
	ti = task_thread_info(p);
	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
	    cpus_intersects(new_mask, mt_fpu_cpumask)) {
		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed_ptr(p, &effective_mask);
	} else {
		clear_ti_thread_flag(ti, TIF_FPUBOUND);
		retval = set_cpus_allowed_ptr(p, &new_mask);
	}

out_unlock:
	put_task_struct(p);
	put_online_cpus();
	return retval;
}
/*
 * fl_create - allocate and initialize an IPv6 flow label object from a
 * userspace in6_flowlabel_req plus optional ancillary option data.
 *
 * On success returns the new label with a single user reference; on
 * failure frees any partial allocation, stores the error in *err_p and
 * returns NULL.
 */
static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	/* Bytes of option data following the request header, if any. */
	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (fl == NULL)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		int junk;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (fl->opt == NULL)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		/* Parse the copied cmsg data into the option structure. */
		msg.msg_controllen = olen;
		msg.msg_control = (void*)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
					&junk, &junk, &junk);
		if (err)
			goto done;
		err = -EINVAL;
		/* Fragment-header options are not valid on a flow label. */
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = hold_net(net);
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) || addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	/* Record who may share this label, per the requested policy. */
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner = current->pid;
		break;
	case IPV6_FL_S_USER:
		fl->owner = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}
/**
 * ecryptfs_miscdev_read - format and send message from queue
 * @file: fs/ecryptfs/euid miscdevfs handle (ignored)
 * @buf: User buffer into which to copy the next message on the daemon queue
 * @count: Amount of space available in @buf
 * @ppos: Offset in file (ignored)
 *
 * Pulls the most recent message from the daemon queue, formats it for
 * being sent via a miscdevfs handle, and copies it into @buf
 *
 * Returns the number of bytes copied into the user buffer
 */
static ssize_t ecryptfs_miscdev_read(struct file *file, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct ecryptfs_daemon *daemon;
	struct ecryptfs_msg_ctx *msg_ctx;
	size_t packet_length_size;
	char packet_length[3];
	size_t i;
	size_t total_length;
	uid_t euid = current_euid();
	int rc;

	mutex_lock(&ecryptfs_daemon_hash_mux);
	/* TODO: Just use file->private_data? */
	rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
	if (rc || !daemon) {
		mutex_unlock(&ecryptfs_daemon_hash_mux);
		return -EINVAL;
	}
	mutex_lock(&daemon->mux);
	/* Only the registering pid may read from the daemon handle. */
	if (task_pid(current) != daemon->pid) {
		mutex_unlock(&daemon->mux);
		mutex_unlock(&ecryptfs_daemon_hash_mux);
		return -EPERM;
	}
	if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
		rc = 0;
		mutex_unlock(&ecryptfs_daemon_hash_mux);
		printk(KERN_WARNING "%s: Attempt to read from zombified "
		       "daemon\n", __func__);
		goto out_unlock_daemon;
	}
	if (daemon->flags & ECRYPTFS_DAEMON_IN_READ) {
		rc = 0;
		mutex_unlock(&ecryptfs_daemon_hash_mux);
		goto out_unlock_daemon;
	}
	/* This daemon will not go away so long as this flag is set */
	daemon->flags |= ECRYPTFS_DAEMON_IN_READ;
	mutex_unlock(&ecryptfs_daemon_hash_mux);
check_list:
	if (list_empty(&daemon->msg_ctx_out_queue)) {
		/* Drop the daemon mutex while sleeping for a message. */
		mutex_unlock(&daemon->mux);
		rc = wait_event_interruptible(
			daemon->wait, !list_empty(&daemon->msg_ctx_out_queue));
		mutex_lock(&daemon->mux);
		if (rc < 0) {
			rc = 0;
			goto out_unlock_daemon;
		}
	}
	if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
		rc = 0;
		goto out_unlock_daemon;
	}
	if (list_empty(&daemon->msg_ctx_out_queue)) {
		/* Something else jumped in since the
		 * wait_event_interruptable() and removed the
		 * message from the queue; try again */
		goto check_list;
	}
	msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
				   struct ecryptfs_msg_ctx, daemon_out_list);
	BUG_ON(!msg_ctx);
	mutex_lock(&msg_ctx->mux);
	if (msg_ctx->msg) {
		rc = ecryptfs_write_packet_length(packet_length,
						  msg_ctx->msg_size,
						  &packet_length_size);
		if (rc) {
			rc = 0;
			printk(KERN_WARNING "%s: Error writing packet length; "
			       "rc = [%d]\n", __func__, rc);
			goto out_unlock_msg_ctx;
		}
	} else {
		packet_length_size = 0;
		msg_ctx->msg_size = 0;
	}
	/* miscdevfs packet format:
	 *  Octet 0: Type
	 *  Octets 1-4: network byte order msg_ctx->counter
	 *  Octets 5-N0: Size of struct ecryptfs_message to follow
	 *  Octets N0-N1: struct ecryptfs_message (including data)
	 *
	 *  Octets 5-N1 not written if the packet type does not
	 *  include a message */
	total_length = (1 + 4 + packet_length_size + msg_ctx->msg_size);
	if (count < total_length) {
		rc = 0;
		printk(KERN_WARNING "%s: Only given user buffer of "
		       "size [%zd], but we need [%zd] to read the "
		       "pending message\n", __func__, count, total_length);
		goto out_unlock_msg_ctx;
	}
	rc = -EFAULT;
	if (put_user(msg_ctx->type, buf))
		goto out_unlock_msg_ctx;
	if (put_user(cpu_to_be32(msg_ctx->counter),
		     (__be32 __user *)(buf + 1)))
		goto out_unlock_msg_ctx;
	i = 5;
	if (msg_ctx->msg) {
		if (copy_to_user(&buf[i], packet_length, packet_length_size))
			goto out_unlock_msg_ctx;
		i += packet_length_size;
		if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
			goto out_unlock_msg_ctx;
		i += msg_ctx->msg_size;
	}
	rc = i;
	/* Message fully delivered; remove it from the queue. */
	list_del(&msg_ctx->daemon_out_list);
	kfree(msg_ctx->msg);
	msg_ctx->msg = NULL;
	/* We do not expect a reply from the userspace daemon for any
	 * message type other than ECRYPTFS_MSG_REQUEST */
	if (msg_ctx->type != ECRYPTFS_MSG_REQUEST)
		ecryptfs_msg_ctx_alloc_to_free(msg_ctx);
out_unlock_msg_ctx:
	mutex_unlock(&msg_ctx->mux);
out_unlock_daemon:
	daemon->flags &= ~ECRYPTFS_DAEMON_IN_READ;
	mutex_unlock(&daemon->mux);
	return rc;
}
/**
 * ecryptfs_miscdev_write - handle write to daemon miscdev handle
 * @file: File for misc dev handle (ignored)
 * @buf: Buffer containing user data
 * @count: Amount of data in @buf
 * @ppos: Pointer to offset in file (ignored)
 *
 * miscdevfs packet format:
 *  Octet 0: Type
 *  Octets 1-4: network byte order msg_ctx->counter (0's for non-response)
 *  Octets 5-N0: Size of struct ecryptfs_message to follow
 *  Octets N0-N1: struct ecryptfs_message (including data)
 *
 * Returns the number of bytes read from @buf
 */
static ssize_t
ecryptfs_miscdev_write(struct file *file, const char __user *buf,
		       size_t count, loff_t *ppos)
{
	__be32 counter_nbo;
	u32 seq;
	size_t packet_size, packet_size_length, i;
	ssize_t sz = 0;
	char *data;
	uid_t euid = current_euid();
	unsigned char packet_size_peek[3];
	int rc;

	if (count == 0) {
		goto out;
	} else if (count == (1 + 4)) {
		/* Likely a harmless MSG_HELO or MSG_QUIT - no packet length */
		goto memdup;
	} else if (count < (1 + 4 + 1)
		   || count > (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
			       + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES)) {
		printk(KERN_WARNING "%s: Acceptable packet size range is "
		       "[%d-%lu], but amount of data written is [%zu].",
		       __func__, (1 + 4 + 1),
		       (1 + 4 + 2 + sizeof(struct ecryptfs_message) + 4
			+ ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES), count);
		return -EINVAL;
	}

	/* Peek at the encoded packet length so we can validate @count
	 * before copying the whole buffer. */
	if (copy_from_user(packet_size_peek, (buf + 1 + 4),
			   sizeof(packet_size_peek))) {
		printk(KERN_WARNING "%s: Error while inspecting packet size\n",
		       __func__);
		return -EFAULT;
	}

	rc = ecryptfs_parse_packet_length(packet_size_peek, &packet_size,
					  &packet_size_length);
	if (rc) {
		printk(KERN_WARNING "%s: Error parsing packet length; "
		       "rc = [%d]\n", __func__, rc);
		return rc;
	}

	if ((1 + 4 + packet_size_length + packet_size) != count) {
		printk(KERN_WARNING "%s: Invalid packet size [%zu]\n", __func__,
		       packet_size);
		return -EINVAL;
	}

memdup:
	data = memdup_user(buf, count);
	if (IS_ERR(data)) {
		/* NOTE(review): on memdup_user failure this returns sz (0)
		 * rather than the error code — looks unintended; confirm
		 * against upstream before changing. */
		printk(KERN_ERR "%s: memdup_user returned error [%ld]\n",
		       __func__, PTR_ERR(data));
		goto out;
	}
	sz = count;
	i = 0;
	switch (data[i++]) {
	case ECRYPTFS_MSG_RESPONSE:
		if (count < (1 + 4 + 1 + sizeof(struct ecryptfs_message))) {
			printk(KERN_WARNING "%s: Minimum acceptable packet "
			       "size is [%zd], but amount of data written is "
			       "only [%zd]. Discarding response packet.\n",
			       __func__,
			       (1 + 4 + 1 + sizeof(struct ecryptfs_message)),
			       count);
			goto out_free;
		}
		memcpy(&counter_nbo, &data[i], 4);
		seq = be32_to_cpu(counter_nbo);
		i += 4 + packet_size_length;
		rc = ecryptfs_miscdev_response(&data[i], packet_size,
					       euid, current_user_ns(),
					       task_pid(current), seq);
		if (rc)
			printk(KERN_WARNING "%s: Failed to deliver miscdev "
			       "response to requesting operation; rc = [%d]\n",
			       __func__, rc);
		break;
	case ECRYPTFS_MSG_HELO:
	case ECRYPTFS_MSG_QUIT:
		break;
	default:
		ecryptfs_printk(KERN_WARNING, "Dropping miscdev "
				"message of unrecognized type [%d]\n",
				data[0]);
		break;
	}
out_free:
	kfree(data);
out:
	return sz;
}