/*
 * memory_engine_takeover - transfer user-ownership of an allocated node
 * to the current task.
 * @engine:    heap engine to search (must be non-NULL)
 * @alignaddr: aligned address identifying the node (must be non-zero)
 *
 * Records current's tgid and group-leader comm in the node so later
 * debugging/accounting attributes the block to the taking-over task.
 * Returns 0 on success, -EINVAL on bad arguments, -EFAULT if the node is
 * on the free list, or the error from the lookup.
 */
int memory_engine_takeover(memory_engine_t *engine, int alignaddr)
{
	int res = 0;
	memory_node_t *node = NULL;
	struct task_struct *grptask = NULL;

	shm_debug("mem_engine_takeover start, (0x%08X)\n", alignaddr);

	if ((NULL == engine) || (0 == alignaddr))
		return -EINVAL;

	down(&(engine->m_mutex));

	/* find alignaddr */
	res = _FindNode_alignaddr(engine, alignaddr, &node);
	if (0 != res)
		goto err_exit;

	/* if the node found is a free one, there could be invalid operations */
	if (NULL != node->m_next_free) {
		shm_error("node(%#.8x) already freed\n", alignaddr);
		res = -EFAULT;
		goto err_exit;
	}

	/* change usrtaskid to current */
	node->m_usrtaskid = task_tgid_vnr(current);
	grptask = pid_task(task_tgid(current), PIDTYPE_PID);
	if (NULL == grptask) {
		memset(node->m_usrtaskname, 0, 16);
	} else {
		strncpy(node->m_usrtaskname, grptask->comm, 16);
		/* strncpy() does not NUL-terminate when the source fills the
		 * buffer; force a terminator so later %s users are safe. */
		node->m_usrtaskname[15] = '\0';
	}

	up(&(engine->m_mutex));

	shm_debug("memory_engine_takeover OK.\n");
	return 0;

err_exit:
	up(&(engine->m_mutex));
	shm_error("memory_engine_takeover failed!!! (0x%08X)\n", alignaddr);
	return res;
}
static __inline__ int scm_check_creds(struct ucred *creds) { const struct cred *cred = current_cred(); kuid_t uid = make_kuid(cred->user_ns, creds->uid); kgid_t gid = make_kgid(cred->user_ns, creds->gid); if (!uid_valid(uid) || !gid_valid(gid)) return -EINVAL; if ((creds->pid == task_tgid_vnr(current) || ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) && ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || uid_eq(uid, cred->suid)) || ns_capable(cred->user_ns, CAP_SETUID)) && ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || gid_eq(gid, cred->sgid)) || ns_capable(cred->user_ns, CAP_SETGID))) { return 0; } return -EPERM; }
static int shm_driver_open_noncache(struct inode *inode, struct file *filp) { /*save the device and opening task id to private_data * then from user space we will depend on the specified device * since there are cache and non-cache device */ struct shm_device_priv_data *priv_data = kzalloc(sizeof(struct shm_device_priv_data), GFP_KERNEL); if (NULL == priv_data) { shm_error("shm_driver_open_noncache fail to allocate memory\n"); return -ENOMEM; } priv_data->m_taskid = task_tgid_vnr(current); priv_data->m_device = shm_device_noncache; filp->private_data = priv_data; shm_debug("shm_driver_open_noncache ok\n"); return 0; }
/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file * file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	/* writer lock: we modify the segment and may destroy it */
	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));	/* the id must still resolve at this point */
	shp->shm_lprid = task_tgid_vnr(current);	/* last detaching tgid (pid-ns virtual) */
	shp->shm_dtim = get_seconds();			/* detach timestamp */
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		/* NOTE(review): shm_unlock() only on this branch —
		 * shm_destroy() presumably releases the ipc lock itself;
		 * confirm against its definition. */
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
}
/*
 * rootfctrl_path_chmod - LSM hook vetting chmod on protected paths.
 *
 * Allows the mode change unless it violates the felica/nfc mode policy.
 * In tracking mode the violation is only logged; otherwise -EACCES is
 * returned to block the operation.
 */
static int rootfctrl_path_chmod(struct path *path, umode_t mode)
{
	char tcomm[sizeof(current->comm)], name_buf[MAX_NAME_BUF_LEN];
	char *full_path = get_full_path(path, NULL, name_buf);
	pid_t pid;

	/* Mode acceptable for both policies: nothing to do. */
	if (is_felica_mode_valid(mode, full_path) &&
	    is_nfc_mode_valid(mode, full_path))
		return 0;

	pid = task_tgid_vnr(current);
	get_task_comm(tcomm, current);
	RTFCTL_MSG("########## %s ##########\n", __FUNCTION__);
	RTFCTL_MSG("pid: %d (%s)\n", pid, tcomm);
#if (RTFCTL_RUN_MODE != RTFCTL_TRACKING_MODE)
	printk("[RTFCTL] RType-6 <%s (%o)-%s (%d, %d)>\n", full_path, mode,
		tcomm, pid, current_uid());
	return -EACCES;
#endif
	return 0;
}
/*
 * rootfctrl_path_unlink - LSM hook vetting unlink of protected files.
 *
 * Deleting a felica or nfc file is denied (-EACCES) except in tracking
 * mode, where the attempt is only logged.
 */
static int rootfctrl_path_unlink(struct path *dir, struct dentry *dentry)
{
	char tcomm[sizeof(current->comm)], name_buf[MAX_NAME_BUF_LEN];
	char *full_path = get_full_path(dir, dentry, name_buf);
	pid_t pid;

	/* Only protected felica/nfc files are of interest. */
	if (!is_felica_file(full_path) && !is_nfc_file(full_path))
		return 0;

	pid = task_tgid_vnr(current);
	get_task_comm(tcomm, current);
	RTFCTL_MSG("########## %s ##########\n", __FUNCTION__);
	RTFCTL_MSG("pid: %d (%s)\n", pid, tcomm);
#if (RTFCTL_RUN_MODE != RTFCTL_TRACKING_MODE)
	printk("[RTFCTL] RType-2 <%s-%s (%d)>\n", full_path, tcomm, pid);
	return -EACCES;
#endif
	return 0;
}
static int rootfctrl_task_create(unsigned long clone_flags) { pid_t ppid = task_tgid_vnr(current->real_parent); char tcomm[sizeof(current->comm)]; get_task_comm(tcomm, current); if (zygote_pid == -1) { if (!strcmp("zygote", tcomm) && current_uid() == 0 && ppid == 1) { RTFCTL_MSG("########## %s ##########\n", __FUNCTION__); zygote_pid = task_tgid_vnr(current); printk("[RTFCTL] Current zg: %d\n", zygote_pid); } } if (flc_daemon_pid == -1) { if (!strcmp("felica_daemon", tcomm) && current_uid() == 0 && ppid == 1) { RTFCTL_MSG("########## %s ##########\n", __FUNCTION__); flc_daemon_pid = task_tgid_vnr(current); printk("[RTFCTL] Current fdaemon: %d\n", flc_daemon_pid); } } if (flc_agent_pid == -1) { if (!strcmp("felica_agent", tcomm) && current_uid() == 0 && ppid == 1) { RTFCTL_MSG("########## %s ##########\n", __FUNCTION__); flc_agent_pid = task_tgid_vnr(current); printk("[RTFCTL] Current fagent: %d\n", flc_agent_pid); } } if (installd_pid == -1) { if (!strcmp("installd", tcomm) && current_uid() == 0 && ppid == 1) { RTFCTL_MSG("########## %s ##########\n", __FUNCTION__); installd_pid = task_tgid_vnr(current); printk("[RTFCTL] Current isd: %d\n", installd_pid); } } if (!strcmp("adbd", tcomm) && ppid == 1) { RTFCTL_MSG("########## %s ##########\n", __FUNCTION__); adbd_pid = task_tgid_vnr(current); } return 0; }
/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 *
 * Returns ispipe (non-zero when core_pattern begins with '|', meaning the
 * core is piped to a user-space helper), -ENOMEM on allocation failure,
 * or a negative error propagated from the cn_printf() helpers.
 */
static int format_corename(struct core_name *cn, struct coredump_params *cprm)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	int pid_in_pattern = 0;
	int err = 0;

	/* output buffer sized by the global call_count multiplier */
	cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
	cn->corename = kmalloc(cn->size, GFP_KERNEL);
	cn->used = 0;

	if (!cn->corename)
		return -ENOMEM;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			/* NOTE(review): unreachable — the while condition
			 * already excludes *pat_ptr == 0. */
			if (*pat_ptr == 0)
				goto out;
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* uid */
			case 'u':
				err = cn_printf(cn, "%d", cred->uid);
				break;
			/* gid */
			case 'g':
				err = cn_printf(cn, "%d", cred->gid);
				break;
			/* dumpable bits derived from mm flags */
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump */
			case 's':
				err = cn_printf(cn, "%ld", cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				err = cn_printf(cn, "%lu", tv.tv_sec);
				break;
			}
			/* hostname */
			case 'h': {
				char *namestart = cn->corename + cn->used;
				/* uts_sem guards utsname() contents */
				down_read(&uts_sem);
				err = cn_printf(cn, "%s", utsname()->nodename);
				up_read(&uts_sem);
				/* cn_escape(): sanitizes the text appended
				 * from namestart onward (definition not
				 * visible here) */
				cn_escape(namestart);
				break;
			}
			/* executable */
			case 'e': {
				char *commstart = cn->corename + cn->used;
				err = cn_printf(cn, "%s", current->comm);
				cn_escape(commstart);
				break;
			}
			case 'E':
				err = cn_print_exe_file(cn);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu", rlimit(RLIMIT_CORE));
				break;
			default:
				/* unknown specifier: silently dropped */
				break;
			}
			++pat_ptr;
		}
		if (err)
			return err;
	}

	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
out:
	return ispipe;
}
/*
 * do_msgrcv - receive a message from a System V message queue.
 * @msqid:  queue id
 * @pmtype: out: type of the received message
 * @mtext:  user buffer for the message text
 * @msgsz:  capacity of @mtext
 * @msgtyp: type selector (interpreted per convert_mode())
 * @msgflg: IPC_NOWAIT / MSG_NOERROR etc.
 *
 * Returns the number of bytes copied to user space, or a negative errno.
 * Sleeps (TASK_INTERRUPTIBLE) when no matching message exists and
 * IPC_NOWAIT is not set; wakeup is delivered via msr_d.r_msg using the
 * lockless protocol commented below.
 */
long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
		size_t msgsz, long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;
	struct ipc_namespace *ns;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;

	mode = convert_mode(&msgtyp, msgflg);
	ns = current->nsproxy->ipc_ns;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq))
		return PTR_ERR(msq);

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		/* default result if the scan finds nothing */
		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {
				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
						walk_msg->m_type != 1) {
					/* keep scanning for an even smaller
					 * type; NOTE(review): the repeated
					 * msg = walk_msg assignments are
					 * redundant but harmless. */
					msg = walk_msg;
					msgtyp = walk_msg->m_type - 1;
				} else {
					msg = walk_msg;
					break;
				}
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);	/* last reader */
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			/* freed space may unblock sleeping senders */
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		/* register ourselves as a pending receiver before sleeping */
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();

		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existance of msq:
		 * Prior to destruction, expunge_all(-EIRDM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue not yet destroyed.
		 * rcu_read_lock() prevents preemption between reading r_msg
		 * and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg*)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}

		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 3:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 4:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg*)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	/* copy at most msgsz bytes (MSG_NOERROR allows truncation) */
	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	*pmtype = msg->m_type;
	if (store_msg(mtext, msg, msgsz))
		msgsz = -EFAULT;

	free_msg(msg);

	return msgsz;
}
/*
 * do_msgsnd - send a message to a System V message queue.
 * @msqid:  queue id
 * @mtype:  message type, must be >= 1
 * @mtext:  user buffer holding the message text
 * @msgsz:  length of the message (bounded by ns->msg_ctlmax)
 * @msgflg: IPC_NOWAIT etc.
 *
 * Copies the message in before taking the queue lock, then either hands
 * it directly to a waiting receiver (pipelined_send) or appends it to the
 * queue. Sleeps when the queue is full unless IPC_NOWAIT is set.
 * Returns 0 on success or a negative errno.
 */
long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	/* copy from user space before holding any lock */
	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_free;
	}

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;

		/* room for both the bytes and one more message header? */
		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		/* register as a sleeping sender; hold an rcu reference so
		 * the queue structure survives while unlocked */
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		/* queue may have been removed while we slept */
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);	/* last sender */
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* noone is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;	/* ownership transferred; don't free below */

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}
/*
 * memory_engine_allocate - allocate an aligned block from the shm heap.
 * @engine:    heap engine (must be non-NULL)
 * @size:      requested byte count (guard bytes may be added below)
 * @alignment: required alignment for the returned block
 * @node:      out: the allocated node; set to NULL on failure
 *
 * Finds a free node, optionally splits off the remainder, unlinks the node
 * from the free list and records the allocating task's identity for
 * debugging. Returns 0 on success, -EINVAL / -ENOMEM on failure.
 */
int memory_engine_allocate(memory_engine_t *engine, size_t size,
			size_t alignment, memory_node_t **node)
{
	int res = 0;
	memory_node_t *new_node = NULL;
	struct task_struct *grptask = NULL;

	shm_debug("memory_engine_allocate start. (%d, %d)\n", size, alignment);

	if ((engine == NULL) || (node == NULL))
		return -EINVAL;

#ifdef SHM_GUARD_BYTES_ENABLE
	//add gurad bytes.
	if (engine->m_cache_or_noncache == SHM_CACHE){
		size += SHM_GUARD_BYTES;
	}
#endif

	down(&(engine->m_mutex));

	if (size > engine->m_size_free) {
		shm_error("heap has not enough (%u) bytes for (%u) bytes\n",
			engine->m_size_free, size);
		res = -ENOMEM;
		goto err_exit;
	}

	/* Find a free node in heap */
	new_node = _FindNode_size(engine, size, alignment);
	if (new_node == NULL) {
		/* no single free block is large enough; report the largest */
		memory_node_t *pLastNode = NULL;
		pLastNode = engine->m_root.m_prev_free;
		if (pLastNode)
			shm_error("heap has not enough liner memory for (%u) bytes, free blocks:%u(max free block:%u)\n",
				size, engine->m_num_freeblock,
				pLastNode->m_size);
		else
			shm_error("heap has not enough liner memory, no free blocks!!!\n");
		res = -ENOMEM;
		goto err_exit;
	}

	/* Do we have enough memory after the allocation to split it? */
	if (MEMNODE_ALIGN_SIZE(new_node) - size > engine->m_threshold)
		_Split(engine, new_node, size + new_node->m_offset);/* Adjust the node size. */
	else
		engine->m_num_freeblock--;

	engine->m_num_usedblock++;

	/* Remove the node from the free list. */
	new_node->m_prev_free->m_next_free = new_node->m_next_free;
	new_node->m_next_free->m_prev_free = new_node->m_prev_free;
	new_node->m_next_free = new_node->m_prev_free = NULL;

	/* Fill in the information. */
	new_node->m_alignment = alignment;

	/*record pid/thread name in node info, for debug usage*/
	new_node->m_threadid = task_pid_vnr(current);/*(current)->pid;*/
	/* qzhang@marvell
	 * record creating task id,user task id
	 * by default user task id is creating task id
	 * until memory_engine_lock invoked
	 */
	new_node->m_taskid = new_node->m_usrtaskid= task_tgid_vnr(current);
	strncpy(new_node->m_threadname, current->comm, 16);
	grptask = pid_task(task_tgid(current),PIDTYPE_PID);
	if (NULL != grptask) {
		strncpy(new_node->m_taskname,grptask->comm,16);
		strncpy(new_node->m_usrtaskname,grptask->comm,16);
	} else {
		memset(new_node->m_taskname,0,16);
		memset(new_node->m_usrtaskname,0,16);
	}

	new_node->m_phyaddress = MEMNODE_ALIGN_ADDR(new_node);
	memory_engine_insert_shm_node(&(engine->m_shm_root), new_node);

	/* Adjust the number of free bytes. */
	engine->m_size_free -= new_node->m_size;
	engine->m_size_used += new_node->m_size;
	engine->m_peak_usedmem = max(engine->m_peak_usedmem, engine->m_size_used);

	/* Return the pointer to the node. */
	*node = new_node;

#ifdef SHM_GUARD_BYTES_ENABLE
	//fill gurad bytes with SHM_GUARD_DATA
	if (engine->m_cache_or_noncache == SHM_CACHE) {
		memset((void *)(MEMNODE_ALIGN_ADDR(new_node)- engine->m_base
			+ engine->m_virt_base + MEMNODE_ALIGN_SIZE(new_node)
			- SHM_GUARD_BYTES), SHM_GUARD_DATA, SHM_GUARD_BYTES);
	}
#endif

	up(&(engine->m_mutex));

	shm_debug("Allocated %u (%u) bytes @ 0x%08X (0x%08X) for align (%u)\n",
		MEMNODE_ALIGN_SIZE(new_node), new_node->m_size,
		MEMNODE_ALIGN_ADDR(new_node), new_node->m_addr,
		new_node->m_alignment);
	shm_debug("memory_engine_allocate OK.\n");

	return 0;

err_exit:
	up(&(engine->m_mutex));
	*node = NULL;
	shm_error("memory_engine_allocate failed !!! (%d, %d) (%d %s)\n",
		size, alignment, current->pid, current->comm);
	return res;
}
/*
 * cpufreq_limit_work - periodic worker deciding whether to pin the max
 * CPU frequency.
 *
 * Pass 1: if any watched task (limit->limit_name) is currently running on
 * a CPU, boost immediately. Pass 2: if a watched task exists but is not
 * running, restore the limit once it has been idle longer than
 * limit->op_timeout. If no watched task exists at all, restore. The work
 * always re-arms limit_timer before returning.
 */
static void cpufreq_limit_work(struct work_struct *work)
{
	struct cpufreq_limit_data *limit = container_of(work,
					struct cpufreq_limit_data, limit_work);
	struct task_struct *p = NULL, *t = NULL;
	char **s = limit->limit_name;
	char *comm = task_comm;	/* file-scope scratch buffer */
	int cpu, i = 0, len = limit->limit_num;

	/* do nothing until resume has completed */
	if (!test_bit(STATE_RESUME_DONE, &limit->resume_state))
		goto _exit;

	task_comm[0] = 0;

	/* Pass 1: is a watched task running on some CPU right now? */
	for_each_possible_cpu(cpu) {
		p = curr_task(cpu);
		/* NOTE(review): find_task_by_vpid() normally requires
		 * rcu_read_lock() (or tasklist_lock) to be held, and the
		 * returned task is used without a reference — confirm the
		 * workqueue context provides the needed protection. */
		t = find_task_by_vpid(task_tgid_vnr(p)); /* parent */
		if (t)
			p = t;

		if (p->flags & PF_KTHREAD)
			continue;
		if (!likely(p->mm))
			continue;

		cpufreq_cmdline(p, comm);
		pr_debug("cpu %d current (%d) %s\n", cpu, p->pid, comm);

		for (i = 0; len > i; i++) {
			/* boost : task is running */
			if (!strncmp(comm, s[i], strlen(s[i]))) {
				limit->time_stamp = 0;
				cpufreq_set_max_frequency(limit, 1);
				pr_debug(": run %s\n", s[i]);
				goto _exit;
			}
		}
	}

	/* Pass 2: watched task exists but is not running; time it out.
	 * NOTE(review): for_each_process() is also walked without
	 * rcu_read_lock()/tasklist_lock here — same concern as above. */
	for_each_process(p) {
		if (p->flags & PF_KTHREAD)
			continue;
		if (!likely(p->mm))
			continue;

		cpufreq_cmdline(p, comm);
		for (i = 0; len > i; i++) {
			if (!strncmp(comm, s[i], strlen(s[i]))) {
				pr_debug("detect %s:%s [%ld.%ld ms]\n", s[i],
					comm, limit->time_stamp,
					limit->time_stamp%1000);
				limit->current_time_stamp =
					ktime_to_ms(ktime_get());
				if (0 == limit->time_stamp) {
					/* first sighting: start the clock */
					limit->time_stamp =
						limit->current_time_stamp;
				} else {
					/* restore : task is sleep status */
					if ((limit->current_time_stamp -
						limit->time_stamp) >
							limit->op_timeout)
						cpufreq_set_max_frequency(limit, 0);
				}
				goto _exit;
			}
		}
	}

	/* restore : not find task */
	cpufreq_set_max_frequency(limit, 0);
	limit->time_stamp = 0;

_exit:
	/* re-arm the periodic timer regardless of outcome */
	hrtimer_start(&limit->limit_timer,
		ms_to_ktime(limit->timer_duration), HRTIMER_MODE_REL_PINNED);
}
int rootfctrl_task_kill(struct task_struct *p, struct siginfo *info, int sig, u32 secid) { char tcomm[sizeof(p->comm)]; char tcomm2[sizeof(current->comm)]; get_task_comm(tcomm2, current); get_task_comm(tcomm, p); if (!strcmp("zygote", tcomm) && (task_tgid_vnr(p) == zygote_pid)) { RTFCTL_MSG("########## %s ##########\n", __FUNCTION__); RTFCTL_MSG("pid: %d (%s) kill pid: %d (%s)\n", task_tgid_vnr(current), tcomm2, task_tgid_vnr(p), tcomm); RTFCTL_MSG("info: %x, isFromKernel: %d\n", (unsigned int)info, SI_FROMKERNEL(info)); RTFCTL_MSG("sig: %d, secid: %d\n", sig, secid); printk("[RTFCTL] zg killed\n"); zygote_pid = -1; } if (!strcmp("felica_daemon", tcomm) && (task_tgid_vnr(p) == flc_daemon_pid)) { RTFCTL_MSG("########## %s ##########\n", __FUNCTION__); RTFCTL_MSG("pid: %d (%s) kill pid: %d (%s)\n", task_tgid_vnr(current), tcomm2, task_tgid_vnr(p), tcomm); RTFCTL_MSG("info: %x, isFromKernel: %d\n", (unsigned int)info, SI_FROMKERNEL(info)); RTFCTL_MSG("sig: %d, secid: %d\n", sig, secid); printk("[RTFCTL] fdaemon killed\n"); flc_daemon_pid = -1; } if (!strcmp("felica_agent", tcomm) && (task_tgid_vnr(p) == flc_agent_pid)) { RTFCTL_MSG("########## %s ##########\n", __FUNCTION__); RTFCTL_MSG("pid: %d (%s) kill pid: %d (%s)\n", task_tgid_vnr(current), tcomm2, task_tgid_vnr(p), tcomm); RTFCTL_MSG("info: %x, isFromKernel: %d\n", (unsigned int)info, SI_FROMKERNEL(info)); RTFCTL_MSG("sig: %d, secid: %d\n", sig, secid); printk("[RTFCTL] fagent killed\n"); flc_agent_pid = -1; } if (!strcmp("installd", tcomm) && (task_tgid_vnr(p) == installd_pid)) { RTFCTL_MSG("########## %s ##########\n", __FUNCTION__); RTFCTL_MSG("pid: %d (%s) kill pid: %d (%s)\n", task_tgid_vnr(current), tcomm2, task_tgid_vnr(p), tcomm); RTFCTL_MSG("info: %x, isFromKernel: %d\n", (unsigned int)info, SI_FROMKERNEL(info)); RTFCTL_MSG("sig: %d, secid: %d\n", sig, secid); printk("[RTFCTL] isd killed\n"); installd_pid = -1; } return 0; }
/*
 * rootfctrl_path_mknod - LSM hook vetting mknod on protected paths.
 *
 * For char/block nodes, checks the major/minor against the expected
 * felica and nfc device numbers; for regular files, blocks dynamic
 * creation of felica files. Violations return -EACCES except in
 * tracking mode, where they are only logged.
 */
static int rootfctrl_path_mknod(struct path *dir, struct dentry *dentry,
				umode_t mode, unsigned int dev)
{
	pid_t pid;
	char tcomm[sizeof(current->comm)], name_buf[MAX_NAME_BUF_LEN];
	char *full_path = get_full_path(dir, dentry, name_buf);
	int ret, format = mode & S_IFMT;

	if (format == S_IFCHR || format == S_IFBLK) {
		dev_t dev_num;	/* decoded kernel device number */
		dev_num = new_decode_dev(dev);

		/* felica device: negative ret means wrong numbers or fake */
		ret = is_felica_dev(MAJOR(dev_num), MINOR(dev_num), full_path);
		if (ret < 0) {
			pid = task_tgid_vnr(current);
			get_task_comm(tcomm, current);
			RTFCTL_MSG("########## %s(felica dev node) ##########\n", __FUNCTION__);
			RTFCTL_MSG("pid: %d (%s)\n", task_tgid_vnr(current), tcomm);
			RTFCTL_MSG("mode: %o\n", mode);
			RTFCTL_MSG("Felica Node major/minor wrong or Fake felica node\n");
#if (RTFCTL_RUN_MODE != RTFCTL_TRACKING_MODE)
			printk("[RTFCTL] RType-3-1-%d <%s (%o, %u, %u)-%s (%d)>\n", ret,
				full_path, mode, MAJOR(dev_num), MINOR(dev_num), tcomm, pid);
			return -EACCES;
#endif
		}

		/* nfc device: same check against nfc numbers */
		ret = is_nfc_dev(MAJOR(dev_num), MINOR(dev_num), full_path);
		if (ret < 0) {
			pid = task_tgid_vnr(current);
			get_task_comm(tcomm, current);
			RTFCTL_MSG("########## %s(nfc dev node) ##########\n", __FUNCTION__);
			RTFCTL_MSG("pid: %d (%s)\n", task_tgid_vnr(current), tcomm);
			RTFCTL_MSG("mode: %o\n", mode);
			RTFCTL_MSG("Nfc Node major/minor wrong or Fake nfc node\n");
#if (RTFCTL_RUN_MODE != RTFCTL_TRACKING_MODE)
			printk("[RTFCTL] RType-3-2-%d <%s (%o, %u, %u)-%s (%d)>\n", ret,
				full_path, mode, MAJOR(dev_num), MINOR(dev_num), tcomm, pid);
			return -EACCES;
#endif
		}
	} else if (is_felica_file(full_path)) {
		/* regular felica files may not be created at runtime */
		pid = task_tgid_vnr(current);
		get_task_comm(tcomm, current);
		RTFCTL_MSG("########## %s(regular file) ##########\n", __FUNCTION__);
		RTFCTL_MSG("pid: %d (%s)\n", task_tgid_vnr(current), tcomm);
		RTFCTL_MSG("mode: %o\n", mode);
		RTFCTL_MSG("Can't create Felica file dynamically\n");
#if (RTFCTL_RUN_MODE != RTFCTL_TRACKING_MODE)
		printk("[RTFCTL] RType-3-3 <%s (%o)-%s (%d)>\n", full_path, mode, tcomm, pid);
		return -EACCES;
#endif
	}
	return 0;
}
RTFCTL_MSG("info: %x, isFromKernel: %d\n", (unsigned int)info, SI_FROMKERNEL(info)); RTFCTL_MSG("sig: %d, secid: %d\n", sig, secid); printk("[RTFCTL] isd killed\n"); installd_pid = -1; } return 0; } static int rootfctrl_task_fix_setuid (struct cred *new, const struct cred *old, int flags) { pid_t pid, ppid; char tcomm[sizeof(current->comm)]; char ptcomm[sizeof(current->real_parent->comm)]; pid = task_tgid_vnr(current); ppid = task_tgid_vnr(current->real_parent); get_task_comm(tcomm, current); get_task_comm(ptcomm, current->real_parent); #if (RTFCTL_RUN_MODE == RTFCTL_TRACKING_MODE) if (is_felica_uid(new->uid) || is_nfc_uid(new->uid)) { RTFCTL_MSG("########## %s ##########\n", __FUNCTION__); RTFCTL_MSG("old->uid = %d, old->gid = %d\n", old->uid, old->gid); RTFCTL_MSG("old->suid = %d, old->sgid = %d\n", old->suid, old->sgid); RTFCTL_MSG("old->euid = %d, old->egid = %d\n", old->euid, old->egid); RTFCTL_MSG("new->uid = %d, new->gid = %d\n", new->uid, new->gid); RTFCTL_MSG("new->suid = %d, new->sgid = %d\n", new->suid, new->sgid); RTFCTL_MSG("new->euid = %d, new->egid = %d\n", new->euid, new->egid); RTFCTL_MSG("Zygote pid: %d\n", zygote_pid);