static void ShowStatus(void) { struct task_struct *t,*p; struct pid *pid; int count=0; InDumpAllStack=1; //show all kbt in init LOGE("[Hang_Detect] dump init all thread bt \n"); if(init_pid) { pid=find_get_pid(init_pid); t=p=get_pid_task(pid,PIDTYPE_PID); do { sched_show_task_local(t); } while_each_thread(p, t); } //show all kbt in surfaceflinger LOGE("[Hang_Detect] dump surfaceflinger all thread bt \n"); if(surfaceflinger_pid) { pid=find_get_pid(surfaceflinger_pid); t=p=get_pid_task(pid,PIDTYPE_PID); count=0; do { sched_show_task_local(t); if((++count)%5==4) msleep(20); } while_each_thread(p, t); } msleep(100); //show all kbt in system_server LOGE("[Hang_Detect] dump system_server all thread bt \n"); if(system_server_pid) { pid=find_get_pid(system_server_pid); t=p=get_pid_task(pid,PIDTYPE_PID); count=0; do { sched_show_task_local(t); if((++count)%5==4) msleep(20); } while_each_thread(p, t); } msleep(100); //show all D state thread kbt LOGE("[Hang_Detect] dump all D thread bt \n"); show_state_filter_local(TASK_UNINTERRUPTIBLE); debug_show_all_locks(); system_server_pid=0; surfaceflinger_pid=0; init_pid=0; InDumpAllStack=0; msleep(10); }
int send_signal(int myPid) { /* send the signal */ struct siginfo info; int ret; struct task_struct *t; struct pid *pid_struct; pid_t pid; memset(&info, 0, sizeof(struct siginfo)); info.si_signo = SIG_TEST; info.si_code = SI_QUEUE; // this is bit of a trickery: SI_QUEUE is normally used by sigqueue from user space, // and kernel space should use SI_KERNEL. But if SI_KERNEL is used the real_time data // is not delivered to the user space signal handler function. info.si_int = 1; //real time signals may have 32 bits of data. rcu_read_lock(); pid = myPid; //integer value of pid pid_struct = find_get_pid(pid); //function to find the pid_struct t = pid_task(pid_struct,PIDTYPE_PID); //find the task_struct if(t == NULL){ printk("no such pid\n"); rcu_read_unlock(); return -ENODEV; } rcu_read_unlock(); ret = send_sig_info(SIG_TEST, &info, t); //send the signal if (ret < 0) { printk("error sending signal\n"); return ret; } return 0; }
/* Log a fork system call.
 *
 * This will log the return value, but only if it's not 0 (i.e., is in
 * the parent).
 */
static void handle_fork(struct filemon *fm, char op, is_at_enum is_at __maybe_unused,
			struct pt_regs *regs)
{
#ifndef FILEMON_PERFORMANCE_NO_FORK_FM
	struct pid *pid;
#endif
	int scrv;

	scrv = syscall_get_return_value(current, regs);
	if (scrv == 0)	/* child side; the < 0 case was already handled */
		return;
	filemon_log(fm, op, "%i", scrv);
#ifndef FILEMON_PERFORMANCE_NO_FORK_FM
	/* List is already locked */
	pid = find_get_pid(scrv);
	/*
	 * Fix: the original tested "pid >= 0", comparing a pointer with
	 * an integer — true even for a NULL result.  Test the pointer
	 * itself instead.
	 */
	if (pid) {
		struct fm_pids *s;

		s = kmalloc(sizeof(struct fm_pids), GFP_KERNEL);
		if (s) {
			s->pid = pid;
			LIST_ADD(&s->list, &fm->shead->list);
		} else {
			put_pid(pid);	/* fix: don't leak the ref on OOM */
		}
	}
#endif
}
/*Ä£¿éŒÓÔغ¯Êý¶šÒå*/ static int __init init_waitqueue_entry_init(void) { //ŸÖ²¿±äÁ¿¶šÒå int result; wait_queue_t data; printk("<0>into init_waitqueue_entry_init.\n"); /*ŽŽœš1žöÐÂœø³Ì*/ result=kernel_thread(my_function,NULL,CLONE_KERNEL); /*»ñÈ¡ÐÂœø³ÌµÄœø³ÌÃèÊö·ûÐÅÏ¢*/ struct pid * kpid = find_get_pid(result); struct task_struct * task = pid_task(kpid,PIDTYPE_PID); if(data.private==NULL||data.func==NULL) { printk("<0>the data has not been initialized\n"); } /*ÓÃÐÂœø³Ì³õÊŒ»¯µÈŽý¶ÓÁÐÔªËØ*/ init_waitqueue_entry(&data,task); if(data.private==task && data.func!=NULL) { printk("<0>the data has been initialized\n"); printk("<0>the flags of the data is:%d\n",data.flags); } else {
//Ä£¿éŒÓÔغ¯Êý¶šÒå static int __init wait_for_completion_interruptible_init(void) { int result; long leavetime; wait_queue_t data; printk("<0>into wait_for_completion_interruptible_init.\n"); result=kernel_thread(my_function,NULL,CLONE_KERNEL); //ŽŽœšÐÂœø³Ì /*»ñÈ¡ÐÂœø³ÌµÄÃèÊö·ûÐÅÏ¢*/ struct pid * kpid=find_get_pid(result); struct task_struct * task=pid_task(kpid,PIDTYPE_PID); init_completion(&comple); //³õÊŒ»¯completion±äÁ¿ init_waitqueue_entry(&data,task); //ÓÃÐÂœø³Ì³õÊŒ»¯µÈŽý¶ÓÁÐÔªËØ __add_wait_queue_tail(&(comple.wait),&data); //œ«ÐÂœø³ÌŒÓÈëµÈŽý¶ÓÁеÄβ²¿ leavetime=wait_for_completion_interruptible(&comple); //×èÈûœø³Ì£¬µÈŽýÐÂœø³ÌµÄœáÊø /*ÏÔÊŸº¯Êýwait_for_completion_interruptible( )µÄ·µ»Øœá¹û*/ printk("<0>the result of the wait_for_completion_interruptible is:%ld\n",leavetime); /*ÏÔÊŸº¯Êýkernel_thread( )µÄ·µ»Øœá¹û*/ printk("<0>the result of the kernel_thread is :%d\n",result); printk("<0>the current pid is:%d\n",current->pid); //ÏÔÊŸµ±Ç°œø³ÌµÄPIDÖµ printk("<0>out wait_for_completion_interruptible_init.\n"); return 0; }
static void wb_dfe_exit ( void ) { struct pid *pid_ptr; int pid_index = 0; int i ; PRINTF("Killing module thread ...\n"); for (i = 0; i < dfe_nr_threads; i++) { killed[i] = 0; } dfe_gone = 1; while (pid_index < dfe_nr_threads) { for (i = 0; i < dfe_nr_threads; i++) { if (0 == killed[i]) { pid_ptr = find_get_pid(pid[i]); PRINTF("we try to kill kernel thread %d ,thread id is %d !\n", i,pid[i]); if (!kill_pid(pid_ptr, SIGTERM, 1)) { PRINTF("Unable to kill thread. Waiting for completion !\n"); wake_up(&wait); wait_for_completion(&evt_dead); } PRINTF("~~~~~kernel thread %d ,thread id %d was killed !~~~~~\n", i,pid[i]); killed[i] = 1; ++pid_index; } } } PRINTF("Bye bye, %s !\n",DRIVER_DESC); }
/*
 * proc_get_sb - get_sb implementation for procfs with pid-namespace
 * support.  Selects the pid namespace (from @data for kernel-internal
 * mounts, from the current task otherwise), finds or creates the
 * matching superblock via sget(), and fills it on first mount.
 * Returns 0 on success or a negative errno.
 */
static int proc_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	int err;
	struct super_block *sb;
	struct pid_namespace *ns;
	struct proc_inode *ei;
	char *options;

	if (proc_mnt) {
		/* Seed the root directory with a pid so it doesn't need
		 * to be special in base.c.  I would do this earlier but
		 * the only task alive when /proc is mounted the first time
		 * is the init_task and it doesn't have any pids. */
		ei = PROC_I(proc_mnt->mnt_sb->s_root->d_inode);
		if (!ei->pid)
			ei->pid = find_get_pid(1);
	}

	if (flags & MS_KERNMOUNT) {
		/* kernel-internal mount: caller passes the namespace directly */
		ns = (struct pid_namespace *)data;
		options = NULL;
	} else {
		ns = task_active_pid_ns(current);
		options = data;
	}

	/* find an existing superblock for this namespace, or allocate one */
	sb = sget(fs_type, proc_test_super, proc_set_super, ns);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		/* freshly allocated superblock: parse options and fill it */
		sb->s_flags = flags;
		if (!proc_parse_options(options, ns)) {
			deactivate_locked_super(sb);
			return -EINVAL;
		}
		err = proc_fill_super(sb);
		if (err) {
			deactivate_locked_super(sb);
			return err;
		}
		ei = PROC_I(sb->s_root->d_inode);
		if (!ei->pid) {
			/* seed the root inode with pid 1 of this namespace */
			rcu_read_lock();
			ei->pid = get_pid(find_pid_ns(1, ns));
			rcu_read_unlock();
		}
		sb->s_flags |= MS_ACTIVE;
		ns->proc_mnt = mnt;
	}

	simple_set_mnt(mnt, sb);
	return 0;
}
/**
 * monitor_pid - look up and remember the struct pid for @pid.
 * @pid: the PID to look up
 *
 * Stores the reference returned by find_get_pid() in the global
 * target_pid.
 *
 * NOTE(review): if this is called more than once, the reference held
 * by the previous target_pid is overwritten without put_pid() —
 * verify callers only invoke this once per monitored pid.
 *
 * Return: 1 if the PID exists, 0 otherwise.
 */
int monitor_pid(pid_t pid)
{
	pr_info("[DEBUG] monitor_pid called with arg : %d\n", pid);
	target_pid = find_get_pid(pid);
	if(!target_pid)
		return 0;
	return 1;
}
/**
 * monitor_pid - look up the struct pid for @pid and store it in a newly
 * allocated task_monitor kept in the global @tm.
 * @pid: the PID to look up
 *
 * Fixes vs. original: the kmalloc() result was dereferenced without a
 * NULL check, and the find_get_pid() reference leaked on that path.
 *
 * Return: 1 on success, 0 if the PID does not exist or allocation fails.
 */
int monitor_pid(pid_t pid)
{
	struct pid *tmp = find_get_pid(pid);

	if (!tmp)
		return 0;
	/* no cast needed on kmalloc's void* return in C */
	tm = kmalloc(sizeof(struct task_monitor), GFP_KERNEL);
	if (!tm) {
		put_pid(tmp);	/* drop the ref we just took */
		return 0;
	}
	tm->target_pid = tmp;
	return 1;
}
/** * ecryptfs_process_nl_helo * @skb: The socket buffer containing the nlmsghdr in HELO state * * Gets uid and pid of the skb and adds the values to the daemon id * hash. Returns zero after adding a new daemon id to the hash list; * non-zero otherwise. */ static int ecryptfs_process_nl_helo(struct sk_buff *skb) { struct pid *pid; int rc; pid = find_get_pid(NETLINK_CREDS(skb)->pid); rc = ecryptfs_process_helo(ECRYPTFS_TRANSPORT_NETLINK, NETLINK_CREDS(skb)->uid, NULL, pid); put_pid(pid); if (rc) printk(KERN_WARNING "Error processing HELO; rc = [%d]\n", rc); return rc; }
//Ä£¿éŒÓÔغ¯Êý¶šÒå static int __init find_get_pid_init(void) { int result; printk("<0> into find_get_pid_init.\n"); result=kernel_thread(my_function,NULL,CLONE_KERNEL); //ŽŽœšÒ»žöеĜø³Ì struct pid * kpid=find_get_pid(result); //žùŸÝœø³ÌºÅ£¬µ÷Óú¯Êý»ñÈ¡œø³ÌÃèÊö·ûÐÅÏ¢ printk("<0>the count of the pid is :%d\n",kpid->count); //ÏÔÊŸœø³ÌÃèÊö·ûÐÅÏ¢ printk("<0>the level of the pid is :%d\n",kpid->level); printk("<0>the pid of the find_get_pid is :%d\n",kpid->numbers[kpid->level].nr); //ÏÔÊŸœø³ÌºÅ printk("<0>the result of the kernel_thread is :%d\n",result); //ÏÔÊŸkernel_threadº¯Êý·µ»Øœá¹û printk("<0> out find_get_pid_init.\n"); return 0; }
/** * ecryptfs_process_nl_quit * @skb: The socket buffer containing the nlmsghdr in QUIT state * * Gets uid and pid of the skb and deletes the corresponding daemon * id, if it is the registered that is requesting the * deletion. Returns zero after deleting the desired daemon id; * non-zero otherwise. */ static int ecryptfs_process_nl_quit(struct sk_buff *skb) { struct pid *pid; int rc; pid = find_get_pid(NETLINK_CREDS(skb)->pid); rc = ecryptfs_process_quit(NETLINK_CREDS(skb)->uid, NULL, pid); put_pid(pid); if (rc) printk(KERN_WARNING "Error processing QUIT message; rc = [%d]\n", rc); return rc; }
/*
 * exitModuleTest - module teardown: SIGTERM the module's kernel thread
 * and wait for it to signal completion.
 *
 * Fix vs. original: the struct pid reference taken by find_get_pid()
 * was never dropped.
 */
static void exitModuleTest(void)
{
	struct pid *pid_ptr;

	PRINTF("Killing module thread ...\n");
	moduleTestExiting = 1;
	pid_ptr = find_get_pid(pid);
	if (!kill_pid(pid_ptr, SIGTERM, 1)) {
		PRINTF("Unable to kill thread. Waiting for completion !\n");
		wake_up(&wait);
		wait_for_completion(&evt_dead);
	}
	put_pid(pid_ptr);	/* fix: drop the find_get_pid() reference */
	PRINTF("Bye bye, %s !\n",DRIVER_DESC);
}
static ssize_t mdb_file_read ( struct file *file, char __user *buf, size_t count, loff_t *ppos) { Mdb_s mdb; FN; if (copy_from_user( &mdb, buf, count)) { return -EFAULT; } switch (mdb.mdb_cmd) { case MDB_READ: if (copy_to_user((void*)mdb.mdb_buf, (void*)mdb.mdb_addr, mdb.mdb_size)) { return -EFAULT; } return mdb.mdb_size; case MDB_WRITE: if (copy_from_user((void*)mdb.mdb_addr, (void*)mdb.mdb_buf, mdb.mdb_size)) { return -EFAULT; } return mdb.mdb_size; case MDB_PID2TASK: rcu_read_lock(); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)) mdb.pid_task = (unsigned long)find_task_by_pid(mdb.pid_pid); #else //mdb.pid_task = (unsigned long)find_task_by_vpid(mdb.pid_pid); mdb.pid_task = (unsigned long)pid_task(find_get_pid(mdb.pid_pid), PIDTYPE_PID); #endif rcu_read_unlock(); if (copy_to_user( buf, &mdb, count)) { return -EFAULT; } return 0; default: return -EINVAL; } return 0; }
struct task_struct *x_find_task_by_pid(pid_t nr) { #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 27) return find_task_by_pid(nr); #else struct pid *pid; struct task_struct *ts = NULL; pid = find_get_pid(nr); if(pid) { ts = pid_task(pid,PIDTYPE_PID); put_pid(pid); } return ts; #endif }
//Ä£¿éŒÓÔغ¯Êý¶šÒå static int __init __task_pid_nr_ns_init(void) { int result; printk("<0> into __task_pid_nr_ns_init.\n"); result=kernel_thread(my_function,NULL,CLONE_KERNEL); //ŽŽœšÐÂœø³Ì struct pid * kpid=find_get_pid(result); //»ñÈ¡×Óœø³ÌµÄœø³ÌÃèÊö·û struct task_struct * task=pid_task(kpid,PIDTYPE_PID); //»ñÈ¡œø³ÌËùÊôµÄÈÎÎñµÄÈÎÎñÃèÊö·û pid_t result1=__task_pid_nr_ns(task,PIDTYPE_PID,kpid->numbers[kpid->level].ns); //»ñÈ¡ÈÎÎñ¶ÔÓŠœø³ÌµÄœø³ÌÃèÊö·û printk("<0>the pid of the find_get_pid is :%d\n",kpid->numbers[kpid->level].nr); //ÏÔÊŸº¯Êýfind_get_pid()·µ»ØÖµµÄœø³ÌÃèÊö·ûµÄœø³ÌºÅ printk("<0>the result of the __task_pid_nr_ns is:%d\n",result1); //ÏÔÊŸº¯Êý__task_pid_nr_ns()µÄ·µ»ØÖµ printk("<0>the result of the kernel_thread is :%d\n",result); //ÏÔÊŸº¯Êýkernel_thread()µÄ·µ»ØÖµ printk("<0>the pid of current thread is :%d\n",current->pid); //ÏÔÊŸµ±Ç°œø³ÌºÅ printk("<0> out __task_pid_nr_ns_init.\n"); return 0; }
static int proc_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { if (proc_mnt) { /* Seed the root directory with a pid so it doesn't need * to be special in base.c. I would do this earlier but * the only task alive when /proc is mounted the first time * is the init_task and it doesn't have any pids. */ struct proc_inode *ei; ei = PROC_I(proc_mnt->mnt_sb->s_root->d_inode); if (!ei->pid) ei->pid = find_get_pid(1); } return get_sb_single(fs_type, flags, data, proc_fill_super, mnt); }
/*
 * get_check_task - look up the task_struct for @pid, logging when no
 * such process exists.
 *
 * Fix vs. original: the struct pid reference taken by find_get_pid()
 * was never dropped.
 *
 * NOTE(review): as in the original, the returned task carries no
 * reference of its own — confirm callers only use it under conditions
 * where the task cannot exit.
 */
static struct task_struct *get_check_task(pid_t pid)
{
	struct task_struct *task;
	struct pid *struct_pid = NULL;

	rcu_read_lock();
	struct_pid = find_get_pid(pid);
	task = pid_task(struct_pid, PIDTYPE_PID);	/* NULL-safe lookup */
	put_pid(struct_pid);	/* fix: drop the find_get_pid() reference */
	rcu_read_unlock();
	if (unlikely(task == NULL)) {
		printk(KERN_INFO "sendsig: no process with pid %d found\n", pid);
		return NULL;
	}
	return task;
}
//Ä£¿éŒÓÔغ¯Êý¶šÒå static int __init find_task_by_pid_ns_init(void) { int result; printk("<0> into find_task_by_pid_ns_init.\n"); result=kernel_thread(my_function,NULL,CLONE_KERNEL); //ŽŽœšÐÂœø³Ì struct pid * kpid=find_get_pid(result); //»ñÈ¡œø³ÌÃèÊö·û //µ÷Óú¯Êý»ñµÃÓë²ÎÊýÐÅÏ¢¶ÔÓŠµÄÈÎÎñÃèÊö·ûÐÅÏ¢ struct task_struct * task=find_task_by_pid_ns(kpid->numbers[kpid->level].nr,kpid->numbers[kpid->level].ns); //ÏÔÊŸfind_get_pid()º¯Êý·µ»ØµÄœø³ÌÃèÊö·ûµÄœø³ÌºÅ printk("<0>the pid of the find_get_pid is :%d\n",kpid->numbers[kpid->level].nr); //ÏÔÊŸº¯Êýµ÷ÓÃœá¹ûÈÎÎñÃèÊö·ûµÄÐÅÏ¢ printk("<0>the pid of the task of the function find_task_by_pid_ns is:%d\n",task->pid); printk("<0>the tgid of the task of the function find_task_by_pid_ns is:%d\n",task->tgid); //ÏÔÊŸkernel_thread()º¯Êýµ÷ÓÃœá¹û printk("<0>the result of the kernel_thread is :%d\n",result); printk("<0> out find_task_by_pid_ns_init.\n"); return 0; }
static int msmrtc_timeremote_set_time(struct device *dev, struct rtc_time *tm) { int rc; struct rtc_tod_args rtc_args; struct msm_rtc *rtc_pdata = dev_get_drvdata(dev); struct pid * pid_struct = NULL; pid_t pid = 0; struct task_struct *task = NULL; if (tm->tm_year < 1900) tm->tm_year += 1900; if (tm->tm_year < 1970) return -EINVAL; pid = sys_getpid(); pid_struct = find_get_pid(pid); if ( pid_struct ) { task = pid_task( pid_struct, PIDTYPE_PID); } dev_dbg(dev, "%s: %.2u/%.2u/%.4u %.2u:%.2u:%.2u (%.2u) pid:%d %s\n", __func__, tm->tm_mon, tm->tm_mday, tm->tm_year, tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday, (int)pid, (task?task->comm:"(null)")); WARN_ON(1); rtc_args.proc = TIMEREMOTE_PROCEEDURE_SET_JULIAN; rtc_args.tm = tm; rc = msm_rpc_client_req(rtc_pdata->rpc_client, TIMEREMOTE_PROCEEDURE_SET_JULIAN, msmrtc_tod_proc_args, &rtc_args, NULL, NULL, -1); if (rc) { dev_err(dev, "%s: rtc time (TOD) could not be set\n", __func__); return rc; } return 0; }
static int rp_sched_process_fork_leave(struct kretprobe_instance *ri, struct pt_regs *regs) { pid_t pid = (pid_t)regs_return_value(regs); if (pid) { struct task_struct *task = NULL; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) struct pid *p_pid = find_get_pid(pid); task = pid_task(p_pid, PIDTYPE_PID); put_pid(p_pid); #else /* < 2.6.31 */ rcu_read_lock(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) task = find_task_by_vpid(pid); #else /* < 2.6.24 */ task = find_task_by_pid(pid); #endif /* 2.6.24 */ rcu_read_unlock(); #endif /* 2.6.31 */ vtss_target_fork(current, task); } return 0; }
/** * ecryptfs_process_nl_reponse * @skb: The socket buffer containing the netlink message of state * RESPONSE * * Processes a response message after sending a operation request to * userspace. Attempts to assign the msg to a netlink context element * at the index specified in the msg. The sk_buff and nlmsghdr must * be validated before this function. Returns zero upon delivery to * desired context element; non-zero upon delivery failure or error. */ static int ecryptfs_process_nl_response(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); struct ecryptfs_message *msg = NLMSG_DATA(nlh); struct pid *pid; int rc; if (skb->len - NLMSG_HDRLEN - sizeof(*msg) != msg->data_len) { rc = -EINVAL; ecryptfs_printk(KERN_ERR, "Received netlink message with " "incorrectly specified data length\n"); goto out; } pid = find_get_pid(NETLINK_CREDS(skb)->pid); rc = ecryptfs_process_response(msg, NETLINK_CREDS(skb)->uid, NULL, pid, nlh->nlmsg_seq); put_pid(pid); if (rc) printk(KERN_ERR "Error processing response message; rc = [%d]\n", rc); out: return rc; }
/*
 * sys_set_reserve - attach a CPU-budget reservation (budget C every
 * period T) to the task identified by @pid (0 = current) and pin it to
 * CPU @cid, powering unused CPUs down and used CPUs up.
 *
 * Fix vs. original: the struct pid reference taken by find_get_pid()
 * was never dropped.
 *
 * Returns 0 on success or a negative errno (-ENODEV no such task,
 * -EFAULT bad user pointers/hook, -EINVAL bad timespecs or cpu
 * power/affinity failure, -EBUSY admission check failed).
 */
long sys_set_reserve(pid_t pid, struct timespec __user *user_C,
		     struct timespec __user *user_T, int cid)
{
	struct cpumask set;
	struct timespec T, C, empty;
	struct pid *pid_struct;
	struct task_struct *task;
	struct task_struct *tmp;
	int i;
	int cpu_task_count[] = {0, 0, 0, 0};

	set_normalized_timespec(&empty, 0, 0);

	/* locate the task_struct for the task required */
	if (pid == 0) {
		task = current;
	} else {
		rcu_read_lock();
		pid_struct = find_get_pid(pid);
		if (!pid_struct) {
			rcu_read_unlock();
			return -ENODEV;
		}
		task = pid_task(pid_struct, PIDTYPE_PID);
		put_pid(pid_struct);	/* fix: drop the find_get_pid() ref */
		if (!task) {
			rcu_read_unlock();
			return -ENODEV;
		}
		rcu_read_unlock();
	}

	/* get timespec struct info */
	if (copy_from_user(&C, user_C, sizeof(struct timespec))) {
		printk(KERN_ALERT "[sys_set_reserve] failed to copy C from user\n");
		return -EFAULT;
	}
	if (copy_from_user(&T, user_T, sizeof(struct timespec))) {
		printk(KERN_ALERT "[sys_set_reserve] failed to copy T from user\n");
		return -EFAULT;
	}

	/* check for timespec validity (budget must fit in the period) */
	if ((timespec_compare(&T, &C) < 0) || !timespec_valid(&T) ||
	    !timespec_valid(&C) || (cid >= NUM_CPUS)) {
		printk(KERN_ALERT "[sys_set_reserve] invalid T and C\n");
		return -EINVAL;
	}

	/* do a reservation admission check */
	cid = admission_check(task, C, T, cid);
	if (cid < 0) {
		return -EBUSY;
	}

	if (set_reserve_hook(task) != 0) {
		return -EFAULT;
	}

	/* cancel any old timers for an updated reservation */
	if (hrtimer_active(&(task->C_timer))) {
		hrtimer_cancel(&(task->C_timer));
	}
	if (hrtimer_active(&(task->T_timer))) {
		hrtimer_cancel(&(task->T_timer));
	}

	/* make runnable any task suspended by enforcement */
	if (task->put_to_sleep) {
		task->put_to_sleep = 0;
		wake_up_process(task);
	}

	/* copy into task struct ktime values */
	task->real_C_time = ktime_set(0, 0);
	task->C_time = ktime_set(C.tv_sec, C.tv_nsec);
	task->T_time = ktime_set(T.tv_sec, T.tv_nsec);

	/* find what cpus have tasks on them */
	rcu_read_lock();
	for_each_process(tmp) {
		if (tmp->has_reservation) {
			cpu_task_count[task_cpu(tmp)] = 1;
		}
	}
	rcu_read_unlock();

	cpu_task_count[cid] = 1;	/* this reservation's CPU is in use */
	task->reserve_cpu = cid;

	/* Bring offline all cpus with no tasks */
	for (i = 0; i < NUM_CPUS; i++) {
		if (cpu_task_count[i] == 0) {
			if (power_cpu(i, 0) != 0) {
				printk(KERN_ALERT"[sys_set_reserve] failed to turn off cpu %d\n", i);
				goto fail;
			}
			printk(KERN_ALERT"[sys_set_reserve] turned OFF CPU %d\n", i);
		} else {
			if (power_cpu(i, 1) != 0) {
				printk(KERN_ALERT"[sys_set_reserve] failed to turn on cpu %d\n", i);
				goto fail;
			}
			printk(KERN_ALERT"[sys_set_reserve] turned ON CPU %d\n", i);
		}
	}

	/* set process CPU */
	cpumask_clear(&set);
	cpumask_set_cpu(cid, &set);
	if (sched_setaffinity(pid, &set)) {
		printk(KERN_ALERT"[sys_set_reserve] failed to set CPU affinity\n");
		goto fail;
	}

	printk(KERN_ALERT "[sys_set_reserve] PID %d (C = %lld ms / T = %lld ms) CPU %u\n",
	       pid, ktime_to_ms(task->C_time), ktime_to_ms(task->T_time), cid);

	/* mark as having a reservation */
	task->has_reservation = 1;

	/* set the frequency based on sysclock algorithm */
	sysclock_set();

	return 0;

fail:
	if (task->has_reservation || task->energymon_node) {
		cancel_reserve_hook(task);
	}
	return -EINVAL;
}
/*
 * ncp_fill_super - mount-time setup for an NCP (NetWare) filesystem.
 *
 * Decodes the binary or textual mount data into @data, takes over the
 * already-connected socket passed in via file descriptor, allocates
 * the packet buffers, connects to the server, negotiates buffer size
 * (and packet signing if configured) and installs the root dentry.
 * Returns 0 on success or a negative errno; on any failure the partly
 * constructed state is unwound through the goto ladder at the end.
 */
static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
{
	struct ncp_mount_data_kernel data;
	struct ncp_server *server;
	struct file *ncp_filp;
	struct inode *root_inode;
	struct inode *sock_inode;
	struct socket *sock;
	int error;
	int default_bufsize;
#ifdef CONFIG_NCPFS_PACKET_SIGNING
	int options;
#endif
	struct ncp_entry_info finfo;

	data.wdog_pid = NULL;	/* so the error path may put_pid() it safely */
	server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
	if (!server)
		return -ENOMEM;
	sb->s_fs_info = server;

	error = -EFAULT;
	if (raw_data == NULL)
		goto out;
	/* the first int of the mount data identifies its layout version */
	switch (*(int*)raw_data) {
	case NCP_MOUNT_VERSION:
		{
			struct ncp_mount_data* md = (struct ncp_mount_data*)raw_data;

			data.flags = md->flags;
			data.int_flags = NCP_IMOUNT_LOGGEDIN_POSSIBLE;
			data.mounted_uid = md->mounted_uid;
			/* takes a reference; dropped via put_pid() on error */
			data.wdog_pid = find_get_pid(md->wdog_pid);
			data.ncp_fd = md->ncp_fd;
			data.time_out = md->time_out;
			data.retry_count = md->retry_count;
			data.uid = md->uid;
			data.gid = md->gid;
			data.file_mode = md->file_mode;
			data.dir_mode = md->dir_mode;
			data.info_fd = -1;
			memcpy(data.mounted_vol, md->mounted_vol, NCP_VOLNAME_LEN+1);
		}
		break;
	case NCP_MOUNT_VERSION_V4:
		{
			struct ncp_mount_data_v4* md = (struct ncp_mount_data_v4*)raw_data;

			data.flags = md->flags;
			data.int_flags = 0;
			data.mounted_uid = md->mounted_uid;
			data.wdog_pid = find_get_pid(md->wdog_pid);
			data.ncp_fd = md->ncp_fd;
			data.time_out = md->time_out;
			data.retry_count = md->retry_count;
			data.uid = md->uid;
			data.gid = md->gid;
			data.file_mode = md->file_mode;
			data.dir_mode = md->dir_mode;
			data.info_fd = -1;
			data.mounted_vol[0] = 0;
		}
		break;
	default:
		error = -ECHRNG;
		/* textual option string starts with "vers" */
		if (memcmp(raw_data, "vers", 4) == 0) {
			error = ncp_parse_options(&data, raw_data);
		}
		if (error)
			goto out;
		break;
	}
	error = -EBADF;
	ncp_filp = fget(data.ncp_fd);
	if (!ncp_filp)
		goto out;
	error = -ENOTSOCK;
	sock_inode = ncp_filp->f_path.dentry->d_inode;
	if (!S_ISSOCK(sock_inode->i_mode))
		goto out_fput;
	sock = SOCKET_I(sock_inode);
	if (!sock)
		goto out_fput;

	/* TCP connections can use a much larger packet buffer than UDP */
	if (sock->type == SOCK_STREAM)
		default_bufsize = 0xF000;
	else
		default_bufsize = 1024;

	sb->s_flags |= MS_NODIRATIME;	/* probably even noatime */
	sb->s_maxbytes = 0xFFFFFFFFU;
	sb->s_blocksize = 1024;	/* Eh... Is this correct? */
	sb->s_blocksize_bits = 10;
	sb->s_magic = NCP_SUPER_MAGIC;
	sb->s_op = &ncp_sops;

	server = NCP_SBP(sb);
	memset(server, 0, sizeof(*server));

	server->ncp_filp = ncp_filp;
	server->ncp_sock = sock;

	/* optional second descriptor for out-of-band info traffic */
	if (data.info_fd != -1) {
		struct socket *info_sock;

		error = -EBADF;
		server->info_filp = fget(data.info_fd);
		if (!server->info_filp)
			goto out_fput;
		error = -ENOTSOCK;
		sock_inode = server->info_filp->f_path.dentry->d_inode;
		if (!S_ISSOCK(sock_inode->i_mode))
			goto out_fput2;
		info_sock = SOCKET_I(sock_inode);
		if (!info_sock)
			goto out_fput2;
		error = -EBADFD;
		if (info_sock->type != SOCK_STREAM)
			goto out_fput2;
		server->info_sock = info_sock;
	}

	/* fields below were already zeroed by the memset above */
/*	server->lock = 0;	*/
	mutex_init(&server->mutex);
	server->packet = NULL;
/*	server->buffer_size = 0;	*/
/*	server->conn_status = 0;	*/
/*	server->root_dentry = NULL;	*/
/*	server->root_setuped = 0;	*/
#ifdef CONFIG_NCPFS_PACKET_SIGNING
/*	server->sign_wanted = 0;	*/
/*	server->sign_active = 0;	*/
#endif
	server->auth.auth_type = NCP_AUTH_NONE;
/*	server->auth.object_name_len = 0;	*/
/*	server->auth.object_name = NULL;	*/
/*	server->auth.object_type = 0;	*/
/*	server->priv.len = 0;	*/
/*	server->priv.data = NULL;	*/

	server->m = data;
	/* Although anything producing this is buggy, it happens
	   now because of PATH_MAX changes.. */
	if (server->m.time_out < 1) {
		server->m.time_out = 10;
		printk(KERN_INFO "You need to recompile your ncpfs utils..\n");
	}
	/* convert the timeout from centiseconds to jiffies */
	server->m.time_out = server->m.time_out * HZ / 100;
	server->m.file_mode = (server->m.file_mode & S_IRWXUGO) | S_IFREG;
	server->m.dir_mode = (server->m.dir_mode & S_IRWXUGO) | S_IFDIR;

#ifdef CONFIG_NCPFS_NLS
	/* load the default NLS charsets */
	server->nls_vol = load_nls_default();
	server->nls_io = load_nls_default();
#endif /* CONFIG_NCPFS_NLS */

	server->dentry_ttl = 0;	/* no caching */

	INIT_LIST_HEAD(&server->tx.requests);
	mutex_init(&server->rcv.creq_mutex);
	server->tx.creq = NULL;
	server->rcv.creq = NULL;
	/* save the socket's original callbacks before hooking our own */
	server->data_ready = sock->sk->sk_data_ready;
	server->write_space = sock->sk->sk_write_space;
	server->error_report = sock->sk->sk_error_report;
	sock->sk->sk_user_data = server;

	init_timer(&server->timeout_tm);
#undef NCP_PACKET_SIZE
#define NCP_PACKET_SIZE 131072
	error = -ENOMEM;
	server->packet_size = NCP_PACKET_SIZE;
	server->packet = vmalloc(NCP_PACKET_SIZE);
	if (server->packet == NULL)
		goto out_nls;
	server->txbuf = vmalloc(NCP_PACKET_SIZE);
	if (server->txbuf == NULL)
		goto out_packet;
	server->rxbuf = vmalloc(NCP_PACKET_SIZE);
	if (server->rxbuf == NULL)
		goto out_txbuf;

	/* hook our receive/error callbacks into the socket */
	sock->sk->sk_data_ready = ncp_tcp_data_ready;
	sock->sk->sk_error_report = ncp_tcp_error_report;
	if (sock->type == SOCK_STREAM) {
		server->rcv.ptr = (unsigned char*)&server->rcv.buf;
		server->rcv.len = 10;
		server->rcv.state = 0;
		INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc);
		INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc);
		sock->sk->sk_write_space = ncp_tcp_write_space;
	} else {
		INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc);
		INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc);
		server->timeout_tm.data = (unsigned long)server;
		server->timeout_tm.function = ncpdgram_timeout_call;
	}

	ncp_lock_server(server);
	error = ncp_connect(server);
	ncp_unlock_server(server);
	if (error < 0)
		goto out_rxbuf;
	DPRINTK("ncp_fill_super: NCP_SBP(sb) = %x\n", (int) NCP_SBP(sb));

	error = -EMSGSIZE;	/* -EREMOTESIDEINCOMPATIBLE */
#ifdef CONFIG_NCPFS_PACKET_SIGNING
	/* first try the default options; renegotiate if the server differs */
	if (ncp_negotiate_size_and_options(server, default_bufsize,
		NCP_DEFAULT_OPTIONS, &(server->buffer_size), &options) == 0) {
		if (options != NCP_DEFAULT_OPTIONS) {
			if (ncp_negotiate_size_and_options(server,
				default_bufsize, options & 2,
				&(server->buffer_size), &options) != 0) {
				goto out_disconnect;
			}
		}
		if (options & 2)
			server->sign_wanted = 1;
	} else
#endif	/* CONFIG_NCPFS_PACKET_SIGNING */
	if (ncp_negotiate_buffersize(server, default_bufsize,
				     &(server->buffer_size)) != 0)
		goto out_disconnect;
	DPRINTK("ncpfs: bufsize = %d\n", server->buffer_size);

	/* synthesize a root directory entry for the mount point */
	memset(&finfo, 0, sizeof(finfo));
	finfo.i.attributes = aDIR;
	finfo.i.dataStreamSize = 0;	/* ignored */
	finfo.i.dirEntNum = 0;
	finfo.i.DosDirNum = 0;
#ifdef CONFIG_NCPFS_SMALLDOS
	finfo.i.NSCreator = NW_NS_DOS;
#endif
	finfo.volume = NCP_NUMBER_OF_VOLUMES;
	/* set dates of mountpoint to Jan 1, 1986; 00:00 */
	finfo.i.creationTime = finfo.i.modifyTime = cpu_to_le16(0x0000);
	finfo.i.creationDate = finfo.i.modifyDate
			     = finfo.i.lastAccessDate
			     = cpu_to_le16(0x0C21);
	finfo.i.nameLen = 0;
	finfo.i.entryName[0] = '\0';

	finfo.opened = 0;
	finfo.ino = 2;	/* tradition */

	server->name_space[finfo.volume] = NW_NS_DOS;

	error = -ENOMEM;
	root_inode = ncp_iget(sb, &finfo);
	if (!root_inode)
		goto out_disconnect;
	DPRINTK("ncp_fill_super: root vol=%d\n", NCP_FINFO(root_inode)->volNumber);
	sb->s_root = d_alloc_root(root_inode);
	if (!sb->s_root)
		goto out_no_root;
	sb->s_root->d_op = &ncp_root_dentry_operations;
	return 0;

	/* error unwinding: each label releases what was acquired above it */
out_no_root:
	iput(root_inode);
out_disconnect:
	ncp_lock_server(server);
	ncp_disconnect(server);
	ncp_unlock_server(server);
out_rxbuf:
	ncp_stop_tasks(server);
	vfree(server->rxbuf);
out_txbuf:
	vfree(server->txbuf);
out_packet:
	vfree(server->packet);
out_nls:
#ifdef CONFIG_NCPFS_NLS
	unload_nls(server->nls_io);
	unload_nls(server->nls_vol);
#endif
out_fput2:
	if (server->info_filp)
		fput(server->info_filp);
out_fput:
	/* 23/12/1998 Marcin Dalecki:
	 * the previously used put_filp(ncp_filp) was bogus, since
	 * it doesn't do proper unlocking. */
	fput(ncp_filp);
out:
	put_pid(data.wdog_pid);
	sb->s_fs_info = NULL;
	kfree(server);
	return error;
}
/*
 * sys_cancel_reserve - remove the CPU-budget reservation from the task
 * identified by @pid (0 = current): run the cancel hook, stop the
 * enforcement timers, wake the task if it was suspended, move it back
 * to CPU 0 and power down CPUs left without reservations.
 *
 * Fix vs. original: the struct pid reference taken by find_get_pid()
 * was never dropped.
 *
 * Returns 0 on success, -ENODEV if no such task, -EINVAL if the task
 * has no reservation or a CPU power/affinity operation fails.
 */
long sys_cancel_reserve(pid_t pid)
{
	struct cpumask set;
	struct pid *pid_struct;
	struct task_struct *task;
	struct task_struct *tmp;
	int i;
	int cpu_task_count[] = {0, 0, 0, 0};

	printk(KERN_ALERT "[sys_cancel_reserve] PID %u\n", pid);

	/* locate the task_struct for the task required */
	if (pid == 0) {
		task = current;
	} else {
		rcu_read_lock();
		pid_struct = find_get_pid(pid);
		if (!pid_struct) {
			rcu_read_unlock();
			return -ENODEV;
		}
		task = pid_task(pid_struct, PIDTYPE_PID);
		put_pid(pid_struct);	/* fix: drop the find_get_pid() ref */
		if (!task) {
			rcu_read_unlock();
			return -ENODEV;
		}
		rcu_read_unlock();
	}

	/* make sure the task has a reservation */
	if (task->has_reservation == 0) {
		return -EINVAL;
	}

	if (task->has_reservation || task->energymon_node) {
		cancel_reserve_hook(task);	/* execute cancel reserve hook */
	}

	/* cancel timers if they are active */
	if (hrtimer_active(&(task->T_timer))) {
		hrtimer_cancel(&(task->T_timer));
	}
	if (hrtimer_active(&(task->C_timer))) {
		hrtimer_cancel(&(task->C_timer));
	}

	/* make runnable any task suspended by enforcement */
	if (task->put_to_sleep) {
		task->put_to_sleep = 0;
		wake_up_process(task);
	}

	/* mark as not having a reservation */
	task->has_reservation = 0;

	/* set process CPU to 0 because it is never offline */
	cpumask_clear(&set);
	cpumask_set_cpu(0, &set);
	if (sched_setaffinity(task->pid, &set)) {
		printk(KERN_INFO "[sys_cancel_reserve] failed to set CPU affinity\n");
		return -EINVAL;
	}

	/* find what cpus have tasks with reservations */
	rcu_read_lock();
	for_each_process(tmp) {
		if (tmp->has_reservation) {
			cpu_task_count[task_cpu(tmp)] = 1;
		}
	}
	rcu_read_unlock();

	/* Bring offline all cpus with no tasks */
	for (i = 0; i < NUM_CPUS; i++) {
		if (cpu_task_count[i] == 0) {
			if (power_cpu(i, 0) != 0) {
				printk(KERN_INFO "[sys_cancel_reserve] failed to turn off cpu %d", i);
				return -EINVAL;
			}
		} else {
			if (power_cpu(i, 1) != 0) {
				printk(KERN_INFO "[sys_cancel_reserve] failed to turn on cpu %d", i);
				return -EINVAL;
			}
		}
	}

	/* set the frequency based on sysclock algorithm */
	sysclock_set();

	return 0;
}
/*
 * ncp_parse_options - parse a textual ("vers"-style) NCP mount option
 * string into @data.  Fills @data with defaults first, then applies
 * each recognized single-letter option.  Returns 0 on success or a
 * negative errno; on failure the pid reference that may have been
 * taken for 'w' is dropped and data->wdog_pid reset to NULL.
 */
static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options)
{
	int optval;
	char *optarg;
	unsigned long optint;
	int version = 0;
	int ret;

	/* defaults, overridden by the options below */
	data->flags = 0;
	data->int_flags = 0;
	data->mounted_uid = 0;
	data->wdog_pid = NULL;
	data->ncp_fd = ~0;
	data->time_out = 10;
	data->retry_count = 20;
	data->uid = 0;
	data->gid = 0;
	data->file_mode = 0600;
	data->dir_mode = 0700;
	data->info_fd = -1;
	data->mounted_vol[0] = 0;

	while ((optval = ncp_getopt("ncpfs", &options, ncp_opts, NULL, &optarg, &optint)) != 0) {
		ret = optval;
		if (ret < 0)
			goto err;
		switch (optval) {
			case 'u':
				data->uid = optint;
				break;
			case 'g':
				data->gid = optint;
				break;
			case 'o':
				data->mounted_uid = optint;
				break;
			case 'm':
				data->file_mode = optint;
				break;
			case 'd':
				data->dir_mode = optint;
				break;
			case 't':
				data->time_out = optint;
				break;
			case 'r':
				data->retry_count = optint;
				break;
			case 'f':
				data->flags = optint;
				break;
			case 'w':
				/* takes a pid reference; released on err */
				data->wdog_pid = find_get_pid(optint);
				break;
			case 'n':
				data->ncp_fd = optint;
				break;
			case 'i':
				data->info_fd = optint;
				break;
			case 'v':
				ret = -ECHRNG;
				if (optint < NCP_MOUNT_VERSION_V4)
					goto err;
				if (optint > NCP_MOUNT_VERSION_V5)
					goto err;
				version = optint;
				break;
		}
	}
	return 0;
err:
	put_pid(data->wdog_pid);
	data->wdog_pid = NULL;
	return ret;
}
int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) { struct cmsghdr *cmsg; int err; for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { err = -EINVAL; /* Verify that cmsg_len is at least sizeof(struct cmsghdr) */ /* The first check was omitted in <= 2.2.5. The reasoning was that parser checks cmsg_len in any case, so that additional check would be work duplication. But if cmsg_level is not SOL_SOCKET, we do not check for too short ancillary data object at all! Oops. OK, let's add it... */ if (!CMSG_OK(msg, cmsg)) goto error; if (cmsg->cmsg_level != SOL_SOCKET) continue; switch (cmsg->cmsg_type) { case SCM_RIGHTS: if (!sock->ops || sock->ops->family != PF_UNIX) goto error; err=scm_fp_copy(cmsg, &p->fp); if (err<0) goto error; break; case SCM_CREDENTIALS: if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred))) goto error; memcpy(&p->creds, CMSG_DATA(cmsg), sizeof(struct ucred)); err = scm_check_creds(&p->creds); if (err) goto error; if (pid_vnr(p->pid) != p->creds.pid) { struct pid *pid; err = -ESRCH; pid = find_get_pid(p->creds.pid); if (!pid) goto error; put_pid(p->pid); p->pid = pid; } if ((p->cred->euid != p->creds.uid) || (p->cred->egid != p->creds.gid)) { struct cred *cred; err = -ENOMEM; cred = prepare_creds(); if (!cred) goto error; cred->uid = cred->euid = p->creds.uid; cred->gid = cred->egid = p->creds.uid; put_cred(p->cred); p->cred = cred; } break; default: goto error; } } if (p->fp && !p->fp->count) { kfree(p->fp); p->fp = NULL; } return 0; error: scm_destroy(p); return err; }
int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) { struct cmsghdr *cmsg; int err; for_each_cmsghdr(cmsg, msg) { err = -EINVAL; /* Verify that cmsg_len is at least sizeof(struct cmsghdr) */ /* The first check was omitted in <= 2.2.5. The reasoning was that parser checks cmsg_len in any case, so that additional check would be work duplication. But if cmsg_level is not SOL_SOCKET, we do not check for too short ancillary data object at all! Oops. OK, let's add it... */ if (!CMSG_OK(msg, cmsg)) goto error; if (cmsg->cmsg_level != SOL_SOCKET) continue; switch (cmsg->cmsg_type) { case SCM_RIGHTS: if (!sock->ops || sock->ops->family != PF_UNIX) goto error; err=scm_fp_copy(cmsg, &p->fp); if (err<0) goto error; break; case SCM_CREDENTIALS: { struct ucred creds; kuid_t uid; kgid_t gid; if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred))) goto error; memcpy(&creds, CMSG_DATA(cmsg), sizeof(struct ucred)); err = scm_check_creds(&creds); if (err) goto error; p->creds.pid = creds.pid; if (!p->pid || pid_vnr(p->pid) != creds.pid) { struct pid *pid; err = -ESRCH; pid = find_get_pid(creds.pid); if (!pid) goto error; put_pid(p->pid); p->pid = pid; } err = -EINVAL; uid = make_kuid(current_user_ns(), creds.uid); gid = make_kgid(current_user_ns(), creds.gid); if (!uid_valid(uid) || !gid_valid(gid)) goto error; p->creds.uid = uid; p->creds.gid = gid; break; } default: goto error; } }
/*
 * autofs_fill_super - populate a superblock for an autofs mount.
 *
 * Allocates and initializes the autofs_sb_info, sets up the root inode and
 * dentry, parses the mount options (pipe fd, owner uid/gid, oz process
 * group, protocol range), pins the daemon's process group via
 * find_get_pid(), and attaches the daemon's pipe.
 *
 * Returns 0 on success, -EINVAL on any failure.  The goto-cleanup labels
 * unwind in strict reverse order of acquisition: fput(pipe) ->
 * put_pid(oz_pgrp) -> dput(root) -> iput(root_inode) -> kfree(sbi).
 * NOTE(review): the dput(root) path skips the explicit iput because dput
 * drops the dentry's inode reference — presumably intentional; the
 * fail_iput path is only reached when root allocation itself failed.
 */
int autofs_fill_super(struct super_block *s, void *data, int silent)
{
	struct inode * root_inode;
	struct dentry * root;
	struct file * pipe;
	int pipefd;
	struct autofs_sb_info *sbi;
	int minproto, maxproto;
	pid_t pgid;

	/* kzalloc: sbi starts fully zeroed; fields below are set explicitly
	 * anyway for clarity. */
	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		goto fail_unlock;
	DPRINTK(("autofs: starting up, sbi = %p\n",sbi));

	s->s_fs_info = sbi;
	sbi->magic = AUTOFS_SBI_MAGIC;
	sbi->pipe = NULL;
	/* Start catatonic (no daemon attached) until the pipe is wired up. */
	sbi->catatonic = 1;
	sbi->exp_timeout = 0;
	autofs_initialize_hash(&sbi->dirhash);
	sbi->queues = NULL;
	memset(sbi->symlink_bitmap, 0, sizeof(long)*AUTOFS_SYMLINK_BITMAP_LEN);
	sbi->next_dir_ino = AUTOFS_FIRST_DIR_INO;
	s->s_blocksize = 1024;
	s->s_blocksize_bits = 10;
	s->s_magic = AUTOFS_SUPER_MAGIC;
	s->s_op = &autofs_sops;
	s->s_time_gran = 1;
	sbi->sb = s;

	root_inode = iget(s, AUTOFS_ROOT_INO);
	root = d_alloc_root(root_inode);
	pipe = NULL;

	if (!root)
		goto fail_iput;

	/* Can this call block? - WTF cares? s is locked. */
	/* parse_options writes the owner uid/gid straight into the root
	 * inode and returns nonzero on malformed option strings. */
	if (parse_options(data, &pipefd, &root_inode->i_uid,
			  &root_inode->i_gid, &pgid, &minproto, &maxproto)) {
		printk("autofs: called with bogus options\n");
		goto fail_dput;
	}

	/* Couldn't this be tested earlier? */
	/* This (v3) driver speaks exactly one protocol version; the daemon's
	 * advertised [minproto, maxproto] range must include it. */
	if (minproto > AUTOFS_PROTO_VERSION ||
	    maxproto < AUTOFS_PROTO_VERSION) {
		printk("autofs: kernel does not match daemon version\n");
		goto fail_dput;
	}

	DPRINTK(("autofs: pipe fd = %d, pgrp = %u\n", pipefd, pgid));
	/* Pin a reference to the daemon's process group; released on the
	 * error paths below (and, presumably, at unmount elsewhere). */
	sbi->oz_pgrp = find_get_pid(pgid);

	if (!sbi->oz_pgrp) {
		printk("autofs: could not find process group %d\n", pgid);
		goto fail_dput;
	}

	pipe = fget(pipefd);

	if (!pipe) {
		printk("autofs: could not open pipe file descriptor\n");
		goto fail_put_pid;
	}

	/* The daemon communicates via writes on this pipe; a file without a
	 * write op cannot serve as the command channel. */
	if (!pipe->f_op || !pipe->f_op->write)
		goto fail_fput;

	sbi->pipe = pipe;
	/* Pipe attached: leave catatonic mode. */
	sbi->catatonic = 0;

	/*
	 * Success! Install the root dentry now to indicate completion.
	 */
	s->s_root = root;
	return 0;

fail_fput:
	printk("autofs: pipe file descriptor does not contain proper ops\n");
	fput(pipe);
fail_put_pid:
	put_pid(sbi->oz_pgrp);
fail_dput:
	dput(root);
	goto fail_free;		/* dput released the inode; skip iput */
fail_iput:
	printk("autofs: get root dentry failed\n");
	iput(root_inode);
fail_free:
	kfree(sbi);
	s->s_fs_info = NULL;
fail_unlock:
	return -EINVAL;
}
static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options) { int optval; char *optarg; unsigned long optint; int version = 0; int ret; data->flags = 0; data->int_flags = 0; data->mounted_uid = GLOBAL_ROOT_UID; data->wdog_pid = NULL; data->ncp_fd = ~0; data->time_out = NCP_DEFAULT_TIME_OUT; data->retry_count = NCP_DEFAULT_RETRY_COUNT; data->uid = GLOBAL_ROOT_UID; data->gid = GLOBAL_ROOT_GID; data->file_mode = NCP_DEFAULT_FILE_MODE; data->dir_mode = NCP_DEFAULT_DIR_MODE; data->info_fd = -1; data->mounted_vol[0] = 0; while ((optval = ncp_getopt("ncpfs", &options, ncp_opts, NULL, &optarg, &optint)) != 0) { ret = optval; if (ret < 0) goto err; switch (optval) { case 'u': data->uid = make_kuid(current_user_ns(), optint); if (!uid_valid(data->uid)) { ret = -EINVAL; goto err; } break; case 'g': data->gid = make_kgid(current_user_ns(), optint); if (!gid_valid(data->gid)) { ret = -EINVAL; goto err; } break; case 'o': data->mounted_uid = make_kuid(current_user_ns(), optint); if (!uid_valid(data->mounted_uid)) { ret = -EINVAL; goto err; } break; case 'm': data->file_mode = optint; break; case 'd': data->dir_mode = optint; break; case 't': data->time_out = optint; break; case 'r': data->retry_count = optint; break; case 'f': data->flags = optint; break; case 'w': data->wdog_pid = find_get_pid(optint); break; case 'n': data->ncp_fd = optint; break; case 'i': data->info_fd = optint; break; case 'v': ret = -ECHRNG; if (optint < NCP_MOUNT_VERSION_V4) goto err; if (optint > NCP_MOUNT_VERSION_V5) goto err; version = optint; break; } } return 0; err: put_pid(data->wdog_pid); data->wdog_pid = NULL; return ret; }