/****************************************************************************
*
*   bcm_fuse_kril_exit_module(void);
*
*   Exit module: stop the KRIL command thread, release notify-list memory,
*   tear down wake locks and the DVFS handler.
*
****************************************************************************/
static void __exit bcm_fuse_kril_exit_module(void)
{
    int i;

    /* Ask the command thread to terminate, then wait for it to exit. */
    if (gKRILCmdThreadPid >= 0) {
        kill_proc_info(SIGTERM, SEND_SIG_PRIV, gKRILCmdThreadPid);
        wait_for_completion(&gCmdThreadExited);
    }

    /* Release allocated Notify list memory. */
    for (i = 0; i < TOTAL_BCMDEVICE_NUM; i++) {
        if (bcm_dev_results[i].notifyid_list != NULL &&
            bcm_dev_results[i].notifyid_list_len != 0) {
            kfree(bcm_dev_results[i].notifyid_list);
            bcm_dev_results[i].notifyid_list = NULL;
            bcm_dev_results[i].notifyid_list_len = 0;
        }
    }

/* BUGFIX: the guard was misspelled "CONFIG_WAKELCOK", so the wake locks
 * were never destroyed on module unload when wakelock support was built in,
 * leaking them. */
#ifdef CONFIG_WAKELOCK
    wake_lock_destroy(&kril_rsp_wake_lock);
    wake_lock_destroy(&kril_notify_wake_lock);
#endif

#ifdef CONFIG_CPU_FREQ_GOV_BCM21553
    KRIL_UnregDVFSHandler();
#endif
}
/*-------------------------------------------------------------------------
 * deliver_signal - deliver a forwarded (bproc) signal to a local process.
 *
 * @m:   masquerade master for pid translation, or NULL when the message
 *       already carries a real (local) pid.
 * @msg: signal message; msg->hdr.to is the target pid and msg->info the
 *       packed siginfo to deliver.
 *
 * Returns 0 on success, -ESRCH when the masqueraded pid no longer maps to
 * a real process, or the result of kill_proc_info().
 */
int deliver_signal(struct bproc_masq_master_t *m, struct bproc_signal_msg_t *msg)
{
    int realpid, r;
    struct siginfo info;

    /* FIX ME: If we're doing pid mapping we should deliver the signal
     * before letting go of the tasklist_lock */
    if (m) {
        /* Translate the masqueraded pid to the real local pid under
         * tasklist_lock so the mapping can't change mid-lookup. */
        read_lock(&tasklist_lock);
        realpid = masq_masq2real(m, msg->hdr.to);
        read_unlock(&tasklist_lock);
        if (realpid < 0) {
            /* This case can happen when exit messages and fwd_sig
             * messages cross each other. I'm not sure whether allowing
             * this to happen is technically "incorrect". */
            return -ESRCH;
        }
    } else
        realpid = msg->hdr.to;

    /* Never signal init this way. XXXX DEBUG PARANOIA */
    if (realpid == 1) {
        BUG();
    }

    /* We know we're signalling a process here since this signal was
     * forwarded from a ghost */
    /* Unpack the wire-format siginfo into a kernel struct siginfo, then
     * deliver it.  NOTE(review): si_signo is read from the packed
     * msg->info rather than the unpacked info — presumably identical;
     * confirm bproc_unpack_siginfo preserves si_signo. */
    bproc_unpack_siginfo(&msg->info, &info);
    r = kill_proc_info(msg->info.si_signo, &info, realpid);
    return r;
}
/*
 * dbgcfgtool_write - proc/debugfs write handler for debug commands.
 *
 * Parses the first word of the user buffer as a command:
 *   "HANG_LINUX"       - panic the kernel immediately (debug aid).
 *   "DALVIK_HEAP_DUMP" - next word is a decimal pid; send it SIGUSR1.
 *
 * Returns the number of bytes consumed, -EFAULT on oversized input or a
 * failed copy from user space, or 0 when the scratch buffer cannot be
 * allocated (kept as-is for userspace compatibility).
 */
static ssize_t dbgcfgtool_write(struct file *fp, const char __user *buf,
                                size_t count, loff_t *pos)
{
    char *buf_idx;
    char szCommand[TEMP_BUF_SIZE * 3];
    char szMaskName[TEMP_BUF_SIZE];
    int mask;
    ssize_t ret;

    /* Reserve one byte for the terminating NUL. */
    if (count > (DBGCFG_COMMAND_BUFFER_SIZE - 1)) {
        return -EFAULT;
    }

    write_buf = kmalloc(DBGCFG_COMMAND_BUFFER_SIZE, GFP_KERNEL);
    if (!write_buf) {
        return 0;
    }
    memset(write_buf, 0, DBGCFG_COMMAND_BUFFER_SIZE);

    if (copy_from_user(write_buf, buf, count)) {
        /* BUGFIX: this path previously returned without freeing
         * write_buf, leaking it on every failed copy. */
        ret = -EFAULT;
        goto out;
    }
    write_buf[count] = '\0';

    buf_idx = write_buf;
    getNextWord(&buf_idx, szCommand, 30);

    if (!strncmp(szCommand, "HANG_LINUX", DBGCFG_COMMAND_BUFFER_SIZE)) {
        printk(KERN_INFO "dbgcfgtool_write: hang linux\n");
        panic("dbgcfgtool");
    } else if (!strncmp(szCommand, "DALVIK_HEAP_DUMP", DBGCFG_COMMAND_BUFFER_SIZE)) {
        getNextWord(&buf_idx, szMaskName, TEMP_BUF_SIZE); /* this is the pid value */
        printk(KERN_INFO "dbgcfgtool_write: %s %s\n", szCommand, szMaskName);
        /* BUGFIX: the conversion base was TEMP_BUF_SIZE (a buffer size);
         * the pid is written in decimal, so use base 10. */
        mask = (int)simple_strtol(szMaskName, NULL, 10);
        printk(KERN_INFO "dbgcfgtool_write: pid=%d sig=%d\n", mask, SIGUSR1);
        kill_proc_info(SIGUSR1, SEND_SIG_PRIV, mask);
    }

    *pos += count;
    ret = count;
out:
    /* kfree(NULL) is a no-op, so no guard is needed. */
    kfree(write_buf);
    write_buf = NULL;
    return ret;
}
int lb_rcvmsg(char *rcvmsg) { /*initialize and setup socket to receive messages from bare*/ struct sockaddr_in server; int servererror; rcvbuffer = (char*)kmalloc(64000, __GFP_COLD); rcvbuffercount = 0; totalpackets = 0; printk(KERN_DEBUG "LB_SOCKET: T201 start: lb_rcvmsg()\n"); if( sock_create( PF_INET,SOCK_DGRAM, IPPROTO_UDP,&udpsocket)<0 ) { printk(KERN_DEBUG "LB_SOCKET: lb_rcvmsg() TERROR002 server: Error creating udpsocket.\n"); return -EIO; } server.sin_family = AF_INET; server.sin_addr.s_addr = INADDR_ANY; server.sin_port = htons( (unsigned short)SERVERPORT); servererror = udpsocket->ops->bind(udpsocket, (struct sockaddr *) &server, sizeof(server ) ); if( servererror ) { printk(KERN_DEBUG "LB_SOCKET: lb_rcvmsg() Server Error: TERROR003: %i \n", servererror); sock_release( udpsocket ); return -EIO; } printk(KERN_DEBUG "LB_SOCKET: T202 start lb_rcvmsg()\n"); /* pointer reallocation does not work in kernel */ /* copy the data to pass by reference!! assigning pointers will not work*/ udpsocket->sk->sk_data_ready = callbackrcv; /*wait until a packet arrived here, synchronous rcv */ while (rcvbuffercount == 0) { msleep_interruptible(HZ*2); } memcpy(rcvmsg, rcvbuffer, rcvbuffercount); printk(KERN_DEBUG "LB_SOCKET: T203 lb_rcvmsg: Leaving While Loop \n"); /* close socket */ if( com_thread_pid ) { kill_proc_info( com_thread_pid, SIGTERM, 0 ); wait_for_completion( &threadcomplete ); } if( udpsocket ) sock_release( udpsocket ); printk(KERN_DEBUG "LB_SOCKET: T206 end lb_rcvmsg(): TOTAL BYTES RCVD: %i TOTAL PACKETS: %i \n", rcvbuffercount, totalpackets); return rcvbuffercount; /* return size */ }
int lb_sendmsg (char *msgdata, int size) { int len; struct msghdr msg; struct iovec iov; mm_segment_t oldfs; struct sockaddr_in to; /* initialize and setup socket to send messages to bare */ printk(KERN_DEBUG "LB_SOCKET T101 start: lb_sendmsg() msg: %s \n", msgdata); if( sock_create( PF_INET,SOCK_DGRAM,IPPROTO_UDP,&clientsocket)<0 ){ printk( KERN_DEBUG "LB_SOCKET: lb_sendmsg() TERROR001: server: Error creating clientsocket \n" ); return -EIO; } memset(&to,0, sizeof(to)); to.sin_family = AF_INET; to.sin_addr.s_addr = in_aton(ipaddr); /* bare PC destination address */ to.sin_port = htons( (unsigned short) SERVERPORT ); memset(&msg,0,sizeof(msg)); msg.msg_name = &to; msg.msg_namelen = sizeof(to); iov.iov_base = msgdata; iov.iov_len = size; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_iov = &iov; msg.msg_iovlen = 1; oldfs = get_fs(); set_fs( KERNEL_DS ); len = sock_sendmsg( clientsocket, &msg, size ); set_fs( oldfs ); /* close socket */ if( clientsocket ) { /*msleep_interruptible(HZ*2);*/ sock_release( clientsocket ); printk(KERN_DEBUG "LB_SOCKET T102 close socket: lb_sendmsg() \n"); if( com_thread_pid ) { kill_proc_info( com_thread_pid, SIGTERM, 0 ); wait_for_completion( &threadcomplete ); } } printk(KERN_DEBUG "LB_SOCKET T103 end: lb_sendmsg() \n"); return len; }
/*
 * iodump_exit - module teardown: stop the dump timer, unregister the
 * sysctl table, release the dump buffer, and stop the dump thread.
 */
void __exit iodump_exit(void)
{
    del_timer(&IoDumpTimer);

    /* BUGFIX: the original code unregistered gSysCtlHeader twice (the
     * identical if-block appeared both before and after the vfree),
     * which is a double-unregister.  Do it once and clear the pointer. */
    if (gSysCtlHeader != NULL) {
        unregister_sysctl_table(gSysCtlHeader);
        gSysCtlHeader = NULL;
    }

    if (IoDump.bufp != NULL) {
        vfree(IoDump.bufp);
        IoDump.bufp = NULL;
    }

    /* Signal the dump thread and wait for it to exit. */
    if (IoDumpThreadPid >= 0) {
        kill_proc_info(SIGTERM, SEND_SIG_PRIV, IoDumpThreadPid);
        wait_for_completion(&IoDumpExited);
    }
}
/***********************************************************************
F* Function:    static int process_mon_chains(void) P*A*Z*
*
P* Parameters:  none
P*
P* Returnvalue: int
P*              - 0 if the function returns at all
*
Z* Intention:   This is the core function of the chain functionality.
Z*              The list with the monitored chain is processed and
Z*              expired entries handled appropriately by stepping up
Z*              the escalation ladder. The escalation actions are
Z*              triggered from here.
*
D* Design:      [email protected]
C* Coding:      [email protected]
V* Verification: [email protected]
***********************************************************************/
static int process_mon_chains(void)
{
	struct list_head *ptr;
	monitored_chain_t *entry;
	int sig;

	spin_lock(&mon_lock);
	/* Walk the (sorted-by-expiry) list of monitored chains. */
	for (ptr = mon_list.next; ptr != &mon_list; ptr = ptr->next) {
		entry = list_entry(ptr, monitored_chain_t, list);
		if (time_after_eq(jiffies, entry->expires)) {
			debugk("%s: WD monitor expired for id %d\n",
			       __FUNCTION__, entry->chainid);
			/* Perform the action configured for the current
			 * escalation level of this chain. */
			switch (entry->action[entry->escalation]) {
			case WD_ACTION_SIGNAL:
				debugk("WD: sending user signal for key "
				       "%d...\n", entry->chainid);
				/* Default to SIGTERM when no per-chain
				 * signal was configured. */
				sig = (entry->signal) ? entry->signal : SIGTERM;
				if (entry->pid)
					kill_proc_info(sig, SEND_SIG_PRIV,
						       entry->pid);
				break;
			case WD_ACTION_KILL:
				debugk("WD: sending KILL signal for key "
				       "%d...\n", entry->chainid);
				if (entry->pid)
					kill_proc_info(SIGKILL, SEND_SIG_PRIV,
						       entry->pid);
				break;
			case WD_ACTION_REBOOT:
				/* Drop the lock before unregistering, which
				 * presumably takes mon_lock itself.
				 * NOTE(review): if sys_reboot() ever returns,
				 * execution falls through to the re-queue
				 * code below and the final spin_unlock()
				 * without holding the lock — double unlock
				 * and unlocked list manipulation; confirm
				 * sys_reboot() cannot return here. */
				spin_unlock(&mon_lock);
				wd_unregister_mon_chain(entry->chainid);
				printk("WD: Rebooting system for key "
				       "%d...\n", entry->chainid);
				flush_cache_all();
				/*
				 * XXX This is not safe to call in interrupt
				 * context.
				 */
				sys_reboot(LINUX_REBOOT_MAGIC1,
					   LINUX_REBOOT_MAGIC2,
					   LINUX_REBOOT_CMD_RESTART, NULL);
				break;
			case WD_ACTION_RESET:
				printk("WD: Resetting system for key "
				       "%d...\n", entry->chainid);
				BUG_ON(wd_hw_functions.wd_machine_restart ==
				       NULL);
				wd_hw_functions.wd_machine_restart();
				break;
			default:
				debugk("WD: undefined action %d\n",
				       entry->action[entry->escalation]);
				break;
			}
			/* Step up the escalation ladder and re-arm the
			 * expiry from the next level's timer count.
			 * NOTE(review): escalation is incremented before
			 * indexing timer_count[] — confirm the arrays hold
			 * one more level than the maximum action index. */
			entry->escalation++;
			entry->expires = jiffies +
				HZ * entry->timer_count[entry->escalation];
			/* Re-insert at the position matching the new expiry
			 * so the list stays sorted.
			 * NOTE(review): the loop then advances via
			 * ptr->next, which now reflects the entry's NEW
			 * position — entries may be skipped or revisited in
			 * this pass; verify this is acceptable. */
			list_del(&entry->list);
			insert_mon_chain(entry);
		} else
			/* The list is sorted, so we can stop here */
			break;
	}
	spin_unlock(&mon_lock);
	return 0;
}
/* If we are writing the enable field, we start/stop the kernel timer.
 *
 * sysctl handler for the per-CPU load-test "enable" field:
 *  - read:  plain proc_dointvec() pass-through.
 *  - write: parse the value, then start the load timer (and optionally a
 *    CPU-pinned worker thread) on 0->1, or stop the timer and terminate
 *    the thread on 1->0.
 *
 * Returns 0 on success or a negative errno from proc_dointvec().
 */
static int loadtest_intvec_enable(int cpu, ctl_table * table, int write,
				  void __user *buffer, size_t * lenp,
				  loff_t *ppos)
{
	int rc;
	ICP_OBJ *o = &obj[cpu];
	ICP_LOAD_TEST *lt = &o->loadtest;

	/* NOTE(review): the timer is re-initialized on EVERY call, including
	 * reads and while it may be pending — confirm this cannot clobber a
	 * running timer. */
	init_timer(&lt->timer);
	lt->timer.function = ipc_load_timer_func;
	lt->timer.expires = 0;
	lt->timer.data = (ulong) cpu;

	if (!table || !table->data)
		return -EINVAL;

	if (write) {
		/* use generic int handler to get input value */
		rc = proc_dointvec(table, write, buffer, lenp, ppos);
		if (rc < 0)
			return rc;

		if (!lt->timerStarted && lt->enable) {
			/* Enable transition: optionally spawn the worker
			 * thread pinned to this CPU, then arm the timer. */
			if (lt->useThread) {
				cpumask_t cpu_mask;

				sema_init(&lt->threadSemaphore, 0);
				init_completion(&lt->threadCompletion);
				cpumask_set_cpu(cpu, &cpu_mask);
				/* NOTE(review): kernel_thread() may return a
				 * negative errno; that value is passed to
				 * sched_setaffinity() and stored unchecked. */
				lt->threadPid =
				    kernel_thread(ipc_load_test_thread,
						  &o->cpu_enum, 0);
				sched_setaffinity(lt->threadPid, &cpu_mask);
				cpumask_clear_cpu(cpu, &cpu_mask);
			}

			/* Clamp the period before arming the timer. */
			if (lt->loadPeriod > MAX_LOAD_PERIOD)
				lt->loadPeriod = MAX_LOAD_PERIOD;

			lt->timer.expires =
			    jiffies + msecs_to_jiffies(lt->loadPeriod);
			add_timer(&lt->timer);
			lt->timerStarted = 1;
		} else if (lt->timerStarted && !lt->enable) {
			/* Disable transition: mark stopped, then terminate
			 * the worker thread if one was started. */
			lt->timerStarted = 0;

			/* Kill load testing thread */
			if (lt->useThread) {
				if (lt->threadPid >= 0) {
					kill_proc_info(SIGTERM, SEND_SIG_PRIV,
						       lt->threadPid);
					wait_for_completion(&lt->
							    threadCompletion);
				}
				lt->threadPid = -1;
			}
		}
	} else {
		/* nothing special for read */
		return proc_dointvec(table, write, buffer, lenp, ppos);
	}
	return rc;
}