/*
 * swap_inactive_pagelist() will be called in the low-memory case to
 * swap SWAP_CLUSTER_MAX pages out to swap space.
 */
int swap_inactive_pagelist(unsigned int page_swap_cluster)
{
	struct task_struct *p, *selected = NULL;
	int tasksize;
	int hidden_min_oom_adj = 9;
	int pages_counter = 0;

	read_lock(&tasklist_lock);
	for_each_process(p) {
		struct mm_struct *mm;
		struct signal_struct *sig;
		int oom_adj;

		task_lock(p);
		mm = p->mm;
		sig = p->signal;
		if (!mm || !sig) {
			task_unlock(p);
			continue;
		}

		tasksize = get_mm_rss(mm);
		if (tasksize <= 0) {
			task_unlock(p);
			continue;
		}

		oom_adj = sig->oom_adj;
		if (oom_adj >= hidden_min_oom_adj) {
			/* Keep the task locked until its pages are isolated below. */
			selected = p;
#if SWAP_PROCESS_DEBUG_LOG > 0
			printk("runtime compcache: swap process pid %d, name %s, oom %d\n",
			       p->pid, p->comm, oom_adj);
#endif
			break;
		}
		task_unlock(p);
	}
	read_unlock(&tasklist_lock);

	if (selected) {
		struct zone *zone0 = NULL, *zone1 = NULL;
		LIST_HEAD(zone0_page_list);
		LIST_HEAD(zone1_page_list);

		shrink_pages(selected->mm, &zone0, &zone0_page_list,
			     &zone1, &zone1_page_list, 32);
		task_unlock(selected);
		pages_counter = swap_pages(zone0, &zone0_page_list,
					   zone1, &zone1_page_list);
		printk("pagefreed = %d\n", pages_counter);
	}

	return pages_counter;
}
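/*
 * For reference only: the call sites in swap_inactive_pagelist() imply
 * the following shapes for the shrink_pages()/swap_pages() helpers used
 * in this variant.  This is a sketch inferred from the arguments; the
 * real declarations live elsewhere in the compcache/ramzswap driver and
 * the parameter names here are illustrative assumptions.  shrink_pages()
 * is assumed to isolate up to nr_to_scan pages of the given mm onto the
 * per-zone lists, and swap_pages() to write those lists out and return
 * the number of pages freed.
 */
int shrink_pages(struct mm_struct *mm,
		 struct zone **zone0, struct list_head *zone0_page_list,
		 struct zone **zone1, struct list_head *zone1_page_list,
		 unsigned int nr_to_scan);

int swap_pages(struct zone *zone0, struct list_head *zone0_page_list,
	       struct zone *zone1, struct list_head *zone1_page_list);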
/*
 * lmk_state_store() will be called by the framework, which sends the
 * pid of the process that needs to be swapped out.
 */
static ssize_t lmk_state_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	sscanf(buf, "%d,%d", &lmk_kill_pid, &lmk_kill_ok);

	/* If the screen is on, the optimized compcache is stopped. */
	if (atomic_read(&optimize_comp_on) != 1)
		return size;

	if (lmk_kill_ok == 1) {
		struct task_struct *p;
		struct task_struct *selected = NULL;
		struct sysinfo ramzswap_info = { 0 };
		struct mm_struct *mm_scan = NULL;

		/*
		 * Check the free RAM and swap space and stop the optimized
		 * compcache in the CPU-idle case, leaving some swap space
		 * free for use in the low-memory case.
		 */
		si_swapinfo(&ramzswap_info);
		si_meminfo(&ramzswap_info);

		if ((ramzswap_info.freeswap < CHECK_FREE_SWAPSPACE) ||
		    (ramzswap_info.freeram < check_free_memory)) {
#if SWAP_PROCESS_DEBUG_LOG > 0
			printk(KERN_INFO "idletime compcache is ignored : free RAM %lu, free swap %lu\n",
			       ramzswap_info.freeram, ramzswap_info.freeswap);
#endif
			lmk_kill_ok = 0;
			return size;
		}

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if ((p->pid == lmk_kill_pid) &&
			    (__task_cred(p)->uid > 10000)) {
				task_lock(p);
				selected = p;
				if (!selected->mm || !selected->signal) {
					task_unlock(p);
					selected = NULL;
					break;
				}
				/* Pin the mm so it survives after the task is unlocked. */
				mm_scan = selected->mm;
				if (mm_scan) {
					if (selected->flags & PF_KTHREAD)
						mm_scan = NULL;
					else
						atomic_inc(&mm_scan->mm_users);
				}
				task_unlock(selected);
#if SWAP_PROCESS_DEBUG_LOG > 0
				printk(KERN_INFO "idle time compcache: swap process pid %d, name %s, oom %d, task size %ld\n",
				       p->pid, p->comm,
				       p->signal->oom_adj,
				       get_mm_rss(p->mm));
#endif
				break;
			}
		}
		read_unlock(&tasklist_lock);

		if (mm_scan) {
			LIST_HEAD(zone0_page_list);
			LIST_HEAD(zone1_page_list);
			int pages_tofree = 0, pages_freed = 0;

			down_read(&mm_scan->mmap_sem);
			pages_tofree = shrink_pages(mm_scan,
						    &zone0_page_list,
						    &zone1_page_list,
						    0x7FFFFFFF);
			up_read(&mm_scan->mmap_sem);
			mmput(mm_scan);
			pages_freed = swap_pages(&zone0_page_list,
						 &zone1_page_list);
			lmk_kill_ok = 0;
		}
	}

	return size;
}
/*
 * lmk_state_store() will be called by the framework, which sends the
 * pid of the process that needs to be swapped out.
 */
static ssize_t lmk_state_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	sscanf(buf, "%d,%d", &lmk_kill_pid, &lmk_kill_ok);

	/* If the screen is on, the optimized compcache is stopped. */
	if (atomic_read(&optimize_comp_on) != 1)
		return size;

	if (lmk_kill_ok == 1) {
		struct task_struct *p;
		struct task_struct *selected = NULL;
		struct sysinfo ramzswap_info = { 0 };

		/*
		 * Check the free RAM and swap space and stop the optimized
		 * compcache in the CPU-idle case, leaving some swap space
		 * free for use in the low-memory case.
		 */
		si_swapinfo(&ramzswap_info);
		si_meminfo(&ramzswap_info);

		if ((ramzswap_info.freeswap < CHECK_FREE_SWAPSPACE) ||
		    (ramzswap_info.freeram < CHECK_FREE_MEMORY)) {
			lmk_kill_ok = 0;
			return size;
		}

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if ((p->pid == lmk_kill_pid) &&
			    (__task_cred(p)->uid > 10000)) {
				task_lock(p);
				selected = p;
				if (!selected->mm || !selected->signal) {
					task_unlock(p);
					selected = NULL;
					pr_info("idletime compcache: process is being killed\n");
					break;
				}
#if SWAP_PROCESS_DEBUG_LOG > 0
				pr_info("idletime compcache: swap process pid %d, name %s, task_size %ld\n",
					p->pid, p->comm, get_mm_rss(p->mm));
#endif
				break;
			}
		}
		read_unlock(&tasklist_lock);

		if (selected) {
			struct zone *zone0 = NULL, *zone1 = NULL;
			LIST_HEAD(zone0_page_list);
			LIST_HEAD(zone1_page_list);
			int pages_tofree = 0, pages_freed = 0;

			pages_tofree = shrink_pages(selected->mm, &zone0,
						    &zone0_page_list, &zone1,
						    &zone1_page_list,
						    0x7FFFFFFF);
			/* selected is still task-locked from the loop above. */
			task_unlock(selected);
			pages_freed = swap_pages(zone0, &zone0_page_list,
						 zone1, &zone1_page_list);
			lmk_kill_ok = 0;
		}
	}

	return size;
}
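/*
 * Minimal sketch only: lmk_state_store() is reachable once it is exposed
 * as a writable sysfs device attribute.  The wiring below is an
 * assumption about how that registration could look (attribute name,
 * show handler and probe-time hookup are illustrative, not taken from
 * the original driver).  The framework would then trigger an idle-time
 * swap of, say, pid 1234 with something like
 *	echo "1234,1" > /sys/.../lmk_state
 * where the exact path depends on where the device is registered.
 */
static ssize_t lmk_state_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d,%d\n", lmk_kill_pid, lmk_kill_ok);
}

static DEVICE_ATTR(lmk_state, S_IRUGO | S_IWUSR, lmk_state_show,
		   lmk_state_store);

/* In the driver's probe/init path (illustrative): */
/*	ret = device_create_file(dev, &dev_attr_lmk_state);	*/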