Example #1
0
/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, i = 0, nbuf;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	if (ipipe->inode < opipe->inode) {
		mutex_lock(&ipipe->inode->i_mutex);
		mutex_lock(&opipe->inode->i_mutex);
	} else {
		mutex_lock(&opipe->inode->i_mutex);
		mutex_lock(&ipipe->inode->i_mutex);
	}

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		/*
		 * If we have iterated all input buffers or ran out of
		 * output room, break.
		 */
		if (i >= ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS)
			break;

		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
		nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

		/*
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		 */
		ibuf->ops->get(ipipe, ibuf);

		obuf = opipe->bufs + nbuf;
		*obuf = *ibuf;

		/*
		 * Don't inherit the gift flag, we need to
		 * prevent multiple steals of this page.
		 */
		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

		if (obuf->len > len)
			obuf->len = len;

		opipe->nrbufs++;
		ret += obuf->len;
		len -= obuf->len;
		i++;
	} while (len);

	mutex_unlock(&ipipe->inode->i_mutex);
	mutex_unlock(&opipe->inode->i_mutex);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0) {
		smp_mb();
		if (waitqueue_active(&opipe->wait))
			wake_up_interruptible(&opipe->wait);
		kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
	}

	return ret;
}
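
The opening comment in link_pipe() above names the key trick: both inode mutexes are always taken in a fixed order (by address), so two tasks doing tee() in opposite directions can never each hold one lock while waiting for the other. Below is a minimal user-space sketch of that ordering rule, with illustrative names rather than the kernel API; it assumes the two resources are distinct, as the two pipes are here.

#include <pthread.h>

struct resource {
	pthread_mutex_t lock;
	/* ... payload ... */
};

/* Always lock the lower-addressed resource first to rule out ABBA. */
static void lock_pair(struct resource *a, struct resource *b)
{
	if (a < b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void unlock_pair(struct resource *a, struct resource *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}
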
Example #2
0
static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	unsigned long rem = 0;
	int tasksize;
	int i;
	short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int selected_tasksize = 0;
	short selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM) -
						total_swapcache_pages();

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}

	lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n",
			sc->nr_to_scan, sc->gfp_mask, other_free,
			other_file, min_score_adj);

	if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_scan %lu, %x, return 0\n",
			     sc->nr_to_scan, sc->gfp_mask);
		return 0;
	}

	selected_oom_score_adj = min_score_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		short oom_score_adj;

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
		    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			task_unlock(p);
			rcu_read_unlock();
			return 0;
		}
		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
	}
	if (selected) {
		lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
		lowmem_deathpending_timeout = jiffies + HZ;
		/*
		 * FIXME: lowmemorykiller shouldn't abuse global OOM killer
		 * infrastructure. There is no real reason why the selected
		 * task should have access to the memory reserves.
		 */
		mark_tsk_oom_victim(selected);
		send_sig(SIGKILL, selected, 0);
		rem += selected_tasksize;
	}

	lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	rcu_read_unlock();
	return rem;
}
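
The selection loop in lowmem_scan() (and in the lowmem_shrink() variants later in this file) reduces to one rule: keep the candidate with the highest oom_score_adj, breaking ties in favour of the larger resident set. A compact sketch of that comparator, using illustrative types rather than the driver's actual structures:

struct candidate {
	short oom_score_adj;
	long rss_pages;
};

/* Returns nonzero if 'cand' should replace 'cur' as the kill target. */
static int better_victim(const struct candidate *cand, const struct candidate *cur)
{
	if (!cur)
		return 1;				/* nothing selected yet */
	if (cand->oom_score_adj != cur->oom_score_adj)
		return cand->oom_score_adj > cur->oom_score_adj;
	return cand->rss_pages > cur->rss_pages;	/* tie: bigger task wins */
}
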
Example #3
0
asmlinkage void
do_entIF(unsigned long type, unsigned long a1,
	 unsigned long a2, unsigned long a3, unsigned long a4,
	 unsigned long a5, struct pt_regs regs)
{
	if (!opDEC_testing || type != 4) {
		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
		      &regs, type, 0);
	}

	switch (type) {
	      case 0: /* breakpoint */
		if (ptrace_cancel_bpt(current)) {
			regs.pc -= 4;	/* make pc point to former bpt */
		}
		send_sig(SIGTRAP, current, 1);
		return;

	      case 1: /* bugcheck */
		send_sig(SIGTRAP, current, 1);
		return;

	      case 2: /* gentrap */
		/*
		 * The exception code should be passed on to the signal
		 * handler as the second argument.  Linux doesn't do that
		 * yet (also notice that Linux *always* behaves like
		 * DEC Unix with SA_SIGINFO off; see DEC Unix man page
		 * for sigaction(2)).
		 */
		switch ((long) regs.r16) {
		      case GEN_INTOVF: case GEN_INTDIV: case GEN_FLTOVF:
		      case GEN_FLTDIV: case GEN_FLTUND: case GEN_FLTINV:
		      case GEN_FLTINE: case GEN_ROPRAND:
			send_sig(SIGFPE, current, 1);
			return;

		      case GEN_DECOVF:
		      case GEN_DECDIV:
		      case GEN_DECINV:
		      case GEN_ASSERTERR:
		      case GEN_NULPTRERR:
		      case GEN_STKOVF:
		      case GEN_STRLENERR:
		      case GEN_SUBSTRERR:
		      case GEN_RANGERR:
		      case GEN_SUBRNG:
		      case GEN_SUBRNG1:
		      case GEN_SUBRNG2:
		      case GEN_SUBRNG3:
		      case GEN_SUBRNG4:
		      case GEN_SUBRNG5:
		      case GEN_SUBRNG6:
		      case GEN_SUBRNG7:
			send_sig(SIGTRAP, current, 1);
			return;
		}
		break;

	      case 4: /* opDEC */
		if (implver() == IMPLVER_EV4) {
			/* Some versions of SRM do not handle
			   the opDEC properly - they return the PC of the
			   opDEC fault, not the instruction after as the
			   Alpha architecture requires.  Here we fix it up.
			   We do this by intentionally causing an opDEC
			   fault during the boot sequence and testing if
			   we get the correct PC.  If not, we set a flag
			   to correct it every time through.
			*/
			if (opDEC_testing) {
				if (regs.pc == opDEC_test_pc) {
					opDEC_fix = 4;
					regs.pc += 4;
					printk("opDEC fixup enabled.\n");
				}
				return;
			}
			regs.pc += opDEC_fix; 
			
			/* EV4 does not implement anything except normal
			   rounding.  Everything else will come here as
			   an illegal instruction.  Emulate them.  */
			if (alpha_fp_emul(regs.pc-4))
				return;
		}
		break;

	      case 3: /* FEN fault */
		/* Irritating users can call PAL_clrfen to disable the
		   FPU for the process.  The kernel will then trap in
		   do_switch_stack and undo_switch_stack when we try
		   to save and restore the FP registers.

		   Given that GCC by default generates code that uses the
		   FP registers, PAL_clrfen is not useful except for DoS
		   attacks.  So turn the bleeding FPU back on and be done
		   with it.  */
		current->thread.pal_flags |= 1;
		__reload_thread(&current->thread);
		return;

	      case 5: /* illoc */
	      default: /* unexpected instruction-fault type */
		      ;
	}
	send_sig(SIGILL, current, 1);
}
Example #4
0
static ssize_t
pipe_write(struct kiocb *iocb, const struct iovec *_iov,
           unsigned long nr_segs, loff_t ppos)
{
    struct file *filp = iocb->ki_filp;
    struct inode *inode = filp->f_path.dentry->d_inode;
    struct pipe_inode_info *pipe;
    ssize_t ret;
    int do_wakeup;
    struct iovec *iov = (struct iovec *)_iov;
    size_t total_len;
    ssize_t chars;

    total_len = iov_length(iov, nr_segs);
    /* Null write succeeds. */
    if (unlikely(total_len == 0))
        return 0;

    do_wakeup = 0;
    ret = 0;
    mutex_lock(&inode->i_mutex);
    pipe = inode->i_pipe;

    if (!pipe->readers) {
        send_sig(SIGPIPE, current, 0);
        ret = -EPIPE;
        goto out;
    }

    /* We try to merge small writes */
    chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
    if (pipe->nrbufs && chars != 0) {
        int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
                      (PIPE_BUFFERS-1);
        struct pipe_buffer *buf = pipe->bufs + lastbuf;
        const struct pipe_buf_operations *ops = buf->ops;
        int offset = buf->offset + buf->len;

        if (ops->can_merge && offset + chars <= PAGE_SIZE) {
            int error, atomic = 1;
            void *addr;

            error = ops->confirm(pipe, buf);
            if (error)
                goto out;

            iov_fault_in_pages_read(iov, chars);
redo1:
            addr = ops->map(pipe, buf, atomic);
            error = pipe_iov_copy_from_user(offset + addr, iov,
                                            chars, atomic);
            ops->unmap(pipe, buf, addr);
            ret = error;
            do_wakeup = 1;
            if (error) {
                if (atomic) {
                    atomic = 0;
                    goto redo1;
                }
                goto out;
            }
            buf->len += chars;
            total_len -= chars;
            ret = chars;
            if (!total_len)
                goto out;
        }
    }

    for (;;) {
        int bufs;

        if (!pipe->readers) {
            send_sig(SIGPIPE, current, 0);
            if (!ret)
                ret = -EPIPE;
            break;
        }
        bufs = pipe->nrbufs;
        if (bufs < PIPE_BUFFERS) {
            int newbuf = (pipe->curbuf + bufs) & (PIPE_BUFFERS-1);
            struct pipe_buffer *buf = pipe->bufs + newbuf;
            struct page *page = pipe->tmp_page;
            char *src;
            int error, atomic = 1;

            if (!page) {
                page = alloc_page(GFP_HIGHUSER);
                if (unlikely(!page)) {
                    ret = ret ? : -ENOMEM;
                    break;
                }
                pipe->tmp_page = page;
            }
Example #5
0
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free;
	int other_file;
	unsigned long nr_to_scan = sc->nr_to_scan;
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
	static DEFINE_RATELIMIT_STATE(lmk_rs, DEFAULT_RATELIMIT_INTERVAL, 1);
#endif
	unsigned long nr_cma_free;
	struct reclaim_state *reclaim_state = current->reclaim_state;
#if defined(CONFIG_CMA_PAGE_COUNTING)
	unsigned long nr_cma_inactive_file;
	unsigned long nr_cma_active_file;
	unsigned long cma_page_ratio;
	bool is_active_high;
	bool flag = 0;
#endif

	if (nr_to_scan > 0) {
		if (mutex_lock_interruptible(&scan_mutex) < 0)
			return 0;
	}

	other_free = global_page_state(NR_FREE_PAGES);

	nr_cma_free = global_page_state(NR_FREE_CMA_PAGES);
#ifdef CONFIG_ZSWAP
	if (!current_is_kswapd() || sc->priority <= 6)
#endif
		other_free -= nr_cma_free;

#if defined(CONFIG_CMA_PAGE_COUNTING)
	nr_cma_inactive_file = global_page_state(NR_CMA_INACTIVE_FILE);
	nr_cma_active_file = global_page_state(NR_CMA_ACTIVE_FILE);
	cma_page_ratio = 100 * global_page_state(NR_CMA_INACTIVE_FILE) /
				global_page_state(NR_INACTIVE_FILE);
	is_active_high = (global_page_state(NR_ACTIVE_FILE) >
				global_page_state(NR_INACTIVE_FILE)) ? 1 : 0;
#endif
	other_file = global_page_state(NR_FILE_PAGES);

#if defined(CONFIG_CMA_PAGE_COUNTING) && defined(CONFIG_EXCLUDE_LRU_LIVING_IN_CMA)
	if (get_nr_swap_pages() < SSWAP_LMK_THRESHOLD && cma_page_ratio >= CMA_PAGE_RATIO
			&& !is_active_high) {
		other_file = other_file - (nr_cma_inactive_file + nr_cma_active_file);
		flag = 1;
	}
#endif
	if (global_page_state(NR_SHMEM) + total_swapcache_pages < other_file)
		other_file -= global_page_state(NR_SHMEM) + total_swapcache_pages;
	else
		other_file = 0;

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
				nr_to_scan, sc->gfp_mask, other_free,
				other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     nr_to_scan, sc->gfp_mask, rem);

		if (nr_to_scan > 0)
			mutex_unlock(&scan_mutex);

		return rem;
	}
	selected_oom_score_adj = min_score_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;

		if (tsk->flags & PF_KTHREAD)
			continue;

		/* if task no longer has any memory ignore it */
		if (test_task_flag(tsk, TIF_MM_RELEASED))
			continue;

		if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			if (test_task_flag(tsk, TIF_MEMDIE)) {
				rcu_read_unlock();
				/* give the system time to free up the memory */
				msleep_interruptible(20);
				mutex_unlock(&scan_mutex);
				return 0;
			}
		}

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
#if defined(CONFIG_ZSWAP)
		if (atomic_read(&zswap_stored_pages)) {
			lowmem_print(3, "shown tasksize : %d\n", tasksize);
			tasksize += atomic_read(&zswap_pool_pages) * get_mm_counter(p->mm, MM_SWAPENTS)
				/ atomic_read(&zswap_stored_pages);
			lowmem_print(3, "real tasksize : %d\n", tasksize);
		}
#endif

		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
	}
	if (selected) {
#if defined(CONFIG_CMA_PAGE_COUNTING)
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
			"ofree %d, ofile %d(%c), is_kswapd %d - "
			"cma_free %lu priority %d cma_i_file %lu cma_a_file %lu\n",
			selected->pid, selected->comm,
			selected_oom_score_adj, selected_tasksize,
			other_free, other_file, flag ? '-' : '+',
			!!current_is_kswapd(),
			nr_cma_free, sc->priority,
			nr_cma_inactive_file, nr_cma_active_file);
#else
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
				"free memory = %d, reclaimable memory = %d "
				"is_kswapd %d cma_free %lu priority %d\n",
				selected->pid, selected->comm,
				selected_oom_score_adj, selected_tasksize,
				other_free, other_file,
				!!current_is_kswapd(),
				nr_cma_free, sc->priority);
#endif
		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		rcu_read_unlock();
#ifdef LMK_COUNT_READ
                lmk_count++;
#endif

#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
		if ((selected_oom_score_adj < lowmem_adj[5]) && __ratelimit(&lmk_rs)) {
			lowmem_print(1, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
					nr_to_scan, sc->gfp_mask, other_free,
					other_file, min_score_adj);
			show_mem(SHOW_MEM_FILTER_NODES);
			dump_tasks_info();
		}
#endif
		/* give the system time to free up the memory */
		msleep_interruptible(20);
		if(reclaim_state)
			reclaim_state->reclaimed_slab = selected_tasksize;
	} else
		rcu_read_unlock();

	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     nr_to_scan, sc->gfp_mask, rem);
	mutex_unlock(&scan_mutex);
	return rem;
}
Example #6
0
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct pipe_inode_info *pipe = filp->private_data;
	ssize_t ret = 0;
	int do_wakeup = 0;
	size_t total_len = iov_iter_count(from);
	ssize_t chars;

	/* Null write succeeds. */
	if (unlikely(total_len == 0))
		return 0;

	__pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		ret = -EPIPE;
		goto out;
	}

	/* We try to merge small writes */
	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
	if (pipe->nrbufs && chars != 0) {
		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
							(pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + lastbuf;
		int offset = buf->offset + buf->len;

		if (buf->ops->can_merge && offset + chars <= PAGE_SIZE) {
			ret = pipe_buf_confirm(pipe, buf);
			if (ret)
				goto out;

			ret = copy_page_from_iter(buf->page, offset, chars, from);
			if (unlikely(ret < chars)) {
				ret = -EFAULT;
				goto out;
			}
			do_wakeup = 1;
			buf->len += ret;
			if (!iov_iter_count(from))
				goto out;
		}
	}

	for (;;) {
		int bufs;

		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		bufs = pipe->nrbufs;
		if (bufs < pipe->buffers) {
			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;
			struct page *page = pipe->tmp_page;
			int copied;

			if (!page) {
				page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
				if (unlikely(!page)) {
					ret = ret ? : -ENOMEM;
					break;
				}
				pipe->tmp_page = page;
			}
Example #7
0
/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
			      struct splice_pipe_desc *spd)
{
	unsigned int spd_pages = spd->nr_pages;
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->ops = spd->ops;
			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;

			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->inode)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode) {
		mutex_unlock(&pipe->inode->i_mutex);

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		}
	}

	while (page_nr < spd_pages)
		page_cache_release(spd->pages[page_nr++]);

	return ret;
}
Example #8
0
void
UserProc::kill_forcibly()
{
	send_sig( SIGKILL );
}
Example #9
0
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
#ifdef ENHANCED_LMK_ROUTINE
	struct task_struct *selected[LOWMEM_DEATHPENDING_DEPTH] = {NULL,};
#else
	struct task_struct *selected = NULL;
#endif
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
#ifdef ENHANCED_LMK_ROUTINE
	int selected_tasksize[LOWMEM_DEATHPENDING_DEPTH] = {0,};
	int selected_oom_score_adj[LOWMEM_DEATHPENDING_DEPTH] = {OOM_ADJUST_MAX,};
	int all_selected_oom = 0;
	int max_selected_oom_idx = 0;
#else
	int selected_tasksize = 0;
	int selected_oom_score_adj;
#endif
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
	int other_file = global_page_state(NR_FILE_PAGES) - global_page_state(NR_SHMEM);

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
				sc->nr_to_scan, sc->gfp_mask, other_free,
				other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		return rem;
	}

#ifdef ENHANCED_LMK_ROUTINE
	for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++)
		selected_oom_score_adj[i] = min_score_adj;
#else
	selected_oom_score_adj = min_score_adj;
#endif

	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;
#ifdef ENHANCED_LMK_ROUTINE
		int is_exist_oom_task = 0;
#endif

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
			time_before_eq(jiffies, lowmem_deathpending_timeout)) {
				task_unlock(p);
				read_unlock(&tasklist_lock);
				return 0;
		}
		
		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;

#ifdef ENHANCED_LMK_ROUTINE
		if (all_selected_oom < LOWMEM_DEATHPENDING_DEPTH) {
			for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
				if (!selected[i]) {
					is_exist_oom_task = 1;
					max_selected_oom_idx = i;
					break;
				}
			}
		} else if (selected_oom_score_adj[max_selected_oom_idx] < oom_score_adj ||
			(selected_oom_score_adj[max_selected_oom_idx] == oom_score_adj &&
			selected_tasksize[max_selected_oom_idx] < tasksize)) {
			is_exist_oom_task = 1;
		}

		if (is_exist_oom_task) {
			selected[max_selected_oom_idx] = p;
			selected_tasksize[max_selected_oom_idx] = tasksize;
			selected_oom_score_adj[max_selected_oom_idx] = oom_score_adj;

			if (all_selected_oom < LOWMEM_DEATHPENDING_DEPTH)
				all_selected_oom++;

			if (all_selected_oom == LOWMEM_DEATHPENDING_DEPTH) {
				for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
					if (selected_oom_score_adj[i] < selected_oom_score_adj[max_selected_oom_idx])
						max_selected_oom_idx = i;
					else if (selected_oom_score_adj[i] == selected_oom_score_adj[max_selected_oom_idx] &&
						selected_tasksize[i] < selected_tasksize[max_selected_oom_idx])
						max_selected_oom_idx = i;
				}
			}

			lowmem_print(2, "select %d (%s), adj %d, \
					size %d, to kill\n",
				p->pid, p->comm, oom_score_adj, tasksize);
		}
#else
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#endif
	}
#ifdef ENHANCED_LMK_ROUTINE
	for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
		if (selected[i]) {
			lowmem_print(1, "send sigkill to %d (%s), adj %d,\
				     size %d\n",
				     selected[i]->pid, selected[i]->comm,
				     selected_oom_score_adj[i],
				     selected_tasksize[i]);
			lowmem_deathpending_timeout = jiffies + HZ;
			send_sig(SIGKILL, selected[i], 0);
			set_tsk_thread_flag(selected[i], TIF_MEMDIE);
			rem -= selected_tasksize[i];
#ifdef LMK_COUNT_READ
			lmk_count++;
#endif
		}
	}
#else
	if (selected) {
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
#ifdef LMK_COUNT_READ
		lmk_count++;
#endif
	}
#endif
	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	read_unlock(&tasklist_lock);
	return rem;
}
Example #10
0
void
UserProc::request_periodic_ckpt()
{
	send_sig( SIGUSR2 );
}
Example #11
0
void
UserProc::request_exit()
{
	exit_requested = TRUE;
	send_sig( soft_kill_sig );
}
Example #12
0
void
UserProc::request_ckpt()
{
	send_sig( SIGTSTP );
}
Example #13
0
void
UserProc::resume()
{
	send_sig( SIGCONT );
	state = EXECUTING;
}
Example #14
0
void
UserProc::suspend()
{
	send_sig( SIGSTOP );
	state = _SUSPENDED;
}
Example #15
0
void window_ret_fault(struct pt_regs *regs)
{
	send_sig(SIGSEGV, current, 1);
}
Example #16
0
static int unix_read(struct socket *sock, char *ubuf, int size, int nonblock)
{
    struct unix_proto_data *upd;
    int todo, avail;

    if ((todo = size) <= 0)
	return 0;

    upd = UN_DATA(sock);

    while (!(avail = UN_BUF_AVAIL(upd))) {
	if (sock->state != SS_CONNECTED)
	    return ((sock->state == SS_DISCONNECTING) ? 0 : -EINVAL);

	if (nonblock)
	    return -EAGAIN;

	sock->flags |= SO_WAITDATA;
	interruptible_sleep_on(sock->wait);
	sock->flags &= ~SO_WAITDATA;

	if (current->signal /* & ~current->blocked */ )
	    return -ERESTARTSYS;
    }

/*
 *	Copy from the read buffer into the user's buffer,
 *	watching for wraparound. Then we wake up the writer.
 */

    down(&upd->sem);
    do {
	int part, cando;

	if (avail <= 0) {
	    printk("UNIX: read: avail is negative (%d)\n", avail);
	    send_sig(SIGKILL, current, 1);
	    return -EPIPE;
	}

	if ((cando = todo) > avail)
	    cando = avail;

	if (cando > (part = UN_BUF_SIZE - upd->bp_tail))
	    cando = part;

	memcpy_tofs(ubuf, upd->buf + upd->bp_tail, cando);
	upd->bp_tail = (upd->bp_tail + cando) & (UN_BUF_SIZE - 1);
	ubuf += cando;
	todo -= cando;

	if (sock->state == SS_CONNECTED) {
	    wake_up_interruptible(sock->conn->wait);
#if 0
	    sock_wake_async(sock->conn, 2);
#endif
	}
	avail = UN_BUF_AVAIL(upd);
    } while (todo && avail);

    up(&upd->sem);

    return (size - todo);
}
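
The copy loop in unix_read() (mirrored by unix_write() in Example #18) leans on the buffer size being a power of two: each memcpy is clamped to the bytes left before the end of the buffer, and the index then wraps with a mask. A self-contained sketch of that wraparound pattern, outside the old socket code:

#include <string.h>

#define RING_SIZE 4096			/* must be a power of two */

struct ring {
	char buf[RING_SIZE];
	unsigned int tail;		/* next byte to read */
};

/* Copy up to 'want' bytes out; 'avail' is how much data the ring holds. */
static size_t ring_read(struct ring *r, char *dst, size_t want, size_t avail)
{
	size_t done = 0;

	while (want && avail) {
		size_t cando = want < avail ? want : avail;
		size_t part = RING_SIZE - r->tail;	/* bytes before the wrap point */

		if (cando > part)
			cando = part;
		memcpy(dst + done, r->buf + r->tail, cando);
		r->tail = (r->tail + cando) & (RING_SIZE - 1);
		done += cando;
		want -= cando;
		avail -= cando;
	}
	return done;
}
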
Example #17
0
//---------------------------------------------------------------------------
//
// Function:    EplSdoUdpuConfig
//
// Description: reconfigure the socket with a new IP address
//              -> needed for NMT ResetConfiguration
//
// Parameters:  ulIpAddr_p      = IpAddress in platform byte order
//              uiPort_p        = port number in platform byte order
//
//
// Returns:     tEplKernel  = Errorcode
//
//
// State:
//
//---------------------------------------------------------------------------
tEplKernel PUBLIC EplSdoUdpuConfig(unsigned long ulIpAddr_p, unsigned int uiPort_p)
{
tEplKernel          Ret;
struct sockaddr_in  Addr;
int                 iError;

#if (TARGET_SYSTEM == _WIN32_)
BOOL                fTermError;
unsigned long       ulThreadId;
#endif

    Ret = kEplSuccessful;

    if (uiPort_p == 0)
    {   // set UDP port to default port number
        uiPort_p = EPL_C_SDO_EPL_PORT;
    }
    else if (uiPort_p > 65535)
    {
        Ret = kEplSdoUdpSocketError;
        goto Exit;
    }

    if (SdoUdpInstance_g.m_ThreadHandle != 0)
    {   // listen thread was started

        // close old thread
#if (TARGET_SYSTEM == _WIN32_)
        fTermError = TerminateThread(SdoUdpInstance_g.m_ThreadHandle, 0);
        if(fTermError == FALSE)
        {
            Ret = kEplSdoUdpThreadError;
            goto Exit;
        }

#elif (TARGET_SYSTEM == _LINUX_) && defined(__KERNEL__)
        send_sig(SIGTERM, SdoUdpInstance_g.m_ThreadHandle, 1);
        kthread_stop(SdoUdpInstance_g.m_ThreadHandle);
#endif

        SdoUdpInstance_g.m_ThreadHandle = 0;
    }

    if (SdoUdpInstance_g.m_UdpSocket != INVALID_SOCKET)
    {
        // close socket
        iError = closesocket(SdoUdpInstance_g.m_UdpSocket);
        SdoUdpInstance_g.m_UdpSocket = INVALID_SOCKET;
        if(iError != 0)
        {
            Ret = kEplSdoUdpSocketError;
            goto Exit;
        }
    }

    // create Socket
    SdoUdpInstance_g.m_UdpSocket = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP);
    if (SdoUdpInstance_g.m_UdpSocket == INVALID_SOCKET)
    {
        Ret = kEplSdoUdpNoSocket;
        EPL_DBGLVL_SDO_TRACE("EplSdoUdpuConfig: socket() failed\n");
        goto Exit;
    }

    // bind socket
    Addr.sin_family = AF_INET;
    Addr.sin_port = htons((unsigned short) uiPort_p);
    Addr.sin_addr.s_addr = htonl(ulIpAddr_p);
    iError = bind(SdoUdpInstance_g.m_UdpSocket, (struct sockaddr*)&Addr, sizeof (Addr));
    if (iError < 0)
    {
        //iError = WSAGetLastError();
        EPL_DBGLVL_SDO_TRACE("EplSdoUdpuConfig: bind() finished with %i\n", iError);
        Ret = kEplSdoUdpNoSocket;
        goto Exit;
    }

    // create Listen-Thread
#if (TARGET_SYSTEM == _WIN32_)
    // for win32

    // create thread
    SdoUdpInstance_g.m_ThreadHandle = CreateThread(NULL,
                                                    0,
                                                    EplSdoUdpThread,
                                                    &SdoUdpInstance_g,
                                                    0,
                                                    &ulThreadId);
    if (SdoUdpInstance_g.m_ThreadHandle == NULL)
    {
        Ret = kEplSdoUdpThreadError;
        goto Exit;
    }

#elif (TARGET_SYSTEM == _LINUX_) && defined(__KERNEL__)

    SdoUdpInstance_g.m_ThreadHandle = kthread_run(EplSdoUdpThread, &SdoUdpInstance_g, "EplSdoUdpThread");
    if (IS_ERR(SdoUdpInstance_g.m_ThreadHandle))
    {
        Ret = kEplSdoUdpThreadError;
        goto Exit;
    }
#endif


Exit:
    return Ret;

}
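
In the Linux kernel branch, EplSdoUdpuConfig() tears the old listener down with send_sig(SIGTERM, ...) followed by kthread_stop(): the signal is what knocks the thread out of its blocking receive so it can notice the stop request. A hedged sketch of what such a listener loop typically looks like; this is an assumed structure, not the actual EplSdoUdpThread, and do_blocking_receive() is a hypothetical helper.

#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/signal.h>

static int do_blocking_receive(void *arg);	/* hypothetical: blocks in the socket receive */

static int listen_thread(void *arg)
{
	allow_signal(SIGTERM);	/* let send_sig(SIGTERM, ...) interrupt the receive */

	while (!kthread_should_stop()) {
		int err = do_blocking_receive(arg);

		if (err == -ERESTARTSYS || err == -EINTR)
			continue;	/* woken by the signal; re-check the stop flag */
		if (err < 0)
			break;
	}
	return 0;
}
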
Example #18
0
static int unix_write(struct socket *sock, char *ubuf, int size, int nonblock)
{
    struct unix_proto_data *pupd;
    int todo, space;

    if ((todo = size) <= 0)
	return 0;

    if (sock->state != SS_CONNECTED) {
	if (sock->state == SS_DISCONNECTING) {
	    send_sig(SIGPIPE, current, 1);
	    return -EPIPE;
	}
	return -EINVAL;
    }

    pupd = UN_DATA(sock)->peerupd;	/* safer than sock->conn */

    while (!(space = UN_BUF_SPACE(pupd))) {
	sock->flags |= SO_NOSPACE;

	if (nonblock)
	    return -EAGAIN;

	sock->flags &= ~SO_NOSPACE;
	interruptible_sleep_on(sock->wait);

	if (current->signal /* & ~current->blocked */ )
	    return -ERESTARTSYS;

	if (sock->state == SS_DISCONNECTING) {
	    send_sig(SIGPIPE, current, 1);
	    return -EPIPE;
	}
    }

/*
 *	Copy from the user's buffer to the write buffer,
 *	watching for wraparound. Then we wake up the reader.
 */

    down(&pupd->sem);
    do {
	int part, cando;

	if (space <= 0) {
	    printk("UNIX: write: space is negative (%d)\n", space);
	    send_sig(SIGKILL, current, 1);
	    return -EPIPE;
	}

	/*
	 *      We may become disconnected inside this loop, so watch
	 *      for it (peerupd is safe until we close).
	 */

	if (sock->state == SS_DISCONNECTING) {
	    send_sig(SIGPIPE, current, 1);
	    up(&pupd->sem);
	    return -EPIPE;
	}

	if ((cando = todo) > space)
	    cando = space;

	if (cando > (part = UN_BUF_SIZE - pupd->bp_head))
	    cando = part;

	memcpy_fromfs(pupd->buf + pupd->bp_head, ubuf, cando);
	pupd->bp_head = (pupd->bp_head + cando) & (UN_BUF_SIZE - 1);

	ubuf += cando;
	todo -= cando;

	if (sock->state == SS_CONNECTED) {
	    wake_up_interruptible(sock->conn->wait);
#if 0
	    sock_wake_async(sock->conn, 1);
#endif
	}
	space = UN_BUF_SPACE(pupd);
    } while (todo && space);

    up(&pupd->sem);

    return (size - todo);
}
Example #19
0
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);

	tune_lmk_param(&other_free, &other_file, sc);

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
				sc->nr_to_scan, sc->gfp_mask, other_free,
				other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		return rem;
	}
	selected_oom_score_adj = min_score_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;

		if (tsk->flags & PF_KTHREAD)
			continue;

		if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			if (test_task_flag(tsk, TIF_MEMDIE)) {
				rcu_read_unlock();
				return 0;
			}
		}

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
	}
	if (selected) {
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
	}
	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	rcu_read_unlock();
	return rem;
}
Example #20
0
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
#ifdef ENHANCED_LMK_ROUTINE
	struct task_struct *selected[LOWMEM_DEATHPENDING_DEPTH] = {NULL,};
#else
	struct task_struct *selected = NULL;
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO_VERBOSE
	static DEFINE_RATELIMIT_STATE(lmk_rs, DEFAULT_RATELIMIT_INTERVAL, 0);
#else
	static DEFINE_RATELIMIT_STATE(lmk_rs, 6*DEFAULT_RATELIMIT_INTERVAL, 0);
#endif
#endif
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
#ifdef ENHANCED_LMK_ROUTINE
	int selected_tasksize[LOWMEM_DEATHPENDING_DEPTH] = {0,};
	int selected_oom_score_adj[LOWMEM_DEATHPENDING_DEPTH] = {OOM_ADJUST_MAX,};
	int all_selected_oom = 0;
	int max_selected_oom_idx = 0;
#else
	int selected_tasksize = 0;
	int selected_oom_score_adj;
#endif
#ifdef CONFIG_SAMP_HOTNESS
	int selected_hotness_adj = 0;
#endif
	int array_size = ARRAY_SIZE(lowmem_adj);
#if (!defined(CONFIG_MACH_JF) \
	&& !defined(CONFIG_SEC_PRODUCT_8960)\
	)
	unsigned long nr_to_scan = sc->nr_to_scan;
#endif
	struct reclaim_state *reclaim_state = current->reclaim_state;
#ifndef CONFIG_CMA
	int other_free = global_page_state(NR_FREE_PAGES);
#else
	int other_free = global_page_state(NR_FREE_PAGES) -
				global_page_state(NR_FREE_CMA_PAGES);
#endif
	int other_file = global_page_state(NR_FILE_PAGES) - global_page_state(NR_SHMEM);
#if defined(CONFIG_RUNTIME_COMPCACHE) || defined(CONFIG_ZSWAP)
	other_file -= total_swapcache_pages;
#endif /* CONFIG_RUNTIME_COMPCACHE || CONFIG_ZSWAP */
	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
				sc->nr_to_scan, sc->gfp_mask, other_free,
				other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		return rem;
	}

#ifdef ENHANCED_LMK_ROUTINE
	for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++)
		selected_oom_score_adj[i] = min_score_adj;
#else
	selected_oom_score_adj = min_score_adj;
#endif

	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;
#ifdef ENHANCED_LMK_ROUTINE
		int is_exist_oom_task = 0;
#endif
#ifdef CONFIG_SAMP_HOTNESS
		int hotness_adj = 0;
#endif
		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
			time_before_eq(jiffies, lowmem_deathpending_timeout)) {
				task_unlock(p);
				read_unlock(&tasklist_lock);
				return 0;
		}
		
		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
#if defined(CONFIG_ZSWAP)
		if (atomic_read(&zswap_stored_pages)) {
			lowmem_print(3, "shown tasksize : %d\n", tasksize);
			tasksize += atomic_read(&zswap_pool_pages) * get_mm_counter(p->mm, MM_SWAPENTS)
				/ atomic_read(&zswap_stored_pages);
			lowmem_print(3, "real tasksize : %d\n", tasksize);
		}
#endif

#ifdef CONFIG_SAMP_HOTNESS
		hotness_adj = p->signal->hotness_adj;
#endif
		task_unlock(p);
		if (tasksize <= 0)
			continue;

#ifdef ENHANCED_LMK_ROUTINE
		if (all_selected_oom < LOWMEM_DEATHPENDING_DEPTH) {
			for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
				if (!selected[i]) {
					is_exist_oom_task = 1;
					max_selected_oom_idx = i;
					break;
				}
			}
		} else if (selected_oom_score_adj[max_selected_oom_idx] < oom_score_adj ||
			(selected_oom_score_adj[max_selected_oom_idx] == oom_score_adj &&
			selected_tasksize[max_selected_oom_idx] < tasksize)) {
			is_exist_oom_task = 1;
		}

		if (is_exist_oom_task) {
			selected[max_selected_oom_idx] = p;
			selected_tasksize[max_selected_oom_idx] = tasksize;
			selected_oom_score_adj[max_selected_oom_idx] = oom_score_adj;

			if (all_selected_oom < LOWMEM_DEATHPENDING_DEPTH)
				all_selected_oom++;

			if (all_selected_oom == LOWMEM_DEATHPENDING_DEPTH) {
				for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
					if (selected_oom_score_adj[i] < selected_oom_score_adj[max_selected_oom_idx])
						max_selected_oom_idx = i;
					else if (selected_oom_score_adj[i] == selected_oom_score_adj[max_selected_oom_idx] &&
						selected_tasksize[i] < selected_tasksize[max_selected_oom_idx])
						max_selected_oom_idx = i;
				}
			}

			lowmem_print(2, "select %d (%s), adj %d, \
					size %d, to kill\n",
				p->pid, p->comm, oom_score_adj, tasksize);
		}
#else
		if (selected) {
#ifdef CONFIG_SAMP_HOTNESS
			if (min_score_adj <= lowmem_adj[4]) {
#endif
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
#ifdef CONFIG_SAMP_HOTNESS
			} else {
				if (hotness_adj > selected_hotness_adj)
					continue;
				if (hotness_adj == selected_hotness_adj && tasksize <= selected_tasksize)
					continue;
			}
#endif
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
#ifdef CONFIG_SAMP_HOTNESS
		selected_hotness_adj = hotness_adj;
#endif
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#endif
	}
#ifdef ENHANCED_LMK_ROUTINE
	for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
		if (selected[i]) {
#ifdef CONFIG_SAMP_HOTNESS			
			lowmem_print(1, "send sigkill to %d (%s), adj %d,\
				     size %d, free memory = %d, reclaimable memory = %d ,hotness %d\n",
				     selected[i]->pid, selected[i]->comm,
				     selected_oom_score_adj[i],
				     selected_tasksize[i],
					 other_free, other_file,
					 selected_hotness_adj);
#else
			lowmem_print(1, "send sigkill to %d (%s), adj %d,\
				     size %d, free memory = %d, reclaimable memory = %d\n",
				     selected[i]->pid, selected[i]->comm,
				     selected_oom_score_adj[i],
				     selected_tasksize[i],
					 other_free, other_file);
#endif
			lowmem_deathpending_timeout = jiffies + HZ;
			send_sig(SIGKILL, selected[i], 0);
			set_tsk_thread_flag(selected[i], TIF_MEMDIE);
			rem -= selected_tasksize[i];
			if(reclaim_state)
				reclaim_state->reclaimed_slab += selected_tasksize[i];
#ifdef LMK_COUNT_READ
			lmk_count++;
#endif
		}
	}
#else
	if (selected) {
#ifdef CONFIG_SAMP_HOTNESS
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d ,hotness %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize,selected_hotness_adj);
#else
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
#endif
		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		if(reclaim_state)
			reclaim_state->reclaimed_slab = selected_tasksize;
#ifdef LMK_COUNT_READ
		lmk_count++;
#endif
	}
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
	if (__ratelimit(&lmk_rs)) {
		lowmem_print(1, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
				nr_to_scan, sc->gfp_mask, other_free,
				other_file, min_score_adj);
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO_VERBOSE
		show_mem(SHOW_MEM_FILTER_NODES);
		dump_tasks_info();
#endif
	}
#endif
	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	read_unlock(&tasklist_lock);
	return rem;
}
Example #21
0
/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Goto-purists beware: the only reason for goto's here is that it results
 * in better assembly code.. The "default" path will see no jumps at all.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 */
void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma,
	unsigned long address, int write_access)
{
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t *page_table, pte;
	unsigned long old_page, new_page;

	new_page = __get_free_page(GFP_KERNEL);
	page_dir = pgd_offset(vma->vm_mm, address);
	if (pgd_none(*page_dir))
		goto end_wp_page;
	if (pgd_bad(*page_dir))
		goto bad_wp_pagedir;
	page_middle = pmd_offset(page_dir, address);
	if (pmd_none(*page_middle))
		goto end_wp_page;
	if (pmd_bad(*page_middle))
		goto bad_wp_pagemiddle;
	page_table = pte_offset(page_middle, address);
	pte = *page_table;
	if (!pte_present(pte))
		goto end_wp_page;
	if (pte_write(pte))
		goto end_wp_page;
	old_page = pte_page(pte);
	if (old_page >= high_memory)
		goto bad_wp_page;
	tsk->min_flt++;
	/*
	 * Do we need to copy?
	 */
	if (mem_map[MAP_NR(old_page)].count != 1) {
		if (new_page) {
			if (PageReserved(mem_map + MAP_NR(old_page)))
				++vma->vm_mm->rss;
			copy_page(old_page,new_page);
			flush_page_to_ram(old_page);
			flush_page_to_ram(new_page);
			flush_cache_page(vma, address);
			set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
			free_page(old_page);
			flush_tlb_page(vma, address);
			return;
		}
		flush_cache_page(vma, address);
		set_pte(page_table, BAD_PAGE);
		flush_tlb_page(vma, address);
		free_page(old_page);
		oom(tsk);
		return;
	}
	flush_cache_page(vma, address);
	set_pte(page_table, pte_mkdirty(pte_mkwrite(pte)));
	flush_tlb_page(vma, address);
	if (new_page)
		free_page(new_page);
	return;
bad_wp_page:
	printk("do_wp_page: bogus page at address %08lx (%08lx)\n",address,old_page);
	send_sig(SIGKILL, tsk, 1);
	goto end_wp_page;
bad_wp_pagemiddle:
	printk("do_wp_page: bogus page-middle at address %08lx (%08lx)\n", address, pmd_val(*page_middle));
	send_sig(SIGKILL, tsk, 1);
	goto end_wp_page;
bad_wp_pagedir:
	printk("do_wp_page: bogus page-dir entry at address %08lx (%08lx)\n", address, pgd_val(*page_dir));
	send_sig(SIGKILL, tsk, 1);
end_wp_page:
	if (new_page)
		free_page(new_page);
	return;
}
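
The heart of do_wp_page() is the count check on the old page: a page mapped by exactly one user is simply made writable and dirty, while a shared page is copied into the fresh page and the writer drops its reference on the original. Stripped of the page-table plumbing, the decision looks roughly like this (illustrative structures and callbacks, not the mm API):

#include <stddef.h>

struct cow_page {
	int refcount;
	void *data;
};

/* Returns the page the faulting writer should map writable, or NULL on OOM. */
static struct cow_page *handle_cow_write(struct cow_page *old,
					 struct cow_page *(*alloc_page_fn)(void),
					 void (*copy_fn)(void *dst, const void *src))
{
	struct cow_page *fresh;

	if (old->refcount == 1)
		return old;		/* sole user: no copy needed, just make it writable */

	fresh = alloc_page_fn();
	if (!fresh)
		return NULL;		/* caller treats this as out-of-memory */
	copy_fn(fresh->data, old->data);
	old->refcount--;		/* the writer no longer maps the shared page */
	fresh->refcount = 1;
	return fresh;
}
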
Example #22
0
static int fault_in_page(int taskid,
			 struct vm_area_struct *vma,
			 unsigned long address, int write)
{
	static unsigned last_address;
	static int last_task, loop_counter;
	struct task_struct *tsk = task[taskid];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!tsk || !tsk->mm)
		return 1;

	if (!vma || (write && !(vma->vm_flags & VM_WRITE)))
	  goto bad_area;
	if (vma->vm_start > address)
	  goto bad_area;

	if (address == last_address && taskid == last_task) {
		loop_counter++;
	} else {
		loop_counter = 0;
		last_address = address; 
		last_task = taskid;
	}

	if (loop_counter == WRITE_LIMIT && !write) {
		printk("MSC bug? setting write request\n");
		stats.errors++;
		write = 1;
	}

	if (loop_counter == LOOP_LIMIT) {
		printk("MSC bug? failing request\n");
		stats.errors++;
		return 1;
	}

	pgd = pgd_offset(vma->vm_mm, address);
	pmd = pmd_alloc(pgd,address);
	if(!pmd)
		goto no_memory;
	pte = pte_alloc(pmd, address);
	if(!pte)
		goto no_memory;
	if(!pte_present(*pte)) {
		handle_mm_fault(tsk, vma, address, write);
		goto finish_up;
	}
	set_pte(pte, pte_mkyoung(*pte));
	flush_tlb_page(vma, address);
	if(!write)
		goto finish_up;
	if(pte_write(*pte)) {
		set_pte(pte, pte_mkdirty(*pte));
		flush_tlb_page(vma, address);
		goto finish_up;
	}
	handle_mm_fault(tsk, vma, address, write);

	/* Fall through for do_wp_page */
finish_up:
	stats.success++;
	return 0;

no_memory:
	stats.failure++;
	oom(tsk);
	return 1;
	
bad_area:	  
	stats.failure++;
	tsk->tss.sig_address = address;
	tsk->tss.sig_desc = SUBSIG_NOMAPPING;
	send_sig(SIGSEGV, tsk, 1);
	return 1;
}
Example #23
0
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_score_adj;
	int selected_oom_adj = 0;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES);
	int other_file = global_page_state(NR_FILE_PAGES) -
		global_page_state(NR_SHMEM) - global_page_state(NR_MLOCK);
	int fork_boost = 0;
	int *adj_array;
	size_t *min_array;

	if (lowmem_fork_boost &&
		time_before_eq(jiffies, lowmem_fork_boost_timeout)) {
		for (i = 0; i < lowmem_minfree_size; i++)
			minfree_tmp[i] = lowmem_minfree[i] + lowmem_fork_boost_minfree[i];

		adj_array = fork_boost_adj;
		min_array = minfree_tmp;
	}
	else {
		adj_array = lowmem_adj;
		min_array = lowmem_minfree;
	}

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;

	for (i = 0; i < array_size; i++) {
		if (other_free < min_array[i] &&
		    other_file < min_array[i]) {
			min_score_adj = adj_array[i];
			fork_boost = lowmem_fork_boost_minfree[i];
			break;
		}
	}

	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
				sc->nr_to_scan, sc->gfp_mask, other_free,
				other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		return rem;
	}
	selected_oom_score_adj = min_score_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
		    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			lowmem_print(2, "%d (%s), oom_adj %d score_adj %d, is exiting, return\n"
					, p->pid, p->comm, p->signal->oom_adj, p->signal->oom_score_adj);
			task_unlock(p);
			rcu_read_unlock();
			return 0;
		}
		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		selected_oom_adj = p->signal->oom_adj;
		lowmem_print(2, "select %d (%s), oom_adj %d score_adj %d, size %d, to kill\n",
			     p->pid, p->comm, selected_oom_adj, oom_score_adj, tasksize);
	}
	if (selected) {
		lowmem_print(1, "[%s] send sigkill to %d (%s), oom_adj %d, score_adj %d,"
			" min_score_adj %d, size %dK, free %dK, file %dK, fork_boost %dK\n",
			     current->comm, selected->pid, selected->comm,
			     selected_oom_adj, selected_oom_score_adj,
			     min_score_adj, selected_tasksize << 2,
			     other_free << 2, other_file << 2, fork_boost << 2);
		lowmem_deathpending_timeout = jiffies + HZ;
		if (selected_oom_adj < 7)
		{
			dump_tasks();
		}
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
	}
	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	rcu_read_unlock();
	return rem;
}
Example #24
0
int
nfsd_svc(unsigned short port, int nrservs)
{
	int	error;
	int	none_left;	
	struct list_head *victim;
	
	lock_kernel();
	dprintk("nfsd: creating service\n");
	error = -EINVAL;
	if (nrservs <= 0)
		nrservs = 0;
	if (nrservs > NFSD_MAXSERVS)
		nrservs = NFSD_MAXSERVS;
	
	/* Readahead param cache - will no-op if it already exists */
	error =	nfsd_racache_init(2*nrservs);
	if (error<0)
		goto out;
	error = nfs4_state_init();
	if (error<0)
		goto out;
	if (!nfsd_serv) {
		atomic_set(&nfsd_busy, 0);
		error = -ENOMEM;
		nfsd_serv = svc_create(&nfsd_program, NFSD_BUFSIZE);
		if (nfsd_serv == NULL)
			goto out;
		error = svc_makesock(nfsd_serv, IPPROTO_UDP, port);
		if (error < 0)
			goto failure;

#ifdef CONFIG_NFSD_TCP
		error = svc_makesock(nfsd_serv, IPPROTO_TCP, port);
		if (error < 0)
			goto failure;
#endif
		do_gettimeofday(&nfssvc_boot);		/* record boot time */
	} else
		nfsd_serv->sv_nrthreads++;
	nrservs -= (nfsd_serv->sv_nrthreads-1);
	while (nrservs > 0) {
		nrservs--;
		__module_get(THIS_MODULE);
		error = svc_create_thread(nfsd, nfsd_serv);
		if (error < 0) {
			module_put(THIS_MODULE);
			break;
		}
	}
	victim = nfsd_list.next;
	while (nrservs < 0 && victim != &nfsd_list) {
		struct nfsd_list *nl =
			list_entry(victim,struct nfsd_list, list);
		victim = victim->next;
		send_sig(SIG_NOCLEAN, nl->task, 1);
		nrservs++;
	}
 failure:
	none_left = (nfsd_serv->sv_nrthreads == 1);
	svc_destroy(nfsd_serv);		/* Release server */
	if (none_left) {
		nfsd_serv = NULL;
		nfsd_racache_shutdown();
		nfs4_state_shutdown();
	}
 out:
	unlock_kernel();
	return error;
}
Example #25
0
static int android_oom_handler(struct notifier_block *nb,
				      unsigned long val, void *data)
{
	struct task_struct *tsk;
#ifdef MULTIPLE_OOM_KILLER
	struct task_struct *selected[OOM_DEPTH] = {NULL,};
#else
	struct task_struct *selected = NULL;
#endif
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
#ifdef MULTIPLE_OOM_KILLER
	int selected_tasksize[OOM_DEPTH] = {0,};
	int selected_oom_score_adj[OOM_DEPTH] = {OOM_ADJUST_MAX,};
	int all_selected_oom = 0;
	int max_selected_oom_idx = 0;
#else
	int selected_tasksize = 0;
	int selected_oom_score_adj;
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL/5, 1);
#endif

	unsigned long *freed = data;
#if defined(CONFIG_CMA_PAGE_COUNTING)
	unsigned long nr_cma_free;
	unsigned long nr_cma_inactive_file;
	unsigned long nr_cma_active_file;
	int other_free;
	int other_file;

	nr_cma_free = global_page_state(NR_FREE_CMA_PAGES);
	other_free = global_page_state(NR_FREE_PAGES) - nr_cma_free;

	nr_cma_inactive_file = global_page_state(NR_CMA_INACTIVE_FILE);
	nr_cma_active_file = global_page_state(NR_CMA_ACTIVE_FILE);
	other_file = global_page_state(NR_FILE_PAGES) -
					global_page_state(NR_SHMEM) -
					total_swapcache_pages -
					nr_cma_inactive_file -
					nr_cma_active_file;
#endif

	/* show status */
	pr_warning("%s invoked Android-oom-killer: "
		"oom_adj=%d, oom_score_adj=%d\n",
		current->comm, current->signal->oom_adj,
		current->signal->oom_score_adj);
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
	if (__ratelimit(&oom_rs)) {
		dump_stack();
		show_mem(SHOW_MEM_FILTER_NODES);
		dump_tasks_info();
	}
#endif

	min_score_adj = 0;
#ifdef MULTIPLE_OOM_KILLER
	for (i = 0; i < OOM_DEPTH; i++)
		selected_oom_score_adj[i] = min_score_adj;
#else
	selected_oom_score_adj = min_score_adj;
#endif

	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;
#ifdef MULTIPLE_OOM_KILLER
		int is_exist_oom_task = 0;
#endif

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;

		lowmem_print(2, "oom: ------ %d (%s), adj %d, size %d\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#ifdef MULTIPLE_OOM_KILLER
		if (all_selected_oom < OOM_DEPTH) {
			for (i = 0; i < OOM_DEPTH; i++) {
				if (!selected[i]) {
					is_exist_oom_task = 1;
					max_selected_oom_idx = i;
					break;
				}
			}
		} else if (selected_oom_score_adj[max_selected_oom_idx] < oom_score_adj ||
			(selected_oom_score_adj[max_selected_oom_idx] == oom_score_adj &&
			selected_tasksize[max_selected_oom_idx] < tasksize)) {
			is_exist_oom_task = 1;
		}

		if (is_exist_oom_task) {
			selected[max_selected_oom_idx] = p;
			selected_tasksize[max_selected_oom_idx] = tasksize;
			selected_oom_score_adj[max_selected_oom_idx] = oom_score_adj;

			if (all_selected_oom < OOM_DEPTH)
				all_selected_oom++;

			if (all_selected_oom == OOM_DEPTH) {
				for (i = 0; i < OOM_DEPTH; i++) {
					if (selected_oom_score_adj[i] < selected_oom_score_adj[max_selected_oom_idx])
						max_selected_oom_idx = i;
					else if (selected_oom_score_adj[i] == selected_oom_score_adj[max_selected_oom_idx] &&
						selected_tasksize[i] < selected_tasksize[max_selected_oom_idx])
						max_selected_oom_idx = i;
				}
			}

			lowmem_print(2, "oom: max_selected_oom_idx(%d) select %d (%s), adj %d, \
					size %d, to kill\n",
				max_selected_oom_idx, p->pid, p->comm, oom_score_adj, tasksize);
		}
#else
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "oom: select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#endif
	}
#ifdef MULTIPLE_OOM_KILLER
	for (i = 0; i < OOM_DEPTH; i++) {
		if (selected[i]) {
#if defined(CONFIG_CMA_PAGE_COUNTING)
			lowmem_print(1, "oom: send sigkill to %d (%s), adj %d, "
				"size %d ofree %d ofile %d "
				"cma_free %lu cma_i_file %lu cma_a_file %lu\n",
				selected[i]->pid, selected[i]->comm,
				selected_oom_score_adj[i],
				selected_tasksize[i],
				other_free, other_file,
				nr_cma_free, nr_cma_inactive_file, nr_cma_active_file);
#else
			lowmem_print(1, "oom: send sigkill to %d (%s), adj %d,\
				     size %d\n",
				     selected[i]->pid, selected[i]->comm,
				     selected_oom_score_adj[i],
				     selected_tasksize[i]);
#endif
			send_sig(SIGKILL, selected[i], 0);
			rem -= selected_tasksize[i];
			*freed += (unsigned long)selected_tasksize[i];
#ifdef OOM_COUNT_READ
			oom_count++;
#endif

		}
	}
#else
	if (selected) {
		lowmem_print(1, "oom: send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		*freed += (unsigned long)selected_tasksize;
#ifdef OOM_COUNT_READ
		oom_count++;
#endif
	}
#endif
	read_unlock(&tasklist_lock);

	lowmem_print(2, "oom: get memory %lu", *freed);
	return rem;
}
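
The MULTIPLE_OOM_KILLER path above keeps up to OOM_DEPTH victims at once: max_selected_oom_idx always names the weakest entry of the current selection (lowest adj, then smallest size), so a newly scanned task only has to beat that one entry to get in. The standalone sketch below restates just that selection policy in user space; the candidate struct, the consider() helper and the sample task list are hypothetical stand-ins for the task_struct fields the driver actually reads.

#include <stdio.h>

#define OOM_DEPTH 3

struct candidate {
	int pid;
	int adj;	/* oom_score_adj */
	int rss;	/* task size in pages */
};

/*
 * Keep the OOM_DEPTH "worst" tasks seen so far: prefer a higher adj,
 * and on a tie a larger RSS.  *worst_idx always points at the weakest
 * selected entry, i.e. the one a new task has to beat.
 */
static void consider(struct candidate sel[OOM_DEPTH], int *nsel,
		     int *worst_idx, struct candidate c)
{
	int i;

	if (*nsel < OOM_DEPTH) {
		/* a free slot is available: just take it */
		for (i = 0; i < OOM_DEPTH; i++) {
			if (sel[i].pid == 0) {
				sel[i] = c;
				(*nsel)++;
				break;
			}
		}
	} else if (c.adj > sel[*worst_idx].adj ||
		   (c.adj == sel[*worst_idx].adj &&
		    c.rss > sel[*worst_idx].rss)) {
		/* better victim than the current weakest selection */
		sel[*worst_idx] = c;
	} else {
		return;
	}

	if (*nsel == OOM_DEPTH) {
		/* recompute which selected entry is now the weakest */
		*worst_idx = 0;
		for (i = 1; i < OOM_DEPTH; i++)
			if (sel[i].adj < sel[*worst_idx].adj ||
			    (sel[i].adj == sel[*worst_idx].adj &&
			     sel[i].rss < sel[*worst_idx].rss))
				*worst_idx = i;
	}
}

int main(void)
{
	struct candidate sel[OOM_DEPTH] = { {0, 0, 0} };
	struct candidate tasks[] = {
		{ 101, 900, 4000 }, { 102, 500, 9000 },
		{ 103, 900, 2000 }, { 104, 1000, 1000 }, { 105, 300, 500 },
	};
	int nsel = 0, worst = 0, i;

	for (i = 0; i < (int)(sizeof(tasks) / sizeof(tasks[0])); i++)
		consider(sel, &nsel, &worst, tasks[i]);

	for (i = 0; i < OOM_DEPTH; i++)
		if (sel[i].pid)
			printf("kill pid %d (adj %d, rss %d)\n",
			       sel[i].pid, sel[i].adj, sel[i].rss);
	return 0;
}
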
Example #26
0
void window_overflow_fault(void)
{
	send_sig(SIGSEGV, current, 1);
}
Example #27
0
static int32_t afe_callback(struct apr_client_data *data, void *priv)
{
	if (data->opcode == RESET_EVENTS) {
		pr_debug("q6afe: reset event = %d %d apr[%p]\n",
			data->reset_event, data->reset_proc, this_afe.apr);
		if (this_afe.apr) {
			apr_reset(this_afe.apr);
			atomic_set(&this_afe.state, 0);
			this_afe.apr = NULL;
		}
		/* send info to user */
		pr_debug("task_name = %s pid = %d\n",
			this_afe.task->comm, this_afe.task->pid);
		send_sig(SIGUSR1, this_afe.task, 0);
		return 0;
	}
	if (data->payload_size) {
		uint32_t *payload;
		uint16_t port_id = 0;
		payload = data->payload;
		pr_debug("%s:opcode = 0x%x cmd = 0x%x status = 0x%x\n",
					__func__, data->opcode,
					payload[0], payload[1]);
		/* payload[1] contains the error status for response */
		if (payload[1] != 0) {
			atomic_set(&this_afe.status, -1);
			pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
					__func__, payload[0], payload[1]);
		}
		if (data->opcode == APR_BASIC_RSP_RESULT) {
			switch (payload[0]) {
			case AFE_PORT_AUDIO_IF_CONFIG:
			case AFE_PORT_CMD_I2S_CONFIG:
			case AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG:
			case AFE_PORT_AUDIO_SLIM_SCH_CONFIG:
			case AFE_PORT_CMD_STOP:
			case AFE_PORT_CMD_START:
			case AFE_PORT_CMD_LOOPBACK:
			case AFE_PORT_CMD_SIDETONE_CTL:
			case AFE_PORT_CMD_SET_PARAM:
			case AFE_PSEUDOPORT_CMD_START:
			case AFE_PSEUDOPORT_CMD_STOP:
			case AFE_PORT_CMD_APPLY_GAIN:
			case AFE_SERVICE_CMD_MEMORY_MAP:
			case AFE_SERVICE_CMD_MEMORY_UNMAP:
			case AFE_SERVICE_CMD_UNREG_RTPORT:
				atomic_set(&this_afe.state, 0);
				wake_up(&this_afe.wait);
				break;
			case AFE_SERVICE_CMD_REG_RTPORT:
				break;
			case AFE_SERVICE_CMD_RTPORT_WR:
				port_id = RT_PROXY_PORT_001_TX;
				break;
			case AFE_SERVICE_CMD_RTPORT_RD:
				port_id = RT_PROXY_PORT_001_RX;
				break;
			default:
				pr_err("Unknown cmd 0x%x\n",
						payload[0]);
				break;
			}
		} else if (data->opcode == AFE_EVENT_RT_PROXY_PORT_STATUS) {
			port_id = (uint16_t)(0x0000FFFF & payload[0]);
		}
		pr_debug("%s:port_id = %x\n", __func__, port_id);
		switch (port_id) {
		case RT_PROXY_PORT_001_TX: {
			if (this_afe.tx_cb) {
				this_afe.tx_cb(data->opcode, data->token,
					data->payload,
					this_afe.tx_private_data);
			}
			break;
		}
		case RT_PROXY_PORT_001_RX: {
			if (this_afe.rx_cb) {
				this_afe.rx_cb(data->opcode, data->token,
					data->payload,
					this_afe.rx_private_data);
			}
			break;
		}
		default:
			break;
		}
	}
	return 0;
}
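
When afe_callback handles RESET_EVENTS it drops the APR handle and notifies the registered client task with send_sig(SIGUSR1, ...). Below is a minimal sketch of what the receiving user-space side could look like, assuming the process has already registered itself with the driver (that registration path is driver-specific and not shown in the example above).

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t dsp_reset_seen;

/* Async-signal-safe: just record that the DSP restarted. */
static void on_dsp_reset(int signo)
{
	(void)signo;
	dsp_reset_seen = 1;
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_dsp_reset;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) < 0) {
		perror("sigaction");
		return 1;
	}

	/* ... open the audio device and register this task with the driver ... */

	for (;;) {
		pause();	/* woken by SIGUSR1 from the kernel */
		if (dsp_reset_seen) {
			dsp_reset_seen = 0;
			printf("DSP reset reported, reopening audio session\n");
			/* tear down and re-initialise the session here */
		}
	}
}
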
Example #28
0
void window_underflow_fault(unsigned long sp)
{
	send_sig(SIGSEGV, current, 1);
}
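
Examples #26 and #28 are register-window fault paths (as on SPARC): when the kernel cannot spill or fill a register window it reports the failure with send_sig(SIGSEGV, current, 1). From user space this arrives like any other segmentation fault, so an ordinary SIGSEGV handler can intercept it. A minimal sketch follows; the null-pointer write at the end only exists to provoke a fault for demonstration.

#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Async-signal-safe: report the fault and exit instead of dumping core. */
static void on_segv(int signo, siginfo_t *info, void *ctx)
{
	static const char msg[] = "caught SIGSEGV, exiting\n";

	(void)signo;
	(void)info;
	(void)ctx;
	write(STDERR_FILENO, msg, sizeof(msg) - 1);
	_exit(EXIT_FAILURE);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_segv;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	*(volatile int *)0 = 42;	/* deliberately fault */
	return 0;
}
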
Example #29
0
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected[MANAGED_PROCESS_TYPES] = {NULL};
	int rem = 0;
	int ret = 0;
	int tasksize;
	int i;
	short min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int minfree = 0;
	enum lowmem_process_type proc_type = KILLABLE_PROCESS;
	int selected_tasksize[MANAGED_PROCESS_TYPES] = {0};
	int selected_oom_score_adj[MANAGED_PROCESS_TYPES];
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);
	struct reclaim_state *reclaim_state = current->reclaim_state;

#ifdef CONFIG_ZSWAP
	/* to prevent other_file underflow and then be negative */
	if (other_file > total_swapcache_pages())
		other_file -= total_swapcache_pages();
	else
		other_file = 0;
#endif

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		minfree = lowmem_minfree[i];
		if (other_free < minfree && other_file < minfree) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (sc->nr_to_scan > 0) {
		ret = adjust_minadj(&min_score_adj);
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %hd\n",
				sc->nr_to_scan, sc->gfp_mask, other_free,
				other_file, min_score_adj);
	}

	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);

		if ((min_score_adj == OOM_SCORE_ADJ_MAX + 1) &&
			(sc->nr_to_scan > 0))
			trace_almk_shrink(0, ret, other_free, other_file, 0);

		return rem;
	}

	/* Set the initial oom_score_adj for each managed process type */
	for (proc_type = KILLABLE_PROCESS; proc_type < MANAGED_PROCESS_TYPES; proc_type++)
		selected_oom_score_adj[proc_type] = min_score_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		short oom_score_adj;

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
		    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			task_unlock(p);
			rcu_read_unlock();
			/* give the system time to free up the memory */
			msleep_interruptible(20);
			return 0;
		}
		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
#if defined(CONFIG_ZSWAP)
		if (atomic_read(&zswap_stored_pages)) {
			lowmem_print(3, "shown tasksize : %d\n", tasksize);
			tasksize += atomic_read(&zswap_pool_pages) * get_mm_counter(p->mm, MM_SWAPENTS)
				/ atomic_read(&zswap_stored_pages);
			lowmem_print(3, "real tasksize : %d\n", tasksize);
		}
#endif

		task_unlock(p);
		if (tasksize <= 0)
			continue;
#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_DO_NOT_KILL_PROCESS
		/* Check if the process name is contained inside the process to be preserved lists */
		if (is_in_donotkill_proc_list(p->comm)) {
			/* This user process must be preserved from killing */
			proc_type = DO_NOT_KILL_PROCESS;
			lowmem_print(2, "The process '%s' is inside the donotkill_proc_names", p->comm);
		} else if (is_in_donotkill_sysproc_list(p->comm)) {
			/* This system process must be preserved from killing */
			proc_type = DO_NOT_KILL_SYSTEM_PROCESS;
			lowmem_print(2, "The process '%s' is inside the donotkill_sysproc_names", p->comm);
		}
#endif
		if (selected[proc_type]) {
			if (oom_score_adj < selected_oom_score_adj[proc_type])
				continue;
			if (oom_score_adj == selected_oom_score_adj[proc_type] &&
			    tasksize <= selected_tasksize[proc_type])
				continue;
		}
		selected[proc_type] = p;
		selected_tasksize[proc_type] = tasksize;
		selected_oom_score_adj[proc_type] = oom_score_adj;
		lowmem_print(2, "select '%s' (%d), adj %hd, size %d, to kill\n",
			     p->comm, p->pid, oom_score_adj, tasksize);
	}

	/* For each managed process type check if a process to be killed has been found:
	 * - check first if a standard killable process has been found, if so kill it
	 * - if there is no killable process, then check if a user process has been found,
	 *   if so kill it to prevent system slowdowns, hangs, etc.
	 * - if there is no killable and user process, then check if a system process has been found,
	 *   if so kill it to prevent system slowdowns, hangs, etc. */
	for (proc_type = KILLABLE_PROCESS; proc_type < MANAGED_PROCESS_TYPES; proc_type++) {
		if (selected[proc_type]) {
			lowmem_print(1, "Killing '%s' (%d), adj %hd,\n" \
				"   to free %ldkB on behalf of '%s' (%d) because\n" \
				"   cache %ldkB is below limit %ldkB for oom_score_adj %hd\n" \
				"   Free memory is %ldkB above reserved\n",
			     	selected[proc_type]->comm, selected[proc_type]->pid,
			     	selected_oom_score_adj[proc_type],
			     	selected_tasksize[proc_type] * (long)(PAGE_SIZE / 1024),
			     	current->comm, current->pid,
			     	other_file * (long)(PAGE_SIZE / 1024),
			     	minfree * (long)(PAGE_SIZE / 1024),
			     	min_score_adj,
			     	other_free * (long)(PAGE_SIZE / 1024));
			lowmem_deathpending_timeout = jiffies + HZ;
			send_sig(SIGKILL, selected[proc_type], 0);
			set_tsk_thread_flag(selected[proc_type], TIF_MEMDIE);
			rem -= selected_tasksize[proc_type];
			break;
		}
	}

	trace_almk_shrink(1, ret, other_free, other_file, 0);
	rcu_read_unlock();

	lowmem_lmkcount++;

	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	return rem;
}
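
Before either shrinker scans any task it walks the parallel lowmem_minfree / lowmem_adj tables: the first minfree level that both the free-page and file-page counts fall below selects the oom_score_adj cut-off, and tasks below that adj are never considered. A small sketch of that lookup in isolation; the threshold tables in main() are made-up illustrative values, not the module's defaults.

#include <stdio.h>

#define OOM_SCORE_ADJ_MAX 1000

/*
 * The same threshold walk both shrinkers perform: the first minfree
 * level that both counters fall below picks the matching adj cut-off.
 */
static short pick_min_score_adj(const int *minfree, const short *adj,
				int levels, int other_free, int other_file)
{
	int i;

	for (i = 0; i < levels; i++)
		if (other_free < minfree[i] && other_file < minfree[i])
			return adj[i];
	return OOM_SCORE_ADJ_MAX + 1;	/* nothing is low enough: kill nobody */
}

int main(void)
{
	/* illustrative threshold tables (pages / matching adj levels) */
	static const int minfree[] = { 3072, 4096, 6144, 8192, 12288, 16384 };
	static const short adj[] = { 0, 58, 117, 176, 529, 1000 };

	printf("ma = %hd\n", pick_min_score_adj(minfree, adj, 6, 5000, 7000));
	return 0;
}
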
Example #30
0
/* These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */
static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct elfhdr elf_ex, interp_elf_ex;
	struct file *interpreter;
	struct elf_phdr *elf_phdata, *elf_ihdr, *elf_ephdr;
	unsigned int load_addr, elf_bss, elf_brk;
	unsigned int elf_entry, interp_load_addr = 0;
	unsigned int start_code, end_code, end_data, elf_stack;
	int retval, has_interp, has_ephdr, size, i;
	char *elf_interpreter;
	mm_segment_t old_fs;

	load_addr = 0;
	has_interp = has_ephdr = 0;
	elf_ihdr = elf_ephdr = 0;
	elf_ex = *((struct elfhdr *) bprm->buf);
	retval = -ENOEXEC;

	if (verify_binary(&elf_ex, bprm))
		goto out;

#ifdef DEBUG_ELF
	print_elfhdr(&elf_ex);
#endif

	/* Now read in all of the header information */
	size = elf_ex.e_phentsize * elf_ex.e_phnum;
	if (size > 65536)
		goto out;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (elf_phdata == NULL) {
		retval = -ENOMEM;
		goto out;
	}

	retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *)elf_phdata, size);
	if (retval < 0)
		goto out_free_ph;

#ifdef DEBUG_ELF
	dump_phdrs(elf_phdata, elf_ex.e_phnum);
#endif

	/* Set some things for later. */
	for(i = 0; i < elf_ex.e_phnum; i++) {
		switch(elf_phdata[i].p_type) {
		case PT_INTERP:
			has_interp = 1;
			elf_ihdr = &elf_phdata[i];
			break;
		case PT_PHDR:
			has_ephdr = 1;
			elf_ephdr = &elf_phdata[i];
			break;
		};
	}
#ifdef DEBUG_ELF
	printk("\n");
#endif

	elf_bss = 0;
	elf_brk = 0;

	elf_stack = 0xffffffff;
	elf_interpreter = NULL;
	start_code = 0xffffffff;
	end_code = 0;
	end_data = 0;

	retval = look_for_irix_interpreter(&elf_interpreter,
	                                   &interpreter,
					   &interp_elf_ex, elf_phdata, bprm,
					   elf_ex.e_phnum);
	if (retval)
		goto out_free_file;

	if (elf_interpreter) {
		retval = verify_irix_interpreter(&interp_elf_ex);
		if(retval)
			goto out_free_interp;
	}

	/* OK, we are done with that, now set up the arg stuff,
	 * and then start this sucker up.
	 */
	retval = -E2BIG;
	if (!bprm->sh_bang && !bprm->p)
		goto out_free_interp;

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* OK, This is the point of no return */
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	elf_entry = (unsigned int) elf_ex.e_entry;

	/* Do this so that we can load the interpreter, if need be.  We will
	 * change some of these later.
	 */
	current->mm->rss = 0;
	setup_arg_pages(bprm);
	current->mm->start_stack = bprm->p;

	/* At this point, we assume that the image should be loaded at
	 * fixed address, not at a variable address.
	 */
	old_fs = get_fs();
	set_fs(get_ds());

	map_executable(bprm->file, elf_phdata, elf_ex.e_phnum, &elf_stack,
	               &load_addr, &start_code, &elf_bss, &end_code,
	               &end_data, &elf_brk);

	if(elf_interpreter) {
		retval = map_interpreter(elf_phdata, &interp_elf_ex,
					 interpreter, &interp_load_addr,
					 elf_ex.e_phnum, old_fs, &elf_entry);
		kfree(elf_interpreter);
		if(retval) {
			set_fs(old_fs);
			printk("Unable to load IRIX ELF interpreter\n");
			send_sig(SIGSEGV, current, 0);
			retval = 0;
			goto out_free_file;
		}
	}

	set_fs(old_fs);

	kfree(elf_phdata);
	set_personality(PER_IRIX32);
	set_binfmt(&irix_format);
	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	bprm->p = (unsigned long)
	  create_irix_tables((char *)bprm->p, bprm->argc, bprm->envc,
			(elf_interpreter ? &elf_ex : NULL),
			load_addr, interp_load_addr, regs, elf_ephdr);
	current->mm->start_brk = current->mm->brk = elf_brk;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	/* Calling set_brk effectively mmaps the pages that we need for the
	 * bss and break sections.
	 */
	set_brk(elf_bss, elf_brk);

	/*
	 * IRIX maps a page at 0x200000 which holds some system
	 * information.  Programs depend on this.
	 */
	irix_map_prda_page ();

	padzero(elf_bss);

#ifdef DEBUG_ELF
	printk("(start_brk) %lx\n" , (long) current->mm->start_brk);
	printk("(end_code) %lx\n" , (long) current->mm->end_code);
	printk("(start_code) %lx\n" , (long) current->mm->start_code);
	printk("(end_data) %lx\n" , (long) current->mm->end_data);
	printk("(start_stack) %lx\n" , (long) current->mm->start_stack);
	printk("(brk) %lx\n" , (long) current->mm->brk);
#endif

#if 0 /* XXX No f*****g way dude... */
	/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
	 * and some applications "depend" upon this behavior.
	 * Since we do not have the power to recompile these, we
	 * emulate the SVr4 behavior.  Sigh.
	 */
	down_write(&current->mm->mmap_sem);
	(void) do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
		       MAP_FIXED | MAP_PRIVATE, 0);
	up_write(&current->mm->mmap_sem);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (current->ptrace & PT_PTRACED)
		send_sig(SIGTRAP, current, 0);
	return 0;
out:
	return retval;

out_free_dentry:
	allow_write_access(interpreter);
	fput(interpreter);
out_free_interp:
	if (elf_interpreter)
		kfree(elf_interpreter);
out_free_file:
out_free_ph:
	kfree (elf_phdata);
	goto out;
}