Example #1
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	gfn_t gfn;

	rev_sp = page_header(__pa(sptep));
	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

	if (!gfn_to_memslot(kvm, gfn)) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
		audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
		       (long int)(sptep - rev_sp->spt), rev_sp->gfn);
		dump_stack();
		return;
	}

	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
	if (!*rmapp) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(kvm, "no rmap for writable spte %llx\n",
			     *sptep);
		dump_stack();
	}
}
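All of these examples follow the same pattern: declare a static struct ratelimit_state, usually via DEFINE_RATELIMIT_STATE(name, interval, burst), and test __ratelimit() before emitting a diagnostic, so that at most "burst" messages are printed per "interval". A minimal, self-contained sketch of that pattern follows (the function name and message are hypothetical, not taken from any of the examples):

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* Allow at most 10 of these reports every 5 seconds. */
static void report_bad_value(u64 value)
{
	static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10);

	if (!__ratelimit(&rs))
		return;	/* over the limit for this interval: stay silent */

	pr_err("bad value detected: %llx\n", value);
}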
Example #2
static void inspect_spte_has_rmap(struct vm *pvm, u64 *sptep)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
	unsigned long *rmapp;
	struct vmmr0_mmu_page *rev_sp;
	gfn_t gfn;

	rev_sp = page_header(__pa(sptep));
#ifdef HOST_LINUX_OPTIMIZED
	gfn = vmmr0_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
#else
	gfn = vmmr0_mmu_page_get_gfn(rev_sp, (u64*)__pa(sptep) - (u64*)__pa(rev_sp->spt));
#endif

	if (!mmu_gfn_to_memslot(pvm, gfn)) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(pvm, "no memslot for gfn %llx\n", gfn);
		audit_printk(pvm, "index %ld of sp (gfn=%llx)\n",
		       (long int)(sptep - rev_sp->spt), rev_sp->gfn);
		dump_stack();
		return;
	}

	rmapp = gfn_to_rmap(pvm, gfn, rev_sp->role.level);
	if (!*rmapp) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(pvm, "no rmap for writable spte %llx\n",
			     *sptep);
		dump_stack();
	}
}
Example #3
static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);

	if (!get_ldev(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
		kfree(udw);
		return 1;
	}

	drbd_bm_write_sect(mdev, udw->enr);
	put_ldev(mdev);

	kfree(udw);

	if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
		switch (mdev->state.conn) {
		case C_SYNC_SOURCE:  case C_SYNC_TARGET:
		case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T:
			drbd_resync_finished(mdev);
		default:
			/* nothing to do */
			break;
		}
	}
	drbd_bcast_sync_progress(mdev);

	return 1;
}
Example #4
static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
		return;

	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
}
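For context, the single_bit_flip() helper used above checks whether the lone bad byte differs from PAGE_POISON in exactly one bit position. A sketch of that helper, matching the implementation that accompanies check_poison_mem() in mm/page_poison.c (exact versions may differ):

static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	/* non-zero and a power of two => exactly one differing bit */
	return error && !(error & (error - 1));
}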
Example #5
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif 

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
	set_irq_regs(old_regs);
}
Example #6
static void diag_hsic_read_complete_callback(void *ctxt, char *buf, int buf_size, int actual_size)
{
	int err = -2;
	int index = (int)ctxt;
	static DEFINE_RATELIMIT_STATE(rl, 10 * HZ, 1);

	if (!diag_hsic[index].hsic_ch) {
		/*
		 * The HSIC channel is closed. Return the buffer to
		 * the pool.  Do not send it on.
		 */
		diagmem_free(driver, buf, index + POOL_TYPE_HSIC);
		pr_debug("diag: In %s: hsic_ch == 0, actual_size: %d\n", __func__, actual_size);
		return;
	}

	/*
	 * Note that zero length is valid and still needs to be sent to
	 * the USB only when we are logging data to the USB
	 */
	if ((actual_size > 0) || ((actual_size == 0) && (driver->logging_mode == USB_MODE))) {
		if (!buf) {
			pr_err("diag: Out of diagmem for HSIC\n");
		} else {
			/*
			 * Send data in buf to be written on the
			 * appropriate device, e.g. USB MDM channel
			 */
			diag_bridge[index].write_len = actual_size;
			err = diag_device_write((void *)buf, index + HSIC_DATA, NULL);
			/* If an error, return buffer to the pool */
			if (err) {
				diagmem_free(driver, buf, index + POOL_TYPE_HSIC);
				if (__ratelimit(&rl))
					pr_err("diag: In %s, error calling diag_device_write, err: %d\n", __func__, err);
			}
		}
	} else {
		/*
		 * The buffer has an error status associated with it. Do not
		 * pass it on. Note that -ENOENT is sent when the diag bridge
		 * is closed.
		 */
		diagmem_free(driver, buf, index + POOL_TYPE_HSIC);
		pr_debug("diag: In %s: error status: %d\n", __func__, actual_size);
	}

	/*
	 * If for some reason there was no HSIC data to write to the
	 * mdm channel, set up another read
	 */
	if (err && ((driver->logging_mode == MEMORY_DEVICE_MODE) || (diag_bridge[index].usb_connected && !diag_hsic[index].hsic_suspend))) {
		queue_work(diag_bridge[index].wq, &diag_hsic[index].diag_read_hsic_work);
	}
}
Example #7
static void __vmmr0_mmu_audit(struct vmmr0_vcpu *vcpu, int point)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!__ratelimit(&ratelimit_state))
		return;

	vcpu->vmmr0->arch.audit_point = point;
	audit_all_active_sps(vcpu->vmmr0);
	audit_vcpu_spte(vcpu);
}
Example #8
static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!__ratelimit(&ratelimit_state))
		return;

	vcpu->kvm->arch.audit_point = point;
	audit_all_active_sps(vcpu->kvm);
	audit_vcpu_spte(vcpu);
}
Example #9
static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_kms *mdp4_kms = container_of(irq, struct mdp4_kms, error_handler);
	static DEFINE_RATELIMIT_STATE(rs, 5*HZ, 1);
	extern bool dumpstate;

	DRM_ERROR_RATELIMITED("errors: %08x\n", irqstatus);

	if (dumpstate && __ratelimit(&rs)) {
		struct drm_printer p = drm_info_printer(mdp4_kms->dev->dev);
		drm_state_dump(mdp4_kms->dev, &p);
	}
}
Example #10
static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
{
	const unsigned long s = req->rq_state;

	/* remove it from the transfer log.
	 * well, only if it had been there in the first
	 * place... if it had not (local only or conflicting
	 * and never sent), it should still be "empty" as
	 * initialized in drbd_req_new(), so we can list_del() it
	 * here unconditionally */
	list_del(&req->tl_requests);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (rw == WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */
		if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
			drbd_set_out_of_sync(mdev, req->sector, req->size);

		if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
			drbd_set_in_sync(mdev, req->sector, req->size);

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_endio_pri.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_LOCAL_MASK) {
			if (get_ldev_if_state(mdev, D_FAILED)) {
				if (s & RQ_IN_ACT_LOG)
					drbd_al_complete_io(mdev, req->sector);
				put_ldev(mdev);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), "
				     "but my Disk seems to have failed :(\n",
				     (unsigned long long) req->sector);
			}
		}
	}

	drbd_req_free(req);
}
Example #11
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
    struct drbd_bitmap *b = mdev->bitmap;
    if (!__ratelimit(&drbd_ratelimit_state))
        return;
    dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
            current == mdev->receiver.task ? "receiver" :
            current == mdev->asender.task  ? "asender"  :
            current == mdev->worker.task   ? "worker"   : current->comm,
            func, b->bm_why ?: "?",
            b->bm_task == mdev->receiver.task ? "receiver" :
            b->bm_task == mdev->asender.task  ? "asender"  :
            b->bm_task == mdev->worker.task   ? "worker"   : "?");
}
Example #12
/**
  @brief wpalReadRegister provides a mechanism for a client
         to read data from a hardware data register

  @param  address:  Physical memory address of the register
  @param  data:     Return location for value that is read

  @return SUCCESS if the data was successfully read
*/
wpt_status wpalReadRegister
(
   wpt_uint32   address,
   wpt_uint32  *data
)
{
   /* if SSR is in progress, and WCNSS is not out of reset (re-init
    * not invoked), then do not access WCNSS registers */
   if (NULL == gpEnv || wcnss_device_is_shutdown() ||
        (vos_is_logp_in_progress(VOS_MODULE_ID_WDI, NULL) &&
            !vos_is_reinit_in_progress(VOS_MODULE_ID_WDI, NULL))) {
       /* Ratelimit wpalReadRegister failure messages which
        * can flood serial console during improper system
        * initialization or wcnss_device in shutdown state.
        * wpalRegisterInterrupt() call to wpalReadRegister is
        * likely to cause flooding. */
       if (__ratelimit(&wpalReadRegister_rs)) {
           WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                   "%s: invoked before subsystem initialized",
                   __func__);
       }
       return eWLAN_PAL_STATUS_E_INVAL;
   }

   address = (address | gpEnv->wcnss_memory->start);

   if ((address < gpEnv->wcnss_memory->start) ||
       (address > gpEnv->wcnss_memory->end)) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: Register address 0x%0x out of range 0x%0x - 0x%0x",
                 __func__, address,
                 (u32) gpEnv->wcnss_memory->start,
                 (u32) gpEnv->wcnss_memory->end);
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   if (0 != (address & 0x3)) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: Register address 0x%0x is not word aligned",
                 __func__, address);
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   *data = readl_relaxed(gpEnv->mmio + (address - gpEnv->wcnss_memory->start));
   rmb();

   return eWLAN_PAL_STATUS_SUCCESS;
}
Example #13
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
			      const char *message)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk;

	if (!show_unhandled_signals || !__ratelimit(&rs))
		return;

	tsk = current;

	printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
	       level, tsk->comm, task_pid_nr(tsk),
	       message, regs->ip, regs->cs,
	       regs->sp, regs->ax, regs->si, regs->di);
}
Example #14
static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *victim = oc->chosen;
	struct mem_cgroup *oom_group;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly
	 */
	task_lock(victim);
	if (task_will_free_mem(victim)) {
		mark_oom_victim(victim);
		wake_oom_reaper(victim);
		task_unlock(victim);
		put_task_struct(victim);
		return;
	}
	task_unlock(victim);

	if (__ratelimit(&oom_rs))
		dump_header(oc, victim);

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);

	__oom_kill_process(victim, message);

	/*
	 * If necessary, kill all tasks in the selected memory cgroup.
	 */
	if (oom_group) {
		mem_cgroup_print_oom_group(oom_group);
		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
				      (void*)message);
		mem_cgroup_put(oom_group);
	}
}
Example #15
static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
		size_t count, loff_t *ppos)
{
	struct efivar_entry *var = file->private_data;
	unsigned long datasize = 0;
	u32 attributes;
	void *data;
	ssize_t size = 0;
	int err;

	while (!__ratelimit(&file->f_cred->user->ratelimit)) {
		if (!msleep_interruptible(50))
			return -EINTR;
	}

	err = efivar_entry_size(var, &datasize);

	/*
	 * efivarfs represents uncommitted variables with
	 * zero-length files. Reading them should return EOF.
	 */
	if (err == -ENOENT)
		return 0;
	else if (err)
		return err;

	data = kmalloc(datasize + sizeof(attributes), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	size = efivar_entry_get(var, &attributes, &datasize,
				data + sizeof(attributes));
	if (size)
		goto out_free;

	memcpy(data, &attributes, sizeof(attributes));
	size = simple_read_from_buffer(userbuf, count, ppos,
				       data, datasize + sizeof(attributes));
out_free:
	kfree(data);

	return size;
}
Example #16
static int print_extlog_rcd(const char *pfx,
			    struct acpi_generic_status *estatus, int cpu)
{
	/* Not more than 2 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
	struct ratelimit_state *ratelimit;

	if (estatus->error_severity == CPER_SEV_CORRECTED ||
	    (estatus->error_severity == CPER_SEV_INFORMATIONAL))
		ratelimit = &ratelimit_corrected;
	else
		ratelimit = &ratelimit_uncorrected;
	if (__ratelimit(ratelimit)) {
		__print_extlog_rcd(pfx, estatus, cpu);
		return 0;
	}

	return 1;
}
Example #17
void fscrypt_msg(struct super_block *sb, const char *level,
		 const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct va_format vaf;
	va_list args;

	if (!__ratelimit(&rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sfscrypt (%s): %pV\n", level, sb->s_id, &vaf);
	else
		printk("%sfscrypt: %pV\n", level, &vaf);
	va_end(args);
}
Example #18
static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

/* IAMROOT-12AB:
 * -------------
 * Scan bytes bytes starting at the given address for a value other than
 * PAGE_POISON and return its address. If every byte still holds the poison
 * value, there is nothing to report and we return.
 */
	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

/* IAMROOT-12AB:
 * -------------
 * Bail out if the ratelimit (10 reports per 5 seconds) has been exceeded.
 */
	if (!__ratelimit(&ratelimit))
		return;

/* IAMROOT-12AB:
 * -------------
 * Distinguish in the report between a single flipped bit and wider
 * memory corruption.
 */
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		printk(KERN_ERR "pagealloc: single bit error\n");
	else
		printk(KERN_ERR "pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
}
Example #19
static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw)
{
	const unsigned long s = req->rq_state;

	list_del(&req->tl_requests);

	if (rw == WRITE) {
		if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
			drbd_set_out_of_sync(mdev, req->sector, req->size);

		if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
			drbd_set_in_sync(mdev, req->sector, req->size);

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_endio_pri.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_LOCAL_MASK) {
			if (get_ldev_if_state(mdev, D_FAILED)) {
				if (s & RQ_IN_ACT_LOG)
					drbd_al_complete_io(mdev, req->sector);
				put_ldev(mdev);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), "
				     "but my Disk seems to have failed :(\n",
				     (unsigned long long) req->sector);
			}
		}
	}

	drbd_req_free(req);
}
Example #20
static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned int esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}
Example #21
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
	struct task_struct *selected = NULL;
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
	int selected_tasksize = 0;
	int selected_oom_score_adj;
#ifdef CONFIG_SAMP_HOTNESS
	int selected_hotness_adj = 0;
#endif
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free;
	int other_file;
	unsigned long nr_to_scan = sc->nr_to_scan;
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
	static DEFINE_RATELIMIT_STATE(lmk_rs, DEFAULT_RATELIMIT_INTERVAL, 1);
#endif
	unsigned long nr_cma_free;
	struct reclaim_state *reclaim_state = current->reclaim_state;
#if defined(CONFIG_CMA_PAGE_COUNTING)
	unsigned long nr_cma_inactive_file;
	unsigned long nr_cma_active_file;
	unsigned long cma_page_ratio;
	bool is_active_high;
	bool flag = 0;
#endif

	if (nr_to_scan > 0) {
		if (mutex_lock_interruptible(&scan_mutex) < 0)
			return 0;
	}

	other_free = global_page_state(NR_FREE_PAGES);

	nr_cma_free = global_page_state(NR_FREE_CMA_PAGES);
#ifdef CONFIG_ZSWAP
	if (!current_is_kswapd() || sc->priority <= 6)
#endif
		other_free -= nr_cma_free;

#if defined(CONFIG_CMA_PAGE_COUNTING)
	nr_cma_inactive_file = global_page_state(NR_CMA_INACTIVE_FILE);
	nr_cma_active_file = global_page_state(NR_CMA_ACTIVE_FILE);
	cma_page_ratio = 100 * global_page_state(NR_CMA_INACTIVE_FILE) /
				global_page_state(NR_INACTIVE_FILE);
	is_active_high = (global_page_state(NR_ACTIVE_FILE) >
				global_page_state(NR_INACTIVE_FILE)) ? 1 : 0;
#endif
	other_file = global_page_state(NR_FILE_PAGES);

#if defined(CONFIG_CMA_PAGE_COUNTING) && defined(CONFIG_EXCLUDE_LRU_LIVING_IN_CMA)
	if (get_nr_swap_pages() < SSWAP_LMK_THRESHOLD && cma_page_ratio >= CMA_PAGE_RATIO
			&& !is_active_high) {
		other_file = other_file - (nr_cma_inactive_file + nr_cma_active_file);
		flag = 1;
	}
#endif
	if (global_page_state(NR_SHMEM) + total_swapcache_pages < other_file)
		other_file -= global_page_state(NR_SHMEM) + total_swapcache_pages;
	else
		other_file = 0;

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
				nr_to_scan, sc->gfp_mask, other_free,
				other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     nr_to_scan, sc->gfp_mask, rem);

		if (nr_to_scan > 0)
			mutex_unlock(&scan_mutex);

		return rem;
	}
	selected_oom_score_adj = min_score_adj;

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;
#ifdef CONFIG_SAMP_HOTNESS
		int hotness_adj = 0;
#endif

		if (tsk->flags & PF_KTHREAD)
			continue;

		/* if task no longer has any memory ignore it */
		if (test_task_flag(tsk, TIF_MM_RELEASED))
			continue;

		if (time_before_eq(jiffies, lowmem_deathpending_timeout)) {
			if (test_task_flag(tsk, TIF_MEMDIE)) {
				rcu_read_unlock();
				/* give the system time to free up the memory */
				msleep_interruptible(20);
				mutex_unlock(&scan_mutex);
				return 0;
			}
		}

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
#if defined(CONFIG_ZSWAP)
		if (atomic_read(&zswap_stored_pages)) {
			lowmem_print(3, "shown tasksize : %d\n", tasksize);
			tasksize += atomic_read(&zswap_pool_pages) * get_mm_counter(p->mm, MM_SWAPENTS)
				/ atomic_read(&zswap_stored_pages);
			lowmem_print(3, "real tasksize : %d\n", tasksize);
		}
#endif

#ifdef CONFIG_SAMP_HOTNESS
		hotness_adj = p->signal->hotness_adj;
#endif
		task_unlock(p);
		if (tasksize <= 0)
			continue;
		if (selected) {
#ifdef CONFIG_SAMP_HOTNESS
			if (min_score_adj <= lowmem_adj[4]) {
#endif
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
#ifdef CONFIG_SAMP_HOTNESS
			} else {
				if (hotness_adj > selected_hotness_adj)
					continue;
				if (hotness_adj == selected_hotness_adj && tasksize <= selected_tasksize)
					continue;
			}
#endif
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
#ifdef CONFIG_SAMP_HOTNESS
		selected_hotness_adj = hotness_adj;
#endif
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
	}
	if (selected) {
#if defined(CONFIG_CMA_PAGE_COUNTING)
#ifdef CONFIG_SAMP_HOTNESS
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
			"ofree %d, ofile %d(%c), is_kswapd %d - "
			"cma_free %lu priority %d cma_i_file %lu cma_a_file %lu, hotness %d\n",
			selected->pid, selected->comm,
			selected_oom_score_adj, selected_tasksize,
			other_free, other_file, flag ? '-' : '+',
			!!current_is_kswapd(),
			nr_cma_free, sc->priority,
			nr_cma_inactive_file, nr_cma_active_file, selected_hotness_adj);
#else
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
			"ofree %d, ofile %d(%c), is_kswapd %d - "
			"cma_free %lu priority %d cma_i_file %lu cma_a_file %lu\n",
			selected->pid, selected->comm,
			selected_oom_score_adj, selected_tasksize,
			other_free, other_file, flag ? '-' : '+',
			!!current_is_kswapd(),
			nr_cma_free, sc->priority,
			nr_cma_inactive_file, nr_cma_active_file);
#endif

#else
#ifdef CONFIG_SAMP_HOTNESS
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
				"free memory = %d, reclaimable memory = %d "
				"is_kswapd %d cma_free %lu priority %d, hotness %d\n",
				selected->pid, selected->comm,
				selected_oom_score_adj, selected_tasksize,
				other_free, other_file,
				!!current_is_kswapd(),
				nr_cma_free, sc->priority, selected_hotness_adj);
#else
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d, "
				"free memory = %d, reclaimable memory = %d "
				"is_kswapd %d cma_free %lu priority %d\n",
				selected->pid, selected->comm,
				selected_oom_score_adj, selected_tasksize,
				other_free, other_file,
				!!current_is_kswapd(),
				nr_cma_free, sc->priority);
#endif
#endif
		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		rcu_read_unlock();
#ifdef LMK_COUNT_READ
		lmk_count++;
#endif

#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
		if ((selected_oom_score_adj < lowmem_adj[5]) && __ratelimit(&lmk_rs)) {
			lowmem_print(1, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
					nr_to_scan, sc->gfp_mask, other_free,
					other_file, min_score_adj);
			show_mem(SHOW_MEM_FILTER_NODES);
			dump_tasks_info();
		}
#endif
		/* give the system time to free up the memory */
		msleep_interruptible(20);
		if (reclaim_state)
			reclaim_state->reclaimed_slab = selected_tasksize;
	} else
		rcu_read_unlock();

	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     nr_to_scan, sc->gfp_mask, rem);
	mutex_unlock(&scan_mutex);
	return rem;
}
Example #22
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct task_struct *tsk;
#ifdef ENHANCED_LMK_ROUTINE
	struct task_struct *selected[LOWMEM_DEATHPENDING_DEPTH] = {NULL,};
#else
	struct task_struct *selected = NULL;
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO_VERBOSE
	static DEFINE_RATELIMIT_STATE(lmk_rs, DEFAULT_RATELIMIT_INTERVAL, 0);
#else
	static DEFINE_RATELIMIT_STATE(lmk_rs, 6*DEFAULT_RATELIMIT_INTERVAL, 0);
#endif
#endif
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
#ifdef ENHANCED_LMK_ROUTINE
	int selected_tasksize[LOWMEM_DEATHPENDING_DEPTH] = {0,};
	int selected_oom_score_adj[LOWMEM_DEATHPENDING_DEPTH] = {OOM_ADJUST_MAX,};
	int all_selected_oom = 0;
	int max_selected_oom_idx = 0;
#else
	int selected_tasksize = 0;
	int selected_oom_score_adj;
#endif
	int array_size = ARRAY_SIZE(lowmem_adj);
#if (!defined(CONFIG_MACH_JF) \
	&& !defined(CONFIG_SEC_PRODUCT_8960)\
	)
	unsigned long nr_to_scan = sc->nr_to_scan;
#endif
#ifndef CONFIG_CMA
	int other_free = global_page_state(NR_FREE_PAGES);
#else
	int other_free = global_page_state(NR_FREE_PAGES) -
				global_page_state(NR_FREE_CMA_PAGES);
#endif
	int other_file = global_page_state(NR_FILE_PAGES) - global_page_state(NR_SHMEM);
#ifdef CONFIG_ZRAM_FOR_ANDROID
	other_file -= total_swapcache_pages;
#endif /* CONFIG_ZRAM_FOR_ANDROID */
	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_score_adj = lowmem_adj[i];
			break;
		}
	}
	if (sc->nr_to_scan > 0)
		lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
				sc->nr_to_scan, sc->gfp_mask, other_free,
				other_file, min_score_adj);
	rem = global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE) +
		global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE);
	if (sc->nr_to_scan <= 0 || min_score_adj == OOM_SCORE_ADJ_MAX + 1) {
		lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
			     sc->nr_to_scan, sc->gfp_mask, rem);
		return rem;
	}

#ifdef ENHANCED_LMK_ROUTINE
	for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++)
		selected_oom_score_adj[i] = min_score_adj;
#else
	selected_oom_score_adj = min_score_adj;
#endif

#ifdef CONFIG_ZRAM_FOR_ANDROID
	atomic_set(&s_reclaim.lmk_running, 1);
#endif /* CONFIG_ZRAM_FOR_ANDROID */
	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;
#ifdef ENHANCED_LMK_ROUTINE
		int is_exist_oom_task = 0;
#endif

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
			time_before_eq(jiffies, lowmem_deathpending_timeout)) {
				task_unlock(p);
				read_unlock(&tasklist_lock);
#ifdef CONFIG_ZRAM_FOR_ANDROID
				atomic_set(&s_reclaim.lmk_running, 0);
#endif /* CONFIG_ZRAM_FOR_ANDROID */
				return 0;
		}
		
		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;

#ifdef ENHANCED_LMK_ROUTINE
		if (all_selected_oom < LOWMEM_DEATHPENDING_DEPTH) {
			for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
				if (!selected[i]) {
					is_exist_oom_task = 1;
					max_selected_oom_idx = i;
					break;
				}
			}
		} else if (selected_oom_score_adj[max_selected_oom_idx] < oom_score_adj ||
			(selected_oom_score_adj[max_selected_oom_idx] == oom_score_adj &&
			selected_tasksize[max_selected_oom_idx] < tasksize)) {
			is_exist_oom_task = 1;
		}

		if (is_exist_oom_task) {
			selected[max_selected_oom_idx] = p;
			selected_tasksize[max_selected_oom_idx] = tasksize;
			selected_oom_score_adj[max_selected_oom_idx] = oom_score_adj;

			if (all_selected_oom < LOWMEM_DEATHPENDING_DEPTH)
				all_selected_oom++;

			if (all_selected_oom == LOWMEM_DEATHPENDING_DEPTH) {
				for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
					if (selected_oom_score_adj[i] < selected_oom_score_adj[max_selected_oom_idx])
						max_selected_oom_idx = i;
					else if (selected_oom_score_adj[i] == selected_oom_score_adj[max_selected_oom_idx] &&
						selected_tasksize[i] < selected_tasksize[max_selected_oom_idx])
						max_selected_oom_idx = i;
				}
			}

			lowmem_print(2, "select %d (%s), adj %d, \
					size %d, to kill\n",
				p->pid, p->comm, oom_score_adj, tasksize);
		}
#else
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#endif
	}
#ifdef ENHANCED_LMK_ROUTINE
	for (i = 0; i < LOWMEM_DEATHPENDING_DEPTH; i++) {
		if (selected[i]) {
			lowmem_print(1, "send sigkill to %d (%s), adj %d,\
				     size %d, free memory = %d, reclaimable memory = %d\n",
				     selected[i]->pid, selected[i]->comm,
				     selected_oom_score_adj[i],
				     selected_tasksize[i],
				     other_free, other_file);
			lowmem_deathpending_timeout = jiffies + HZ;
			send_sig(SIGKILL, selected[i], 0);
			set_tsk_thread_flag(selected[i], TIF_MEMDIE);
			rem -= selected_tasksize[i];
#ifdef LMK_COUNT_READ
			lmk_count++;
#endif
		}
	}
#else
	if (selected) {
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
		lowmem_deathpending_timeout = jiffies + HZ;
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
#ifdef LMK_COUNT_READ
		lmk_count++;
#endif
	}
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
	if (__ratelimit(&lmk_rs)) {
		lowmem_print(1, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
				nr_to_scan, sc->gfp_mask, other_free,
				other_file, min_score_adj);
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO_VERBOSE
		show_mem(SHOW_MEM_FILTER_NODES);
		dump_tasks_info();
#endif
	}
#endif
	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
		     sc->nr_to_scan, sc->gfp_mask, rem);
	read_unlock(&tasklist_lock);
#ifdef CONFIG_ZRAM_FOR_ANDROID
	atomic_set(&s_reclaim.lmk_running, 0);
#endif /* CONFIG_ZRAM_FOR_ANDROID */
	return rem;
}
Example #23
static int android_oom_handler(struct notifier_block *nb,
				      unsigned long val, void *data)
{
	struct task_struct *tsk;
#ifdef MULTIPLE_OOM_KILLER
	struct task_struct *selected[OOM_DEPTH] = {NULL,};
#else
	struct task_struct *selected = NULL;
#endif
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
#ifdef MULTIPLE_OOM_KILLER
	int selected_tasksize[OOM_DEPTH] = {0,};
	int selected_oom_score_adj[OOM_DEPTH] = {OOM_ADJUST_MAX,};
	int all_selected_oom = 0;
	int max_selected_oom_idx = 0;
#else
	int selected_tasksize = 0;
	int selected_oom_score_adj;
#endif
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL/5, 1);

	unsigned long *freed = data;

	/* show status */
	pr_warning("%s invoked Android-oom-killer: "
		"oom_adj=%d, oom_score_adj=%d\n",
		current->comm, current->signal->oom_adj,
		current->signal->oom_score_adj);
	dump_stack();
	show_mem(SHOW_MEM_FILTER_NODES);
	if (__ratelimit(&oom_rs))
		dump_tasks_info();

	min_score_adj = 0;
#ifdef MULTIPLE_OOM_KILLER
	for (i = 0; i < OOM_DEPTH; i++)
		selected_oom_score_adj[i] = min_score_adj;
#else
	selected_oom_score_adj = min_score_adj;
#endif

#ifdef CONFIG_ZRAM_FOR_ANDROID
	atomic_set(&s_reclaim.lmk_running, 1);
#endif

	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;
#ifdef MULTIPLE_OOM_KILLER
		int is_exist_oom_task = 0;
#endif

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;

		lowmem_print(2, "oom: ------ %d (%s), adj %d, size %d\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#ifdef MULTIPLE_OOM_KILLER
		if (all_selected_oom < OOM_DEPTH) {
			for (i = 0; i < OOM_DEPTH; i++) {
				if (!selected[i]) {
					is_exist_oom_task = 1;
					max_selected_oom_idx = i;
					break;
				}
			}
		} else if (selected_oom_score_adj[max_selected_oom_idx] < oom_score_adj ||
			(selected_oom_score_adj[max_selected_oom_idx] == oom_score_adj &&
			selected_tasksize[max_selected_oom_idx] < tasksize)) {
			is_exist_oom_task = 1;
		}

		if (is_exist_oom_task) {
			selected[max_selected_oom_idx] = p;
			selected_tasksize[max_selected_oom_idx] = tasksize;
			selected_oom_score_adj[max_selected_oom_idx] = oom_score_adj;

			if (all_selected_oom < OOM_DEPTH)
				all_selected_oom++;

			if (all_selected_oom == OOM_DEPTH) {
				for (i = 0; i < OOM_DEPTH; i++) {
					if (selected_oom_score_adj[i] < selected_oom_score_adj[max_selected_oom_idx])
						max_selected_oom_idx = i;
					else if (selected_oom_score_adj[i] == selected_oom_score_adj[max_selected_oom_idx] &&
						selected_tasksize[i] < selected_tasksize[max_selected_oom_idx])
						max_selected_oom_idx = i;
				}
			}

			lowmem_print(2, "oom: max_selected_oom_idx(%d) select %d (%s), adj %d, \
					size %d, to kill\n",
				max_selected_oom_idx, p->pid, p->comm, oom_score_adj, tasksize);
		}
#else
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "oom: select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#endif
	}
#ifdef MULTIPLE_OOM_KILLER
	for (i = 0; i < OOM_DEPTH; i++) {
		if (selected[i]) {
			lowmem_print(1, "oom: send sigkill to %d (%s), adj %d,\
				     size %d\n",
				     selected[i]->pid, selected[i]->comm,
				     selected_oom_score_adj[i],
				     selected_tasksize[i]);
			send_sig(SIGKILL, selected[i], 0);
			rem -= selected_tasksize[i];
			*freed += (unsigned long)selected_tasksize[i];
#ifdef OOM_COUNT_READ
			oom_count++;
#endif

		}
	}
#else
	if (selected) {
		lowmem_print(1, "oom: send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		*freed += (unsigned long)selected_tasksize;
#ifdef OOM_COUNT_READ
		oom_count++;
#endif
	}
#endif
	read_unlock(&tasklist_lock);

#ifdef CONFIG_ZRAM_FOR_ANDROID
	atomic_set(&s_reclaim.lmk_running, 0);
#endif

	lowmem_print(2, "oom: get memory %lu", *freed);
	return rem;
}
Example #24
static inline int pm8058_can_print(void)
{
	return __ratelimit(&pm8058_msg_ratelimit);
}
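The pm8058_msg_ratelimit state tested here is defined elsewhere in the driver. A hedged sketch of how such a helper is typically declared and consumed (the interval, burst, and message below are assumptions, not the driver's actual values):

/* Declared once at file scope: */
static DEFINE_RATELIMIT_STATE(pm8058_msg_ratelimit,
			      DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

/* Used at a call site to guard a noisy message (hypothetical): */
if (pm8058_can_print())
	pr_err("pm8058: unexpected interrupt status\n");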
Example #25
/*
 * All net warning printk()s should be guarded by this function.
 */
int net_ratelimit(void)
{
	return __ratelimit(&net_ratelimit_state);
}
Example #26
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	int i;
	struct intr_data irqdata;
	char linebuf[128];
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
	struct wcd9xxx_core_resource *wcd9xxx_res = data;
	int num_irq_regs = wcd9xxx_res->num_irq_regs;
	u8 status[num_irq_regs], status1[num_irq_regs];

	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
		dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}

	if (!wcd9xxx_res->codec_bulk_read) {
		dev_err(wcd9xxx_res->dev,
				"%s: Codec Bulk Register read callback not supplied\n",
			   __func__);
		goto err_disable_irq;
	}

	ret = wcd9xxx_res->codec_bulk_read(wcd9xxx_res,
				WCD9XXX_A_INTR_STATUS0,
				num_irq_regs, status);

	if (ret < 0) {
		dev_err(wcd9xxx_res->dev,
				"Failed to read interrupt status: %d\n", ret);
		goto err_disable_irq;
	}

	
	for (i = 0; i < num_irq_regs; i++)
		status[i] &= ~wcd9xxx_res->irq_masks_cur[i];

	memcpy(status1, status, sizeof(status1));

	for (i = 0; i < wcd9xxx_res->intr_table_size; i++) {
		irqdata = wcd9xxx_res->intr_table[i];
		if (status[BIT_BYTE(irqdata.intr_num)] &
			BYTE_BIT_MASK(irqdata.intr_num)) {
			wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
			status1[BIT_BYTE(irqdata.intr_num)] &=
					~BYTE_BIT_MASK(irqdata.intr_num);
		}
	}

	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
		if (__ratelimit(&ratelimit)) {
			pr_warn("%s: Unhandled irq found\n", __func__);
			hex_dump_to_buffer(status, sizeof(status), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status0 : %s\n", __func__, linebuf);
			hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status1 : %s\n", __func__, linebuf);
		}

		memset(status, 0xff, num_irq_regs);

		wcd9xxx_res->codec_bulk_write(wcd9xxx_res,
				WCD9XXX_A_INTR_CLEAR0,
				num_irq_regs, status);
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			wcd9xxx_res->codec_reg_write(wcd9xxx_res,
					WCD9XXX_A_INTR_MODE, 0x02);
	}
	wcd9xxx_unlock_sleep(wcd9xxx_res);

	return IRQ_HANDLED;

err_disable_irq:
		dev_err(wcd9xxx_res->dev,
				"Disable irq %d\n", wcd9xxx_res->irq);

		disable_irq_wake(wcd9xxx_res->irq);
		disable_irq_nosync(wcd9xxx_res->irq);
		wcd9xxx_unlock_sleep(wcd9xxx_res);
		return IRQ_NONE;
}
Example #27
static void diag_read_hsic_work_fn(struct work_struct *work)
{
	unsigned char *buf_in_hsic = NULL;
	int num_reads_submitted = 0;
	int err = 0;
	int write_ptrs_available;
	struct diag_hsic_dev *hsic_struct = container_of(work,
				struct diag_hsic_dev, diag_read_hsic_work);
	int index = hsic_struct->id;
	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);

	if (!diag_hsic[index].hsic_ch) {
		pr_err("DIAG in %s: diag_hsic[index].hsic_ch == 0\n", __func__);
		return;
	}

	/*
	 * Determine the current number of available buffers for writing after
	 * reading from the HSIC has completed.
	 */
	if (driver->logging_mode == MEMORY_DEVICE_MODE)
		write_ptrs_available = diag_hsic[index].poolsize_hsic_write -
					diag_hsic[index].
						num_hsic_buf_tbl_entries;
	else
		write_ptrs_available = diag_hsic[index].poolsize_hsic_write -
					diag_hsic[index].count_hsic_write_pool;

	/*
	 * Queue up a read on the HSIC for all available buffers in the
	 * pool, exhausting the pool.
	 */
	do {
		/*
		 * If no more write buffers are available,
		 * stop queuing reads
		 */
		if (write_ptrs_available <= 0)
			break;

		write_ptrs_available--;

		/*
		 * No sense queuing a read if the HSIC bridge was
		 * closed in another thread
		 */
		if (!diag_hsic[index].hsic_ch)
			break;

		buf_in_hsic = diagmem_alloc(driver, READ_HSIC_BUF_SIZE,
							index+POOL_TYPE_HSIC);
		if (buf_in_hsic) {
			/*
			 * Initiate the read from the HSIC.  The HSIC read is
			 * asynchronous.  Once the read is complete the read
			 * callback function will be called.
			 */
			pr_debug("diag: read from HSIC\n");
			num_reads_submitted++;
			err = diag_bridge_read(hsic_data_bridge_map[index],
					       (char *)buf_in_hsic,
					       READ_HSIC_BUF_SIZE);
			if (err) {
				num_reads_submitted--;

				/* Return the buffer to the pool */
				diagmem_free(driver, buf_in_hsic,
						index+POOL_TYPE_HSIC);

				if (__ratelimit(&rl))
					pr_err("diag: Error initiating HSIC read, err: %d\n",
					err);
				/*
				 * An error occurred, discontinue queuing
				 * reads
				 */
				break;
			}
		}
	} while (buf_in_hsic);

	/*
	 * If there are read buffers available and for some reason the
	 * read was not queued, and if no unrecoverable error occurred
	 * (-ENODEV is an unrecoverable error), then set up the next read
	 */
	if ((diag_hsic[index].count_hsic_pool <
		diag_hsic[index].poolsize_hsic) &&
		(num_reads_submitted == 0) && (err != -ENODEV) &&
		(diag_hsic[index].hsic_ch != 0))
		queue_work(diag_bridge[index].wq,
				 &diag_hsic[index].diag_read_hsic_work);
}
Example #28
static void diag_hsic_read_complete_callback(void *ctxt, char *buf,
					int buf_size, int actual_size)
{
	int err = 0;
	int index = (int)ctxt;
	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);

	if (!diag_hsic[index].hsic_ch) {
		/*
		 * The HSIC channel is closed. Return the buffer to
		 * the pool.  Do not send it on.
		 */
		diagmem_free(driver, buf, index+POOL_TYPE_HSIC);
		pr_debug("diag: In %s: hsic_ch == 0, actual_size: %d\n",
			__func__, actual_size);
		return;
	}

	/*
	 * Note that zero length is valid and still needs to be sent to
	 * the USB only when we are logging data to the USB
	 */
	if ((actual_size > 0) ||
		((actual_size == 0) && (driver->logging_mode == USB_MODE))) {
		if (!buf) {
			pr_err("diag: Out of diagmem for HSIC\n");
		} else {
			/*
			 * Send data in buf to be written on the
			 * appropriate device, e.g. USB MDM channel
			 */
			diag_bridge[index].write_len = actual_size;
			err = diag_device_write((void *)buf, index+HSIC_DATA,
									NULL);
			/* If an error, return buffer to the pool */
			if (err) {
				diagmem_free(driver, buf, index +
							POOL_TYPE_HSIC);
				if (__ratelimit(&rl))
					pr_err("diag: In %s, error calling diag_device_write, err: %d\n",
					__func__, err);
			}
		}
	} else {
		/*
		 * The buffer has an error status associated with it. Do not
		 * pass it on. Note that -ENOENT is sent when the diag bridge
		 * is closed.
		 */
		diagmem_free(driver, buf, index+POOL_TYPE_HSIC);
		pr_debug("diag: In %s: error status: %d\n", __func__,
			actual_size);
	}

	/*
	 * Actual Size is a negative error value when read complete
	 * fails. Don't queue a read in this case. Doing so will not let
	 * HSIC to goto suspend.
	 *
	 * Queue another read only when the read completes successfully
	 * and Diag is either in Memory device mode or USB is connected.
	 */
	if (actual_size >= 0 && (driver->logging_mode == MEMORY_DEVICE_MODE ||
				 diag_bridge[index].usb_connected)) {
		queue_work(diag_bridge[index].wq,
				 &diag_hsic[index].diag_read_hsic_work);
	}
}
Example #29
int printk_ratelimit(void)
{
	return __ratelimit(&printk_ratelimit_state);
}
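printk_ratelimit() shares a single global ratelimit_state among all of its callers, so it is only a coarse guard compared with the per-site DEFINE_RATELIMIT_STATE used in most of the other examples. A typical (hypothetical) call site looks like:

if (printk_ratelimit())
	printk(KERN_WARNING "something noisy happened\n");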
Example #30
static int android_oom_handler(struct notifier_block *nb,
				      unsigned long val, void *data)
{
	struct task_struct *tsk;
#ifdef MULTIPLE_OOM_KILLER
	struct task_struct *selected[OOM_DEPTH] = {NULL,};
#else
	struct task_struct *selected = NULL;
#endif
	int rem = 0;
	int tasksize;
	int i;
	int min_score_adj = OOM_SCORE_ADJ_MAX + 1;
#ifdef MULTIPLE_OOM_KILLER
	int selected_tasksize[OOM_DEPTH] = {0,};
	int selected_oom_score_adj[OOM_DEPTH] = {OOM_ADJUST_MAX,};
	int all_selected_oom = 0;
	int max_selected_oom_idx = 0;
#else
	int selected_tasksize = 0;
	int selected_oom_score_adj;
#endif
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL/5, 1);
#endif

	unsigned long *freed = data;
#if defined(CONFIG_CMA_PAGE_COUNTING)
	unsigned long nr_cma_free;
	unsigned long nr_cma_inactive_file;
	unsigned long nr_cma_active_file;
	int other_free;
	int other_file;

	nr_cma_free = global_page_state(NR_FREE_CMA_PAGES);
	other_free = global_page_state(NR_FREE_PAGES) - nr_cma_free;

	nr_cma_inactive_file = global_page_state(NR_CMA_INACTIVE_FILE);
	nr_cma_active_file = global_page_state(NR_CMA_ACTIVE_FILE);
	other_file = global_page_state(NR_FILE_PAGES) -
					global_page_state(NR_SHMEM) -
					total_swapcache_pages -
					nr_cma_inactive_file -
					nr_cma_active_file;
#endif

	/* show status */
	pr_warning("%s invoked Android-oom-killer: "
		"oom_adj=%d, oom_score_adj=%d\n",
		current->comm, current->signal->oom_adj,
		current->signal->oom_score_adj);
#ifdef CONFIG_SEC_DEBUG_LMK_MEMINFO
	if (__ratelimit(&oom_rs)) {
		dump_stack();
		show_mem(SHOW_MEM_FILTER_NODES);
		dump_tasks_info();
	}
#endif

	min_score_adj = 0;
#ifdef MULTIPLE_OOM_KILLER
	for (i = 0; i < OOM_DEPTH; i++)
		selected_oom_score_adj[i] = min_score_adj;
#else
	selected_oom_score_adj = min_score_adj;
#endif

	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		struct task_struct *p;
		int oom_score_adj;
#ifdef MULTIPLE_OOM_KILLER
		int is_exist_oom_task = 0;
#endif

		if (tsk->flags & PF_KTHREAD)
			continue;

		p = find_lock_task_mm(tsk);
		if (!p)
			continue;

		oom_score_adj = p->signal->oom_score_adj;
		if (oom_score_adj < min_score_adj) {
			task_unlock(p);
			continue;
		}
		tasksize = get_mm_rss(p->mm);
		task_unlock(p);
		if (tasksize <= 0)
			continue;

		lowmem_print(2, "oom: ------ %d (%s), adj %d, size %d\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#ifdef MULTIPLE_OOM_KILLER
		if (all_selected_oom < OOM_DEPTH) {
			for (i = 0; i < OOM_DEPTH; i++) {
				if (!selected[i]) {
					is_exist_oom_task = 1;
					max_selected_oom_idx = i;
					break;
				}
			}
		} else if (selected_oom_score_adj[max_selected_oom_idx] < oom_score_adj ||
			(selected_oom_score_adj[max_selected_oom_idx] == oom_score_adj &&
			selected_tasksize[max_selected_oom_idx] < tasksize)) {
			is_exist_oom_task = 1;
		}

		if (is_exist_oom_task) {
			selected[max_selected_oom_idx] = p;
			selected_tasksize[max_selected_oom_idx] = tasksize;
			selected_oom_score_adj[max_selected_oom_idx] = oom_score_adj;

			if (all_selected_oom < OOM_DEPTH)
				all_selected_oom++;

			if (all_selected_oom == OOM_DEPTH) {
				for (i = 0; i < OOM_DEPTH; i++) {
					if (selected_oom_score_adj[i] < selected_oom_score_adj[max_selected_oom_idx])
						max_selected_oom_idx = i;
					else if (selected_oom_score_adj[i] == selected_oom_score_adj[max_selected_oom_idx] &&
						selected_tasksize[i] < selected_tasksize[max_selected_oom_idx])
						max_selected_oom_idx = i;
				}
			}

			lowmem_print(2, "oom: max_selected_oom_idx(%d) select %d (%s), adj %d, \
					size %d, to kill\n",
				max_selected_oom_idx, p->pid, p->comm, oom_score_adj, tasksize);
		}
#else
		if (selected) {
			if (oom_score_adj < selected_oom_score_adj)
				continue;
			if (oom_score_adj == selected_oom_score_adj &&
			    tasksize <= selected_tasksize)
				continue;
		}
		selected = p;
		selected_tasksize = tasksize;
		selected_oom_score_adj = oom_score_adj;
		lowmem_print(2, "oom: select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_score_adj, tasksize);
#endif
	}
#ifdef MULTIPLE_OOM_KILLER
	for (i = 0; i < OOM_DEPTH; i++) {
		if (selected[i]) {
#if defined(CONFIG_CMA_PAGE_COUNTING)
			lowmem_print(1, "oom: send sigkill to %d (%s), adj %d, "
				"size %d ofree %d ofile %d "
				"cma_free %lu cma_i_file %lu cma_a_file %lu\n",
				selected[i]->pid, selected[i]->comm,
				selected_oom_score_adj[i],
				selected_tasksize[i],
				other_free, other_file,
				nr_cma_free, nr_cma_inactive_file, nr_cma_active_file);
#else
			lowmem_print(1, "oom: send sigkill to %d (%s), adj %d,\
				     size %d\n",
				     selected[i]->pid, selected[i]->comm,
				     selected_oom_score_adj[i],
				     selected_tasksize[i]);
#endif
			send_sig(SIGKILL, selected[i], 0);
			rem -= selected_tasksize[i];
			*freed += (unsigned long)selected_tasksize[i];
#ifdef OOM_COUNT_READ
			oom_count++;
#endif

		}
	}
#else
	if (selected) {
		lowmem_print(1, "oom: send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
		send_sig(SIGKILL, selected, 0);
		set_tsk_thread_flag(selected, TIF_MEMDIE);
		rem -= selected_tasksize;
		*freed += (unsigned long)selected_tasksize;
#ifdef OOM_COUNT_READ
		oom_count++;
#endif
	}
#endif
	read_unlock(&tasklist_lock);

	lowmem_print(2, "oom: get memory %lu", *freed);
	return rem;
}