Example #1
std::vector<DexType*> find(const Scope& scope, const GetNewSpec& get_new_spec) {
  // Compute what the new prototypes will be after we convert a method, and
  // check them against existing methods and against other prototypes created
  // earlier by this function.
  std::vector<DexType*> result;
  for (const DexClass* cls : scope) {
    std::unordered_set<DexMethodSpec> new_specs;
    for (const DexMethod* m : cls->get_dmethods()) {
      if (is_init(m)) {
        std::vector<DexType*> unsafe_refs;
        const auto& new_spec = get_new_spec(m, &unsafe_refs);
        if (new_spec) {
          const auto& pair = new_specs.emplace(*new_spec);
          bool already_there = !pair.second;
          if (already_there || DexMethod::get_method(*new_spec)) {
            always_assert_log(
                !unsafe_refs.empty(),
                "unsafe_refs should be filled with the types that will be "
                "replaced on this <init> method's prototype");
            result.insert(result.end(), unsafe_refs.begin(), unsafe_refs.end());
          }
        }
      }
    }
  }
  return result;
}
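
The Redex examples in this listing (this one and Examples #6, #8, #12, #13) use is_init as a constructor test on Dalvik methods. A minimal sketch of what such a predicate could look like, assuming Redex's convention that instance constructors are the direct methods named "<init>"; this helper is illustrative, not the project's exact definition:

#include <cstring>

// Illustrative sketch: in Dalvik bytecode an instance constructor is a
// direct method named "<init>", so the test reduces to a name comparison.
inline bool is_init(const DexMethodRef* method) {
  return strcmp(method->get_name()->c_str(), "<init>") == 0;
}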
Example #2
    cv::Mat SizeupOP::execute_current(const cv::Mat& img,
                                      const vector<string>& fields) {
        cv::Mat ret;
        if (!is_init()) {
            LOG(ERROR) << "SizeupOp is not initialized";
        } else {
            int size = size_;

            if (size_fno_ > 0 && size_fno_ <= fields.size()) {
                size = std::stoi(fields[size_fno_ - 1]);
            }

            if (size <= 0) {
                LOG(ERROR) << "Invalid size (" << size <<"): " << get_key(fields);
            } else {
                double scale = 1.0 * size / max(img.rows, img.cols);
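                // Adding 0.5 before truncating to int rounds to the nearest pixel.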
                cv::resize(img, ret,
                           cv::Size((int)(img.cols * scale + 0.5),
                                    (int)(img.rows * scale + 0.5)),
                           cv::INTER_CUBIC);
            }
        }

        return ret;
    }
Example #3
    cv::Mat SaveOP::execute_current(const cv::Mat& img,
                                    const vector<string>& fields) {
        if (!is_init()) {
            LOG(ERROR) << "Failed to save image: NULL db";
        } else {
            string key = key_;
            if (key_fno_ > 0 && key_fno_ <= fields.size()) {
                key = fields[key_fno_ - 1];
            }

            if (key.empty()) {
                LOG(ERROR) << "No save key for image: " << get_key(fields);
            } else {
                std::size_t last_dot = key.rfind(".");
                string ext;
                if (last_dot != string::npos) {
                    ext = key.substr(last_dot);
                }
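                // Note: cv::imencode derives the codec from the extension; if
                // the key has no dot, ext stays empty and the encode will fail.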
                vector<unsigned char> img_content;
                cv::imencode(ext, img, img_content);
                writer_->put(key, img_content);
            }
        }

        return img;
    }
Example #4
 void SaveOP::flush() {
     if (is_init()) {
         writer_->flush();
         delete writer_;
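         // writer_ is owned by the op; having flushed and deleted it, take a
         // fresh writer from db_ for subsequent puts.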
         writer_ = db_->new_writer();
     }
 }
Example #5
/*
 * Signal sysrq helper function.  Sends a signal to all user processes.
 */
static void send_sig_all(int sig)
{
	struct task_struct *p;

	for_each_process(p) {
		if (p->mm && !is_init(p))
			/* Not swapper, init nor kernel thread */
			force_sig(sig, p);
	}
}
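
In the kernel snippets here, is_init is instead a task predicate. A plausible sketch for the 2.6-era trees these examples come from, where init is identified as the process with PID 1 (illustrative; a given tree's definition may differ):

/* Illustrative sketch: init is the task with PID 1. */
static inline int is_init(struct task_struct *tsk)
{
	return tsk->pid == 1;
}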
Example #6
DexMethod* MethodCreator::make_static_from(DexString* name,
                                           DexProto* proto,
                                           DexMethod* meth,
                                           DexClass* target_cls) {
  assert(!(meth->get_access() & ACC_STATIC));
  assert(!is_init(meth) && !is_clinit(meth));
  auto smeth = DexMethod::make_method(target_cls->get_type(), name, proto);
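  // Move the instance method's code over; meth is left without a body.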
  smeth->make_concrete(
      meth->get_access() | ACC_STATIC, std::move(meth->get_code()), false);
  target_cls->add_method(smeth);
  return smeth;
}
Example #7
    bool SaveOP::init(const map<string, string>& config) {
        string db_url = map_get(config, "db");
        if (!db_url.empty()) {
            db_ = db::open_db(db_url, db::WRITE);
            if (db_) {
                writer_ = db_->new_writer();
            }
        }

        get_string_value(config, "key", key_fno_, key_);

        return is_init();
    }
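
For the image-pipeline OP classes, is_init() evidently reports whether init() acquired what the op needs. A hedged sketch for SaveOP, assuming the members shown above (db_, writer_) must both be non-null; the real class may define it differently:

    // Illustrative sketch: SaveOP is initialized once it holds an open DB
    // handle and a writer created from it.
    bool SaveOP::is_init() const {
        return db_ != nullptr && writer_ != nullptr;
    }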
Example #8
DexMethod* MethodCreator::make_static_from(DexString* name,
                                           DexProto* proto,
                                           DexMethod* meth,
                                           DexClass* target_cls) {
  assert(!(meth->get_access() & ACC_STATIC));
  assert(!is_init(meth) && !is_clinit(meth));
  auto smeth = DexMethod::make_method(target_cls->get_type(), name, proto);
  smeth->make_concrete(
      meth->get_access() | ACC_STATIC, meth->get_code(), false);
  insert_sorted(target_cls->get_dmethods(), smeth, compare_dexmethods);
  meth->set_code(nullptr);
  return smeth;
}
Example #9
    cv::Mat GrayOP::execute_current(const cv::Mat& img,
                                    const vector<string>& fields) {
        cv::Mat ret;
        if (!is_init()) {
            LOG(ERROR) << "GrayOp is not initialized";
        } else {
            if (img.channels() >= 3) {
                cv::cvtColor(img, ret, cv::COLOR_BGR2GRAY);
            } else {
                ret = img;
            }
        }

        return ret;
    }
Example #10
void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe)
{
	/* Derived from fs/exec.c:compute_creds. */
	kernel_cap_t new_permitted, working;

	new_permitted = cap_intersect (bprm->cap_permitted, cap_bset);
	working = cap_intersect (bprm->cap_inheritable,
				 current->cap_inheritable);
	new_permitted = cap_combine (new_permitted, working);

	if (bprm->e_uid != current->uid || bprm->e_gid != current->gid ||
	    !cap_issubset (new_permitted, current->cap_permitted)) {
		current->mm->dumpable = suid_dumpable;

		if (unsafe & ~LSM_UNSAFE_PTRACE_CAP) {
			if (!capable(CAP_SETUID)) {
				bprm->e_uid = current->uid;
				bprm->e_gid = current->gid;
			}
			if (!capable (CAP_SETPCAP)) {
				new_permitted = cap_intersect (new_permitted,
							current->cap_permitted);
			}
		}
	}

	current->suid = current->euid = current->fsuid = bprm->e_uid;
	current->sgid = current->egid = current->fsgid = bprm->e_gid;

	/* For init, we want to retain the capabilities set
	 * in the init_task struct. Thus we skip the usual
	 * capability rules */
	if (!is_init(current)) {
		current->cap_permitted = new_permitted;
		current->cap_effective =
		    cap_intersect (new_permitted, bprm->cap_effective);
	}

	/* AUD: Audit candidate if current->cap_effective is set */

	current->keep_capabilities = 0;
}
Example #11
    cv::Mat RotateOP::execute_current(const cv::Mat& img,
                                      const vector<string>& fields) {
        cv::Mat ret;
        if (!is_init()) {
            LOG(ERROR) << "RotateOp is not initialized";
        } else {
            int angle = angle_;

            if (angle_fno_ > 0 && angle_fno_ <= fields.size()) {
                angle = std::stoi(fields[angle_fno_ - 1]);
            }

            cv::Point2f center(img.cols/2.0F, img.rows/2.0F);
            cv::Mat rot = getRotationMatrix2D(center, angle, 1.0);
            cv::Rect bbox = cv::RotatedRect(center, img.size(), angle).boundingRect();
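            // Shift the rotation so the result lands centered in the expanded
            // bounding box instead of being clipped to the original frame.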
            rot.at<double>(0, 2) += bbox.width/2.0 - center.x;
            rot.at<double>(1, 2) += bbox.height/2.0 - center.y;
            warpAffine(img, ret, rot, bbox.size(), cv::INTER_CUBIC);
        }

        return ret;
    }
Example #12
/**
 * Check whether a visibility/accessibility change would turn a method
 * referenced in a callee into a virtual method as the callee is inlined
 * into the caller. That is, once a callee is inlined, everything it
 * referenced must be visible and accessible in the caller's context.
 * This step would not be needed if we changed all private instance
 * methods to static.
 */
bool MultiMethodInliner::create_vmethod(DexInstruction* insn) {
  auto opcode = insn->opcode();
  if (opcode == OPCODE_INVOKE_DIRECT || opcode == OPCODE_INVOKE_DIRECT_RANGE) {
    auto method = static_cast<DexOpcodeMethod*>(insn)->get_method();
    method = resolver(method, MethodSearch::Direct);
    if (method == nullptr) {
      info.need_vmethod++;
      return true;
    }
    always_assert(method->is_def());
    if (is_init(method)) {
      if (!method->is_concrete() && !is_public(method)) {
        info.non_pub_ctor++;
        return true;
      }
      // concrete ctors we can handle because they stay invoke_direct
      return false;
    }
    info.need_vmethod++;
    return true;
  }
  return false;
}
Example #13
// Check that visibility / accessibility changes to the current method
// won't need to change a referenced method into a virtual or static one.
bool gather_invoked_methods_that_prevent_relocation(
    const DexMethod* method,
    std::unordered_set<DexMethodRef*>* methods_preventing_relocation) {
  auto code = method->get_code();
  always_assert(code);

  bool can_relocate = true;
  for (const auto& mie : InstructionIterable(code)) {
    auto insn = mie.insn;
    auto opcode = insn->opcode();
    if (is_invoke(opcode)) {
      auto meth = resolve_method(insn->get_method(), opcode_to_search(insn));
      if (!meth && opcode == OPCODE_INVOKE_VIRTUAL &&
          unknown_virtuals::is_method_known_to_be_public(insn->get_method())) {
        continue;
      }
      if (meth) {
        always_assert(meth->is_def());
        if (meth->is_external() && !is_public(meth)) {
          meth = nullptr;
        } else if (opcode == OPCODE_INVOKE_DIRECT && !is_init(meth)) {
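          // An invoke-direct on a non-constructor targets a private helper;
          // after relocation it would have to become virtual or static.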
          meth = nullptr;
        }
      }
      if (!meth) {
        can_relocate = false;
        if (!methods_preventing_relocation) {
          break;
        }
        methods_preventing_relocation->emplace(insn->get_method());
      }
    }
  }

  return can_relocate;
}
Example #14
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
			      unsigned long address)
{
	struct vm_area_struct * vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	const int field = sizeof(unsigned long) * 2;
	siginfo_t info;

#if 0
	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", smp_processor_id(),
	       current->comm, current->pid, field, address, write,
	       field, regs->cp0_epc);
#endif

	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
	}

survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, write)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
#if 0
		printk("do_page_fault() #2: sending SIGSEGV to %s for "
		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       field, address,
		       field, (unsigned long) regs->cp0_epc,
		       field, (unsigned long) regs->regs[31]);
#endif
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
	       "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
	       smp_processor_id(), field, address, field, regs->cp0_epc,
	       field,  regs->regs[31]);
	die("Oops", regs);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	else
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
#if 0
		printk("do_page_fault() #3: sending SIGBUS to %s for "
		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
		       tsk->comm,
		       write ? "write access to" : "read access from",
		       field, address,
		       field, (unsigned long) regs->cp0_epc,
		       field, (unsigned long) regs->regs[31]);
#endif
	tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &info, tsk);

	return;
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) pgd_current[smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
}
Example #15
    bool SizeupOP::init(const map<string, string>& config) {
        get_int_value(config, "size", size_fno_, size_);

        return is_init();
    }
Example #16
    bool RotateOP::init(const map<string, string>& config) {
        get_int_value(config, "angle", angle_fno_, angle_);

        return is_init();
    }
Example #17
/*
 * This routine handles page faults.  It determines the problem, and
 * then passes it off to one of the appropriate routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *
 * If this routine detects a bad access, it returns 1, otherwise it
 * returns 0.
 */
int do_page_fault(struct pt_regs *regs, unsigned long address,
			      unsigned long error_code)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	int write, fault;

#ifdef DEBUG
	printk ("do page fault:\nregs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
		regs->sr, regs->pc, address, error_code,
		current->mm->pgd);
#endif

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto map_err;
	if (vma->vm_flags & VM_IO)
		goto acc_err;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto map_err;
	if (user_mode(regs)) {
		/* Accessing the stack below usp is always a bug.  The
		   "+ 256" is there due to some instructions doing
		   pre-decrement on the stack and that doesn't show up
		   until later.  */
		if (address + 256 < rdusp())
			goto map_err;
	}
	if (expand_stack(vma, address))
		goto map_err;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
#ifdef DEBUG
	printk("do_page_fault: good_area\n");
#endif
	write = 0;
	switch (error_code & 3) {
		default:	/* 3: write, present */
			/* fall through */
		case 2:		/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto acc_err;
			write++;
			break;
		case 1:		/* read, present */
			goto acc_err;
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
				goto acc_err;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

 survive:
	fault = handle_mm_fault(mm, vma, address, write);
#ifdef DEBUG
	printk("handle_mm_fault returns %d\n",fault);
#endif
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto bus_err;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;

	up_read(&mm->mmap_sem);
	return 0;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}

	printk("VM: killing process %s\n", current->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);

no_context:
	current->thread.signo = SIGBUS;
	current->thread.faddr = address;
	return send_fault_sig(regs);

bus_err:
	current->thread.signo = SIGBUS;
	current->thread.code = BUS_ADRERR;
	current->thread.faddr = address;
	goto send_sig;

map_err:
	current->thread.signo = SIGSEGV;
	current->thread.code = SEGV_MAPERR;
	current->thread.faddr = address;
	goto send_sig;

acc_err:
	current->thread.signo = SIGSEGV;
	current->thread.code = SEGV_ACCERR;
	current->thread.faddr = address;

send_sig:
	up_read(&mm->mmap_sem);
	return send_fault_sig(regs);
}
Example #18
File: trap.c Project: ivucica/linux
/* Note this is constrained to return 0, -EFAULT, -EACCES, -ENOMEM by segv(). */
int handle_page_fault(unsigned long address, unsigned long ip,
		      int is_write, int is_user, int *code_out)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int err = -EFAULT;

	*code_out = SEGV_MAPERR;

	/* If the fault was during atomic operation, don't take the fault, just
	 * fail. */
	if (in_atomic())
		goto out_nosemaphore;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if(!vma)
		goto out;
	else if(vma->vm_start <= address)
		goto good_area;
	else if(!(vma->vm_flags & VM_GROWSDOWN))
		goto out;
	else if(is_user && !ARCH_IS_STACKGROW(address))
		goto out;
	else if(expand_stack(vma, address))
		goto out;

good_area:
	*code_out = SEGV_ACCERR;
	if(is_write && !(vma->vm_flags & VM_WRITE))
		goto out;

	/* Don't require VM_READ|VM_EXEC for write faults! */
	if(!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
		goto out;

	do {
survive:
		switch (handle_mm_fault(mm, vma, address, is_write)){
		case VM_FAULT_MINOR:
			current->min_flt++;
			break;
		case VM_FAULT_MAJOR:
			current->maj_flt++;
			break;
		case VM_FAULT_SIGBUS:
			err = -EACCES;
			goto out;
		case VM_FAULT_OOM:
			err = -ENOMEM;
			goto out_of_memory;
		default:
			BUG();
		}
		pgd = pgd_offset(mm, address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
	} while(!pte_present(*pte));
	err = 0;
	/* The below warning was added in place of
	 *	pte_mkyoung(); if (is_write) pte_mkdirty();
	 * If it's triggered, we'd see normally a hang here (a clean pte is
	 * marked read-only to emulate the dirty bit).
	 * However, the generic code can mark a PTE writable but clean on a
	 * concurrent read fault, triggering this harmlessly. So comment it out.
	 */
#if 0
	WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
#endif
	flush_tlb_page(vma, address);
out:
	up_read(&mm->mmap_sem);
out_nosemaphore:
	return(err);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	if (is_init(current)) {
		up_read(&mm->mmap_sem);
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	goto out;
}
Example #19
 bool GrayOP::init(const map<string, string>& config) {
     return is_init();
 }