Example #1
0
File: taint.c  Project: brainsmoke/minemu
/*
 * Map a file descriptor to the taint value that data read from it
 * should carry.  Classifies unknown descriptors on first use via
 * fstat64 (regular file vs. everything else) and upgrades FD_FILE to
 * trusted/untrusted before deciding.
 */
static int taint_val(int fd)
{
	char *fd_type = get_thread_ctx()->files->fd_type;

	/* descriptors outside the tracked table are never tainted */
	if ( (fd < 0) || (fd > 1023) )
		return TAINT_CLEAR;

	/* lazily classify the descriptor the first time we see it */
	if ( fd_type[fd] == FD_UNKNOWN )
	{
		struct kernel_stat64 s;
		int is_regular = ( sys_fstat64(fd, &s) >= 0 ) &&
		                 ( (s.st_mode & __S_IFMT) == __S_IFREG );
		fd_type[fd] = is_regular ? FD_FILE : FD_SOCKET;
	}

	/* regular files are split into trusted and untrusted */
	if ( fd_type[fd] == FD_FILE )
		fd_type[fd] = is_trusted_file(fd) ? FD_TRUSTED_FILE : FD_UNTRUSTED_FILE;

	switch ( fd_type[fd] )
	{
		case FD_SOCKET:         return TAINT_SOCKET;
		case FD_UNTRUSTED_FILE: return TAINT_FILE;
		default:                return TAINT_CLEAR;
	}
}
Example #2
0
/**
 * Pthread-based implementation for timeout function.
 *
 * Starts the per-context timeout counter thread if it is not already
 * running and assigns it a fresh timer id.
 *
 * Fixes over the previous version:
 *  - the context mutex is released on *every* exit path (it used to be
 *    leaked when pthread_create() failed, deadlocking later callers);
 *  - malloc() failure is handled instead of passing NULL to
 *    pthread_create();
 *  - on failure the stale handle is freed and reset to NULL so a later
 *    call does not mistake it for a running thread.
 *
 * @param ctx plugin context
 * @return timer id of the timeout thread, or 0 on failure
 */
static int timer_count_timeout(Context *ctx)
{
	static int last_id = 0;

	plugin_pthread_ctx_lock(ctx);

	/* cancel and reap any thread counting a previous timeout */
	timer_reset_timeout(ctx);
	ThreadContext *thread_ctx = get_thread_ctx(ctx);

	if (thread_ctx->timeout_thread == NULL) {
		thread_ctx->timeout_thread = malloc(sizeof(pthread_t));
		if (thread_ctx->timeout_thread == NULL) {
			DEBUG("ERROR timer: malloc failed for timeout thread handle");
			plugin_pthread_ctx_unlock(ctx);
			return 0;
		}

		ctx->timeout_action.id = ++last_id;

		DEBUG("timer: Creating timeout counter thread id %d, time: %d",
		      ctx->timeout_action.id,
		      ctx->timeout_action.timeout);

		int return_code = pthread_create(thread_ctx->timeout_thread,
						 NULL, timer_run, (void *) ctx);

		if (return_code) {
			DEBUG("ERROR timer: return code from "
			      "pthread_create() is %d", return_code);
			/* don't leave a stale handle: later calls would
			 * believe a timeout thread is already running */
			free(thread_ctx->timeout_thread);
			thread_ctx->timeout_thread = NULL;
			plugin_pthread_ctx_unlock(ctx);
			return 0;
		}
	}

	plugin_pthread_ctx_unlock(ctx);
	return ctx->timeout_action.id;
}
Example #3
0
/**
 * Blocks current thread and waits for timeout thread termination.
 * This plug-in feature is actually used by unit-testing only.
 *
 * Fix: guard against a missing timeout thread — the old code joined
 * through the handle unconditionally and dereferenced NULL when no
 * thread had been started (or it had already been reset).  Doc fix:
 * this function returns nothing; the old comment claimed a return.
 *
 * @param ctx plugin context
 */
static void timer_wait_for_timeout(Context *ctx)
{
	ThreadContext *thread_ctx = get_thread_ctx(ctx);

	/* nothing to wait for */
	if (thread_ctx == NULL || thread_ctx->timeout_thread == NULL)
		return;

	DEBUG(" timer: Waiting for timeout thread termination.");
	pthread_join(*thread_ctx->timeout_thread, NULL);

	free(thread_ctx->timeout_thread);
	thread_ctx->timeout_thread = NULL;
}
Example #4
0
/**
 * Acquire the mutex guarding the given context.
 *
 * @param ctx the context that will be locked. NULL = lock GIL
 */
static void plugin_pthread_ctx_lock(Context *ctx)
{
	pthread_mutex_t *mutex =
		ctx ? &get_thread_ctx(ctx)->mutex : &gil;

	pthread_mutex_lock(mutex);
}
Example #5
0
/**
 * Finalize thread context: stop any pending timeout thread, destroy the
 * pthread mutex and its attributes, and release the multithread state.
 *
 * @param ctx current context; NULL is a no-op.
 */
static void plugin_pthread_ctx_finalize(Context *ctx)
{
	if (ctx == NULL)
		return;

	/* make sure no timeout thread is left running on this context */
	timer_reset_timeout(ctx);

	ThreadContext *tc = get_thread_ctx(ctx);
	pthread_mutexattr_destroy(&tc->mutex_attr);
	pthread_mutex_destroy(&tc->mutex);

	free(ctx->multithread);
	ctx->multithread = NULL;
}
Example #6
0
/**
 * Reset timeout counter state: cancel and reap the timeout thread, if
 * one is running, and clear its handle.
 * This method locks the communication layer thread.
 * @param context
 */
static void timer_reset_timeout(Context *ctx)
{
	plugin_pthread_ctx_lock(ctx);

	ThreadContext *tc = get_thread_ctx(ctx);
	pthread_t *worker = (tc != NULL) ? tc->timeout_thread : NULL;

	if (worker != NULL) {
		DEBUG(" timer: Reseting timeout thread ");

		/* cancel the counter and wait until it is really gone */
		pthread_cancel(*worker);
		pthread_join(*worker, NULL);

		free(worker);
		tc->timeout_thread = NULL;
	}

	plugin_pthread_ctx_unlock(ctx);
}
Example #7
0
File: taint.c  Project: brainsmoke/minemu
/*
 * Propagate taint after a successful syscall.  Called with the
 * syscall's return value and its original arguments: marks memory
 * written by read-like calls as tainted and keeps the per-thread fd
 * type table in sync for calls that create or duplicate descriptors.
 */
void do_taint(long ret, long call, long arg1, long arg2, long arg3, long arg4, long arg5, long arg6)
{
	/* a failed syscall wrote nothing user-visible */
	if (ret < 0)
		return;

	switch (call)
	{
		case __NR_read:
			/* buffer at arg2, `ret` bytes actually read from fd arg1 */
			taint_mem((char *)arg2, ret, taint_val(arg1));
			return;
		case __NR_readv:
			/* arg2 = iovec array, arg3 = iovec count, ret = bytes read */
			taint_iov( (struct iovec *)arg2, arg3, ret, taint_val(arg1));
			return;
		case __NR_open:
		case __NR_creat:
		case __NR_openat:
			set_fd(ret, FD_FILE);

			/* openat(2) takes the pathname as its second argument */
			if (strcmp((char *)(call == __NR_openat ? arg2 : arg1), "/proc/self/stat") == 0)
			{
				/* classify the fd now, then substitute fake stat contents */
				taint_val(ret);
				fake_proc_self_stat(ret);
			}
			return;
		case __NR_dup:
		case __NR_dup2:
			/* the duplicate inherits the original descriptor's type */
			set_fd( ret, get_thread_ctx()->files->fd_type[arg1]);
			return;
		case __NR_pipe:
			/* treat both pipe ends as untrusted (socket-like) sources */
			set_fd( ((long *)arg1)[0], FD_SOCKET);
			set_fd( ((long *)arg1)[1], FD_SOCKET);
			return;
		case __NR_socketcall:
		{
			/* socketcall(2) multiplexes: arg1 selects the operation,
			 * arg2 points at the real argument vector */
			long *sockargs = (long *)arg2;

			switch (arg1)
			{
				case SYS_GETPEERNAME:
					if ( (ret >= 0) && sockargs[1] && sockargs[2])
						taint_mem((char *)sockargs[1], *(long *)sockargs[2], TAINT_SOCKADDR);
					return;
				case SYS_ACCEPT:
					if ( (ret >= 0) && sockargs[1] && sockargs[2])
						taint_mem((char *)sockargs[1], *(long *)sockargs[2], TAINT_SOCKADDR);
					/* fall through: the accepted fd is a socket too */
				case SYS_SOCKET:
					set_fd(ret, FD_SOCKET);
					return;
				case SYS_RECV:
				case SYS_RECVFROM:
					taint_mem((char *)sockargs[1], ret, TAINT_SOCKET);
					return;
				case SYS_RECVMSG:
				{
					struct msghdr *msg = (struct msghdr *)sockargs[1];
					taint_iov( msg->msg_iov, msg->msg_iovlen, ret, TAINT_SOCKET );
					return;
				}
				default:
					return;
			}
		}
		default:
			return;
	}
}
Example #8
0
File: taint.c  Project: brainsmoke/minemu
/*
 * Record the descriptor type for fd in the per-thread fd type table.
 * Descriptors outside [0, 1023] are silently ignored (the table only
 * tracks that range).
 */
static void set_fd(int fd, int type)
{
	if ( fd >= 0 && fd <= 1023 )
		get_thread_ctx()->files->fd_type[fd] = type;
}
Example #9
0
/*
 * Dispatch one kernel event to its registered callback(s).  On Linux
 * the epoll event mask can carry both read and write bits, so both
 * tests run independently; on kqueue platforms the filter identifies
 * exactly one event kind, hence the else-if chain.  FreeBSD adds
 * signal and timer filters.
 */
static inline void
handle_event(torque_ctx *ctx,const kevententry *e){
#ifdef TORQUE_LINUX
	if(e->events & EVREAD){
#else
	if(e->filter == EVFILT_READ){
#endif
		handle_evsource_read(ctx->eventtables.fdarray,KEVENTENTRY_ID(e));
	}
#ifdef TORQUE_LINUX
	if(e->events & EVWRITE){
#else
	else if(e->filter == EVFILT_WRITE){
#endif
		handle_evsource_write(ctx->eventtables.fdarray,KEVENTENTRY_ID(e));
	}
#ifdef TORQUE_FREEBSD
	else if(e->filter == EVFILT_SIGNAL){
		handle_evsource_read(ctx->eventtables.sigarray,KEVENTENTRY_ID(e));
        }else if(e->filter == EVFILT_TIMER){
		timer_curry(KEVENTENTRY_IDPTR(e));
	}
#endif
}

/*
 * Common termination-signal callback.  On EVTHREAD_TERM/EVTHREAD_INT it
 * forwards the signal to the next thread in the ring, joins that
 * successor, snapshots this thread's rusage into its stats, tears down
 * the event handler, and exits the calling thread.  Other signals are
 * ignored here.
 */
void rxcommonsignal(int sig,void *cbstate){
	if(sig == EVTHREAD_TERM || sig == EVTHREAD_INT){
		const torque_ctx *ctx = cbstate;
		void *ret = PTHREAD_CANCELED;
		evhandler *e = get_thread_evh();
		struct rusage ru;
		int r;

		// There's no POSIX thread cancellation going on here, nor are
		// we terminating due to signal; we're catching the signal and
		// exiting from this thread only. The trigger signal might be
		// delivered to any one of our threads; if we're here, though,
		// we cannot be holding the efd. Progress is thus assured.
		pthread_kill(e->nexttid,sig);
		// We rely on EDEADLK to cut off our circular join()list
		if((r = pthread_join(e->nexttid,&ret)) && r != EDEADLK){
			ret = NULL;
		}
		// FIXME this is kind of lame. I'd like to emulate
		// RUSAGE_THREAD when it's unavailable, and either way the test
		// ought be based on whether RUSAGE_THREAD is *expected*, not
		// *defined* (Linux 2.6.26+) for future-proofing.
#ifdef RUSAGE_THREAD
		getrusage(RUSAGE_THREAD,&ru);
#else
		getrusage(RUSAGE_SELF,&ru);
#endif
		// convert the rusage timevals to microsecond totals
		e->stats.utimeus = ru.ru_utime.tv_sec * 1000000 + ru.ru_utime.tv_usec;
		e->stats.stimeus = ru.ru_stime.tv_sec * 1000000 + ru.ru_stime.tv_usec;
		e->stats.vctxsw = ru.ru_nvcsw;
		e->stats.ictxsw = ru.ru_nivcsw;
		destroy_evhandler(ctx,e);
		pthread_exit(ret); // FIXME need clean up stack
	}
}

#if defined(TORQUE_LINUX) && !defined(TORQUE_LINUX_SIGNALFD)
/* Without signalfd support, signal delivery only latches this flag; the
 * event loop polls it between kevent rounds via check_for_termination(). */
static sig_atomic_t sem_rxcommonsignal;

/* If a signal was latched by rxcommonsignal_handler(), clear the latch
 * and run the real handler on the event thread's own stack. */
static inline void
check_for_termination(void){
	if(sem_rxcommonsignal){
		int s;

		s = sem_rxcommonsignal;
		sem_rxcommonsignal = 0;
		rxcommonsignal(s,get_thread_ctx());
	}
}

/* Async-signal-safe handler: just record which signal arrived. */
static void
rxcommonsignal_handler(int sig,void *cbstate __attribute__ ((unused))){
	sem_rxcommonsignal = sig;
}
#else
/* With signalfd (or on kqueue), signals arrive as events; nothing to poll. */
#define check_for_termination(...)
#endif

/*
 * Main loop of an event-handling thread.  Publishes the handler and
 * context into thread-local storage, then loops forever: check for a
 * latched termination signal, fetch a batch of kernel events, and
 * dispatch each one while keeping per-thread statistics.  It never
 * returns normally; exit happens via rxcommonsignal()'s pthread_exit().
 */
void event_thread(torque_ctx *ctx,evhandler *e){
	tsd_evhandler = e;
	tsd_ctx = ctx;
	while(1){
		int events;

		check_for_termination();
		events = Kevent(e->evq->efd,NULL,0,PTR_TO_EVENTV(&e->evec),e->evec.vsizes);
		++e->stats.rounds;
		if(events < 0){
			// EINTR is an expected signal wakeup; anything else counts
			// as a poll error
			if(errno != EINTR){
				++e->stats.pollerr;
			}
			continue;
		}
		// walk the batch back-to-front; order is not significant here
		while(events--){
#ifdef TORQUE_LINUX
			handle_event(ctx,&PTR_TO_EVENTV(&e->evec)->events[events]);
#else
			handle_event(ctx,&PTR_TO_EVENTV(&e->evec)[events]);
#endif
			++e->stats.events;
		}
	}
}

/*
 * Allocate storage for n kernel events.  Note the #ifdef splits the
 * function *signature*: on Linux `struct kevent` holds separate
 * events/ctldata arrays, elsewhere a plain array of struct kevent is
 * allocated.  Returns 0 on success, -1 on allocation failure (with no
 * partial allocation left behind).
 */
static int
#ifdef TORQUE_LINUX
create_evector(struct kevent *kv,int n){
	if((kv->events = malloc(n * sizeof(*kv->events))) == NULL){
		return -1;
	}
	if((kv->ctldata = malloc(n * sizeof(*kv->ctldata))) == NULL){
		free(kv->events);
		return -1;
	}
	return 0;
#else
create_evector(struct kevent **kv,int n){
	if((*kv = malloc(n * sizeof(**kv))) == NULL){
		return -1;
	}
	return 0;
#endif
}

/*
 * Release storage obtained via create_evector(); mirrors its
 * platform-split signature.  Does not free the kv handle itself.
 */
static void
#ifdef TORQUE_LINUX
destroy_evector(struct kevent *kv){
	free(kv->events);
	free(kv->ctldata);
#else
destroy_evector(struct kevent **kv){
	free(*kv);
#endif
}

/*
 * Size and allocate an event-vector set for a handler.
 * Returns 0 on success, -1 when allocation fails.
 */
static int
init_evectors(evectors *ev){
	// We probably want about a half (small) page's worth...? FIXME
	ev->vsizes = 512;
	return create_evector(&ev->eventv,ev->vsizes) ? -1 : 0;
}
Example #10
0
/*
 * Emulated-syscall entry point.  Two-pass design: the first switch
 * routes calls that need taint tracking or pass-through straight to the
 * kernel (syscall_intr) and returns, while calls the emulator must
 * handle itself `break` into the second switch, which runs them with
 * signals blocked.
 */
long syscall_emu(long call, long arg1, long arg2, long arg3,
                            long arg4, long arg5, long arg6)
{
	long ret;
	/* pass 1: classify the call */
	switch (call)
	{
		/* handled by the emulator below, after signals are blocked */
 		case __NR_brk:
 		case __NR_mmap2:
 		case __NR_mmap:
 		case __NR_mremap:
 		case __NR_mprotect:
 		case __NR_madvise:

 		case __NR_sigaltstack:
 		case __NR_signal:
 		case __NR_sigaction:
		case __NR_sigreturn:
 		case __NR_rt_sigaction:
		case __NR_rt_sigreturn:

		case __NR_fork:
		case __NR_vfork:
		case __NR_clone:
		case __NR_exit:

		case __NR_execve:
		case __NR_exit_group:
			break;

		/* executed by the kernel, then taint-tracked via do_taint() */
		case __NR_read:
		case __NR_readv:
		case __NR_open:
		case __NR_creat:
		case __NR_dup:
		case __NR_dup2:
		case __NR_openat:
		case __NR_pipe:
		case __NR_socketcall:
			ret = syscall_intr(call,arg1,arg2,arg3,arg4,arg5,arg6);

			if ( taint_flag == TAINT_ON )
				do_taint(ret,call,arg1,arg2,arg3,arg4,arg5,arg6);

			return ret;

		/* only SHMAT needs emulation; other IPC ops go to the kernel */
 		case __NR_ipc:
			if ( arg1 == SHMAT )
				break;
			/* fall through */
		default:
			return syscall_intr(call,arg1,arg2,arg3,arg4,arg5,arg6);
	}

	/* pass 2: emulate.  If a signal is already in progress we bail out
	 * before doing anything; `ret = call` restores pre-syscall state. */
	ret = call;
	if (!try_block_signals())
		return ret; /* we have a signal in progress, revert to pre-syscall state */

	switch (call)
	{
		/* these calls are all non-blocking right?
		 * blocked signals during blocking calls is a bad thing
		 */
 		case __NR_brk:
			ret = user_brk(arg1);
			break;
 		case __NR_mmap2:
			ret = user_mmap2(arg1,arg2,arg3,arg4,arg5,arg6);
			break;
 		case __NR_mmap:
			ret = user_old_mmap((struct kernel_mmap_args *)arg1);
			break;
 		case __NR_mremap:
			ret = user_mremap(arg1,arg2,arg3,arg4,arg5);
			break;
 		case __NR_mprotect:
			ret = user_mprotect(arg1,arg2,arg3);
			break;
 		case __NR_madvise:
			ret = user_madvise(arg1,arg2,arg3);
			break;
 		case __NR_ipc:
			if (arg1 == SHMAT)
				ret = user_shmat(arg2,(char *)arg5,arg3,(unsigned long *)arg4);
			else
				die("should not have caught IPC call: %d", arg1);
			break;

 		case __NR_sigaltstack:
			ret = user_sigaltstack((stack_t *)arg1, (stack_t *)arg2);
			break;
 		case __NR_signal:
		{
			ret = (long)user_signal(arg1, (kernel_sighandler_t)arg2);
			break;
		}
 		case __NR_sigaction:
			ret = user_sigaction(arg1, (struct kernel_old_sigaction *)arg2,
			                           (struct kernel_old_sigaction *)arg3);
			break;
 		case __NR_rt_sigaction:
			ret = user_rt_sigaction(arg1, (struct kernel_sigaction *)arg2,
			                              (struct kernel_sigaction *)arg3, arg4);
			break;
 		case __NR_sigreturn:
			user_sigreturn();
			break;
 		case __NR_rt_sigreturn:
			user_rt_sigreturn();
			break;

		/* vfork is emulated as plain fork semantics */
		case __NR_vfork:
			ret = user_clone(SIGCHLD, 0, NULL, NULL, NULL);
//			ret = user_clone(CLONE_VFORK | CLONE_VM | SIGCHLD, 0, NULL, NULL, NULL);
			break;
		case __NR_fork:
			ret = user_clone(SIGCHLD, 0, NULL, NULL, NULL);
			break;
		case __NR_clone:
			ret = user_clone(arg1, arg2, (void *)arg3, (void *)arg4, (void*)arg5);
			break;
		case __NR_exit:
			user_exit(arg1);
			break;

		case __NR_execve:
			ret = user_execve((char *)arg1, (char **)arg2, (char **)arg3);
			break;
		case __NR_exit_group:
			if (dump_on_exit)
			{
				long regs[] = { call, arg2, arg3, arg1, get_thread_ctx()->user_esp, arg6, arg4, arg5 };
				do_taint_dump(regs);
			}
			/* NOTE(review): presumably sys_exit_group() never returns,
			 * so falling into default: below is unreachable — confirm */
			sys_exit_group(arg1);
		default:
			die("unimplemented syscall");
			break;
	}
	unblock_signals();
	return ret;
}