ThreadPool::~ThreadPool() {
  clearThreads();

  // release the worker thread objects owned by the pool
  for(unsigned int i=0; i<workThreads_.size(); i++) {
    delete workThreads_[i];
  }
}
Example #2
void XDbgProxy::onDbgConnect()
{
	// MutexGuard guard(this);

	clearThreads();
	addAllThreads(XDbgGetCurrentThreadId());
	delThread(_apiThread.getId());
	suspendAll(XDbgGetCurrentThreadId());

	DebugEventPacket event;
	DebugAckPacket ack;
	event.event.dwProcessId = XDbgGetCurrentProcessId();
	event.event.dwThreadId = getFirstThread();
	event.event.dwDebugEventCode = ATTACHED_EVENT; // this message is not forwarded to the debugger
	sendDbgEvent(event, ack, false);
	ignore_dbgstr = ack.args.ignore_dbgstr;
	inject_method = ack.args.inject_method;
	simu_attach_bp = ack.args.simu_attach_bp;

	sendProcessInfo(ack.dwThreadId);
	sendThreadInfo();
	sendModuleInfo(ack.dwThreadId);

	// simulate the attach breakpoint: spawn a thread that immediately hits DebugBreak
	if (simu_attach_bp && !ack.args.createProcess) {
		DWORD tid;
		HANDLE hThread = ::CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)DebugBreak, NULL, 0, &tid);
		CloseHandle(hThread);
	}

	resumeAll(XDbgGetCurrentThreadId());
}
Example #3
/**
 * @param val the current pgd-cr3 value
 *
 * The task list can be obtained in two ways.
 * 1. Using next_task_struct, which uses the global symbol for init_task as the starting point
 * 2. Using the current_task_struct structure. This should just "work", although there are a few
 *   special cases to consider. current_task_struct returns the actual task struct, which can be
 *   either a process task struct or a thread task struct (if it is single threaded). Either way,
 *   it is guaranteed that the next pointer in the task struct points either to the next process
 *   task struct or to init_task, which means the loop should still work.
 *
 * Perhaps it is helpful to illustrate how the process/thread list really works.
 * There are two fields in the task_struct that are of interest:
 * 1. The next pointer that points to the next task struct
 * 2. The thread_group field (which is of type struct list_head { list_head* next, prev })
 *    This means task_struct.thread_group will automatically give you next. The tricky thing
 *    is that this list points to the thread_group field of the next task_struct that belongs
 *    to this group. See the figure below.
 * To put things together, let's assume that we have two running processes, 30 and 31.
 * 31 is single threaded and 30 is multi-threaded with two additional threads, 32 and 33.
 * Given that we always have init_task, we should have a total of 5 task_structs: 1 for init,
 *   2 for the processes and 2 for the threads.
 * The following is a graphical representation of the "process list"
 *
 * ,--------------------------------------------------------------------,
 * |     _____________          _____________          _____________    |
 * |---> | pid = 0   |    ,---> | pid = 30  |    ,---> | pid = 31  |    |
 * |     | tgid = 0  |    |     | tgid = 30 |    |     | tgid = 31 |    |
 * |     | next      | ---'     | next      | ---|     | next      | ---'
 * | ,-> | t-group   | -,   ,-> | t-group   | -, | ,-> | t-group   | --,
 * | |   |___________|  |  /    |___________|  | | |   |___________|   |
 * | '------------------' /                    | | '-------------------'
 * |                     / ,-------------------' |
 * |                    |  |    _____________    |
 * |                    |  |    | pid = 32  |    |
 * |                    |  |    | tgid = 30 |    |
 * |                    |  |    | next      | ---' (points to real next)
 * |                    |  '--> | t-group   | --,
 * |                    |       |___________|   |
 * |                    |  ,--------------------'
 * |                    |  |    _____________
 * |                    |  |    | pid = 33  |
 * |                    |  |    | tgid = 30 |
 * |                    |  |    | next      | ----, (points to init_task)
 * |                    |  '--> | t-group   | --, |
 * |                    |       |___________|   | |
 * |                    '-----------------------' |
 * '----------------------------------------------'
 *
 * Some things to emphasize (again):
 * 1. thread_group.next (represented by t-group) points to the next thread_group field!
 * 2. next of a process task struct in the process list is guaranteed to point to the next
 *   task struct. The next in a thread task_struct might point to the next task struct or to init_task.
 * 3. According to online references, the pids are always unique - this is why the thread ids
 *   for process 30 are 30 (the main thread), 32 and 33 (the other two threads).
 * 4. The tgid holds the real process pid (the pid of the thread group leader).
 *
 * The above example does not include the "thread_info" structure. Each task_struct
 *   is associated with its own thread_info structure, which is pointed to by the "stack" field of the
 *   task_struct. To get the stack address of the task, we have to go into the thread_info structure
 *   and look at the cpu_context field to grab the stack pointer. More info on cpu_context and copy_thread
 *   (called from copy_process, which is called from do_fork) can be found in arch/ARCH/kernel/process.c
 */
gva_t updateProcessListByTask(CPUState* env, gva_t task, int updateMask, int bNeedMark)
{
  DECAF_Processes_Callback_Params params;

  gpid_t pid;
  gpid_t parentPid;
  gpid_t tgid;
  gpid_t glpid;
  target_ulong uid;
  target_ulong gid;
  target_ulong euid;
  target_ulong egid;

  gpid_t t_pid;
  gpid_t t_tgid;

  gpa_t pgd;
  char name[MAX_PROCESS_INFO_NAME_LEN];
  char argName[MAX_PROCESS_INFO_NAME_LEN];
  gva_t i = task;

  argName[0] = '\0';
  name[0] = '\0';

  pid = DECAF_get_pid(env, i);
  tgid = DECAF_get_tgid(env, i);
  glpid = DECAF_get_group_leader_pid(env, i);
  uid = DECAF_get_uid(env, i);
  gid = DECAF_get_gid(env, i);
  euid = DECAF_get_euid(env, i);
  egid = DECAF_get_egid(env, i);
  parentPid = DECAF_get_parent_pid(env, i);
  pgd = pgd_strip(DECAF_get_pgd(env, i));

  int ret = 0;

  if (curProcessPGD == pgd)
  {
    if (DECAF_get_arg_name(env, i, argName, MAX_PROCESS_INFO_NAME_LEN) < 0)
    {
      argName[0] = '\0';
    }
  }
  if (DECAF_get_name(env, i, name, MAX_PROCESS_INFO_NAME_LEN) < 0) //get the name
  {
    name[0] = '\0';
  }

  //update the info if needed
  if ( ((bNeedMark) && (processMark(pid) == 1))
       || ((!bNeedMark) && (findProcessByPID(pid) == NULL)) 
     ) // i.e. it doesn't exist
  {
    addProcess(i, pid, parentPid, tgid, glpid, uid, gid, euid, egid, pgd, (argName[0] == '\0') ? NULL : argName, (name[0] == '\0') ? NULL : name);
    processMark(pid);
    params.cp.pid = pid;
    params.cp.pgd = pgd;
    SimpleCallback_dispatch(&DroidScope_callbacks[DECAF_PROCESSES_CREATE_PROCESS_CB], &params);
    //force a module and thread update
    updateMask |= UPDATE_THREADS | UPDATE_MODULES;
  }
  else
  {
    ret = updateProcess(i, pid, parentPid, tgid, glpid, uid, gid, euid, egid, pgd, (argName[0] == '\0') ? NULL : argName, (name[0] == '\0') ? NULL : name);
    if (ret > 0)
    {
      params.pu.pid = pid;
      params.pu.mask = ret;
      SimpleCallback_dispatch(&DroidScope_callbacks[DECAF_PROCESSES_PROCESS_UPDATED_CB], &params);
    }
  }

  if (updateMask & UPDATE_THREADS)
  {
    //update (repopulate) the threads
    gva_t j = i;
    clearThreads(pid);
    do
    {
      if ((j != 0) && (j != -1))
      {
        t_pid = DECAF_get_pid(env, j);
        t_tgid = DECAF_get_tgid(env, j);

        //run through the thread group
        gva_t parentTI = DECAF_get_stack(env, j);
        addThread(t_tgid, t_pid, parentTI);
      }

      j = DECAF_get_thread_group(env, j);
      if ( (j == -1) || (j == 0) )
      {
        break;
      }
      j -= task_struct_thread_group_offset; // recover the enclosing task_struct from its thread_group field (container_of)
    } while ( i != j );
  } //end bUpdateThreads

  //update (repopulate) the module list
  if (updateMask & UPDATE_MODULES)
  {
    updateProcessModuleList(env, pid);
  }

  i = DECAF_get_next_task_struct(env, i);
  return (i);
}
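
To make the thread_group walk in updateProcessListByTask more concrete, here is a minimal, self-contained sketch of the same traversal pattern in plain C. It is an illustration under simplified assumptions: the struct definitions, the task_of macro, and walk_thread_group are hypothetical stand-ins (real kernel layouts are arch-specific, and DECAF reads these fields out of guest memory rather than following host pointers). Only the offset subtraction, equivalent to the kernel's container_of and to the `j -= task_struct_thread_group_offset` step above, mirrors the actual loop.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures described in the comment above.
 * Real task_struct layouts are kernel- and arch-specific; this is illustration only. */
struct list_head { struct list_head *next, *prev; };

struct task_struct {
    int pid;
    int tgid;
    struct list_head thread_group;   /* links all threads of one process */
};

/* Same idea as "j -= task_struct_thread_group_offset" above: recover the
 * enclosing task_struct from a pointer to its thread_group field. */
#define task_of(ptr) \
    ((struct task_struct *)((char *)(ptr) - offsetof(struct task_struct, thread_group)))

/* Visit every thread of the process that owns 'leader'. */
static void walk_thread_group(struct task_struct *leader)
{
    struct list_head *pos = &leader->thread_group;
    do {
        struct task_struct *t = task_of(pos);
        printf("pid=%d tgid=%d\n", t->pid, t->tgid);
        pos = pos->next;                 /* points at the NEXT thread_group field */
    } while (pos != &leader->thread_group);
}

int main(void)
{
    /* Build the example from the figure: process 30 with two extra threads 32 and 33. */
    struct task_struct t30 = { .pid = 30, .tgid = 30 };
    struct task_struct t32 = { .pid = 32, .tgid = 30 };
    struct task_struct t33 = { .pid = 33, .tgid = 30 };

    /* thread_group is a circular list threaded through the thread_group fields themselves */
    t30.thread_group.next = &t32.thread_group;
    t32.thread_group.next = &t33.thread_group;
    t33.thread_group.next = &t30.thread_group;

    walk_thread_group(&t30);   /* prints pids 30, 32, 33 */
    return 0;
}

In the real function the same arithmetic is performed on guest virtual addresses rather than host pointers, which is why the loop above also checks for 0 and -1 before using the value.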