Example #1
int ReapThreadProc(void* lpParameter)
{
  fprintf(stdout, "Thread execution started.\n");
  libusb_device::TListTransfers& listTransfers (*(libusb_device::TListTransfers*)lpParameter);
  while(!listTransfers.Empty())
  {
    listTransfers.Head()->libusb.dev_handle->dev->ctx->processing.Wait();
    transfer_wrapper* wrapper = listTransfers.Head();
    if (NULL != wrapper)
      ReapTransfer(wrapper, 1000);

    if (hProblem.Check())
    {
      fprintf(stderr, "Thread is waiting for user reaction...\n");
      // wait for user reaction...
      hReaction.Wait();
      // did the user decide to abort?
      if (hAbort.Check())
      {
        fprintf(stderr, "Thread is aborting: releasing transfers...\n");
        while(!listTransfers.Empty())
        {
          transfer_wrapper* wrapper = listTransfers.Head();
          libusb_cancel_transfer(&(wrapper->libusb));
          ReapTransfer(wrapper, 0);
        }
        fprintf(stderr, "Thread execution aborted.\n");
        return(LIBUSB_ERROR_INTERRUPTED);
      }
    }
  }
  fprintf(stdout, "Thread execution finished.\n");
  return(0);
}
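The hProblem, hReaction, and hAbort objects shared by these examples behave as event flags with Check() (non-blocking poll), Wait(), Signal(), and Reset() operations. A minimal sketch of such a flag on top of Win32 manual-reset events follows; the EventFlag name is a hypothetical stand-in for illustration, not the actual libusbemu synchronization class.

#include <windows.h>

// Hypothetical event-flag helper matching the Check()/Wait()/Signal()/Reset()
// usage seen in the examples; the real libusbemu primitive may differ.
class EventFlag
{
  HANDLE handle;
public:
  EventFlag() : handle(CreateEvent(NULL, TRUE, FALSE, NULL)) {}  // manual-reset, initially unsignaled
  ~EventFlag() { CloseHandle(handle); }
  void Signal() { SetEvent(handle); }    // raise the flag
  void Reset()  { ResetEvent(handle); }  // lower the flag
  bool Check()  { return(WAIT_OBJECT_0 == WaitForSingleObject(handle, 0)); }  // poll without blocking
  void Wait()   { WaitForSingleObject(handle, INFINITE); }  // block until the flag is raised
};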
Example #2
int ReapThreadProc(void* lpParameter)
{
  fprintf(stdout, "Thread execution started.\n");
  libusb_device::TListTransfers& listTransfers (*(libusb_device::TListTransfers*)lpParameter);

  mutexReady.Enter();
  assert(mapDeviceTransfersReady.find(&listTransfers) == mapDeviceTransfersReady.end());
  mapDeviceTransfersReady[&listTransfers] = new libusb_device::TListTransfers;
  libusb_device::TListTransfers& lstReady = *mapDeviceTransfersReady[&listTransfers];
  mutexReady.Leave();

  while(!listTransfers.Empty() || !lstReady.Empty())
  {
    if (!listTransfers.Empty())
    {
      transfer_wrapper* wrapper = listTransfers.Head();
      if (NULL != wrapper)
        //ReapTransfer(wrapper, 10000, &lstReady);  // producer-consumer model
        ReapTransfer(wrapper, 10000);
    }
    else
    {
      // This is important! Otherwise the thread may "take control" of the CPU
      // if it happens to be running at TIME_CRITICAL priority...
      fprintf(stdout, "ReapThreadProc(): nothing to do, sleeping...\n");
      QuickThread::Yield();
    }

    if (hProblem.Check())
    {
      fprintf(stderr, "Thread is waiting for user reaction...\n");
      // wait for user reaction...
      hReaction.Wait();
      // did the user decide to abort?
      if (hAbort.Check())
      {
        fprintf(stderr, "Thread is aborting: releasing transfers...\n");
        while(!listTransfers.Empty())
        {
          transfer_wrapper* wrapper = listTransfers.Head();
          libusb_cancel_transfer(&(wrapper->libusb));
          ReapTransfer(wrapper, 0);
        }
        fprintf(stderr, "Thread execution aborted.\n");
        return(LIBUSB_ERROR_INTERRUPTED);
      }
    }
  }

  mutexReady.Enter();
  SAFE_DELETE(mapDeviceTransfersReady[&listTransfers]);
  mapDeviceTransfersReady.erase(&listTransfers);
  mutexReady.Leave();

  fprintf(stdout, "Thread execution finished.\n");
  return(0);
}
Example #3
int libusb_handle_events(libusb_context* ctx)
{
  int ret (0);

  int(*ReapStrategy)(const libusb_device&) (ReapThreaded);

  // ReapThreaded() already spawns threads (one per stream) at very high
  // (time-critical) priority; the other reap strategies run more
  // sequentially, so this thread must be promoted to a higher priority to
  // reduce sequence losses; even under such extreme conditions, however,
  // sequence losses still happen frequently without ReapThreaded().
  if (ReapStrategy != ReapThreaded)
    QuickThread::Myself().RaisePriority();

  //HANDLE hMyself (GetCurrentThread());
  libusb_context::TMapDevices::iterator it  (ctx->devices.begin());
  libusb_context::TMapDevices::iterator end (ctx->devices.end());
  for (; it!=end; ++it)
  {
    const libusb_device& dev (it->second);
    if (dev.refcount > 0)
      ReapStrategy(dev);
  }

  // Fail guard to prevent THREAD_PRIORITY_TIME_CRITICAL from rendering the
  // system unresponsive: press the ESC key on the CONSOLE window to kill the
  // thread; if the reap strategy is ReapThreaded(), this will also kill all
  // of the child threads spawned from within it.
  if (_kbhit())
    if (27 == _getch()) // ESC
      hProblem.Signal();
  if (hAbort.Check())
    ret = LIBUSB_ERROR_INTERRUPTED;
  else if (hProblem.Check())
  {
    hReaction.Reset();
    int user_option =
    MessageBoxA(GetDesktopWindow(),
                "The libusb_handle_events() fail guard of libusbemu was reached!\n"
                "This was caused by pressing the [ESC] key on the console window.\n"
                "If it was unintentional, click Cancel to resume normal execution;\n"
                "otherwise, click OK to effectively terminate the thread (note that\n"
                "the host program might run abnormally after such termination).",
                "WARNING: libusbemu thread fail guard reached!", MB_ICONWARNING | MB_OKCANCEL);
    if (IDOK == user_option)
      hAbort.Signal();
    else
      hProblem.Reset();
    hReaction.Signal();
  }

  // 0 on success, or a LIBUSB_ERROR code on failure
  return(ret);
}
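Since libusb_handle_events() performs a single pass over the context's devices and returns, a host program is expected to call it repeatedly. A minimal sketch of such a pump loop follows; the EventPump name and the shutdown condition are assumptions for illustration only.

#include <cstdio>

// Hypothetical host-side pump: keep servicing transfers until the fail guard
// aborts the reap threads (or the caller decides to stop on its own terms).
void EventPump(libusb_context* ctx)
{
  int ret (0);
  while (LIBUSB_ERROR_INTERRUPTED != ret)
  {
    ret = libusb_handle_events(ctx);
    if ((0 != ret) && (LIBUSB_ERROR_INTERRUPTED != ret))
      fprintf(stderr, "libusb_handle_events() returned error %d\n", ret);
  }
}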
Example #4
// ReapThreaded Rationale: for each transfer list (stream), delegate the reap
// to a dedicated thread for that stream
int ReapThreaded(const libusb_device& dev)
{
  static std::map<const libusb_device*, std::map<int,QuickThread*> > mapDeviceEndPointThreads;

  if (hAbort.Check())
  {
    std::map<int,QuickThread*>& mThreads = mapDeviceEndPointThreads[&dev];
    std::map<int,QuickThread*>::iterator it  (mThreads.begin());
    std::map<int,QuickThread*>::iterator end (mThreads.end());
    for (; it!=end; ++it)
    {
      QuickThread*& hThread = it->second;
      if (NULL != hThread)
        if (hThread->TryJoin())
          SAFE_DELETE(hThread);
    }
    return(-1);
  }

  if (hProblem.Check())
    return(-1);

  libusb_device::TMapIsocTransfers::iterator it  (dev.isoTransfers->begin());
  libusb_device::TMapIsocTransfers::iterator end (dev.isoTransfers->end());
  for (; it!=end; ++it)
  {
    std::map<int,QuickThread*>& mThreads = mapDeviceEndPointThreads[&dev];
    const int endpoint (it->first);
    QuickThread*& hThread = mThreads[endpoint];
    if (NULL != hThread)
    {
      if (hThread->TryJoin())
      {
        SAFE_DELETE(hThread);
        mThreads.erase(endpoint);
      }
    }
    else
    {
      if (!it->second.Empty())
      {
        libusb_device::TListTransfers& listTransfers (it->second);
        hThread = new QuickThread(ReapThreadProc, (void*)&listTransfers);
        hThread->RaisePriority();
      }
    }
  }
  QuickThread::Yield();
  return(0);
}
Example #5
// ReapThreaded Rationale: for each transfer list (stream) of a given device,
// delegate the reap to a dedicated thread for that stream
int ReapThreaded(const libusb_device& dev)
{
  static std::map<const libusb_device*, std::map<int,QuickThread*> > mapDeviceEndPointThreads;

  if (hAbort.Check())
  {
    std::map<int,QuickThread*>& mThreads = mapDeviceEndPointThreads[&dev];
    std::map<int,QuickThread*>::iterator it  (mThreads.begin());
    std::map<int,QuickThread*>::iterator end (mThreads.end());
    for (; it!=end; ++it)
    {
      QuickThread*& hThread = it->second;
      if (NULL != hThread)
        if (hThread->TryJoin())
          SAFE_DELETE(hThread);
    }

    {
      // On fail guard, release all "ready" transfers as well...
      mutexReady.Enter();
      std::map<libusb_device::TListTransfers*,libusb_device::TListTransfers*>::iterator it  = mapDeviceTransfersReady.begin();
      std::map<libusb_device::TListTransfers*,libusb_device::TListTransfers*>::iterator end = mapDeviceTransfersReady.end();
      for (; it!=end; ++it)
      {
        libusb_device::TListTransfers& lstReady (*(it->second));
        while (!lstReady.Empty())
        {
          transfer_wrapper* wrapper = lstReady.Head();
          libusb_device::TListTransfers::Remove(wrapper);
          libusb_transfer* transfer = &wrapper->libusb;
          transfer->status = LIBUSB_TRANSFER_CANCELLED;
          int read = transfer->actual_length;
          if (read > 0)
            PreprocessTransfer(&wrapper->libusb, wrapper->libusb.actual_length);
          transfer->callback(&wrapper->libusb);
          libusbemu_clear_transfer(wrapper);
        }
      }
      mutexReady.Leave();
    }
    return(-1);
  }

  if (hProblem.Check())
    return(-1);

  libusb_device::TMapIsocTransfers::iterator it  (dev.isoTransfers->begin());
  libusb_device::TMapIsocTransfers::iterator end (dev.isoTransfers->end());
  for (; it!=end; ++it)
  {
    std::map<int,QuickThread*>& mThreads = mapDeviceEndPointThreads[&dev];
    const int endpoint (it->first);
    QuickThread*& hThread = mThreads[endpoint];
    if (NULL != hThread)
    {
      if (hThread->TryJoin())
      {
        SAFE_DELETE(hThread);
        mThreads.erase(endpoint);
      }
    }
    else
    {
      if (!it->second.Empty())
      {
        libusb_device::TListTransfers& listTransfers (it->second);
        hThread = new QuickThread(ReapThreadProc, (void*)&listTransfers);
        hThread->RaisePriority();
      }
    }
  }

  {
  int procs (0);
  libusb_device::TMapIsocTransfers::iterator it  (dev.isoTransfers->begin());
  libusb_device::TMapIsocTransfers::iterator end (dev.isoTransfers->end());
  for (; it!=end; ++it)
  {
    libusb_device::TListTransfers& listTransfers (it->second);
    mutexReady.Enter();
    std::map<libusb_device::TListTransfers*,libusb_device::TListTransfers*>::iterator itReady = mapDeviceTransfersReady.find(&listTransfers);
    if (itReady != mapDeviceTransfersReady.end())
    {
      libusb_device::TListTransfers& listReady = *(itReady->second);
      while (!listReady.Empty())
      {
        ++procs;
        transfer_wrapper* wrapper = listReady.Head();
        libusb_device::TListTransfers::Remove(wrapper);
        libusb_transfer* transfer = &wrapper->libusb;
        int read = transfer->actual_length;
        if (read > 0)
          PreprocessTransfer(&wrapper->libusb, wrapper->libusb.actual_length);
        transfer->callback(&wrapper->libusb);
        libusbemu_clear_transfer(wrapper);
      }
    }
    mutexReady.Leave();
  }
  if (0 == procs)
    QuickThread::Yield();
  }

  return(0);
}