Example no. 1
0
    //--------------------------------------------------------------------------
    //
    // Runs the next appropriate waiting Job.
    //
    // Pre-conditions:
    //  A RunnableJob must exist in the JobSet
    //
    // Post-conditions:
    //  The chosen RunnableJob will have Job::doJob() called.
    //
    // Invariants:
    //  <none>
    //
    void processTask ()
    {
        Job job;

        {
            ScopedLock lock (m_mutex);
            getNextJob (job, lock);
            ++m_processCount;
        }

        JobTypeData& data (getJobTypeData (job.getType ()));

        // Skip the job if we are stopping and the
        // skipOnStop flag is set for the job type
        //
        if (!isStopping() || !data.info.skip ())
        {
            beast::Thread::setCurrentThreadName (data.name ());
            m_journal.trace << "Doing " << data.name () << " job";

            Job::clock_type::time_point const start_time (
                Job::clock_type::now());

            on_dequeue (job.getType (), start_time - job.queue_time ());
            job.doJob ();
            on_execute (job.getType (), Job::clock_type::now() - start_time);
        }
        else
        {
            m_journal.trace << "Skipping processTask ('" << data.name () << "')";
        }

        {
            ScopedLock lock (m_mutex);
            finishJob (job, lock);
            --m_processCount;
            checkStopped (lock);
        }

        // Note that when Job::~Job is called, the last reference
        // to the associated LoadEvent object (in the Job) may be destroyed.
    }
Example no. 2
0
// Worker-thread body: repeatedly pulls download jobs from the shared list,
// fetches each URL into its target file, and fires the per-file and
// queue-finished delegates. Runs until the job list is exhausted or
// m_NeedStop is raised.
void CFileDownloader::memberThreadFunc()
{
	NetworkClient nm;

	// Providing callback function to stop downloading
	nm.setProgressCallback(CFileDownloader::ProgressFunc, this);
	m_CS.Lock();
	if (onConfigureNetworkClient)
		onConfigureNetworkClient(&nm);
	m_CS.Unlock();

	for (;; )
	{
		DownloadFileListItem curItem;
		if (!getNextJob(curItem))
			break;

		std::string url = curItem.url;
		if (url.empty())
			break;

		nm.setOutputFile(curItem.fileName);
		if ( !curItem.referer.empty() ) {
			nm.setReferer(curItem.referer);
		}
		nm.doGet(url);
		if (m_NeedStop)
			break;

		m_CS.Lock();
		// One notification covers both outcomes; any 2xx response counts
		// as success (the two branches previously differed only in this flag).
		bool const success = nm.responseCode() >= 200 && nm.responseCode() <= 299;
		if (!onFileFinished.empty())
			onFileFinished(success, nm.responseCode(), curItem); // delegate call

		if (m_NeedStop)
			m_fileList.clear();
		m_CS.Unlock();
	}

	m_CS.Lock();

	// Remove this worker's handle from the bookkeeping list.
	// NOTE(review): GetCurrentThread() returns a pseudo-handle (a constant),
	// so this comparison only matches if m_hThreads stores the same
	// pseudo-handle values — verify against where handles are added.
	HANDLE hThread = GetCurrentThread();
	for (size_t i = 0; i < m_hThreads.size(); i++)
	{
		if (m_hThreads[i] == hThread)
		{
			m_hThreads.erase(m_hThreads.begin() + i);
			break;
		}
	}

	m_nRunningThreads--;
	// Decide whether we are the last worker while still holding the lock;
	// reading m_nRunningThreads after Unlock() would be a data race and
	// could miss (or double-fire) the queue-finished notification.
	bool const lastThread = (m_nRunningThreads == 0);

	if (m_NeedStop)
		m_fileList.clear();
	m_CS.Unlock();  // We need to release  mutex before calling  onQueueFinished()

	// otherwise we may get a deadlock
	if (lastThread)
	{
		m_IsRunning = false;
		m_NeedStop = false;
		if (onQueueFinished)                                                                                                                         // it is a delegate
			onQueueFinished();
	}

	return;
}
Example no. 3
0
// Sets up and runs the parallel kenken solver
void runParallel(unsigned P) {
  int i, pid;
  long long myNodeCount;
  job_t* myJob;
  cell_t* myCells;
  constraint_t* myConstraints;
  struct timeval startCompTime, endCompTime;

  // Begin parallel
  omp_set_num_threads(P);

  // Run algorithm
#pragma omp parallel default(shared) private(i, pid, myNodeCount, myJob, \
                                             myCells, myConstraints)
{
  // Initialize local variables and data-structures
  pid = omp_get_thread_num();
  myNodeCount = 0;

  myConstraints = (constraint_t*)calloc(sizeof(constraint_t), numConstraints);
  if (!myConstraints)
    unixError("Failed to allocate memory for myConstraints");

  myCells = (cell_t*)calloc(sizeof(cell_t), totalNumCells);
  if (!myCells)
    unixError("Failed to allocate memory for myCells");

  myJob = (job_t*)malloc(sizeof(job_t));
  if (!myJob)
    unixError("Failed to allocate memory for myJob");

  // Record start of computation time
  #pragma omp single
    gettimeofday(&startCompTime, NULL);

  // Get and complete new job until none left, or solution found
  while (getNextJob(pid, myJob)) {
    memcpy(myConstraints, constraints, numConstraints * sizeof(constraint_t));
    memcpy(myCells, cells, totalNumCells * sizeof(cell_t));

    for (i = 0; i < myJob->length; i++)
      applyValue(myCells, myConstraints, myJob->assignments[i].cellIndex,
                 myJob->assignments[i].value);

    if (ADD_TO_QUEUE(&(jobQueues[pid]), myJob)) {
      myNodeCount++;
      // Guarenteed to succeed given ADD_TO_QUEUE(...) returned true
      addToQueue(myJob->length, myCells, myConstraints, &(jobQueues[pid]),
                 myJob->assignments, AVAILABLE(&jobQueues[pid]));
    }
    else
      solve(myJob->length, myCells, myConstraints, &myNodeCount);
  }

  #pragma omp critical
    nodeCount += myNodeCount;
}

  // Calculate computation time
  gettimeofday(&endCompTime, NULL);
  compTime = TIME_DIFF(endCompTime, startCompTime);
}