void scheduleQueries(TaskGraph::Node taskId, uint32_t queryType, Scheduler& scheduler, ScheduleGraph& taskGraph, queryfiles::QueryBatcher& batches,
   runtime::QueryState& queryState, bool logScheduling) {
      // Schedule query tasks
      TaskGroup queryTasks;
      unsigned count=0;
      auto taskBatches=batches.getBatches(queryType);
      for(auto batchIter=taskBatches.begin(); batchIter!=taskBatches.end(); batchIter++) {
         queryfiles::QueryBatch* batch = *batchIter;
         assert(batch->queryType==queryType);

         queryTasks.schedule(LambdaRunner::createLambdaTask(RunBatch(scheduler, taskGraph, taskId, queryState, batch), taskId));
         count++;
      }

      queryTasks.join(LambdaRunner::createLambdaTask(UpdateTask(taskGraph, taskId),taskId));

      if(logScheduling) {
         assert(batches.batchCounts[queryType]==count);
         LOG_PRINT("[Queries] Schedule " << count << " of type: "<< queryType);
      }

      // Disable early close
      taskGraph.updateTask(taskId, 1);
      if(taskId==TaskGraph::Query1) {
         scheduler.schedule(queryTasks.close(), Priorities::LOW, false);
      } else {
         scheduler.schedule(queryTasks.close(), Priorities::CRITICAL, false);
      }
}
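
For reference, the same fan-out/join shape using only the standard library; Scheduler, TaskGroup, LambdaRunner and the RunBatch/UpdateTask types above are specific to this codebase, so std::async stands in for them here:

// Fan-out/join sketch with std::async; illustrative only, not the project's
// Scheduler/TaskGroup API.
#include <future>
#include <iostream>
#include <vector>

static void runBatch(int batchId) {
    // Stand-in for RunBatch: process one batch of queries.
    std::cout << "processing batch " << batchId << "\n";
}

int main() {
    std::vector<std::future<void>> tasks;
    for (int batchId = 0; batchId < 8; ++batchId)          // fan out: one task per batch
        tasks.push_back(std::async(std::launch::async, runBatch, batchId));

    for (auto &t : tasks)                                   // join: wait for every batch
        t.get();

    std::cout << "all batches done; the follow-up (UpdateTask) would run here\n";
}
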
Example #2
void
ISPCSync(void *h) {
    TaskGroup *taskGroup = (TaskGroup *)h;
    if (taskGroup != NULL) {
        taskGroup->Sync();
        FreeTaskGroup(taskGroup);
    }
}
Example #3
void *
ISPCAlloc(void **taskGroupPtr, int64_t size, int32_t alignment) {
    TaskGroup *taskGroup;
    if (*taskGroupPtr == NULL) {
        InitTaskSystem();
        taskGroup = AllocTaskGroup();
        *taskGroupPtr = taskGroup;
    }
    else
        taskGroup = (TaskGroup *)(*taskGroupPtr);

    return taskGroup->AllocMemory(size, alignment);
}
Example #4
void TaskGroupView::fillView()
{
	TaskGroup *g = (TaskGroup *)node();
	ui->tableWidget->setColumnCount(1);
	ui->tableWidget->setRowCount(g->children().count());
	for(int i=0; i<g->children().count(); ++i)
	{
		QWidget *widget = cell(g->children()[i]);
		widget->adjustSize();
		if (widget->width()>ui->tableWidget->columnWidth(0))
			ui->tableWidget->setColumnWidth(0, widget->width());
		ui->tableWidget->setRowHeight(i, widget->height());
		ui->tableWidget->setItem(i,0, new QTableWidgetItem());
		ui->tableWidget->setCellWidget(i, 0, widget);
	}
}
Example #5
void
ISPCLaunch(void **taskGroupPtr, void *func, void *data, int count) {
    TaskGroup *taskGroup;
    if (*taskGroupPtr == NULL) {
        InitTaskSystem();
        taskGroup = AllocTaskGroup();
        *taskGroupPtr = taskGroup;
    }
    else
        taskGroup = (TaskGroup *)(*taskGroupPtr);

    int baseIndex = taskGroup->AllocTaskInfo(count);
    for (int i = 0; i < count; ++i) {
        TaskInfo *ti = taskGroup->GetTaskInfo(baseIndex+i);
        ti->func = (TaskFuncType)func;
        ti->data = data;
        ti->taskIndex = i;
        ti->taskCount = count;
    }
    taskGroup->Launch(baseIndex, count);
}
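
Examples #2, #3 and #5 are the three entry points that ispc-generated code calls: ISPCAlloc to get per-launch scratch memory, ISPCLaunch to enqueue tasks, and ISPCSync to wait and clean up. Below is a deliberately serial mock with the same signatures as the snippets above; it runs tasks inline instead of queuing them on worker threads, so it only illustrates the calling contract:

// Serial mock of ISPCAlloc/ISPCLaunch/ISPCSync with the signatures used above.
// Tasks run inline instead of on worker threads; for illustration only.
#include <cstdint>
#include <cstdlib>
#include <vector>

typedef void (*TaskFuncType)(void *data, int threadIndex, int threadCount,
                             int taskIndex, int taskCount);

struct SerialGroup {
    std::vector<void *> allocations;   // memory handed out by ISPCAlloc
};

extern "C" void *ISPCAlloc(void **handlePtr, int64_t size, int32_t alignment) {
    if (*handlePtr == NULL)
        *handlePtr = new SerialGroup;
    SerialGroup *group = (SerialGroup *)*handlePtr;
    // std::aligned_alloc requires size to be a multiple of alignment.
    size = (size + alignment - 1) / alignment * alignment;
    void *mem = std::aligned_alloc(alignment, (size_t)size);
    group->allocations.push_back(mem);
    return mem;
}

extern "C" void ISPCLaunch(void **handlePtr, void *func, void *data, int count) {
    (void)handlePtr;   // a real implementation queues onto the group in *handlePtr
    for (int i = 0; i < count; ++i)
        ((TaskFuncType)func)(data, /*threadIndex=*/0, /*threadCount=*/1, i, count);
}

extern "C" void ISPCSync(void *handle) {
    SerialGroup *group = (SerialGroup *)handle;
    if (group != NULL) {               // NULL means nothing was ever launched/allocated
        for (void *p : group->allocations)
            std::free(p);
        delete group;
    }
}
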
Example #6
TaskGroup* AbstractGroupingStrategy::createGroup(ItemList items)
{
    GroupPtr oldGroup;
    if (!items.isEmpty() && items.first()->isGrouped()) {
        oldGroup = items.first()->parentGroup();
    } else {
        oldGroup = rootGroup();
    }

    TaskGroup *newGroup = new TaskGroup(d->groupManager);
    ItemList oldGroupMembers = oldGroup->members();
    int index = oldGroupMembers.count();
    d->createdGroups.append(newGroup);
    //kDebug() << "added group" << d->createdGroups.count();
    // NOTE: Queued is vital to make sure groups only get removed after their children, for
    // correct QAbstractItemModel (TasksModel) transaction semantics.
    connect(newGroup, SIGNAL(itemRemoved(AbstractGroupableItem*)), this, SLOT(checkGroup()), Qt::QueuedConnection);
    foreach (AbstractGroupableItem * item, items) {
        int idx = oldGroupMembers.indexOf(item);
        if (idx >= 0 && idx < index) {
            index = idx;
        }
        newGroup->add(item);
    }

    return newGroup;
}
Example #7
bool fcExrContext::endFrame()
{
    if (m_task == nullptr) {
        fcDebugLog("fcExrContext::endFrame(): maybe beginFrame() is not called.");
        return false;
    }

    m_frame_prev = nullptr;

    fcExrTaskData *exr = m_task;
    m_task = nullptr;
    ++m_active_task_count;
    m_tasks.run([this, exr](){
        endFrameTask(exr);
        --m_active_task_count;
    });
    return true;
}
Example #8
bool fcExrContext::beginFrame(const char *path, int width, int height)
{
    if (m_task != nullptr) {
        fcDebugLog("fcExrContext::beginFrame(): beginFrame() is already called. maybe you forgot to call endFrame().");
        return false;
    }

    // If the number of running tasks has reached the limit, wait a bit
    if (m_active_task_count >= m_conf.max_tasks)
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        if (m_active_task_count >= m_conf.max_tasks)
        {
            m_tasks.wait();
        }
    }

    m_task = new fcExrTaskData(path, width, height, m_conf.compression);
    return true;
}
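
Examples #7 and #8 pair up: beginFrame() opens a frame (throttling when m_active_task_count hits the configured limit) and endFrame() hands the finished fcExrTaskData to the async task queue. A hedged sketch of the intended per-frame call order; the work done between the two calls is an assumption, since it is not shown in these snippets:

// Hedged usage sketch for fcExrContext: one beginFrame()/endFrame() pair per
// exported frame. The work between the two calls is assumed, not shown above.
bool exportFrame(fcExrContext &ctx, const char *path, int width, int height)
{
    if (!ctx.beginFrame(path, width, height))
        return false;          // a frame is still open (endFrame() was not called)

    // ... per-frame data would be added to the open frame here (API assumed) ...

    return ctx.endFrame();     // hands the task to m_tasks and returns immediately
}
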
Example #9
void ThreadPool::yield(TaskGroup &wait)
{
    std::chrono::milliseconds waitSpan(10);
    uint32 id = _threadCount; // Threads not in the pool get a previously unassigned id

    auto iter = _idToNumericId.find(std::this_thread::get_id());
    if (iter != _idToNumericId.end())
        id = iter->second;

    while (!wait.isDone() && !_terminateFlag) {
        uint32 subTaskId;
        std::shared_ptr<TaskGroup> task;
        {
            std::unique_lock<std::mutex> lock(_taskMutex);
            if (!_taskCond.wait_for(lock, waitSpan, [this] {return _terminateFlag || !_tasks.empty();}))
                continue;
            task = acquireTask(subTaskId);
        }
        if (task)
            task->run(id, subTaskId);
    }
}
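
The subtle part of yield() is the timed wait: wait_for() returns false when the 10 ms span expires with the predicate still false, and the loop simply re-checks wait.isDone(). A self-contained illustration of that wait_for idiom:

// Self-contained illustration of the wait_for() idiom in yield(): wake at most
// every 10 ms, re-check the predicate, and loop until the condition holds.
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

int main() {
    std::mutex m;
    std::condition_variable cv;
    bool done = false;

    std::thread producer([&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        { std::lock_guard<std::mutex> lock(m); done = true; }
        cv.notify_one();
    });

    std::chrono::milliseconds waitSpan(10);
    while (true) {
        std::unique_lock<std::mutex> lock(m);
        // false return means the span expired with the predicate still false;
        // yield() uses the same shape to periodically re-check wait.isDone().
        if (!cv.wait_for(lock, waitSpan, [&] { return done; }))
            continue;
        break;
    }
    producer.join();
    return 0;
}
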
Example #10
inline void
TaskGroup::Sync() {
    DBG(fprintf(stderr, "syncing %p - %d unfinished\n", tg, numUnfinishedTasks));

    while (numUnfinishedTasks > 0) {
        // All of the tasks in this group aren't finished yet.  We'll try
        // to help out here since we don't have anything else to do...

        DBG(fprintf(stderr, "while syncing %p - %d unfinished\n", tg, 
                    numUnfinishedTasks));

        //
        // Acquire the global task system mutex to grab a task to work on
        //
        int err;
        if ((err = pthread_mutex_lock(&taskSysMutex)) != 0) {
            fprintf(stderr, "Error from pthread_mutex_lock: %s\n", strerror(err));
            exit(1);
        }

        TaskInfo *myTask = NULL;
        TaskGroup *runtg = this;
        if (waitingTasks.size() > 0) {
            int taskNumber = waitingTasks.back();
            waitingTasks.pop_back();

            if (waitingTasks.size() == 0) {
                // There's nothing left to start running from this group,
                // so remove it from the active task list.
                activeTaskGroups.erase(std::find(activeTaskGroups.begin(),
                                                 activeTaskGroups.end(), this));
                inActiveList = false;
            }
            myTask = GetTaskInfo(taskNumber);
            DBG(fprintf(stderr, "running task %d from group %p in sync\n", taskNumber, tg));
        }
        else {
            // Other threads are already working on all of the tasks in
            // this group, so we can't help out by running one ourself.
            // We'll try to run one from another group to make ourselves
            // useful here.
            if (activeTaskGroups.size() == 0) {
                // No active task groups left--there's nothing for us to do.
                if ((err = pthread_mutex_unlock(&taskSysMutex)) != 0) {
                    fprintf(stderr, "Error from pthread_mutex_unlock: %s\n", strerror(err));
                    exit(1);
                }
                // FIXME: We basically end up busy-waiting here, which is
                // extra wasteful in a world with hyper-threading.  It would
                // be much better to put this thread to sleep on a
                // condition variable that was signaled when the last task
                // in this group was finished.
#ifndef ISPC_IS_KNC
                usleep(1);
#else
                _mm_delay_32(8);
#endif
                continue;
            }

            // Get a task to run from another task group.
            runtg = activeTaskGroups.back();
            assert(runtg->waitingTasks.size() > 0);

            int taskNumber = runtg->waitingTasks.back();
            runtg->waitingTasks.pop_back();
            if (runtg->waitingTasks.size() == 0) {
                // There's nothing left to start running from this group, so
                // remove it from the active task list.
                activeTaskGroups.pop_back();
                runtg->inActiveList = false;
            }
            myTask = runtg->GetTaskInfo(taskNumber);
            DBG(fprintf(stderr, "running task %d from other group %p in sync\n", 
                        taskNumber, runtg));
        }

        if ((err = pthread_mutex_unlock(&taskSysMutex)) != 0) {
            fprintf(stderr, "Error from pthread_mutex_unlock: %s\n", strerror(err));
            exit(1);
        }
    
        //
        // Do work for _myTask_
        //
        // FIXME: bogus values for thread index/thread count here as well..
        myTask->func(myTask->data, 0, 1, myTask->taskIndex, myTask->taskCount);

        //
        // Decrement the number of unfinished tasks counter
        //
        lMemFence();
        lAtomicAdd(&runtg->numUnfinishedTasks, -1);
    }
    DBG(fprintf(stderr, "sync for %p done!n", tg));
}
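
A condensed, self-contained sketch of the "help out while waiting" idea in Sync(), using std::mutex and std::atomic in place of the pthreads primitives; it leaves out the cross-group stealing and the back-off path:

// Condensed sketch of Sync()'s core loop: while this group still has
// unfinished tasks, pop and run pending work instead of blocking.
#include <atomic>
#include <functional>
#include <mutex>
#include <vector>

struct MiniGroup {
    std::mutex mutex;
    std::vector<std::function<void()>> waitingTasks;  // not yet started
    std::atomic<int> numUnfinishedTasks{0};

    void launch(std::function<void()> task) {
        std::lock_guard<std::mutex> lock(mutex);
        waitingTasks.push_back(std::move(task));
        ++numUnfinishedTasks;
    }

    void sync() {
        while (numUnfinishedTasks > 0) {
            std::function<void()> task;
            {
                std::lock_guard<std::mutex> lock(mutex);
                if (!waitingTasks.empty()) {
                    task = std::move(waitingTasks.back());
                    waitingTasks.pop_back();
                }
            }
            if (task) {
                task();                      // help out by running a pending task
                --numUnfinishedTasks;
            }
            // else: other threads own the remaining tasks; the real code would
            // steal from another group or briefly back off (usleep/_mm_delay_32).
        }
    }
};
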
Example #11
static void *
lTaskEntry(void *arg) {
    int threadIndex = (int)((int64_t)arg);
    int threadCount = nThreads;

    while (1) {
        int err;
        //
        // Wait on the semaphore until we're woken up due to the arrival of
        // more work.
        //
        if ((err = sem_wait(workerSemaphore)) != 0) {
            fprintf(stderr, "Error from sem_wait: %s\n", strerror(err));
            exit(1);
        }

        //
        // Acquire the mutex
        //
        if ((err = pthread_mutex_lock(&taskSysMutex)) != 0) {
            fprintf(stderr, "Error from pthread_mutex_lock: %s\n", strerror(err));
            exit(1);
        }

        if (activeTaskGroups.size() == 0) {
            //
            // Task queue is empty, go back and wait on the semaphore
            //
            if ((err = pthread_mutex_unlock(&taskSysMutex)) != 0) {
                fprintf(stderr, "Error from pthread_mutex_unlock: %s\n", strerror(err));
                exit(1);
            }
            continue;
        }

        //
        // Get the last task group on the active list and the last task
        // from its waiting tasks list.
        //
        TaskGroup *tg = activeTaskGroups.back();
        assert(tg->waitingTasks.size() > 0);
        int taskNumber = tg->waitingTasks.back();
        tg->waitingTasks.pop_back();

        if (tg->waitingTasks.size() == 0) {
            // We just took the last task from this task group, so remove
            // it from the active list.
            activeTaskGroups.pop_back();
            tg->inActiveList = false;
        }
    
        if ((err = pthread_mutex_unlock(&taskSysMutex)) != 0) {
            fprintf(stderr, "Error from pthread_mutex_unlock: %s\n", strerror(err));
            exit(1);
        }

        //
        // And now actually run the task
        //
        DBG(fprintf(stderr, "running task %d from group %p\n", taskNumber, tg));
        TaskInfo *myTask = tg->GetTaskInfo(taskNumber);
        myTask->func(myTask->data, threadIndex, threadCount, myTask->taskIndex,
                     myTask->taskCount);

        //
        // Decrement the "number of unfinished tasks" counter in the task
        // group.
        //
        lMemFence();
        lAtomicAdd(&tg->numUnfinishedTasks, -1);
    }

    pthread_exit(NULL);
    return 0;
}
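
The same worker-loop shape as lTaskEntry(), condensed to C++20 standard-library primitives: block on a semaphore, pop one task under the lock, run it outside the lock, then decrement the unfinished counter. The queue and counter here are stand-ins, not the globals used above:

// Condensed worker loop in the spirit of lTaskEntry().
#include <atomic>
#include <functional>
#include <mutex>
#include <semaphore>
#include <vector>

std::counting_semaphore<> workSem(0);                 // posted once per queued task
std::mutex queueMutex;
std::vector<std::function<void()>> taskQueue;
std::atomic<int> unfinished{0};
std::atomic<bool> quit{false};                        // set, then release once per worker

void workerLoop() {
    while (true) {
        workSem.acquire();                            // sleep until work (or quit) arrives
        if (quit)
            break;

        std::function<void()> task;
        {
            std::lock_guard<std::mutex> lock(queueMutex);
            if (taskQueue.empty())                    // raced with another worker; loop back
                continue;
            task = std::move(taskQueue.back());
            taskQueue.pop_back();
        }
        task();                                       // run outside the lock
        --unfinished;                                 // like lAtomicAdd(&numUnfinishedTasks, -1)
    }
}
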
Example #12
/// This is where the link is actually performed.
bool Driver::link(const TargetInfo &targetInfo) {
  // Honor -mllvm
  if (!targetInfo.llvmOptions().empty()) {
    unsigned numArgs = targetInfo.llvmOptions().size();
    const char **args = new const char*[numArgs + 2];
    args[0] = "lld (LLVM option parsing)";
    for (unsigned i = 0; i != numArgs; ++i)
      args[i + 1] = targetInfo.llvmOptions()[i];
    args[numArgs + 1] = 0;
    llvm::cl::ParseCommandLineOptions(numArgs + 1, args);
  }

  // Read inputs
  ScopedTask readTask(getDefaultDomain(), "Read Args");
  std::vector<std::vector<std::unique_ptr<File>>> files(
      targetInfo.inputFiles().size());
  size_t index = 0;
  std::atomic<bool> fail(false);
  TaskGroup tg;
  for (const auto &input : targetInfo.inputFiles()) {
    if (targetInfo.logInputFiles())
      llvm::outs() << input.getPath() << "\n";

    tg.spawn([ &, index]{
      if (error_code ec = targetInfo.readFile(input.getPath(), files[index])) {
        llvm::errs() << "Failed to read file: " << input.getPath() << ": "
                     << ec.message() << "\n";
        fail = true;
        return;
      }
    });
    ++index;
  }
  tg.sync();
  readTask.end();

  if (fail)
    return true;

  InputFiles inputs;
  for (auto &f : files)
    inputs.appendFiles(f);

  // Give target a chance to add files.
  targetInfo.addImplicitFiles(inputs);

  // assign an ordinal to each file so sort() can preserve command line order
  inputs.assignFileOrdinals();

  // Do core linking.
  ScopedTask resolveTask(getDefaultDomain(), "Resolve");
  Resolver resolver(targetInfo, inputs);
  if (resolver.resolve()) {
    if (!targetInfo.allowRemainingUndefines())
      return true;
  }
  MutableFile &merged = resolver.resultFile();
  resolveTask.end();

  // Run passes on linked atoms.
  ScopedTask passTask(getDefaultDomain(), "Passes");
  PassManager pm;
  targetInfo.addPasses(pm);
  pm.runOnFile(merged);
  passTask.end();

  // Give linked atoms to Writer to generate output file.
  ScopedTask writeTask(getDefaultDomain(), "Write");
  if (error_code ec = targetInfo.writeFile(merged)) {
    llvm::errs() << "Failed to write file '" << targetInfo.outputPath()
                 << "': " << ec.message() << "\n";
    return true;
  }

  return false;
}
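
The only TaskGroup usage in this function is the parallel read step: one spawn() per input, failures recorded in an atomic flag, then sync(). A self-contained sketch of that shape, with std::thread standing in for lld's TaskGroup:

// Sketch of the spawn-per-input / sync pattern above: capture the loop index
// by value so each task writes its own slot, and record failures atomically.
#include <atomic>
#include <string>
#include <thread>
#include <vector>

static bool readInput(const std::string &path, std::string &out) {
    out = "contents of " + path;      // stand-in for targetInfo.readFile()
    return true;
}

int main() {
    std::vector<std::string> inputs = {"a.o", "b.o", "c.o"};
    std::vector<std::string> files(inputs.size());
    std::atomic<bool> fail(false);

    std::vector<std::thread> tg;                       // plays the role of TaskGroup
    for (size_t index = 0; index < inputs.size(); ++index) {
        tg.emplace_back([&, index] {                   // index by value, rest by reference
            if (!readInput(inputs[index], files[index]))
                fail = true;
        });
    }
    for (auto &t : tg)                                 // tg.sync()
        t.join();

    return fail ? 1 : 0;
}
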
Example #13
/// This is where the link is actually performed.
bool Driver::link(const LinkingContext &context, raw_ostream &diagnostics) {
  // Honor -mllvm
  if (!context.llvmOptions().empty()) {
    unsigned numArgs = context.llvmOptions().size();
    const char **args = new const char *[numArgs + 2];
    args[0] = "lld (LLVM option parsing)";
    for (unsigned i = 0; i != numArgs; ++i)
      args[i + 1] = context.llvmOptions()[i];
    args[numArgs + 1] = 0;
    llvm::cl::ParseCommandLineOptions(numArgs + 1, args);
  }
  InputGraph &inputGraph = context.inputGraph();
  if (!inputGraph.numFiles())
    return false;

  // Read inputs
  ScopedTask readTask(getDefaultDomain(), "Read Args");
  std::vector<std::vector<std::unique_ptr<File> > > files(
      inputGraph.numFiles());
  size_t index = 0;
  std::atomic<bool> fail(false);
  TaskGroup tg;
  std::vector<std::unique_ptr<LinkerInput> > linkerInputs;
  for (auto &ie : inputGraph.inputElements()) {
    if (ie->kind() == InputElement::Kind::File) {
      FileNode *fileNode = llvm::dyn_cast<FileNode>(ie.get());
      auto linkerInput = fileNode->createLinkerInput(context);
      if (!linkerInput) {
        llvm::outs() << fileNode->errStr(error_code(linkerInput)) << "\n";
        return false;
      }
      linkerInputs.push_back(std::move(*linkerInput));
    }
    else {
      llvm_unreachable("Not handling other types of InputElements");
    }
  }
  for (const auto &input : linkerInputs) {
    if (context.logInputFiles())
      llvm::outs() << input->getUserPath() << "\n";

    tg.spawn([ &, index]{
      if (error_code ec = context.parseFile(*input, files[index])) {
        diagnostics << "Failed to read file: " << input->getUserPath() << ": "
                    << ec.message() << "\n";
        fail = true;
        return;
      }
    });
    ++index;
  }
  tg.sync();
  readTask.end();

  if (fail)
    return false;

  InputFiles inputs;

  for (auto &f : inputGraph.internalFiles())
    inputs.appendFile(*f.get());

  for (auto &f : files)
    inputs.appendFiles(f);

  // Give target a chance to add files.
  context.addImplicitFiles(inputs);

  // assign an ordinal to each file so sort() can preserve command line order
  inputs.assignFileOrdinals();

  // Do core linking.
  ScopedTask resolveTask(getDefaultDomain(), "Resolve");
  Resolver resolver(context, inputs);
  if (resolver.resolve()) {
    if (!context.allowRemainingUndefines())
      return false;
  }
  MutableFile &merged = resolver.resultFile();
  resolveTask.end();

  // Run passes on linked atoms.
  ScopedTask passTask(getDefaultDomain(), "Passes");
  PassManager pm;
  context.addPasses(pm);
  pm.runOnFile(merged);
  passTask.end();

  // Give linked atoms to Writer to generate output file.
  ScopedTask writeTask(getDefaultDomain(), "Write");
  if (error_code ec = context.writeFile(merged)) {
    diagnostics << "Failed to write file '" << context.outputPath()
                << "': " << ec.message() << "\n";
    return false;
  }

  return true;
}