void Dispatcher::Pump() {
    if (!running_) return;

    std::lock_guard<std::mutex> dispatchLock(dispatch_queue_mutex_);
    for (auto i : *dispatch_events_) {  // for every queued event
        try {
            // Check for the key first so map::at never throws on a missing event
            if (mapped_events_->count(i.first) == 0) {
                std::cerr << "Event \"" + i.first + "\" does not apply to any Subscribers." << std::endl;
                continue;
            }
            for (auto obj : *(mapped_events_->at(i.first))) {  // for every Subscriber* for that event
                if (obj == nullptr) continue;
                std::lock_guard<std::mutex> lock(thread_queue_mutex_);  // released when it goes out of scope
                if (obj->serialized) {
                    thread_queue_->push_back(std::pair<Subscriber*, std::shared_ptr<void>>(obj, i.second));
                    thread_signal_.notify_one();
                } else {
                    nonserial_queue_->push_back(std::pair<Subscriber*, std::shared_ptr<void>>(obj, i.second));
                }
            }
        } catch (const std::string& msg) {
            // Fallback in case a missing key is reported by a thrown std::string
            std::cerr << "Event \"" + i.first + "\" does not apply to any Subscribers." << std::endl;
        }
    }
    dispatch_events_->clear();  // everything is queued for processing, so clear the cache
}
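// Not part of the original source: a minimal sketch of the worker loop that
// drains the serialized queue Pump() fills above. It assumes thread_queue_ is
// a std::deque<std::pair<Subscriber*, std::shared_ptr<void>>>* and that
// Subscriber exposes a hypothetical Call(std::shared_ptr<void>) hook.
void Dispatcher::SerialWorker() {
    std::unique_lock<std::mutex> lock(thread_queue_mutex_);
    while (running_) {
        // Sleep until Pump() pushes work and calls notify_one()
        thread_signal_.wait(lock, [this] { return !thread_queue_->empty() || !running_; });
        while (!thread_queue_->empty()) {
            auto work = thread_queue_->front();
            thread_queue_->pop_front();
            lock.unlock();                  // run the callback outside the lock
            work.first->Call(work.second);  // hypothetical subscriber callback
            lock.lock();
        }
    }
}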
int GonkNativeWindowClient::perform(int operation, va_list args)
{
    int res = NO_ERROR;
    switch (operation) {
    case NATIVE_WINDOW_CONNECT:
        // deprecated. must return NO_ERROR.
        break;
    case NATIVE_WINDOW_DISCONNECT:
        // deprecated. must return NO_ERROR.
        break;
    case NATIVE_WINDOW_SET_USAGE:
        res = dispatchSetUsage(args);
        break;
    case NATIVE_WINDOW_SET_CROP:
        res = dispatchSetCrop(args);
        break;
    case NATIVE_WINDOW_SET_BUFFER_COUNT:
        res = dispatchSetBufferCount(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_GEOMETRY:
        res = dispatchSetBuffersGeometry(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_TRANSFORM:
        res = dispatchSetBuffersTransform(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP:
        res = dispatchSetBuffersTimestamp(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS:
        res = dispatchSetBuffersDimensions(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_USER_DIMENSIONS:
        res = dispatchSetBuffersUserDimensions(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_FORMAT:
        res = dispatchSetBuffersFormat(args);
        break;
    case NATIVE_WINDOW_LOCK:
        res = dispatchLock(args);
        break;
    case NATIVE_WINDOW_UNLOCK_AND_POST:
        res = dispatchUnlockAndPost(args);
        break;
    case NATIVE_WINDOW_SET_SCALING_MODE:
        res = dispatchSetScalingMode(args);
        break;
    case NATIVE_WINDOW_API_CONNECT:
        res = dispatchConnect(args);
        break;
    case NATIVE_WINDOW_API_DISCONNECT:
        res = dispatchDisconnect(args);
        break;
    default:
        res = NAME_NOT_FOUND;
        break;
    }
    return res;
}
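// The dispatch* helpers above are not shown in this excerpt; the common
// pattern (sketched here, not guaranteed to match this file exactly) is to
// unpack the va_list into typed arguments and forward to a typed setter.
int GonkNativeWindowClient::dispatchSetUsage(va_list args) {
    int usage = va_arg(args, int);  // SET_USAGE carries a single int
    return setUsage(usage);         // typed setter assumed to exist
}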
int SurfaceTextureClient::perform(int operation, va_list args)
{
    int res = NO_ERROR;
    switch (operation) {
    case NATIVE_WINDOW_CONNECT:
        // deprecated. must return NO_ERROR.
        break;
    case NATIVE_WINDOW_DISCONNECT:
        // deprecated. must return NO_ERROR.
        break;
    case NATIVE_WINDOW_SET_USAGE:
        res = dispatchSetUsage(args);
        break;
    case NATIVE_WINDOW_SET_CROP:
        res = dispatchSetCrop(args);
        break;
    case NATIVE_WINDOW_SET_BUFFER_COUNT:
        res = dispatchSetBufferCount(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_GEOMETRY:
        res = dispatchSetBuffersGeometry(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_TRANSFORM:
        res = dispatchSetBuffersTransform(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP:
        res = dispatchSetBuffersTimestamp(args);
        break;
#ifdef OMAP_ENHANCEMENT_CPCAM
    case NATIVE_WINDOW_SET_BUFFERS_METADATA:
        res = dispatchSetBuffersMetadata(args);
        break;
#endif
    case NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS:
        res = dispatchSetBuffersDimensions(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_USER_DIMENSIONS:
        res = dispatchSetBuffersUserDimensions(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_FORMAT:
        res = dispatchSetBuffersFormat(args);
        break;
    case NATIVE_WINDOW_LOCK:
        res = dispatchLock(args);
        break;
    case NATIVE_WINDOW_UNLOCK_AND_POST:
        res = dispatchUnlockAndPost(args);
        break;
    case NATIVE_WINDOW_SET_SCALING_MODE:
        res = dispatchSetScalingMode(args);
        break;
    case NATIVE_WINDOW_API_CONNECT:
        res = dispatchConnect(args);
        break;
    case NATIVE_WINDOW_API_DISCONNECT:
        res = dispatchDisconnect(args);
        break;
#ifdef OMAP_ENHANCEMENT_CPCAM
    case NATIVE_WINDOW_UPDATE_AND_GET_CURRENT:
        res = dispatchUpdateAndGetCurrent(args);
        break;
    case NATIVE_WINDOW_ADD_BUFFER_SLOT:
        res = dispatchAddBufferSlot(args);
        break;
    case NATIVE_WINDOW_GET_ID:
        res = dispatchGetId(args);
        break;
    case NATIVE_WINDOW_RELEASE_BUFFER:
        res = dispatchReleaseBuffer(args);
        break;
#endif
    default:
        res = NAME_NOT_FOUND;
        break;
    }
    return res;
}
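// Callers do not invoke perform() directly; the public wrappers funnel each
// operation through the ANativeWindow::perform function pointer, which is how
// the switch above receives its va_list. A sketch mirroring the
// system/window.h convention:
static inline int native_window_set_usage(struct ANativeWindow* window, int usage) {
    return window->perform(window, NATIVE_WINDOW_SET_USAGE, usage);
}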
bool ShadowTree::tryCommit(ShadowTreeCommitTransaction transaction) const {
  SystraceSection s("ShadowTree::tryCommit");

  auto telemetry = MountingTelemetry{};
  telemetry.willCommit();

  SharedRootShadowNode oldRootShadowNode;

  {
    // Read `rootShadowNode_` under a shared (reader) lock.
    std::shared_lock<better::shared_mutex> lock(commitMutex_);
    oldRootShadowNode = rootShadowNode_;
  }

  UnsharedRootShadowNode newRootShadowNode = transaction(oldRootShadowNode);

  if (!newRootShadowNode) {
    return false;
  }

  std::vector<LayoutableShadowNode const *> affectedLayoutableNodes{};
  affectedLayoutableNodes.reserve(1024);

  telemetry.willLayout();
  newRootShadowNode->layout(&affectedLayoutableNodes);
  telemetry.didLayout();

  newRootShadowNode->sealRecursive();

  auto revisionNumber = ShadowTreeRevision::Number{};

  {
    // Update `rootShadowNode_` under a unique (writer) lock, but only if no
    // other commit has replaced it in the meantime.
    std::unique_lock<better::shared_mutex> lock(commitMutex_);

    if (rootShadowNode_ != oldRootShadowNode) {
      return false;
    }

    rootShadowNode_ = newRootShadowNode;

    {
      std::lock_guard<std::mutex> dispatchLock(EventEmitter::DispatchMutex());

      updateMountedFlag(
          oldRootShadowNode->getChildren(), newRootShadowNode->getChildren());
    }

    revisionNumber_++;
    revisionNumber = revisionNumber_;
  }

  emitLayoutEvents(affectedLayoutableNodes);

  telemetry.didCommit();

  mountingCoordinator_->push(
      ShadowTreeRevision{newRootShadowNode, revisionNumber, telemetry});

  if (delegate_) {
    delegate_->shadowTreeDidCommit(*this, mountingCoordinator_);
  }

  return true;
}
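// tryCommit() returns false when another thread committed between the shared
// read and the unique write above, so callers retry. A minimal retry wrapper
// in that spirit (a sketch, not necessarily the library's exact commit()):
void ShadowTree::commit(ShadowTreeCommitTransaction transaction) const {
  while (!tryCommit(transaction)) {
    // Another commit raced ahead; re-run the transaction against the new root.
  }
}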
void Dispatcher::Pump() {
    if (!running)
        return;

    std::vector<WorkPair> serialized;
    std::vector<WorkPair> unserialized;

    std::lock_guard<std::recursive_mutex> dispatchLock(dispatch_queue_mutex);
    try {
        for (auto i : *dispatch_events) {
            // The inner try handles map implementations that throw on a missing key
            try {
                CheckKey(i.first);

                // If there's nothing to deliver this event to, just drop it
                // and move on to the next event
                if (mapped_events->at(i.first)->size() == 0) {
                    continue;
                }

                std::lock_guard<std::recursive_mutex> mapped_event_lock(mapped_event_mutex);
                for (auto it = mapped_events->at(i.first)->begin(); it != mapped_events->at(i.first)->end();) {

                    // Remove nullptr Subscriber* from the processing; erase()
                    // already advances to the next element, so don't increment again
                    if (*it == nullptr) {
                        it = mapped_events->at(i.first)->erase(it);
                        continue;
                    }

                    if ((*it)->_serialized) {
                        serialized.push_back(
                            std::pair<Subscriber*, std::shared_ptr<void>>(*it, i.second)); // copying right now...
                    } else {
                        unserialized.push_back(
                            std::pair<Subscriber*, std::shared_ptr<void>>(*it, i.second)); // copying right now...
                    }
                    ++it;
                }
            } catch (const std::string& msg) {
                std::cerr << "Internal logic error in Dispatcher::Pump()" << std::endl << msg << std::endl;
            }
        }
    } catch (const std::string& message) {
        std::cerr << "Error in Dispatcher::Pump()" << std::endl << message << std::endl;
    }

    { // scope control vs try-catch is preference
        std::lock_guard<std::recursive_mutex> thread_queue_lock(thread_queue_mutex);
        thread_queue->insert(thread_queue->end(), serialized.begin(), serialized.end());
    }

    { // scope control vs try-catch is preference
        std::lock_guard<std::recursive_mutex> nonserial_queue_lock(nonserial_queue_mutex);
        nonserial_queue->insert(nonserial_queue->end(), unserialized.begin(), unserialized.end());
    }

    // Wake every worker, not just one: the pool may not have drained the whole
    // thread_queue in its last pass, so notifying all threads avoids a
    // pseudo-deadlock even when no new events were queued this time around.
    thread_signal.notify_all();

    dispatch_events->clear(); // everything is queued for processing, so clear the cache
}
void Dispatcher::DispatchEvent(const EventType eventID, const WorkArguments eventData) {
    std::lock_guard<std::recursive_mutex> dispatchLock(dispatch_queue_mutex);
    dispatch_events->push_back(std::pair<EventType, WorkArguments>(eventID, eventData));
}
void Dispatcher::DispatchEvent(const EventType eventID, const std::shared_ptr<void> eventData) {
    // std::cout << "Dispatcher --->  Received event " << eventID << "." << std::endl;
    std::lock_guard<std::mutex> dispatchLock(dispatch_queue_mutex_);
    dispatch_events_->push_back(std::pair<EventType, std::shared_ptr<void>>(eventID, eventData));
}
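// A minimal usage sketch, not from the original source: enqueue an event from
// any thread, then fan it out on the next Pump() pass. The event name
// "PLAYER_DIED" and the int payload are illustrative assumptions.
void Example(Dispatcher& dispatcher) {
    auto payload = std::make_shared<int>(42);          // arbitrary event data
    dispatcher.DispatchEvent("PLAYER_DIED", payload);  // thread-safe enqueue
    dispatcher.Pump();                                 // fan out to subscriber queues
}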