    /// <summary>
    ///     Causes the thread proxy running atop this virtual processor root to temporarily stop dispatching pContext.
    /// </summary>
    /// <param name="pContext">
    ///     The context which should temporarily stop being dispatched by the thread proxy running atop this virtual processor root.
    /// </param>
    bool FreeVirtualProcessorRoot::Deactivate(Concurrency::IExecutionContext *pContext)
    {
        if (pContext == NULL)
            throw std::invalid_argument("pContext");

        if (m_pExecutingProxy == NULL)
            throw invalid_operation();

        FreeThreadProxy * pProxy = static_cast<FreeThreadProxy *> (pContext->GetProxy());

        if (m_pExecutingProxy != pProxy)
        {
            throw invalid_operation();
        }

        LONG newVal = InterlockedDecrement(&m_activationFence);

        if (newVal == 0)
        {
            //
            // Reduce the subscription level on the core while the root is suspended. The count is used by dynamic resource management
            // to tell which cores allocated to a scheduler are unused, so that they can be temporarily repurposed.
            //
            InterlockedExchangePointer(reinterpret_cast<void * volatile *>(&m_pDeactivatedProxy), m_pExecutingProxy);
            Unsubscribe();
            pProxy->SuspendExecution();
        }
        else
        {
            //
            // There should be no Deactivate/Remove races.
            //
            ASSERT(newVal == 1);

            Concurrency::IExecutionContext *pActivatedContext = AcquireActivatedContext();

            //
        // If we got here, it means that while we were activated we saw an activation of pCtxX and a subsequent deactivation of pCtxY.
        // Per the spec, these two contexts must be identical.
            //
            ASSERT(pActivatedContext == pContext);

            //
            // The activation for this deactivation came in early, so we return early here without making a kernel transition.
            //
        }

        return true;
    }
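The fence arithmetic is the interesting part of this example: a Deactivate that drops the fence to 0 genuinely parks the thread proxy, while one that lands after an early Activate only decrements 2 back to 1 and returns without a kernel transition. Below is a minimal sketch of that handshake, using C++20's std::atomic and std::binary_semaphore in place of the interlocked intrinsics and the proxy's suspend/resume machinery; ActivationGate and its members are illustrative names, not ConcRT types.

#include <atomic>
#include <semaphore>

// Illustrative model of the activation fence; not ConcRT source.
class ActivationGate
{
    std::atomic<long> m_fence{1};     // 1 == dispatching, 0 == suspended
    std::binary_semaphore m_wake{0};  // stands in for Suspend/ResumeExecution

public:
    void Deactivate()
    {
        long newVal = m_fence.fetch_sub(1) - 1;
        if (newVal == 0)
            m_wake.acquire();  // no early Activate arrived: block in the kernel
        // newVal == 1: the paired Activate came in early; return immediately.
    }

    void Activate()
    {
        long newVal = m_fence.fetch_add(1) + 1;
        if (newVal == 1)
            m_wake.release();  // the proxy is parked: wake it
        // newVal == 2: we ran ahead of the paired Deactivate; nothing to wake.
    }
};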
Example #2
    void set_scheduler(sched_ptr scheduler)
    {
        if (m_state == pre_ctor || m_state == post_dtor) {
            throw invalid_operation("Scheduler cannot be initialized now");
        }

        ::pplx::details::_Scoped_spin_lock lock(m_spinlock);

        if (m_scheduler != nullptr)
        {
            throw invalid_operation("Scheduler is already initialized");
        }

        m_scheduler = std::move(scheduler);
    }
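A short usage sketch for the setter above: the first call installs a scheduler, any later call throws. Note the layering of the checks: the lifetime state is tested up front, and the already-initialized test runs under the spin lock, so two racing initializers cannot both succeed. The holder object and my_scheduler type below are assumptions for illustration; only set_scheduler comes from the snippet.

// Hypothetical driver; 'holder' exposes the set_scheduler shown above.
auto sched = std::make_shared<my_scheduler>();  // my_scheduler: assumed type
holder.set_scheduler(sched);                    // first call: installs it
try {
    holder.set_scheduler(sched);                // second call: rejected
} catch (const invalid_operation &) {
    // "Scheduler is already initialized"
}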
Example #3
      /** @brief Throws an exception that reflects the given OpenCL error code */
      static void raise_exception(cl_int err)
      {
        switch (err)
        {
          case CL_DEVICE_NOT_FOUND:               throw device_not_found();
          case CL_DEVICE_NOT_AVAILABLE:           throw device_not_available();
          case CL_COMPILER_NOT_AVAILABLE:         throw compiler_not_available();
          case CL_MEM_OBJECT_ALLOCATION_FAILURE:  throw mem_object_allocation_failure();
          case CL_OUT_OF_RESOURCES:               throw out_of_resources();
          case CL_OUT_OF_HOST_MEMORY:             throw out_of_host_memory();
          case CL_PROFILING_INFO_NOT_AVAILABLE:   throw profiling_info_not_available();
          case CL_MEM_COPY_OVERLAP:               throw mem_copy_overlap();
          case CL_IMAGE_FORMAT_MISMATCH:          throw image_format_mismatch();
          case CL_IMAGE_FORMAT_NOT_SUPPORTED:     throw image_format_not_supported();
          case CL_BUILD_PROGRAM_FAILURE:          throw build_program_failure();
          case CL_MAP_FAILURE:                    throw map_failure();

          case CL_INVALID_VALUE:                  throw invalid_value();
          case CL_INVALID_DEVICE_TYPE:            throw invalid_device_type();
          case CL_INVALID_PLATFORM:               throw invalid_platform();
          case CL_INVALID_DEVICE:                 throw invalid_device();
          case CL_INVALID_CONTEXT:                throw invalid_context();
          case CL_INVALID_QUEUE_PROPERTIES:       throw invalid_queue_properties();
          case CL_INVALID_COMMAND_QUEUE:          throw invalid_command_queue();
          case CL_INVALID_HOST_PTR:               throw invalid_host_ptr();
          case CL_INVALID_MEM_OBJECT:             throw invalid_mem_object();
          case CL_INVALID_IMAGE_FORMAT_DESCRIPTOR: throw invalid_image_format_descriptor();
          case CL_INVALID_IMAGE_SIZE:             throw invalid_image_size();
          case CL_INVALID_SAMPLER:                throw invalid_sampler();
          case CL_INVALID_BINARY:                 throw invalid_binary();
          case CL_INVALID_BUILD_OPTIONS:          throw invalid_build_options();
          case CL_INVALID_PROGRAM:                throw invalid_program();
          case CL_INVALID_PROGRAM_EXECUTABLE:     throw invalid_program_executable();
          case CL_INVALID_KERNEL_NAME:            throw invalid_kernel_name();
          case CL_INVALID_KERNEL_DEFINITION:      throw invalid_kernel_definition();
          case CL_INVALID_KERNEL:                 throw invalid_kernel();
          case CL_INVALID_ARG_INDEX:              throw invalid_arg_index();
          case CL_INVALID_ARG_VALUE:              throw invalid_arg_value();
          case CL_INVALID_ARG_SIZE:               throw invalid_arg_size();
          case CL_INVALID_KERNEL_ARGS:            throw invalid_kernel_args();
          case CL_INVALID_WORK_DIMENSION:         throw invalid_work_dimension();
          case CL_INVALID_WORK_GROUP_SIZE:        throw invalid_work_group_size();
          case CL_INVALID_WORK_ITEM_SIZE:         throw invalid_work_item_size();
          case CL_INVALID_GLOBAL_OFFSET:          throw invalid_global_offset();
          case CL_INVALID_EVENT_WAIT_LIST:        throw invalid_event_wait_list();
          case CL_INVALID_EVENT:                  throw invalid_event();
          case CL_INVALID_OPERATION:              throw invalid_operation();
          case CL_INVALID_GL_OBJECT:              throw invalid_gl_object();
          case CL_INVALID_BUFFER_SIZE:            throw invalid_buffer_size();
          case CL_INVALID_MIP_LEVEL:              throw invalid_mip_level();
          case CL_INVALID_GLOBAL_WORK_SIZE:       throw invalid_global_work_size();
          #ifdef CL_INVALID_PROPERTY
          case CL_INVALID_PROPERTY:               throw invalid_property();
          #endif
          default: throw unknown_error();
        }

      } // raise_exception
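A typical call-site pattern for a dispatcher like this one is to route every OpenCL status code through a single checker, so the typed exceptions above do all the reporting. clGetPlatformIDs is a real OpenCL entry point; the check helper is an assumed convenience wrapper, and raise_exception is presumed reachable here (in ViennaCL it is a static member of an error-checker class).

#include <CL/cl.h>

// Assumed wrapper: forward any failure code to the dispatcher above.
inline void check(cl_int err)
{
    if (err != CL_SUCCESS)
        raise_exception(err);
}

// Usage: a failing call surfaces as a typed exception such as invalid_value.
cl_uint count_platforms()
{
    cl_uint n = 0;
    check(clGetPlatformIDs(0, nullptr, &n));
    return n;
}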
Example #4
    /// <summary>
    ///     Forces all data in the memory hierarchy of one processor to be visible to all other processors.
    /// </summary>
    /// <param name="pContext">
    ///     The context which is currently being dispatched by this root.
    /// </param>
    void FreeVirtualProcessorRoot::EnsureAllTasksVisible(Concurrency::IExecutionContext *pContext)
    {
        if (pContext == NULL)
            throw std::invalid_argument("pContext");

        if (m_pExecutingProxy == NULL)
            throw invalid_operation();

        FreeThreadProxy * pProxy = static_cast<FreeThreadProxy *> (pContext->GetProxy());

        if (m_pExecutingProxy != pProxy)
        {
            throw invalid_operation();
        }

        GetSchedulerProxy()->GetResourceManager()->FlushStoreBuffers();
    }
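FlushStoreBuffers asks the Resource Manager for a machine-wide memory barrier. The closest portable analogue is a sequentially consistent fence, though it is strictly weaker: it orders only the calling thread's accesses, whereas the RM flush makes prior writes visible across all processors (in the spirit of Windows' FlushProcessWriteBuffers). A hedged approximation:

#include <atomic>

// Portable approximation only; see the caveat above. A true cross-processor
// flush needs OS help (e.g. an IPI broadcast to every core).
void ensure_tasks_visible_approx()
{
    std::atomic_thread_fence(std::memory_order_seq_cst);
}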
Example #5
static void
ensure_not_printing (struct device *d)
{
  if (!NILP (DEVICE_FRAME_LIST (d)))
  {
    Lisp_Object device = wrap_device (d);

    invalid_operation ("Cannot change settings while print job is active",
		       device);
  }
}
Example #6
    /// <summary>
    ///     Causes the scheduler to start running a thread proxy on the specified virtual processor root which will execute
    ///     the Dispatch method of the context supplied by pContext. Alternatively, it can be used to resume a
    ///     virtual processor root that was deactivated by a previous call to Deactivate.
    /// </summary>
    /// <param name="pContext">
    ///     The context which will be dispatched on a (potentially) new thread running atop this virtual processor root.
    /// </param>
    void FreeVirtualProcessorRoot::Activate(Concurrency::IExecutionContext *pContext)
    {
        if (pContext == NULL)
            throw std::invalid_argument("pContext");

        //
        // If the context is being reused, it had better return a NULL thread proxy when we ask!  This is part of the spec contract.
        //
        FreeThreadProxy * pProxy = static_cast<FreeThreadProxy *> (pContext->GetProxy());
        if (pProxy == NULL)
        {
            pProxy = static_cast<FreeThreadProxy *> (GetSchedulerProxy()->GetNewThreadProxy(pContext));
        }

        //
        // All calls to Activate after the first one can potentially race with the paired deactivate. This is allowed by the API, and we use the fence below
        // to reduce kernel transitions in case of this race.
        //
        // We must also be careful because calls to Activate can race with ResetOnIdle, triggered by either a SwitchOut() or a return from
        // dispatch; we must be prepared to handle that race and its implications for binding pContext.
        //
        LONG newVal = InterlockedIncrement(&m_activationFence);
        if (newVal == 2)
        {
            ASSERT(m_pDeactivatedProxy == NULL);
            //
            // We received two activations in a row. The contract with the client allows this, but we should expect a deactivation, a
            // SwitchOut, or a return from the dispatch loop soon after.
            //
            // Simply return instead of signalling the event. The deactivation will reduce the count back to 1. In addition, we're not responsible
            // for changing the idle state on the core.
            //
            SetActivatedContext(pContext);
        }
        else
        {
            ASSERT(newVal == 1);

            SpinUntilIdle();
            ASSERT(m_pExecutingProxy == m_pDeactivatedProxy);

            if (m_pExecutingProxy != NULL)
            {
                //
                // The root already has an associated thread proxy.  Check that the context provided is associated with
                // the same proxy.
                //
                if (pProxy != m_pExecutingProxy)
                {
                    //
                    // This is a fatal exception.  We can potentially correct the state of the fence, but the scheduler is beyond confused about
                    // the spec.  @TODO: Is it worth making some attempt to correct *our* state given that it's already messed up above us?
                    //
                    throw invalid_operation();
                }
            }

            m_pDeactivatedProxy = NULL;

            //
            // An activated root increases the subscription level on the underlying core.
            //
            Subscribe();

            //
            // Affinitization sets this as the executing proxy for the virtual processor root.
            //
            Affinitize(pProxy);

            ASSERT(m_pExecutingProxy == pProxy);
            ASSERT(pProxy->GetVirtualProcessorRoot() != NULL);
            ASSERT(pProxy->GetExecutionContext() != NULL);

            pProxy->ResumeExecution();
        }
    }
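Seen from a scheduler, Activate and Deactivate bracket the idle time of a virtual processor. Below is a hedged sketch of the canonical pairing against the public IVirtualProcessorRoot interface; the work-probing helper and the loop shape are illustrative, not ConcRT source.

#include <concrtrm.h>

bool TryRunPendingWork();  // hypothetical: runs one queued task if any exists

// Illustrative idle loop for a context running atop a virtual processor root.
void IdleLoop(Concurrency::IVirtualProcessorRoot *pRoot,
              Concurrency::IExecutionContext *pThis)
{
    for (;;)
    {
        if (TryRunPendingWork())
            continue;

        // Park the virtual processor. If a racing Activate(pThis) already
        // bumped the fence to 2, this returns immediately; otherwise it
        // blocks until another thread calls pRoot->Activate(pThis).
        pRoot->Deactivate(pThis);
    }
}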
Example #7
static void
signal_open_printer_error (struct device *d)
{
  invalid_operation ("Failed to open printer", DEVICE_CONNECTION (d));
}
Example #8
void check_is_configured()
{
	if(pt.empty())
		BOOST_THROW_EXCEPTION(invalid_operation() << msg_info("Environment properties have not been configured"));
}
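msg_info in this last example is presumably a boost::error_info tag. Below is a hedged sketch of definitions that would let the snippet compile, plus a catch site that recovers the attached message; the tag and exception shapes are assumptions inferred from the call, not taken from the project.

#include <boost/exception/all.hpp>
#include <cstdio>
#include <exception>
#include <string>

// Assumed definitions matching the usage above.
struct invalid_operation : virtual std::exception, virtual boost::exception {};
typedef boost::error_info<struct tag_msg_info, std::string> msg_info;

void run_checked()
{
    try {
        check_is_configured();
    } catch (const invalid_operation &e) {
        // error_info attached at the throw point is recoverable here.
        if (const std::string *msg = boost::get_error_info<msg_info>(e))
            std::fprintf(stderr, "invalid_operation: %s\n", msg->c_str());
    }
}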