voxelizer_generate_voxel_list_fragment(const gl::rendering_system &rs,
										   voxel_storage *voxels,
										   const scene *s)
		: Base(rs,
			   gl::device_pipeline_graphics_configurations{},
			   "sparse_voxelizer.vert",
			   "sparse_voxelizer.geom",
			   "sparse_voxelizer.frag"),
		  s(s),
		  voxels(voxels),
		  empty_fb(lib::allocate_unique<gl::framebuffer>(rs.get_creating_context(),
														 "voxelizer framebuffer",
														 gl::framebuffer_layout(),
														 glm::u32vec2{ voxelizer_fb_extent, voxelizer_fb_extent })) {
		draw_task.attach_pipeline(pipeline());
		draw_task.attach_vertex_buffer(s->get_object_group().get_draw_buffers().get_vertex_buffer());
		draw_task.attach_index_buffer(s->get_object_group().get_draw_buffers().get_index_buffer());

		// Attach empty framebuffer
		pipeline().attach_framebuffer(*empty_fb);
		// Configure voxelization pipeline
		pipeline()["voxel_assembly_list_counter_binding"] = gl::bind(voxels->voxel_assembly_list_counter_buffer());
		pipeline()["voxel_assembly_list_binding"] = gl::bind(voxels->voxel_assembly_list_buffer());
		voxels->configure_voxel_pipeline(pipeline());
	}
Example #2
 virtual void keyPressEvent(unsigned int, vl::EKey key)
 {
   if (key == vl::Key_F10)
   {
     _camera_read_pixels->setup( 0, 0, pipeline()->camera()->viewport()->width(), pipeline()->camera()->viewport()->height(), vl::RDB_BACK_LEFT );
     pipeline()->camera()->addRenderFinishedCallback(_camera_read_pixels.get());
     _camera_read_pixels->setRemoveAfterCall(true);
     std::string filename = vl::Say( title() + "-%n.tif") << (int)vl::Time::timerSeconds();
     _camera_read_pixels->setSavePath( filename );
     vl::Log::print( vl::Say("Screenshot: '%s'\n") << filename );
   }
 }
Example #3
StatusWith<ResolvedView> ViewCatalog::resolveView(OperationContext* txn,
                                                  const NamespaceString& nss) {
    stdx::lock_guard<stdx::mutex> lk(_mutex);
    const NamespaceString* resolvedNss = &nss;
    std::vector<BSONObj> resolvedPipeline;

    for (int i = 0; i < ViewGraph::kMaxViewDepth; i++) {
        auto view = _lookup_inlock(txn, resolvedNss->ns());
        if (!view)
            return StatusWith<ResolvedView>({*resolvedNss, resolvedPipeline});

        resolvedNss = &(view->viewOn());

        // Prepend the underlying view's pipeline to the current working pipeline.
        const std::vector<BSONObj>& toPrepend = view->pipeline();
        resolvedPipeline.insert(resolvedPipeline.begin(), toPrepend.begin(), toPrepend.end());

        // If the first stage is a $collStats, then we return early with the viewOn namespace.
        if (toPrepend.size() > 0 && !toPrepend[0]["$collStats"].eoo()) {
            return StatusWith<ResolvedView>({*resolvedNss, resolvedPipeline});
        }
    }

    return {ErrorCodes::ViewDepthLimitExceeded,
            str::stream() << "View depth too deep or view cycle detected; maximum depth is "
                          << ViewGraph::kMaxViewDepth};
}
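To make the prepend loop concrete, here is a minimal sketch of the same strategy: follow viewOn links until a non-view namespace is reached, prepending each view's stages along the way. Resolving view "a" (defined on "b" with pipeline [A]) where "b" is a view on collection "c" with pipeline [B] yields namespace "c" and pipeline [B, A]. FakeView and resolve() are invented for illustration; this is not MongoDB code.

// Minimal, self-contained sketch of the resolution strategy used above.
// FakeView and resolve() are hypothetical illustration types.
#include <map>
#include <string>
#include <utility>
#include <vector>

struct FakeView {
    std::string viewOn;                 // namespace this view is defined on
    std::vector<std::string> pipeline;  // the view's own stages
};

std::pair<std::string, std::vector<std::string>>
resolve(const std::map<std::string, FakeView>& views, std::string ns) {
    std::vector<std::string> resolved;
    for (int depth = 0; depth < 20; ++depth) {
        auto it = views.find(ns);
        if (it == views.end())
            return {ns, resolved};  // reached a real collection: done
        ns = it->second.viewOn;
        // Prepend the underlying view's stages, as resolveView() does above.
        resolved.insert(resolved.begin(), it->second.pipeline.begin(),
                        it->second.pipeline.end());
    }
    return {std::string(), {}};  // depth limit exceeded or a cycle detected
}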
Example #4
void GrOpFlushState::executeDrawsAndUploadsForMeshDrawOp(
        const GrOp* op, const SkRect& chainBounds, GrProcessorSet&& processorSet,
        GrPipeline::InputFlags pipelineFlags, const GrUserStencilSettings* stencilSettings) {
    SkASSERT(this->rtCommandBuffer());

    GrPipeline::InitArgs pipelineArgs;
    pipelineArgs.fInputFlags = pipelineFlags;
    pipelineArgs.fDstProxy = this->dstProxy();
    pipelineArgs.fCaps = &this->caps();
    pipelineArgs.fResourceProvider = this->resourceProvider();
    pipelineArgs.fUserStencil = stencilSettings;
    GrPipeline pipeline(pipelineArgs, std::move(processorSet), this->detachAppliedClip());

    while (fCurrDraw != fDraws.end() && fCurrDraw->fOp == op) {
        GrDeferredUploadToken drawToken = fTokenTracker->nextTokenToFlush();
        while (fCurrUpload != fInlineUploads.end() &&
               fCurrUpload->fUploadBeforeToken == drawToken) {
            this->rtCommandBuffer()->inlineUpload(this, fCurrUpload->fUpload);
            ++fCurrUpload;
        }
        this->rtCommandBuffer()->draw(
                *fCurrDraw->fGeometryProcessor, pipeline, fCurrDraw->fFixedDynamicState,
                fCurrDraw->fDynamicStateArrays, fCurrDraw->fMeshes, fCurrDraw->fMeshCnt,
                chainBounds);
        fTokenTracker->flushToken();
        ++fCurrDraw;
    }
}
void
Consumer::run(const Name& prefix,
              const size_t pipelineSize,
              const bool mustBeFresh,
              const time::milliseconds& lifetime,
              const bool isVerbose)
{
  m_nextToPrint = 0;

  Face face;

  engine::PipelineInterests pipeline(face, pipelineSize,
                                     bind(&Consumer::onData, this, _1, _2),
                                     bind(&Consumer::onFailure, this, _1),
                                     mustBeFresh, lifetime, 3, isVerbose);

  engine::DiscoverVersion discover(face, prefix,
                                   bind(&engine::PipelineInterests::runWithData, &pipeline, _2),
                                   bind(&Consumer::onFailure, this, _1),
                                   mustBeFresh, lifetime, isVerbose);

  discover.run();
  try
    {
      face.processEvents();
    }
  catch (std::exception& e)
    {
      std::cerr << "ERROR: " << e.what() << std::endl;
    }
}
Example #6
int main() {
  int loop = 1;
  int input = -1;
  while(loop) {
    printf("Pipelined/Superscalar instruction performance\n");
    printf("---------------------------------------------\n");
    printf("1) Enter instructions\n");
    printf("2) Calculate total cycle count on a 6-stage pipelined architecture\n");
    printf("3) Calculate total cycle count on a 6-stage superscalar architecture\n");
    printf("4) Quit\n");
    printf("\n");
    printf("Enter selection: ");
    scanf("%d", &input);
    switch(input) {
    case 1:
      enterInstr();
      break;
    case 2:
      pipeline();
      break;
    case 3:
      superscalar();
      break;
    case 4:
      loop = 0;
      break;
    default:
      printf("Invalid selection.");
    }
    printf("\n\n");
  }
  return 0;
}
Example #7
File: main.cpp  Project: TBFMX/opencvWork
/**
 * Processes a recorded video or live view from web-camera and allows you to adjust homography refinement and 
 * reprojection threshold in runtime.
 */
void processVideo(const cv::Mat& patternImage, CameraCalibration& calibration, cv::VideoCapture& capture)
{
    // Grab first frame to get the frame dimensions
    cv::Mat currentFrame;  
    capture >> currentFrame;

    // Check the capture succeeded:
    if (currentFrame.empty())
    {
        std::cout << "Cannot open video capture device" << std::endl;
        return;
    }

    cv::Size frameSize(currentFrame.cols, currentFrame.rows);

    ARPipeline pipeline(patternImage, calibration);
    ARDrawingContext drawingCtx("Markerless AR", frameSize, calibration);

    bool shouldQuit = false;
    do
    {
        capture >> currentFrame;
        if (currentFrame.empty())
        {
            shouldQuit = true;
            continue;
        }

        shouldQuit = processFrame(currentFrame, pipeline, drawingCtx);
    } while (!shouldQuit);
}
Example #8
bool              prompt_init()
{
  bool            is_running;
  bool            is_success;
  char*           cmd;
  int             parser_ret;
  t_cmd_list*     cmd_current;
  t_cmd_list*     cmd_list;

  is_running = true;
  is_success = true;
  parser_ret = 0;
  while (is_running)
  {
    prompt_show();
    cmd = prompt_cmd_read();
    if (cmd == NULL)
    {
      is_success = false;
      free(cmd);
      break;
    }
    if (my_strlen(cmd) > 0)
    {
      cmd_list = prompt_cmd_split(cmd);
      if (cmd_list == NULL)
      {
        is_success = false;
        break;
      }
      cmd_current = cmd_list;
      while(cmd_current != NULL)
      {
        if (cmd_current->is_piped)
        {
          cmd_current = pipeline(cmd_current, -1);
        }
        else if ((cmd_current->is_redirect_input) || (cmd_current->is_redirect_output))
        {
          cmd_current = redirections(cmd_current);
        }
        else
        {
          parser_ret = parser(cmd_current->cmd);
        }
        cmd_current = cmd_current->next;
      }
      prompt_cmd_split_free(cmd_list);
    }
    else
    {
      free(cmd);
    }
    if (parser_ret == BUILTIN_EXIT)
    {
      is_running = false;
    }
  }
  return (is_success);
}
Example #9
int main()
{
	vpp::ContextSettings settings = { /* ... */ };
	
	auto window = initWindow();
	auto context = vpp::Win32Context(settings, hinstance, window);

	vpp::VertexBufferLayout vbLayout({vpp::VertexBufferLayout::Point3fColor3f});
	vpp::DescriptorSetLayout dsLayout(context.device(), {vk::DescriptorType::UniformBuffer});

	vpp::GraphicsPipeline::CreateInfo createInfo;
	createInfo.renderPass = context.swapChain().vkRenderPass();
	createInfo.vertexBufferLayouts = vbLayout;
	createInfo.descriptorSetLayouts = dsLayout;
	createInfo.shaderProgram = vpp::ShaderProgram({
			{vk::ShaderStageFlags::Vertex, "vert.sprv"}, 
			{vk::ShaderStageFlags::Fragment, "frag.sprv"}
		});

	vpp::GraphicsPipeline pipeline(context.device(), createInfo);

	//buffer, descriptors
	vpp::Buffer vertexBuffer(context.device(), vertices);
	vpp::Buffer uniformBuffer(context.device(), someTransformMatrix);

	vpp::DescriptorSet descriptorSet(dsLayout);
	static_cast<vpp::BufferDescriptor>(descriptorSet[0]).write(uniformBuffer); //#1
	descriptorSet.writeBuffers(0, {uniformBuffer}); //#2

	//later
	pipeline.drawCommands(commandBuffer, {vertexBuffer}, {descriptorSet});
}
nsresult
LocalSourceStreamInfo::TakePipelineFrom(RefPtr<LocalSourceStreamInfo>& info,
                                        const std::string& oldTrackId,
                                        MediaStreamTrack& aNewTrack,
                                        const std::string& newTrackId)
{
  if (mPipelines.count(newTrackId)) {
    CSFLogError(logTag, "%s: Pipeline already exists for %s/%s",
                __FUNCTION__, mId.c_str(), newTrackId.c_str());
    return NS_ERROR_INVALID_ARG;
  }

  RefPtr<MediaPipeline> pipeline(info->ForgetPipelineByTrackId_m(oldTrackId));

  if (!pipeline) {
    // ReplaceTrack can potentially happen in the middle of offer/answer, before
    // the pipeline has been created.
    CSFLogInfo(logTag, "%s: Replacing track before the pipeline has been "
                       "created, nothing to do.", __FUNCTION__);
    return NS_OK;
  }

  nsresult rv =
    static_cast<MediaPipelineTransmit*>(pipeline.get())->ReplaceTrack(aNewTrack);
  NS_ENSURE_SUCCESS(rv, rv);

  mPipelines[newTrackId] = pipeline;

  return NS_OK;
}
Example #11
//parses a pipeline, returning command struct
//modifies global variable t
CMD *pipeline()
{
	CMD *cmd = 0, *tmp = 0;
	cmd = stage();
	while (t && ISPIPE(t->type))
	{
		if (!cmd)
		{
			DIE("Parse: null command\n");
			freeCMD(cmd);
			return 0;
		}
		if (cmd->toType != NONE)
		{
			DIE("Parse: two output redirects\n");
			freeCMD(cmd);
			return 0;
		}
		tmp = cmd;
		cmd = mallocCMD();
		cmd->type = t->type;
		cmd->left = tmp;
		t = t->next;
		cmd->right = pipeline();
		if (!cmd->right)
		{
			DIE("Parse: null command\n");
			freeCMD(cmd);
			return 0;
		}
	}
	return cmd;
}
Example #12
//parses an and-or statement, returning command struct
//modifies global variable t
CMD *andOr()
{
	CMD *cmd = 0 /*output*/, *tmp = 0; //swap
	cmd = pipeline();
	while (t && (t->type == SEP_AND || t->type == SEP_OR))
	{
		if (!cmd)
		{
			DIE("Parse: null command\n");
			freeCMD(cmd);
			return 0;
		}
		tmp = cmd;
		cmd = mallocCMD();
		cmd->type = t->type;
		cmd->left = tmp;
		t = t->next;
		cmd->right = andOr();
		if (!cmd->right)
		{
			DIE("Parse: null command\n");
			freeCMD(cmd);
			return 0;
		}
	}
	return cmd;
}
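As a reading aid, the grammar these two routines implement (inferred from the code above, not taken from the project's documentation) is roughly the following:

// andOr    ::= pipeline ( ("&&" | "||") andOr )?
// pipeline ::= stage ( "|" pipeline )?
//
// Both levels recurse on the right, so "a | b && c" parses as (a | b) && c:
// the SEP_AND node takes the already-parsed pipeline on its left and the
// recursively parsed andOr() result on its right.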
	linked_light_lists_gen_fragment(const gl::rendering_system &rs,
									linked_light_lists *lll)
		: Base(rs,
			   "linked_light_lists_gen.comp"),
		lll(lll)
	{
		dispatch_task.attach_pipeline(pipeline());
	}
Example #14
void MilkdropPreset::Render(const BeatDetect &music, const PipelineContext &context)
{
	_presetInputs.update(music, context);

	evaluateFrame();
	pipeline().Render(music, context);

}
Example #15
bool vtkBalloonWidgetTest::Run(bool run)
{
    vtkBalloonWidgetTest test;
    vtkWidgetsTestPipelineTemplate<vtkBalloonWidgetTest> pipeline(test);
    pipeline.StartInteractor();

    return true;
}
Example #16
StatusWith<intrusive_ptr<Pipeline>> Pipeline::create(
    SourceContainer stages, const intrusive_ptr<ExpressionContext>& expCtx) {
    intrusive_ptr<Pipeline> pipeline(new Pipeline(stages, expCtx));
    auto status = pipeline->ensureAllStagesAreInLegalPositions();
    if (!status.isOK()) {
        return status;
    }
    pipeline->stitch();
    return pipeline;
}
Example #17
File: main.cpp  Project: TBFMX/opencvWork
/**
 * Processes single image. The processing goes in a loop.
 * It allows you to control the detection process by adjusting homography refinement switch and 
 * reprojection threshold in runtime.
 */
void processSingleImage(const cv::Mat& patternImage, CameraCalibration& calibration, const cv::Mat& image)
{
    cv::Size frameSize(image.cols, image.rows);
    ARPipeline pipeline(patternImage, calibration);
    ARDrawingContext drawingCtx("Markerless AR", frameSize, calibration);

    bool shouldQuit = false;
    do
    {
        shouldQuit = processFrame(image, pipeline, drawingCtx);
    } while (!shouldQuit);
}
 void onExecute(GrOpFlushState* state) override {
     GrRenderTarget* rt = state->drawOpArgs().fRenderTarget;
     GrPipeline pipeline(rt, fScissorState, SkBlendMode::kSrc);
     SkSTArray<kNumMeshes, GrMesh> meshes;
     for (int i = 0; i < kNumMeshes; ++i) {
         GrMesh& mesh = meshes.emplace_back(GrPrimitiveType::kTriangleStrip);
         mesh.setNonIndexedNonInstanced(4);
         mesh.setVertexData(fVertexBuffer.get(), 4 * i);
     }
     state->commandBuffer()->draw(pipeline, GrPipelineDynamicStateTestProcessor(),
                                  meshes.begin(), kDynamicStates, 4,
                                  SkRect::MakeIWH(kScreenSize, kScreenSize));
 }
Example #19
void GrDrawPathOp::onExecute(GrOpFlushState* state, const SkRect& chainBounds) {
    GrAppliedClip appliedClip = state->detachAppliedClip();
    GrPipeline::FixedDynamicState fixedDynamicState(appliedClip.scissorState().rect());
    GrPipeline pipeline(this->pipelineInitArgs(*state), this->detachProcessors(),
                        std::move(appliedClip));
    sk_sp<GrPathProcessor> pathProc(GrPathProcessor::Create(this->color(), this->viewMatrix()));

    GrStencilSettings stencil;
    init_stencil_pass_settings(*state, this->fillType(), &stencil);
    state->gpu()->pathRendering()->drawPath(state->drawOpArgs().renderTarget(),
                                            state->drawOpArgs().origin(),
                                            *pathProc, pipeline, fixedDynamicState, stencil,
                                            fPath.get());
}
Example #20
File: parser.c  Project: hakan-akan/cor
STATIC union node *
andor() {
      union node *n1, *n2, *n3;
      int t;

      n1 = pipeline();
      for (;;) {
            if ((t = readtoken()) == TAND) {
                  t = NAND;
            } else if (t == TOR) {
                  t = NOR;
            } else {
                  tokpushback++;
                  return n1;
            }
            n2 = pipeline();
            n3 = (union node *)stalloc(sizeof (struct nbinary));
            n3->type = t;
            n3->nbinary.ch1 = n1;
            n3->nbinary.ch2 = n2;
            n1 = n3;
      }
}
Example #21
File: main.cpp  Project: caomw/SfM-4
int main(int argc, char** argv)
{
	/**
	 * Argument parsing
	 */
	po::options_description desc("Available options");
	desc.add_options()
	    ("help", "Show this message")
	    ("data-path,d", po::value<std::string>(), "Input dataset folder path")
	    ("show-clouds,s", po::bool_switch()->default_value(false), "Show result point clouds")
	    ("no-save-clouds,n", po::bool_switch()->default_value(false), "Don't save cloud outputs")
	;

	po::positional_options_description p;
	p.add("data-path", -1);

	po::variables_map vm;
	po::store(po::command_line_parser(argc, argv).options(desc).positional(p).run(), vm);
	po::notify(vm);

	if (vm.size() == 0 || vm.count("help") || !vm.count("data-path")) {
		std::cout << "Usage: " << argv[0]
			<< " [options] data-path" << std::endl
			<< desc;
		return 1;
	}

	std::string folder_path = vm["data-path"].as<std::string>();
	bool show_clouds = vm["show-clouds"].as<bool>();
	bool save_clouds = !vm["no-save-clouds"].as<bool>();


	/**
	 * Pipeline
	 */

	// Create instance of pipeline
	Pipeline pipeline(folder_path);
	// and run
	pipeline.run(save_clouds, show_clouds);


	/**
	 * End
	 */
	return 0;

}
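A hypothetical invocation of the resulting binary (the executable name and dataset path are assumptions, not taken from the project):

// ./sfm_pipeline ./datasets/fountain --show-clouds   (data-path given positionally)
// ./sfm_pipeline -d ./datasets/fountain -n           (reconstruct without saving clouds)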
Example #22
/*
  Build a pipeline from a JSON filter specification.
*/
void TranslateKernel::makeJSONPipeline()
{
    std::string json;

    if (pdal::FileUtils::fileExists(m_filterJSON))
        json = pdal::FileUtils::readFileIntoString(m_filterJSON);

    if (json.empty())
        json = m_filterJSON;

    Json::Reader jsonReader;
    Json::Value filters;
    jsonReader.parse(json, filters);
    if (filters.type() != Json::arrayValue || filters.empty())
        throw pdal_error("JSON must be an array of filter specifications");

    Json::Value pipeline(Json::arrayValue);

    // Add the input file, the filters (as provided) and the output file.
    if (m_readerType.size())
    {
        Json::Value node(Json::objectValue);
        node["filename"] = m_inputFile;
        node["type"] = m_readerType;
        pipeline.append(node);
    }
    else
        pipeline.append(Json::Value(m_inputFile));
    for (Json::ArrayIndex i = 0; i < filters.size(); ++i)
        pipeline.append(filters[i]);
    if (m_writerType.size())
    {
        Json::Value node(Json::objectValue);
        node["filename"] = m_outputFile;
        node["type"] = m_writerType;
        pipeline.append(node);
    }
    else
        pipeline.append(Json::Value(m_outputFile));

    Json::Value root;
    root["pipeline"] = pipeline;

    std::stringstream pipeline_str;
    pipeline_str << root;
    m_manager.readPipeline(pipeline_str);
}
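For reference, the JSON document the function assembles and streams into m_manager.readPipeline() has the following shape; the reader, filter, and writer entries shown are assumed example values, not outputs of the code above:

// {
//   "pipeline": [
//     { "type": "readers.las",   "filename": "input.las" },    // from m_readerType / m_inputFile
//     { "type": "filters.range", "limits": "Z[0:100]" },       // filter stages passed through as provided
//     { "type": "writers.las",   "filename": "output.las" }    // from m_writerType / m_outputFile
//   ]
// }
//
// When m_readerType or m_writerType is empty, the corresponding entry is the
// bare filename string instead of an object.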
bool RemoteSourceStreamInfo::SetUsingBundle_m(int aLevel, bool decision) {
  ASSERT_ON_THREAD(mParent->GetMainThread());

  RefPtr<MediaPipeline> pipeline(GetPipelineByLevel_m(aLevel));

  if (pipeline) {
    RUN_ON_THREAD(mParent->GetSTSThread(),
                  WrapRunnable(
                      pipeline,
                      &MediaPipeline::SetUsingBundle_s,
                      decision
                  ),
                  NS_DISPATCH_NORMAL);
    return true;
  }
  return false;
}
Example #24
  void ShapeRenderer::drawRectangle(m2::AnyRectD const & r, graphics::Color const & c, double depth)
  {
    uint32_t id = base_t::mapInfo(Brush::Info(c));
    Resource const * res = base_t::fromID(id);

    if (res == 0)
    {
      LOG(LDEBUG, ("cannot map color"));
      return;
    }

    m2::PointD rectPts[4];

    r.GetGlobalPoints(rectPts);
    swap(rectPts[2], rectPts[3]);

    m2::PointF rectPtsF[4];
    for (int i = 0; i < 4; ++i)
      rectPtsF[i] = m2::PointF(rectPts[i].x, rectPts[i].y);

    GeometryPipeline & p = pipeline(res->m_pipelineID);

    shared_ptr<gl::BaseTexture> texture = p.texture();

    if (!texture)
    {
      LOG(LDEBUG, ("returning as no texture is reserved"));
      return;
    }

    m2::PointF texPt = texture->mapPixel(m2::RectF(res->m_texRect).Center());

    m2::PointF normal(0, 0);

    addTexturedStripStrided(
          rectPtsF,
          sizeof(m2::PointF),
          &normal,
          0,
          &texPt,
          0,
          4,
          depth,
          res->m_pipelineID);
  }
Example #25
  void ShapeRenderer::drawRectangle(m2::RectD const & r, graphics::Color const & c, double depth)
  {
    uint32_t id = base_t::mapInfo(Brush::Info(c));
    Resource const * res = base_t::fromID(id);

    if (res == 0)
    {
      LOG(LDEBUG, ("cannot map color"));
      return;
    }

    m2::PointF rectPts[4] = {
      m2::PointF(r.minX(), r.minY()),
      m2::PointF(r.maxX(), r.minY()),
      m2::PointF(r.minX(), r.maxY()),
      m2::PointF(r.maxX(), r.maxY())
    };

    GeometryPipeline & p = pipeline(res->m_pipelineID);

    shared_ptr<gl::BaseTexture> texture = p.texture();

    if (!texture)
    {
      LOG(LDEBUG, ("returning as no texture is reserved"));
      return;
    }

    m2::PointF texPt = texture->mapPixel(m2::RectF(res->m_texRect).Center());

    m2::PointF normal(0, 0);

    addTexturedStripStrided(
          rectPts,
          sizeof(m2::PointF),
          &normal,
          0,
          &texPt,
          0,
          4,
          depth,
          res->m_pipelineID
          );
  }
Example #26
StatusWith<ResolvedView> ViewCatalog::resolveView(OperationContext* opCtx,
                                                  const NamespaceString& nss) {
    stdx::lock_guard<stdx::mutex> lk(_mutex);
    const NamespaceString* resolvedNss = &nss;
    std::vector<BSONObj> resolvedPipeline;
    BSONObj collation;

    for (int i = 0; i < ViewGraph::kMaxViewDepth; i++) {
        auto view = _lookup_inlock(opCtx, resolvedNss->ns());
        if (!view) {
            // Return error status if pipeline is too large.
            int pipelineSize = 0;
            for (auto obj : resolvedPipeline) {
                pipelineSize += obj.objsize();
            }
            if (pipelineSize > ViewGraph::kMaxViewPipelineSizeBytes) {
                return {ErrorCodes::ViewPipelineMaxSizeExceeded,
                        str::stream() << "View pipeline exceeds maximum size; maximum size is "
                                      << ViewGraph::kMaxViewPipelineSizeBytes};
            }
            return StatusWith<ResolvedView>(
                {*resolvedNss, std::move(resolvedPipeline), std::move(collation)});
        }

        resolvedNss = &(view->viewOn());
        collation = view->defaultCollator() ? view->defaultCollator()->getSpec().toBSON()
                                            : CollationSpec::kSimpleSpec;

        // Prepend the underlying view's pipeline to the current working pipeline.
        const std::vector<BSONObj>& toPrepend = view->pipeline();
        resolvedPipeline.insert(resolvedPipeline.begin(), toPrepend.begin(), toPrepend.end());

        // If the first stage is a $collStats, then we return early with the viewOn namespace.
        if (toPrepend.size() > 0 && !toPrepend[0]["$collStats"].eoo()) {
            return StatusWith<ResolvedView>(
                {*resolvedNss, std::move(resolvedPipeline), std::move(collation)});
        }
    }

    return {ErrorCodes::ViewDepthLimitExceeded,
            str::stream() << "View depth too deep or view cycle detected; maximum depth is "
                          << ViewGraph::kMaxViewDepth};
}
already_AddRefed<MediaPipeline>
LocalSourceStreamInfo::ForgetPipelineByTrackId_m(const std::string& trackId)
{
  ASSERT_ON_THREAD(mParent->GetMainThread());

  // Refuse to hand out references if we're tearing down.
  // (Since teardown involves a dispatch to and from STS before MediaPipelines
  // are released, it is safe to start other dispatches to and from STS with a
  // RefPtr<MediaPipeline>, since that reference won't be the last one
  // standing)
  if (mMediaStream) {
    if (mPipelines.count(trackId)) {
      RefPtr<MediaPipeline> pipeline(mPipelines[trackId]);
      mPipelines.erase(trackId);
      return pipeline.forget();
    }
  }

  return nullptr;
}
Example #28
SkColor4f SkColorFilter::filterColor4f(const SkColor4f& c, SkColorSpace* colorSpace) const {
    SkPMColor4f dst, src = c.premul();

    // determined experimentally, seems to cover compose+colormatrix
    constexpr size_t kEnoughForCommonFilters = 512;
    SkSTArenaAlloc<kEnoughForCommonFilters> alloc;
    SkRasterPipeline    pipeline(&alloc);

    pipeline.append_constant_color(&alloc, src.vec());

    SkPaint dummyPaint;
    SkStageRec rec = {
        &pipeline, &alloc, kRGBA_F32_SkColorType, colorSpace, dummyPaint, nullptr, SkMatrix::I()
    };
    this->onAppendStages(rec, c.fA == 1);
    SkRasterPipeline_MemoryCtx dstPtr = { &dst, 0 };
    pipeline.append(SkRasterPipeline::store_f32, &dstPtr);
    pipeline.run(0,0, 1,1);

    return dst.unpremul();
}
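A small usage sketch, assuming 'filter' holds a valid sk_sp<SkColorFilter> and that a null destination color space is acceptable for the filter in question; the header path may differ between Skia checkouts:

#include "SkColorFilter.h"

void tintOneColor(const sk_sp<SkColorFilter>& filter) {
    // Runs a single unpremultiplied float color through the filter; internally
    // this executes the small raster pipeline built above for one value.
    SkColor4f tinted = filter->filterColor4f({1.0f, 0.5f, 0.0f, 1.0f}, nullptr);
    (void)tinted;
}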
cx::CompositeTimedAlgorithmPtr ReconstructionExecuter::assembleReconstructionPipeline(std::vector<ReconstructCorePtr> cores, ReconstructCore::InputParams par, USReconstructInputData fileData)
{
	cx::CompositeSerialTimedAlgorithmPtr pipeline(new cx::CompositeSerialTimedAlgorithm("US Reconstruction"));

	ReconstructPreprocessorPtr preprocessor = this->createPreprocessor(par, fileData);
	pipeline->append(ThreadedTimedReconstructPreprocessor::create(mPatientModelService, preprocessor, cores));

	cx::CompositeTimedAlgorithmPtr temp = pipeline;
	if(this->canCoresRunInParallel(cores) && cores.size()>1)
	{
		cx::CompositeParallelTimedAlgorithmPtr parallel(new cx::CompositeParallelTimedAlgorithm());
		pipeline->append(parallel);
		temp = parallel;
		reportDebug("Running reconstruction cores in parallel.");
	}

	for (unsigned i=0; i<cores.size(); ++i)
		temp->append(ThreadedTimedReconstructCore::create(mPatientModelService, mViewService, cores[i]));

	return pipeline;
}
Example #30
int run(handle_t module_ptr, handle_t function,
        const buffer *input_buffersPtrs, int input_buffersLen,
        buffer *output_buffersPtrs, int output_buffersLen,
        const buffer *input_scalarsPtrs, int input_scalarsLen) {
    // Get a pointer to the argv version of the pipeline.
    typedef int (*pipeline_argv_t)(void **);
    pipeline_argv_t pipeline = reinterpret_cast<pipeline_argv_t>(function);

    // Construct a list of arguments. This is only part of a
    // buffer_t. We know that the only field of buffer_t that the
    // generated code should access is the host field (any other
    // fields should be passed as their own scalar parameters) so we
    // can just make this dummy buffer_t type.
    struct buffer_t {
        uint64_t dev;
        uint8_t* host;
    };
    void **args = (void **)__builtin_alloca((input_buffersLen + input_scalarsLen + output_buffersLen) * sizeof(void *));
    buffer_t *buffers = (buffer_t *)__builtin_alloca((input_buffersLen + output_buffersLen) * sizeof(buffer_t));

    void **next_arg = &args[0];
    buffer_t *next_buffer_t = &buffers[0];
    // Input buffers come first.
    for (int i = 0; i < input_buffersLen; i++, next_arg++, next_buffer_t++) {
        next_buffer_t->host = input_buffersPtrs[i].data;
        *next_arg = next_buffer_t;
    }
    // Output buffers are next.
    for (int i = 0; i < output_buffersLen; i++, next_arg++, next_buffer_t++) {
        next_buffer_t->host = output_buffersPtrs[i].data;
        *next_arg = next_buffer_t;
    }
    // Input scalars are last.
    for (int i = 0; i < input_scalarsLen; i++, next_arg++) {
        *next_arg = input_scalarsPtrs[i].data;
    }

    // Call the pipeline and return the result.
    return pipeline(args);
}