Example #1
0
    InputSystem::InputSystem()
    {
        // set the send input callback
        SetEventCallback(&InputSystem::SendInput);
        s_Input = this;

        // load the map
        m_Map.LoadMap(Config::INPUT_SYSTEM_LL_PATH);
    }
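Example #1 registers a static member function as the engine's event callback and stashes `this` in a static pointer so the callback can reach the live instance. Below is a minimal, self-contained C++ sketch of that trampoline pattern; the Event type, EventDispatcher, and OnInput handler are hypothetical stand-ins for whatever the original engine provides.

#include <iostream>

// hypothetical event type and dispatcher -- stand-ins for the engine API
struct Event { int code; };

class EventDispatcher {
public:
  using Callback = void (*)(Event const &);
  static void SetEventCallback(Callback cb) { s_Callback = cb; }
  static void Dispatch(Event const & e)     { if (s_Callback) s_Callback(e); }
private:
  static inline Callback s_Callback = nullptr;
};

class InputSystem {
public:
  InputSystem()
  {
    // register the static trampoline and remember the live instance
    EventDispatcher::SetEventCallback(&InputSystem::SendInput);
    s_Input = this;
  }

  // static trampoline: forwards the event to the singleton instance
  static void SendInput(Event const & e) { if (s_Input) s_Input->OnInput(e); }

private:
  void OnInput(Event const & e) { std::cout << "input event " << e.code << "\n"; }

  static inline InputSystem * s_Input = nullptr;
};

int main()
{
  InputSystem input;
  EventDispatcher::Dispatch({42});  // prints "input event 42"
  return 0;
}

The static member is needed because a plain function-pointer callback cannot bind a member function directly; the s_Input pointer is the usual way to get back to the instance.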
Example #2
0
static
void
skc_styling_grid_pfn_execute(skc_grid_t const grid)
{
  struct skc_styling_impl * const impl    = skc_grid_get_data(grid);
  struct skc_styling      * const styling = impl->styling;

  //
  // unmap all extents
  //
  cl_event complete;

  skc_extent_phwN_pdrN_unmap(&impl->layers,styling->layers.extent,impl->cq,NULL);
  skc_extent_phwN_pdrN_unmap(&impl->groups,styling->groups.extent,impl->cq,NULL);
  skc_extent_phwN_pdrN_unmap(&impl->extras,styling->extras.extent,impl->cq,&complete);

  // set the event
  cl(SetEventCallback(complete,CL_COMPLETE,skc_styling_unmap_cb,grid));
  cl(ReleaseEvent(complete));

  // flush command queue
  cl(Flush(impl->cq));
}
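Throughout the skc examples the cl(...) macro appears to wrap a raw OpenCL call (so cl(Flush(cq)) stands for clFlush(cq)) and check its return code. A minimal sketch of the same unmap-then-notify chain written directly against the OpenCL C API follows; the queue, buffer, and the on_unmapped callback are hypothetical, but the callback signature is the one clSetEventCallback requires.

#include <stdio.h>
#include <CL/cl.h>

// callback invoked by the OpenCL runtime once the final unmap has completed
static void CL_CALLBACK on_unmapped(cl_event event, cl_int status, void * user_data)
{
  if (status == CL_COMPLETE)
    fprintf(stderr,"extents unmapped, user_data=%p\n",user_data);
}

// hypothetical helper: unmap a mapped pointer, hook the unmap's completion
// event, then flush so the enqueued work actually starts executing
// (error checking elided; the cl() wrapper above appears to handle it)
static void unmap_and_notify(cl_command_queue cq,
                             cl_mem           buffer,
                             void           * mapped,
                             void           * user_data)
{
  cl_event complete;

  // enqueue the unmap and capture its completion event
  clEnqueueUnmapMemObject(cq,buffer,mapped,0,NULL,&complete);

  // equivalent of cl(SetEventCallback(...)) / cl(ReleaseEvent(...))
  clSetEventCallback(complete,CL_COMPLETE,on_unmapped,user_data);
  clReleaseEvent(complete);

  // equivalent of cl(Flush(...)): kick the queue without blocking
  clFlush(cq);
}

Releasing the event immediately after registering the callback is safe: the runtime keeps the event alive until the callback has fired.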
Example #3
0
static
void
skc_raster_builder_cohort_grid_pfn_execute(skc_grid_t const grid)
{
  //
  // ALLOCATED RESOURCES
  //
  // path_ids          i
  // raster_ids        i
  // transforms        i
  // clips             i
  // fill_cmds         i
  // cq                -
  // cohort atomics    -
  // cmds              -
  // keys              -
  // meta              -
  //

  // allocate the cohort
  struct skc_raster_cohort       * const cohort  = skc_grid_get_data(grid);

  // get impl
  struct skc_raster_builder_impl * const impl    = cohort->impl;
  struct skc_runtime             * const runtime = impl->runtime;

  // acquire in-order cq
  cohort->cq = skc_runtime_acquire_cq_in_order(runtime);

  // alloc the snapshot -- could be zero-sized
  skc_extent_phw1g_tdrNs_snap_alloc(runtime,
                                    &impl->fill_cmds,
                                    &cohort->fill_cmds,
                                    cohort->cq,NULL);

  // flush the cq to get the fill running
  // cl(Flush(cohort->cq));

  // create split atomics
  skc_extent_thr_tdrw_alloc(runtime,&cohort->atomics,sizeof(struct skc_raster_cohort_atomic));

  // zero the atomics
  skc_extent_thr_tdrw_zero(&cohort->atomics,cohort->cq,NULL);

  // get config
  struct skc_config const * const config = runtime->config;

  // acquire device-side extents
  skc_extent_tdrw_alloc(runtime,
                        &cohort->cmds,
                        sizeof(union skc_cmd_rasterize) * config->raster_cohort.expand.cmds);

  //
  // FILLS EXPAND
  //
  // need result of cmd counts before launching RASTERIZE grids
  //
  // - OpenCL 1.2: copy atomic counters back to host and launch RASTERIZE grids from host
  // - OpenCL 2.x: have a kernel size and launch RASTERIZE grids from device
  // - or launch a device-wide grid that feeds itself but that's unsatisfying
  //

  // how many commands?  could be zero
  skc_uint const work_size = skc_extent_ring_snap_count(cohort->fill_cmds.snap);

  if (work_size > 0)
    {
      cl(SetKernelArg(impl->kernels.fills_expand,0,SKC_CL_ARG(impl->runtime->block_pool.blocks.drw)));
      cl(SetKernelArg(impl->kernels.fills_expand,1,SKC_CL_ARG(cohort->atomics.drw)));
      cl(SetKernelArg(impl->kernels.fills_expand,2,SKC_CL_ARG(runtime->handle_pool.map.drw)));
      cl(SetKernelArg(impl->kernels.fills_expand,3,SKC_CL_ARG(cohort->fill_cmds.drN)));
      cl(SetKernelArg(impl->kernels.fills_expand,4,SKC_CL_ARG(cohort->cmds.drw)));

      skc_device_enqueue_kernel(runtime->device,
                                SKC_DEVICE_KERNEL_ID_FILLS_EXPAND,
                                cohort->cq,
                                impl->kernels.fills_expand,
                                work_size,
                                0,NULL,NULL);
    }

  //
  // copyback number of rasterization commands
  //
  cl_event complete;

  skc_extent_thr_tdrw_read(&cohort->atomics,cohort->cq,&complete);

  cl(SetEventCallback(complete,CL_COMPLETE,skc_raster_cohort_fills_expand_cb,grid));
  cl(ReleaseEvent(complete));

  // flush command queue
  cl(Flush(cohort->cq));

  //
  // ALLOCATED RESOURCES
  //
  // path_ids          i
  // raster_ids        i
  // transforms        i
  // clips             i
  // fill_cmds         s
  // cq                a
  // cohort atomics    a
  // cmds              a
  // keys              -
  // meta              -
  //
}
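The FILLS EXPAND comment above spells out the OpenCL 1.2 approach taken in Example #3: run the expansion kernel, copy the atomic command counters back to the host without blocking, and launch the RASTERIZE grids from the host once the callback fires. Here is a rough sketch of that guarded-launch-plus-copyback skeleton in the raw OpenCL C API; the kernel, buffers, arg indices, and the counts struct are hypothetical placeholders, SKC_CL_ARG(x) is assumed to expand to a sizeof(x), &x pair, and skc_device_enqueue_kernel() is assumed to end in clEnqueueNDRangeKernel().

#include <stddef.h>
#include <CL/cl.h>

struct counts { cl_uint cmds; };  // hypothetical copyback target for the atomics

static void CL_CALLBACK on_counts_ready(cl_event event, cl_int status, void * user_data)
{
  counts const * const c = static_cast<counts const *>(user_data);  // safe to read now
  // ...launch the RASTERIZE grids from the host using c->cmds as the work size...
  (void)event; (void)status; (void)c;
}

static void expand_then_copyback(cl_command_queue cq,
                                 cl_kernel        fills_expand,
                                 cl_mem           cmds_buf,
                                 cl_mem           atomics_buf,
                                 counts         * host_counts,
                                 size_t           work_size)
{
  if (work_size > 0)
    {
      // cl(SetKernelArg(...,SKC_CL_ARG(x))) corresponds to clSetKernelArg(...,sizeof(x),&x)
      clSetKernelArg(fills_expand,0,sizeof(cmds_buf),   &cmds_buf);
      clSetKernelArg(fills_expand,1,sizeof(atomics_buf),&atomics_buf);

      clEnqueueNDRangeKernel(cq,fills_expand,1,NULL,&work_size,NULL,0,NULL,NULL);
    }

  // non-blocking copyback of the counters; continue from the completion callback
  cl_event complete;

  clEnqueueReadBuffer(cq,atomics_buf,CL_FALSE,0,sizeof(*host_counts),host_counts,0,NULL,&complete);
  clSetEventCallback(complete,CL_COMPLETE,on_counts_ready,host_counts);
  clReleaseEvent(complete);

  clFlush(cq);
}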
Example #4
0
static
void
skc_raster_cohort_rasterize(skc_grid_t const grid)
{
  //
  // ALLOCATED RESOURCES
  //
  // path_ids          i
  // raster_ids        i
  // transforms        i
  // clips             i
  // fill_cmds         s
  // cq                a
  // cohort atomics    a
  // cmds              a
  // cmds_quad         a
  // cmds_cubic        a
  // keys              -
  // meta              -

  // use the backpointers
  struct skc_raster_cohort       * const cohort  = skc_grid_get_data(grid);
  struct skc_raster_builder_impl * const impl    = cohort->impl;
  struct skc_runtime             * const runtime = impl->runtime;

  //
  // RELEASED RESOURCES
  //
  // fill_cmds  snap
  //

  // release the fill_cmds snap since it's only used by the expand stage
  skc_extent_phw1g_tdrNs_snap_free(runtime,&cohort->fill_cmds);

  //
  // NEW ALLOCATED RESOURCES
  //
  // transforms snap
  // clips snap
  // ttrk keys
  //
  skc_extent_phw1g_tdrNs_snap_alloc(runtime,
                                    &impl->transforms,
                                    &cohort->transforms,
                                    cohort->cq,NULL);

  skc_extent_phw1g_tdrNs_snap_alloc(runtime,
                                    &impl->clips,
                                    &cohort->clips,
                                    cohort->cq,NULL);

  // acquire device-side extent
  skc_extent_tdrw_alloc(runtime,
                        &cohort->keys,
                        sizeof(union skc_ttrk) * runtime->config->raster_cohort.rasterize.keys);

  // skc_extent_thrw_tdrw_alloc(runtime,
  //                            &cohort->keys,
  //                            sizeof(union skc_ttrk) * runtime->config->raster_cohort.rasterize.keys);

  //
  // acquire out-of-order command queue
  //
  // and launch up to 3 kernels
  //
  // for each kernel:
  //
  //   set runtime "global" kernel args:
  //
  //   - block pool atomics
  //   - block pool extent
  //
  //   set cohort "local" kernel args:
  //
  //   - atomics
  //   - cmds
  //
  // enqueue barrier
  // enqueue copy back of atomics on the command queue
  // set callback on copy back event
  // release command queue
  //
  struct skc_raster_cohort_atomic const * const atomics = cohort->atomics.hr;

  if (atomics->cmds > 0)
    {
      cl(SetKernelArg(impl->kernels.rasterize_all,0,SKC_CL_ARG(runtime->block_pool.atomics.drw)));
      cl(SetKernelArg(impl->kernels.rasterize_all,1,SKC_CL_ARG(runtime->block_pool.blocks.drw)));
      cl(SetKernelArg(impl->kernels.rasterize_all,2,SKC_CL_ARG(runtime->block_pool.ids.drw)));
      cl(SetKernelArg(impl->kernels.rasterize_all,3,SKC_CL_ARG(runtime->block_pool.size->ring_mask)));

      cl(SetKernelArg(impl->kernels.rasterize_all,4,SKC_CL_ARG(cohort->atomics.drw)));
      cl(SetKernelArg(impl->kernels.rasterize_all,5,SKC_CL_ARG(cohort->keys.drw)));

      cl(SetKernelArg(impl->kernels.rasterize_all,6,SKC_CL_ARG(cohort->transforms.drN)));
      cl(SetKernelArg(impl->kernels.rasterize_all,7,SKC_CL_ARG(cohort->clips.drN)));
      cl(SetKernelArg(impl->kernels.rasterize_all,8,SKC_CL_ARG(cohort->cmds.drw)));
      cl(SetKernelArg(impl->kernels.rasterize_all,9,SKC_CL_ARG(atomics->cmds)));

      skc_device_enqueue_kernel(runtime->device,
                                SKC_DEVICE_KERNEL_ID_RASTERIZE_ALL,
                                cohort->cq,
                                impl->kernels.rasterize_all,
                                atomics->cmds,
                                0,NULL,NULL);
    }

  //
  // copyback number of TTSK keys
  //
  cl_event complete;

  skc_extent_thr_tdrw_read(&cohort->atomics,cohort->cq,&complete);

  cl(SetEventCallback(complete,CL_COMPLETE,skc_raster_cohort_rasterize_cb,grid));
  cl(ReleaseEvent(complete));

  // flush command queue
  cl(Flush(cohort->cq));

  //
  // ALLOCATED RESOURCES
  //
  // path_ids          i
  // raster_ids        i
  // transforms        a
  // clips             a
  // fill_cmds         -
  // cq                a
  // cohort atomics    a
  // cmds              a
  // keys              a
  // meta              -
}
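The long comment in the middle of Example #4 describes the intended protocol: launch up to three rasterization kernels on an out-of-order command queue, enqueue a barrier behind them, copy the atomics back, and hang the continuation off that copy's event. The code above actually reuses the in-order cq acquired earlier, where the barrier isn't needed, but a small sketch of the out-of-order variant against the raw API is shown below; the queue, kernels, and buffers are all hypothetical placeholders.

#include <stddef.h>
#include <CL/cl.h>

// hypothetical continuation invoked once the atomics copyback has completed
static void CL_CALLBACK on_rasterize_done(cl_event, cl_int, void *)
{
  // e.g. schedule the sort/segment stage from here
}

static void rasterize_out_of_order(cl_command_queue ooq,        // out-of-order queue
                                   cl_kernel  const kernels[3], // e.g. lines / quads / cubics
                                   size_t     const sizes[3],
                                   cl_mem           atomics_buf,
                                   void           * host_atomics,
                                   size_t           atomics_size,
                                   void           * user_data)
{
  // launch up to three independent kernels -- on an out-of-order queue they may overlap
  for (int ii = 0; ii < 3; ii++)
    if (sizes[ii] > 0)
      clEnqueueNDRangeKernel(ooq,kernels[ii],1,NULL,&sizes[ii],NULL,0,NULL,NULL);

  // barrier: commands enqueued after this wait until all prior commands complete
  clEnqueueBarrierWithWaitList(ooq,0,NULL,NULL);

  // copy the atomics back behind the barrier and continue from the callback
  cl_event complete;

  clEnqueueReadBuffer(ooq,atomics_buf,CL_FALSE,0,atomics_size,host_atomics,0,NULL,&complete);
  clSetEventCallback(complete,CL_COMPLETE,on_rasterize_done,user_data);
  clReleaseEvent(complete);

  clFlush(ooq);
}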
Example #5
0
static
void
skc_raster_cohort_sort_prefix(skc_grid_t const grid)
{
  //
  // ALLOCATED RESOURCES
  //
  // path_ids          i
  // raster_ids        i
  // transforms        a
  // clips             a
  // fill_cmds         -
  // cq                a
  // cohort atomics    a
  // cmds              a
  // keys              a
  // meta              -
  //

  // use the backpointers
  struct skc_raster_cohort       * const cohort  = skc_grid_get_data(grid);
  struct skc_raster_builder_impl * const impl    = cohort->impl;
  struct skc_runtime             * const runtime = impl->runtime;

  // release transforms
  skc_extent_phw1g_tdrNs_snap_free(runtime,&cohort->transforms);

  // release clips
  skc_extent_phw1g_tdrNs_snap_free(runtime,&cohort->clips);

  // release expanded cmds
  skc_extent_tdrw_free(runtime,&cohort->cmds);

  // alloc the snapshot -- could be zero-sized
  skc_extent_phrwg_tdrNs_snap_alloc(runtime,
                                    &impl->raster_ids,
                                    &cohort->raster_ids,
                                    cohort->cq,NULL);

  // will never be zero
  skc_uint const rasters = skc_extent_ring_snap_count(cohort->raster_ids.snap);

  // acquire fixed-size device-side extent
  skc_extent_tdrw_alloc(runtime,
                        &cohort->metas,
                        sizeof(struct skc_raster_cohort_meta));

  // skc_extent_thrw_tdrw_alloc(runtime,
  //                            &cohort->metas,
  //                            sizeof(struct skc_raster_cohort_meta));

  // zero the metas
  skc_extent_tdrw_zero(&cohort->metas,cohort->cq,NULL);

  // get the read-only host copy of the device atomics
  struct skc_raster_cohort_atomic const * const atomics = cohort->atomics.hr;

  //
  // SORT
  //
  if (atomics->keys > 0)
    {
#ifndef NDEBUG
      fprintf(stderr,"raster cohort sort: %u\n",atomics->keys);
#endif

      //
      //
      //
      uint32_t keys_padded_in, keys_padded_out;

      hs_cl_pad(runtime->hs,atomics->keys,&keys_padded_in,&keys_padded_out);

      hs_cl_sort(runtime->hs,
                 cohort->cq,
                 0,NULL,NULL,
                 cohort->keys.drw,
                 NULL,
                 atomics->keys,
                 keys_padded_in,
                 keys_padded_out,
                 false);

      cl(SetKernelArg(impl->kernels.segment,0,SKC_CL_ARG(cohort->keys.drw)));
      cl(SetKernelArg(impl->kernels.segment,1,SKC_CL_ARG(cohort->metas.drw)));

#ifndef NDEBUG
      fprintf(stderr,"post-sort\n");
#endif

      // find start of each tile
      skc_device_enqueue_kernel(runtime->device,
                                SKC_DEVICE_KERNEL_ID_SEGMENT_TTRK,
                                cohort->cq,
                                impl->kernels.segment,
                                atomics->keys,
                                0,NULL,NULL);

#ifndef NDEBUG
      fprintf(stderr,"post-segment\n");
#endif

      //
      // DELETE ALL THIS WHEN READY
      //

#if 0
      //
      //
      //
      cl(Finish(cohort->cq));

      // map keys to host
      union skc_ttrk * const keys = skc_extent_thrw_tdrw_map(&cohort->keys,
                                                             cohort->cq,
                                                             NULL);
      // map meta to host
      struct skc_raster_cohort_meta * const metas = skc_extent_thrw_tdrw_map(&cohort->metas,
                                                                             cohort->cq,
                                                                             NULL);
      // block until done
      cl(Finish(cohort->cq));

      // sort keys
      qsort(keys,atomics->keys,sizeof(*keys),cmp64);

      // mask to determine if rk id is a new block
      skc_uint const subblock_mask = runtime->config->block.subblocks - 1;

      //
      // some counters
      //
      union skc_raster_cohort_meta_in meta_in = {
        .blocks = 0,
        .offset = 0,
        .pk     = 0,
        .rk     = 0
      };

      // get first key
      union skc_ttrk curr = keys[0];

      skc_uint ii=0, jj=0;

      // for all TTRK keys
      while (true)
        {
          // increment ttrk count
          meta_in.rk += 1;

          // was this a new block?
          if ((curr.u32v2.lo & subblock_mask) == 0)
            meta_in.blocks += 1;

          // break if we're out of keys
          if (++ii >= atomics->keys)
            break;

          // otherwise, process next key
          union skc_ttrk const next = keys[ii];

          // if new cohort then save curr meta and init next meta
          if (next.cohort != curr.cohort)
            {
              fprintf(stderr,"[ %u, %u, %u, %u ]\n",
                      meta_in.blocks,
                      meta_in.offset,
                      meta_in.pk,
                      meta_in.rk);

              // store back to buffer
              metas->inout[curr.cohort].in = meta_in;

              // update meta_in
              meta_in.blocks = 0;
              meta_in.offset = ii;
              meta_in.pk     = 0;
              meta_in.rk     = 0;
            }
          // otherwise, if same y but new x then increment TTPK count
          else if ((next.y == curr.y) && (next.x != curr.x))
            {
              meta_in.pk += 1;

#if 0
              fprintf(stderr,"%3u : %3u : ( %3u, %3u ) -> ( %3u )\n",
                      jj++,curr.cohort,curr.y,curr.x,next.x);
#endif
            }

#if 0
          fprintf(stderr,"( %3u, %3u )\n",next.y,next.x);
#endif

          curr = next;
        }

      fprintf(stderr,"[ %u, %u, %u, %u ]\n",
              meta_in.blocks,
              meta_in.offset,
              meta_in.pk,
              meta_in.rk);

      // store back to buffer
      metas->inout[curr.cohort].in = meta_in;


      // unmap
      skc_extent_thrw_tdrw_unmap(&cohort->keys,
                                 keys,
                                 cohort->cq,
                                 NULL);

      // unmap
      skc_extent_thrw_tdrw_unmap(&cohort->metas,
                                 metas,
                                 cohort->cq,
                                 NULL);
#endif
    }

#ifndef NDEBUG
  fprintf(stderr,"rasters_alloc: %u\n",rasters);
#endif

  //
  // RASTER ALLOC/INIT
  //
  cl(SetKernelArg(impl->kernels.rasters_alloc,0,SKC_CL_ARG(runtime->block_pool.atomics.drw)));
  cl(SetKernelArg(impl->kernels.rasters_alloc,1,SKC_CL_ARG(runtime->block_pool.ids.drw)));
  cl(SetKernelArg(impl->kernels.rasters_alloc,2,SKC_CL_ARG(runtime->block_pool.size->ring_mask)));
  cl(SetKernelArg(impl->kernels.rasters_alloc,3,SKC_CL_ARG(runtime->handle_pool.map.drw)));
  cl(SetKernelArg(impl->kernels.rasters_alloc,4,SKC_CL_ARG(cohort->metas.drw)));
  cl(SetKernelArg(impl->kernels.rasters_alloc,5,SKC_CL_ARG(cohort->raster_ids.drN)));
  cl(SetKernelArg(impl->kernels.rasters_alloc,6,SKC_CL_ARG(rasters)));

  skc_device_enqueue_kernel(runtime->device,
                            SKC_DEVICE_KERNEL_ID_RASTERS_ALLOC,
                            cohort->cq,
                            impl->kernels.rasters_alloc,
                            rasters,
                            0,NULL,NULL);

#ifndef NDEBUG
  fprintf(stderr,"post-alloc\n");
#endif

  //
  // PREFIX
  //
  cl(SetKernelArg(impl->kernels.prefix,0,SKC_CL_ARG(runtime->block_pool.atomics.drw)));
  cl(SetKernelArg(impl->kernels.prefix,1,SKC_CL_ARG(runtime->block_pool.ids.drw)));
  cl(SetKernelArg(impl->kernels.prefix,2,SKC_CL_ARG(runtime->block_pool.blocks.drw)));
  cl(SetKernelArg(impl->kernels.prefix,3,SKC_CL_ARG(runtime->block_pool.size->ring_mask)));

  cl(SetKernelArg(impl->kernels.prefix,4,SKC_CL_ARG(cohort->keys.drw)));
  cl(SetKernelArg(impl->kernels.prefix,5,SKC_CL_ARG(runtime->handle_pool.map.drw)));

  cl(SetKernelArg(impl->kernels.prefix,6,SKC_CL_ARG(cohort->metas.drw)));
  cl(SetKernelArg(impl->kernels.prefix,7,SKC_CL_ARG(rasters)));

  cl_event complete;

  skc_device_enqueue_kernel(runtime->device,
                            SKC_DEVICE_KERNEL_ID_PREFIX,
                            cohort->cq,
                            impl->kernels.prefix,
                            rasters,
                            0,NULL,
                            &complete);

  cl(SetEventCallback(complete,CL_COMPLETE,skc_raster_cohort_prefix_cb,grid));
  cl(ReleaseEvent(complete));

#ifndef NDEBUG
  fprintf(stderr,"post-prefix\n");
#endif

  // flush command queue
  cl(Flush(cohort->cq));

  //
  // ALLOCATED RESOURCES
  //
  // path_ids          a
  // raster_ids        a
  // transforms        -
  // clips             -
  // fill_cmds         -
  // cq                a
  // cohort atomics    a
  // cmds              -
  // keys              a
  // meta              a
  //
}
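The disabled host-side reference path in Example #5 sorts the TTRK keys with qsort(keys,atomics->keys,sizeof(*keys),cmp64), but cmp64 itself is not shown anywhere in these excerpts. One plausible sketch, assuming the key is meant to be ordered as a single unsigned 64-bit value (which is what the device-side 64-bit key sort implies):

#include <cstdint>
#include <cstring>

// order two 64-bit keys as plain unsigned integers; memcpy avoids aliasing issues
// usage (host reference path only): qsort(keys,count,sizeof(uint64_t),cmp64);
static int cmp64(void const * l, void const * r)
{
  uint64_t lv, rv;

  std::memcpy(&lv,l,sizeof(lv));
  std::memcpy(&rv,r,sizeof(rv));

  return (lv < rv) ? -1 : (lv > rv) ? 1 : 0;
}

This is purely illustrative of the expected ordering, not the original helper.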
Example #6
0
CVIDetect::CVIDetect() {
	SetEventCallback(fnEventCallBack);
}
Example #7
0
static
void
skc_styling_pfn_unseal(struct skc_styling_impl * const impl, skc_bool const block)
{
  // return if already unsealed
  if (impl->state == SKC_STYLING_STATE_UNSEALED)
    return;

  //
  // otherwise, we're going to need to pump the scheduler
  //
  struct skc_runtime   * const runtime   = impl->runtime;
  struct skc_scheduler * const scheduler = runtime->scheduler;

  //
  // wait for UNSEALING -> UNSEALED transition
  //
  if (impl->state == SKC_STYLING_STATE_UNSEALING)
    {
      if (block) {
        SKC_SCHEDULER_WAIT_WHILE(scheduler,impl->state != SKC_STYLING_STATE_UNSEALED);
      }
      return;
    }

  //
  // otherwise, wait for SEALING -> SEALED transition ...
  //
  if (impl->state == SKC_STYLING_STATE_SEALING)
    {
      // wait if sealing
      SKC_SCHEDULER_WAIT_WHILE(scheduler,impl->state != SKC_STYLING_STATE_SEALED);
    }

  // wait for rendering locks to be released
  SKC_SCHEDULER_WAIT_WHILE(scheduler,impl->lock_count > 0);

  // ... and then unseal the styling object
  impl->state = SKC_STYLING_STATE_UNSEALING;

  // defensively NULL the grid reference
  impl->grid  = NULL;

  // set styling pointers with mapped extents
  cl_event complete;

  struct skc_styling * const styling = impl->styling;

  styling->layers.extent = skc_extent_phwN_pdrN_map(&impl->layers,impl->cq,NULL);
  styling->groups.extent = skc_extent_phwN_pdrN_map(&impl->groups,impl->cq,NULL);
  styling->extras.extent = skc_extent_phwN_pdrN_map(&impl->extras,impl->cq,&complete);

  cl(SetEventCallback(complete,CL_COMPLETE,skc_styling_unseal_cb,impl));
  cl(ReleaseEvent(complete));

  // flush it
  cl(Flush(impl->cq));

  // wait until unsealed...
  if (block) {
    SKC_SCHEDULER_WAIT_WHILE(scheduler,impl->state != SKC_STYLING_STATE_UNSEALED);
  }
}
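Example #7 drives a small state machine (SEALED / SEALING / UNSEALING / UNSEALED) and blocks by pumping the runtime's cooperative scheduler with SKC_SCHEDULER_WAIT_WHILE until a predicate flips. The macro's definition is not part of these excerpts; the sketch below shows the general shape of such a wait-while pump with an entirely hypothetical scheduler type, draining one deferred task per iteration so the callbacks that change impl->state can actually run.

#include <deque>
#include <functional>
#include <utility>

// hypothetical cooperative scheduler: a queue of deferred tasks run on this thread
struct scheduler
{
  std::deque<std::function<void()>> tasks;

  // run one pending task if there is one; return whether anything was executed
  bool execute_one()
  {
    if (tasks.empty())
      return false;

    std::function<void()> task = std::move(tasks.front());
    tasks.pop_front();
    task();
    return true;
  }
};

// wait-while pump mirroring how SKC_SCHEDULER_WAIT_WHILE(scheduler,cond) is used above;
// a real implementation would also sleep/wait for externally posted work instead of spinning
#define SCHEDULER_WAIT_WHILE(s,cond)  \
  while (cond)                        \
    {                                 \
      (s)->execute_one();             \
    }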
Example #8
0
static
void
skc_surface_grid_pfn_execute(skc_grid_t const grid)
{
  struct skc_surface_render   * const render  = skc_grid_get_data(grid);
  struct skc_surface_impl     * const impl    = render->impl;
  struct skc_runtime          * const runtime = impl->runtime;

  // get the composition args
  struct skc_composition_impl * const ci      = render->composition->impl;
  struct skc_place_atomics    * const atomics = ci->atomics.hr;

  if (atomics->offsets > 0)
    {
      // acquire the rbo/tex
      if (render->fb->type != SKC_FRAMEBUFFER_CL_IMAGE2D)
        cl(EnqueueAcquireGLObjects(impl->cq,1,&render->fb->mem,0,NULL,NULL));

      // get the styling args
      struct skc_styling_impl * const si = render->styling->impl;

      cl(SetKernelArg(impl->kernels.render,0,SKC_CL_ARG(si->layers.drN)));
      cl(SetKernelArg(impl->kernels.render,1,SKC_CL_ARG(si->groups.drN)));
      cl(SetKernelArg(impl->kernels.render,2,SKC_CL_ARG(si->extras.drN)));

      cl(SetKernelArg(impl->kernels.render,3,SKC_CL_ARG(ci->keys.drw)));
      cl(SetKernelArg(impl->kernels.render,4,SKC_CL_ARG(atomics->keys)));
      cl(SetKernelArg(impl->kernels.render,5,SKC_CL_ARG(ci->offsets.drw)));
      cl(SetKernelArg(impl->kernels.render,6,SKC_CL_ARG(atomics->offsets)));

      // block pool
      cl(SetKernelArg(impl->kernels.render,7,SKC_CL_ARG(impl->runtime->block_pool.blocks.drw)));

      // surface
      cl(SetKernelArg(impl->kernels.render,8,SKC_CL_ARG(render->fb->mem)));

#if 1
      // tile clip
      cl(SetKernelArg(impl->kernels.render,9,sizeof(skc_uint4),render->clip));
#else
      // surface pitch (height)
      skc_uint const surface_pitch = SKC_SURFACE_HEIGHT;
      cl(SetKernelArg(impl->kernels.render,9,SKC_CL_ARG(surface_pitch)));
      // tile clip
      cl(SetKernelArg(impl->kernels.render,10,sizeof(skc_uint4),render->clip));
#endif

      // launch render kernel
      skc_device_enqueue_kernel(runtime->device,
                                SKC_DEVICE_KERNEL_ID_RENDER,
                                impl->cq,
                                impl->kernels.render,
                                atomics->offsets,
                                0,NULL,NULL);


      cl_event complete;

      // give the rbo back
      if (render->fb->type != SKC_FRAMEBUFFER_CL_IMAGE2D)
        {
          cl(EnqueueReleaseGLObjects(impl->cq,1,&render->fb->mem,0,NULL,&complete));

          //
          // blit the rbo to fbo0
          //
          render->fb->post_render(render->fb->interop);

          //
          // clear the rbo -- FIXME -- we shouldn't have to do this here
          //
          float    const rgba[4] = { 1.0f, 1.0f, 1.0f, 1.0f };
          uint32_t       rect[4] = { 0 };

          skc_interop_get_size(render->fb->interop,rect+2,rect+3);

          skc_surface_debug_clear(impl,render->fb,rgba,rect);
        }
      else
        {
          // no GL interop path: obtain a completion event from a marker so
          // 'complete' is always valid before the callback is registered below
          cl(EnqueueMarkerWithWaitList(impl->cq,0,NULL,&complete));
        }

      // notify anyone listening...
      cl(SetEventCallback(complete,CL_COMPLETE,skc_surface_render_cb,render));
      cl(ReleaseEvent(complete));

      // flush it
      cl(Flush(impl->cq));
    }
  else
    {
      skc_surface_render_complete(render);
    }
}
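Example #8 wraps the render kernel in the standard CL/GL interop handshake: acquire the shared GL renderbuffer/texture on the command queue, use it as a kernel argument, then release it and hang the completion callback off the release event. A stripped-down sketch of just that handshake against the raw API follows; the kernel, queue, and shared memory object are hypothetical, and CL/GL sharing is assumed to have been set up at context-creation time.

#include <stddef.h>
#include <CL/cl.h>
#include <CL/cl_gl.h>

static void CL_CALLBACK on_render_done(cl_event, cl_int, void *)
{
  // hypothetical continuation: signal whoever is waiting on the frame
}

static void render_into_gl_object(cl_command_queue cq,
                                  cl_kernel        render,
                                  cl_mem           shared,   // from clCreateFromGLRenderbuffer/Texture
                                  size_t           work_size,
                                  void           * user_data)
{
  // GL must be finished with the object before CL starts touching it
  clEnqueueAcquireGLObjects(cq,1,&shared,0,NULL,NULL);

  // while acquired, the shared object is just another cl_mem kernel argument
  clSetKernelArg(render,0,sizeof(shared),&shared);
  clEnqueueNDRangeKernel(cq,render,1,NULL,&work_size,NULL,0,NULL,NULL);

  // hand the object back to GL and chain the completion callback off the release
  cl_event complete;

  clEnqueueReleaseGLObjects(cq,1,&shared,0,NULL,&complete);
  clSetEventCallback(complete,CL_COMPLETE,on_render_done,user_data);
  clReleaseEvent(complete);

  clFlush(cq);
}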