Example #1
// Standard and Boost headers used below; the project-specific cl:: wrapper
// header used by this example is not shown here.
#include <cstddef>
#include <cstdlib>
#include <list>
#include <vector>
#include <boost/progress.hpp>

int main(int argc, char *argv[]) {
  std::list<cl::platform_ref> platforms;
  std::list<cl::device_ref> devices;

  cl::platform_ref::get_platforms(platforms);
  cl::platform_ref platform = platforms.front();

  platform.get_devices(devices);
  cl::device_ref device = devices.front();

  cl::context_ref context(platform, device);
  cl::command_queue_ref queue(context, device, true, true);

  cl::program_ref program(context, div_kernel_opencl_source);
  program.build();

  cl::kernel_ref with_div = program.get_kernel("with_div");
  cl::kernel_ref with_sync = program.get_kernel("with_sync");
  cl::kernel_ref no_div = program.get_kernel("no_div");

  // Fill the host buffer with signed pseudo-random test values.
  std::vector<float> buffer(1024*768);
  for(std::size_t i=0; i<buffer.size(); ++i) {
    buffer[i] = random();
    buffer[i] -= RAND_MAX/2;
  }

  cl::buffer_ref in_buffer(context,
      sizeof(float)*1024*768);
  cl::buffer_ref out_buffer(context,
      sizeof(float)*1024*768);
  std::size_t global_size[] = { 1024, 768 };
  std::size_t local_size[] = { 16, 16 };

  // Time each kernel variant in turn: with_div, with_sync, no_div.
  cl::kernel_ref use_program = with_div;
  for(int i=0; i<3; ++i) {
    switch(i) {
      case 0: use_program = with_div; break;
      case 1: use_program = with_sync; break;
      case 2: use_program = no_div; break;
    }
    use_program.set_arg(0, in_buffer);
    use_program.set_arg(1, out_buffer);
    // Time 32 runs of the selected kernel.
    for(int run=0; run<32; ++run) {
      boost::progress_timer timer;
      queue.run_kernel(use_program,
          2,
          global_size,
          local_size).wait();
    }
  }

  return EXIT_SUCCESS;
}
Example #2
void	parse_map_allowed_chars(struct s_lines *map_content)
{
  int	i;

  while (map_content)
    {
      i = 0;
      while (i < map_content->length)
	{
	  if (!in_buffer(map_content->line[i], MAP_ALLOWED_CHARS))
	    fatal_error("Map error: unallowed char in map file\n");
	  i++;
	}
      map_content = map_content->next;
    }
}
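
The in_buffer helper called above is not defined in this example. A minimal sketch consistent with that call site (hypothetical; the real project's version may differ):

/* Hypothetical sketch of in_buffer: non-zero if c occurs in set. */
int in_buffer(char c, const char *set)
{
  int i;

  i = 0;
  while (set[i] != '\0')
    {
      if (set[i] == c)
        return (1);
      i++;
    }
  return (0);
}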
Example #3
std::vector<sframe> shuffle(
    sframe sframe_in,
    size_t n,
    std::function<size_t(const std::vector<flexible_type>&)> hash_fn) {

    ASSERT_GT(n, 0);

    // split the work across threads: one worker per hardware thread,
    // with the rows divided evenly between workers.
    size_t num_rows = sframe_in.num_rows();
    size_t num_workers = graphlab::thread::cpu_count();
    size_t rows_per_worker = num_rows / num_workers;

    // prepare the out sframe
    std::vector<sframe> sframe_out;
    std::vector<sframe::iterator> sframe_out_iter;
    sframe_out.resize(n);
    for (auto& sf: sframe_out) {
      sf.open_for_write(sframe_in.column_names(), sframe_in.column_types(), "",  1);
      sframe_out_iter.push_back(sf.get_output_iterator(0));
    }
    std::vector<std::unique_ptr<std::mutex>> sframe_out_locks;
    for (size_t i = 0; i < n; ++i) {
      sframe_out_locks.push_back(std::unique_ptr<std::mutex>(new std::mutex));
    }

    auto reader = sframe_in.get_reader();
    parallel_for(0, num_workers, [&](size_t worker_id) {
        size_t start_row = worker_id * rows_per_worker;
        size_t end_row = (worker_id == (num_workers-1)) ? num_rows
                                                        : (worker_id + 1) * rows_per_worker;

        // prepare thread local output buffer for each sframe
        std::vector<buffered_writer<std::vector<flexible_type>, sframe::iterator>> writers;
        for (size_t i = 0; i < n; ++i) {
          writers.push_back(
            buffered_writer<std::vector<flexible_type>, sframe::iterator>
            (sframe_out_iter[i], *sframe_out_locks[i],
             WRITER_BUFFER_SOFT_LIMIT, WRITER_BUFFER_HARD_LIMIT)
          );
        }

        std::vector<std::vector<flexible_type>> in_buffer(READER_BUFFER_SIZE);
        while (start_row < end_row) {
          // read a chunk of rows to shuffle
          size_t rows_to_read = std::min<size_t>((end_row - start_row), READER_BUFFER_SIZE);
          size_t rows_read = reader->read_rows(start_row, start_row + rows_to_read, in_buffer);
          DASSERT_EQ(rows_read, rows_to_read);
          start_row += rows_read;

          for (auto& row : in_buffer) {
            size_t out_index = hash_fn(row) % n;
            writers[out_index].write(row);
          }
        } // end of while

        // flush the rest of the buffer
        for (size_t i = 0; i < n; ++i) {
          writers[i].flush();
        }
    });

    // close all sframe writers
    for (auto& sf: sframe_out) {
      sf.close();
    }
    return sframe_out;
}
Example #4
void KMessageClient::processMessage (const QByteArray &msg)
{
  if (d->isLocked)
  { // should not happen: processIncomingMessage and processFirstMessage already check for this; queue the message just in case
    d->delayedMessages.append(msg);
    return;
  }
  QBuffer in_buffer (msg);
  in_buffer.open (IO_ReadOnly);
  QDataStream in_stream (&in_buffer);

  bool unknown = false;

  Q_UINT32 messageID;
  in_stream >> messageID;
  switch (messageID)
  {
    case KMessageServer::MSG_BROADCAST:
      {
        Q_UINT32 clientID;
        in_stream >> clientID;
        emit broadcastReceived (in_buffer.readAll(), clientID);
      }
      break;

    case KMessageServer::MSG_FORWARD:
      {
        Q_UINT32 clientID;
        QValueList <Q_UINT32> receivers;
        in_stream >> clientID >> receivers;
        emit forwardReceived (in_buffer.readAll(), clientID, receivers);
      }
      break;

    case KMessageServer::ANS_CLIENT_ID:
      {
        bool old_admin = isAdmin();
        Q_UINT32 clientID;
        in_stream >> clientID;
        d->connection->setId (clientID);
        if (old_admin != isAdmin())
          emit adminStatusChanged (isAdmin());
      }
      break;

    case KMessageServer::ANS_ADMIN_ID:
      {
        bool old_admin = isAdmin();
        in_stream >> d->adminID;
        if (old_admin != isAdmin())
          emit adminStatusChanged (isAdmin());
      }
      break;

    case KMessageServer::ANS_CLIENT_LIST:
      {
        in_stream >> d->clientList;
      }
      break;

    case KMessageServer::EVNT_CLIENT_CONNECTED:
      {
        Q_UINT32 id;
        in_stream >> id;

        if (d->clientList.contains (id))
          kdWarning (11001) << k_funcinfo << ": Adding a client that already existed!" << endl;
        else
          d->clientList.append (id);

        emit eventClientConnected (id);
      }
      break;

    case KMessageServer::EVNT_CLIENT_DISCONNECTED:
      {
        Q_UINT32 id;
        Q_INT8 broken;
        in_stream >> id >> broken;

        if (!d->clientList.contains (id))
          kdWarning (11001) << k_funcinfo << ": Removing a client that doesn't exist!" << endl;
        else
          d->clientList.remove (id);

        emit eventClientDisconnected (id, bool (broken));
      }
      break;

    default:
      unknown = true;
  }

  if (!unknown && !in_buffer.atEnd())
    kdWarning (11001) << k_funcinfo << ": Extra data received for message ID " << messageID << endl;

  emit serverMessageReceived (msg, unknown);

  if (unknown)
    kdWarning (11001) << k_funcinfo << ": received unknown message ID " << messageID << endl;
}
Example #5
bool NetPlayServer::CompressFileIntoPacket(const std::string& file_path, sf::Packet& packet)
{
  File::IOFile file(file_path, "rb");
  if (!file)
  {
    PanicAlertT("Failed to open file \"%s\".", file_path.c_str());
    return false;
  }

  const sf::Uint64 size = file.GetSize();
  packet << size;

  if (size == 0)
    return true;

  std::vector<u8> in_buffer(NETPLAY_LZO_IN_LEN);
  std::vector<u8> out_buffer(NETPLAY_LZO_OUT_LEN);
  std::vector<u8> wrkmem(LZO1X_1_MEM_COMPRESS);

  lzo_uint i = 0;  // total number of input bytes consumed so far
  while (true)
  {
    lzo_uint32 cur_len = 0;  // number of bytes to read
    lzo_uint out_len = 0;    // number of bytes to write

    if ((i + NETPLAY_LZO_IN_LEN) >= size)
    {
      cur_len = static_cast<lzo_uint32>(size - i);
    }
    else
    {
      cur_len = NETPLAY_LZO_IN_LEN;
    }

    if (cur_len <= 0)
      break;  // EOF

    if (!file.ReadBytes(in_buffer.data(), cur_len))
    {
      PanicAlertT("Error reading file: %s", file_path.c_str());
      return false;
    }

    if (lzo1x_1_compress(in_buffer.data(), cur_len, out_buffer.data(), &out_len, wrkmem.data()) !=
        LZO_E_OK)
    {
      PanicAlertT("Internal LZO Error - compression failed");
      return false;
    }

    // The size of the data to write is 'out_len'
    packet << static_cast<u32>(out_len);
    for (size_t j = 0; j < out_len; j++)
    {
      packet << out_buffer[j];
    }

    if (cur_len != NETPLAY_LZO_IN_LEN)
      break;

    i += cur_len;
  }

  // Mark end of data
  packet << static_cast<u32>(0);

  return true;
}