Example #1
/**
 * Tries to determine the physical package a CPU belongs to.
 * Returns the package id, or -1 if it cannot be determined.
 */
int get_pkg(int cpu)
{
    int pkg = -1;
    char buffer[10];
    char path[128];

    if (cpu == -1) { cpu = get_cpu(); }
    if (cpu != -1)
    {
        snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%i/topology/physical_package_id", cpu);
        if (read_file(path, buffer, sizeof(buffer))) { pkg = atoi(buffer); }

        /* fallbacks if sysfs is not working */
        if (pkg == -1)
        {
            /* assume 0 if there is only one CPU or only one package */
            if ((num_cpus() == 1) || (num_packages() == 1)) { pkg = 0; }
            /* get the physical package id from /proc/cpuinfo */
            else if (!get_proc_cpuinfo_data("physical id", buffer, cpu)) { pkg = atoi(buffer); }
            /* if the number of cpus equals the number of packages assume pkg_id = cpu_id*/
            else if (num_cpus() == num_packages()) { pkg = cpu; }
            /* if there is only one core per package assume pkg_id = core_id */
            else if (num_cores_per_package() == 1) { pkg = get_core_id(cpu); }
            /* if the number of packages equals the number of numa nodes assume pkg_id = numa node */
            else if (num_numa_nodes() == num_packages()) { pkg = get_numa_node(cpu); }

            /* NOTE: on UMA systems with multiple sockets and more than one core per socket,
               the pkg_id can't be determined without correct topology information in sysfs */
        }
    }

    return pkg;
}
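
/**
 * Ensures the shared positions buffer can hold required_size_per_buffer bytes
 * in each of its two halves. If not, it allocates twice the required per-buffer
 * size on the local NUMA node and re-points input_positions_slice_ and
 * output_positions_slice_ at the lower and upper halves of the new buffer.
 */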
void LogReducer::expand_positions_buffers_if_needed(uint64_t required_size_per_buffer) {
  ASSERT_ND(input_positions_slice_.get_size() == output_positions_slice_.get_size());
  if (input_positions_slice_.get_size() < required_size_per_buffer) {
    uint64_t new_size = required_size_per_buffer * 2;
    LOG(WARNING) << to_string() << " automatically expanding positions_buffers from "
      << positions_buffers_.get_size() << " to " << new_size << ". if this happens often,"
      << " our sizing is wrong.";
    positions_buffers_.alloc(
      new_size,
      memory::kHugepageSize,
      memory::AlignedMemory::kNumaAllocOnnode,
      get_numa_node());
    input_positions_slice_ = memory::AlignedMemorySlice(
      &positions_buffers_,
      0,
      positions_buffers_.get_size() >> 1);
    output_positions_slice_ = memory::AlignedMemorySlice(
      &positions_buffers_,
      positions_buffers_.get_size() >> 1,
      positions_buffers_.get_size() >> 1);
  }
}
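
/**
 * Allocates (or grows) a single aligned memory region so it holds at least
 * required_size bytes, logging whether this is the initial allocation or an
 * unexpected expansion. The memory is hugepage-aligned and bound to the
 * local NUMA node.
 */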
void LogReducer::expand_if_needed(
  uint64_t required_size,
  memory::AlignedMemory *memory,
  const std::string& name) {
  if (memory->is_null() || memory->get_size() < required_size) {
    if (memory->is_null()) {
      LOG(INFO) << to_string() << " initially allocating " << name << "."
        << assorted::Hex(required_size) << " bytes.";
    } else {
      LOG(WARNING) << to_string() << " automatically expanding " << name << " from "
        << assorted::Hex(memory->get_size()) << " bytes to "
        << assorted::Hex(required_size) << " bytes. if this happens often,"
        << " our sizing is wrong.";
    }
    memory->alloc(
      required_size,
      memory::kHugepageSize,
      memory::AlignedMemory::kNumaAllocOnnode,
      get_numa_node());
  }
}
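
For comparison, here is a minimal standalone sketch of the same grow-on-demand pattern the two FOEDUS routines above implement; the std::vector-based buffer, the function name, and the doubling policy shown here are illustrative assumptions, not part of the library's API.

#include <cstdint>
#include <iostream>
#include <vector>

// Illustrative sketch only: grow a byte buffer when it can no longer hold
// required_size, doubling so that repeated small overruns do not trigger a
// reallocation every time (the same policy as the FOEDUS snippets above).
void expand_if_needed_sketch(uint64_t required_size, std::vector<char>* buffer) {
  if (buffer->size() < required_size) {
    uint64_t new_size = required_size * 2;
    std::cerr << "expanding buffer from " << buffer->size()
              << " to " << new_size << " bytes;"
              << " if this happens often, the initial sizing is wrong" << std::endl;
    buffer->resize(new_size);
  }
}

int main() {
  std::vector<char> buf;
  expand_if_needed_sketch(4096, &buf);  // first call: grows to 8192 bytes
  expand_if_needed_sketch(2048, &buf);  // already large enough: no-op
  return 0;
}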