Example No. 1
void 
Cmvmi::execREAD_CONFIG_REQ(Signal* signal)
{
  jamEntry();

  const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();

  Uint32 ref = req->senderRef;
  Uint32 senderData = req->senderData;

  const ndb_mgm_configuration_iterator * p = 
    m_ctx.m_config.getOwnConfigIterator();
  ndbrequire(p != 0);

  // DiskPageBufferMemory; 64 MB is the fallback when the parameter is absent
  Uint64 page_buffer = 64*1024*1024;
  ndb_mgm_get_int64_parameter(p, CFG_DB_DISK_PAGE_BUFFER_MEMORY, &page_buffer);
  
  Uint32 pages = 0;
  pages += page_buffer / GLOBAL_PAGE_SIZE; // in pages
  pages += LCP_RESTORE_BUFFER;
  m_global_page_pool.setSize(pages + 64, true);
  
  Uint64 shared_mem = 8*1024*1024;
  ndb_mgm_get_int64_parameter(p, CFG_DB_SGA, &shared_mem);
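  // SharedGlobalMemory (CFG_DB_SGA): convert the byte count to global pages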
  shared_mem /= GLOBAL_PAGE_SIZE;
  if (shared_mem)
  {
    Resource_limit rl;
    rl.m_min = 0;
    rl.m_max = shared_mem;
    rl.m_resource_id = 0;
    m_ctx.m_mm.set_resource_limit(rl);
  }
  
  // The global memory manager must initialize successfully; ndbrequire aborts otherwise
  ndbrequire(m_ctx.m_mm.init());
  {
    void* ptr = m_ctx.m_mm.get_memroot();
    m_shared_page_pool.set((GlobalPage*)ptr, ~0);
  }
  
  ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
  conf->senderRef = reference();
  conf->senderData = senderData;
  sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
             ReadConfigConf::SignalLength, JBB);
}
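Example No. 1 is the READ_CONFIG_REQ handler in the Cmvmi block: it reads CFG_DB_DISK_PAGE_BUFFER_MEMORY and CFG_DB_SGA from the node's own configuration, converts the byte values into global pages, registers the shared area as resource id 0 with no reserved minimum, initializes the memory manager, and acknowledges the request with READ_CONFIG_CONF.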
Example No. 2
Uint32
compute_acc_32kpages(const ndb_mgm_configuration_iterator * p)
{
  Uint64 accmem = 0;
  ndb_mgm_get_int64_parameter(p, CFG_DB_INDEX_MEM, &accmem);
  if (accmem)
  {
    accmem /= GLOBAL_PAGE_SIZE;
    
    Uint32 lqhInstances = 1;
    if (globalData.isNdbMtLqh)
    {
      lqhInstances = globalData.ndbMtLqhWorkers;
    }
    
    accmem += lqhInstances * (32 / 4); // Added as a safety margin in Configuration.cpp
  }
  return Uint32(accmem);
}
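Example No. 2 converts the configured IndexMemory (CFG_DB_INDEX_MEM) from bytes into 32 KB global pages and, when the parameter is set, adds 32/4 = 8 extra pages per LQH instance to match the safety margin applied in Configuration.cpp.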
Example No. 3
static int
init_global_memory_manager(EmulatorData &ed, Uint32 *watchCounter)
{
  const ndb_mgm_configuration_iterator * p =
    ed.theConfiguration->getOwnConfigIterator();
  if (p == 0)
  {
    abort();
  }

  Uint32 numa = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_NUMA, &numa);
  if (numa == 1)
  {
    int res = NdbNuma_setInterleaved();
    g_eventLogger->info("numa_set_interleave_mask(numa_all_nodes) : %s",
                        res == 0 ? "OK" : "no numa support");
  }

  Uint64 shared_mem = 8*1024*1024;
  ndb_mgm_get_int64_parameter(p, CFG_DB_SGA, &shared_mem);
  shared_mem /= GLOBAL_PAGE_SIZE;
  Uint32 shared_pages = Uint32(shared_mem);

  Uint32 tupmem = 0;
  if (ndb_mgm_get_int_parameter(p, CFG_TUP_PAGE, &tupmem))
  {
    g_eventLogger->alert("Failed to get CFG_TUP_PAGE parameter from "
                        "config, exiting.");
    return -1;
  }

  {
    /**
     * IndexMemory
     */
    Uint32 accpages = compute_acc_32kpages(p);
    tupmem += accpages; // Add to RG_DATAMEM
  }

  Uint32 lqhInstances = 1;
  if (globalData.isNdbMtLqh)
  {
    lqhInstances = globalData.ndbMtLqhWorkers;
  }

  if (tupmem)
  {
    Resource_limit rl;
    rl.m_min = tupmem;
    rl.m_max = tupmem;
    rl.m_resource_id = RG_DATAMEM;
    ed.m_mem_manager->set_resource_limit(rl);
  }

  Uint32 maxopen = 4 * 4; // 4 redo parts, max 4 files per part
  Uint32 filebuffer = NDB_FILE_BUFFER_SIZE;
  Uint32 filepages = (filebuffer / GLOBAL_PAGE_SIZE) * maxopen;

  {
    /**
     * RedoBuffer
     */
    Uint32 redomem = 0;
    ndb_mgm_get_int_parameter(p, CFG_DB_REDO_BUFFER,
                              &redomem);

    if (redomem)
    {
      redomem /= GLOBAL_PAGE_SIZE;
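      // Round each LQH instance's redo buffer up to a multiple of 16 pages;
      // redomem & 15 is redomem modulo 16.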
      Uint32 tmp = redomem & 15;
      if (tmp != 0)
      {
        redomem += (16 - tmp);
      }

      filepages += lqhInstances * redomem; // Add to RG_FILE_BUFFERS
    }
  }

  if (filepages)
  {
    Resource_limit rl;
    rl.m_min = filepages;
    rl.m_max = filepages;
    rl.m_resource_id = RG_FILE_BUFFERS;
    ed.m_mem_manager->set_resource_limit(rl);
  }

  Uint32 jbpages = compute_jb_pages(&ed);
  if (jbpages)
  {
    Resource_limit rl;
    rl.m_min = jbpages;
    rl.m_max = jbpages;
    rl.m_resource_id = RG_JOBBUFFER;
    ed.m_mem_manager->set_resource_limit(rl);
  }

  Uint32 sbpages = 0;
  if (globalTransporterRegistry.get_using_default_send_buffer() == false)
  {
    Uint64 mem = globalTransporterRegistry.get_total_max_send_buffer();
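    // Round up so that a partial page still claims a whole page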
    sbpages = Uint32((mem + GLOBAL_PAGE_SIZE - 1) / GLOBAL_PAGE_SIZE);
    Resource_limit rl;
    rl.m_min = sbpages;
    rl.m_max = sbpages;
    rl.m_resource_id = RG_TRANSPORTER_BUFFERS;
    ed.m_mem_manager->set_resource_limit(rl);
  }

  Uint32 pgman_pages = 0;
  {
    /**
     * Disk page buffer memory
     */
    Uint64 page_buffer = 64*1024*1024;
    ndb_mgm_get_int64_parameter(p, CFG_DB_DISK_PAGE_BUFFER_MEMORY,&page_buffer);

    Uint32 pages = 0;
    pages += Uint32(page_buffer / GLOBAL_PAGE_SIZE); // in pages
    pages += LCP_RESTORE_BUFFER * lqhInstances;

    pgman_pages += pages;
    pgman_pages += 64;

    Resource_limit rl;
    rl.m_min = pgman_pages;
    rl.m_max = pgman_pages;
    rl.m_resource_id = RG_DISK_PAGE_BUFFER;  // Add to RG_DISK_PAGE_BUFFER
    ed.m_mem_manager->set_resource_limit(rl);
  }

  Uint32 sum = shared_pages + tupmem + filepages + jbpages + sbpages +
    pgman_pages;

  if (sum)
  {
    Resource_limit rl;
    rl.m_min = 0;
    rl.m_max = sum;
    rl.m_resource_id = 0;
    ed.m_mem_manager->set_resource_limit(rl);
  }

  if (!ed.m_mem_manager->init(watchCounter))
  {
    struct ndb_mgm_param_info dm;
    struct ndb_mgm_param_info sga;
    size_t size;

    size = sizeof(ndb_mgm_param_info);
    ndb_mgm_get_db_parameter_info(CFG_DB_DATA_MEM, &dm, &size);
    size = sizeof(ndb_mgm_param_info);
    ndb_mgm_get_db_parameter_info(CFG_DB_SGA, &sga, &size);

    g_eventLogger->alert("Malloc (%lld bytes) for %s and %s failed, exiting",
                         Uint64(shared_mem + tupmem) * GLOBAL_PAGE_SIZE,
                         dm.m_name, sga.m_name);
    return -1;
  }

  Uint32 late_alloc = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_LATE_ALLOC,
                            &late_alloc);

  Uint32 memlock = 0;
  ndb_mgm_get_int_parameter(p, CFG_DB_MEMLOCK, &memlock);

  if (late_alloc)
  {
    /**
     * Only map these groups that are required for ndb to even "start"
     */
    Uint32 rg[] = { RG_JOBBUFFER, RG_FILE_BUFFERS, RG_TRANSPORTER_BUFFERS, 0 };
    ed.m_mem_manager->map(watchCounter, memlock, rg);
  }
  else
  {
    ed.m_mem_manager->map(watchCounter, memlock); // Map all
  }

  return 0;                     // Success
}
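Two arithmetic idioms recur in these examples: truncating byte-to-page division followed by rounding up to a multiple of 16 pages (the RedoBuffer block in Example No. 3), and ceiling division (the transporter send buffer). The standalone sketch below is illustrative only and is not NDB code; it assumes the 32 KiB global page size implied by compute_acc_32kpages, and the helper names are invented for the example.

#include <stdint.h>
#include <stdio.h>

static const uint64_t kPageSize = 32 * 1024;   // assumed GLOBAL_PAGE_SIZE (32 KiB)

// Redo-buffer style: truncate to whole pages, then round up to a multiple of 16.
static uint32_t redo_style_pages(uint64_t bytes)
{
  uint64_t pages = bytes / kPageSize;
  uint32_t rem = (uint32_t)(pages & 15);       // pages % 16
  if (rem != 0)
    pages += 16 - rem;
  return (uint32_t)pages;
}

// Send-buffer style: ceiling division, so a partial page still costs a full page.
static uint32_t ceil_pages(uint64_t bytes)
{
  return (uint32_t)((bytes + kPageSize - 1) / kPageSize);
}

int main()
{
  printf("redo: %u pages\n", redo_style_pages(100 * 1024)); // 3 pages, rounded up to 16
  printf("send: %u pages\n", ceil_pages(100 * 1024));       // 4 pages
  return 0;
}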