TEST_F(TransactionTests, SingleTransactionTest) {
  for (auto test_type : TEST_TYPES) {
    concurrency::TransactionManagerFactory::Configure(test_type);
    auto &txn_manager = concurrency::TransactionManagerFactory::GetInstance();
    std::unique_ptr<storage::DataTable> table(
        TestingTransactionUtil::CreateTable());
    // Just scan the table
    {
      TransactionScheduler scheduler(1, table.get(), &txn_manager);
      scheduler.Txn(0).Scan(0);
      scheduler.Txn(0).Commit();

      scheduler.Run();

      EXPECT_EQ(10, scheduler.schedules[0].results.size());
    }
    // read, read, read, read, update, read, read not exist
    // another txn read
    {
      TransactionScheduler scheduler(2, table.get(), &txn_manager);
      scheduler.Txn(0).Read(0);
      scheduler.Txn(0).Read(0);
      scheduler.Txn(0).Read(0);
      scheduler.Txn(0).Read(0);
      scheduler.Txn(0).Update(0, 1);
      scheduler.Txn(0).Read(0);
      scheduler.Txn(0).Read(100);
      scheduler.Txn(0).Commit();
      scheduler.Txn(1).Read(0);
      scheduler.Txn(1).Commit();

      scheduler.Run();

      EXPECT_EQ(ResultType::SUCCESS, scheduler.schedules[0].txn_result);
      EXPECT_EQ(ResultType::SUCCESS, scheduler.schedules[1].txn_result);
      EXPECT_EQ(0, scheduler.schedules[0].results[0]);
      EXPECT_EQ(0, scheduler.schedules[0].results[1]);
      EXPECT_EQ(0, scheduler.schedules[0].results[2]);
      EXPECT_EQ(0, scheduler.schedules[0].results[3]);
      EXPECT_EQ(1, scheduler.schedules[0].results[4]);
      EXPECT_EQ(-1, scheduler.schedules[0].results[5]);
      EXPECT_EQ(1, scheduler.schedules[1].results[0]);
    }

    // update, update, update, update, read
    {
      TransactionScheduler scheduler(1, table.get(), &txn_manager);
      scheduler.Txn(0).Update(0, 1);
      scheduler.Txn(0).Update(0, 2);
      scheduler.Txn(0).Update(0, 3);
      scheduler.Txn(0).Update(0, 4);
      scheduler.Txn(0).Read(0);
      scheduler.Txn(0).Commit();

      scheduler.Run();

      EXPECT_EQ(ResultType::SUCCESS, scheduler.schedules[0].txn_result);
      EXPECT_EQ(4, scheduler.schedules[0].results[0]);
    }

    // delete not exist, delete exist, read deleted, update deleted,
    // read deleted, insert back, update inserted, read newly updated,
    // delete inserted, read deleted
    {
      TransactionScheduler scheduler(1, table.get(), &txn_manager);
      scheduler.Txn(0).Delete(100);
      scheduler.Txn(0).Delete(0);
      scheduler.Txn(0).Read(0);
      scheduler.Txn(0).Update(0, 1);
      scheduler.Txn(0).Read(0);
      scheduler.Txn(0).Insert(0, 2);
      scheduler.Txn(0).Update(0, 3);
      scheduler.Txn(0).Read(0);
      scheduler.Txn(0).Delete(0);
      scheduler.Txn(0).Read(0);
      scheduler.Txn(0).Commit();

      scheduler.Run();

      EXPECT_EQ(ResultType::SUCCESS, scheduler.schedules[0].txn_result);
      EXPECT_EQ(-1, scheduler.schedules[0].results[0]);
      EXPECT_EQ(-1, scheduler.schedules[0].results[1]);
      EXPECT_EQ(3, scheduler.schedules[0].results[2]);
      EXPECT_EQ(-1, scheduler.schedules[0].results[3]);
      LOG_INFO("FINISH THIS");
    }

    // insert, delete inserted, read deleted, insert again, delete again,
    // read deleted, insert again, read inserted, update inserted, read updated
    {
      TransactionScheduler scheduler(1, table.get(), &txn_manager);

      scheduler.Txn(0).Insert(1000, 0);
      scheduler.Txn(0).Delete(1000);
      scheduler.Txn(0).Read(1000);
      scheduler.Txn(0).Insert(1000, 1);
      scheduler.Txn(0).Delete(1000);
      scheduler.Txn(0).Read(1000);
      scheduler.Txn(0).Insert(1000, 2);
      scheduler.Txn(0).Read(1000);
      scheduler.Txn(0).Update(1000, 3);
      scheduler.Txn(0).Read(1000);
      scheduler.Txn(0).Commit();

      scheduler.Run();

      EXPECT_EQ(ResultType::SUCCESS, scheduler.schedules[0].txn_result);
      EXPECT_EQ(-1, scheduler.schedules[0].results[0]);
      EXPECT_EQ(-1, scheduler.schedules[0].results[1]);
      EXPECT_EQ(2, scheduler.schedules[0].results[2]);
      EXPECT_EQ(3, scheduler.schedules[0].results[3]);
    }

  }
}
Example #2
0
w_rc_t asc_sort_man_impl::get_sort_iter(asc_sort_iter_impl* &sort_iter)
{
    sort_iter = new asc_sort_iter_impl(table(), this);
    return (RCOK);
}
void OXMLi_ListenerState_Table::startElement (OXMLi_StartElementRequest * rqst)
{
	if (nameMatches(rqst->pName, NS_W_KEY, "tbl"))
	{
		OXML_Element_Table* pTable = new OXML_Element_Table("");
		m_tableStack.push(pTable);
		OXML_SharedElement table(pTable);
		rqst->stck->push(table);
		rqst->handled = true;
		pTable->setCurrentRowNumber(-1);
		pTable->setCurrentColNumber(-1);
	}
	else if(nameMatches(rqst->pName, NS_W_KEY, "tr"))
	{
		if(m_tableStack.empty())
		{
			rqst->handled = false;
			rqst->valid = false;
			return;
		}

		OXML_Element_Table* table = m_tableStack.top();
		OXML_Element_Row* pRow = new OXML_Element_Row("", table);
		m_rowStack.push(pRow);
		OXML_SharedElement row(pRow);
		rqst->stck->push(row);
		rqst->handled = true;
		table->incrementCurrentRowNumber();
		table->setCurrentColNumber(0);
		pRow->setRowNumber(table->getCurrentRowNumber());
	}
	else if(nameMatches(rqst->pName, NS_W_KEY, "tc"))
	{
		if(m_tableStack.empty() || m_rowStack.empty())
		{
			rqst->handled = false;
			rqst->valid = false;
			return;
		}

		OXML_Element_Table* table = m_tableStack.top();				
		OXML_Element_Row* row = m_rowStack.top();				
		OXML_Element_Cell* pCell = new OXML_Element_Cell("", table, row, 
								table->getCurrentColNumber(), table->getCurrentColNumber()+1, //left right
								table->getCurrentRowNumber(), table->getCurrentRowNumber()+1); //top,bottom
		m_cellStack.push(pCell);
		OXML_SharedElement cell(pCell);
		rqst->stck->push(cell);
		rqst->handled = true;
		table->incrementCurrentColNumber();
	}
	else if(nameMatches(rqst->pName, NS_W_KEY, "gridSpan"))
	{
		if(m_tableStack.empty() || m_cellStack.empty())
		{
			rqst->handled = false;
			rqst->valid = false;
			return;
		}

		OXML_Element_Table* table = m_tableStack.top();				
		const gchar* val = attrMatches(NS_W_KEY, "val", rqst->ppAtts);
		if(val)
		{
			int span = atoi(val);
			int left = table->getCurrentColNumber()-1;
			int right = left + span;
			//change current cell's right index
			OXML_Element_Cell* cell = m_cellStack.top();
			cell->setRight(right);
			//update column index of current table			
			table->setCurrentColNumber(right);
		}
		rqst->handled = true;
	}
	else if(nameMatches(rqst->pName, NS_W_KEY, "vMerge"))
	{
		if(m_cellStack.empty())
		{
			rqst->handled = false;
			rqst->valid = false;
			return;
		}

		OXML_Element_Cell* cell = m_cellStack.top();				
		cell->setVerticalMergeStart(false); //default to continue if the attribute is missing
		const gchar* val = attrMatches(NS_W_KEY, "val", rqst->ppAtts);
		if(val && !strcmp(val, "restart")) 
		{
			cell->setVerticalMergeStart(true);
		}
		rqst->handled = true;
	}
	else if(nameMatches(rqst->pName, NS_W_KEY, "hMerge"))
	{
		if(m_cellStack.empty())
		{
			rqst->handled = false;
			rqst->valid = false;
			return;
		}

		OXML_Element_Cell* cell = m_cellStack.top();				
		cell->setHorizontalMergeStart(false); //default to continue if the attribute is missing
		const gchar* val = attrMatches(NS_W_KEY, "val", rqst->ppAtts);
		if(val && !strcmp(val, "restart")) 
		{
			cell->setHorizontalMergeStart(true);
		}
		rqst->handled = true;
	}

	//Table Properties
	else if(nameMatches(rqst->pName, NS_W_KEY, "gridCol") && 
			contextMatches(rqst->context->back(), NS_W_KEY, "tblGrid"))
	{
		if(m_tableStack.empty())
		{
			rqst->handled = false;
			rqst->valid = false;
			return;
		}

		OXML_Element_Table* table = m_tableStack.top();				
		const gchar* w = attrMatches(NS_W_KEY, "w", rqst->ppAtts);
		if(w) 
		{
			//append this width to table-column-props property
			const gchar* tableColumnProps = NULL;
			UT_Error ret = table->getProperty("table-column-props", tableColumnProps);
			if((ret != UT_OK) || !tableColumnProps)
				tableColumnProps = "";				
			std::string cols(tableColumnProps);
			cols += _TwipsToPoints(w);
			cols += "pt/";
			ret = table->setProperty("table-column-props", cols);
			if(ret != UT_OK)
				UT_DEBUGMSG(("FRT:OpenXML importer can't set table-column-props:%s\n", cols.c_str()));				
		}
		rqst->handled = true;
	}
	else if(nameMatches(rqst->pName, NS_W_KEY, "trHeight") && 
			contextMatches(rqst->context->back(), NS_W_KEY, "trPr"))
	{
		if(m_tableStack.empty())
		{
			rqst->handled = false;
			rqst->valid = false;
			return;
		}

		OXML_Element_Table* table = m_tableStack.top();				
		const gchar* val = attrMatches(NS_W_KEY, "val", rqst->ppAtts);
		if(val) 
		{
			const gchar* tableRowHeights = NULL;
			UT_Error ret = table->getProperty("table-row-heights", tableRowHeights);
			if((ret != UT_OK) || !tableRowHeights)
				tableRowHeights = "";				
			std::string rowHeights(tableRowHeights);
			rowHeights += _TwipsToPoints(val);
			rowHeights += "pt/";
			ret = table->setProperty("table-row-heights", rowHeights);
			if(ret != UT_OK)
				UT_DEBUGMSG(("FRT:OpenXML importer can't set table-row-heights:%s\n", rowHeights.c_str()));				
		}
		rqst->handled = true;
	}
	else if(nameMatches(rqst->pName, NS_W_KEY, "left") ||
			nameMatches(rqst->pName, NS_W_KEY, "right") ||
			nameMatches(rqst->pName, NS_W_KEY, "top") ||
			nameMatches(rqst->pName, NS_W_KEY, "bottom"))
	{
		rqst->handled = true;
		const gchar* color = attrMatches(NS_W_KEY, "color", rqst->ppAtts);
		const gchar* sz = attrMatches(NS_W_KEY, "sz", rqst->ppAtts);
		const gchar* val = attrMatches(NS_W_KEY, "val", rqst->ppAtts);

		UT_Error ret = UT_OK;

		std::string borderName(rqst->pName);
		borderName = borderName.substr(strlen(NS_W_KEY)+1);
		if(!borderName.compare("bottom"))
			borderName = "bot";

		std::string borderStyle = borderName + "-style";
		std::string borderColor = borderName + "-color";
		std::string borderThickness = borderName + "-thickness";

		OXML_Element* element = NULL;

		if(rqst->context->empty())
		{
			rqst->handled = false;
			rqst->valid = false;
			return;
		}

		if(contextMatches(rqst->context->back(), NS_W_KEY, "tcBorders"))
			element = m_cellStack.empty() ? NULL : m_cellStack.top();
		else if(contextMatches(rqst->context->back(), NS_W_KEY, "tblBorders"))
			element = m_tableStack.empty() ? NULL : m_tableStack.top();

		if(!element)
		{
			rqst->handled = false;
			rqst->valid = false;
			return;
		}

		if(color && strcmp(color, "auto")) 
		{
			ret = element->setProperty(borderColor, color);
			if(ret != UT_OK)
				UT_DEBUGMSG(("FRT:OpenXML importer can't set %s:%s\n", borderColor.c_str(), color));	
		}
		if(sz) 
		{
			std::string szVal(_EighthPointsToPoints(sz));
			szVal += "pt";
			ret = element->setProperty(borderThickness, szVal);
			if(ret != UT_OK)
				UT_DEBUGMSG(("FRT:OpenXML importer can't set %s:%s\n", borderThickness.c_str(), color));	
		}

		std::string styleValue = "1"; //single line border by default
		if(val)
		{
			if(!strcmp(val, "dashed"))
				styleValue = "0"; 
		}

		ret = element->setProperty(borderStyle, styleValue);
		if(ret != UT_OK)
			UT_DEBUGMSG(("FRT:OpenXML importer can't set %s:0\n", borderStyle.c_str()));

	}
	else if(nameMatches(rqst->pName, NS_W_KEY, "shd"))
	{
		const gchar* fill = attrMatches(NS_W_KEY, "fill", rqst->ppAtts);

		UT_Error ret = UT_OK;
		OXML_Element* element = NULL;

		if(rqst->context->empty())
		{
			rqst->handled = false;
			rqst->valid = false;
			return;
		}

		if(contextMatches(rqst->context->back(), NS_W_KEY, "tcPr"))
			element = m_cellStack.empty() ? NULL : m_cellStack.top();
		else if(contextMatches(rqst->context->back(), NS_W_KEY, "tblPr"))
			element = m_tableStack.empty() ? NULL : m_tableStack.top();

		if(!element)
		{
			rqst->handled = false;
			rqst->valid = false;
			return;
		}

		if(fill && strcmp(fill, "auto")) 
		{
			ret = element->setProperty("background-color", fill);
			if(ret != UT_OK)
				UT_DEBUGMSG(("FRT:OpenXML importer can't set background-color:%s\n", fill));	
		}
		rqst->handled = true;
	}
	else if(nameMatches(rqst->pName, NS_W_KEY, "tblStyle"))
	{
		if(m_tableStack.empty())
		{
			rqst->handled = false;
			rqst->valid = false;
			return;
		}

		OXML_Element_Table* table = m_tableStack.top();				
		const gchar* val = attrMatches(NS_W_KEY, "val", rqst->ppAtts);
		if(val && table) 
		{
			std::string styleName(val);
			OXML_Document* doc = OXML_Document::getInstance();
			if(doc)
				table->applyStyle(doc->getStyleById(styleName));
		}
		rqst->handled = true;
	}
	else if(nameMatches(rqst->pName, NS_W_KEY, "tblPr"))
	{
		if(m_tableStack.empty())
		{
			//we must be in tblStyle in styles, so let's push the table instance to m_tableStack
			OXML_Element_Table* tbl = static_cast<OXML_Element_Table*>(get_pointer(rqst->stck->top()));
			m_tableStack.push(tbl);
		}
		rqst->handled = true;
	}
	else if(nameMatches(rqst->pName, NS_W_KEY, "trPr"))
	{
		if(m_rowStack.empty())
		{
			//we must be in styles, so let's push the row instance to m_rowStack
			OXML_Element_Row* row = static_cast<OXML_Element_Row*>(get_pointer(rqst->stck->top()));
			m_rowStack.push(row);
		}
		rqst->handled = true;
	}
	else if(nameMatches(rqst->pName, NS_W_KEY, "tcPr"))
	{
		if(m_cellStack.empty())
		{
			//we must be in styles, so let's push the cell instance to m_cellStack
			OXML_Element_Cell* cell = static_cast<OXML_Element_Cell*>(get_pointer(rqst->stck->top()));
			m_cellStack.push(cell);
		}
		rqst->handled = true;
	}
	//TODO: more coming here
}
Example #4
0
void Jacobi::operator () (unsigned p,QType result[]) const
 {
  LogCountTable table(*this,p);
  
  for(unsigned k=1; k<p-1 ;k++,result+=p) table(k,result);
 }
Example #5
0
inline
bool
Table::iterator::equal( const iterator & it ) const
{
    return table().data_ == it.table().data_ && row_ == it.row_ ;
}
Example #6
0
void triple_apply_pagerank(sgraph& g, size_t& num_iter, double& total_pagerank, double& total_delta) {
  typedef sgraph_compute::sgraph_engine<flexible_type>::graph_data_type graph_data_type;
  typedef sgraph::edge_direction edge_direction;

  // initialize the pagerank, previous pagerank, and delta fields of every vertex
  g.init_vertex_field(PAGERANK_COLUMN, reset_probability);
  g.init_vertex_field(PREV_PAGERANK_COLUMN, 1.0);
  g.init_vertex_field(DELTA_COLUMN, 0.0);

  // Initialize degree count
  sgraph_compute::sgraph_engine<flexible_type> ga;
  auto degrees = ga.gather(
          g,
          [=](const graph_data_type& center,
              const graph_data_type& edge,
              const graph_data_type& other,
              edge_direction edgedir,
              flexible_type& combiner) {
              combiner += 1;
          },
          flexible_type(0),
          edge_direction::OUT_EDGE);
  g.add_vertex_field(degrees, OUT_DEGREE_COLUMN);

  num_iter = 0;
  total_delta = 0.0;
  total_pagerank = 0.0;
  timer mytimer;

  // Triple apply
  double w = (1 - reset_probability);
  const size_t degree_idx = g.get_vertex_field_id(OUT_DEGREE_COLUMN);
  const size_t pr_idx = g.get_vertex_field_id(PAGERANK_COLUMN);
  const size_t old_pr_idx = g.get_vertex_field_id(PREV_PAGERANK_COLUMN);

  sgraph_compute::triple_apply_fn_type apply_fn =
    [&](sgraph_compute::edge_scope& scope) {
      auto& source = scope.source();
      auto& target = scope.target();
      scope.lock_vertices();
      target[pr_idx] += w * source[old_pr_idx] / source[degree_idx];
      scope.unlock_vertices();
    };

  table_printer table({{"Iteration", 0},
                       {"L1 change in pagerank", 0}});
  table.print_header();

  for (size_t iter = 0; iter < max_iterations; ++iter) {
    if(cppipc::must_cancel()) {
      log_and_throw(std::string("Toolkit cancelled by user."));
    }

    mytimer.start();
    ++num_iter;

    g.init_vertex_field(PAGERANK_COLUMN, reset_probability);

    sgraph_compute::triple_apply(g, apply_fn, {PAGERANK_COLUMN});

    // compute the change in pagerank
    auto delta = sgraph_compute::vertex_apply(
        g,
        flex_type_enum::FLOAT,
        [&](const std::vector<flexible_type>& vdata) {
          return std::abs((double)(vdata[pr_idx]) - (double)(vdata[old_pr_idx]));
        });

    // make the current pagerank the old pagerank
    g.copy_vertex_field(PAGERANK_COLUMN, PREV_PAGERANK_COLUMN);
    g.replace_vertex_field(delta, DELTA_COLUMN);

    total_delta = 
        sgraph_compute::vertex_reduce<double>(g, 
                               DELTA_COLUMN,
                               [](const flexible_type& v, double& acc) {
                                 acc += (flex_float)v;
                               },
                               [](const double& v, double& acc) {
                                 acc += v;
                               });


    table.print_row(iter+1, total_delta);

    // check convergence
    if (total_delta < threshold) {
      break;
    }
  } // end of pagerank iterations

  table.print_footer();

  // cleanup
  g.remove_vertex_field(PREV_PAGERANK_COLUMN);
  g.remove_vertex_field(OUT_DEGREE_COLUMN);
  total_pagerank =
      sgraph_compute::vertex_reduce<double>(g, 
                                     PAGERANK_COLUMN,
                                     [](const flexible_type& v, double& acc) {
                                       acc += (flex_float)v;
                                     },
                                     [](const double& v, double& acc) {
                                       acc += v;
                                     });
}
Example #7
0
 IntVector ZFan::getConeIndices(int dimension, int index, bool orbit, bool maximal)const
 {
   assert(index>=0);
   assert(index<numberOfConesOfDimension(dimension,orbit,maximal));
   return table(orbit,maximal)[dimension][index];
 }
Example #8
0
/*
* Description:  DWARF line information processing.
*
****************************************************************************/


#include <stdio.h>
#include <stdlib.h>
#include <setjmp.h>
#include <string.h>

#include "wdglb.h"
#include "wdfunc.h"

static readable_name readableStandardOps[] = {
    table( DW_LNS_copy ),
    table( DW_LNS_advance_pc ),
    table( DW_LNS_advance_line ),
    table( DW_LNS_set_file ),
    table( DW_LNS_set_column ),
    table( DW_LNS_negate_stmt ),
    table( DW_LNS_set_basic_block ),
    table( DW_LNS_const_add_pc ),
    table( DW_LNS_fixed_advance_pc )
};
#define NUM_STANDARD_OPS \
    ( sizeof( readableStandardOps ) / sizeof( readableStandardOps[0] ) )

typedef struct {
    uint_32                     address;
    uint                        file;
Example #9
0
void Repo::loadGlobalData(bool allowFailure /* = false */) {
  m_lsrp.load();

  if (!RuntimeOption::RepoAuthoritative) return;

  std::vector<std::string> failures;

  /*
   * This should probably just go to the Local repo always, except
   * that our unit test suite is currently running RepoAuthoritative
   * tests with the compiled repo as the Central repo.
   */
  for (int repoId = RepoIdCount - 1; repoId >= 0; --repoId) {
    if (repoName(repoId).empty()) {
      // The repo wasn't loadable
      continue;
    }
    try {
      RepoStmt stmt(*this);
      const auto& tbl = table(repoId, "GlobalData");
      stmt.prepare(
        folly::format(
          "SELECT count(*), data from {};", tbl
        ).str()
      );
      RepoTxn txn(*this);
      RepoTxnQuery query(txn, stmt);
      query.step();
      if (!query.row()) {
        throw RepoExc("Can't find table %s", tbl.c_str());
      };
      int val;
      query.getInt(0, val);
      if (val == 0) {
        throw RepoExc("No rows in %s. Did you forget to compile that file with "
                      "this HHVM version?", tbl.c_str());
      }
      BlobDecoder decoder = query.getBlob(1);
      decoder(s_globalData);

      txn.commit();
    } catch (RepoExc& e) {
      failures.push_back(repoName(repoId) + ": "  + e.msg());
      continue;
    }

    // TODO: this should probably read out the other elements of the global data
    // which control Option or RuntimeOption values -- the others are read out
    // in an inconsistent and ad-hoc manner. But I don't understand their uses
    // and interactions well enough to feel comfortable fixing now.
    RuntimeOption::PHP7_IntSemantics = s_globalData.PHP7_IntSemantics;
    RuntimeOption::PHP7_ScalarTypes  = s_globalData.PHP7_ScalarTypes;
    RuntimeOption::PHP7_Substr       = s_globalData.PHP7_Substr;
    RuntimeOption::AutoprimeGenerators = s_globalData.AutoprimeGenerators;
    HHBBC::options.HardTypeHints = s_globalData.HardTypeHints;
    HHBBC::options.HardReturnTypeHints = s_globalData.HardReturnTypeHints;
    return;
  }

  if (allowFailure) return;

  if (failures.empty()) {
    std::fprintf(stderr, "No repo was loadable. Check all the possible repo "
                 "locations (Repo.Central.Path, HHVM_REPO_CENTRAL_PATH, and "
                 "$HOME/.hhvm.hhbc) to make sure one of them is a valid "
                 "sqlite3 HHVM repo built with this exact HHVM version.\n");
  } else {
    // We should always have a global data section in RepoAuthoritative
    // mode, or the repo is messed up.
    std::fprintf(stderr, "Failed to load Repo::GlobalData:\n");
    for (auto& f : failures) {
      std::fprintf(stderr, "  %s\n", f.c_str());
    }
  }

  assert(Process::IsInMainThread());
  exit(1);
}
void StorageSystemMutations::fillData(MutableColumns & res_columns, const Context & context, const SelectQueryInfo & query_info) const
{
    /// Collect a set of *MergeTree tables.
    std::map<String, std::map<String, StoragePtr>> merge_tree_tables;
    for (const auto & db : context.getDatabases())
    {
        if (context.hasDatabaseAccessRights(db.first))
        {
            for (auto iterator = db.second->getIterator(context); iterator->isValid(); iterator->next())
            {
                if (dynamic_cast<const StorageMergeTree *>(iterator->table().get())
                    || dynamic_cast<const StorageReplicatedMergeTree *>(iterator->table().get()))
                {
                    merge_tree_tables[db.first][iterator->name()] = iterator->table();
                }
            }
        }
    }

    MutableColumnPtr col_database_mut = ColumnString::create();
    MutableColumnPtr col_table_mut = ColumnString::create();

    for (auto & db : merge_tree_tables)
    {
        for (auto & table : db.second)
        {
            col_database_mut->insert(db.first);
            col_table_mut->insert(table.first);
        }
    }

    ColumnPtr col_database = std::move(col_database_mut);
    ColumnPtr col_table = std::move(col_table_mut);

    /// Determine what tables are needed by the conditions in the query.
    {
        Block filtered_block
        {
            { col_database, std::make_shared<DataTypeString>(), "database" },
            { col_table, std::make_shared<DataTypeString>(), "table" },
        };

        VirtualColumnUtils::filterBlockWithQuery(query_info.query, filtered_block, context);

        if (!filtered_block.rows())
            return;

        col_database = filtered_block.getByName("database").column;
        col_table = filtered_block.getByName("table").column;
    }

    for (size_t i_storage = 0; i_storage < col_database->size(); ++i_storage)
    {
        auto database = (*col_database)[i_storage].safeGet<String>();
        auto table = (*col_table)[i_storage].safeGet<String>();

        std::vector<MergeTreeMutationStatus> statuses;
        {
            const IStorage * storage = merge_tree_tables[database][table].get();
            if (const auto * merge_tree = dynamic_cast<const StorageMergeTree *>(storage))
                statuses = merge_tree->getMutationsStatus();
            else if (const auto * replicated = dynamic_cast<const StorageReplicatedMergeTree *>(storage))
                statuses = replicated->getMutationsStatus();
        }

        for (const MergeTreeMutationStatus & status : statuses)
        {
            Array block_partition_ids;
            block_partition_ids.reserve(status.block_numbers.size());
            Array block_numbers;
            block_numbers.reserve(status.block_numbers.size());
            for (const auto & pair : status.block_numbers)
            {
                block_partition_ids.emplace_back(pair.first);
                block_numbers.emplace_back(pair.second);
            }

            size_t col_num = 0;
            res_columns[col_num++]->insert(database);
            res_columns[col_num++]->insert(table);

            res_columns[col_num++]->insert(status.id);
            res_columns[col_num++]->insert(status.command);
            res_columns[col_num++]->insert(UInt64(status.create_time));
            res_columns[col_num++]->insert(block_partition_ids);
            res_columns[col_num++]->insert(block_numbers);
            res_columns[col_num++]->insert(status.parts_to_do);
            res_columns[col_num++]->insert(status.is_done);
        }
    }
}
Example #11
0
void toResultDrawing::query(const QString &, toQueryParams const& params)
{
    toResult::setParams(params);
    typedef QPair<QString, QString> Reference;
    toConnection &conn = connection();

    m_dotGraphView->initEmpty();
    QString schema(params.at(0)), table(params.at(1));
    QSet<QString> tables;
    QSet<Reference> references;

    tables.insert(table);

    DotGraph newGraph("dot");
    QMap<QString,QString> ga;
    ga["id"] = "Schema";
    ga["compound"] = "true";
    ga["shape"] = "box";
    ga["rankdir"] = "BT"; // BOTTOM to TOP arrows

    newGraph.setGraphAttributes(ga);

    toQValue c1, c2, c3, c4, c5, c6, c7, c8, c9;
    // TODO: use toCache here - additional attributes
    toConnectionSubLoan c(conn);
    toQuery QueryC(c, SQLTableFKWithDistance, toQueryParams() << schema.toUpper() << distance << table.toUpper());
    while (!QueryC.eof())
    {
        c1 = QueryC.readValue();
        c2 = QueryC.readValue();
        c3 = QueryC.readValue();
        c4 = QueryC.readValue();
        c5 = QueryC.readValue();
        c6 = QueryC.readValue();
        c7 = QueryC.readValue();
        c8 = QueryC.readValue();
        c9 = QueryC.readValue();

        if ( c5.isNull() && c1.isNull())
        {
            // Here collect FK details (a column list for compound keys)
        }

        if ( c5.isNull() && !c1.isNull()) // c5 (column_name) is null - see rollup def
        {
            tables.insert((QString)c4); // table_name
            tables.insert((QString)c7); // r_table_name
            references.insert( Reference((QString)c4, (QString)c7));
        }
    }


    Q_FOREACH(QString const&t, tables)
    {
        QMap<QString,QString> ta; // table attributes
        ta["name"] = t;
        ta["label"] = t;
        ta["fontsize"] = "12";
        ta["comment"]= t;
        ta["id"]= t;
        ta["tooltip"] = t;
        newGraph.addNewNode(ta);
    }
Example #12
0
int Discrete::find_max_floor(int ncase, int ndrop)
{
    vector<vector<int> > table(ncase+1, vector<int>(ndrop+1, -1));
    return find_max_floor_helper(table, ncase, ndrop);
}
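The helper called above is not part of this snippet; the following is a hypothetical sketch of one way find_max_floor_helper could look, assuming the classic egg-drop recurrence f(n, d) = f(n - 1, d - 1) + f(n, d - 1) + 1 and the -1-initialized memo table built by find_max_floor.

#include <vector>
using std::vector;

// Hypothetical helper (not from the original source): maximum number of floors
// that can be fully determined with `ncase` test devices and `ndrop` drops,
// memoized in `table` (entries initialized to -1 by the caller).
int find_max_floor_helper(vector<vector<int> > &table, int ncase, int ndrop)
{
    if (ncase == 0 || ndrop == 0) return 0;            // no devices or no drops left
    if (table[ncase][ndrop] != -1) return table[ncase][ndrop];
    int floors = find_max_floor_helper(table, ncase - 1, ndrop - 1)  // device breaks
               + find_max_floor_helper(table, ncase, ndrop - 1)      // device survives
               + 1;                                                  // the floor just tested
    return table[ncase][ndrop] = floors;
}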
Example #13
0
bool CharProperty::compile(const char *cfile,
                           const char *ufile,
                           const char *ofile) {
  scoped_fixed_array<char, BUF_SIZE> line;
  scoped_fixed_array<char *, 512> col;
  size_t id = 0;
  std::vector<Range> range;
  std::map<std::string, CharInfo> category;
  std::vector<std::string> category_ary;
  std::ifstream ifs(WPATH(cfile));
  std::istringstream iss(CHAR_PROPERTY_DEF_DEFAULT);
  std::istream *is = &ifs;

  if (!ifs) {
    std::cerr << cfile
              << " is not found. minimum setting is used" << std::endl;
    is = &iss;
  }

  while (is->getline(line.get(), line.size())) {
    if (std::strlen(line.get()) == 0 || line[0] == '#') {
      continue;
    }
    const size_t size = tokenize2(line.get(), "\t ", col.get(), col.size());
    CHECK_DIE(size >= 2) << "format error: " << line.get();

    // Format: 0xFFFF..0xFFFF CATEGORY1 CATEGORY2 ... # comment
    if (std::strncmp(col[0], "0x", 2) == 0) {
      std::string low = col[0];
      std::string high;
      size_t pos = low.find("..");

      if (pos != std::string::npos) {
        high = low.substr(pos + 2, low.size() - pos - 2);
        low  = low.substr(0, pos);
      } else {
        high = low;
      }

      Range r;
      r.low = atohex(low.c_str());
      r.high = atohex(high.c_str());

      CHECK_DIE(r.low >= 0 && r.low < 0xffff &&
                r.high >= 0 && r.high < 0xffff &&
                r.low <= r.high)
          << "range error: low=" << r.low << " high=" << r.high;

      for (size_t i = 1; i < size; ++i) {
        if (col[i][0] == '#') {
          break;  // skip comments
        }
        CHECK_DIE(category.find(std::string(col[i])) != category.end())
            << "category [" << col[i] << "] is undefined";
        r.c.push_back(col[i]);
      }
      range.push_back(r);
    } else {
      CHECK_DIE(size >= 4) << "format error: " << line.get();

      std::string key = col[0];
      CHECK_DIE(category.find(key) == category.end())
          << "category " << key << " is already defined";

      CharInfo c;
      std::memset(&c, 0, sizeof(c));
      c.invoke  = std::atoi(col[1]);
      c.group   = std::atoi(col[2]);
      c.length  = std::atoi(col[3]);
      c.default_type = id++;

      category.insert(std::pair<std::string, CharInfo>(key, c));
      category_ary.push_back(key);
    }
  }

  CHECK_DIE(category.size() < 18) << "too many categories(>= 18)";

  CHECK_DIE(category.find("DEFAULT") != category.end())
      << "category [DEFAULT] is undefined";

  CHECK_DIE(category.find("SPACE") != category.end())
      << "category [SPACE] is undefined";

  std::istringstream iss2(UNK_DEF_DEFAULT);
  std::ifstream ifs2(WPATH(ufile));
  std::istream *is2 = &ifs2;

  if (!ifs2) {
    std::cerr << ufile
              << " is not found. minimum setting is used." << std::endl;
    is2 = &iss2;
  }

  std::set<std::string> unk;
  while (is2->getline(line.get(), line.size())) {
    const size_t n = tokenizeCSV(line.get(), col.get(), 2);
    CHECK_DIE(n >= 1) << "format error: " << line.get();
    const std::string key = col[0];
    CHECK_DIE(category.find(key) != category.end())
        << "category [" << key << "] is undefined in " << cfile;
    unk.insert(key);
  }

  for (std::map<std::string, CharInfo>::const_iterator it = category.begin();
       it != category.end();
       ++it) {
    CHECK_DIE(unk.find(it->first) != unk.end())
        << "category [" << it->first << "] is undefined in " << ufile;
  }

  std::vector<CharInfo> table(0xffff);
  {
    std::vector<std::string> tmp;
    tmp.push_back("DEFAULT");
    const CharInfo c = encode(tmp, &category);
    std::fill(table.begin(), table.end(), c);
  }

  for (std::vector<Range>::const_iterator it = range.begin();
       it != range.end();
       ++it) {
    const CharInfo c = encode(it->c, &category);
    for (int i = it->low; i <= it->high; ++i) {
      table[i] = c;
    }
  }

  // output binary table
  {
    std::ofstream ofs(WPATH(ofile), std::ios::binary|std::ios::out);
    CHECK_DIE(ofs) << "permission denied: " << ofile;

    unsigned int size = static_cast<unsigned int>(category.size());
    ofs.write(reinterpret_cast<const char*>(&size), sizeof(size));
    for (std::vector<std::string>::const_iterator it = category_ary.begin();
         it != category_ary.end();
         ++it) {
      char buf[32];
      std::fill(buf, buf + sizeof(buf), '\0');
      std::strncpy(buf, it->c_str(), sizeof(buf) - 1);
      ofs.write(reinterpret_cast<const char*>(buf), sizeof(buf));
    }
    ofs.write(reinterpret_cast<const char*>(&table[0]),
              sizeof(CharInfo) * table.size());
    ofs.close();
  }

  return true;
}
Example #14
0
//static
void LLLuaTable::make_table(lua_State* L, const std::vector< lua_Number > vec)
{
	LLLuaTable table(L);
	for(std::vector<lua_Number>::const_iterator itr = vec.begin(); itr != vec.end(); itr++)
		table.pushvalue(L, (*itr));
}
void ReplicasStatusHandler::handleRequest(Poco::Net::HTTPServerRequest & request, Poco::Net::HTTPServerResponse & response)
{
	try
	{
		HTMLForm params(request);

		/// Even if the lag is small, output detailed information about it.
		bool verbose = params.get("verbose", "") == "1";

		const MergeTreeSettings & settings = context.getMergeTreeSettings();

		bool ok = true;
		std::stringstream message;

		auto databases = context.getDatabases();

		/// Iterate over all replicated tables.
		for (const auto & db : databases)
		{
			for (auto iterator = db.second->getIterator(); iterator->isValid(); iterator->next())
			{
				auto & table = iterator->table();
				StorageReplicatedMergeTree * table_replicated = typeid_cast<StorageReplicatedMergeTree *>(table.get());

				if (!table_replicated)
					continue;

				time_t absolute_delay = 0;
				time_t relative_delay = 0;

				table_replicated->getReplicaDelays(absolute_delay, relative_delay);

				if ((settings.min_absolute_delay_to_close && absolute_delay >= static_cast<time_t>(settings.min_absolute_delay_to_close))
					|| (settings.min_relative_delay_to_close && relative_delay >= static_cast<time_t>(settings.min_relative_delay_to_close)))
					ok = false;

				message << backQuoteIfNeed(db.first) << "." << backQuoteIfNeed(iterator->name())
					<< ":\tAbsolute delay: " << absolute_delay << ". Relative delay: " << relative_delay << ".\n";
			}
		}

		setResponseDefaultHeaders(response);

		if (ok && !verbose)
		{
			const char * data = "Ok.\n";
			response.sendBuffer(data, strlen(data));
		}
		else
		{
			response.send() << message.rdbuf();
		}
	}
	catch (...)
	{
		tryLogCurrentException("ReplicasStatusHandler");

		try
		{
			response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_INTERNAL_SERVER_ERROR);

			if (!response.sent())
			{
				/// Nothing has been sent yet, and we do not even know whether the response needs to be compressed.
				response.send() << getCurrentExceptionMessage(false) << std::endl;
			}
		}
		catch (...)
		{
			LOG_ERROR((&Logger::get("ReplicasStatusHandler")), "Cannot send exception to client");
		}
	}
}
Example #16
0
/** Compute the elastic (large!) deformation of a block of material.
 *  See the problem description in ref06::PulledSheetProblem for the boundary
 *  conditions. The problem is non-linear and therefore a Newton method is
 *  used. Moreover, the applied load (displacement or traction) is divided into
 *  load steps.
 *
 *  New features:
 *  - vectorial problem: DoFs are not scalar anymore
 *  - hyper-elasticity
 *
 */
int ref06::compressible( int argc, char * argv[] )
{
    // basic attributes of the computation
    const unsigned    geomDeg  = 1;
    const unsigned    fieldDeg = 2;
    const base::Shape shape    = base::HyperCubeShape<SPACEDIM>::value;

    // typedef mat::hypel::StVenant Material;
    typedef mat::hypel::NeoHookeanCompressible Material;

    // usage message
    if ( argc != 3 ) {
        std::cout << "Usage:  " << argv[0] << "  mesh.smf input.dat \n";
        return 0;
    }

    // read name of input file
    const std::string meshFile  = boost::lexical_cast<std::string>( argv[1] );
    const std::string inputFile = boost::lexical_cast<std::string>( argv[2] );

    // read from input file
    double E, nu, pull, traction, tolerance;
    unsigned maxIter, loadSteps;
    bool dispControlled;
    {    
        //Feed properties parser with the variables to be read
        base::io::PropertiesParser prop;
        prop.registerPropertiesVar( "E",                E );
        prop.registerPropertiesVar( "nu",               nu );
        prop.registerPropertiesVar( "pull",             pull );
        prop.registerPropertiesVar( "maxIter",          maxIter );
        prop.registerPropertiesVar( "loadSteps",        loadSteps );
        prop.registerPropertiesVar( "traction",         traction );
        prop.registerPropertiesVar( "dispControlled",   dispControlled );
        prop.registerPropertiesVar( "tolerance",        tolerance );

        // Read variables from the input file
        std::ifstream inp( inputFile.c_str()  );
        VERIFY_MSG( inp.is_open(), "Cannot open input file" );
        prop.readValues( inp );
        inp.close( );

        // Make sure all variables have been found
        if ( not prop.isEverythingRead() ) {
            prop.writeUnread( std::cerr );
            VERIFY_MSG( false, "Could not find above variables" );
        }
    }

    // find base name from mesh file
    const std::string baseName = base::io::baseName( meshFile, ".smf" );

    //--------------------------------------------------------------------------
    // define a mesh
    typedef base::Unstructured<shape,geomDeg>    Mesh;
    const unsigned dim = Mesh::Node::dim;

    // create a mesh and read from input
    Mesh mesh;
    {
        std::ifstream smf( meshFile.c_str() );
        base::io::smf::readMesh( smf, mesh );
        smf.close();
    }

    // quadrature objects for volume and surface
    const unsigned kernelDegEstimate = 3;
    typedef base::Quadrature<kernelDegEstimate,shape> Quadrature;
    Quadrature quadrature;
    typedef base::SurfaceQuadrature<kernelDegEstimate,shape> SurfaceQuadrature;
    SurfaceQuadrature surfaceQuadrature;

    // Create a field
    const unsigned    doFSize = dim;
    typedef base::fe::Basis<shape,fieldDeg>        FEBasis;
    typedef base::Field<FEBasis,doFSize>           Field;
    typedef Field::DegreeOfFreedom                 DoF;
    Field field;

    // generate DoFs from mesh
    base::dof::generate<FEBasis>( mesh, field );

    // Creates a list of <Element,faceNo> pairs along the boundary
    base::mesh::MeshBoundary meshBoundary;
    meshBoundary.create( mesh.elementsBegin(), mesh.elementsEnd() );

    // Create a boundary mesh from this list
    typedef base::mesh::BoundaryMeshBinder<Mesh::Element>::Type BoundaryMesh;
    BoundaryMesh boundaryMesh;
    {
        // Create a real mesh object from this list
        base::mesh::generateBoundaryMesh( meshBoundary.begin(),
                                          meshBoundary.end(),
                                          mesh, boundaryMesh );
    }

    // initial pull = (total amount) / (number of steps)
    const double firstPull = pull / static_cast<double>( loadSteps );

    // constrain the boundary
    base::dof::constrainBoundary<FEBasis>( meshBoundary.begin(),
                                           meshBoundary.end(),
                                           mesh, field, 
                                           boost::bind(
                                               &ref06::PulledSheet<dim>::dirichletBC<DoF>,
                                               _1, _2, dispControlled, firstPull ) );

    // Bind the fields together
    typedef base::asmb::FieldBinder<Mesh,Field> FieldBinder;
    FieldBinder fieldBinder( mesh, field );
    typedef FieldBinder::TupleBinder<1,1>::Type FTB;

    typedef base::asmb::SurfaceFieldBinder<BoundaryMesh,Field> SurfaceFieldBinder;
    SurfaceFieldBinder surfaceFieldBinder( boundaryMesh, field );
    typedef SurfaceFieldBinder::TupleBinder<1>::Type SFTB;

    // material object
    Material material( mat::Lame::lambda( E, nu), mat::Lame::mu( E, nu ) );

    // matrix kernel
    typedef solid::HyperElastic<Material,FTB::Tuple> HyperElastic;
    HyperElastic hyperElastic( material );
            
    // Number the degrees of freedom
    const std::size_t numDofs =
        base::dof::numberDoFsConsecutively( field.doFsBegin(), field.doFsEnd() );
    std::cout << "# Number of dofs " << numDofs << std::endl;

    // create table for writing the convergence behaviour of the nonlinear solves
    base::io::Table<4>::WidthArray widths = {{ 2, 5, 5, 15 }};
    base::io::Table<4> table( widths );
    table % "Step" % "Iter" % "|F|"  % "|x|";
    std::cout << "#" << table;

    // write a vtk file
    ref06::writeVTKFile( baseName, 0, mesh, field, material );


    //--------------------------------------------------------------------------
    // Loop over load steps
    //--------------------------------------------------------------------------
    for ( unsigned step = 0; step < loadSteps; step++ ) {

        // rescale constraints in every load step: (newValue / oldValue)
        const double  pullFactor =
            (step == 0 ?
             static_cast<double>( step+1 ) :
             static_cast<double>( step+1 )/ static_cast<double>(step) );

        // scale constraints
        base::dof::scaleConstraints( field, pullFactor );

        //----------------------------------------------------------------------
        // Nonlinear iterations
        //----------------------------------------------------------------------
        unsigned iter = 0;
        while ( iter < maxIter ) {

            table % step % iter;
    
            // Create a solver object
            typedef base::solver::Eigen3           Solver;
            Solver solver( numDofs );

            // apply traction boundary condition, if problem is not disp controlled
            if ( not dispControlled ) {
                
                // value of applied traction
                const double tractionFactor =
                    traction * 
                    static_cast<double>(step+1) / static_cast<double>( loadSteps );

                // apply traction load
                base::asmb::neumannForceComputation<SFTB>(
                    surfaceQuadrature, solver,
                    surfaceFieldBinder,
                    boost::bind( &ref06::PulledSheet<dim>::neumannBC,
                                 _1, _2, tractionFactor ) );
            }

            // residual forces
            base::asmb::computeResidualForces<FTB>( quadrature, solver,
                                                    fieldBinder,
                                                    hyperElastic );
            
            // Compute element stiffness matrices and assemble them
            base::asmb::stiffnessMatrixComputation<FTB>( quadrature, solver,
                                                         fieldBinder,
                                                         hyperElastic );

            // Finalise assembly
            solver.finishAssembly();

            // norm of residual 
            const double conv1 = solver.norm();
            table % conv1;

            // convergence via residual norm
            if ( conv1 < tolerance * E ) { // note the tolerance multiplier
                std::cout << table;
                break;
            }

            // Solve
            //solver.choleskySolve();
            solver.cgSolve();
            
            // distribute results back to dofs
            base::dof::addToDoFsFromSolver( solver, field );

            // norm of displacement increment
            const double conv2 = solver.norm();
            table % conv2;
            std::cout << table;
            iter++;
            
            // convergence via increment
            if ( conv2 < tolerance ) break;
        }
        // Finished non-linear iterations
        //----------------------------------------------------------------------

        // warning
        if ( iter == maxIter ) {
            std::cout << "# (WW) Step " << step << " has not converged within "
                      << maxIter << " iterations \n";
        }

        // write a vtk file
        ref06::writeVTKFile( baseName, step+1, mesh, field, material );
        
    }
    // Finished load steps
    //--------------------------------------------------------------------------
    
    return 0;
}
Example #17
0
/* Return the amount of physical memory available.  */
double
physmem_available (void)
{
#if defined _SC_AVPHYS_PAGES && defined _SC_PAGESIZE
    {   /* This works on linux-gnu, solaris2 and cygwin.  */
        double pages = sysconf (_SC_AVPHYS_PAGES);
        double pagesize = sysconf (_SC_PAGESIZE);
        if (0 <= pages && 0 <= pagesize)
            return pages * pagesize;
    }
#endif

#if HAVE_PSTAT_GETSTATIC && HAVE_PSTAT_GETDYNAMIC
    {   /* This works on hpux11.  */
        struct pst_static pss;
        struct pst_dynamic psd;
        if (0 <= pstat_getstatic (&pss, sizeof pss, 1, 0)
        && 0 <= pstat_getdynamic (&psd, sizeof psd, 1, 0))
        {
            double pages = psd.psd_free;
            double pagesize = pss.page_size;
            if (0 <= pages && 0 <= pagesize)
                return pages * pagesize;
        }
    }
#endif

#if HAVE_SYSMP && defined MP_SAGET && defined MPSA_RMINFO && defined _SC_PAGESIZE
    {   /* This works on irix6. */
        struct rminfo realmem;
        if (sysmp (MP_SAGET, MPSA_RMINFO, &realmem, sizeof realmem) == 0)
        {
            double pagesize = sysconf (_SC_PAGESIZE);
            double pages = realmem.availrmem;
            if (0 <= pages && 0 <= pagesize)
                return pages * pagesize;
        }
    }
#endif

#if HAVE_TABLE && defined TBL_VMSTATS
    {   /* This works on Tru64 UNIX V4/5.  */
        struct tbl_vmstats vmstats;

        if (table (TBL_VMSTATS, 0, &vmstats, 1, sizeof (vmstats)) == 1)
        {
            double pages = vmstats.free_count;
            double pagesize = vmstats.pagesize;

            if (0 <= pages && 0 <= pagesize)
                return pages * pagesize;
        }
    }
#endif

#if HAVE_SYSCTL && defined HW_USERMEM
    {   /* This works on *bsd and darwin.  */
        unsigned int usermem;
        size_t len = sizeof usermem;
        static int mib[2] = { CTL_HW, HW_USERMEM };

        if (sysctl (mib, ARRAY_SIZE (mib), &usermem, &len, NULL, 0) == 0
                && len == sizeof (usermem))
            return (double) usermem;
    }
#endif

#if defined _WIN32
    {   /* this works on windows */
        PFN_MS_EX pfnex;
        HMODULE h = GetModuleHandle ("kernel32.dll");

        if (!h)
            return 0.0;

        /*  Use GlobalMemoryStatusEx if available.  */
        if ((pfnex = (PFN_MS_EX) GetProcAddress (h, "GlobalMemoryStatusEx")))
        {
            lMEMORYSTATUSEX lms_ex;
            lms_ex.dwLength = sizeof lms_ex;
            if (!pfnex (&lms_ex))
                return 0.0;
            return (double) lms_ex.ullAvailPhys;
        }

        /*  Fall back to GlobalMemoryStatus, which is always available
            but returns wrong results for physical memory > 4GB.  */
        else
        {
            MEMORYSTATUS ms;
            GlobalMemoryStatus (&ms);
            return (double) ms.dwAvailPhys;
        }
    }
#endif

    /* Guess 25% of physical memory.  */
    return physmem_total () / 4;
}
 std::vector<O_Page> 
 _page_class_id(const T_Page::Condition& c) const
 {
     T_Page table(c);
     return table.select(table._page_class_id() == _id());   
 }
Example #19
0
 int ZFan::numberOfConesOfDimension(int d, bool orbit, bool maximal)const
 {
   this->ensureComplex();
   return numberOf(table(orbit,maximal),d);
 }
 std::vector<O_Issue> 
 _issue_id(const T_Issue::Condition& c) const
 {
     T_Issue table(c);
     return table.select(table._issue_id() == _id());   
 }
Example #21
0
/*****************************************************
**
**   DasaExpert   ---   writeMore
**
******************************************************/
void DasaExpert::writeMore( Writer *writer, const Horoscope *horoscope, const int varga, const bool show_header )
{
    Formatter *formatter = Formatter::get();
    double startjd;
    unsigned int i;
    wxString s, s1, lord;

    VargaExpert vexpert;
    if ( show_header )
    {
        s.Printf( wxT( "%s %s" ), getName(), _( "Dasa" ) );
        writer->writeHeader1( s );
    }

    vector<Dasa*> v = getFirstLevel( horoscope, varga );
    vector<Dasa*> w;

    for( i = 0; i < v.size(); i++ )
    {
        if ( isRasiDasaExpert() ) lord = writer->getSignName( v[i]->getDasaLord(), TLARGE );
        else lord =  writer->getObjectName(v[i]->getDasaLord(), TLARGE, true );
        s.Printf( wxT( "%d. %s %s" ), i+1, _( "Mahadasa" ), (const wxChar*)lord );
        writer->writeHeader2( s );

        if ( ! v[i]->getExtraText().IsEmpty())
        {
            writer->writeLine( v[i]->getExtraText() );
        }

        startjd = Max( v[i]->getStartJD(), horoscope->getJD() );
        s.Printf( wxT( "%s: %s" ), _( "Start Date" ), (const wxChar*)formatter->getDateStringFromJD( startjd ) );
        writer->writeLine( s );
        s.Printf( wxT( "%s: %s" ), _( "End Date" ), (const wxChar*)formatter->getDateStringFromJD( v[i]->getEndJD() ) );
        writer->writeLine( s );
        getDasaDuration( s1, v[i]->getEndJD() - v[i]->getStartJD() );
        s.Printf( wxT( "%s: %s" ), _( "Duration" ), (const wxChar*)s1 );
        writer->writeLine( s );

        w = getNextLevel( v[i] );
        if ( w.size() == 0 ) continue;

        writer->writeHeader3( _( "Antardasas" ) );

        // Get the number of lines
        int lines = 1;
        for( unsigned int j = 0; j < w.size(); j++ )
        {
            if ( w[j]->getEndJD() >= horoscope->getJD()) lines++;
        }

        Table table( 4, lines );
        table.setHeader( 0, _( "Lord" ));
        table.setHeader( 1,  _( "Start Date" ));
        table.setHeader( 2,  _( "Final Date" ));
        table.setHeader( 3,  _( "Duration" ));

        int line = 1;
        for( unsigned int j = 0; j < w.size(); j++ )
        {
            if ( w[j]->getEndJD() >= horoscope->getJD())
            {
                if ( isRasiDasaExpert() ) lord = writer->getSignName(w[j]->getDasaLord(), TLARGE );
                else lord = writer->getObjectName(w[j]->getDasaLord(), TLARGE, true );
                table.setEntry( 0, line, lord );

                startjd = Max( w[j]->getStartJD(), horoscope->getJD() );
                table.setEntry( 1, line, formatter->getDateStringFromJD( startjd ) );

                table.setEntry( 2, line, formatter->getDateStringFromJD( w[j]->getEndJD() ) );

                getDasaDuration( s, w[j]->getEndJD() - w[j]->getStartJD() );
                table.setEntry( 3, line, s );
                line++;
            }
            delete w[j];
        }
        writer->writeTable( table );
    }
    for( i = 0; i < v.size(); i++ ) delete v[i];
}
Example #22
0
/**
 * Validate the provided failed blocks.
 *
 * This function checks if the specified failed blocks satisfy the redundancy
 * information using the data from the known valid parity blocks.
 *
 * It is similar to raid_check(), just with a different argument format.
 *
 * The number of failed blocks @nr must be strictly less than the number of
 * parities @nv, because you need one more parity to validate the recovery.
 *
 * No data or parity blocks are modified.
 *
 * @nr Number of failed data blocks.
 * @id[] Vector of @nr indexes of the failed data blocks.
 *   The indexes start from 0. They must be in order.
 * @nv Number of valid parity blocks.
 * @ip[] Vector of @nv indexes of the valid parity blocks.
 *   The indexes start from 0. They must be in order.
 * @nd Number of data blocks.
 * @size Size of the blocks pointed to by @v. It must be a multiple of 64.
 * @v Vector of pointers to the blocks of data and parity.
 *   It has (@nd + @ip[@nv - 1] + 1) elements. The starting elements are the
 *   blocks for data, followed by the parity blocks.
 *   Each block has @size bytes. 
 * @return 0 if the check is satisfied. -1 otherwise.
 */
static int raid_validate(int nr, int *id, int nv, int *ip, int nd, size_t size, void **vv)
{
	uint8_t **v = (uint8_t **)vv;
	const uint8_t *T[RAID_PARITY_MAX][RAID_PARITY_MAX];
	uint8_t G[RAID_PARITY_MAX * RAID_PARITY_MAX];
	uint8_t V[RAID_PARITY_MAX * RAID_PARITY_MAX];
	size_t i;
	int j, k, l;

	BUG_ON(nr >= nv);

	/* setup the coefficients matrix */
	for (j = 0; j < nr; ++j)
		for (k = 0; k < nr; ++k)
			G[j * nr + k] = A(ip[j], id[k]);

	/* invert it to solve the system of linear equations */
	raid_invert(G, V, nr);

	/* get multiplication tables */
	for (j = 0; j < nr; ++j)
		for (k = 0; k < nr; ++k)
			T[j][k] = table(V[j * nr + k]);

	/* check all positions */
	for (i = 0; i < size; ++i) {
		uint8_t p[RAID_PARITY_MAX];

		/* get parity */
		for (j = 0; j < nv; ++j)
			p[j] = v[nd + ip[j]][i];

		/* compute delta parity, skipping broken disks */
		for (j = 0, k = 0; j < nd; ++j) {
			uint8_t b;

			/* skip broken disks */
			if (k < nr && id[k] == j) {
				++k;
				continue;
			}

			b = v[j][i];
			for (l = 0; l < nv; ++l)
				p[l] ^= gfmul[b][gfgen[ip[l]][j]];
		}

		/* reconstruct data */
		for (j = 0; j < nr; ++j) {
			uint8_t b = 0;
			int idj = id[j];

			/* recompute the data */
			for (k = 0; k < nr; ++k)
				b ^= T[j][k][p[k]];

			/* add the parity contribution of the reconstructed data */
			for (l = nr; l < nv; ++l)
				p[l] ^= gfmul[b][gfgen[ip[l]][idj]];
		}

		/* check that the final parity is 0 */
		for (l = nr; l < nv; ++l)
			if (p[l] != 0)
				return -1;
	}

	return 0;
}
Example #23
0
#include <cstdio>
#include <set>
#include <vector>

// The beaver struct is not included in this snippet; this is a minimal
// definition inferred from how its fields are used in main() below.
struct beaver {
	int prev;
	int next;
	bool visited;
	beaver() : prev(-1), next(-1), visited(false) {}
};

int main() {
	int n = 0, x = 0;
	std::scanf("%d %d", &n, &x);
	--x;
	std::vector<beaver> beavers(n);
	for (int i = 0; i < n; ++i) {
		scanf("%d", &beavers[i].prev);
		if (--beavers[i].prev != -1) {
			beavers[beavers[i].prev].next = i;
		}
	}

	int xpos = 1;
	int j = x;
	beavers[x].visited = true;
	while (beavers[j].prev != -1) {
		j = beavers[j].prev;
		beavers[j].visited = true;
		++xpos;
	}
	j = x;
	while (beavers[j].next != -1) {
		j = beavers[j].next;
		beavers[j].visited = true;
	}

	std::multiset<int> queues;
	for (int i = 0; i < n; ++i) {
		if (!beavers[i].visited) {
			beavers[i].visited = true;
			int num = 1;
			int j = i;
			while (beavers[j].prev != -1) {
				j = beavers[j].prev;
				beavers[j].visited = true;
				++num;
			}
			j = i;
			while (beavers[j].next != -1) {
				j = beavers[j].next;
				beavers[j].visited = true;
				++num;
			}
			queues.insert(num);
		}
	}

	int queues_sum = 0;
	for (std::multiset<int>::const_iterator itr = queues.begin(); itr != queues.end(); ++itr) {
		queues_sum += *itr;
	}

	std::vector<bool> table((queues_sum + 1) * (queues.size() + 1), false);
	table[0] = true;

	j = 1;
	for (std::multiset<int>::const_iterator itr = queues.begin(); itr != queues.end(); ++itr) {
		for (int i = 0; i <= queues_sum; ++i) {
			if (table[(j - 1) * (queues_sum + 1) + i]) {
				table[j * (queues_sum + 1) + i] = true;
				table[j * (queues_sum + 1) + i + *itr] = true;
			}
		}
		++j;
	}

	for (int i = 0; i <= queues_sum; ++i) {
		if (table[queues.size() * (queues_sum + 1) + i]) {
			printf("%d\n", i + xpos);
		}
	}

	return 0;
}
Example #24
0
lua_tinker::table lua_tinker::read(lua_State *L, int index)
{
	return table(L, index);
}
Example #25
0
// Revised lookup semantics introduced in 1.3 (Kestrel beta)
void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {

  // Note:  Arrays can have intermediate array supers.  Use java_super to skip them.
  KlassHandle super (THREAD, klass()->java_super());
  int nofNewEntries = 0;


  if (PrintVtables && !klass()->oop_is_array()) {
    ResourceMark rm(THREAD);
    tty->print_cr("Initializing: %s", _klass->name()->as_C_string());
  }

#ifdef ASSERT
  oop* end_of_obj = (oop*)_klass() + _klass()->size();
  oop* end_of_vtable = (oop*)&table()[_length];
  assert(end_of_vtable <= end_of_obj, "vtable extends beyond end");
#endif

  if (Universe::is_bootstrapping()) {
    // just clear everything
    for (int i = 0; i < _length; i++) table()[i].clear();
    return;
  }

  int super_vtable_len = initialize_from_super(super);
  if (klass()->oop_is_array()) {
    assert(super_vtable_len == _length, "arrays shouldn't introduce new methods");
  } else {
    assert(_klass->oop_is_instance(), "must be instanceKlass");

    objArrayHandle methods(THREAD, ik()->methods());
    int len = methods()->length();
    int initialized = super_vtable_len;

    // update_inherited_vtable can stop for gc - ensure using handles
    for (int i = 0; i < len; i++) {
      HandleMark hm(THREAD);
      assert(methods()->obj_at(i)->is_method(), "must be a methodOop");
      methodHandle mh(THREAD, (methodOop)methods()->obj_at(i));

      bool needs_new_entry = update_inherited_vtable(ik(), mh, super_vtable_len, checkconstraints, CHECK);

      if (needs_new_entry) {
        put_method_at(mh(), initialized);
        mh()->set_vtable_index(initialized); // set primary vtable index
        initialized++;
      }
    }

    // add miranda methods; it will also update the value of initialized
    fill_in_mirandas(initialized);

    // In class hierarchies where the accessibility is not increasing (i.e., going from private ->
    // package_private -> public/protected), the vtable might actually be smaller than our initial
    // calculation.
    assert(initialized <= _length, "vtable initialization failed");
    for(;initialized < _length; initialized++) {
      put_method_at(NULL, initialized);
    }
    NOT_PRODUCT(verify(tty, true));
  }
}
Example #26
0
lua_tinker::table lua_tinker::pop(lua_State *L)
{
	return table(L, lua_gettop(L));
}
    void run() {
        foldnodeplot.reset(new PlotPoints());
        env->add(foldnodeplot);
        folddirplot.reset(new PlotLines(3));
        env->add(folddirplot);
        pickplot.reset(new PlotSpheres());
        env->add(pickplot);
        pickedNode = NULL;

        leftManipAxes.reset(new PlotAxes);
        env->add(leftManipAxes);
        rightManipAxes.reset(new PlotAxes);
        env->add(rightManipAxes);

        // load the robot
        pr2m.reset(new PR2Manager(*this));
        if (FlatteningConfig::useFakeGripper) {
            TelekineticGripper::Ptr fakeLeft(new TelekineticGripper(pr2m->pr2Left));
            fakeLeft->setTransform(pr2m->pr2Left->getTransform());
            env->add(fakeLeft);
            gleft.reset(new GenManip(fakeLeft));

            TelekineticGripper::Ptr fakeRight(new TelekineticGripper(pr2m->pr2Right));
            fakeRight->setTransform(pr2m->pr2Right->getTransform());
            gright.reset(new GenManip(fakeRight));
            env->add(fakeRight);

            pr2m->pr2->setTransform(btTransform(btQuaternion::getIdentity(), btVector3(0, 0, -100))); // out of view

        } else {
            gleft.reset(new GenManip(pr2m->pr2Left));
            gright.reset(new GenManip(pr2m->pr2Right));
        }

        // create the table
        const float table_height = .5;
        const float table_thickness = .05;
        tableExtents = GeneralConfig::scale * btVector3(.75,.75,table_thickness/2);
        tableTrans = btTransform(btQuaternion(0, 0, 0, 1), GeneralConfig::scale * btVector3(0.8, 0, table_height-table_thickness/2));
        BoxObject::Ptr table(new BoxObject(0, tableExtents, tableTrans));
        table->rigidBody->setFriction(0.1);
        env->add(table);
        cout << "table margin: " << table->rigidBody->getCollisionShape()->getMargin() << endl;

        // put the table in openrave
        /*
        OpenRAVE::KinBodyPtr raveTable = OpenRAVE::RaveCreateKinBody(rave->env);
        raveTable->SetName("table");
        vector<OpenRAVE::AABB> v;
        v.push_back(OpenRAVE::AABB(util::toRaveTransform(table_trans, 1./pr2m->pr2->scale).trans, 1./pr2m->pr2->scale * util::toRaveVector(table_extents)));
        raveTable->InitFromBoxes(v, true);
        rave->env->AddKinBody(raveTable);
        */

#if 0
        OpenRAVE::ViewerBasePtr raveViewer = OpenRAVE::RaveCreateViewer(rave->env, "qtcoin");
        rave->env->AddViewer(raveViewer);
        raveViewer->main(true);
#endif

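        // set up the cloth mesh just above the table surface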
        const int resx = 45, resy = 31;
//        const btScalar lenx = GeneralConfig::scale * 0.7, leny = GeneralConfig::scale * 0.5;
        const btScalar lenx = GeneralConfig::scale * 0.7/2, leny = GeneralConfig::scale * 0.5/2;
//        const btVector3 clothcenter = GeneralConfig::scale * btVector3(0.5, 0, table_height+0.01);
        const btVector3 clothcenter = GeneralConfig::scale * btVector3(0.3, 0.1, table_height+0.01);
//        cloth = makeSelfCollidingTowel(clothcenter, lenx, leny, resx, resy, env->bullet->softBodyWorldInfo);
        cloth.reset(new Cloth(resx, resy, lenx, leny, clothcenter, env->bullet->softBodyWorldInfo));
        env->add(cloth);

        facepicker.reset(new SoftBodyFacePicker(*this, viewer.getCamera(), cloth->softBody.get()));
        facepicker->setPickCallback(boost::bind(&CustomScene::pickCallback, this, _1));

        sbgripperleft.reset(new GenPR2SoftGripper(pr2m->pr2, gleft, true));
        sbgripperleft->setGrabOnlyOnContact(true);
        sbgripperleft->setTarget(cloth);

        GenPR2SoftGripperAction leftAction(pr2m->pr2, gleft->baseManip()->manip, sbgripperleft);
        leftAction.setTarget(cloth);
        leftAction.setExecTime(1.);
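        // key bindings: 'a' runs the left gripper action, 'c' grasps the picked
        // node, 'f'/'F' run greedy/deep flattening, 'g' lifts the cloth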
        addVoidKeyCallback('a', boost::bind(&CustomScene::runGripperAction, this, leftAction));
        addVoidKeyCallback('c', boost::bind(&CustomScene::graspPickedNode, this));
        addVoidKeyCallback('f', boost::bind(&CustomScene::greedyFlattenSingle, this));
        addVoidKeyCallback('F', boost::bind(&CustomScene::deepFlattenSingle, this));
        addVoidKeyCallback('g', boost::bind(&CustomScene::liftCloth, this));

        addPreDrawCallback(boost::bind(&CustomScene::markFolds, this));
        addPreDrawCallback(boost::bind(&CustomScene::drawPick, this));
        addPreDrawCallback(boost::bind(&CustomScene::drawManipAxes, this));
        addPreDrawCallback(boost::bind(&GenPR2SoftGripper::dbgDraw, sbgripperleft.get(), this));

        startViewer();
        startFixedTimestepLoop(BulletConfig::dt);
    }
Example #28
0
int
getloadavg (double loadavg[], int nelem)
{
  int elem = 0;			/* Return value.  */

# ifdef NO_GET_LOAD_AVG
#  define LDAV_DONE
  /* Set errno to zero to indicate that there was no particular error;
     this function just can't work at all on this system.  */
  errno = 0;
  elem = -1;
# endif

# if !defined (LDAV_DONE) && defined (HAVE_LIBKSTAT)
/* Use libkstat because we don't have to be root.  */
#  define LDAV_DONE
  kstat_ctl_t *kc;
  kstat_t *ksp;
  kstat_named_t *kn;

  kc = kstat_open ();
  if (kc == 0)
    return -1;
  ksp = kstat_lookup (kc, "unix", 0, "system_misc");
  if (ksp == 0)
    {
      kstat_close (kc);
      return -1;
    }
  if (kstat_read (kc, ksp, 0) == -1)
    {
      kstat_close (kc);
      return -1;
    }

  kn = kstat_data_lookup (ksp, "avenrun_1min");
  if (kn == 0)
    {
      /* Return -1 if no load average information is available.  */
      nelem = 0;
      elem = -1;
    }

  if (nelem >= 1)
    loadavg[elem++] = (double) kn->value.ul/FSCALE;

  if (nelem >= 2)
    {
      kn = kstat_data_lookup (ksp, "avenrun_5min");
      if (kn != 0)
	{
	  loadavg[elem++] = (double) kn->value.ul/FSCALE;

	  if (nelem >= 3)
	    {
	      kn = kstat_data_lookup (ksp, "avenrun_15min");
	      if (kn != 0)
		loadavg[elem++] = (double) kn->value.ul/FSCALE;
	    }
	}
    }

  kstat_close (kc);
# endif /* HAVE_LIBKSTAT */

# if !defined (LDAV_DONE) && defined (hpux) && defined (HAVE_PSTAT_GETDYNAMIC)
/* Use pstat_getdynamic() because we don't have to be root.  */
#  define LDAV_DONE
#  undef LOAD_AVE_TYPE

  struct pst_dynamic dyn_info;
  if (pstat_getdynamic (&dyn_info, sizeof (dyn_info), 0, 0) < 0)
    return -1;
  if (nelem > 0)
    loadavg[elem++] = dyn_info.psd_avg_1_min;
  if (nelem > 1)
    loadavg[elem++] = dyn_info.psd_avg_5_min;
  if (nelem > 2)
    loadavg[elem++] = dyn_info.psd_avg_15_min;

# endif /* hpux && HAVE_PSTAT_GETDYNAMIC */

# if !defined (LDAV_DONE) && defined (__linux__)
#  define LDAV_DONE
#  undef LOAD_AVE_TYPE

#  ifndef LINUX_LDAV_FILE
#   define LINUX_LDAV_FILE "/proc/loadavg"
#  endif

  char ldavgbuf[40];
  double load_ave[3];
  int fd, count;

  fd = open (LINUX_LDAV_FILE, O_RDONLY);
  if (fd == -1)
    return -1;
  count = read (fd, ldavgbuf, 40);
  (void) close (fd);
  if (count <= 0)
    return -1;

  /* The following sscanf must use the C locale.  */
  setlocale (LC_NUMERIC, "C");
  count = sscanf (ldavgbuf, "%lf %lf %lf",
		  &load_ave[0], &load_ave[1], &load_ave[2]);
  setlocale (LC_NUMERIC, "");
  if (count < 1)
    return -1;

  for (elem = 0; elem < nelem && elem < count; elem++)
    loadavg[elem] = load_ave[elem];

  return elem;

# endif /* __linux__ */

# if !defined (LDAV_DONE) && defined (__NetBSD__)
#  define LDAV_DONE
#  undef LOAD_AVE_TYPE

#  ifndef NETBSD_LDAV_FILE
#   define NETBSD_LDAV_FILE "/kern/loadavg"
#  endif

  unsigned long int load_ave[3], scale;
  int count;
  FILE *fp;

  fp = fopen (NETBSD_LDAV_FILE, "r");
  if (fp == NULL)
    return -1;
  count = fscanf (fp, "%lu %lu %lu %lu\n",
		  &load_ave[0], &load_ave[1], &load_ave[2],
		  &scale);
  (void) fclose (fp);
  if (count != 4)
    return -1;

  for (elem = 0; elem < nelem; elem++)
    loadavg[elem] = (double) load_ave[elem] / (double) scale;

  return elem;

# endif /* __NetBSD__ */

# if !defined (LDAV_DONE) && defined (NeXT)
#  define LDAV_DONE
  /* The NeXT code was adapted from iscreen 3.2.  */

  host_t host;
  struct processor_set_basic_info info;
  unsigned info_count;

  /* We only know how to get the 1-minute average for this system,
     so even if the caller asks for more than 1, we only return 1.  */

  if (!getloadavg_initialized)
    {
      if (processor_set_default (host_self (), &default_set) == KERN_SUCCESS)
	getloadavg_initialized = 1;
    }

  if (getloadavg_initialized)
    {
      info_count = PROCESSOR_SET_BASIC_INFO_COUNT;
      if (processor_set_info (default_set, PROCESSOR_SET_BASIC_INFO, &host,
			      (processor_set_info_t) &info, &info_count)
	  != KERN_SUCCESS)
	getloadavg_initialized = 0;
      else
	{
	  if (nelem > 0)
	    loadavg[elem++] = (double) info.load_average / LOAD_SCALE;
	}
    }

  if (!getloadavg_initialized)
    return -1;
# endif /* NeXT */

# if !defined (LDAV_DONE) && defined (UMAX)
#  define LDAV_DONE
/* UMAX 4.2, which runs on the Encore Multimax multiprocessor, does not
   have a /dev/kmem.  Information about the workings of the running kernel
   can be gathered with inq_stats system calls.
   We only know how to get the 1-minute average for this system.  */

  struct proc_summary proc_sum_data;
  struct stat_descr proc_info;
  double load;
  register unsigned int i, j;

  if (cpus == 0)
    {
      register unsigned int c, i;
      struct cpu_config conf;
      struct stat_descr desc;

      desc.sd_next = 0;
      desc.sd_subsys = SUBSYS_CPU;
      desc.sd_type = CPUTYPE_CONFIG;
      desc.sd_addr = (char *) &conf;
      desc.sd_size = sizeof conf;

      if (inq_stats (1, &desc))
	return -1;

      c = 0;
      for (i = 0; i < conf.config_maxclass; ++i)
	{
	  struct class_stats stats;
	  memset (&stats, '\0', sizeof stats);

	  desc.sd_type = CPUTYPE_CLASS;
	  desc.sd_objid = i;
	  desc.sd_addr = (char *) &stats;
	  desc.sd_size = sizeof stats;

	  if (inq_stats (1, &desc))
	    return -1;

	  c += stats.class_numcpus;
	}
      cpus = c;
      samples = cpus < 2 ? 3 : (2 * cpus / 3);
    }

  proc_info.sd_next = 0;
  proc_info.sd_subsys = SUBSYS_PROC;
  proc_info.sd_type = PROCTYPE_SUMMARY;
  proc_info.sd_addr = (char *) &proc_sum_data;
  proc_info.sd_size = sizeof (struct proc_summary);
  proc_info.sd_sizeused = 0;

  if (inq_stats (1, &proc_info) != 0)
    return -1;

  load = proc_sum_data.ps_nrunnable;
  j = 0;
  for (i = samples - 1; i > 0; --i)
    {
      load += proc_sum_data.ps_nrun[j];
      if (j++ == PS_NRUNSIZE)
	j = 0;
    }

  if (nelem > 0)
    loadavg[elem++] = load / samples / cpus;
# endif /* UMAX */

# if !defined (LDAV_DONE) && defined (DGUX)
#  define LDAV_DONE
  /* This call can return -1 for an error, but with good args
     it's not supposed to fail.  The first argument is, for no
     apparent reason, of type 'long int *'.  */
  dg_sys_info ((long int *) &load_info,
	       DG_SYS_INFO_LOAD_INFO_TYPE,
	       DG_SYS_INFO_LOAD_VERSION_0);

  if (nelem > 0)
    loadavg[elem++] = load_info.one_minute;
  if (nelem > 1)
    loadavg[elem++] = load_info.five_minute;
  if (nelem > 2)
    loadavg[elem++] = load_info.fifteen_minute;
# endif /* DGUX */

# if !defined (LDAV_DONE) && defined (apollo)
#  define LDAV_DONE
/* Apollo code from [email protected] (Ray Lischner).

   This system call is not documented.  The load average is obtained as
   three long integers, for the load average over the past minute,
   five minutes, and fifteen minutes.  Each value is a scaled integer,
   with 16 bits of integer part and 16 bits of fraction part.

   I'm not sure which operating system first supported this system call,
   but I know that SR10.2 supports it.  */

  extern void proc1_$get_loadav ();
  unsigned long load_ave[3];

  proc1_$get_loadav (load_ave);

  if (nelem > 0)
    loadavg[elem++] = load_ave[0] / 65536.0;
  if (nelem > 1)
    loadavg[elem++] = load_ave[1] / 65536.0;
  if (nelem > 2)
    loadavg[elem++] = load_ave[2] / 65536.0;
# endif /* apollo */

# if !defined (LDAV_DONE) && defined (OSF_MIPS)
#  define LDAV_DONE

  struct tbl_loadavg load_ave;
  table (TBL_LOADAVG, 0, &load_ave, 1, sizeof (load_ave));
  loadavg[elem++]
    = (load_ave.tl_lscale == 0
       ? load_ave.tl_avenrun.d[0]
       : (load_ave.tl_avenrun.l[0] / (double) load_ave.tl_lscale));
# endif	/* OSF_MIPS */

# if !defined (LDAV_DONE) && (defined (__MSDOS__) || defined (WINDOWS32))
#  define LDAV_DONE

  /* A faithful emulation is going to have to be saved for a rainy day.  */
  for ( ; elem < nelem; elem++)
    {
      loadavg[elem] = 0.0;
    }
# endif  /* __MSDOS__ || WINDOWS32 */

# if !defined (LDAV_DONE) && defined (OSF_ALPHA)
#  define LDAV_DONE

  struct tbl_loadavg load_ave;
  table (TBL_LOADAVG, 0, &load_ave, 1, sizeof (load_ave));
  for (elem = 0; elem < nelem; elem++)
    loadavg[elem]
      = (load_ave.tl_lscale == 0
       ? load_ave.tl_avenrun.d[elem]
       : (load_ave.tl_avenrun.l[elem] / (double) load_ave.tl_lscale));
# endif /* OSF_ALPHA */

# if !defined (LDAV_DONE) && defined (VMS)
  /* VMS specific code -- read from the Load Ave driver.  */

  LOAD_AVE_TYPE load_ave[3];
  static int getloadavg_initialized = 0;
#  ifdef eunice
  struct
  {
    int dsc$w_length;
    char *dsc$a_pointer;
  } descriptor;
#  endif

  /* Ensure that there is a channel open to the load ave device.  */
  if (!getloadavg_initialized)
    {
      /* Attempt to open the channel.  */
#  ifdef eunice
      descriptor.dsc$w_length = 18;
      descriptor.dsc$a_pointer = "$$VMS_LOAD_AVERAGE";
#  else
      $DESCRIPTOR (descriptor, "LAV0:");
#  endif
      if (sys$assign (&descriptor, &channel, 0, 0) & 1)
	getloadavg_initialized = 1;
    }

  /* Read the load average vector.  */
  if (getloadavg_initialized
      && !(sys$qiow (0, channel, IO$_READVBLK, 0, 0, 0,
		     load_ave, 12, 0, 0, 0, 0) & 1))
    {
      sys$dassgn (channel);
      getloadavg_initialized = 0;
    }

  if (!getloadavg_initialized)
    return -1;
# endif /* VMS */

# if !defined (LDAV_DONE) && defined(LOAD_AVE_TYPE) && !defined(VMS)

  /* UNIX-specific code -- read the average from /dev/kmem.  */

#  define LDAV_PRIVILEGED		/* This code requires special installation.  */

  LOAD_AVE_TYPE load_ave[3];

  /* Get the address of LDAV_SYMBOL.  */
  if (offset == 0)
    {
#  ifndef sgi
#   ifndef NLIST_STRUCT
      strcpy (nl[0].n_name, LDAV_SYMBOL);
      strcpy (nl[1].n_name, "");
#   else /* NLIST_STRUCT */
#    ifdef HAVE_STRUCT_NLIST_N_UN_N_NAME
      nl[0].n_un.n_name = LDAV_SYMBOL;
      nl[1].n_un.n_name = 0;
#    else /* not HAVE_STRUCT_NLIST_N_UN_N_NAME */
      nl[0].n_name = LDAV_SYMBOL;
      nl[1].n_name = 0;
#    endif /* not HAVE_STRUCT_NLIST_N_UN_N_NAME */
#   endif /* NLIST_STRUCT */

#   ifndef SUNOS_5
      if (
#    if !(defined (_AIX) && !defined (ps2))
	  nlist (KERNEL_FILE, nl)
#    else  /* _AIX */
	  knlist (nl, 1, sizeof (nl[0]))
#    endif
	  >= 0)
	  /* Omit "&& nl[0].n_type != 0 " -- it breaks on Sun386i.  */
	  {
#    ifdef FIXUP_KERNEL_SYMBOL_ADDR
	    FIXUP_KERNEL_SYMBOL_ADDR (nl);
#    endif
	    offset = nl[0].n_value;
	  }
#   endif /* !SUNOS_5 */
#  else  /* sgi */
      int ldav_off;

      ldav_off = sysmp (MP_KERNADDR, MPKA_AVENRUN);
      if (ldav_off != -1)
	offset = (long) ldav_off & 0x7fffffff;
#  endif /* sgi */
    }

  /* Make sure we have /dev/kmem open.  */
  if (!getloadavg_initialized)
    {
#  ifndef SUNOS_5
      channel = open ("/dev/kmem", 0);
      if (channel >= 0)
	{
	  /* Set the channel to close on exec, so it does not
	     litter any child's descriptor table.  */
#   ifdef F_SETFD
#    ifndef FD_CLOEXEC
#     define FD_CLOEXEC 1
#    endif
	  (void) fcntl (channel, F_SETFD, FD_CLOEXEC);
#   endif
	  getloadavg_initialized = 1;
	}
#  else /* SUNOS_5 */
      /* We pass 0 for the kernel, corefile, and swapfile names
	 to use the currently running kernel.  */
      kd = kvm_open (0, 0, 0, O_RDONLY, 0);
      if (kd != 0)
	{
	  /* nlist the currently running kernel.  */
	  kvm_nlist (kd, nl);
	  offset = nl[0].n_value;
	  getloadavg_initialized = 1;
	}
#  endif /* SUNOS_5 */
    }

  /* If we can, get the load average values.  */
  if (offset && getloadavg_initialized)
    {
      /* Try to read the load.  */
#  ifndef SUNOS_5
      if (lseek (channel, offset, 0) == -1L
	  || read (channel, (char *) load_ave, sizeof (load_ave))
	  != sizeof (load_ave))
	{
	  close (channel);
	  getloadavg_initialized = 0;
	}
#  else  /* SUNOS_5 */
      if (kvm_read (kd, offset, (char *) load_ave, sizeof (load_ave))
	  != sizeof (load_ave))
        {
          kvm_close (kd);
          getloadavg_initialized = 0;
	}
#  endif /* SUNOS_5 */
    }

  if (offset == 0 || !getloadavg_initialized)
    return -1;
# endif /* LOAD_AVE_TYPE and not VMS */

# if !defined (LDAV_DONE) && defined (LOAD_AVE_TYPE) /* Including VMS.  */
  if (nelem > 0)
    loadavg[elem++] = LDAV_CVT (load_ave[0]);
  if (nelem > 1)
    loadavg[elem++] = LDAV_CVT (load_ave[1]);
  if (nelem > 2)
    loadavg[elem++] = LDAV_CVT (load_ave[2]);

#  define LDAV_DONE
# endif /* !LDAV_DONE && LOAD_AVE_TYPE */

# ifdef LDAV_DONE
  return elem;
# else
  /* Set errno to zero to indicate that there was no particular error;
     this function just can't work at all on this system.  */
  errno = 0;
  return -1;
# endif
}
Example #29
0
void AsynchronousMetrics::update()
{
    {
        if (auto mark_cache = context.getMarkCache())
        {
            set("MarkCacheBytes", mark_cache->weight());
            set("MarkCacheFiles", mark_cache->count());
        }
    }

    {
        if (auto uncompressed_cache = context.getUncompressedCache())
        {
            set("UncompressedCacheBytes", uncompressed_cache->weight());
            set("UncompressedCacheCells", uncompressed_cache->count());
        }
    }

    {
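        /// Aggregate replication queue sizes, replica delays and part counts
        /// over all MergeTree / ReplicatedMergeTree tables in every database.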
        auto databases = context.getDatabases();

        size_t max_queue_size = 0;
        size_t max_inserts_in_queue = 0;
        size_t max_merges_in_queue = 0;

        size_t sum_queue_size = 0;
        size_t sum_inserts_in_queue = 0;
        size_t sum_merges_in_queue = 0;

        size_t max_absolute_delay = 0;
        size_t max_relative_delay = 0;

        size_t max_part_count_for_partition = 0;

        for (const auto & db : databases)
        {
            for (auto iterator = db.second->getIterator(); iterator->isValid(); iterator->next())
            {
                auto & table = iterator->table();
                StorageMergeTree * table_merge_tree = typeid_cast<StorageMergeTree *>(table.get());
                StorageReplicatedMergeTree * table_replicated_merge_tree = typeid_cast<StorageReplicatedMergeTree *>(table.get());

                if (table_replicated_merge_tree)
                {
                    StorageReplicatedMergeTree::Status status;
                    table_replicated_merge_tree->getStatus(status, false);

                    calculateMaxAndSum(max_queue_size, sum_queue_size, status.queue.queue_size);
                    calculateMaxAndSum(max_inserts_in_queue, sum_inserts_in_queue, status.queue.inserts_in_queue);
                    calculateMaxAndSum(max_merges_in_queue, sum_merges_in_queue, status.queue.merges_in_queue);

                    try
                    {
                        time_t absolute_delay = 0;
                        time_t relative_delay = 0;
                        table_replicated_merge_tree->getReplicaDelays(absolute_delay, relative_delay);

                        calculateMax(max_absolute_delay, absolute_delay);
                        calculateMax(max_relative_delay, relative_delay);
                    }
                    catch (...)
                    {
                        tryLogCurrentException(__PRETTY_FUNCTION__,
                            "Cannot get replica delay for table: " + backQuoteIfNeed(db.first) + "." + backQuoteIfNeed(iterator->name()));
                    }

                    calculateMax(max_part_count_for_partition, table_replicated_merge_tree->getData().getMaxPartsCountForMonth());
                }

                if (table_merge_tree)
                {
                    calculateMax(max_part_count_for_partition, table_merge_tree->getData().getMaxPartsCountForMonth());
                }
            }
        }

        set("ReplicasMaxQueueSize", max_queue_size);
        set("ReplicasMaxInsertsInQueue", max_inserts_in_queue);
        set("ReplicasMaxMergesInQueue", max_merges_in_queue);

        set("ReplicasSumQueueSize", sum_queue_size);
        set("ReplicasSumInsertsInQueue", sum_inserts_in_queue);
        set("ReplicasSumMergesInQueue", sum_merges_in_queue);

        set("ReplicasMaxAbsoluteDelay", max_absolute_delay);
        set("ReplicasMaxRelativeDelay", max_relative_delay);

        set("MaxPartCountForPartition", max_part_count_for_partition);
    }

#if USE_TCMALLOC
    {
        /// tcmalloc related metrics. Remove if you switch to different allocator.

        MallocExtension & malloc_extension = *MallocExtension::instance();

        auto malloc_metrics =
        {
            "generic.current_allocated_bytes",
            "generic.heap_size",
            "tcmalloc.current_total_thread_cache_bytes",
            "tcmalloc.central_cache_free_bytes",
            "tcmalloc.transfer_cache_free_bytes",
            "tcmalloc.thread_cache_free_bytes",
            "tcmalloc.pageheap_free_bytes",
            "tcmalloc.pageheap_unmapped_bytes",
        };

        for (auto malloc_metric : malloc_metrics)
        {
            size_t value = 0;
            if (malloc_extension.GetNumericProperty(malloc_metric, &value))
                set(malloc_metric, value);
        }
    }
#endif

    /// Add more metrics as you wish.
}
Example #30
0
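// One-compartment PK model with a depot compartment (first-order absorption);
// exponential ETAs on CL, V and KA (all OMEGA variances are zero here).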
$PARAM CL = 1, V = 20, KA=1.1
$CMT DEPOT CENT
$PKMODEL ncmt=1, depot = TRUE,trans=1
$OMEGA 0 0 0
labels=s(ECL,EV,EKA)
$TABLE
table(CP) = CENT/pred_V;
$MAIN
pred_CL   = CL*exp(ETA(1));
pred_V    = V *exp(ETA(2));
pred_KA   = KA*exp(ETA(3));