void SliderGenerator::addSetting(libconfig::Config& cfg)
{
    libconfig::Setting& setting = cfg.getRoot().add(configGroupName, libconfig::Setting::TypeGroup);
    setting.add(mainTmplSettingName, libconfig::Setting::TypeString) = editMainTemplate->text().toStdString();
    setting.add(radioTmplSettingName, libconfig::Setting::TypeString) = editRadioTemplate->text().toStdString();
    setting.add(radioCheckedSettingName, libconfig::Setting::TypeString) = editRadioCheckedTemplate->text().toStdString();
    setting.add(slideTmplSettingName, libconfig::Setting::TypeString) = editSlideTemplate->text().toStdString();
    setting.add(labelTmplSettingName, libconfig::Setting::TypeString) = editLabelTemplate->text().toStdString();
    setting.add(cssSettingName, libconfig::Setting::TypeString) = editCSSControlTemplate->text().toStdString();
    setting.add(outfileSettingName, libconfig::Setting::TypeString) = editOutFile->text().toStdString();
}
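// Minimal usage sketch, not part of the original sources: it assumes addSetting()
// is publicly accessible, and the helper name and output file name are hypothetical.
//
static void writeSliderSettings(SliderGenerator& generator)
{
    libconfig::Config cfg;
    generator.addSetting(cfg);              // adds the generator's settings group to the config
    cfg.writeFile("slider_settings.cfg");   // persist the configuration to disk
}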
void sanitycheck(libconfig::Config& cfg, libconfig::Setting& cfgnode, const string& branch)
{
    // Check that a branch with the given name exists under cfgnode.
    //
    if (!cfgnode.exists(branch))
        throw MissingParameterException("Cannot find `" + branch + "' attribute in query subtree.");

    // Check that the child has a name.
    //
    if (!cfgnode[branch].exists("name"))
        throw MissingParameterException("Cannot find `name' attribute in query subtree.");

    // Check that the top-level contains a node of this name.
    //
    string name = cfgnode[branch]["name"];
    if (!cfg.getRoot().exists(name))
        throw MissingParameterException("Cannot find description for node `" + name + "'.");

    // Check that the top-level node of this name names a type.
    //
    if (!cfg.getRoot()[name].exists("type"))
        throw MissingParameterException("Cannot find mandatory `type' parameter in description for node `" + name + "'.");
}
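// Illustrative sketch only (hypothetical node names): for branch = "input", the
// configuration fragment below passes the first three checks but fails the last
// one, because the top-level description `myscan' carries no `type' parameter:
//
//   somenode : { input = { name = "myscan"; }; };
//   myscan   : { };
//
// sanitycheck() would then throw MissingParameterException with the message
// "Cannot find mandatory `type' parameter in description for node `myscan'."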
void open_config()
{
    libconfig::Config cfg;

    try
    {
        cfg.readFile(TEST_CONF_FILE.c_str());
    }
    catch (const libconfig::FileIOException& fioex)
    {
        // Fail the test if the configuration file cannot be read.
        ASSERT_TRUE(false);
    }
    catch (const libconfig::ParseException& pex)
    {
        // Fail the test if the configuration file cannot be parsed.
        ASSERT_TRUE(false);
    }

    const libconfig::Setting& config_root = cfg.getRoot();
    test_regex_manager = new RegexManager(TEMP_DIR, config_root, &test_ip_database, &test_swabber_interface);
}
void initialize_plugins(libconfig::Config& config)
{
    libconfig::Setting& root = config.getRoot();

    // Nothing to do if the configuration has no plugins section.
    if (!root.exists("plugins"))
    {
        return;
    }

    // The plugins setting may be either a single string or an array of
    // strings; any other type is silently ignored.
    libconfig::Setting& plugins = root["plugins"];
    switch (plugins.getType())
    {
    case libconfig::Setting::TypeArray:
        initialize_from_array(plugins);
        break;
    case libconfig::Setting::TypeString:
        initialize_from_string(plugins);
        break;
    default:
        break;
    }
}
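// Illustrative sketch only (hypothetical plugin names): both spellings below are
// accepted by initialize_plugins(); any other type for `plugins' is ignored.
//
//   plugins = "plugin_a";                    // handled by initialize_from_string()
//   plugins = [ "plugin_a", "plugin_b" ];    // handled by initialize_from_array()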
void ScanHdf5Op::init(libconfig::Config& root, libconfig::Setting& cfg)
{
    ZeroInputOp::init(root, cfg);

    filename = (const char*) root.getRoot()["path"];
    filename += "/";
    filename += (const char*) cfg["file"];

    // Remember partition id and total partitions.
    //
    int pid = 0;
    cfg.lookupValue("thispartition", pid);
    int ptotal = 1;
    cfg.lookupValue("totalpartitions", ptotal);
    assert(ptotal > 0);
    assert(pid < ptotal);
    thispartition = pid;
    totalpartitions = ptotal;

    // Store dataset names.
    //
    libconfig::Setting& grp = cfg["pick"];
    unsigned int size = grp.getLength();
    for (unsigned int i = 0; i < size; ++i)
    {
        string n = grp[i];
        datasetnames.push_back(n);
    }

    // Open file, open datasets.
    //
    hdf5file = H5Fopen(filename.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
    for (unsigned int i = 0; i < size; ++i)
    {
        hdf5sets.push_back(H5Dopen2(hdf5file, datasetnames[i].c_str(), H5P_DEFAULT));
        hdf5space.push_back(H5Dget_space(hdf5sets[i]));
    }

    // Create schema from datasets, and check that types are supported.
    //
    for (unsigned int i = 0; i < size; ++i)
    {
        appendFromDataset(hdf5sets[i], schema);
    }

    // Assert all datasets are vectors, not arrays.
    //
    hid_t space;
    for (unsigned int i = 0; i < size; ++i)
    {
        space = H5Dget_space(hdf5sets[i]);
        assert(H5Sget_simple_extent_ndims(space) == 1);
        H5Sclose(space);
    }

    // Assert all datasets have same length.
    // Remember data size.
    //
    hsize_t length;
    assert(size != 0);
    space = H5Dget_space(hdf5sets[0]);
    H5Sget_simple_extent_dims(space, &length, NULL);
    H5Sclose(space);
    totaltuples = length;
    for (unsigned int i = 1; i < size; ++i)
    {
        space = H5Dget_space(hdf5sets[i]);
        H5Sget_simple_extent_dims(space, &length, NULL);
        H5Sclose(space);
        assert(totaltuples == length);
    }

    assert(hdf5sets.size() == hdf5space.size());

    sizeintup = buffsize / schema.getTupleSize();
    memspace = H5Screate_simple(1, &sizeintup, NULL);

    // Specify totaltuples for requested partition.
    //
    unsigned long long step = totaltuples / totalpartitions;
    assert(totaltuples >= totalpartitions);
    origoffset = step * thispartition;
    totaltuples = (thispartition == (totalpartitions - 1)) ? totaltuples - origoffset : step;
}
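// Illustrative sketch only: a (hypothetical) configuration fragment that this
// init() would accept. The node name, path, file, and dataset names are made up;
// `path' is read from the top level, the rest from this operator's description.
//
//   path = "/data";                      // top level, prepended to `file'
//   myhdf5scan : {
//       type = "hdf5scan";
//       file = "experiment.h5";
//       pick = [ "dataset_a", "dataset_b" ];
//       thispartition = 0;               // optional, defaults to 0
//       totalpartitions = 1;             // optional, defaults to 1
//   };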
void constructsubtree(
        libconfig::Config& cfg,
        libconfig::Setting& cfgnode,
        Operator** rootaddr,              //< output
        Query::UserDefinedOpMapT& udops,
        int level,
        Query::OperatorDepthT& depthmap   //< output
        )
{
    // Lookup name and type.
    //
    string name = cfgnode["name"];
    string type = cfg.getRoot()[name]["type"];

    assert(*rootaddr == 0);

    // Based on type:
    // 1. Allocate and chain in appropriate object
    // 2. Check syntax for child
    // 3. sanitycheck() branch
    // 4. Recursively call self for children
    // 5. Initialize this node
    //
    if (   type == "scan"
        || type == "partitionedscan"
        || type == "parallelscan"
        || type == "generator_int"
        || type == "generator_long"
#ifdef ENABLE_HDF5
        || type == "hdf5scan"
#ifdef ENABLE_FASTBIT
        || type == "hdf5index"
        || type == "hdf5random"
#endif
#endif
#ifdef ENABLE_FASTBIT
        || type == "fastbitscan"
#endif
        )
    {
        // Scan operator, no children.
        //
        ZeroInputOp* tmp = NULL;

        if (type == "scan")
            tmp = new ScanOp();
        else if (type == "partitionedscan")
            tmp = new PartitionedScanOp();
        else if (type == "parallelscan")
            tmp = new ParallelScanOp();
        else if (type == "generator_int")
            tmp = new IntGeneratorOp();
        else if (type == "generator_long")
            tmp = new LongGeneratorOp();
#ifdef ENABLE_HDF5
        else if (type == "hdf5scan")
            tmp = new ScanHdf5Op();
#ifdef ENABLE_FASTBIT
        else if (type == "hdf5index")
            tmp = new IndexHdf5Op();
        else if (type == "hdf5random")
            tmp = new RandomLookupsHdf5Op();
#endif
#endif
#ifdef ENABLE_FASTBIT
        else if (type == "fastbitscan")
            tmp = new FastBitScanOp();
#endif

        (*rootaddr) = tmp;
        depthmap[tmp] = level;
    }
    else if (   type == "hashjoin"
             || type == "sortmergejoin"
             || type == "mpsmjoin"
             || type == "newmpsmjoin"
             || type == "preprejoin"
             || type == "indexhashjoin"
             )
    {
        // Dual-input operator
        //
        DualInputOp* tmp = NULL;

        if (type == "hashjoin")
            tmp = new HashJoinOp();
        else if (type == "sortmergejoin")
            tmp = new SortMergeJoinOp();
        else if (type == "mpsmjoin")
            tmp = new OldMPSMJoinOp();
        else if (type == "newmpsmjoin")
            tmp = new MPSMJoinOp();
        else if (type == "preprejoin")
            tmp = new PresortedPrepartitionedMergeJoinOp();
        else if (type == "indexhashjoin")
            tmp = new IndexHashJoinOp();

        (*rootaddr) = tmp;
        depthmap[tmp] = level;

        sanitycheck(cfg, cfgnode, "build");
        constructsubtree(cfg, cfgnode["build"], &(tmp->buildOp), udops, level+1, depthmap);

        sanitycheck(cfg, cfgnode, "probe");
        constructsubtree(cfg, cfgnode["probe"], &(tmp->probeOp), udops, level+1, depthmap);
    }
    else
    {
        // Single-input operator
        //
        SingleInputOp* tmp = NULL;

        if (type == "aggregate_sum")
            tmp = new AggregateSum();
        else if (type == "aggregate_count")
            tmp = new AggregateCount();
        else if (type == "merge")
            tmp = new MergeOp();
        else if (type == "shmwriter")
            tmp = new MemSegmentWriter();
        else if (type == "filter")
            tmp = new Filter();
        else if (type == "cycle_accountant")
            tmp = new CycleAccountant();
        else if (type == "projection")
            tmp = new Project();
        else if (type == "checker_callstate")
            tmp = new CallStateChecker();
        else if (type == "printer_schema")
            tmp = new SchemaPrinter();
        else if (type == "printer_tuplecount")
            tmp = new TupleCountPrinter();
        else if (type == "printer_perfcount")
            tmp = new PerfCountPrinter();
        else if (type == "sort")
            tmp = new SortLimit();
        else if (type == "printer_bitentropy")
            tmp = new BitEntropyPrinter();
        else if (type == "consumer")
            tmp = new ConsumeOp();
        else if (type == "printer_callcount")
            tmp = new CallCountPrinter();
        else if (type == "threadidprepend")
            tmp = new ThreadIdPrependOp();
        else if (type == "partition")
            tmp = new PartitionOp();
        else
        {
            // It's a user-defined type?
            //
            Query::UserDefinedOpMapT::iterator it;
            it = udops.find(type);

            // Not a user-defined type, no idea what it is.
            //
            if (it == udops.end())
                throw MissingParameterException("`" + type + "' is neither a built-in nor a user-defined type.");

            // Claim ownership of operator.
            //
            tmp = it->second;
            it->second = 0;
        }

        (*rootaddr) = tmp;
        depthmap[tmp] = level;

        sanitycheck(cfg, cfgnode, "input");
        constructsubtree(cfg, cfgnode["input"], &(tmp->nextOp), udops, level+1, depthmap);
    }

    // Call Operator::init on this node.
    //
    assert(*rootaddr != 0);
    (*rootaddr)->init(cfg, cfg.lookup(name));
}
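// Illustrative sketch only: a (hypothetical) query description of the shape that
// constructsubtree() walks. The tree structure (`name', `build', `probe', `input')
// nests under the tree node, while each operator's parameters live in a top-level
// description node that names its `type'; all node names below are made up, and
// operator-specific parameters are omitted.
//
//   treeroot : {
//       name = "join1";
//       build = { name = "scan1"; };
//       probe = {
//           name = "filter1";
//           input = { name = "scan2"; };
//       };
//   };
//
//   join1   : { type = "hashjoin"; };
//   filter1 : { type = "filter"; };
//   scan1   : { type = "scan"; };
//   scan2   : { type = "scan"; };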
/**
 * Constructs a query tree from the specified configuration file.
 * The pre-allocated user-defined operators are used if an operator type is
 * unknown. If a pre-allocated operator is used, its entry in the OpMap is
 * emptied, and it is then the Query's responsibility to destroy the object.
 *
 * @param cfg Configuration file to initialize tree with.
 * @param udops User-defined operator map. Each operator must have been
 * allocated with new. If it is used, the entry is set to NULL, and it then
 * becomes the Query's responsibility to call delete on the operator object.
 */
void Query::create(libconfig::Config& cfg, UserDefinedOpMapT& udops)
{
    sanitycheck(cfg, cfg.getRoot(), "treeroot");
    constructsubtree(cfg, cfg.lookup("treeroot"), &tree, udops, 0, operatorDepth);
}
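// Minimal usage sketch, not part of the original sources: the helper name and
// configuration file name are hypothetical, and no user-defined operators are
// registered, so only built-in types are accepted.
//
static void buildQueryFromFile(Query& q)
{
    libconfig::Config cfg;
    cfg.readFile("query.conf");          // may throw FileIOException or ParseException

    Query::UserDefinedOpMapT udops;      // empty map: built-in operator types only
    q.create(cfg, udops);                // builds the operator tree rooted at `treeroot'
}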
/**
 * Get the root setting of the configuration.
 */
libconfig::Setting& getRoot()
{
    return config.getRoot();
}