void Table::process( const Eref& e, ProcPtr p ) { lastTime_ = p->currTime; // send out a request for data. This magically comes back in the // RecvDataBuf and is handled. requestData()->send( e, p->threadIndexInGroup, recvDataBuf()->getFid()); }
void Table::reinit( const Eref& e, ProcPtr p ) { input_ = 0.0; vec().resize( 0 ); lastTime_ = 0; // cout << "tabReinit on :" << p->groupId << ":" << p->threadIndexInGroup << endl << flush; requestData()->send( e, p->threadIndexInGroup, recvDataBuf()->getFid()); }
// Build and return the class-info singleton describing the HDF5DataWriter
// MOOSE class: its DestFinfos (process/reinit), the shared proc message,
// and the inherited field list. The Finfo objects are function-local
// statics because the Cinfo stores their addresses for the lifetime of
// the program.
const Cinfo * HDF5DataWriter::initCinfo()
{
    // Destination for the scheduler's process tick.
    static DestFinfo process(
        "process",
        "Handle process calls. Write data to file and clear all Table objects"
        " associated with this. Hence you want to keep it on a slow clock"
        " 1000 times or more slower than that for the tables.",
        new ProcOpFunc<HDF5DataWriter>( &HDF5DataWriter::process) );

    // Destination for the scheduler's reinit tick.
    static DestFinfo reinit(
        "reinit",
        "Reinitialize the object. If the current file handle is valid, it tries"
        " to close that and open the file specified in current filename field.",
        new ProcOpFunc<HDF5DataWriter>( &HDF5DataWriter::reinit ) );

    // The proc shared message bundles process and reinit so a single
    // scheduler connection delivers both. Note: stores addresses of the
    // statics above, so declaration order matters.
    static Finfo * processShared[] = {
        &process, &reinit
    };
    static SharedFinfo proc(
        "proc",
        "Shared message to receive process and reinit",
        processShared,
        sizeof( processShared ) / sizeof( Finfo* ));

    // Full field list: inherited field accessors plus the proc shared Finfo.
    static Finfo * finfos[] = {
        requestOut(),
        clear(),
        recvDataBuf(),
        &proc,
    };

    // Name/Author/Description metadata consumed by the MOOSE doc system.
    static string doc[] = {
        "Name", "HDF5DataWriter",
        "Author", "Subhasis Ray",
        "Description",
        "HDF5 file writer for saving data tables. It saves the tables connected"
        " to it via `requestOut` field into an HDF5 file. The path of the"
        " table is maintained in the HDF5 file, with a HDF5 group for each"
        " element above the table."
        "\n"
        "Thus, if you have a table `/data/VmTable` in MOOSE, then it will be"
        " written as an HDF5 table called `VmTable` inside an HDF5 Group called"
        " `data`."
        "\n"
        "However Table inside Table is considered a pathological case and is"
        " not handled.\n"
        "At every process call it writes the contents of the tables to the file"
        " and clears the table vectors. You can explicitly force writing of the"
        " data via the `flush` function."
    };

    static Dinfo< HDF5DataWriter > dinfo;
    // The Cinfo constructor registers this class with MOOSE; it inherits
    // fields from HDF5WriterBase.
    static Cinfo cinfo(
        "HDF5DataWriter",
        HDF5WriterBase::initCinfo(),
        finfos,
        sizeof(finfos)/sizeof(Finfo*),
        &dinfo,
        doc, sizeof( doc ) / sizeof( string ));
    return &cinfo;
}
/** Write data to datasets in HDF5 file. Clear all data in the table objects associated with this object. */ void HDF5DataWriter::process(const Eref & e, ProcPtr p) { if (filehandle_ < 0){ return; } // cout << "HDF5DataWriter::process: currentTime=" << p->currTime << endl; requestOut()->send(e, recvDataBuf()->getFid()); for (map<string, vector < double > >:: iterator data_it = datamap_.begin(); data_it != datamap_.end(); ++data_it){ string path = data_it->first; // if (data_it->second.size() >= flushLimit_){ map < string, hid_t >::iterator node_it = nodemap_.find(path); assert (node_it != nodemap_.end()); if (node_it->second < 0){ nodemap_[path] = get_dataset(path); } herr_t status = appendToDataset(nodemap_[path], data_it->second); if (status < 0){ cerr << "Warning: appending data for object " << data_it->first << " returned status " << status << endl; } data_it->second.clear(); } }
// Build and return the class-info singleton describing the Table MOOSE
// class: its value field (threshold), destination Finfos (input, spike,
// process, reinit), the shared proc message, and inherited field
// accessors. The Finfo objects are function-local statics because the
// Cinfo stores their addresses for the lifetime of the program.
const Cinfo* Table::initCinfo()
{
    //////////////////////////////////////////////////////////////
    // Field Definitions
    //////////////////////////////////////////////////////////////
    // Read/write scalar field, wired to the getter/setter pair.
    static ValueFinfo< Table, double > threshold(
        "threshold",
        "threshold used when Table acts as a buffer for spikes",
        &Table::setThreshold,
        &Table::getThreshold
    );

    //////////////////////////////////////////////////////////////
    // MsgDest Definitions
    //////////////////////////////////////////////////////////////
    static DestFinfo input( "input",
        "Fills data into the Table.",
        new OpFunc1< Table, double >( &Table::input ) );

    static DestFinfo spike( "spike",
        "Fills spike timings into the Table. Signal has to exceed thresh",
        new OpFunc1< Table, double >( &Table::spike ) );

    static DestFinfo process( "process",
        "Handles process call, updates internal time stamp.",
        new ProcOpFunc< Table >( &Table::process ) );

    static DestFinfo reinit( "reinit",
        "Handles reinit call.",
        new ProcOpFunc< Table >( &Table::reinit ) );

    //////////////////////////////////////////////////////////////
    // SharedMsg Definitions
    //////////////////////////////////////////////////////////////
    // The proc shared message bundles process and reinit so one scheduler
    // connection delivers both. Stores addresses of the statics above.
    static Finfo* procShared[] = {
        &process, &reinit
    };
    static SharedFinfo proc( "proc",
        "Shared message for process and reinit",
        procShared, sizeof( procShared ) / sizeof( const Finfo* )
    );

    //////////////////////////////////////////////////////////////
    // Field Element for the vector data
    // Use a limit of 2^20 entries for the tables, about 1 million.
    //////////////////////////////////////////////////////////////

    static Finfo* tableFinfos[] = {
        &threshold,     // Value
        &input,         // DestFinfo
        &spike,         // DestFinfo
        recvDataBuf(),  // DestFinfo
        requestData(),  // SrcFinfo
        &proc,          // SharedFinfo
    };

    // The Cinfo constructor registers this class with MOOSE; it inherits
    // fields (including the vector data) from TableBase.
    static Cinfo tableCinfo (
        "Table",
        TableBase::initCinfo(),
        tableFinfos,
        sizeof( tableFinfos ) / sizeof ( Finfo* ),
        new Dinfo< Table >()
    );

    return &tableCinfo;
}