// To handle servers with customized mpq files, try to read Patch_D2.mpq using Stormlib
// (http://www.zezula.net/en/mpq/stormlib.html). We load the StormLib dll with LoadLibrary
// to avoid imposing any run- or compile-time dependencies on the user. If we can't load
// the dll or read the mpq, we will fall back on a hard-coded list of the standard items.
//
// We do all this in the injector and write the info to a temp file because of problems
// calling LoadLibrary in the injected dll.
// Update: Can now load the dll from BH.dll, so no need to write to external files anymore
//
// Reads the excel item/skill tables out of the given Patch_D2.mpq and stores the
// parsed tables in the global MpqDataMap, keyed by lower-cased table name.
// NOTE(review): always returns true, even when the dll fails to load or no table was
// read -- presumably the caller falls back to the hard-coded list in that case;
// verify against the call site before relying on the return value.
bool ReadMPQFiles(std::string fileName) {
	int successfulFileCount = 0, desiredFileCount = 0;
	// Resolve the StormLib entry points at runtime so BH carries no hard
	// dependency on StormLib.dll being present.
	HMODULE dllHandle = LoadLibrary((BH::path + "StormLib.dll").c_str());
	if (dllHandle) {
		SFileOpenArchive = (MPQOpenArchive)GetProcAddress(dllHandle, "SFileOpenArchive");
		SFileCloseArchive = (MPQCloseArchive)GetProcAddress(dllHandle, "SFileCloseArchive");
		SFileOpenFileEx = (MPQOpenFile)GetProcAddress(dllHandle, "SFileOpenFileEx");
		SFileGetFileSize = (MPQGetSize)GetProcAddress(dllHandle, "SFileGetFileSize");
		SFileReadFile = (MPQReadFile)GetProcAddress(dllHandle, "SFileReadFile");
		SFileCloseFile = (MPQCloseFile)GetProcAddress(dllHandle, "SFileCloseFile");
		// Serialize mpq access across all processes via a named global mutex.
		HANDLE pMutex = CreateMutex(NULL, true, "Global\\BH_PATCH_D2_MPQ_MUTEX");
		WaitForSingleObject(
			pMutex,     // handle to mutex
			INFINITE);  // no time-out interval
		// Only proceed if every required StormLib export was resolved.
		if (SFileOpenArchive && SFileCloseArchive && SFileOpenFileEx && SFileCloseFile && SFileGetFileSize && SFileReadFile) {
			// Copy the MPQ file to avoid sharing access violations
			std::string copyFileName(fileName);
			size_t start_pos = copyFileName.find("Patch_D2.mpq");
			if (start_pos != std::string::npos) {
				copyFileName.replace(start_pos, 12, "Patch_D2.copy.mpq");  // 12 == strlen("Patch_D2.mpq")
			}
			std::ifstream src(fileName.c_str(), std::ios::binary);
			std::ofstream dst(copyFileName.c_str(), std::ios::binary);
			dst << src.rdbuf();
			dst.close();
			src.close();
			// NOTE(review): the copied mpq is never deleted here -- confirm it is
			// cleaned up elsewhere (or intentionally left for reuse).
			MPQArchive archive(copyFileName.c_str());
			// The excel tables we want to pull out of the archive.
			const int NUM_MPQS = 13;
			std::string mpqFiles[NUM_MPQS] = { "UniqueItems", "Armor", "Weapons", "Misc", "ItemTypes", "ItemStatCost", "Inventory", "Properties", "Runes", "SetItems", "skills", "MagicPrefix", "MagicSuffix" };
			if (archive.error == ERROR_SUCCESS) {
				for (int i = 0; i < NUM_MPQS; i++){
					std::string path = "data\\global\\excel\\" + mpqFiles[i] + ".txt";
					MPQFile mpqFile(&archive, path.c_str());
					desiredFileCount++;
					if (mpqFile.error == ERROR_SUCCESS) {
						successfulFileCount++;
						// Key the parsed table by its lower-cased name.
						std::string key = mpqFiles[i];
						std::transform(key.begin(), key.end(), key.begin(), ::tolower);
						MpqDataMap[key] = new MPQData(&mpqFile);
					}
				}
			}
		}
		FreeLibrary(dllHandle);
		ReleaseMutex(pMutex);
		CloseHandle(pMutex);
	}
	return true;
}
CRhinoCommand::result CCommandSampleImportMeshes::RunCommand( const CRhinoCommandContext& context )
{
  // Import every mesh object from a user-selected .3dm file into the
  // active document. Returns cancel/nothing/failure on the usual
  // dialog/validation paths, success otherwise.
  CWnd* main_window = CWnd::FromHandle(RhinoApp().MainWnd());
  if (0 == main_window)
    return CRhinoCommand::failure;

  // Prompt for the file; honor script mode so batch runs don't block on UI.
  CRhinoGetFileDialog file_dialog;
  file_dialog.SetScriptMode(context.IsInteractive() ? FALSE : TRUE);
  BOOL dialog_ok = file_dialog.DisplayFileDialog(CRhinoGetFileDialog::open_rhino_only_dialog, 0, main_window);
  if (!dialog_ok)
    return CRhinoCommand::cancel;

  ON_wString path = file_dialog.FileName();
  path.TrimLeftAndRight();
  if (path.IsEmpty())
    return CRhinoCommand::nothing;

  if (!CRhinoFileUtilities::FileExists(path))
  {
    RhinoApp().Print(L"File not found\n");
    return CRhinoCommand::failure;
  }

  FILE* fp = ON::OpenFile(path, L"rb");
  if (0 == fp)
  {
    RhinoApp().Print(L"Unable to open file\n");
    return CRhinoCommand::failure;
  }

  // Read the whole 3dm archive into an in-memory model, then close the file
  // before doing anything else with the contents.
  ON_BinaryFile archive(ON::read3dm, fp);
  ONX_Model model;
  const bool read_ok = model.Read(archive) ? true : false;
  ON::CloseFile(fp);
  if (!read_ok)
  {
    RhinoApp().Print(L"Error reading file\n");
    return CRhinoCommand::failure;
  }

  // Add every mesh found in the archive to the document.
  int mesh_count = 0;
  for (int i = 0; i < model.m_object_table.Count(); i++)
  {
    const ONX_Model_Object& model_object = model.m_object_table[i];
    const ON_Mesh* mesh = ON_Mesh::Cast(model_object.m_object);
    if (0 != mesh)
    {
      // CRhinoDoc::AddMeshObject makes a copy of the input mesh
      context.m_doc.AddMeshObject(*mesh);
      mesh_count++;
    }
  }

  if (0 == mesh_count)
    RhinoApp().Print(L"No meshes imported\n");
  else if (1 == mesh_count)
    RhinoApp().Print(L"1 mesh imported\n");
  else
    RhinoApp().Print(L"%d meshes imported\n", mesh_count);

  context.m_doc.Redraw();
  return CRhinoCommand::success;
}
bool Save_Cereal( const SfM_Data & data, const std::string & filename, ESfM_Data flags_part) { // List which part of the file must be considered const bool b_views = (flags_part & VIEWS) == VIEWS; const bool b_intrinsics = (flags_part & INTRINSICS) == INTRINSICS; const bool b_extrinsics = (flags_part & EXTRINSICS) == EXTRINSICS; const bool b_structure = (flags_part & STRUCTURE) == STRUCTURE; const bool b_control_point = (flags_part & CONTROL_POINTS) == CONTROL_POINTS; //Create the stream and check it is ok std::ofstream stream(filename.c_str(), std::ios::binary | std::ios::out); if (!stream.is_open()) return false; // Data serialization { archiveType archive(stream); // since OpenMVG 0.9, the sfm_data version 0.2 is introduced // - it adds control_points storage const std::string version = "0.2"; archive(cereal::make_nvp("sfm_data_version", version)); archive(cereal::make_nvp("root_path", data.s_root_path)); if (b_views) archive(cereal::make_nvp("views", data.views)); else archive(cereal::make_nvp("views", Views())); if (b_intrinsics) archive(cereal::make_nvp("intrinsics", data.intrinsics)); else archive(cereal::make_nvp("intrinsics", Intrinsics())); if (b_extrinsics) archive(cereal::make_nvp("extrinsics", data.poses)); else archive(cereal::make_nvp("extrinsics", Poses())); // Structure -> See for export in another file if (b_structure) archive(cereal::make_nvp("structure", data.structure)); else archive(cereal::make_nvp("structure", Landmarks())); if (version != "0.1") // fast check to assert we are at least using version 0.2 { if (b_control_point) archive(cereal::make_nvp("control_points", data.control_points)); else archive(cereal::make_nvp("control_points", Landmarks())); } } return true; }
// Loads an SfM_Data scene from "filename", restoring only the sections
// selected by flags_part into "data".
// Returns false if the file cannot be opened or cereal throws during
// de-serialization (the error is printed to stderr).
bool Load_Cereal(
  SfM_Data & data,
  const std::string & filename,
  ESfM_Data flags_part)
{
  // Binary cereal archives are strictly sequential: sections that are not
  // wanted must still be deserialized (into throwaway objects) so the
  // stream position stays correct for the sections that follow.
  const bool bBinary = stlplus::extension_part(filename) == "bin";

  // List which part of the file must be considered
  const bool b_views = (flags_part & VIEWS) == VIEWS;
  const bool b_intrinsics = (flags_part & INTRINSICS) == INTRINSICS;
  const bool b_extrinsics = (flags_part & EXTRINSICS) == EXTRINSICS;
  const bool b_structure = (flags_part & STRUCTURE) == STRUCTURE;
  const bool b_control_point = (flags_part & CONTROL_POINTS) == CONTROL_POINTS;

  //Create the stream and check it is ok
  std::ifstream stream(filename.c_str(), std::ios::binary | std::ios::in);
  if (!stream.is_open())
    return false;

  // Data serialization
  try
  {
    archiveType archive(stream);

    std::string version;
    archive(cereal::make_nvp("sfm_data_version", version));
    archive(cereal::make_nvp("root_path", data.s_root_path));

    if (b_views)
      archive(cereal::make_nvp("views", data.views));
    else if (bBinary)
    {
      // Binary file: the section must be read anyway; discard it.
      Views views;
      archive(cereal::make_nvp("views", views));
    }

    if (b_intrinsics)
      archive(cereal::make_nvp("intrinsics", data.intrinsics));
    else if (bBinary)
    {
      // Binary file: the section must be read anyway; discard it.
      Intrinsics intrinsics;
      archive(cereal::make_nvp("intrinsics", intrinsics));
    }

    if (b_extrinsics)
      archive(cereal::make_nvp("extrinsics", data.poses));
    else if (bBinary)
    {
      // Binary file: the section must be read anyway; discard it.
      Poses poses;
      archive(cereal::make_nvp("extrinsics", poses));
    }

    if (b_structure)
      archive(cereal::make_nvp("structure", data.structure));
    else if (bBinary)
    {
      // Binary file: the section must be read anyway; discard it.
      Landmarks structure;
      archive(cereal::make_nvp("structure", structure));
    }

    // control_points only exist since sfm_data version 0.2.
    if (version != "0.1") // fast check to assert we are at least using version 0.2
    {
      if (b_control_point)
        archive(cereal::make_nvp("control_points", data.control_points));
      else if (bBinary)
      {
        // Binary file: the section must be read anyway; discard it.
        Landmarks control_points;
        archive(cereal::make_nvp("control_points", control_points));
      }
    }
  }
  catch (const cereal::Exception & e)
  {
    std::cerr << e.what() << std::endl;
    return false;
  }
  return true;
}
void read() { Abc::IArchive archive(Alembic::AbcCoreOgawa::ReadArchive(), "HasAMaterial.abc"); Abc::IObject an_object(archive.getTop(), "an_object"); if (const Abc::PropertyHeader * header = an_object.getProperties().getPropertyHeader(".byanyothername")) { TESTING_ASSERT(Mat::OMaterialSchema::matches(*header)); if (Mat::OMaterialSchema::matches(*header)) { std::cout << ".byanyothername yes.\n"; Mat::IMaterialSchema mat(an_object.getProperties(), ".byanyothername"); printMaterialSchema(mat); } else { std::cout << ".byanyothername no.\n"; } } if (const Abc::PropertyHeader * header = an_object.getProperties().getPropertyHeader("butnotbythisone")) { TESTING_ASSERT( !Mat::OMaterialSchema::matches(*header) ); if (Mat::OMaterialSchema::matches(*header)) { std::cout << "butnotbythisone yes.\n"; } else { std::cout << "butnotbythisone no.\n"; } } if (const Abc::PropertyHeader * header = an_object.getProperties().getPropertyHeader(".material")) { TESTING_ASSERT(!Mat::OMaterialSchema::matches(*header)); if (Mat::OMaterialSchema::matches(*header)) { std::cout << "manually built .material yes.\n"; } else { std::cout << "manually built .material no.\n"; } } std::cout << "-----------\n"; Abc::IObject anotherObj(archive.getTop(), "another_object"); std::string assignmentPath; if (Mat::getMaterialAssignmentPath(anotherObj, assignmentPath)) { std::cout << "another_object assignment path: " << assignmentPath; std::cout << std::endl; } TESTING_ASSERT(assignmentPath == "/some/material"); Mat::IMaterialSchema hasMat; TESTING_ASSERT(Mat::hasMaterial(anotherObj, hasMat)); if (Mat::hasMaterial(anotherObj, hasMat)) { std::cout << "another_object has local material: " << std::endl; printMaterialSchema(hasMat); } }
// De-serializes a shared-memory data buffer into its individual parcels and
// hands each one to the parcelport, while accounting de-serialization time
// and byte counts in "receive_data".
void decode_message(parcelport& pp,
    parcelset::shmem::data_buffer parcel_data,
    performance_counters::parcels::data_point receive_data)
{
    // protect from un-handled exceptions bubbling up
    try {
        try {
            // mark start of serialization
            util::high_resolution_timer timer;
            boost::int64_t overall_add_parcel_time = 0;

            {
                // De-serialize the parcel data
                data_buffer::data_buffer_type const& buffer = parcel_data.get_buffer();
                util::portable_binary_iarchive archive(
                    buffer, buffer.size(), boost::archive::no_header);

                // The message starts with the number of parcels it carries.
                std::size_t parcel_count = 0;
                archive >> parcel_count;
                for(std::size_t i = 0; i < parcel_count; ++i)
                {
                    // de-serialize parcel and add it to incoming parcel queue
                    parcel p;
                    archive >> p;

                    // make sure this parcel ended up on the right locality
                    BOOST_ASSERT(p.get_destination_locality() == pp.here());

                    // be sure not to measure add_parcel as serialization time
                    boost::int64_t add_parcel_time = timer.elapsed_nanoseconds();
                    pp.add_received_parcel(p);
                    overall_add_parcel_time +=
                        timer.elapsed_nanoseconds() - add_parcel_time;
                }

                // complete received data with parcel count
                receive_data.num_parcels_ = parcel_count;
                receive_data.raw_bytes_ = archive.bytes_read(); // amount of uncompressed data
            }

            // store the time required for serialization
            // (total elapsed minus the time spent inside add_received_parcel)
            receive_data.serialization_time_ =
                timer.elapsed_nanoseconds() - overall_add_parcel_time;

            pp.add_received_data(receive_data);
        }
        catch (hpx::exception const& e) {
            LPT_(error)
                << "decode_message: caught hpx::exception: " << e.what();
            hpx::report_error(boost::current_exception());
        }
        catch (boost::system::system_error const& e) {
            LPT_(error)
                << "decode_message: caught boost::system::error: " << e.what();
            hpx::report_error(boost::current_exception());
        }
        catch (boost::exception const&) {
            LPT_(error)
                << "decode_message: caught boost::exception.";
            hpx::report_error(boost::current_exception());
        }
        catch (std::exception const& e) {
            // We have to repackage all exceptions thrown by the
            // serialization library as otherwise we will loose the
            // e.what() description of the problem, due to slicing.
            boost::throw_exception(boost::enable_error_info(
                hpx::exception(serialization_error, e.what())));
        }
    }
    catch (...) {
        LPT_(error)
            << "decode_message: caught unknown exception.";
        hpx::report_error(boost::current_exception());
    }
}
/*
 * vgcreate - create a new volume group.
 * argv[0] is the VG name; the remaining arguments name the physical volumes.
 * Returns ECMD_PROCESSED on success, EINVALID_CMD_LINE or ECMD_FAILED on error.
 */
int vgcreate(struct cmd_context *cmd, int argc, char **argv)
{
	size_t max_lv, max_pv;
	uint32_t extent_size;
	char *vg_name;
	struct volume_group *vg;
	const char *tag;
	alloc_policy_t alloc;
	int clustered;

	if (!argc) {
		log_error("Please provide volume group name and "
			  "physical volumes");
		return EINVALID_CMD_LINE;
	}

	if (argc == 1) {
		log_error("Please enter physical volume name(s)");
		return EINVALID_CMD_LINE;
	}

	vg_name = skip_dev_dir(cmd, argv[0], NULL);
	max_lv = arg_uint_value(cmd, maxlogicalvolumes_ARG, 0);
	max_pv = arg_uint_value(cmd, maxphysicalvolumes_ARG, 0);
	alloc = arg_uint_value(cmd, alloc_ARG, ALLOC_NORMAL);

	if (alloc == ALLOC_INHERIT) {
		log_error("Volume Group allocation policy cannot inherit "
			  "from anything");
		return EINVALID_CMD_LINE;
	}

	/*
	 * Formats without FMT_UNLIMITED_VOLS cap LV/PV counts at 255;
	 * 0 (the default) is promoted to that cap here.
	 */
	if (!(cmd->fmt->features & FMT_UNLIMITED_VOLS)) {
		if (!max_lv)
			max_lv = 255;
		if (!max_pv)
			max_pv = 255;
		if (max_lv > 255 || max_pv > 255) {
			log_error("Number of volumes may not exceed 255");
			return EINVALID_CMD_LINE;
		}
	}

	if (arg_sign_value(cmd, physicalextentsize_ARG, 0) == SIGN_MINUS) {
		log_error("Physical extent size may not be negative");
		return EINVALID_CMD_LINE;
	}

	if (arg_sign_value(cmd, maxlogicalvolumes_ARG, 0) == SIGN_MINUS) {
		log_error("Max Logical Volumes may not be negative");
		return EINVALID_CMD_LINE;
	}

	if (arg_sign_value(cmd, maxphysicalvolumes_ARG, 0) == SIGN_MINUS) {
		log_error("Max Physical Volumes may not be negative");
		return EINVALID_CMD_LINE;
	}

	/* Units of 512-byte sectors */
	extent_size =
	    arg_uint_value(cmd, physicalextentsize_ARG, DEFAULT_EXTENT) * 2;

	if (!extent_size) {
		log_error("Physical extent size may not be zero");
		return EINVALID_CMD_LINE;
	}

	if (!validate_vg_name(cmd, vg_name)) {
		log_error("New volume group name \"%s\" is invalid", vg_name);
		return ECMD_FAILED;
	}

	/* Create the new VG */
	if (!(vg = vg_create(cmd, vg_name, extent_size, max_pv, max_lv, alloc,
			     argc - 1, argv + 1)))
		return ECMD_FAILED;

	/*
	 * vg_create may have clamped the requested limits; warn if so.
	 * NOTE(review): vg->max_lv/max_pv are printed with %d -- confirm their
	 * declared type matches the format specifier.
	 */
	if (max_lv != vg->max_lv)
		log_warn("WARNING: Setting maxlogicalvolumes to %d "
			 "(0 means unlimited)", vg->max_lv);

	if (max_pv != vg->max_pv)
		log_warn("WARNING: Setting maxphysicalvolumes to %d "
			 "(0 means unlimited)", vg->max_pv);

	if (arg_count(cmd, addtag_ARG)) {
		if (!(tag = arg_str_value(cmd, addtag_ARG, NULL))) {
			log_error("Failed to get tag");
			return ECMD_FAILED;
		}

		if (!(vg->fid->fmt->features & FMT_TAGS)) {
			log_error("Volume group format does not support tags");
			return ECMD_FAILED;
		}

		if (!str_list_add(cmd->mem, &vg->tags, tag)) {
			log_error("Failed to add tag %s to volume group %s",
				  tag, vg_name);
			return ECMD_FAILED;
		}
	}

	if (arg_count(cmd, clustered_ARG))
		clustered = !strcmp(arg_str_value(cmd, clustered_ARG, "n"),
				    "y");
	else
		/* Default depends on current locking type */
		clustered = locking_is_clustered();

	if (clustered)
		vg->status |= CLUSTERED;
	else
		vg->status &= ~CLUSTERED;

	/* Lock ordering: orphan PV lock first, then the new VG (non-blocking). */
	if (!lock_vol(cmd, ORPHAN, LCK_VG_WRITE)) {
		log_error("Can't get lock for orphan PVs");
		return ECMD_FAILED;
	}

	if (!lock_vol(cmd, vg_name, LCK_VG_WRITE | LCK_NONBLOCK)) {
		log_error("Can't get lock for %s", vg_name);
		unlock_vg(cmd, ORPHAN);
		return ECMD_FAILED;
	}

	/* archive() the metadata before the first write (presumably a no-op
	 * for a brand-new VG -- confirm against lib/format_text behavior). */
	if (!archive(vg)) {
		unlock_vg(cmd, vg_name);
		unlock_vg(cmd, ORPHAN);
		return ECMD_FAILED;
	}

	/* Store VG on disk(s) */
	if (!vg_write(vg) || !vg_commit(vg)) {
		unlock_vg(cmd, vg_name);
		unlock_vg(cmd, ORPHAN);
		return ECMD_FAILED;
	}

	unlock_vg(cmd, vg_name);
	unlock_vg(cmd, ORPHAN);

	backup(vg);

	log_print("Volume group \"%s\" successfully created", vg->name);

	return ECMD_PROCESSED;
}
int main(int argc, char **argv) { char *tmpdir; size_t tdlen; /* * Keep a reference to cwd, so we can always come back home. */ cwdfd = open(".", O_RDONLY | O_CLOEXEC); if (cwdfd < 0) { syswarn(1, errno, "Can't open current working directory."); return(exit_val); } /* * Where should we put temporary files? */ if ((tmpdir = getenv("TMPDIR")) == NULL || *tmpdir == '\0') tmpdir = _PATH_TMP; tdlen = strlen(tmpdir); while (tdlen > 0 && tmpdir[tdlen - 1] == '/') tdlen--; tempfile = malloc(tdlen + 1 + sizeof(_TFILE_BASE)); if (tempfile == NULL) { paxwarn(1, "Cannot allocate memory for temp file name."); return(exit_val); } if (tdlen) memcpy(tempfile, tmpdir, tdlen); tempbase = tempfile + tdlen; *tempbase++ = '/'; /* * parse options, determine operational mode, general init */ options(argc, argv); if ((gen_init() < 0) || (tty_init() < 0)) return(exit_val); /* * select a primary operation mode */ switch (act) { case EXTRACT: extract(); break; case ARCHIVE: archive(); break; case APPND: if (gzip_program != NULL) errx(1, "can not gzip while appending"); append(); break; case COPY: copy(); break; default: case LIST: list(); break; } return(exit_val); }
// Load a Rhino .3dm archive into the member "model" using the openNURBS
// toolkit and tally the object types it contains.
//
// @param inputFile  path of the .3dm file to read
// @return 0 on success, 1 if the archive could not be parsed,
//         2 if the file could not be opened
//
// Fixes: the original fell off the end of a non-void function (undefined
// return value) and ignored the result of model.Read(); unused locals
// (j, ok, line and the per-type cast pointers) are removed.
int gfxRhino3D::load3dm(QString inputFile){
    ON_TextLog dump_to_stdout;
    ON_TextLog* dump = &dump_to_stdout;

    // dump->Print("\nOpenNURBS Archive File: %s\n", inputFile.toAscii().data());

    // open the file containing the opennurbs archive
    FILE *infile = ON::OpenFile(inputFile.toAscii().data(), "rb");
    if (!infile)
        return 2;

    // create an archive object from the file pointer and read the whole
    // contents of the file into "model"
    ON_BinaryFile archive(ON::read3dm, infile);
    bool rc = model.Read(archive, dump);

    // close the file
    ON::CloseFile(infile);
    if (!rc)
        return 1;  // file opened but the 3dm archive could not be parsed

    // Attempt to cast every model object to each supported type and count
    // the hits.  The counts currently only feed the disabled diagnostic
    // dump below.
    int cld = 0, pnt = 0, crv = 0, srf = 0, brp = 0, msh = 0;
    for (int i = 0; i < model.m_object_table.Count(); i++){
        const ON_Object *obj = model.m_object_table[i].m_object;
        if (ON_PointCloud::Cast(obj)) cld++;
        if (ON_Point::Cast(obj)) pnt++;
        if (ON_Curve::Cast(obj)) crv++;
        if (ON_Surface::Cast(obj)) srf++;
        if (ON_Brep::Cast(obj)) brp++;
        if (ON_Mesh::Cast(obj)) msh++;
    }

    /* Diagnostic dump, disabled:
    printf("Model Contains:\n");
    printf("  %i Layers\n", model.m_layer_table.Count());
    printf("  %i Meshes\n", msh);
    printf("  %i Breps\n", brp);
    printf("  %i Surfaces\n", srf);
    printf("  %i Curves\n", crv);
    printf("  %i Points\n", pnt);
    printf("  %i Point Clouds\n", cld);
    */

    return 0;
}
/*
 * vgextend - add physical volumes to an existing volume group.
 * argv[0] is the VG name; the remaining arguments name the PVs to add.
 * Returns ECMD_PROCESSED on success, EINVALID_CMD_LINE or ECMD_FAILED on error.
 */
int vgextend(struct cmd_context *cmd, int argc, char **argv)
{
	char *vg_name;
	struct volume_group *vg = NULL;
	int r = ECMD_FAILED;
	struct pvcreate_params pp;

	if (!argc) {
		log_error("Please enter volume group name and "
			  "physical volume(s)");
		return EINVALID_CMD_LINE;
	}

	vg_name = skip_dev_dir(cmd, argv[0], NULL);
	/* Shift past the VG name; argc/argv now describe only the PVs. */
	argc--;
	argv++;

	if (arg_count(cmd, metadatacopies_ARG)) {
		log_error("Invalid option --metadatacopies, "
			  "use --pvmetadatacopies instead.");
		return EINVALID_CMD_LINE;
	}
	pvcreate_params_set_defaults(&pp);
	if (!pvcreate_params_validate(cmd, argc, argv, &pp)) {
		return EINVALID_CMD_LINE;
	}

	/* Open the VG for update; this also takes its lock. */
	log_verbose("Checking for volume group \"%s\"", vg_name);
	vg = vg_read_for_update(cmd, vg_name, NULL, 0);
	if (vg_read_error(vg)) {
		vg_release(vg);
		stack;
		return ECMD_FAILED;
	}

	/* The orphan lock is needed to claim the PVs being added. */
	if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE)) {
		log_error("Can't get lock for orphan PVs");
		unlock_and_release_vg(cmd, vg, vg_name);
		return ECMD_FAILED;
	}

	/* Snapshot the metadata before modifying; goto_bad is the project's
	 * "log stack and jump to bad" macro. */
	if (!archive(vg))
		goto_bad;

	/* extend vg */
	if (!vg_extend(vg, argc, argv, &pp))
		goto_bad;

	/* ret > 0 */
	log_verbose("Volume group \"%s\" will be extended by %d new "
		    "physical volumes", vg_name, argc);

	/* store vg on disk(s) */
	if (!vg_write(vg) || !vg_commit(vg))
		goto_bad;

	backup(vg);

	log_print("Volume group \"%s\" successfully extended", vg_name);
	r = ECMD_PROCESSED;
bad:
	/* Common exit: release both locks whether we succeeded or failed. */
	unlock_vg(cmd, VG_ORPHANS);
	unlock_and_release_vg(cmd, vg, vg_name);
	return r;
}
void saveToFile(const Mesh& mesh, const std::wstring& fileName) { std::ofstream fs(fileName, std::ios::binary); boost::archive::binary_oarchive archive(fs); archive << mesh; }
// Generates an exhaustive set of hardware test cases for one instruction:
// a cartesian product over the candidate values of every input field,
// implemented as a multi-digit "odometer" (indexCur counts, indexMax is each
// digit's radix), repeated once per combination of instruction flags, then
// serialized to tests/cpu/input/<name> with cereal.
static void generateTests(InstructionData *data)
{
   std::vector<size_t> indexCur, indexMax;  // odometer position / per-digit radix
   std::vector<bool> flagSet;               // current flag combination (binary counter)
   hwtest::TestFile testFile;
   auto complete = false;
   auto completeIndices = false;

   // One odometer digit per input field; the radix is the number of
   // candidate values for that field's register class or immediate kind.
   for (auto i = 0; i < data->read.size(); ++i) {
      auto &field = data->read[i];
      indexCur.push_back(0);

      switch (field) {
      case Field::rA:
      case Field::rB:
      case Field::rS:
         indexMax.push_back(gValuesGPR.size());
         break;
      case Field::frA:
      case Field::frB:
      case Field::frC:
      case Field::frS:
         indexMax.push_back(gValuesFPR.size());
         break;
      case Field::crbA:
      case Field::crbB:
         indexMax.push_back(gValuesCRB.size());
         break;
      case Field::simm:
         indexMax.push_back(gValuesSIMM.size());
         break;
      case Field::sh:
         indexMax.push_back(gValuesSH.size());
         break;
      case Field::mb:
         indexMax.push_back(gValuesMB.size());
         break;
      case Field::me:
         indexMax.push_back(gValuesME.size());
         break;
      case Field::uimm:
         indexMax.push_back(gValuesUIMM.size());
         break;
      case Field::XERC:
         indexMax.push_back(gValuesXERC.size());
         break;
      case Field::XERSO:
         indexMax.push_back(gValuesXERSO.size());
         break;
      default:
         assert(false);
      }
   }

   // One boolean per instruction flag, all initially unset.
   // NOTE(review): flagSet is advanced below but its values are never copied
   // into the generated TestData -- confirm whether flag handling is
   // intentionally unfinished.
   for (auto i = 0; i < data->flags.size(); ++i) {
      flagSet.push_back(false);
   }

   while (!complete) {
      // Registers are allocated sequentially from each bank's test base.
      uint32_t gpr = 0, fpr = 0, crf = 0, crb = 0;
      hwtest::TestData test;
      memset(&test, 0, sizeof(hwtest::TestData));
      test.instr = gInstructionTable.encode(data->id);

      for (auto i = 0; i < data->read.size(); ++i) {
         auto index = indexCur[i];

         // Generate read field values: encode the operand location in the
         // instruction and place the candidate value in the test input.
         switch (data->read[i]) {
         case Field::rA:
            test.instr.rA = gpr + hwtest::GPR_BASE;
            test.input.gpr[gpr++] = gValuesGPR[index];
            break;
         case Field::rB:
            test.instr.rB = gpr + hwtest::GPR_BASE;
            test.input.gpr[gpr++] = gValuesGPR[index];
            break;
         case Field::rS:
            test.instr.rS = gpr + hwtest::GPR_BASE;
            test.input.gpr[gpr++] = gValuesGPR[index];
            break;
         case Field::frA:
            test.instr.frA = fpr + hwtest::FPR_BASE;
            test.input.fr[fpr++] = gValuesFPR[index];
            break;
         case Field::frB:
            test.instr.frB = fpr + hwtest::FPR_BASE;
            test.input.fr[fpr++] = gValuesFPR[index];
            break;
         case Field::frC:
            test.instr.frC = fpr + hwtest::FPR_BASE;
            test.input.fr[fpr++] = gValuesFPR[index];
            break;
         case Field::frS:
            test.instr.frS = fpr + hwtest::FPR_BASE;
            test.input.fr[fpr++] = gValuesFPR[index];
            break;
         case Field::crbA:
            test.instr.crbA = (crb++) + hwtest::CRB_BASE;
            setCRB(test.input, test.instr.crbA, gValuesCRB[index]);
            break;
         case Field::crbB:
            test.instr.crbB = (crb++) + hwtest::CRB_BASE;
            setCRB(test.input, test.instr.crbB, gValuesCRB[index]);
            break;
         case Field::simm:
            test.instr.simm = gValuesSIMM[index];
            break;
         case Field::sh:
            test.instr.sh = gValuesSH[index];
            break;
         case Field::mb:
            test.instr.mb = gValuesMB[index];
            break;
         case Field::me:
            test.instr.me = gValuesME[index];
            break;
         case Field::uimm:
            test.instr.uimm = gValuesUIMM[index];
            break;
         case Field::XERC:
            test.input.xer.ca = gValuesXERC[index];
            break;
         case Field::XERSO:
            test.input.xer.so = gValuesXERSO[index];
            break;
         default:
            assert(false);
         }
      }

      // Generate write field values: only the operand location is encoded;
      // the value is produced by executing the instruction.
      for (auto field : data->write) {
         switch (field) {
         case Field::rA:
            test.instr.rA = gpr + hwtest::GPR_BASE;
            gpr++;
            break;
         case Field::rD:
            test.instr.rD = gpr + hwtest::GPR_BASE;
            gpr++;
            break;
         case Field::frD:
            test.instr.frD = fpr + hwtest::FPR_BASE;
            fpr++;
            break;
         case Field::crfD:
            test.instr.crfD = crf + hwtest::CRF_BASE;
            crf++;
            break;
         case Field::crbD:
            test.instr.crbD = crb + hwtest::CRB_BASE;
            crb++;
            break;
         case Field::XERC:
         case Field::XERSO:
         case Field::FCRISI:
         case Field::FCRZDZ:
         case Field::FCRIDI:
         case Field::FCRSNAN:
            // Status-flag outputs need no operand allocation.
            break;
         default:
            assert(false);
         }
      }

      testFile.tests.emplace_back(test);

      // Increase indices: odometer increment with carry.  A digit that wraps
      // resets to 0 and lets the loop carry into the next digit; when the
      // last digit wraps, one full pass over all value combinations is done.
      for (auto i = 0; i < indexCur.size(); ++i) {
         indexCur[i]++;
         if (indexCur[i] < indexMax[i]) {
            break;
         } else if (indexCur[i] == indexMax[i]) {
            indexCur[i] = 0;
            if (i == indexCur.size() - 1) {
               completeIndices = true;
            }
         }
      }

      if (completeIndices) {
         // No flags at all means a single pass finishes the whole job.
         if (flagSet.size() == 0) {
            complete = true;
            break;
         }
         completeIndices = false;

         // Do next flag! Advance flagSet as a binary counter; overflow of
         // the last bit means every combination has been generated.
         for (auto i = 0; i < flagSet.size(); ++i) {
            if (!flagSet[i]) {
               flagSet[i] = true;
               break;
            } else {
               flagSet[i] = false;
               if (i == flagSet.size() - 1) {
                  complete = true;
                  break;
               }
            }
         }
      }
   }

   // Save tests to file
   auto filename = std::string("tests/cpu/input/") + data->name;
   std::ofstream out { filename, std::ofstream::out | std::ofstream::binary };
   cereal::BinaryOutputArchive archive(out);
   archive(testFile);
}
// De-serializes a raw received buffer (MPI parcelport) into its individual
// parcels and hands each one to the parcelport, accounting de-serialization
// time and byte counts in "receive_data".
void decode_message(
    std::vector<char/*, allocator<char>*/ > const & parcel_data,
    boost::uint64_t inbound_data_size,
    parcelport& pp,
    performance_counters::parcels::data_point& receive_data
    )
{
    // Mirror the sender's serialization settings on the reading side.
    unsigned archive_flags = boost::archive::no_header;
    if (!pp.allow_array_optimizations())
        archive_flags |= util::disable_array_optimization;
    archive_flags |= util::disable_data_chunking;

    // protect from un-handled exceptions bubbling up
    try {
        try {
            // mark start of serialization
            util::high_resolution_timer timer;
            boost::int64_t overall_add_parcel_time = 0;

            // De-serialize the parcel data
            util::portable_binary_iarchive archive(parcel_data,
                inbound_data_size, archive_flags);

            // The message starts with the number of parcels it carries.
            std::size_t parcel_count = 0;
            archive >> parcel_count; //-V128
            BOOST_ASSERT(parcel_count > 0);

            for(std::size_t i = 0; i != parcel_count; ++i)
            {
                // de-serialize parcel and add it to incoming parcel queue
                parcel p;
                archive >> p;

                // make sure this parcel ended up on the right locality
                BOOST_ASSERT(p.get_destination_locality().get_rank() == pp.here().get_rank());

                // be sure not to measure add_parcel as serialization time
                boost::int64_t add_parcel_time = timer.elapsed_nanoseconds();
                pp.add_received_parcel(p);
                overall_add_parcel_time +=
                    timer.elapsed_nanoseconds() - add_parcel_time;
            }

            // complete received data with parcel count
            receive_data.num_parcels_ = parcel_count;
            receive_data.raw_bytes_ = archive.bytes_read();

            // store the time required for serialization
            // (total elapsed minus the time spent in add_received_parcel)
            receive_data.serialization_time_ =
                timer.elapsed_nanoseconds() - overall_add_parcel_time;

            pp.add_received_data(receive_data);
        }
        catch (hpx::exception const& e) {
            LPT_(error)
                << "decode_message(mpi): caught hpx::exception: " << e.what();
            hpx::report_error(boost::current_exception());
        }
        catch (boost::system::system_error const& e) {
            LPT_(error)
                << "decode_message(mpi): caught boost::system::error: " << e.what();
            hpx::report_error(boost::current_exception());
        }
        catch (boost::exception const&) {
            LPT_(error)
                << "decode_message(mpi): caught boost::exception.";
            hpx::report_error(boost::current_exception());
        }
        catch (std::exception const& e) {
            // We have to repackage all exceptions thrown by the
            // serialization library as otherwise we will loose the
            // e.what() description of the problem, due to slicing.
            boost::throw_exception(boost::enable_error_info(
                hpx::exception(serialization_error, e.what())));
        }
    }
    catch (...) {
        LPT_(error)
            << "decode_message(mpi): caught unknown exception.";
        hpx::report_error(boost::current_exception());
    }
}
int main() { std::cout << "[moeoQuadTreeArchive]\t=>\t"; moeoQuadTree<ObjectiveVector> tree; bool empty= tree.isEmpty(); std::cout <<"empty? " << empty << std::endl; ObjectiveVector obj1; obj1[0]=10.0; obj1[1]=10.0; obj1[2]=10.0; ObjectiveVector obj2; obj2[0]=9.0; obj2[1]=9.0; obj2[2]=9.0; ObjectiveVector obj3; obj3[0]=2.0; obj3[1]=11.0; obj3[2]=11.0; ObjectiveVector obj4; obj4[0]=1.0; obj4[1]=10.0; obj4[2]=10.0; ObjectiveVector obj5; obj5[0]=2.0; obj5[1]=2.0; obj5[2]=2.0; ObjectiveVector obj6; obj6[0]=26.0; obj6[1]=0.0; obj6[2]=5.0; ObjectiveVector obj7; obj7[0]=56.0; obj7[1]=22.0; obj7[2]=0.0; ObjectiveVector obj8; obj8[0]=87.0; obj8[1]=42.0; obj8[2]=62.0; ObjectiveVector obj9; obj9[0]=90.0; obj9[1]=69.0; obj9[2]=83.0; ObjectiveVector obj10; obj10[0]=68.0; obj10[1]=89.0; obj10[2]=22.0; // QuadTreeNode<ObjectiveVector> hop(obj1); // QuadTreeNode<ObjectiveVector> hop2(obj2); // QuadTreeNode<ObjectiveVector> hop3(obj3); // QuadTreeNode<ObjectiveVector> hop4(obj4); // empty = hop.getSubTree().empty(); // std::cout <<"empty? 
" << empty << std::endl; // std::vector< QuadTreeNode<ObjectiveVector> > nodes; // nodes.push_back(hop); // nodes.push_back(hop2); // nodes.push_back(hop3); // std::cout << nodes[1].getVec() << std::endl; // std::cout << "size: " << nodes.size() << std::endl; // tree.insert(obj1); // tree.insert(obj2); // tree.insert(obj3); // tree.insert(obj4); // tree.insert(obj5); std::cout << "\n\n\n"; // tree.insert(obj6); // tree.insert(obj7); // tree.insert(obj8); // tree.insert(obj9); // tree.insert(obj10); moeoUnboundedArchive<Solution> archive(false); eoPop<Solution> pop; pop.resize(1000); int tmp; for(int i= 0; i<1000 ; i++){ ObjectiveVector obj; obj[0]=floor(rng.uniform()*100); obj[1]=floor(rng.uniform()*100); obj[2]=floor(rng.uniform()*100); std::cout << obj << std::endl; pop[i].objectiveVector(obj); tree.insert(obj); archive(pop[i]); tree.printTree(); std::cout << std::endl; std::cout << std::endl; std::cout << "archive: " << archive << std::endl; // std::cin >> tmp; } // QuadTreeNode<ObjectiveVector> * a = tree.getRoot(); // QuadTreeNode<ObjectiveVector> * b = a->getSubTree()[1]; // QuadTreeNode<ObjectiveVector> * c = b->getSubTree()[2]; // // tree.reinsert(a,c); // std::cout << "achive: " << archive << std::endl; tree.printTree(); std::cout << "OK" << std::endl; return EXIT_SUCCESS; }
// Round-trips a parcel through the portable binary output/input archives and
// checks that every observable property survives serialization.
// zero_copy selects whether the output archive may externalize large data
// into out_chunks instead of inlining it in the buffer.
void test_parcel_serialization(hpx::parcelset::parcel outp,
    int in_archive_flags, int out_archive_flags, bool zero_copy)
{
    // serialize data
    std::size_t arg_size = hpx::traits::get_type_size(outp);
    std::vector<char> out_buffer;
    std::vector<hpx::util::serialization_chunk> out_chunks;

    // Reserve the estimated payload size plus the fixed envelope overhead.
    out_buffer.resize(arg_size + HPX_PARCEL_SERIALIZATION_OVERHEAD);

    {
        // create an output archive and serialize the parcel
        hpx::util::portable_binary_oarchive archive(
            out_buffer, zero_copy ? &out_chunks : 0, 0, out_archive_flags);
        archive << outp;
        arg_size = archive.bytes_written();
    }

    // Shrink the buffer to the number of bytes actually written.
    out_buffer.resize(arg_size);

    // deserialize data
    hpx::parcelset::parcel inp;

    {
        // create an input archive and deserialize the parcel
        hpx::util::portable_binary_iarchive archive(
            out_buffer, &out_chunks, arg_size, in_archive_flags);
        archive >> inp;
    }

    // make sure the parcel has been deserialized properly
    HPX_TEST_EQ(outp.get_parcel_id(), inp.get_parcel_id());
    HPX_TEST_EQ(outp.get_source(), inp.get_source());
    HPX_TEST_EQ(outp.get_destination_locality(), inp.get_destination_locality());
    HPX_TEST_EQ(outp.get_start_time(), inp.get_start_time());

    // Compare the embedded actions field by field.
    hpx::actions::action_type outact = outp.get_action();
    hpx::actions::action_type inact = inp.get_action();

    HPX_TEST_EQ(outact->get_component_type(), inact->get_component_type());
    HPX_TEST_EQ(outact->get_action_name(), inact->get_action_name());
    HPX_TEST_EQ(outact->get_action_type(), inact->get_action_type());
    HPX_TEST_EQ(outact->get_parent_locality_id(), inact->get_parent_locality_id());
    HPX_TEST_EQ(outact->get_parent_thread_id(), inact->get_parent_thread_id());
    HPX_TEST_EQ(outact->get_parent_thread_phase(), inact->get_parent_thread_phase());
    HPX_TEST_EQ(outact->get_thread_priority(), inact->get_thread_priority());
    HPX_TEST_EQ(outact->get_thread_stacksize(), inact->get_thread_stacksize());
    // NOTE(review): duplicate of the get_parent_thread_phase() check above.
    HPX_TEST_EQ(outact->get_parent_thread_phase(), inact->get_parent_thread_phase());

    // Compare the continuations.
    hpx::actions::continuation_type outcont = outp.get_continuation();
    hpx::actions::continuation_type incont = inp.get_continuation();

    HPX_TEST_EQ(outcont->get_continuation_name(), incont->get_continuation_name());
    HPX_TEST_EQ(outcont->get_gid(), incont->get_gid());

    //// invoke action encapsulated in inp
    //naming::address const* inaddrs = pin.get_destination_addrs();
    //hpx::threads::thread_init_data data;
    //inact->get_thread_init_data(inaddrs[0].address_, data);
    //data.func(hpx::threads::wait_signaled);
}