ERRORCODE BackgroundObject::read_data(StorageDevicePtr device)
{
    ERRORCODE error = GraphicObject::read_data(device);
    if (error == ERRORCODE_None) {
        // Convert to a graphic.
        record.select_flags = SELECT_FLAG_boundary | SELECT_FLAG_size_handles
                            | SELECT_FLAG_move_handle | SELECT_FLAG_rotate_handle;
        remove_flags(OBJECT_FLAG_no_mask | OBJECT_FLAG_landscape);
        remove_refresh_flags(REFRESH_FLAG_opaque);

        ST_DEV_POSITION pos;
        device->tell(&pos);
        UpdateRotateHandle();
        shrink_to_fit();
        device->seek(pos, ST_DEV_SEEK_SET);

        my_type = OBJECT_TYPE_Graphic;
    }
    return error;
}
mmap_vector_base(int fd, size_t capacity, size_t size = 0) :
    m_size(size),
    m_mapping(capacity, osmium::util::MemoryMapping::mapping_mode::write_shared, fd) {
    assert(size <= capacity);
    std::fill(data() + size, data() + capacity, osmium::index::empty_value<T>());
    shrink_to_fit();
}
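// Note: the shrink_to_fit() called in the constructor above is the class's own
// member, not std::vector's -- this container is backed by an
// osmium::util::MemoryMapping, so shrinking means remapping the file-backed
// region. A minimal sketch of what such a member might look like (an assumed
// implementation, not the actual libosmium source; it presumes
// MemoryMapping::resize() remaps the region to the requested byte size):
void shrink_to_fit() {
    m_mapping.resize(sizeof(T) * m_size);  // drop the unused tail of the mapping
}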
pmr_vector<char> _train_dictionary(const pmr_vector<T>& values, const pmr_vector<size_t>& sample_sizes) {
  /**
   * The recommended dictionary size is about 1/100th of the size of all samples combined, but the size also has to
   * be at least 1KB. Smaller dictionaries won't work.
   */
  auto max_dictionary_size = values.size() / 100;
  max_dictionary_size = std::max(max_dictionary_size, _minimum_dictionary_size);

  auto dictionary = pmr_vector<char>{values.get_allocator()};
  size_t dictionary_size;

  // If the input does not contain enough values, it won't be possible to train a dictionary for it.
  if (values.size() < _minimum_value_size) {
    return dictionary;
  }

  dictionary.resize(max_dictionary_size);
  dictionary_size = ZDICT_trainFromBuffer(dictionary.data(), max_dictionary_size, values.data(),
                                          sample_sizes.data(), static_cast<unsigned>(sample_sizes.size()));

  // If the generation failed, then compress without a dictionary (the compression ratio will suffer).
  if (ZDICT_isError(dictionary_size)) {
    return pmr_vector<char>{};
  }

  DebugAssert(dictionary_size <= max_dictionary_size,
              "Generated ZSTD dictionary in LZ4 compression is larger than the memory allocated for it.");

  // Shrink the allocated dictionary size to the actual size.
  dictionary.resize(dictionary_size);
  dictionary.shrink_to_fit();

  return dictionary;
}
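// A minimal, self-contained sketch of the ZDICT_trainFromBuffer() contract used
// above, with plain std::vector and hypothetical sample data: the samples must
// be concatenated into one buffer, and the per-sample sizes must sum to that
// buffer's size. Assumes zstd's zdict.h is available.
#include <zdict.h>
#include <algorithm>
#include <cstdio>
#include <vector>

std::vector<char> train_dictionary_sketch(const std::vector<char>& samples,
                                          const std::vector<size_t>& sample_sizes) {
  std::vector<char> dict(std::max<size_t>(samples.size() / 100, 1024));
  const size_t dict_size = ZDICT_trainFromBuffer(dict.data(), dict.size(), samples.data(),
                                                 sample_sizes.data(),
                                                 static_cast<unsigned>(sample_sizes.size()));
  if (ZDICT_isError(dict_size)) {
    std::fprintf(stderr, "dictionary training failed: %s\n", ZDICT_getErrorName(dict_size));
    return {};
  }
  dict.resize(dict_size);
  dict.shrink_to_fit();  // same trim-the-overallocation pattern as above
  return dict;
}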
void shrink_to_fit() {
  for (size_t i = 0; i < elements_.size(); ++i) {
    elements_[i].shrink_to_fit();
  }
  elements_.shrink_to_fit();
}
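// std::vector::shrink_to_fit() is only a non-binding request -- the standard
// allows an implementation to ignore it. Where a trim is wanted on older
// toolchains, the classic pre-C++11 copy-and-swap idiom achieves the same
// effect; a minimal sketch:
#include <vector>

template <typename T>
void force_shrink(std::vector<T>& v) {
  std::vector<T>(v).swap(v);  // the temporary typically allocates exactly v.size()
}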
vector<OERegion>
ComputeDoHExtrema::operator()(const Image<float>& I,
                              vector<Point2i> *scale_octave_pairs)
{
  ImagePyramid<float>& gaussPyr = _gaussians;
  ImagePyramid<float>& det_hess_pyr = _det_hessians;

  gaussPyr = gaussian_pyramid(I, pyr_params_);
  det_hess_pyr = det_of_hessian_pyramid(gaussPyr);

  vector<OERegion> det_hess_extrema;
  det_hess_extrema.reserve(int(1e4));
  if (scale_octave_pairs)
  {
    scale_octave_pairs->clear();
    scale_octave_pairs->reserve(int(1e4));
  }

  for (int o = 0; o < det_hess_pyr.num_octaves(); ++o)
  {
    // Be careful of the bounds. We go from 1 to N-1.
    for (int s = 1; s < det_hess_pyr.num_scales_per_octave() - 1; ++s)
    {
      vector<OERegion> new_det_hess_extrema(local_scale_space_extrema(
        det_hess_pyr, s, o, _extremum_thres, _edge_ratio_thres,
        _img_padding_sz, _extremum_refinement_iter));
      append(det_hess_extrema, new_det_hess_extrema);

      if (scale_octave_pairs)
      {
        for (size_t i = 0; i != new_det_hess_extrema.size(); ++i)
          scale_octave_pairs->push_back(Point2i(s, o));
      }
    }
  }
  shrink_to_fit(det_hess_extrema);
  return det_hess_extrema;
}
vector<OERegion>
ComputeLoGExtrema::operator()(const Image<float>& I,
                              vector<Point2i> *scale_octave_pairs)
{
  auto& G = _gaussians;
  auto& L = _laplacians_of_gaussians;

  G = gaussian_pyramid(I, _params);
  L = laplacian_pyramid(G);

  auto extrema = vector<OERegion>{};
  const auto preallocated_size = int(1e4);
  extrema.reserve(preallocated_size);
  if (scale_octave_pairs)
  {
    scale_octave_pairs->clear();
    scale_octave_pairs->reserve(preallocated_size);
  }

  for (int o = 0; o < L.num_octaves(); ++o)
  {
    // Be careful of the bounds. We go from 1 to N-1.
    for (int s = 1; s < L.num_scales_per_octave() - 1; ++s)
    {
      auto new_extrema = local_scale_space_extrema(
        L, s, o, _extremum_thres, _edge_ratio_thres,
        _img_padding_sz, _extremum_refinement_iter);
      append(extrema, new_extrema);

      if (scale_octave_pairs)
      {
        for (size_t i = 0; i != new_extrema.size(); ++i)
          scale_octave_pairs->push_back(Point2i(s, o));
      }
    }
  }
  shrink_to_fit(extrema);
  return extrema;
}
vector<OERegion>
ComputeDoHExtrema::operator()(const Image<float>& I,
                              vector<Point2i> *scaleOctavePairs)
{
  ImagePyramid<float>& gaussPyr = gaussians_;
  ImagePyramid<float>& detHessPyr = det_hessians_;

  gaussPyr = DO::gaussianPyramid(I, pyr_params_);
  detHessPyr = DoHPyramid(gaussPyr);

  vector<OERegion> detHessExtrema;
  detHessExtrema.reserve(int(1e4));
  if (scaleOctavePairs)
  {
    scaleOctavePairs->clear();
    scaleOctavePairs->reserve(int(1e4));
  }

  for (int o = 0; o < detHessPyr.numOctaves(); ++o)
  {
    // Be careful of the bounds. We go from 1 to N-1.
    for (int s = 1; s < detHessPyr.numScalesPerOctave() - 1; ++s)
    {
      vector<OERegion> newDetHessExtrema(localScaleSpaceExtrema(
        detHessPyr, s, o, extremum_thres_, edge_ratio_thres_,
        img_padding_sz_, extremum_refinement_iter_));
      append(detHessExtrema, newDetHessExtrema);

      if (scaleOctavePairs)
      {
        for (size_t i = 0; i != newDetHessExtrema.size(); ++i)
          scaleOctavePairs->push_back(Point2i(s, o));
      }
    }
  }
  shrink_to_fit(detHessExtrema);
  return detHessExtrema;
}
vector<OERegion>
ComputeDoGExtrema::operator()(const Image<float>& image,
                              vector<Point2i> *scale_octave_pairs)
{
  auto& G = _gaussians;
  auto& D = _diff_of_gaussians;

  G = gaussian_pyramid(image, _pyramid_params);
  D = difference_of_gaussians_pyramid(G);

  auto extrema = vector<OERegion>{};
  extrema.reserve(int(1e4));
  if (scale_octave_pairs)
  {
    scale_octave_pairs->clear();
    scale_octave_pairs->reserve(int(1e4));
  }

  for (int o = 0; o < D.num_octaves(); ++o)
  {
    // Be careful of the bounds. We go from 1 to N-1.
    for (int s = 1; s < D.num_scales_per_octave() - 1; ++s)
    {
      auto new_extrema = local_scale_space_extrema(
        D, s, o, _extremum_thres, _edge_ratio_thres,
        _img_padding_sz, _extremum_refinement_iter);
      append(extrema, new_extrema);

      if (scale_octave_pairs)
      {
        for (size_t i = 0; i != new_extrema.size(); ++i)
          scale_octave_pairs->push_back(Point2i(s, o));
      }
    }
  }
  shrink_to_fit(extrema);
  return extrema;
}
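// The four pyramid snippets above call non-member helpers append() and
// shrink_to_fit() that are not shown here. A minimal sketch of what they
// presumably do (assumed signatures, not the actual library source):
#include <vector>

template <typename T>
void append(std::vector<T>& dst, const std::vector<T>& src) {
  dst.insert(dst.end(), src.begin(), src.end());
}

template <typename T>
void shrink_to_fit(std::vector<T>& v) {
  v.shrink_to_fit();  // release the slack left by the generous reserve(int(1e4))
}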
void testRandom(size_t numSteps = 10000) {
  describePlatform();

  auto target = folly::make_unique<T>();
  std::vector<bool> valid;

  for (size_t step = 0; step < numSteps; ++step) {
    auto pct = folly::Random::rand32(100);
    auto v = folly::Random::rand32(uint32_t{3} << folly::Random::rand32(14));
    if (pct < 5) {
      doClear(*target, valid);
    } else if (pct < 30) {
      T copy;
      folly::resizeWithoutInitialization(copy, target->size());
      for (size_t i = 0; i < copy.size(); ++i) {
        if (valid[i]) {
          copy[i] = target->at(i);
        }
      }
      if (pct < 10) {
        std::swap(copy, *target);
      } else if (pct < 15) {
        *target = std::move(copy);
      } else if (pct < 20) {
        *target = copy;
      } else if (pct < 25) {
        target = folly::make_unique<T>(std::move(copy));
      } else {
        target = folly::make_unique<T>(copy);
      }
    } else if (pct < 35) {
      target->reserve(v);
    } else if (pct < 40) {
      target->shrink_to_fit();
    } else if (pct < 45) {
      doResize(*target, valid, v);
    } else if (pct < 50) {
      doInsert(*target, valid, v % (target->size() + 1));
    } else if (pct < 55) {
      if (!target->empty()) {
        doErase(*target, valid, v % target->size());
      }
    } else if (pct < 60) {
      doPushBack(*target, valid);
    } else if (pct < 65) {
      target = folly::make_unique<T>();
      valid.clear();
    } else if (pct < 80) {
      auto v2 = folly::Random::rand32(uint32_t{3} << folly::Random::rand32(14));
      doOverwrite(*target, valid, std::min(v, v2), std::max(v, v2));
    } else {
      doResizeWithoutInit(*target, valid, v);
    }

    // don't check every time, in case the implementation does lazy work
    if (folly::Random::rand32(100) < 50) {
      check(*target);
    }
  }
}
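// folly::resizeWithoutInitialization() (folly/memory/UninitializedMemoryHacks.h)
// grows a container without value-initializing the new elements, which is why
// the fuzzer above tracks a separate `valid` bitmap. The hack has to be declared
// once per element type before use (folly may already provide some common
// instantiations); a minimal usage sketch under that assumption:
#include <folly/memory/UninitializedMemoryHacks.h>
#include <vector>

FOLLY_DECLARE_VECTOR_RESIZE_WITHOUT_INIT(int)

void resize_without_init_sketch() {
  std::vector<int> v;
  folly::resizeWithoutInitialization(v, 1024);  // new contents are indeterminate
  // ... the caller must write every element before reading it ...
}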
void
NonlocalMaterialExtensionInterface :: buildNonlocalPointTable(GaussPoint *gp)
{
    double elemVolume, integrationVolume = 0.;

    NonlocalMaterialStatusExtensionInterface *statusExt =
        static_cast< NonlocalMaterialStatusExtensionInterface * >( gp->giveMaterialStatus()->
            giveInterface(NonlocalMaterialStatusExtensionInterfaceType) );

    if ( !statusExt ) {
        OOFEM_ERROR("local material status encountered");
    }

    if ( !statusExt->giveIntegrationDomainList()->empty() ) {
        return;                 // already done
    }

    auto iList = statusExt->giveIntegrationDomainList();

    FloatArray gpCoords, jGpCoords, shiftedGpCoords;
    if ( gp->giveElement()->computeGlobalCoordinates( gpCoords, gp->giveNaturalCoordinates() ) == 0 ) {
        OOFEM_ERROR("computeGlobalCoordinates of target failed");
    }

    // If nonlocal variation is set to the distance-based approach, a new nonlocal radius
    // is calculated as a function of the distance from the Gauss point to the nonlocal boundaries.
    if ( nlvar == NLVT_DistanceBasedLinear || nlvar == NLVT_DistanceBasedExponential ) {
        // cl=cl0;
        cl = giveDistanceBasedInteractionRadius(gpCoords);
        suprad = evaluateSupportRadius();
    }

    // If the mesh represents a periodic cell, nonlocal interaction is considered not only for the real neighbors
    // but also for their periodic images, shifted by +px or -px in the x-direction. In the implementation,
    // instead of shifting the potential neighbors, we shift the receiver point gp. In the non-periodic case (typical),
    // px=0 and the following loop is executed only once.
    int nx = 0;                 // typical case
    if ( px > 0. ) {
        nx = 1;                 // periodicity taken into account
    }

    for ( int ix = -nx; ix <= nx; ix++ ) { // loop over periodic images shifted in x-direction
        SpatialLocalizer :: elementContainerType elemSet;
        shiftedGpCoords = gpCoords;
        shiftedGpCoords.at(1) += ix * px;

        // ask domain spatial localizer for list of elements with IP within this zone
#ifdef NMEI_USE_ALL_ELEMENTS_IN_SUPPORT
        this->giveDomain()->giveSpatialLocalizer()->giveAllElementsWithNodesWithinBox(elemSet, shiftedGpCoords, suprad);
        // insert element containing given gp
        elemSet.insert( gp->giveElement()->giveNumber() );
#else
        this->giveDomain()->giveSpatialLocalizer()->giveAllElementsWithIpWithinBox_EvenIfEmpty(elemSet, shiftedGpCoords, suprad);
#endif
        // initialize iList
        iList->reserve(elemSet.giveSize());

        for ( auto elindx: elemSet ) {
            Element *ielem = this->giveDomain()->giveElement(elindx);
            if ( regionMap.at( ielem->giveRegionNumber() ) == 0 ) {
                for ( auto &jGp: *ielem->giveDefaultIntegrationRulePtr() ) {
                    if ( ielem->computeGlobalCoordinates( jGpCoords, jGp->giveNaturalCoordinates() ) ) {
                        double weight = this->computeWeightFunction(shiftedGpCoords, jGpCoords);

                        // manipulate weights for a special averaging of strain (OFF by default)
                        this->manipulateWeight(weight, gp, jGp);

                        this->applyBarrierConstraints(shiftedGpCoords, jGpCoords, weight);
#ifdef NMEI_USE_ALL_ELEMENTS_IN_SUPPORT
                        if ( 1 ) {
#else
                        if ( weight > 0. ) {
#endif
                            localIntegrationRecord ir;
                            ir.nearGp = jGp;   // store gp
                            elemVolume = weight * jGp->giveElement()->computeVolumeAround(jGp);
                            ir.weight = elemVolume;   // store gp weight
                            iList->push_back(ir);     // store own copy in list
                            integrationVolume += elemVolume;
                        }
                    } else {
                        OOFEM_ERROR("computeGlobalCoordinates of target failed");
                    }
                }
            }
        } // loop over elements

        iList->shrink_to_fit();
    }

    statusExt->setIntegrationScale(integrationVolume); // store scaling factor
}

void
NonlocalMaterialExtensionInterface :: rebuildNonlocalPointTable(GaussPoint *gp, IntArray *contributingElems)
{
    double weight, elemVolume, integrationVolume = 0.;

    NonlocalMaterialStatusExtensionInterface *statusExt =
        static_cast< NonlocalMaterialStatusExtensionInterface * >( gp->giveMaterialStatus()->
            giveInterface(NonlocalMaterialStatusExtensionInterfaceType) );

    if ( !statusExt ) {
        OOFEM_ERROR("local material status encountered");
    }

    auto iList = statusExt->giveIntegrationDomainList();
    iList->clear();

    if ( contributingElems == NULL ) {
        // no element table provided, use standard method
        this->buildNonlocalPointTable(gp);
    } else {
        FloatArray gpCoords, jGpCoords;
        int _size = contributingElems->giveSize();
        if ( gp->giveElement()->computeGlobalCoordinates( gpCoords, gp->giveNaturalCoordinates() ) == 0 ) {
            OOFEM_ERROR("computeGlobalCoordinates of target failed");
        }

        // If nonlocal variation is set to the distance-based approach, a new nonlocal radius
        // is calculated based on the distance from the nonlocal boundaries.
        if ( nlvar == NLVT_DistanceBasedLinear || nlvar == NLVT_DistanceBasedExponential ) {
            cl = cl0;
            cl = giveDistanceBasedInteractionRadius(gpCoords);
            suprad = evaluateSupportRadius();
        }

        // initialize iList
        iList->reserve(_size);

        for ( int _e = 1; _e <= _size; _e++ ) {
            Element *ielem = this->giveDomain()->giveElement( contributingElems->at(_e) );
            if ( regionMap.at( ielem->giveRegionNumber() ) == 0 ) {
                for ( auto &jGp: *ielem->giveDefaultIntegrationRulePtr() ) {
                    if ( ielem->computeGlobalCoordinates( jGpCoords, jGp->giveNaturalCoordinates() ) ) {
                        weight = this->computeWeightFunction(gpCoords, jGpCoords);

                        // manipulate weights for a special averaging of strain (OFF by default)
                        this->manipulateWeight(weight, gp, jGp);

                        this->applyBarrierConstraints(gpCoords, jGpCoords, weight);
#ifdef NMEI_USE_ALL_ELEMENTS_IN_SUPPORT
                        if ( 1 ) {
#else
                        if ( weight > 0. ) {
#endif
                            localIntegrationRecord ir;
                            ir.nearGp = jGp;   // store gp
                            elemVolume = weight * jGp->giveElement()->computeVolumeAround(jGp);
                            ir.weight = elemVolume;   // store gp weight
                            iList->push_back(ir);     // store own copy in list
                            integrationVolume += elemVolume;
                        }
                    } else {
                        OOFEM_ERROR("computeGlobalCoordinates of target failed");
                    }
                }
            }
        } // loop over elements

        statusExt->setIntegrationScale(integrationVolume); // remember scaling factor

#ifdef __PARALLEL_MODE
 #ifdef __VERBOSE_PARALLEL
        fprintf( stderr, "%d(%d):", gp->giveElement()->giveGlobalNumber(), gp->giveNumber() );
        for ( auto &lir: *iList ) {
            fprintf(stderr, "%d,%d(%e)", lir.nearGp->giveElement()->giveGlobalNumber(), lir.nearGp->giveNumber(), lir.weight);
        }
        fprintf(stderr, "\n");
 #endif
#endif
    }
}

std :: vector< localIntegrationRecord > *
NonlocalMaterialExtensionInterface :: giveIPIntegrationList(GaussPoint *gp)
{
    NonlocalMaterialStatusExtensionInterface *statusExt =
        static_cast< NonlocalMaterialStatusExtensionInterface * >( gp->giveMaterialStatus()->
            giveInterface(NonlocalMaterialStatusExtensionInterfaceType) );

    if ( !statusExt ) {
        OOFEM_ERROR("local material status encountered");
    }

    if ( statusExt->giveIntegrationDomainList()->empty() ) {
        this->buildNonlocalPointTable(gp);
    }

    return statusExt->giveIntegrationDomainList();
}
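// Downstream, the stored records implement the standard nonlocal average
//   xbar(gp) = ( sum_j w_j * V_j * x(jGp) ) / ( sum_j w_j * V_j ),
// where each ir.weight above already holds w_j * V_j and the integration scale
// holds the denominator. A minimal consumer sketch, written as a hypothetical
// member of the same interface (localValueAt() is an invented accessor for the
// local quantity being averaged; giveIntegrationScale() is assumed to return
// the value stored via setIntegrationScale() above):
double
NonlocalMaterialExtensionInterface :: nonlocalAverageSketch(GaussPoint *gp)
{
    auto *statusExt = static_cast< NonlocalMaterialStatusExtensionInterface * >( gp->giveMaterialStatus()->
        giveInterface(NonlocalMaterialStatusExtensionInterfaceType) );

    double sum = 0.;
    for ( auto &lir: *this->giveIPIntegrationList(gp) ) {  // builds the table on first use
        sum += lir.weight * localValueAt(lir.nearGp);      // hypothetical accessor
    }
    return sum / statusExt->giveIntegrationScale();        // assumed getter for the stored scale
}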
void
exit_handler_intel_x64::unittest_1002_containers_vector() const
{
    auto myvector = std::vector<int>({0, 1, 2, 3});
    auto myvector2 = std::vector<int>({0, 1, 2, 3});

    auto total = 0;
    for (auto iter = myvector.begin(); iter != myvector.end(); iter++)
        total += *iter;

    auto rtotal = 0;
    for (auto iter = myvector.rbegin(); iter != myvector.rend(); iter++)
        rtotal += *iter;

    auto ctotal = 0;
    for (auto iter = myvector.cbegin(); iter != myvector.cend(); iter++)
        ctotal += *iter;

    auto crtotal = 0;
    for (auto iter = myvector.crbegin(); iter != myvector.crend(); iter++)
        crtotal += *iter;

    expect_true(total == 6);
    expect_true(rtotal == 6);
    expect_true(ctotal == 6);
    expect_true(crtotal == 6);

    expect_true(myvector.size() == 4);
    expect_true(myvector.max_size() >= 4);
    myvector.resize(4);
    expect_true(myvector.capacity() >= 4);
    expect_false(myvector.empty());
    myvector.reserve(4);
    myvector.shrink_to_fit();

    expect_true(myvector.at(0) == 0);
    expect_true(myvector.at(3) == 3);
    expect_true(myvector.front() == 0);
    expect_true(myvector.back() == 3);
    expect_true(myvector.data() != nullptr);

    myvector.assign(4, 0);
    myvector = myvector2;

    myvector.push_back(4);
    myvector.pop_back();
    myvector = myvector2;

    myvector.insert(myvector.begin(), 0);
    myvector.erase(myvector.begin());
    myvector = myvector2;

    myvector.swap(myvector2);
    std::swap(myvector, myvector2);

    myvector.emplace(myvector.begin());
    myvector.emplace_back();
    myvector = myvector2;

    expect_true(myvector == myvector2);
    expect_false(myvector != myvector2);
    expect_false(myvector < myvector2);
    expect_false(myvector > myvector2);
    expect_true(myvector <= myvector2);
    expect_true(myvector >= myvector2);

    myvector = myvector2;
    myvector.get_allocator();
    myvector.clear();
}
void _compress(pmr_vector<T>& values, pmr_vector<pmr_vector<char>>& lz4_blocks, const pmr_vector<char>& dictionary) {
  /**
   * Here begins the LZ4 compression. The library provides a function to create a stream. The stream is used with
   * every new block that is to be compressed, but the stream returns a raw pointer to an internal structure.
   * The stream memory is freed with another call to a library function after compression is done.
   */
  auto lz4_stream = LZ4_createStreamHC();
  // We use the maximum high-compression level available in LZ4 for the best compression ratios.
  LZ4_resetStreamHC(lz4_stream, LZ4HC_CLEVEL_MAX);

  const auto input_size = values.size() * sizeof(T);
  auto num_blocks = input_size / _block_size;
  // Only add the last, not-full block if the data doesn't perfectly fit into the block size.
  if (input_size % _block_size != 0) {
    num_blocks++;
  }
  lz4_blocks.reserve(num_blocks);

  for (auto block_index = size_t{0u}; block_index < num_blocks; ++block_index) {
    auto decompressed_block_size = _block_size;
    // The last block's uncompressed size varies.
    if (block_index + 1 == num_blocks) {
      decompressed_block_size = input_size - (block_index * _block_size);
    }
    // LZ4_compressBound returns an upper bound for the size of the compressed data.
    const auto block_bound = static_cast<size_t>(LZ4_compressBound(static_cast<int>(decompressed_block_size)));
    auto compressed_block = pmr_vector<char>{values.get_allocator()};
    compressed_block.resize(block_bound);

    /**
     * If we previously trained a dictionary, we use it to initialize LZ4. Otherwise, LZ4 would use the previously
     * compressed block instead, which would cause the blocks to depend on one another.
     * If we have no dictionary present and compress at least a second block (i.e., block_index > 0), then we reset
     * the LZ4 stream to maintain the independence of the blocks. This only happens when the column does not contain
     * enough data to produce a zstd dictionary (e.g., a column of single-character strings).
     */
    if (!dictionary.empty()) {
      LZ4_loadDictHC(lz4_stream, dictionary.data(), static_cast<int>(dictionary.size()));
    } else if (block_index) {
      LZ4_resetStreamHC(lz4_stream, LZ4HC_CLEVEL_MAX);
    }

    // The offset in the source data where the current block starts.
    const auto value_offset = block_index * _block_size;
    // Move the pointer to the start position and pass it to the actual compression method.
    const int compression_result = LZ4_compress_HC_continue(
        lz4_stream, reinterpret_cast<char*>(values.data()) + value_offset, compressed_block.data(),
        static_cast<int>(decompressed_block_size), static_cast<int>(block_bound));

    Assert(compression_result > 0, "LZ4 stream compression failed");

    // Shrink the block vector to the actual size of the compressed result.
    compressed_block.resize(static_cast<size_t>(compression_result));
    compressed_block.shrink_to_fit();

    lz4_blocks.emplace_back(std::move(compressed_block));
  }

  // Finally, release the LZ4 stream memory.
  LZ4_freeStreamHC(lz4_stream);
}
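// The decompression counterpart to the block scheme above: because each block
// was compressed independently (via the dictionary or a stream reset), any
// single block can be decoded on its own with LZ4_decompress_safe_usingDict().
// A minimal sketch with plain std::vector; the expected decompressed size and
// the trained dictionary are assumed to come from the same compression pass
// (an empty dictionary degrades to plain LZ4_decompress_safe behavior):
#include <lz4.h>
#include <stdexcept>
#include <vector>

std::vector<char> decompress_block_sketch(const std::vector<char>& compressed_block,
                                          size_t decompressed_block_size,
                                          const std::vector<char>& dictionary) {
  std::vector<char> decompressed(decompressed_block_size);
  const int result = LZ4_decompress_safe_usingDict(
      compressed_block.data(), decompressed.data(), static_cast<int>(compressed_block.size()),
      static_cast<int>(decompressed.size()), dictionary.data(), static_cast<int>(dictionary.size()));
  if (result < 0 || static_cast<size_t>(result) != decompressed_block_size) {
    throw std::runtime_error("LZ4 block decompression failed");
  }
  return decompressed;
}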