// Build the output schema for analyze(): a 1-D array of per-attribute
// statistics (attribute_name, min, max, distinct_count, non_null_count),
// indexed by attribute_number.
ArrayDesc inferSchema(std::vector<ArrayDesc> schemas, boost::shared_ptr<Query> query)
{
    Attributes atts;
    atts.push_back(AttributeDesc(0, "attribute_name", TID_STRING, 0, 0));
    atts.push_back(AttributeDesc(1, "min", TID_STRING, 0, 0));
    atts.push_back(AttributeDesc(2, "max", TID_STRING, 0, 0));
    atts.push_back(AttributeDesc(3, "distinct_count", TID_UINT64, 0, 0));
    atts.push_back(AttributeDesc(4, "non_null_count", TID_UINT64, 0, 0));

    // Collect attribute names given as parameters, skipping the empty-bitmap
    // indicator if the user named it explicitly.
    const AttributeDesc *indicator = schemas[0].getEmptyBitmapAttribute();
    set<string> requested;
    for (size_t p = 0; p < _parameters.size(); ++p)
    {
        const string attName =
            ((boost::shared_ptr<OperatorParamReference>&)_parameters[p])->getObjectName();
        if (indicator && indicator->getName() == attName)
            continue;
        requested.insert(attName);
    }

    // With no parameters, every real (non-indicator) attribute is analyzed.
    const size_t analyzedCount = requested.empty()
        ? (indicator ? schemas[0].getAttributes().size() - 1
                     : schemas[0].getAttributes().size())
        : requested.size();
    // Dimension bound is inclusive, hence the -1.
    const size_t attsCount = analyzedCount - 1;

    Dimensions dims;
    dims.push_back(DimensionDesc("attribute_number", 0, attsCount, ANALYZE_CHUNK_SIZE, 0));

    return ArrayDesc(schemas[0].getName() + "_analyze", atts, dims);
}
// Assign this region's dimensions.  Dimensions may be set at most once;
// calling again with the identical value is a silent no-op.
void Region::setDimensions(Dimensions& newDims)
{
  // Re-setting the same dimensions must not re-run setupEnabledNodeSet().
  if (dims_ == newDims)
    return;

  if (!dims_.isUnspecified())
  {
    NTA_THROW << "Attempt to set dimensions of region " << getName()
              << " to " << newDims.toString()
              << " but region already has dimensions " << dims_.toString();
  }

  if (newDims.isDontcare())
  {
    NTA_THROW << "Invalid attempt to set region dimensions to dontcare value";
  }

  if (! newDims.isValid())
  {
    NTA_THROW << "Attempt to set region dimensions to invalid value:"
              << newDims.toString();
  }

  dims_ = newDims;
  dimensionInfo_ = "Specified explicitly in setDimensions()";

  // The enabled-node set can only be created once the number of nodes
  // (i.e. the dimensions) is known.
  setupEnabledNodeSet();
}
// Initialize this chunk's cached geometry from the array descriptor and the
// chunk address: first/last coordinates both with and without overlap, plus
// the per-dimension extent of the overlapped region.
void ConstRLEChunk::initialize(ArrayDesc const * desc, const Address &address, int compMethod)
{
    _hasOverlap = false;
    _compressionMethod = compMethod;
    _arrayDesc = desc;
    // The address coordinates are the chunk origin excluding overlap.
    _firstPositionNoOlap = address.coords;
    _addr = address;
    Dimensions dim = desc->getDimensions();
    _firstPosition.clear();
    _lastPositionNoOlap.clear();
    _lastPosition.clear();
    _chunkIntervals.clear();
    for (uint32_t i = 0; i < dim.size(); ++i)
    {
        if (dim[i].getChunkOverlap())
        {
            _hasOverlap = true;
        }
        // Overlapped start: origin minus overlap, clamped to the dimension start.
        _firstPosition.push_back(
            std::max<Coordinate>(_firstPositionNoOlap[i] - dim[i].getChunkOverlap(),
                                 dim[i].getStart()));
        // Overlapped end: origin + interval + 2*overlap - 1, clamped to endMax.
        // NOTE(review): measured from the un-overlapped origin, 2*overlap
        // appears to add one overlap more than the usual one-per-side layout
        // (origin already excludes the low-side overlap) -- confirm against
        // the writer's chunk layout before changing.
        _lastPosition.push_back(
            std::min<Coordinate>(_firstPositionNoOlap[i] + dim[i].getChunkInterval()
                                 + 2 * dim[i].getChunkOverlap() - 1,
                                 dim[i].getEndMax()));
        // Un-overlapped end: origin + interval - 1, clamped to endMax.
        _lastPositionNoOlap.push_back(
            std::min<Coordinate>(_firstPositionNoOlap[i] + dim[i].getChunkInterval() - 1,
                                 dim[i].getEndMax()));
        // Extent of the overlapped region in this dimension (inclusive bounds).
        _chunkIntervals.push_back(_lastPosition[i] - _firstPosition[i] + 1);
    }
}
//-***************************************************************************** // Get the dimensions directly off of the dataspace on the dataset // This isn't suitable for string and wstring void ReadDataSetDimensions( hid_t iParent, const std::string &iName, hsize_t iExtent, Dimensions &oDims ) { // Open the data set. hid_t dsetId = H5Dopen( iParent, iName.c_str(), H5P_DEFAULT ); ABCA_ASSERT( dsetId >= 0, "Cannot open dataset: " << iName ); DsetCloser dsetCloser( dsetId ); // Read the data space. hid_t dspaceId = H5Dget_space( dsetId ); ABCA_ASSERT( dspaceId >= 0, "Could not get dataspace for dataSet: " << iName ); DspaceCloser dspaceCloser( dspaceId ); H5S_class_t dspaceClass = H5Sget_simple_extent_type( dspaceId ); if ( dspaceClass == H5S_SIMPLE ) { // Get the dimensions int rank = H5Sget_simple_extent_ndims( dspaceId ); ABCA_ASSERT( rank == 1, "H5Sget_simple_extent_ndims() must be 1." ); hsize_t hdim = 0; rank = H5Sget_simple_extent_dims( dspaceId, &hdim, NULL ); oDims.setRank(1); oDims[0] = hdim / iExtent; } else { oDims.setRank(1); oDims[0] = 0; } }
unsigned Pulsar::Transposer::get_ndim (unsigned idim) { range_check (idim, "Pulsar::Transposer::get_ndim"); Dimensions dims (archive); return dims.get_ndim( dim[idim] ); }
// Deserialize region Region::Region(const std::string& name, const std::string& nodeType, const Dimensions& dimensions, BundleIO& bundle, Network * network) : name_(name), type_(nodeType), initialized_(false), enabledNodes_(NULL), network_(network) { // Set region info before creating the RegionImpl so that the // Impl has access to the region info in its constructor. RegionImplFactory & factory = RegionImplFactory::getInstance(); spec_ = factory.getSpec(nodeType); // Dimensions start off as unspecified, but if // the RegionImpl only supports a single node, we // can immediately set the dimensions. if (spec_->singleNodeOnly) if (!dimensions.isDontcare() && !dimensions.isUnspecified() && !dimensions.isOnes()) NTA_THROW << "Attempt to deserialize region of type " << nodeType << " with dimensions " << dimensions << " but region supports exactly one node."; dims_ = dimensions; impl_ = factory.deserializeRegionImpl(nodeType, bundle, this); createInputsAndOutputs_(); }
void DCAttribute::writeAttribute(const char* name, const hid_t type, hid_t parent, uint32_t ndims, const Dimensions dims, const void* src) throw (DCException) { hid_t attr = -1; if (H5Aexists(parent, name)) attr = H5Aopen(parent, name, H5P_DEFAULT); else { hid_t dsp; if( ndims == 1 && dims.getScalarSize() == 1 ) dsp = H5Screate(H5S_SCALAR); else dsp = H5Screate_simple( ndims, dims.getPointer(), dims.getPointer() ); attr = H5Acreate(parent, name, type, dsp, H5P_DEFAULT, H5P_DEFAULT); H5Sclose(dsp); } if (attr < 0) throw DCException(getExceptionString(name, "Attribute could not be opened or created")); if (H5Awrite(attr, type, src) < 0) { H5Aclose(attr); throw DCException(getExceptionString(name, "Attribute could not be written")); } H5Aclose(attr); }
// Resize the swap chain's buffers to the given dimensions and rebuild the
// render-target and depth/stencil views, then (re)apply the fullscreen state
// from the window properties.  Existing view/texture references must be
// released before ResizeBuffers may succeed.
void SwapChain::Resize(const Dimensions& dimensions)
{
    // Release outdated resources
    backBuffer_.reset();
    depthStencilTexture_.reset();
    depthStencilView_.reset();

    e_throw_com_ret_error(dxgiSwapChain_
                              ->ResizeBuffers(dxgiSwapChainDesc_.BufferCount,
                                              dimensions.GetWidth(),
                                              dimensions.GetHeight(),
                                              dxgiSwapChainDesc_.BufferDesc.Format,
                                              dxgiSwapChainDesc_.Flags),
                          "IDXGISwapChain::ResizeBuffers");

    // Temporarily grab the back buffer to get the view
    boost::intrusive_ptr<ID3D11Texture2D> tempBackBuffer;
    e_throw_com_ret_error(dxgiSwapChain_->GetBuffer(0,
                                                    __uuidof(ID3D11Texture2D),
                                                    ReceiveCOM(tempBackBuffer)),
                          "IDXGISwapChain::GetBuffer");

    // Now get the view
    e_throw_com_ret_error(device_.GetD3DDevice()
                              .CreateRenderTargetView(tempBackBuffer.get(),
                                                      0,
                                                      ReceiveCOM(backBuffer_)),
                          "ID3D11Device::CreateRenderTargetView");

    // Create depth/stencil views
    // 24-bit depth + 8-bit stencil, single-sampled, GPU-only texture.
    D3D11_TEXTURE2D_DESC depthTextureDesc = {0};
    depthTextureDesc.Width = dimensions.GetWidth();
    depthTextureDesc.Height = dimensions.GetHeight();
    depthTextureDesc.MipLevels = 1;
    depthTextureDesc.ArraySize = 1;
    depthTextureDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
    depthTextureDesc.SampleDesc.Count = 1;
    depthTextureDesc.SampleDesc.Quality = 0;
    depthTextureDesc.Usage = D3D11_USAGE_DEFAULT;
    depthTextureDesc.BindFlags = D3D11_BIND_DEPTH_STENCIL;
    depthTextureDesc.CPUAccessFlags = 0;
    depthTextureDesc.MiscFlags = 0;

    e_throw_com_ret_error(device_.GetD3DDevice()
                              .CreateTexture2D(&depthTextureDesc,
                                               NULL,
                                               ReceiveCOM(depthStencilTexture_)),
                          "ID3D11Device::CreateTexture2D");

    D3D11_DEPTH_STENCIL_VIEW_DESC depthStencilViewDesc;
    memset(&depthStencilViewDesc, 0, sizeof(depthStencilViewDesc));
    depthStencilViewDesc.Format = depthTextureDesc.Format;
    depthStencilViewDesc.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
    //depthStencilViewDesc.Texture2D.MipSlice = 0;

    e_throw_com_ret_error(device_.GetD3DDevice()
                              .CreateDepthStencilView(depthStencilTexture_.get(),
                                                      &depthStencilViewDesc,
                                                      ReceiveCOM(depthStencilView_)),
                          "ID3D11Device::CreateDepthStencilView");

    // Reapply fullscreen/windowed mode from the window's current properties.
    e_throw_com_ret_error(dxgiSwapChain_
                              ->SetFullscreenState(window_.GetWindowProperties().windowType
                                                           == WindowType::FULLSCREEN
                                                       ? TRUE
                                                       : FALSE,
                                                   NULL),
                          "IDXGISwapChain::SetFullscreenState");
}
// Query the renderer's output size in pixels.  Returns (-1, -1) when SDL
// reports an error, and a default-constructed value when no renderer is
// loaded.
const Dimensions<int> Renderer::getOutputSize() const
{
    Dimensions<int> size;
    if (!isLoaded())
        return size;

    const int rc = SDL_GetRendererOutputSize(ren, size.x().getPtr(), size.y().getPtr());
    if (rc < 0)
        return Dimensions<int>(-1, -1);

    return size;
}
// Produce the fixed schema for the test_cache operator: one nullable double
// attribute over a single-cell dimension.
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, boost::shared_ptr< Query> query)
{
    Attributes resultAttrs;
    resultAttrs.push_back(
        AttributeDesc(0, "dummy", TID_DOUBLE, AttributeDesc::IS_NULLABLE, 0));

    Dimensions resultDims;
    resultDims.push_back(DimensionDesc("i", 0, 0, 1, 0));

    return ArrayDesc("test_cache", resultAttrs, resultDims);
}
// Infer the schema for match(pattern, catalog, maxCollisions): the result
// carries the pattern's attributes, the catalog's attributes and dimensions
// (prefixed "match_") as attributes, an empty-tag indicator, and the
// pattern's dimensions extended by a "collision" dimension.
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, boost::shared_ptr< Query> query)
{
    assert(schemas.size() == 2);
    ArrayDesc const& patternDesc = schemas[0];
    ArrayDesc const& catalogDesc = schemas[1];
    Attributes const& catalogAttributes = catalogDesc.getAttributes(true);
    Dimensions const& catalogDimensions = catalogDesc.getDimensions();
    Attributes const& patternAttributes = patternDesc.getAttributes(true);
    Dimensions resultDimensions = patternDesc.getDimensions();
    // pattern attrs + catalog attrs + empty tag + one attr per catalog dim.
    size_t totalAttributes = catalogAttributes.size() + patternAttributes.size()
        + 1 + catalogDimensions.size();
    Attributes matchAttributes(totalAttributes);
    // Both inputs must have the same number of dimensions...
    if (catalogDimensions.size() != resultDimensions.size())
    {
        stringstream left, right;
        printDimNames(left, resultDimensions);
        printDimNames(right, catalogDimensions);
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_DIMENSION_COUNT_MISMATCH)
            << "match" << left.str() << right.str();
    }
    // ...and matching start/interval/overlap per dimension.
    for (size_t i = 0, n = catalogDimensions.size(); i < n; i++)
    {
        if (!(catalogDimensions[i].getStartMin() == resultDimensions[i].getStartMin()
              && catalogDimensions[i].getChunkInterval() == resultDimensions[i].getChunkInterval()
              && catalogDimensions[i].getChunkOverlap() == resultDimensions[i].getChunkOverlap()))
        {
            // XXX To do: implement requiresRepart() method, remove interval/overlap checks
            // above, use SCIDB_LE_START_INDEX_MISMATCH here.
            throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_ARRAYS_NOT_CONFORMANT);
        }
    }
    size_t j = 0;
    // Pattern attributes are copied through unchanged (new sequential ids).
    for (size_t i = 0, n = patternAttributes.size(); i < n; i++, j++)
    {
        AttributeDesc const& attr = patternAttributes[i];
        matchAttributes[j] = AttributeDesc(j, attr.getName(), attr.getType(), attr.getFlags(),
            attr.getDefaultCompressionMethod(), attr.getAliases(), &attr.getDefaultValue(),
            attr.getDefaultValueExpr());
    }
    // Catalog attributes appear as "match_<name>".
    for (size_t i = 0, n = catalogAttributes.size(); i < n; i++, j++)
    {
        AttributeDesc const& attr = catalogAttributes[i];
        matchAttributes[j] = AttributeDesc(j, "match_" + attr.getName(), attr.getType(),
            attr.getFlags(), attr.getDefaultCompressionMethod(), attr.getAliases(),
            &attr.getDefaultValue(), attr.getDefaultValueExpr());
    }
    // Catalog dimensions are exposed as int64 attributes "match_<dim>".
    for (size_t i = 0, n = catalogDimensions.size(); i < n; i++, j++)
    {
        matchAttributes[j] = AttributeDesc(j, "match_" + catalogDimensions[i].getBaseName(),
            TID_INT64, 0, 0);
    }
    // Trailing empty-tag indicator attribute.
    matchAttributes[j] = AttributeDesc(j, DEFAULT_EMPTY_TAG_ATTRIBUTE_NAME, TID_INDICATOR,
        AttributeDesc::IS_EMPTY_INDICATOR, 0);
    // Second parameter: maximum number of collisions; must be a positive
    // value that also fits in 32 bits (the (int32_t) round-trip check).
    int64_t maxCollisions = evaluate(
        ((boost::shared_ptr<OperatorParamLogicalExpression>&)_parameters[1])->getExpression(),
        query, TID_INT64).getInt64();
    if (maxCollisions <= 0 || (int32_t)maxCollisions != maxCollisions)
    {
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_WRONG_OPERATOR_ARGUMENT2)
            << "positive";
    }
    // Extra dimension enumerating collisions 0..maxCollisions-1.
    resultDimensions.push_back(DimensionDesc("collision", 0, 0, maxCollisions-1,
        maxCollisions-1, (uint32_t)maxCollisions, 0));
    return ArrayDesc("match", matchAttributes, resultDimensions);
}
// Compute the output boundaries of slice(): dimensions named as slice
// parameters are removed; for each removed dimension the slice coordinate
// must fall inside the input box, otherwise the result is empty.
virtual PhysicalBoundaries getOutputBoundaries(
        std::vector<PhysicalBoundaries> const& inputBoundaries,
        std::vector< ArrayDesc> const& inputSchemas) const
{
    if (inputBoundaries[0].isEmpty())
    {
        return PhysicalBoundaries::createEmpty(_schema.getDimensions().size());
    }

    Coordinates newStart, newEnd;
    Coordinates inStart = inputBoundaries[0].getStartCoords();
    Coordinates inEnd = inputBoundaries[0].getEndCoords();
    Dimensions dims = inputSchemas[0].getDimensions();
    size_t nDims = dims.size();
    size_t nParams = _parameters.size();

    // Parameters come in (dimension, value) pairs; collect the dim names.
    std::vector<std::string> sliceDimName(nParams/2);
    for (size_t i = 0; i < nParams; i+=2)
    {
        sliceDimName[i >> 1] =
            ((std::shared_ptr<OperatorParamReference>&)_parameters[i])->getObjectName();
    }

    for (size_t i = 0; i < nDims; i++)
    {
        const std::string dimName = dims[i].getBaseName();
        int k = safe_static_cast<int>(sliceDimName.size());
        // Match either by name, or by the positional "_<index>" form
        // (1-based, hence i+1).
        while (--k >= 0
               && sliceDimName[k] != dimName
               && !(sliceDimName[k][0] == '_'
                    && (size_t)atoi(sliceDimName[k].c_str()+1) == i+1))
            ;
        if (k < 0)
        {   //dimension i is present in output
            newStart.push_back(inStart[i]);
            newEnd.push_back(inEnd[i]);
        }
        else
        {   //dimension i is not present in output; check value
            Coordinate slice =
                ((std::shared_ptr<OperatorParamPhysicalExpression>&)_parameters[k*2+1])
                    ->getExpression()->evaluate().getInt64();
            if (!inputBoundaries[0].isInsideBox(slice,i))
            {
                //the slice value is outside the box; guess what - the result is an empty array
                return PhysicalBoundaries::createEmpty(
                    _schema.getDimensions().size());
            }
        }
    }

    // This does nothing but calculate a few local values
    // and then discard them.
    //
    // double resultCells = PhysicalBoundaries::getNumCells(newStart, newEnd);
    // double origCells = inputBoundaries[0].getNumCells();
    // double newDensity = 1.0;
    // if (resultCells > 0.0)
    // {
    //     newDensity = inputBoundaries[0].getDensity() * origCells / resultCells;
    //     newDensity = newDensity > 1.0 ? 1.0 : newDensity;
    // }
    return PhysicalBoundaries(newStart, newEnd);
}
void openH5File() { if (dataCollector == NULL) { DataSpace<simDim> mpi_pos; DataSpace<simDim> mpi_size; Dimensions splashMpiPos; Dimensions splashMpiSize; GridController<simDim> &gc = Environment<simDim>::get().GridController(); mpi_pos = gc.getPosition(); mpi_size = gc.getGpuNodes(); splashMpiPos.set(0, 0, 0); splashMpiSize.set(1, 1, 1); for (uint32_t i = 0; i < simDim; ++i) { splashMpiPos[i] = mpi_pos[i]; splashMpiSize[i] = mpi_size[i]; } const uint32_t maxOpenFilesPerNode = 1; dataCollector = new ParallelDomainCollector( gc.getCommunicator().getMPIComm(), gc.getCommunicator().getMPIInfo(), splashMpiSize, maxOpenFilesPerNode); // set attributes for datacollector files DataCollector::FileCreationAttr h5_attr; h5_attr.enableCompression = false; h5_attr.fileAccType = DataCollector::FAT_CREATE; h5_attr.mpiPosition.set(splashMpiPos); h5_attr.mpiSize.set(splashMpiSize); } // open datacollector try { std::string filename = (foldername + std::string("/makroParticlePerSupercell")); log<picLog::INPUT_OUTPUT > ("HDF5 open DataCollector with file: %1%") % filename; dataCollector->open(filename.c_str(), h5_attr); } catch (DCException e) { std::cerr << e.what() << std::endl; throw std::runtime_error("Failed to open datacollector"); } }
// Infer the schema for a multi-input operator that requires all inputs to be
// conformant: same dimension count, start, chunk interval and overlap, and
// pairwise-compatible attributes.  The result uses the first schema's name,
// the widest attribute list, and dimensions widened to cover every input.
ArrayDesc inferSchema(std::vector< ArrayDesc> schemas, boost::shared_ptr< Query> query)
{
    assert(schemas.size() >= 2);
    assert(_parameters.size() == 0);

    Attributes const& leftAttributes = schemas[0].getAttributes();
    Dimensions const& leftDimensions = schemas[0].getDimensions();
    // Points at whichever input has the most attributes (i.e. includes the
    // empty indicator if any input has one).
    Attributes const* newAttributes = &leftAttributes;
    Dimensions newDims = leftDimensions;
    size_t nDims = newDims.size();
    for (size_t j = 1; j < schemas.size(); j++)
    {
        Attributes const& rightAttributes = schemas[j].getAttributes();
        Dimensions const& rightDimensions = schemas[j].getDimensions();

        if (nDims != rightDimensions.size())
            throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_ARRAYS_NOT_CONFORMANT);

        for (size_t i = 0; i < nDims; i++)
        {
            // Chunking must match exactly; only the low/high bounds may differ.
            if ( leftDimensions[i].getStart() != rightDimensions[i].getStart()
                 || leftDimensions[i].getChunkInterval() != rightDimensions[i].getChunkInterval()
                 || leftDimensions[i].getChunkOverlap() != rightDimensions[i].getChunkOverlap())
            {
                throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_ARRAYS_NOT_CONFORMANT);
            }
            // Widen each output dimension to the union of the inputs' ranges.
            DimensionDesc& dim = newDims[i];
            dim = DimensionDesc(dim.getBaseName(),
                                dim.getNamesAndAliases(),
                                min(dim.getStartMin(), rightDimensions[i].getStartMin()),
                                min(dim.getCurrStart(), rightDimensions[i].getCurrStart()),
                                max(dim.getCurrEnd(), rightDimensions[i].getCurrEnd()),
                                max(dim.getEndMax(), rightDimensions[i].getEndMax()),
                                dim.getChunkInterval(), dim.getChunkOverlap());
        }

        // Attribute counts may differ by at most one, and only when the longer
        // side's extra attribute is the empty indicator.
        if (leftAttributes.size() != rightAttributes.size()
            && (leftAttributes.size() != rightAttributes.size()+1
                || !leftAttributes[leftAttributes.size()-1].isEmptyIndicator())
            && (leftAttributes.size()+1 != rightAttributes.size()
                || !rightAttributes[rightAttributes.size()-1].isEmptyIndicator()))
            throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_ARRAYS_NOT_CONFORMANT);

        size_t nAttrs = min(leftAttributes.size(), rightAttributes.size());
        if (rightAttributes.size() > newAttributes->size())
        {
            newAttributes = &rightAttributes;
        }
        // Common attributes must agree in type and flags.
        for (size_t i = 0; i < nAttrs; i++)
        {
            if (leftAttributes[i].getType() != rightAttributes[i].getType()
                || leftAttributes[i].getFlags() != rightAttributes[i].getFlags())
                throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_ARRAYS_NOT_CONFORMANT);
        }
    }
    return ArrayDesc(schemas[0].getName(), *newAttributes, newDims);
}
// Infer the schema for rank(): resolve the ranked attribute (first parameter,
// or the input's first attribute), reject ranking the empty indicator,
// validate any group-by dimension names, then delegate to getRankingSchema().
ArrayDesc inferSchema(std::vector<ArrayDesc> schemas, boost::shared_ptr<Query> query)
{
    assert(schemas.size() == 1);
    ArrayDesc const& input = schemas[0];

    // Ranked attribute: explicit first parameter, else the first attribute.
    string attName = _parameters.size() > 0
        ? ((boost::shared_ptr<OperatorParamReference>&)_parameters[0])->getObjectName()
        : input.getAttributes()[0].getName();

    AttributeID inputAttributeID = 0;
    bool found = false;
    BOOST_FOREACH(const AttributeDesc& att, input.getAttributes())
    {
        if (att.getName() == attName)
        {
            found = true;
            inputAttributeID = att.getId();
        }
    }
    if (!found) {
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_DLA_ERROR14);
    }

    AttributeDesc rankedAttribute = input.getAttributes()[inputAttributeID];
    if (rankedAttribute.isEmptyIndicator()) {
        // Ranking the empty-tag indicator is meaningless.
        throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_DLA_ERROR15);
    }

    Dimensions dims = input.getDimensions();
    if (_parameters.size() > 1)
    {
        // Validate that every group-by parameter names an existing dimension.
        // (FIX: removed the unused local `vector<int> groupBy` the original
        // allocated here -- only the validation matters at this point.)
        for (size_t i = 0; i < _parameters.size() - 1; i++)
        {
            const string& dimName =
                ((boost::shared_ptr<OperatorParamReference>&)_parameters[i + 1])->getObjectName();
            const string& dimAlias =
                ((boost::shared_ptr<OperatorParamReference>&)_parameters[i + 1])->getArrayName();
            size_t j;
            for (j = 0; j < dims.size(); j++)
            {
                if (dims[j].hasNameAndAlias(dimName, dimAlias))
                {
                    break;
                }
            }
            if (j >= dims.size())
                throw USER_EXCEPTION(SCIDB_SE_INFER_SCHEMA, SCIDB_LE_DIMENSION_NOT_EXIST)
                    << dimName;
        }
    }
    return getRankingSchema(input, inputAttributeID);
}
// Map a position within srcDims to the equivalent position within dstDims by
// linearizing the cell offset in row-major order and then unravelling it.
inline void convertCoordinates(Coordinates const& srcPos, Dimensions const& srcDims,
                               Coordinates& dstPos, Dimensions const& dstDims)
{
    // Row-major linear offset of srcPos inside the source box.
    Coordinate linear = 0;
    for (size_t d = 0, n = srcDims.size(); d < n; ++d)
    {
        linear = linear * srcDims[d].getLength() + (srcPos[d] - srcDims[d].getStart());
    }

    // Unravel the offset into destination coordinates, last dimension fastest.
    for (size_t d = dstDims.size(); d-- > 0; )
    {
        dstPos[d] = dstDims[d].getStart() + linear % dstDims[d].getLength();
        linear /= dstDims[d].getLength();
    }
}
// Verify the "dontcare" dimensions value [0]: it is specified (not
// unspecified), reports dontcare, is valid, prints as "[dontcare]", rejects
// index/count queries, and reports one dimension of size 0.
TEST_F(DimensionsTest, DontCareDimensions)
{
  // dontcare dimensions [0]
  Dimensions d;
  d.push_back(0);
  ASSERT_TRUE(!d.isUnspecified());
  ASSERT_TRUE(d.isDontcare());
  ASSERT_TRUE(d.isValid());
  EXPECT_STREQ("[dontcare]", d.toString().c_str());
  // Index and count are undefined for dontcare dimensions and must throw.
  ASSERT_ANY_THROW(d.getIndex(zero));
  ASSERT_ANY_THROW(d.getCount());
  ASSERT_EQ((unsigned int)0, d.getDimension(0));
  ASSERT_EQ((unsigned int)1, d.getDimensionCount());
}
// Record the new dimensions and propagate them to the cairo XCB surface.
// Returns *this for call chaining.
inline Surface::self_ref Surface::setSize(Dimensions<> dim)
{
  dimensions_ = dim;
  cairo_xcb_surface_set_size(cairoSurface_.get(), dim.width(), dim.height());
  return *this;
}
// Determine the MPI size recorded in the data files named by `options`.
// On success fills fileMPISizeDim and returns RESULT_OK; on failure zeroes
// fileMPISizeDim and returns RESULT_ERROR.
int detectFileMPISize(Options& options, Dimensions &fileMPISizeDim)
{
    int result = RESULT_OK;

    DataCollector *dc = NULL;
#if (SPLASH_SUPPORTED_PARALLEL==1)
    if (options.parallelFile)
        dc = new ParallelDataCollector(MPI_COMM_WORLD, MPI_INFO_NULL,
            Dimensions(options.mpiSize, 1, 1), 1);
    else
#endif
        dc = new SerialDataCollector(1);

    DataCollector::FileCreationAttr fileCAttr;
    DataCollector::initFileCreationAttr(fileCAttr);
    fileCAttr.fileAccType = DataCollector::FAT_READ;

    try
    {
        dc->open(options.filename.c_str(), fileCAttr);
        dc->getMPISize(fileMPISizeDim);
        dc->close();
    }
    catch (const DCException& e)  // FIX: catch by const& (was by value)
    {
        std::cerr << "[0] Detecting file MPI size failed!" << std::endl
                  << e.what() << std::endl;
        fileMPISizeDim.set(0, 0, 0);
        result = RESULT_ERROR;
    }

    delete dc;
    dc = NULL;

    return result;
}
// Rebuild the player's quad as six vertices (two triangles), centered on
// the position stored in `dimensions`.
void PlayerData::GenerateVertices(Dimensions &dimensions)
{
    const float halfW = dimensions.getWidth() / 2.0f;
    const float halfH = dimensions.getHeight() / 2.0f;
    const float cx = dimensions.getPosition().getX();
    const float cy = dimensions.getPosition().getY();

    vertices.clear();
    // Triangle 1: bottom-left, bottom-right, top-right.
    vertices.push_back({glm::vec2(cx - halfW, cy - halfH)});
    vertices.push_back({glm::vec2(cx + halfW, cy - halfH)});
    vertices.push_back({glm::vec2(cx + halfW, cy + halfH)});
    // Triangle 2: top-left, bottom-left, top-right.
    vertices.push_back({glm::vec2(cx - halfW, cy + halfH)});
    vertices.push_back({glm::vec2(cx - halfW, cy - halfH)});
    vertices.push_back({glm::vec2(cx + halfW, cy + halfH)});
}
// Log each dimension's [startMin, endMax] range at DEBUG level, tagged with
// the caller-supplied prefix.  No-op when DEBUG logging is disabled.
void log4cxx_debug_dimensions(const std::string& prefix, const Dimensions& dims)
{
    if (!logger->isDebugEnabled())
        return;

    const size_t count = dims.size();
    for (size_t d = 0; d < count; ++d)
    {
        LOG4CXX_DEBUG(logger, prefix << " dims["<<d<<"] from "
                      << dims[d].getStartMin() << " to " << dims[d].getEndMax());
    }
}
// Two Dimensions compare equal when their underlying size_t vectors match
// exactly, or when both consist entirely of ones (all-ones values are
// treated as equivalent regardless of rank).
bool Dimensions::operator==(const Dimensions& dims2) const
{
  const bool sameVector =
      (std::vector<size_t>)(*this) == (std::vector<size_t>)dims2;
  return sameVector || (isOnes() && dims2.isOnes());
}
// Return every refined-real type in the library that carries concrete
// dimensions (i.e. excluding wildcard and dimensionless types).
// Throws runtime_error when the library holds no type definitions.
set<Type> Library::getRealAtomTypes() const{
    set<Type> s;
    struct gl_list_t *l = DefinitionList();
    if(!l){
        throw runtime_error("No types found in library (perhaps no files have been loaded?)");
    }
    // gl_list_t indices are 1-based.
    for(unsigned i = 1; i<=gl_length(l); ++i) {
        Type t((const struct TypeDescription *)gl_fetch(l,i));
        if(t.isRefinedReal()){
            Dimensions d = t.getDimensions();
            if(d.isWild() || d.isDimensionless())continue; // skip this one
            // it's got some dimensions, add it to the list
            s.insert(t);
        }
    }
    // Free the list container (the fetched type descriptions are not owned
    // by the list -- Type wraps them by pointer).
    gl_destroy(l);
    return s;
}
void DCDataSet::create(const CollectionType& colType, hid_t group, const Dimensions size, uint32_t ndims, bool compression) throw (DCException) { log_msg(2, "DCDataSet::create (%s, size %s)", name.c_str(), size.toString().c_str()); if (opened) throw DCException(getExceptionString("create: dataset is already open")); // if the dataset already exists, remove/unlink it // note that this won't free the memory occupied by this // dataset, however, there currently is no function to delete // a dataset if (!checkExistence || (checkExistence && H5Lexists(group, name.c_str(), H5P_LINK_ACCESS_DEFAULT))) H5Ldelete(group, name.c_str(), H5P_LINK_ACCESS_DEFAULT); this->ndims = ndims; this->compression = compression; this->datatype = colType.getDataType(); getLogicalSize().set(size); setChunking(colType.getSize()); setCompression(); if (getPhysicalSize().getScalarSize() != 0) { hsize_t *max_dims = new hsize_t[ndims]; for (size_t i = 0; i < ndims; ++i) max_dims[i] = H5F_UNLIMITED; dataspace = H5Screate_simple(ndims, getPhysicalSize().getPointer(), max_dims); delete[] max_dims; max_dims = NULL; } else dataspace = H5Screate(H5S_NULL); if (dataspace < 0) throw DCException(getExceptionString("create: Failed to create dataspace")); // create the new dataset dataset = H5Dcreate(group, this->name.c_str(), this->datatype, dataspace, H5P_DEFAULT, dsetProperties, H5P_DEFAULT); if (dataset < 0) throw DCException(getExceptionString("create: Failed to create dataset")); isReference = false; opened = true; }
//-***************************************************************************** // Dimensions aren't a scalar, and thus must be read carefully. void ReadDimensions( hid_t iParent, const std::string &iAttrName, Dimensions &oDims ) { // Assume a maximum rank of 128. This is totally reasonable. uint32_t dimVals[128]; size_t readRank; ReadSmallArray( iParent, iAttrName, H5T_STD_U32LE, H5T_NATIVE_UINT32, 128, readRank, ( void * )dimVals ); Dimensions retDims; retDims.setRank( readRank ); for ( size_t r = 0; r < readRank; ++r ) { retDims[r] = ( size_t )dimVals[r]; } oDims = retDims; }
// Populate the grid axis by axis, delegating each dimension to the
// per-axis fillGrid overload.
static void fillGrid(Grid<GridT>& grid, const Dimensions<DIM>& dimensions,
                     const std::array<int, DIM>& numberOfUnknownsPerDim)
{
    for (uint axis = 0; axis < DIM; ++axis)
    {
        fillGrid(grid, dimensions.dimension(axis), numberOfUnknownsPerDim[axis]);
    }
}
// Compute the per-axis stride for the transposed ordering dim[0..2]:
// dim[0] varies fastest, each subsequent axis strides by the product of
// the preceding extents.
Pulsar::Dimensions Pulsar::Transposer::get_stride (const Dimensions& d) const
{
  Dimensions stride;

  int increment = 1;
  for (unsigned i = 0; i < 3; ++i)
  {
    stride.set_ndim (dim[i], increment);
    increment *= d.get_ndim (dim[i]);
  }

  // Any axis not assigned above receives the total size.
  stride.set_if_zero (increment);

  return stride;
}
void DCDataSet::createReference(hid_t refGroup, hid_t srcGroup, DCDataSet &srcDataSet, Dimensions count, Dimensions offset, Dimensions stride) throw (DCException) { if (opened) throw DCException(getExceptionString("createReference: dataset is already open")); if (checkExistence && H5Lexists(refGroup, name.c_str(), H5P_LINK_ACCESS_DEFAULT)) throw DCException(getExceptionString("createReference: this reference already exists")); getLogicalSize().set(count); this->ndims = srcDataSet.getNDims(); count.swapDims(this->ndims); offset.swapDims(this->ndims); stride.swapDims(this->ndims); // select region hyperslab in source dataset if (H5Sselect_hyperslab(srcDataSet.getDataSpace(), H5S_SELECT_SET, offset.getPointer(), stride.getPointer(), count.getPointer(), NULL) < 0 || H5Sselect_valid(srcDataSet.getDataSpace()) <= 0) throw DCException(getExceptionString("createReference: failed to select hyperslap for reference")); if (H5Rcreate(®ionRef, srcGroup, srcDataSet.getName().c_str(), H5R_DATASET_REGION, srcDataSet.getDataSpace()) < 0) throw DCException(getExceptionString("createReference: failed to create region reference")); hsize_t ndims = 1; dataspace = H5Screate_simple(1, &ndims, NULL); if (dataspace < 0) throw DCException(getExceptionString("createReference: failed to create dataspace for reference")); dataset = H5Dcreate(refGroup, name.c_str(), H5T_STD_REF_DSETREG, dataspace, H5P_DEFAULT, dsetProperties, H5P_DEFAULT); if (dataset < 0) throw DCException(getExceptionString("createReference: failed to create dataset for reference")); if (H5Dwrite(dataset, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, dsetWriteProperties, ®ionRef) < 0) throw DCException(getExceptionString("createReference: failed to write reference")); isReference = true; opened = true; }
END_TEST

// Verify that the three-argument Dimensions constructor stores
// width/height/depth and that the object reports the layout-dimensions
// SBML type code with an empty meta id.
START_TEST (test_Dimensions_createWithSize)
{
    Dimensions* d = new(std::nothrow) Dimensions( 1.2 , 0.4 , 3.1415 );
    fail_unless( d->getTypeCode () == SBML_LAYOUT_DIMENSIONS );
    fail_unless( d->getMetaId () == "" );
    // fail_unless( d->getNotes () == "" );
    // fail_unless( d->getAnnotation () == "" );
    fail_unless( d->getWidth () == 1.2 );
    fail_unless( d->getHeight() == 0.4 );
    fail_unless( d->getDepth () == 3.1415 );
    delete d;
}
//-***************************************************************************** // Dimensions aren't a scalar, and thus must be written carefully. void WriteDimensions( hid_t iParent, const std::string &iAttrName, const Dimensions &iDims ) { size_t rank = iDims.rank(); // Create temporary storage to write std::vector<uint32_t> dimStorage( rank ); // Copy into it. for ( size_t r = 0; r < rank; ++r ) { dimStorage[r] = ( uint32_t )iDims[r]; } WriteSmallArray( iParent, iAttrName, H5T_STD_U32LE, H5T_NATIVE_UINT32, rank, ( const void * )&dimStorage.front() ); }