void XMPMetadataSource::saveProperty(const MetadataSet& property, const MetaString& pathToSchema, const MetaString& propertyName)
{
    if (property.empty())
    {
        // not loaded or empty. In any case nothing to do.
        return;
    }
    MetaString pathToPropertiesArray;
    SXMPUtils::ComposeStructFieldPath(VMF_NS, pathToSchema.c_str(), VMF_NS, SCHEMA_SET, &pathToPropertiesArray);
    MetaString thisPropertyPath = findProperty(pathToSchema, propertyName);
    if (thisPropertyPath.empty())
    {
        xmp->AppendArrayItem(VMF_NS, pathToPropertiesArray.c_str(), kXMP_PropValueIsArray, nullptr, kXMP_PropValueIsStruct);
        SXMPUtils::ComposeArrayItemPath(VMF_NS, pathToPropertiesArray.c_str(), kXMP_ArrayLastItem, &thisPropertyPath);
    }
    savePropertyName(thisPropertyPath, propertyName);
    xmp->SetStructField(VMF_NS, thisPropertyPath.c_str(), VMF_NS, PROPERTY_SET, nullptr, kXMP_PropValueIsArray);
    MetaString thisPropertySetPath;
    SXMPUtils::ComposeStructFieldPath(VMF_NS, thisPropertyPath.c_str(), VMF_NS, PROPERTY_SET, &thisPropertySetPath);
    for (auto metadata = property.begin(); metadata != property.end(); ++metadata)
    {
        saveMetadata(*metadata, thisPropertySetPath);
    }
}
void MetadataStream::remove( const MetadataSet& set )
{
    std::for_each( set.begin(), set.end(), [&]( const std::shared_ptr<Metadata>& spMetadata )
    {
        this->remove( spMetadata->getId() );
    });
}
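// Usage sketch (not part of the library source): remove every item of a named set via
// MetadataStream::remove(const MetadataSet&) above. The file path, schema name and set
// name are placeholders; open/load/queryByName/save are the same calls used by the
// sample utilities later in this collection. The usual VMF headers and namespace are
// assumed to be in scope.
void removeNamedSetExample()
{
    MetadataStream stream;
    if (stream.open("clip.avi", MetadataStream::ReadWrite) && stream.load("my_schema"))
    {
        MetadataSet obsolete = stream.queryByName("obsolete_set");
        stream.remove(obsolete);   // drops each item by its id, as implemented above
        stream.save();
        stream.close();
    }
}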
MetadataSet Metadata::getReferencesByMetadata(const std::string& sMetadataName) const
{
    MetadataSet mdSet;
    if (sMetadataName.empty())
    {
        VMF_EXCEPTION(ValidateException, "MetadataName is empty!");
    }
    else
    {
        std::for_each(m_vReferences.begin(), m_vReferences.end(), [&](const Reference& ref)
        {
            auto spMetadata = ref.getReferenceMetadata().lock();
            if (spMetadata != nullptr)
            {
                if (spMetadata->getName() == sMetadataName)
                {
                    mdSet.emplace_back(spMetadata);
                }
            }
        });
    }
    return mdSet;
}
MetadataSet* HeaderMetadata::wrap(::MXFMetadataSet *cMetadataSet)
{
    MXFPP_CHECK(cMetadataSet->headerMetadata == _cHeaderMetadata);
    MXFPP_CHECK(!mxf_equals_uuid(&cMetadataSet->instanceUID, &g_Null_UUID));

    MetadataSet *set = 0;
    map<mxfUUID, MetadataSet*>::const_iterator objIter;
    objIter = _objectDirectory.find(cMetadataSet->instanceUID);
    if (objIter != _objectDirectory.end())
    {
        set = (*objIter).second;
        if (cMetadataSet != set->getCMetadataSet())
        {
            mxf_log(MXF_WLOG, "Metadata set with same instance UUID found when creating "
                    "C++ object. Changing wrapped C metadata set.");
            set->_cMetadataSet = cMetadataSet;
        }
    }
    else
    {
        map<mxfKey, AbsMetadataSetFactory*>::iterator iter;
        ::MXFSetDef *setDef = 0;
        MXFPP_CHECK(mxf_find_set_def(_cHeaderMetadata->dataModel, &cMetadataSet->key, &setDef));
        while (setDef != 0)
        {
            iter = _objectFactory.find(setDef->key);
            if (iter != _objectFactory.end())
            {
                set = (*iter).second->create(this, cMetadataSet);
                break;
            }
            else
            {
                setDef = setDef->parentSetDef;
            }
        }
        if (set == 0)
        {
            // shouldn't be here if every class is a sub-class of interchange object
            // and libMXF ignores sets with unknown defs
            throw MXFException("Could not create C++ object for metadata set");
        }
        add(set);
    }
    return set;
}
// schemaName == "" means all schemas (i.e. all metadata in the stream)
// setName == "" means the whole schema is removed, including its descriptions
// both schemaName and setName specified means removing all the metadata items from the specified set
void removeMetadata(const string& path, const string& schemaName = "", const string& setName = "")
{
    cout << "\nRemoving metadata: "
         << (schemaName.empty() ? string("*") : schemaName + '/' + (setName.empty() ? string("*") : setName))
         << endl;

    MetadataStream ms;
    if (!ms.open(path, MetadataStream::ReadWrite))
        throw std::runtime_error("Can't open MetadataStream");

    vector<string> schemas;
    if (schemaName.empty())
    {
        // remove all metadata
        ms.remove();
    }
    else
    {
        schemas.push_back(schemaName);
        for (unsigned int sNum = 0; sNum < schemas.size(); sNum++)
        {
            auto sName = schemas[sNum];
            cout << "\t* (" << sNum << ") [schema]: " << sName << endl;
            if (!ms.load(sName))
                throw std::runtime_error(string("Error loading schema: ") + sName);

            vector<shared_ptr<MetadataDesc>> mDescs;
            if (setName.empty())
            {
                // remove the whole schema with descriptions
                ms.remove( ms.getSchema(sName) );
            }
            else
            {
                mDescs.push_back(ms.getSchema(sName)->findMetadataDesc(setName));
                for (unsigned int setNum = 0; setNum < mDescs.size(); setNum++)
                {
                    auto mDesc = mDescs[setNum];
                    string setName = mDesc->getMetadataName();
                    MetadataSet set = ms.queryByName(setName);
                    cout << "\t\t* (" << sNum << "." << setNum << ") [set]: " << setName
                         << "(" << set.size() << " items)" << endl;
                    ms.remove(set);
                    set.clear();
                }
            }
        }
    }

    cout << "Saving stream..." << endl;
    ms.save();
    cout << "Done." << endl;
}
// setName == "" means all sets in the specified schema
// schemaName == "" means all schemas (i.e. all metadata)
void dumpMetadata(const string& path, const string& schemaName = "", const string& setName = "")
{
    cout << "\nDumping metadata: "
         << (schemaName.empty() ? string("*") : schemaName + '/' + (setName.empty() ? string("*") : setName))
         << endl;

    MetadataStream ms;
    if (!ms.open(path, MetadataStream::ReadOnly))
        throw std::runtime_error("Can't open MetadataStream");

    vector<string> schemas;
    if (schemaName.empty())
        ms.getAllSchemaNames().swap(schemas);
    else
        schemas.push_back(schemaName);

    for (unsigned int sNum = 0; sNum < schemas.size(); sNum++)
    {
        auto sName = schemas[sNum];
        cout << "* (" << sNum << ") [schema]: " << sName << endl;
        if (!ms.load(sName))
            throw std::runtime_error(string("Error loading schema: " + sName).c_str());

        vector<shared_ptr<MetadataDesc>> mDescs;
        if (setName.empty())
            ms.getSchema(sName)->getAll().swap(mDescs);
        else
            mDescs.push_back(ms.getSchema(sName)->findMetadataDesc(setName));

        for (unsigned int setNum = 0; setNum < mDescs.size(); setNum++)
        {
            auto mDesc = mDescs[setNum];
            string setName = mDesc->getMetadataName();
            MetadataSet set = ms.queryByName(setName);
            cout << "\t* (" << sNum << "." << setNum << ") [set]: " << setName << "(" << set.size() << " items)" << endl;
            if (set.empty())
                continue;

            vector<string> fields(set[0]->getFieldNames());
            int itemNum = 0;
            for (auto item = set.begin(); item != set.end(); item++)
            {
                cout << "\t\t* (" << sNum << "." << setNum << "." << ++itemNum << ") { ";
                const char * separator = "";
                for (auto f = fields.begin(); f != fields.end(); f++)
                {
                    cout << separator << *f << "=";
                    try
                    {
                        cout << (*item)->getFieldValue(*f).toString();
                    }
                    catch (vmf::Exception& e)
                    {
                        cout << '<' << e.what() << '>';
                    }
                    separator = ", ";
                }
                cout << " }" << endl;
            }
        }
    }
}
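// A minimal driver sketch (not from the original samples) showing how the two helpers
// above are meant to be called; "clip.avi" and the schema/set names are placeholders.
void dumpAndRemoveExample()
{
    const string path = "clip.avi";
    dumpMetadata(path);                               // dump everything
    dumpMetadata(path, "my_schema");                  // dump all sets of one schema
    removeMetadata(path, "my_schema", "my_set");      // remove all items of one set
    removeMetadata(path);                             // wipe all metadata from the stream
}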
MetadataSet Metadata::getReferencesByName(const std::string& sRefName) const
{
    MetadataSet mdSet;
    std::for_each(m_vReferences.begin(), m_vReferences.end(), [&](const Reference& ref)
    {
        auto spDesc = ref.getReferenceDescription();
        auto spMetadata = ref.getReferenceMetadata().lock();
        // Both the description and the referenced metadata must be valid before they are
        // dereferenced (the original check used '||', which could dereference a null spDesc).
        if ((spMetadata != nullptr) && (spDesc != nullptr))
        {
            if (spDesc->name == sRefName)
                mdSet.emplace_back(spMetadata);
        }
    });
    return mdSet;
}
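// Usage sketch (hypothetical): querying the references of a single metadata item with the
// two accessors above. 'md' is assumed to be a valid std::shared_ptr<Metadata> whose
// references were added earlier; "child" and "my_set" are placeholder names.
void dumpReferencesExample(const std::shared_ptr<Metadata>& md)
{
    MetadataSet byRefName  = md->getReferencesByName("child");       // matches the reference name
    MetadataSet byItemName = md->getReferencesByMetadata("my_set");  // matches the referenced item's name
    cout << byRefName.size() << " / " << byItemName.size() << endl;
}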
bool MetadataStream::import( MetadataStream& srcStream, MetadataSet& srcSet, long long nTarFrameIndex, long long nSrcFrameIndex, long long nNumOfFrames, MetadataSet* pSetFailure )
{
    // Find all schemes used by the source metadata set
    std::vector< std::string > vSchemaNames;
    std::for_each( srcSet.begin(), srcSet.end(), [&vSchemaNames]( std::shared_ptr<Metadata>& spMetadata )
    {
        vSchemaNames.push_back( spMetadata->getSchemaName() );
    });

    // Remove redundant schema names
    std::sort( vSchemaNames.begin(), vSchemaNames.end() );
    vSchemaNames.erase( std::unique( vSchemaNames.begin(), vSchemaNames.end() ), vSchemaNames.end());

    // Import all schemes used
    std::for_each( vSchemaNames.begin(), vSchemaNames.end(), [&]( std::string& sAppName )
    {
        if( this->getSchema( sAppName ) == nullptr )
        {
            auto it = srcStream.m_mapSchemas.find( sAppName );
            if( it != srcStream.m_mapSchemas.end())
                this->addSchema( it->second );
            else
            {
                VMF_EXCEPTION(InternalErrorException, "Metadata schema missing in the source stream!" );
            }
        }
    });

    // This map stores Id pairs<old, new> from the srcSet and this set.
    std::map< IdType, IdType > mapIds;

    // Import metadata one by one.
    std::for_each( srcSet.begin(), srcSet.end(), [&]( std::shared_ptr<Metadata>& spMetadata )
    {
        if( import( srcStream, spMetadata, mapIds, nTarFrameIndex, nSrcFrameIndex, nNumOfFrames ) == nullptr &&
            pSetFailure != NULL )
        {
            pSetFailure->push_back( spMetadata );
        }
    });

    return mapIds.size() > 0;
}
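// Usage sketch (assumptions: both files exist and carry a compatible "my_schema"; all
// names and the frame window are placeholders). It copies the source items into the
// destination stream via MetadataStream::import above, re-anchoring their timing from
// source frame nSrcFrame to destination frame nDstFrame over nFrames frames.
void importSegmentExample()
{
    MetadataStream src, dst;
    if (src.open("source.avi", MetadataStream::ReadOnly) && src.load("my_schema") &&
        dst.open("dest.avi", MetadataStream::ReadWrite))
    {
        MetadataSet toCopy = src.queryBySchema("my_schema");
        MetadataSet failed;
        long long nDstFrame = 0, nSrcFrame = 100, nFrames = 50;
        if (dst.import(src, toCopy, nDstFrame, nSrcFrame, nFrames, &failed))
            dst.save();
        cout << failed.size() << " items could not be imported" << endl;
    }
}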
std::string FormatCompressed::compress(const std::string& input)
{
    if (!compressorId.empty())
    {
        std::shared_ptr<Compressor> compressor = Compressor::create(compressorId);
        umf_rawbuffer compressedBuf;
        compressor->compress(input, compressedBuf);

        // Compressed binary data should be represented in base64
        // because of '\0' symbols
        std::string compressed = Variant::base64encode(compressedBuf);

        // Store compressed data in a format of current implementation
        std::shared_ptr<Metadata> cMetadata;
        cMetadata = std::make_shared<Metadata>(cSchema->findMetadataDesc(COMPRESSED_DATA_DESC_NAME));
        cMetadata->push_back(FieldValue(COMPRESSION_ALGO_PROP_NAME, compressor->getId()));
        cMetadata->push_back(FieldValue(COMPRESSED_DATA_PROP_NAME, compressed));

        MetadataAccessor metadataAccessor(*cMetadata);
        metadataAccessor.setId(0);
        cMetadata = std::make_shared<Metadata>(metadataAccessor);

        MetadataSet cSet;
        cSet.push_back(cMetadata);
        std::vector< std::shared_ptr<MetadataSchema> > cSchemas;
        cSchemas.push_back(cSchema);

        const IdType nextId = 1;
        std::vector<std::shared_ptr<MetadataStream::VideoSegment>> segments;
        std::vector<std::shared_ptr<Stat>> stats;
        AttribMap attribs{ {"nextId", to_string(nextId)}, };

        // create writer with no wrapping (like compression or encryption) enabled
        std::string outputString;
        outputString = getBackendFormat()->store(cSet, cSchemas, segments, stats, attribs);

        return outputString;
    }
    else
    {
        return input;
    }
}
void XMPMetadataSource::saveSchema(const MetaString& schemaName, const MetadataStream& stream)
{
    shared_ptr<MetadataSchema> thisSchemaDescription = stream.getSchema(schemaName);
    MetaString thisSchemaPath = findSchema(schemaName);
    if (thisSchemaPath.empty())
    {
        xmp->AppendArrayItem(VMF_NS, VMF_GLOBAL_SCHEMAS_ARRAY, kXMP_PropValueIsArray, NULL, kXMP_PropValueIsStruct);
        SXMPUtils::ComposeArrayItemPath(VMF_NS, VMF_GLOBAL_SCHEMAS_ARRAY, kXMP_ArrayLastItem, &thisSchemaPath);
        xmp->SetStructField(VMF_NS, thisSchemaPath.c_str(), VMF_NS, SCHEMA_NAME, schemaName);
        xmp->SetStructField(VMF_NS, thisSchemaPath.c_str(), VMF_NS, SCHEMA_SET, nullptr, kXMP_PropValueIsArray);
    }
    MetadataSet thisSchemaSet = stream.queryBySchema(schemaName);
    vector< shared_ptr<MetadataDesc> > thisSchemaProperties = thisSchemaDescription->getAll();
    for (auto descIter = thisSchemaProperties.begin(); descIter != thisSchemaProperties.end(); ++descIter)
    {
        MetaString metadataName = (*descIter)->getMetadataName();
        MetadataSet currentPropertySet(thisSchemaSet.queryByName(metadataName));
        saveProperty(currentPropertySet, thisSchemaPath, metadataName);
    }
}
void WAVE_MetaHandler::UpdateFile ( bool doSafeUpdate )
{
    if ( ! this->needsUpdate )
    {
        // If needsUpdate is set then at least the XMP changed.
        return;
    }

    if ( doSafeUpdate )
    {
        XMP_Throw ( "WAVE_MetaHandler::UpdateFile: Safe update not supported", kXMPErr_Unavailable );
    }

    // Export XMP to legacy chunks. Create/delete them if necessary
    MetadataSet metaSet;
    WAVEReconcile recon;

    metaSet.append( &mINFOMeta );
    metaSet.append( &mBEXTMeta );
    metaSet.append( &mCartMeta );
    metaSet.append( &mDISPMeta );

    // cr8r is not yet required for WAVE
    // metaSet.append( &mCr8rMeta );

    // If anything changes, update/create/delete the legacy chunks
    if( recon.exportFromXMP( metaSet, this->xmpObj ) )
    {
        if ( mINFOMeta.hasChanged( ))
        {
            updateLegacyChunk( &mINFOChunk, kChunk_LIST, kType_INFO, mINFOMeta );
        }
        if ( mBEXTMeta.hasChanged( ))
        {
            updateLegacyChunk( &mBEXTChunk, kChunk_bext, kType_NONE, mBEXTMeta );
        }
        if ( mCartMeta.hasChanged( ))
        {
            updateLegacyChunk( &mCartChunk, kChunk_cart, kType_NONE, mCartMeta );
        }
        if ( mDISPMeta.hasChanged( ))
        {
            updateLegacyChunk( &mDISPChunk, kChunk_DISP, kType_NONE, mDISPMeta );
        }

        // cr8r is not yet required for WAVE
        // if ( mCr8rMeta.hasChanged( ))
        // {
        //     updateLegacyChunk( &mCr8rChunk, kChunk_Cr8r, kType_NONE, mCr8rMeta );
        // }
    }

    // update/create XMP chunk
    if( this->containsXMP )
    {
        this->xmpObj.SerializeToBuffer ( &(this->xmpPacket) );

        if( mXMPChunk != NULL )
        {
            mXMPChunk->setData( reinterpret_cast<const XMP_Uns8 *>(this->xmpPacket.c_str()), this->xmpPacket.length() );
        }
        else // create XMP chunk
        {
            mXMPChunk = mChunkController->createChunk( kChunk_XMP, kType_NONE );
            mXMPChunk->setData( reinterpret_cast<const XMP_Uns8 *>(this->xmpPacket.c_str()), this->xmpPacket.length() );
            mChunkController->insertChunk( mXMPChunk );
        }
    }
    // XMP Packet is never completely removed from the file.

    XMP_ProgressTracker* progressTracker = this->parent->progressTracker;
    // Local progress tracking is required because, for handlers incapable of
    // kXMPFiles_CanRewrite, XMPFiles calls this Update method after making
    // a copy of the original file.
    bool localProgressTracking = false;
    if ( progressTracker != 0 )
    {
        if ( ! progressTracker->WorkInProgress() )
        {
            localProgressTracking = true;
            progressTracker->BeginWork ();
        }
    }

    // write tree back to file
    mChunkController->writeFile( this->parent->ioRef, progressTracker );

    if ( localProgressTracking && progressTracker != 0 )
        progressTracker->WorkComplete();

    this->needsUpdate = false; // Make sure this is only called once.

} // WAVE_MetaHandler::UpdateFile
void WAVE_MetaHandler::ProcessXMP()
{
    // Must be done only once
    if ( this->processedXMP )
    {
        return;
    }

    // Set the status at start, in case something goes wrong in this method
    this->processedXMP = true;

    // Parse the XMP
    if ( ! this->xmpPacket.empty() )
    {
        XMP_Assert ( this->containsXMP );

        FillPacketInfo ( this->xmpPacket, &this->packetInfo );

        this->xmpObj.ParseFromBuffer ( this->xmpPacket.c_str(), (XMP_StringLen)this->xmpPacket.size() );

        this->containsXMP = true;
    }

    // Then import native properties
    MetadataSet metaSet;
    WAVEReconcile recon;

    // Parse the WAVE metadata object with values
    const XMP_Uns8* buffer = NULL; // temporary buffer
    XMP_Uns64 size = 0;

    // Get LIST:INFO legacy chunk
    mINFOChunk = mChunkController->getChunk( mWAVEInfoChunkPath, true );
    if( mINFOChunk != NULL )
    {
        size = mINFOChunk->getData( &buffer );
        mINFOMeta.parse( buffer, size );
    }

    // Parse Bext legacy chunk
    mBEXTChunk = mChunkController->getChunk( mWAVEBextChunkPath, true );
    if( mBEXTChunk != NULL )
    {
        size = mBEXTChunk->getData( &buffer );
        mBEXTMeta.parse( buffer, size );
    }

    // Parse cart legacy chunk
    mCartChunk = mChunkController->getChunk( mWAVECartChunkPath, true );
    if( mCartChunk != NULL )
    {
        size = mCartChunk->getData( &buffer );
        mCartMeta.parse( buffer, size );
    }

    // Parse DISP legacy chunk
    const std::vector<IChunkData*>& disps = mChunkController->getChunks( mWAVEDispChunkPath );
    if( ! disps.empty() )
    {
        for( std::vector<IChunkData*>::const_reverse_iterator iter=disps.rbegin(); iter!=disps.rend(); iter++ )
        {
            size = (*iter)->getData( &buffer );
            if( DISPMetadata::isValidDISP( buffer, size ) )
            {
                mDISPChunk = (*iter);
                break;
            }
        }
    }

    if( mDISPChunk != NULL )
    {
        size = mDISPChunk->getData( &buffer );
        mDISPMeta.parse( buffer, size );
    }

    // cr8r is not yet required for WAVE
    // // Parse Cr8r legacy chunk
    // mCr8rChunk = mChunkController->getChunk( mWAVECr8rChunkPath );
    // if( mCr8rChunk != NULL )
    // {
    //     size = mCr8rChunk->getData( &buffer );
    //     mCr8rMeta.parse( buffer, size );
    // }

    // Append the legacy metadata to the metadata list
    metaSet.append( &mINFOMeta );
    metaSet.append( &mBEXTMeta );
    metaSet.append( &mCartMeta );
    metaSet.append( &mDISPMeta );

    // cr8r is not yet required for WAVE
    // metaSet.append( &mCr8rMeta );

    // Do the import
    if( recon.importToXMP( this->xmpObj, metaSet ) )
    {
        // Remember if anything has changed
        this->containsXMP = true;
    }
} // WAVE_MetaHandler::ProcessXMP
void AIFF_MetaHandler::UpdateFile ( bool doSafeUpdate )
{
    if ( ! this->needsUpdate )
    {
        // If needsUpdate is set then at least the XMP changed.
        return;
    }

    if ( doSafeUpdate )
    {
        XMP_Throw ( "AIFF_MetaHandler::UpdateFile: Safe update not supported", kXMPErr_Unavailable );
    }

    // update/create XMP chunk
    if( this->containsXMP )
    {
        this->xmpObj.SerializeToBuffer ( &(this->xmpPacket) );

        if( mXMPChunk != NULL )
        {
            mXMPChunk->setData( reinterpret_cast<const XMP_Uns8 *>(this->xmpPacket.c_str()), this->xmpPacket.length(), true );
        }
        else // create XMP chunk
        {
            mXMPChunk = mChunkController->createChunk( kChunk_APPL, kType_XMP );
            mXMPChunk->setData( reinterpret_cast<const XMP_Uns8 *>(this->xmpPacket.c_str()), this->xmpPacket.length(), true );
            mChunkController->insertChunk( mXMPChunk );
        }
    }
    // XMP Packet is never completely removed from the file.

    // Export XMP to legacy chunks. Create/delete them if necessary
    MetadataSet metaSet;
    AIFFReconcile recon;

    metaSet.append( &mAiffMeta );

    // If anything changes, update/create/delete the legacy chunks
    if( recon.exportFromXMP( metaSet, this->xmpObj ) )
    {
        updateLegacyChunk( &mNameChunk, kChunk_NAME, AIFFMetadata::kName );
        updateLegacyChunk( &mAuthChunk, kChunk_AUTH, AIFFMetadata::kAuthor );
        updateLegacyChunk( &mCprChunk, kChunk_CPR, AIFFMetadata::kCopyright );
        updateLegacyChunk( &mAnnoChunk, kChunk_ANNO, AIFFMetadata::kAnnotation );
    }

    XMP_ProgressTracker* progressTracker = this->parent->progressTracker;
    // Local progress tracking is required because, for handlers incapable of
    // kXMPFiles_CanRewrite, XMPFiles calls this Update method after making
    // a copy of the original file.
    bool localProgressTracking = false;
    if ( progressTracker != 0 )
    {
        if ( ! progressTracker->WorkInProgress() )
        {
            localProgressTracking = true;
            progressTracker->BeginWork ();
        }
    }

    // write tree back to file
    mChunkController->writeFile( this->parent->ioRef, progressTracker );

    if ( localProgressTracking && progressTracker != 0 )
        progressTracker->WorkComplete();

    this->needsUpdate = false; // Make sure this is only called once.

} // AIFF_MetaHandler::UpdateFile
void AIFF_MetaHandler::ProcessXMP()
{
    // Must be done only once
    if ( this->processedXMP )
    {
        return;
    }

    // Set the status at start, in case something goes wrong in this method
    this->processedXMP = true;

    // Parse the XMP
    if ( ! this->xmpPacket.empty() )
    {
        XMP_Assert ( this->containsXMP );

        FillPacketInfo ( this->xmpPacket, &this->packetInfo );

        this->xmpObj.ParseFromBuffer ( this->xmpPacket.c_str(), (XMP_StringLen)this->xmpPacket.size() );

        this->containsXMP = true;
    }

    // Then import native properties
    MetadataSet metaSet;
    AIFFReconcile recon;

    // Fill the AIFF metadata object with values

    // Get NAME (title) legacy chunk
    mNameChunk = mChunkController->getChunk( mAIFFNameChunkPath, true );
    if( mNameChunk != NULL )
    {
        mAiffMeta.setValue<std::string>( AIFFMetadata::kName, mNameChunk->getString() );
    }

    // Get AUTH (author) legacy chunk
    mAuthChunk = mChunkController->getChunk( mAIFFAuthChunkPath, true );
    if( mAuthChunk != NULL )
    {
        mAiffMeta.setValue<std::string>( AIFFMetadata::kAuthor, mAuthChunk->getString() );
    }

    // Get CPR (Copyright) legacy chunk
    mCprChunk = mChunkController->getChunk( mAIFFCprChunkPath, true );
    if( mCprChunk != NULL )
    {
        mAiffMeta.setValue<std::string>( AIFFMetadata::kCopyright, mCprChunk->getString() );
    }

    // Get ANNO (annotation) legacy chunk(s)
    // Get the list of Annotation chunks and pick the last one not being empty
    const std::vector<IChunkData*> &annoChunks = mChunkController->getChunks( mAIFFAnnoChunkPath );
    mAnnoChunk = selectLastNonEmptyAnnoChunk( annoChunks );
    if( mAnnoChunk != NULL )
    {
        mAiffMeta.setValue<std::string>( AIFFMetadata::kAnnotation, mAnnoChunk->getString() );
    }

    // Only interested in AIFF metadata
    metaSet.append( &mAiffMeta );

    // Do the import
    if( recon.importToXMP( this->xmpObj, metaSet ) )
    {
        // Remember if anything has changed
        this->containsXMP = true;
    }
} // AIFF_MetaHandler::ProcessXMP
void WAVE_MetaHandler::UpdateFile ( bool doSafeUpdate )
{
    if ( ! this->needsUpdate )
    {
        // If needsUpdate is set then at least the XMP changed.
        return;
    }

    if ( doSafeUpdate )
    {
        XMP_Throw ( "WAVE_MetaHandler::UpdateFile: Safe update not supported", kXMPErr_Unavailable );
    }

    // Export XMP to legacy chunks. Create/delete them if necessary
    MetadataSet metaSet;
    WAVEReconcile recon;

    metaSet.append( &mINFOMeta );
    metaSet.append( &mBEXTMeta );
    metaSet.append( &mCartMeta );
    metaSet.append( &mDISPMeta );

    // cr8r is not yet required for WAVE
    // metaSet.append( &mCr8rMeta );

    // If anything changes, update/create/delete the legacy chunks
    if( recon.exportFromXMP( metaSet, this->xmpObj ) )
    {
        if ( mINFOMeta.hasChanged( ))
        {
            updateLegacyChunk( &mINFOChunk, kChunk_LIST, kType_INFO, mINFOMeta );
        }
        if ( mBEXTMeta.hasChanged( ))
        {
            updateLegacyChunk( &mBEXTChunk, kChunk_bext, kType_NONE, mBEXTMeta );
        }
        if ( mCartMeta.hasChanged( ))
        {
            updateLegacyChunk( &mCartChunk, kChunk_cart, kType_NONE, mCartMeta );
        }
        if ( mDISPMeta.hasChanged( ))
        {
            updateLegacyChunk( &mDISPChunk, kChunk_DISP, kType_NONE, mDISPMeta );
        }

        // cr8r is not yet required for WAVE
        // if ( mCr8rMeta.hasChanged( ))
        // {
        //     updateLegacyChunk( &mCr8rChunk, kChunk_Cr8r, kType_NONE, mCr8rMeta );
        // }
    }

    // update/create XMP chunk
    if( this->containsXMP )
    {
        this->xmpObj.SerializeToBuffer ( &(this->xmpPacket) );

        if( mXMPChunk != NULL )
        {
            mXMPChunk->setData( reinterpret_cast<const XMP_Uns8 *>(this->xmpPacket.c_str()), this->xmpPacket.length() );
        }
        else // create XMP chunk
        {
            mXMPChunk = mChunkController->createChunk( kChunk_XMP, kType_NONE );
            mXMPChunk->setData( reinterpret_cast<const XMP_Uns8 *>(this->xmpPacket.c_str()), this->xmpPacket.length() );
            mChunkController->insertChunk( mXMPChunk );
        }
    }
    // XMP Packet is never completely removed from the file.

    // write tree back to file
    mChunkController->writeFile( this->parent->ioRef );

    this->needsUpdate = false; // Make sure this is only called once.

} // WAVE_MetaHandler::UpdateFile
void readAndDumpEncryptedMetadata(const umf_string& videoFile, std::shared_ptr<Encryptor> encryptor)
{
    cout << "Opening file name '" << videoFile << "'" << endl;

    // Open new metadata stream to load and print saved metadata
    MetadataStream loadStream;
    loadStream.setEncryptor(encryptor);
    if (!loadStream.open(videoFile, MetadataStream::ReadOnly))
    {
        cerr << "Can't open file " << videoFile << endl;
        exit(1);
    }

    if (loadStream.getUseEncryption())
    {
        cout << "The whole stream is encrypted" << endl;
    }

    // Get all schemas
    vector<string> schemas = loadStream.getAllSchemaNames();
    // and dump all the related data to console
    for (size_t sNum = 0; sNum < schemas.size(); sNum++)
    {
        string sName = schemas[sNum];
        std::shared_ptr<MetadataSchema> schemaPtr = loadStream.getSchema(sName);
        bool schemaEncrypted = schemaPtr->getUseEncryption();
        cout << "* (" << sNum << ") [schema]: " << sName;
        if (schemaEncrypted)
        {
            cout << ", encrypted";
        }
        cout << endl;

        if (!loadStream.load(sName))
        {
            cerr << "Error loading schema " << sName << endl;
            exit(1);
        }

        vector< shared_ptr<MetadataDesc> > mDescs = schemaPtr->getAll();
        for (size_t setNum = 0; setNum < mDescs.size(); setNum++)
        {
            shared_ptr<MetadataDesc> mDesc = mDescs[setNum];
            string setName = mDesc->getMetadataName();
            bool metadescEncrypted = mDesc->getUseEncryption();
            MetadataSet mdSet = loadStream.queryByName(setName);
            cout << "\t* (" << sNum << "." << setNum << ") [set]: ";
            cout << setName << "(" << mdSet.size() << " items)";
            if (mdSet.empty()) continue;

            vector<string> fields(mdSet[0]->getFieldNames());
            cout << " {";
            const char * separator = "";
            for (FieldDesc fDesc : mDesc->getFields())
            {
                cout << separator << fDesc.name;
                if (fDesc.useEncryption)
                {
                    cout << " encrypted";
                }
                separator = ", ";
            }
            cout << "}";
            if (metadescEncrypted)
            {
                cout << ", encrypted";
            }
            cout << endl;

            int itemNum = 0;
            for (const std::shared_ptr<Metadata>& item : mdSet)
            {
                if (itemNum++ <= 3)
                {
                    cout << "\t\t* (" << sNum << "." << setNum << "." << itemNum << ") { ";
                    separator = "";
                    for (const string& f : fields)
                    {
                        cout << separator << f << "=";
                        try
                        {
                            cout << item->getFieldValue(f).toString();
                        }
                        catch (umf::Exception& e)
                        {
                            cout << '<' << e.what() << '>';
                        }
                        if (item->findField(f)->getUseEncryption())
                        {
                            cout << " encrypted";
                        }
                        separator = ", ";
                    }
                    cout << " }";
                    long long t = item->getTime();
                    if (t >= 0) cout << ", time " << t;
                    if (item->getUseEncryption()) cout << ", encrypted";
                    cout << endl;
                }
                else
                    cout << '.';
            }
            cout << "\n\t\t" << itemNum << " items." << endl;
        }
    }

    // Close metadata stream
    loadStream.close();
}
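// Hypothetical call-site sketch for the helper above. EncryptorDefault (a passphrase-based
// Encryptor implementation, assumed to be available as in the UMF samples) is used here;
// the file name and passphrase are placeholders.
void readEncryptedExample()
{
    std::shared_ptr<Encryptor> encryptor = std::make_shared<EncryptorDefault>("my passphrase");
    readAndDumpEncryptedMetadata("encrypted_clip.avi", encryptor);
}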