void MOOV_Manager::UpdateMemoryTree()
{
	// Serialize the in-memory 'moov' box tree back into this->fullSubtree, then re-parse so that
	// all BoxNode offsets and content pointers refer to the fresh buffer. No-op if nothing changed.
	if ( ! this->IsChanged() ) return;

	XMP_Uns32 newSize = this->NewSubtreeSize ( this->moovNode, "" );
	XMP_Enforce ( newSize < moovBoxSizeLimit );

	RawDataBlock newData;
	newData.assign ( newSize, 0 );	// Prefill with zeroes, can't append multiple items to a vector.
	XMP_Uns8 * newPtr = &newData[0];
	XMP_Uns8 * newEnd = newPtr + newSize;

	#if TraceUpdateMoovTree
		fprintf ( stderr, "Starting MOOV_Manager::UpdateMemoryTree\n" );
		newOrigin = newPtr;
	#endif

	XMP_Uns8 * trueEnd = this->AppendNewSubtree ( this->moovNode, "", newPtr, newEnd );
	XMP_Enforce ( trueEnd == newEnd );	// The append must consume exactly the precomputed size.

	this->fullSubtree.swap ( newData );
	this->ParseMemoryTree ( this->fileMode );	// Rebuild node offsets against the new buffer.

}	// MOOV_Manager::UpdateMemoryTree
XMP_Uns8 * MOOV_Manager::AppendNewSubtree ( const BoxNode & node, const std::string & parentPath, XMP_Uns8 * newPtr, XMP_Uns8 * newEnd )
{
	// Serialize one box into [newPtr..newEnd): 8-byte header, optional 16-byte uuid id, content,
	// then recurse for nested boxes. Returns the first byte past what was written.

	// NOTE(review): this condition is an empty placeholder — 'free' and 'wide' boxes receive no
	// special handling here.
	if ( (node.boxType == ISOMedia::k_free) || (node.boxType == ISOMedia::k_wide) ) {
	}

	// Only a 'meta' box may carry both children and (exactly 4 bytes of) content.
	XMP_Assert ( (node.boxType != ISOMedia::k_meta) ? (node.children.empty() || (node.contentSize == 0))
													: (node.children.empty() || (node.contentSize == 4)) );

	XMP_Enforce ( (XMP_Uns32)(newEnd - newPtr) >= (8 + node.contentSize) );

	#if TraceUpdateMoovTree
		XMP_Uns32 be32 = MakeUns32BE ( node.boxType );
		XMP_Uns32 newOffset = (XMP_Uns32) (newPtr - newOrigin);
		XMP_Uns32 addr32 = (XMP_Uns32) this->PickContentPtr ( node );
		fprintf ( stderr, "  Appending %s/%.4s @ 0x%X, size %d, content @ 0x%X\n", parentPath.c_str(), &be32, newOffset, node.contentSize, addr32 );
	#endif

	// Leave the size as 0 for now, append the type and content.

	XMP_Uns8 * boxOrigin = newPtr;	// Save origin to fill in the final size.
	PutUns32BE ( node.boxType, (newPtr + 4) );
	IncrNewPtr ( 8 );
	if( node.boxType == ISOMedia::k_uuid )	// For uuid, additional 16 bytes is stored for ID
	{
		// Re-check remaining space now that the 8-byte header has been consumed.
		XMP_Enforce ( (XMP_Uns32)(newEnd - newPtr) >= ( 16 + node.contentSize ) );
		memcpy( newPtr, node.idUUID, 16 );
		IncrNewPtr ( 16 );
	}

	if ( node.contentSize != 0 ) {
		const XMP_Uns8 * content = PickContentPtr( node );
		memcpy ( newPtr, content, node.contentSize );
		IncrNewPtr ( node.contentSize );
	}

	// Append the nested boxes.

	if ( ! node.children.empty() ) {

		// Child paths are "<parent>/<4-char type>".
		char suffix[6];
		suffix[0] = '/';
		PutUns32BE ( node.boxType, &suffix[1] );
		suffix[5] = 0;
		std::string nodePath = parentPath + suffix;

		for ( size_t i = 0, limit = node.children.size(); i < limit; ++i ) {
			newPtr = this->AppendNewSubtree ( node.children[i], nodePath, newPtr, newEnd );
		}

	}

	// Fill in the final size.

	PutUns32BE ( (XMP_Uns32)(newPtr - boxOrigin), boxOrigin );

	return newPtr;

}	// MOOV_Manager::AppendNewSubtree
// Compute the serialized size (8-byte chunk header + packet) of the XMP chunk.
// An XMP chunk is always treated as changed; a complete no-change would have been
// caught in XMPFiles common code anyway.
void XMPChunk::changesAndSize( RIFF_MetaHandler* handler )
{
	// ! The original checked `&handler->xmpPacket != 0`, which is vacuously true (the address of
	// ! a member is never null). Check the handler pointer itself, which is what was intended.
	XMP_Enforce( handler != 0 );
	XMP_Enforce( handler->xmpPacket.size() > 0 );

	this->newSize = 8 + handler->xmpPacket.size();
	XMP_Validate( this->newSize <= 0xFFFFFFFFLL, "no single chunk may be above 4 GB", kXMPErr_InternalFailure );

	this->hasChange = true;
}
void ContainerChunk::write( RIFF_MetaHandler* handler, XMP_IO* file, bool isMainChunk )
{
	// Write this container (header + all children) at the current file position. Children are
	// written back-to-front so that moving an unchanged blob forward cannot overwrite bytes that
	// still need to be read.
	if ( isMainChunk ) file ->Rewind();	// enforce even position

	XMP_Int64 chunkStart = file->Offset();
	XMP_Int64 chunkEnd = chunkStart + this->newSize;
	XMP_Enforce( chunkStart % 2 == 0 );	// RIFF chunks must start on even offsets.
	chunkVect *rc = &this->children;

	// [2473303] have to write back-to-front to avoid stomp-on-feet
	XMP_Int64 childStart = chunkEnd;
	for ( XMP_Int32 chunkNo = (XMP_Int32)(rc->size() -1); chunkNo >= 0; chunkNo-- )
	{
		Chunk* cur = rc->at(chunkNo);

		// pad byte first
		if ( cur->newSize % 2 == 1 )
		{
			childStart--;
			file->Seek ( childStart, kXMP_SeekFromStart );
			XIO::WriteUns8( file, 0 );
		}

		// then contents
		childStart-= cur->newSize;
		file->Seek ( childStart, kXMP_SeekFromStart );
		switch ( cur->chunkType )
		{
			case chunk_GENERAL:
				//COULDDO enfore no change, since not write-out-able
				// Unchanged raw blob: only relocate the bytes if the position actually moved.
				if ( cur->oldPos != childStart )
					XIO::Move( file, cur->oldPos, file, childStart, cur->oldSize );
				break;
			default:
				cur->write( handler, file, false );
				break;
		} // switch
	} // for

	// After writing all children backwards we must land exactly past the 12-byte container header.
	XMP_Enforce ( chunkStart + 12 == childStart);
	file->Seek ( chunkStart, kXMP_SeekFromStart );
	XIO::WriteUns32_LE( file, this->id );
	XIO::WriteUns32_LE( file, (XMP_Uns32) this->newSize - 8 );	// validated in changesAndSize() above
	XIO::WriteUns32_LE( file, this->containerType );
}
// Write this junk chunk at the current file position: the 'JUNK' header followed by a
// zero-filled payload emitted in 64K slices from the shared zero buffer.
void JunkChunk::write( RIFF_MetaHandler* /*handler*/, XMP_IO* file, bool /*isMainChunk*/ )
{
	XIO::WriteUns32_LE( file, kChunk_JUNK );	// write JUNK, never JUNQ

	// The 8-byte header is included in newSize; the payload length must fit 32 bits.
	XMP_Enforce( this->newSize < 0xFFFFFFFF );
	XMP_Enforce( this->newSize >= 8 );	// minimum size of any chunk

	XMP_Uns32 remaining = (XMP_Uns32)this->newSize - 8;
	XIO::WriteUns32_LE( file, remaining );

	// Emit full 64K slices, then whatever is left over.
	for ( ; remaining > kZeroBufferSize64K; remaining -= kZeroBufferSize64K ) {
		file->Write ( kZeroes64K , kZeroBufferSize64K );
	}
	file->Write ( kZeroes64K , remaining );
}
// Compute the serialized size of this box plus all nested boxes: an 8-byte header, 16 extra
// bytes of id for 'uuid' boxes, the content, and the recursive sizes of the children.
// Enforces the overall 'moov' size limit as it accumulates.
XMP_Uns32 MOOV_Manager::NewSubtreeSize ( const BoxNode & node, const std::string & parentPath )
{
	XMP_Uns32 subtreeSize = 8 + node.contentSize;	// All boxes will have 8 byte headers.
	if ( node.boxType == ISOMedia::k_uuid ) subtreeSize += 16;	// id of uuid is 16 bytes long

	// ! Removed a dead empty `if` on k_free/k_wide that did nothing.

	if ( ! node.children.empty() ) {

		// The path suffix is identical for every child; build it once instead of per iteration.
		char suffix[6];
		suffix[0] = '/';
		PutUns32BE ( node.boxType, &suffix[1] );
		suffix[5] = 0;
		std::string nodePath = parentPath + suffix;

		for ( size_t i = 0, limit = node.children.size(); i < limit; ++i ) {
			subtreeSize += this->NewSubtreeSize ( node.children[i], nodePath );
			XMP_Enforce ( subtreeSize < moovBoxSizeLimit );	// Guard against oversize/overflowing 'moov'.
		}

	}

	return subtreeSize;

}	// MOOV_Manager::NewSubtreeSize
void MOOV_Manager::SetBox ( const char * boxPath, const void* dataPtr, XMP_Uns32 size , const XMP_Uns8 * idUUID )
{
	// Set the content of the box named by a path like "moov/udta/XMP_" — 4-character box types
	// separated by '/' — creating any missing intermediate boxes along the way.
	XMP_Enforce ( size < moovBoxSizeLimit );
	size_t pathLen = strlen(boxPath);
	XMP_Assert ( (pathLen >= 4) && XMP_LitNMatch ( boxPath, "moov", 4 ) );

	const char * pathPtr = boxPath + 5;	// Skip the "moov/" portion.
	const char * pathEnd = boxPath + pathLen;

	BoxRef parentRef = 0;
	BoxRef currRef = &this->moovNode;

	while ( pathPtr < pathEnd ) {

		XMP_Assert ( (pathEnd - pathPtr) >= 4 );
		XMP_Uns32 boxType = GetUns32BE ( pathPtr );
		pathPtr += 5;	// ! Don't care that the last step goes 1 too far.

		parentRef = currRef;
		currRef = this->GetTypeChild ( parentRef, boxType, 0 );
		// Create missing path components on demand, initially empty.
		if ( currRef == 0 ) currRef = this->AddChildBox ( parentRef, boxType, 0, 0 , idUUID );

	}

	// Delegate to the BoxRef overload for the actual content update.
	this->SetBox ( currRef, dataPtr, size, idUUID );

}	// MOOV_Manager::SetBox
XMP_Int64 XMPFiles_IO::Seek ( XMP_Int64 offset, SeekMode mode )
{
	XMP_FILESIO_START
	XMP_Assert ( this->fileRef != Host_IO::noFileRef );
	XMP_Assert ( this->currOffset == Host_IO::Offset ( this->fileRef ) );
	XMP_Assert ( this->currLength == Host_IO::Length ( this->fileRef ) );

	// Convert the request into an absolute offset for the range checks below.
	XMP_Int64 newOffset = offset;
	if ( mode == kXMP_SeekFromCurrent ) {
		newOffset += this->currOffset;
	} else if ( mode == kXMP_SeekFromEnd ) {
		newOffset += this->currLength;
	}
	XMP_Enforce ( newOffset >= 0 );

	if ( newOffset <= this->currLength ) {
		// In-bounds seek: let the host do it with the original (offset, mode) pair.
		this->currOffset = Host_IO::Seek ( this->fileRef, offset, mode );
	} else if ( this->readOnly ) {
		XMP_Throw ( "XMPFiles_IO::Seek, read-only seek beyond EOF", kXMPErr_EnforceFailure );
	} else {
		Host_IO::SetEOF ( this->fileRef, newOffset );	// Extend a file open for writing.
		this->currLength = newOffset;
		this->currOffset = Host_IO::Seek ( this->fileRef, 0, kXMP_SeekFromEnd );
	}

	XMP_Assert ( this->currOffset == newOffset );
	return this->currOffset;

	XMP_FILESIO_END1 ( kXMPErrSev_FileFatal );
	return -1;

}	// XMPFiles_IO::Seek
// Write this record's fixed fields, followed by the optional comment, at the current
// file position.
void write(XMP_IO* file)
{
	XMP_Enforce( this->SIG == GetUns32LE( &this->fields[o_Sig] ) );	// Sanity: signature still intact.
	// Refresh the cached comment length from the raw field bytes before writing.
	commentLen = GetUns16LE( &this->fields[o_CommentLen] );
	file ->Write ( fields , FIXED_SIZE );
	if (commentLen) file->Write ( comment, commentLen );
}
// Set this header's stored filename to the fixed XMP part name "META-INF/metadata.xml".
void setXMPFilename()
{
	// only needed for fresh structs, thus enforcing rather than catering to memory issues
	XMP_Enforce( (filenameLen==0) && (extraFieldLen == 0) );
	filenameLen = xmpFilenameLen;	// presumably xmpFilenameLen == strlen("META-INF/metadata.xml") — the memcpy below relies on it; TODO confirm
	PutUns16LE(filenameLen, &fields[FileHeader::o_fileNameLength] );
	filename = new char[xmpFilenameLen];	// Not NUL-terminated; the length is carried separately.
	memcpy(filename,"META-INF/metadata.xml",xmpFilenameLen);
}
// writes structure to file (starting at current position) void write(XMP_IO* file) { //// WRITE BACK REAL 64 BIT VALUES, CREATE EXTRA FIELD /////////////// //may only wipe extra field after obtaining all Info from it if (extraField) delete extraField; extraFieldLen=0; if ( ( sizeUncompressed > 0xffffffff ) || ( sizeCompressed > 0xffffffff ) || ( offsetLocalHeader > 0xffffffff ) ) { extraField = new char[64]; // actual maxlen is 32 extraFieldLen = 4; //first fields are for ID, size if ( sizeUncompressed > 0xffffffff ) { PutUns64LE( sizeUncompressed, &extraField[extraFieldLen] ); extraFieldLen += 8; sizeUncompressed = 0xffffffff; } if ( sizeCompressed > 0xffffffff ) { PutUns64LE( sizeCompressed, &extraField[extraFieldLen] ); extraFieldLen += 8; sizeCompressed = 0xffffffff; } if ( offsetLocalHeader > 0xffffffff ) { PutUns64LE( offsetLocalHeader, &extraField[extraFieldLen] ); extraFieldLen += 8; offsetLocalHeader = 0xffffffff; } //write ID, dataSize PutUns16LE( 0x0001, &extraField[0] ); PutUns16LE( extraFieldLen-4, &extraField[2] ); //extraFieldSize PutUns16LE( extraFieldLen, &this->fields[CDFileHeader::o_extraFieldLength] ); } // write out 32-bit ('ff-stubs' or not) PutUns32LE( (XMP_Uns32)sizeUncompressed, &fields[o_sizeUncompressed] ); PutUns32LE( (XMP_Uns32)sizeCompressed, &fields[o_sizeCompressed] ); PutUns32LE( (XMP_Uns32)offsetLocalHeader, &fields[o_offsetLocalHeader] ); /// WRITE ///////////////////////////////////////////////////////////////// XMP_Enforce( SIG == GetUns32LE( &this->fields[CDFileHeader::o_sig] ) ); file ->Write ( fields , FIXED_SIZE ); if (filenameLen) file->Write ( filename , filenameLen ); if (extraFieldLen) file->Write ( extraField , extraFieldLen ); if (commentLen) file->Write ( extraField , extraFieldLen ); }
// CONTAINER CHUNK ///////////////////////////////////////////////// // a) creation // [2376832] expectedSize - minimum padding "parking size" to use, if not available append to end ContainerChunk::ContainerChunk( ContainerChunk* parent_, XMP_Uns32 id_, XMP_Uns32 containerType ) : Chunk( NULL /* !! */, chunk_CONTAINER, id_ ) { // accept no unparented ConatinerChunks XMP_Enforce( parent_ != NULL ); this->containerType = containerType; this->newSize = 12; this->parent = parent_; chunkVect* siblings = &parent_->children; // add at end. ( oldSize==0 will flag optimization later in the process) siblings->push_back( this ); }
void MOOV_Manager::SetBox ( BoxRef theBox, const void* dataPtr, XMP_Uns32 size , const XMP_Uns8 * idUUID )
{
	// Replace the content of an existing box. A same-size update happens in place in the parse
	// buffer; a size change is stored in the node's changedContent vector. The tree's changed
	// flag is only set for real differences.
	XMP_Enforce ( size < moovBoxSizeLimit );
	BoxNode * node = (BoxNode*)theBox;

	if ( node->contentSize == size ) {

		if( node->boxType == ISOMedia::k_uuid && idUUID != 0 ) {
			// A new uuid id is a change by itself, even if the content below turns out identical.
			memcpy ( node->idUUID, idUUID, 16 );
			this->moovNode.changed = true;
		}
		XMP_Uns8 * oldContent = PickContentPtr ( *node );
		if ( memcmp ( oldContent, dataPtr, size ) == 0 ) return;	// No change.
		memcpy ( oldContent, dataPtr, size );	// Update the old content in-place
		this->moovNode.changed = true;

		#if TraceUpdateMoovTree
			XMP_Uns32 be32 = MakeUns32BE ( node->boxType );
			fprintf ( stderr, "Updated '%.4s', parse offset 0x%X, same size\n", &be32, node->offset );
		#endif

	} else {

		node->changedContent.assign ( size, 0 );	// Fill with 0's first to get the storage.
		memcpy ( &node->changedContent[0], dataPtr, size );
		node->contentSize = size;
		node->changed = true;
		if( node->boxType == ISOMedia::k_uuid && idUUID != 0)
			memcpy ( node->idUUID, idUUID, 16 );
		this->moovNode.changed = true;

		#if TraceUpdateMoovTree
			XMP_Uns32 be32 = MakeUns32BE ( node->boxType );
			XMP_Uns32 addr32 = (XMP_Uns32) this->PickContentPtr ( *node );
			fprintf ( stderr, "Updated '%.4s', parse offset 0x%X, new size %d, new content @ 0x%X\n", &be32, node->offset, node->contentSize, addr32 );
		#endif

	}

}	// MOOV_Manager::SetBox
void XMPFiles_IO::Truncate ( XMP_Int64 length )
{
	XMP_FILESIO_START
	XMP_Assert ( this->fileRef != Host_IO::noFileRef );
	XMP_Assert ( this->currOffset == Host_IO::Offset ( this->fileRef ) );
	XMP_Assert ( this->currLength == Host_IO::Length ( this->fileRef ) );

	if ( this->readOnly )
		XMP_Throw ( "New_XMPFiles_IO, truncate not permitted on read only file", kXMPErr_FilePermission );

	XMP_Enforce ( length <= this->currLength );	// Truncate may only shrink, never extend.

	Host_IO::SetEOF ( this->fileRef, length );
	this->currLength = length;
	// Clamp the cached offset if it now points past the new EOF.
	if ( this->currOffset > this->currLength ) this->currOffset = this->currLength;

	// ! Seek to the expected offset, some versions of Host_IO::SetEOF implicitly seek to EOF.
	Host_IO::Seek ( this->fileRef, this->currOffset, kXMP_SeekFromStart );

	XMP_Assert ( this->currOffset == Host_IO::Offset ( this->fileRef ) );
	XMP_FILESIO_END1 ( kXMPErrSev_FileFatal )

}	// XMPFiles_IO::Truncate
XMP_Uns32 XMPFiles_IO::Read ( void * buffer, XMP_Uns32 count, bool readAll /* = false */ )
{
	XMP_FILESIO_START
	XMP_Assert ( this->fileRef != Host_IO::noFileRef );
	XMP_Assert ( this->currOffset == Host_IO::Offset ( this->fileRef ) );
	XMP_Assert ( this->currLength == Host_IO::Length ( this->fileRef ) );
	XMP_Assert ( this->currOffset <= this->currLength );

	// Clip the request to the bytes remaining before EOF; with readAll a short read is an error.
	if ( count > (this->currLength - this->currOffset) ) {
		if ( readAll ) XMP_Throw ( "XMPFiles_IO::Read, not enough data", kXMPErr_EnforceFailure );
		count = (XMP_Uns32) (this->currLength - this->currOffset);
	}

	XMP_Uns32 amountRead = Host_IO::Read ( this->fileRef, buffer, count );
	XMP_Enforce ( amountRead == count );	// After clipping, the host read must be complete.

	this->currOffset += amountRead;
	return amountRead;

	XMP_FILESIO_END1 ( kXMPErrSev_FileFatal )
	return 0;

}	// XMPFiles_IO::Read
void MOOV_Manager::ParseMemoryTree ( XMP_Uns8 fileMode1 )
{
	// Reset the root node, then parse this->fullSubtree into the BoxNode tree rooted at moovNode.
	this->fileMode = fileMode1;

	this->moovNode.offset = this->moovNode.boxType = 0;
	this->moovNode.headerSize = this->moovNode.contentSize = 0;
	this->moovNode.children.clear();
	this->moovNode.changedContent.clear();
	this->moovNode.changed = false;

	if ( this->fullSubtree.empty() ) return;	// Nothing to parse.

	ISOMedia::BoxInfo moovInfo;
	const XMP_Uns8 * moovOrigin = &this->fullSubtree[0];
	const XMP_Uns8 * moovLimit = moovOrigin + this->fullSubtree.size();

	(void) ISOMedia::GetBoxInfo ( moovOrigin, moovLimit, &moovInfo );
	XMP_Enforce ( moovInfo.boxType == ISOMedia::k_moov );

	XMP_Uns64 fullMoovSize = moovInfo.headerSize + moovInfo.contentSize;
	if ( fullMoovSize > moovBoxSizeLimit ) {	// From here on we know 32-bit offsets are safe.
		XMP_Throw ( "Oversize 'moov' box", kXMPErr_EnforceFailure );
	}

	this->moovNode.boxType = ISOMedia::k_moov;
	this->moovNode.headerSize = moovInfo.headerSize;
	this->moovNode.contentSize = (XMP_Uns32)moovInfo.contentSize;

	bool ignoreMetaBoxes = (fileMode1 == kFileIsTraditionalQT);	// ! Don't want, these don't follow ISO spec.
	#if TraceParseMoovTree
		fprintf ( stderr, "Parsing 'moov' subtree, moovNode @ 0x%X, ignoreMetaBoxes = %d\n", &this->moovNode, ignoreMetaBoxes );
	#endif
	this->ParseNestedBoxes ( &this->moovNode, "moov", ignoreMetaBoxes );

}	// MOOV_Manager::ParseMemoryTree
void ContainerChunk::changesAndSize( RIFF_MetaHandler* handler )
{
	// Walk the container subtree adjusting the children that have size changes. The only containers
	// are RIFF and LIST chunks, they are treated differently.
	//
	// LISTs get recomposed as a whole. Existing JUNK children of a LIST are removed, existing real
	// children are left in order with their new size, new children have already been appended. The
	// LIST as a whole gets a new size that is the sum of the final children.
	//
	// Special rules apply to various children of a RIFF container. FIrst, adjacent JUNK children
	// are combined, this simplifies maximal reuse. The children are recursively adjusted in order
	// to get their final size.
	//
	// Try to determine the final placement of each RIFF child using general rules:
	//	- if the size is unchanged: leave at current location
	//	- if the chunk is at the end of the last RIFF chunk and grows: leave at current location
	//	- if there is enough following JUNK: add part of the JUNK, adjust remaining JUNK size
	//	- if it shrinks by 9 bytes or more: carve off trailing JUNK
	//	- try to find adequate JUNK in the current parent
	//
	// Use child-specific rules as a last resort:
	//	- if it is LIST:INFO: delete it, must be in first RIFF chunk
	//	- for others: move to end of last RIFF chunk, make old space JUNK

	// ! Don't create any junk chunks of exactly 8 bytes, just a header and no content. That has a
	// ! size field of zero, which hits a crashing bug in some versions of Windows Media Player.

	bool isRIFFContainer = (this->id == kChunk_RIFF);
	bool isLISTContainer = (this->id == kChunk_LIST);
	XMP_Enforce ( isRIFFContainer | isLISTContainer );

	XMP_Index childIndex;	// Could be local to the loops, this simplifies debuging. Need a signed type!
	Chunk * currChild;

	if ( this->children.empty() ) {
		if ( isRIFFContainer) {
			this->newSize = 12;	// Keep a minimal size container.
		} else {
			this->newSize = 0;	// Will get removed from parent in outer call.
		}
		this->hasChange = true;
		return;	// Nothing more to do without children.
	}

	// Collapse adjacent RIFF junk children, remove all LIST junk children. Work back to front to
	// simplify the effect of .erase() on the loop. Purposely ignore the first chunk.

	for ( childIndex = (XMP_Index)this->children.size() - 1; childIndex > 0; --childIndex ) {
		currChild = this->children[childIndex];
		if ( currChild->chunkType != chunk_JUNK ) continue;
		if ( isRIFFContainer ) {
			Chunk * prevChild = this->children[childIndex-1];
			if ( prevChild->chunkType != chunk_JUNK ) continue;
			// Fold this junk into its junk predecessor; the erase below then drops it.
			prevChild->oldSize += currChild->oldSize;
			prevChild->newSize += currChild->newSize;
			prevChild->hasChange = true;
		}
		this->children.erase ( this->children.begin() + childIndex );
		delete currChild;
		this->hasChange = true;
	}

	// Process the children of RIFF and LIST containers to get their final size. Remove empty
	// children. Work back to front to simplify the effect of .erase() on the loop. Do not ignore
	// the first chunk.

	for ( childIndex = (XMP_Index)this->children.size() - 1; childIndex >= 0; --childIndex ) {
		currChild = this->children[childIndex];
		++handler->level;
		currChild->changesAndSize ( handler );
		--handler->level;
		if ( (currChild->newSize == 8) || (currChild->newSize == 0) ) {
			// ! The newSIze is supposed to include the header.
			this->children.erase ( this->children.begin() + childIndex );
			delete currChild;
			this->hasChange = true;
		} else {
			this->hasChange |= currChild->hasChange;
			currChild->needSizeFix = (currChild->newSize != currChild->oldSize);
			if ( currChild->needSizeFix && (currChild->newSize > currChild->oldSize) &&
				 (this == handler->lastChunk) && (childIndex+1 == (XMP_Index)this->children.size()) ) {
				// Let an existing last-in-file chunk grow in-place. Shrinking is conceptually OK,
				// but complicates later sanity check that the main AVI chunk is not OK to append
				// other chunks later. Ignore new chunks, they might reuse junk space.
				if ( currChild->oldSize != 0 ) currChild->needSizeFix = false;
			}
		}
	}

	// Go through the children of a RIFF container, adjusting the placement as necessary. In brief,
	// things can only grow at the end of the last RIFF chunk, and non-junk chunks can't be shifted.

	if ( isRIFFContainer ) {

		for ( childIndex = 0; childIndex < (XMP_Index)this->children.size(); ++childIndex ) {
			currChild = this->children[childIndex];
			if ( ! currChild->needSizeFix ) continue;
			currChild->needSizeFix = false;

			XMP_Int64 sizeDiff = currChild->newSize - currChild->oldSize;	// Positive for growth.
			XMP_Uns8 padSize = (currChild->newSize & 1);	// Need a pad for odd size.

			// See if the following chunk is junk that can be utilized.

			Chunk * nextChild = 0;
			if ( childIndex+1 < (XMP_Index)this->children.size() ) nextChild = this->children[childIndex+1];
			if ( (nextChild != 0) && (nextChild->chunkType == chunk_JUNK) ) {
				if ( nextChild->newSize >= (9 + sizeDiff + padSize) ) {
					// Incorporate part of the trailing junk, or make the trailing junk grow.
					nextChild->newSize -= sizeDiff;
					nextChild->newSize -= padSize;
					nextChild->hasChange = true;
					continue;
				} else if ( nextChild->newSize == (sizeDiff + padSize) ) {
					// Incorporate all of the trailing junk.
					this->children.erase ( this->children.begin() + childIndex + 1 );
					delete nextChild;
					continue;
				}
			}

			// See if the chunk shrinks enough to turn the leftover space into junk.

			if ( (sizeDiff + padSize) <= -9 ) {
				this->children.insert ( (this->children.begin() + childIndex + 1),
										new JunkChunk ( NULL, ((-sizeDiff) - padSize) ) );
				continue;
			}

			// Look through the parent for a usable span of junk.

			XMP_Index junkIndex;
			Chunk * junkChunk = 0;
			for ( junkIndex = 0; junkIndex < (XMP_Index)this->children.size(); ++junkIndex ) {
				junkChunk = this->children[junkIndex];
				if ( junkChunk->chunkType != chunk_JUNK ) continue;
				// Usable junk either fits exactly, or leaves at least 9 bytes for a residual junk chunk.
				if ( (junkChunk->newSize >= (9 + currChild->newSize + padSize)) ||
					 (junkChunk->newSize == (currChild->newSize + padSize)) ) break;
			}

			if ( junkIndex < (XMP_Index)this->children.size() ) {

				// Use part or all of the junk for the relocated chunk, replace the old space with junk.

				if ( junkChunk->newSize == (currChild->newSize + padSize) ) {
					// The found junk is an exact fit.
					this->children[junkIndex] = currChild;
					delete junkChunk;
				} else {
					// The found junk has excess space. Insert the moving chunk and shrink the junk.
					XMP_Assert ( junkChunk->newSize >= (9 + currChild->newSize + padSize) );
					junkChunk->newSize -= (currChild->newSize + padSize);
					junkChunk->hasChange = true;
					this->children.insert ( (this->children.begin() + junkIndex), currChild );
					if ( junkIndex < childIndex ) ++childIndex;	// The insertion moved the current child.
				}

				if ( currChild->oldSize != 0 ) {
					this->children[childIndex] = new JunkChunk ( 0, currChild->oldSize );	// Replace the old space with junk.
				} else {
					this->children.erase ( this->children.begin() + childIndex );	// Remove the newly created chunk's old location.
					--childIndex;	// Make the next loop iteration not skip a chunk.
				}
				continue;

			}

			// If this is a LIST:INFO chunk not in the last of multiple RIFF chunks, then give up
			// and replace it with oldSize junk. Preserve the first RIFF chunk's original size.

			bool isListInfo = (currChild->id == kChunk_LIST) && (currChild->chunkType == chunk_CONTAINER) &&
							  (((ContainerChunk*)currChild)->containerType == kType_INFO);
			if ( isListInfo && (handler->riffChunks.size() > 1) &&
				 (this->id == kChunk_RIFF) && (this != handler->lastChunk) ) {
				if ( currChild->oldSize != 0 ) {
					this->children[childIndex] = new JunkChunk ( 0, currChild->oldSize );
				} else {
					this->children.erase ( this->children.begin() + childIndex );
					--childIndex;	// Make the next loop iteration not skip a chunk.
				}
				delete currChild;
				continue;
			}

			// Move the chunk to the end of the last RIFF chunk and make the old space junk.

			if ( (this == handler->lastChunk) && (childIndex+1 == (XMP_Index)this->children.size()) ) continue;	// Already last.
			handler->lastChunk->children.push_back( currChild );
			if ( currChild->oldSize != 0 ) {
				this->children[childIndex] = new JunkChunk ( 0, currChild->oldSize );	// Replace the old space with junk.
			} else {
				this->children.erase ( this->children.begin() + childIndex );	// Remove the newly created chunk's old location.
				--childIndex;	// Make the next loop iteration not skip a chunk.
			}

		}

	}

	// Compute the finished container's new size (for both RIFF and LIST).

	this->newSize = 12;	// Start with standard container header.
	for ( childIndex = 0; childIndex < (XMP_Index)this->children.size(); ++childIndex ) {
		currChild = this->children[childIndex];
		this->newSize += currChild->newSize;
		this->newSize += (this->newSize & 1);	// Round up if odd.
	}

	XMP_Validate ( (this->newSize <= 0xFFFFFFFFLL), "No single chunk may be above 4 GB", kXMPErr_Unimplemented );

}	// ContainerChunk::changesAndSize
// Chunk factory: peeks at the upcoming chunk id (and, for some ids, deeper into the chunk) and
// constructs the matching Chunk subclass. The file position is left at the chunk id; the chosen
// constructor does the actual parsing.
Chunk* getChunk ( ContainerChunk* parent, RIFF_MetaHandler* handler )
{
	XMP_IO* file = handler->parent->ioRef;
	XMP_Uns8 level = handler->level;
	XMP_Uns32 peek = XIO::PeekUns32_LE ( file );

	// Only the top level may (and must) hold RIFF chunks.
	if ( level == 0 ) {
		XMP_Validate( peek == kChunk_RIFF, "expected RIFF chunk not found", kXMPErr_BadFileFormat );
		XMP_Enforce( parent == NULL );
	} else {
		XMP_Validate( peek != kChunk_RIFF, "unexpected RIFF chunk below top-level", kXMPErr_BadFileFormat );
		XMP_Enforce( parent != NULL );
	}

	switch( peek )
	{
		case kChunk_RIFF:
			return new ContainerChunk( parent, handler );
		case kChunk_LIST:
			{
				if ( level != 1 ) break; // only care on this level
				// look further (beyond 4+4 = beyond id+size) to check on relevance
				file->Seek ( 8, kXMP_SeekFromCurrent );
				XMP_Uns32 containerType = XIO::PeekUns32_LE ( file );
				file->Seek ( -8, kXMP_SeekFromCurrent );
				bool isRelevantList = ( containerType== kType_INFO || containerType == kType_Tdat || containerType == kType_hdrl );
				if ( !isRelevantList ) break;
				return new ContainerChunk( parent, handler );
			}
		case kChunk_XMP:
			if ( level != 1 ) break; // ignore on inappropriate levels (might be compound metadata?)
			return new XMPChunk( parent, handler );
		case kChunk_DISP:
			{
				if ( level != 1 ) break; // only care on this level
				// peek even further to see if type is 0x001 and size is reasonable
				file ->Seek ( 4, kXMP_SeekFromCurrent );	// jump DISP
				XMP_Uns32 dispSize = XIO::ReadUns32_LE( file );
				XMP_Uns32 dispType = XIO::ReadUns32_LE( file );
				file ->Seek ( -12, kXMP_SeekFromCurrent );	// rewind, be in front of chunkID again

				// only take as a relevant disp if both criteria met,
				// otherwise treat as generic chunk!
				if ( (dispType == 0x0001) && ( dispSize < 256 * 1024 ) ) {
					ValueChunk* r = new ValueChunk( parent, handler );
					handler->dispChunk = r;
					return r;
				}
				break; // treat as irrelevant (non-0x1) DISP chunks as generic chunk
			}
		case kChunk_bext:
			{
				if ( level != 1 ) break; // only care on this level
				// store for now in a value chunk
				ValueChunk* r = new ValueChunk( parent, handler );
				handler->bextChunk = r;
				return r;
			}
		case kChunk_PrmL:
			{
				if ( level != 1 ) break; // only care on this level
				ValueChunk* r = new ValueChunk( parent, handler );
				handler->prmlChunk = r;
				return r;
			}
		case kChunk_Cr8r:
			{
				if ( level != 1 ) break; // only care on this level
				ValueChunk* r = new ValueChunk( parent, handler );
				handler->cr8rChunk = r;
				return r;
			}
		case kChunk_JUNQ:
		case kChunk_JUNK:
			{
				JunkChunk* r = new JunkChunk( parent, handler );
				return r;
			}
		case kChunk_IDIT:
			{
				if ( level != 2 ) break; // only care on this level
				ValueChunk* r = new ValueChunk( parent, handler );
				handler->iditChunk = r;
				return r;
			}
	}

	// this "default:" section must be ouside switch bracket, to be
	// reachable by all those break statements above:

	// digest 'valuable' container chunks: LIST:INFO, LIST:Tdat
	bool insideRelevantList = ( level==2 && parent->id == kChunk_LIST &&
							  ( parent->containerType== kType_INFO || parent->containerType == kType_Tdat ));
	if ( insideRelevantList ) {
		ValueChunk* r = new ValueChunk( parent, handler );
		return r;
	}

	// general chunk of no interest, treat as unknown blob
	return new Chunk( parent, handler, true, chunk_GENERAL );
}
// b) parsing — reads a container chunk (RIFF or LIST) from the file, recursively digesting
// relevant sub-chunks. On any parse error the partially built chunk removes itself from its
// parent and re-throws.
ContainerChunk::ContainerChunk( ContainerChunk* parent, RIFF_MetaHandler* handler ) : Chunk( parent, handler, false, chunk_CONTAINER )
{
	bool repairMode = ( 0 != ( handler->parent->openFlags & kXMPFiles_OpenRepairFile ));
	try
	{
		XMP_IO* file = handler->parent->ioRef;
		XMP_Uns8 level = handler->level;

		// get type of container chunk
		this->containerType = XIO::ReadUns32_LE( file );

		// ensure legality of top-level chunks
		if ( level == 0 && handler->riffChunks.size() > 0 )
		{
			XMP_Validate( handler->parent->format == kXMP_AVIFile, "only AVI may have multiple top-level chunks", kXMPErr_BadFileFormat );
			XMP_Validate( this->containerType == kType_AVIX, "all chunks beyond main chunk must be type AVIX", kXMPErr_BadFileFormat );
		}

		// has *relevant* subChunks? (there might be e.g. non-INFO LIST chunks we don't care about)
		bool hasSubChunks = ( ( this->id == kChunk_RIFF ) ||
							  ( this->id == kChunk_LIST && this->containerType == kType_INFO ) ||
							  ( this->id == kChunk_LIST && this->containerType == kType_Tdat ) ||
							  ( this->id == kChunk_LIST && this->containerType == kType_hdrl ) );
		XMP_Int64 endOfChunk = this->oldPos + this->oldSize;

		// this statement catches beyond-EoF-offsets on any level
		// exception: level 0, tolerate if in repairMode
		if ( (level == 0) && repairMode && (endOfChunk > handler->oldFileSize) )
		{
			endOfChunk = handler->oldFileSize;	// assign actual file size
			this->oldSize = endOfChunk - this->oldPos;	// reversely calculate correct oldSize
		}
		XMP_Validate( endOfChunk <= handler->oldFileSize, "offset beyond EoF", kXMPErr_BadFileFormat );

		Chunk* curChild = 0;
		if ( hasSubChunks )
		{
			handler->level++;
			while ( file->Offset() < endOfChunk )
			{
				curChild = RIFF::getChunk( this, handler );

				// digest pad byte - no value validation (0), since some 3rd party files have non-0-padding.
				if ( file->Offset() % 2 == 1 )
				{
					// [1521093] tolerate missing pad byte at very end of file:
					XMP_Uns8 pad;
					file->Read ( &pad, 1 );	// Read the pad, tolerate being at EOF.
				}

				// within relevant LISTs, relentlesly delete junk chunks (create a single one
				// at end as part of updateAndChanges()
				if ( (containerType== kType_INFO || containerType == kType_Tdat)
					 && ( curChild->chunkType == chunk_JUNK ) )
				{
					this->children.pop_back();
					delete curChild;
				} // for other chunks: join neighouring Junk chunks into one
				else if ( (curChild->chunkType == chunk_JUNK) && ( this->children.size() >= 2 ) )
				{
					// nb: if there are e.g 2 chunks, then last one is at(1), prev one at(0) ==> '-2'
					Chunk* prevChunk = this->children.at( this->children.size() - 2 );
					if ( prevChunk->chunkType == chunk_JUNK )
					{
						// stack up size to prior chunk
						prevChunk->oldSize += curChild->oldSize;
						prevChunk->newSize += curChild->newSize;
						XMP_Enforce( prevChunk->oldSize == prevChunk->newSize );
						// destroy current chunk
						this->children.pop_back();
						delete curChild;
					}
				}
			}
			handler->level--;
			XMP_Validate( file->Offset() == endOfChunk, "subchunks exceed outer chunk size", kXMPErr_BadFileFormat );

			// pointers for later legacy processing
			if ( level==1 && this->id==kChunk_LIST && this->containerType == kType_INFO )
				handler->listInfoChunk = this;
			if ( level==1 && this->id==kChunk_LIST && this->containerType == kType_Tdat )
				handler->listTdatChunk = this;
			if ( level == 1 && this->id == kChunk_LIST && this->containerType == kType_hdrl )
				handler->listHdlrChunk = this;
		}
		else // skip non-interest container chunk
		{
			// Skip the remaining payload: oldSize minus the 8-byte header and the 4 type bytes read above.
			file->Seek ( (this->oldSize - 8 - 4), kXMP_SeekFromCurrent );
		} // if - else
	} // try
	catch (XMP_Error& e)
	{
		this->release();	// free resources
		if ( this->parent != 0)
			this->parent->children.pop_back();	// hereby taken care of, so removing myself...
		throw e;	// re-throw
	}
}
// Fix up a qualified property that was parsed as an rdf:value structure: child 0 is the
// "rdf:value" node holding the real value; the value node's qualifiers and the parent's
// remaining children all become qualifiers of the parent, which then adopts the value
// node's value, options, and children. Throws kXMPErr_BadXMP on duplicate/redundant
// qualifiers.
static void FixupQualifiedNode ( XMP_Node * xmpParent )
{
	size_t qualNum, qualLim;
	size_t childNum, childLim;

	XMP_Enforce ( (xmpParent->options & kXMP_PropValueIsStruct) && (! xmpParent->children.empty()) );

	XMP_Node * valueNode = xmpParent->children[0];
	XMP_Enforce ( valueNode->name == "rdf:value" );

	xmpParent->qualifiers.reserve ( xmpParent->qualifiers.size() + xmpParent->children.size() + valueNode->qualifiers.size() );

	// Move the qualifiers on the value node to the parent. Make sure an xml:lang qualifier stays at
	// the front. Check for duplicate names between the value node's qualifiers and the parent's
	// children. The parent's children are about to become qualifiers. Check here, between the
	// groups. Intra-group duplicates are caught by AddChildNode.

	qualNum = 0;
	qualLim = valueNode->qualifiers.size();

	if ( valueNode->options & kXMP_PropHasLang ) {

		if ( xmpParent->options & kXMP_PropHasLang ) XMP_Throw ( "Redundant xml:lang for rdf:value element", kXMPErr_BadXMP );

		XMP_Node * langQual = valueNode->qualifiers[0];
		XMP_Assert ( langQual->name == "xml:lang" );

		langQual->parent = xmpParent;
		xmpParent->options |= kXMP_PropHasLang;

		if ( xmpParent->qualifiers.empty() ) {
			xmpParent->qualifiers.push_back ( langQual );	// *** Should use utilities to add qual & set parent.
		} else {
			xmpParent->qualifiers.insert ( xmpParent->qualifiers.begin(), langQual );
		}
		valueNode->qualifiers[0] = 0;	// We just moved it to the parent.

		qualNum = 1;	// Start the remaining copy after the xml:lang qualifier.

	}

	for ( ; qualNum != qualLim; ++qualNum ) {
		XMP_Node * currQual = valueNode->qualifiers[qualNum];
		if ( FindChildNode ( xmpParent, currQual->name.c_str(), kXMP_ExistingOnly ) != 0 ) {
			XMP_Throw ( "Duplicate qualifier node", kXMPErr_BadXMP );
		}
		currQual->parent = xmpParent;
		xmpParent->qualifiers.push_back ( currQual );
		valueNode->qualifiers[qualNum] = 0;	// We just moved it to the parent.
	}

	valueNode->qualifiers.clear();	// ! There should be nothing but null pointers.

	// Change the parent's other children into qualifiers. This loop starts at 1, child 0 is the
	// rdf:value node. Put xml:lang at the front, append all others.

	for ( childNum = 1, childLim = xmpParent->children.size(); childNum != childLim; ++childNum ) {

		XMP_Node * currQual = xmpParent->children[childNum];
		bool isLang = (currQual->name == "xml:lang");

		currQual->options |= kXMP_PropIsQualifier;
		currQual->parent = xmpParent;

		if ( isLang ) {
			if ( xmpParent->options & kXMP_PropHasLang ) XMP_Throw ( "Duplicate xml:lang qualifier", kXMPErr_BadXMP );
			xmpParent->options |= kXMP_PropHasLang;
		} else if ( currQual->name == "rdf:type" ) {
			xmpParent->options |= kXMP_PropHasType;
		}

		if ( (! isLang) || xmpParent->qualifiers.empty() ) {
			xmpParent->qualifiers.push_back ( currQual );
		} else {
			xmpParent->qualifiers.insert ( xmpParent->qualifiers.begin(), currQual );
		}
		xmpParent->children[childNum] = 0;	// We just moved it to the qualifiers.

	}

	if ( ! xmpParent->qualifiers.empty() ) xmpParent->options |= kXMP_PropHasQualifiers;

	// Move the options and value last, other checks need the parent's original options. Move the
	// value node's children to be the parent's children. Delete the now useless value node.

	XMP_Assert ( xmpParent->options & (kXMP_PropValueIsStruct | kRDF_HasValueElem) );
	xmpParent->options &= ~ (kXMP_PropValueIsStruct | kRDF_HasValueElem);
	xmpParent->options |= valueNode->options;

	xmpParent->value.swap ( valueNode->value );
	#if 0	// *** XMP_DebugBuild
		xmpParent->_valuePtr = xmpParent->value.c_str();
	#endif

	xmpParent->children[0] = 0;	// ! Remove the value node itself before the swap.
	xmpParent->children.swap ( valueNode->children );

	// Re-parent the children just adopted from the value node.
	for ( size_t childNum = 0, childLim = xmpParent->children.size(); childNum != childLim; ++childNum ) {
		XMP_Node * currChild = xmpParent->children[childNum];
		currChild->parent = xmpParent;
	}

	delete valueNode;

}	// FixupQualifiedNode
// -------------------------------------------------------------------------------------------------
// Construct the XP home-grown queue. Creates the Win32 event used to wake waiters; waitCount and
// releaseAll start at their idle values. Throws (XMP_Enforce) if the event cannot be created.
XMP_WinXP_HGQueue::XMP_WinXP_HGQueue() : queueEvent(0), waitCount(0), releaseAll(false)
{
	// ! bInitialState must be FALSE: the event has to start non-signaled so the first waiter
	// ! blocks until an explicit release. The previous code passed TRUE (initially signaled),
	// ! contradicting the "initially clear" intent and letting one waiter pass through spuriously.
	this->queueEvent = CreateEvent ( NULL, FALSE, FALSE, NULL );	// Auto reset, initially clear.
	XMP_Enforce ( this->queueEvent != 0 );
}
// =================================================================================================
// GIF_MetaHandler::ParseGIFBlocks
// ===============================
// Walk the block structure of a GIF89a stream: verify the "GIF89a" signature, skip the Logical
// Screen Descriptor (via ReadLogicalScreenDesc), then visit each block until the trailer.
// Records XMPPacketOffset/XMPPacketLength when an XMP Application Extension is found and
// trailerOffset for the trailer byte.
//
// fileRef - the open GIF stream; it is rewound first.
// Returns true iff an XMP packet was found.
// Throws kXMPErr_BadBlockFormat for an unknown block type and kXMPErr_BadFileFormat when the
// stream has no trailer; XMP_Enforce rejects a bad signature.

bool GIF_MetaHandler::ParseGIFBlocks( XMP_IO* fileRef )
{
	fileRef->Rewind();

	// Checking for GIF header. Require the full header so a truncated file fails cleanly
	// instead of memcmp'ing an uninitialized buffer.
	XMP_Uns8 buffer[ GIF_89_Header_LEN ];
	fileRef->Read( buffer, GIF_89_Header_LEN, true );
	XMP_Enforce( memcmp( buffer, GIF_89_Header_DATA, GIF_89_Header_LEN ) == 0 );

	bool IsXMPExists = false;
	bool IsTrailerExists = false;

	ReadLogicalScreenDesc( fileRef );

	// Parsing rest of the blocks
	while ( fileRef->Offset() != fileRef->Length() )
	{
		XMP_Uns8 blockType;

		// Read the block type byte
		fileRef->Read( &blockType, 1 );

		if ( blockType == kXMP_block_ImageDesc )
		{
			// ImageDesc is a special case, So read data just like its structure.
			long tableSize = 0;
			XMP_Uns8 fields;

			// Skipping dimensions of image as
			//   2 bytes = Image Left Position
			// + 2 bytes = Image Top Position
			// + 2 bytes = Image Width
			// + 2 bytes = Image Height
			// = 8 bytes
			this->SeekFile( fileRef, 8, kXMP_SeekFromCurrent );

			// Reading one byte for Packed Fields
			fileRef->Read( &fields, 1 );

			// If a Local Color Table is present (high bit of the packed fields), compute its
			// size from the low 3 bits and skip it.
			if ( fields & 0x80 )
			{
				tableSize = ( 1 << ( ( fields & 0x07 ) + 1 ) ) * 3;
				this->SeekFile( fileRef, tableSize, kXMP_SeekFromCurrent );
			}

			// 1 byte LZW Minimum code size
			this->SeekFile( fileRef, 1, kXMP_SeekFromCurrent );

			XMP_Uns8 subBlockSize;

			// 1 byte compressed sub-block size
			fileRef->Read( &subBlockSize, 1 );

			// Skip the chain of compressed data sub-blocks; a zero size byte terminates it.
			while ( subBlockSize != 0x00 )
			{
				this->SeekFile( fileRef, subBlockSize, kXMP_SeekFromCurrent );
				fileRef->Read( &subBlockSize, 1 );
			}
		}
		else if ( blockType == kXMP_block_Extension )
		{
			XMP_Uns8 extensionLbl;
			// ! Size bytes are read as single bytes. The previous code read 1 byte into an
			// ! XMP_Uns32, which only worked on little-endian machines.
			XMP_Uns8 blockSize = 0;

			// Extension Label
			fileRef->Read( &extensionLbl, 1 );

			// Block or Sub-Block size
			fileRef->Read( &blockSize, 1 );

			// Checking for Application Extension label and blockSize
			if ( extensionLbl == 0xFF && blockSize == APP_ID_LEN )
			{
				XMP_Uns8 idData[ APP_ID_LEN ];
				fileRef->Read( idData, APP_ID_LEN, true );

				// Checking For XMP ID. Track it per block so a later non-XMP Application
				// Extension cannot clobber an already-computed XMPPacketLength.
				bool isXMPBlock = ( memcmp( idData, XMP_APP_ID_DATA, APP_ID_LEN ) == 0 );
				if ( isXMPBlock )
				{
					XMPPacketOffset = fileRef->Offset();
					IsXMPExists = true;
				}

				// Parsing sub-blocks
				XMP_Uns8 subBlockSize;
				fileRef->Read( &subBlockSize, 1 );
				while ( subBlockSize != 0x00 )
				{
					this->SeekFile( fileRef, subBlockSize, kXMP_SeekFromCurrent );
					fileRef->Read( &subBlockSize, 1 );
				}

				if ( isXMPBlock )
					XMPPacketLength = static_cast< XMP_Uns32 >( fileRef->Offset() - XMPPacketOffset - MAGIC_TRAILER_LEN );
			}
			else
			{
				// Extension block other than Application Extension
				while ( blockSize != 0x00 )
				{
					// Seeking block size or sub-block size
					this->SeekFile( fileRef, blockSize, kXMP_SeekFromCurrent );

					// Block Size
					fileRef->Read( &blockSize, 1 );
				}
			}
		}
		else if ( blockType == kXMP_block_Trailer )
		{
			// 1 byte is subtracted for block type
			trailerOffset = fileRef->Offset() - 1;
			IsTrailerExists = true;
			break;
		}
		else
			XMP_Throw( "Invalid GIF Block", kXMPErr_BadBlockFormat );
	}

	if ( !IsTrailerExists )
		XMP_Throw( "No trailer exists for GIF file", kXMPErr_BadFileFormat );

	return IsXMPExists;

}	// GIF_MetaHandler::ParseGIFBlocks