Example #1
QIODevice* AssetCache::prepare(const QNetworkCacheMetaData &metaData)
{
    if (!WriteMetadata(GetAbsoluteFilePath(true, metaData.url()), metaData))
        return 0;
    QScopedPointer<QFile> dataFile(new QFile(GetAbsoluteFilePath(false, metaData.url())));
    if (!dataFile->open(QIODevice::ReadWrite))
    {
        LogError("Failed not open data file QIODevice::ReadWrite mode for " + metaData.url().toString().toStdString());
        dataFile.reset();
        remove(metaData.url());
        return 0;
    }
    if (dataFile->bytesAvailable() > 0)
    {
        if (!dataFile->resize(0))
        {
            LogError("Failed not reset existing data from cache entry. Skipping cache store for " + metaData.url().toString().toStdString());
            dataFile->close();
            dataFile.reset();
            remove(metaData.url());
            return 0;
        }
    }
    // Take ownership of the ptr
    QFile *dataPtr = dataFile.take();
    preparedItems[metaData.url().toString()] = dataPtr;
    return dataPtr;
}
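The prepare() above follows the Qt QAbstractNetworkCache contract: the device it returns is later handed back to insert() once the reply has finished writing into it. As a point of comparison only, here is a minimal sketch of what a matching insert() override could look like, assuming AssetCache derives from QAbstractNetworkCache and that preparedItems is a QHash<QString, QFile*> keyed by the URL string; this is an illustration, not the project's actual code.
void AssetCache::insert(QIODevice *device)
{
    // Sketch: find the prepared entry this device belongs to.
    for (QHash<QString, QFile*>::iterator it = preparedItems.begin(); it != preparedItems.end(); ++it)
    {
        if (it.value() == device)
        {
            it.value()->close();   // flush the cached payload to disk
            delete it.value();     // prepare() transferred ownership to the cache
            preparedItems.erase(it);
            return;
        }
    }
}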
Example #2
void FStatsWriteFile::WriteFrame( int64 TargetFrame, bool bNeedFullMetadata /*= false*/ )
{
	FMemoryWriter Ar( OutData, false, true );
	const int64 PrevArPos = Ar.Tell();

	if( bNeedFullMetadata )
	{
		WriteMetadata( Ar );
	}

	FStatsThreadState const& Stats = FStatsThreadState::GetLocalState();
	TArray<FStatMessage> const& Data = Stats.GetCondensedHistory( TargetFrame );
	for( auto It = Data.CreateConstIterator(); It; ++It )
	{
		WriteMessage( Ar, *It );
	}

	// Get cycles for all threads, so we can use that data to generate the mini-view.
	for( auto It = Stats.Threads.CreateConstIterator(); It; ++It )
	{
		const int64 Cycles = Stats.GetFastThreadFrameTime( TargetFrame, It.Key() );
		ThreadCycles.Add( It.Key(), Cycles );
	}

	// Serialize thread cycles. Disabled for now.
	//Ar << ThreadCycles;
}
Example #3
void FStatsWriteFile::Finalize()
{
	FArchive& Ar = *File;

	// Write dummy compression size, so we can detect the end of the file.
	FCompressedStatsData::WriteEndOfCompressedData( Ar );

	// Real header, written at start of the file, but written out right before we close the file.

	// Write out frame table and update header with offset and count.
	Header.FrameTableOffset = Ar.Tell();
	Ar << FramesInfo;

	const FStatsThreadState& Stats = FStatsThreadState::GetLocalState();

	// Add FNames from the stats metadata.
	for( const auto& It : Stats.ShortNameToLongName )
	{
		const FStatMessage& StatMessage = It.Value;
		FNamesSent.Add( StatMessage.NameAndInfo.GetRawName().GetComparisonIndex() );
	}

	// Create a copy of names.
	TSet<int32> FNamesToSent = FNamesSent;
	FNamesSent.Empty( FNamesSent.Num() );

	// Serialize FNames.
	Header.FNameTableOffset = Ar.Tell();
	Header.NumFNames = FNamesToSent.Num();
	for( const int32 It : FNamesToSent )
	{
		WriteFName( Ar, FStatNameAndInfo(FName(It, It, 0),false) );
	}

	// Serialize metadata messages.
	Header.MetadataMessagesOffset = Ar.Tell();
	Header.NumMetadataMessages = Stats.ShortNameToLongName.Num();
	WriteMetadata( Ar );

	// Verify data.
	TSet<int32> BMinA = FNamesSent.Difference( FNamesToSent );
	struct FLocal
	{
		static TArray<FName> GetFNameArray( const TSet<int32>& NameIndices )
		{
			TArray<FName> Result;
			for( const int32 NameIndex : NameIndices )
			{
				new(Result) FName( NameIndex, NameIndex, 0 );
			}
			return Result;
		}
	};
	auto BMinANames = FLocal::GetFNameArray( BMinA );

	// Seek to the position just after a magic value of the file and write out proper header.
	Ar.Seek( sizeof(uint32) );
	Ar << Header;
}
Example #4
bool TessdataManager::CombineDataFiles(
    const char *language_data_path_prefix,
    const char *output_filename) {
  int i;
  inT64 offset_table[TESSDATA_NUM_ENTRIES];
  for (i = 0; i < TESSDATA_NUM_ENTRIES; ++i) offset_table[i] = -1;
  FILE *output_file = fopen(output_filename, "wb");
  if (output_file == NULL) {
    tprintf("Error opening %s for writing\n", output_filename);
    return false;
  }
  // Leave some space for recording the offset_table.
  if (fseek(output_file,
            sizeof(inT32) + sizeof(inT64) * TESSDATA_NUM_ENTRIES, SEEK_SET)) {
    tprintf("Error seeking %s\n", output_filename);
    fclose(output_file);
    return false;
  }

  TessdataType type = TESSDATA_NUM_ENTRIES;
  bool text_file = false;
  FILE *file_ptr[TESSDATA_NUM_ENTRIES];

  // Load individual tessdata components from files.
  for (i = 0; i < TESSDATA_NUM_ENTRIES; ++i) {
    ASSERT_HOST(TessdataTypeFromFileSuffix(
        kTessdataFileSuffixes[i], &type, &text_file));
    STRING filename = language_data_path_prefix;
    filename += kTessdataFileSuffixes[i];
    file_ptr[i] =  fopen(filename.string(), "rb");
    if (file_ptr[i] != NULL) {
      offset_table[type] = ftell(output_file);
      CopyFile(file_ptr[i], output_file, text_file, -1);
      fclose(file_ptr[i]);
    }
  }

  // Make sure that the required components are present.
  if (file_ptr[TESSDATA_UNICHARSET] == NULL) {
    tprintf("Error opening %sunicharset file\n", language_data_path_prefix);
    fclose(output_file);
    return false;
  }
  if (file_ptr[TESSDATA_INTTEMP] != NULL &&
      (file_ptr[TESSDATA_PFFMTABLE] == NULL ||
       file_ptr[TESSDATA_NORMPROTO] == NULL)) {
    tprintf("Error opening %spffmtable and/or %snormproto files"
            " while %sinttemp file was present\n", language_data_path_prefix,
            language_data_path_prefix, language_data_path_prefix);
    fclose(output_file);
    return false;
  }

  return WriteMetadata(offset_table, language_data_path_prefix, output_file);
}
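WriteMetadata() itself is not shown here, but the fseek above reserves exactly one inT32 plus TESSDATA_NUM_ENTRIES inT64 slots at the start of the file, which suggests the metadata step seeks back and fills that region with the entry count and the recorded offsets. A hypothetical, heavily simplified sketch of that idea (not Tesseract's actual implementation, with plain stdint types standing in for inT32/inT64):
#include <cstdint>
#include <cstdio>

// Hypothetical sketch only: rewind and fill the reserved header region with the
// number of entries followed by the per-component offsets, then close the file.
static bool WriteOffsetTableSketch(const int64_t *offset_table, int32_t num_entries,
                                   FILE *output_file) {
  bool ok = fseek(output_file, 0, SEEK_SET) == 0 &&
            fwrite(&num_entries, sizeof(num_entries), 1, output_file) == 1 &&
            fwrite(offset_table, sizeof(offset_table[0]),
                   static_cast<size_t>(num_entries), output_file) ==
                static_cast<size_t>(num_entries);
  fclose(output_file);
  return ok;
}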
Example #5
bool LocalSequenceFileWriter::WriteHeader() {
    scoped_array<char> header(new char[kMaxHeaderLen]);
    int used_bytes = 0;
    used_bytes += WriteSequenceFileVersion(header.get());
    used_bytes += WriteKeyValueClassName(header.get() + used_bytes);
    used_bytes += WriteCompressInfo(header.get() + used_bytes);
    used_bytes += WriteMetadata(header.get() + used_bytes);
    used_bytes += WriteSync(header.get() + used_bytes);
    CHECK_LE(used_bytes, kMaxHeaderLen);
    return Write(header.get(), used_bytes);
}
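Each helper called in WriteHeader() writes its piece at the supplied buffer offset and returns the number of bytes it used, which the caller accumulates into used_bytes. The following self-contained sketch illustrates that convention with an invented key/value layout; it is not the actual LocalSequenceFileWriter::WriteMetadata.
#include <cstdint>
#include <cstring>
#include <map>
#include <string>

// Writes a 32-bit length prefix followed by the raw bytes; returns bytes used.
static int WriteLengthPrefixed(char *buffer, const std::string &value) {
    uint32_t length = static_cast<uint32_t>(value.size());
    std::memcpy(buffer, &length, sizeof(length));
    std::memcpy(buffer + sizeof(length), value.data(), length);
    return static_cast<int>(sizeof(length)) + static_cast<int>(length);
}

// Sketch of a buffer-filling metadata writer: entry count, then key/value pairs.
int WriteMetadataSketch(char *buffer, const std::map<std::string, std::string> &metadata) {
    int used = 0;
    uint32_t count = static_cast<uint32_t>(metadata.size());
    std::memcpy(buffer + used, &count, sizeof(count));
    used += static_cast<int>(sizeof(count));
    for (const auto &entry : metadata) {
        used += WriteLengthPrefixed(buffer + used, entry.first);
        used += WriteLengthPrefixed(buffer + used, entry.second);
    }
    return used;  // the caller adds this to its running used_bytes offset
}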
Example #6
bool TessdataManager::OverwriteComponents(
    const char *new_traineddata_filename,
    char **component_filenames,
    int num_new_components) {
  int i;
  inT64 offset_table[TESSDATA_NUM_ENTRIES];
  TessdataType type = TESSDATA_NUM_ENTRIES;
  bool text_file = false;
  FILE *file_ptr[TESSDATA_NUM_ENTRIES];
  for (i = 0; i < TESSDATA_NUM_ENTRIES; ++i) {
    offset_table[i] = -1;
    file_ptr[i] = NULL;
  }
  FILE *output_file = fopen(new_traineddata_filename, "wb");
  if (output_file == NULL) {
    tprintf("Error opening %s for writing\n", new_traineddata_filename);
    return false;
  }

  // Leave some space for recording the offset_table.
  if (fseek(output_file,
            sizeof(inT32) + sizeof(inT64) * TESSDATA_NUM_ENTRIES, SEEK_SET)) {
    fclose(output_file);
    tprintf("Error seeking %s\n", new_traineddata_filename);
    return false;
  }

  // Open the files with the new components.
  for (i = 0; i < num_new_components; ++i) {
    if (TessdataTypeFromFileName(component_filenames[i], &type, &text_file))
      file_ptr[type] = fopen(component_filenames[i], "rb");
  }

  // Write updated data to the output traineddata file.
  for (i = 0; i < TESSDATA_NUM_ENTRIES; ++i) {
    if (file_ptr[i] != NULL) {
      // Get the data from the opened component file.
      offset_table[i] = ftell(output_file);
      CopyFile(file_ptr[i], output_file, kTessdataFileIsText[i], -1);
      fclose(file_ptr[i]);
    } else {
      // Get this data component from the loaded data file.
      if (SeekToStart(static_cast<TessdataType>(i))) {
        offset_table[i] = ftell(output_file);
        CopyFile(data_file_, output_file, kTessdataFileIsText[i],
                 GetEndOffset(static_cast<TessdataType>(i)) -
                 ftell(data_file_) + 1);
      }
    }
  }
  const char *language_data_path_prefix = strchr(new_traineddata_filename, '.');
  return WriteMetadata(offset_table, language_data_path_prefix, output_file);
}
Example #7
OGRCloudantTableLayer::~OGRCloudantTableLayer()

{ 
    if( bMustWriteMetadata )
    {
        WriteMetadata();
        bMustWriteMetadata = FALSE;
    }

    if (pszSpatialDDoc)
        free((void*)pszSpatialDDoc);
}
Example #8
void FStatsWriteFile::WriteHeader( bool bIsRawStatsFile )
{
	FMemoryWriter MemoryWriter( OutData, false, true );
	FArchive& Ar = File ? *File : MemoryWriter;

	uint32 Magic = EStatMagicWithHeader::MAGIC;
	// Serialize magic value.
	Ar << Magic;

	// Serialize dummy header, overwritten in Finalize.
	Header.Version = EStatMagicWithHeader::VERSION_4;
	Header.PlatformName = FPlatformProperties::PlatformName();
	Header.bRawStatsFile = bIsRawStatsFile;
	Ar << Header;

	// Serialize metadata.
	WriteMetadata( Ar );
	Ar.Flush();
}
Example #9
static BOOL DLL_CALLCONV
Save(FreeImageIO *io, FIBITMAP *dib, fi_handle handle, int page, int flags, void *data) {
	png_structp png_ptr;
	png_infop info_ptr;
	png_colorp palette = NULL;
	png_uint_32 width, height;
	BOOL has_alpha_channel = FALSE;

	RGBQUAD *pal;					// pointer to dib palette
	int bit_depth, pixel_depth;		// pixel_depth = bit_depth * channels
	int palette_entries;
	int	interlace_type;

	fi_ioStructure fio;
    fio.s_handle = handle;
	fio.s_io = io;

	if ((dib) && (handle)) {
		try {
			// create the chunk manage structure

			png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, (png_voidp)NULL, error_handler, warning_handler);

			if (!png_ptr)  {
				return FALSE;
			}

			// allocate/initialize the image information data.

			info_ptr = png_create_info_struct(png_ptr);

			if (!info_ptr)  {
				png_destroy_write_struct(&png_ptr,  (png_infopp)NULL);
				return FALSE;
			}

			// Set error handling.  REQUIRED if you aren't supplying your own
			// error handling functions in the png_create_write_struct() call.

			if (setjmp(png_jmpbuf(png_ptr)))  {
				// if we get here, we had a problem reading the file

				png_destroy_write_struct(&png_ptr, &info_ptr);

				return FALSE;
			}

			// init the IO
            
			png_set_write_fn(png_ptr, &fio, _WriteProc, _FlushProc);

			// set physical resolution

			png_uint_32 res_x = (png_uint_32)FreeImage_GetDotsPerMeterX(dib);
			png_uint_32 res_y = (png_uint_32)FreeImage_GetDotsPerMeterY(dib);

			if ((res_x > 0) && (res_y > 0))  {
				png_set_pHYs(png_ptr, info_ptr, res_x, res_y, PNG_RESOLUTION_METER);
			}
	
			// Set the image information here.  Width and height are up to 2^31,
			// bit_depth is one of 1, 2, 4, 8, or 16, but valid values also depend on
			// the color_type selected. color_type is one of PNG_COLOR_TYPE_GRAY,
			// PNG_COLOR_TYPE_GRAY_ALPHA, PNG_COLOR_TYPE_PALETTE, PNG_COLOR_TYPE_RGB,
			// or PNG_COLOR_TYPE_RGB_ALPHA.  interlace is either PNG_INTERLACE_NONE or
			// PNG_INTERLACE_ADAM7, and the compression_type and filter_type MUST
			// currently be PNG_COMPRESSION_TYPE_BASE and PNG_FILTER_TYPE_BASE. REQUIRED

			width = FreeImage_GetWidth(dib);
			height = FreeImage_GetHeight(dib);
			pixel_depth = FreeImage_GetBPP(dib);

			BOOL bInterlaced = FALSE;
			if( (flags & PNG_INTERLACED) == PNG_INTERLACED) {
				interlace_type = PNG_INTERLACE_ADAM7;
				bInterlaced = TRUE;
			} else {
				interlace_type = PNG_INTERLACE_NONE;
			}

			// set the ZLIB compression level or default to PNG default compression level (ZLIB level = 6)
			int zlib_level = flags & 0x0F;
			if((zlib_level >= 1) && (zlib_level <= 9)) {
				png_set_compression_level(png_ptr, zlib_level);
			} else if((flags & PNG_Z_NO_COMPRESSION) == PNG_Z_NO_COMPRESSION) {
				png_set_compression_level(png_ptr, Z_NO_COMPRESSION);
			}

			// filtered strategy works better for high color images
			if(pixel_depth >= 16){
				png_set_compression_strategy(png_ptr, Z_FILTERED);
				png_set_filter(png_ptr, 0, PNG_FILTER_NONE|PNG_FILTER_SUB|PNG_FILTER_PAETH);
			} else {
				png_set_compression_strategy(png_ptr, Z_DEFAULT_STRATEGY);
			}

			FREE_IMAGE_TYPE image_type = FreeImage_GetImageType(dib);
			if(image_type == FIT_BITMAP) {
				// standard image type
				bit_depth = (pixel_depth > 8) ? 8 : pixel_depth;
			} else {
				// 16-bit greyscale or 16-bit RGB(A)
				bit_depth = 16;
			}

			switch (FreeImage_GetColorType(dib)) {
				case FIC_MINISWHITE:
					// Invert monochrome files to have 0 as black and 1 as white (no break here)
					png_set_invert_mono(png_ptr);

				case FIC_MINISBLACK:
					png_set_IHDR(png_ptr, info_ptr, width, height, bit_depth, 
						PNG_COLOR_TYPE_GRAY, interlace_type, 
						PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);

					break;

				case FIC_PALETTE:
				{
					png_set_IHDR(png_ptr, info_ptr, width, height, bit_depth, 
						PNG_COLOR_TYPE_PALETTE, interlace_type, 
						PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);

					// set the palette

					palette_entries = 1 << bit_depth;
					palette = (png_colorp)png_malloc(png_ptr, palette_entries * sizeof (png_color));
					pal = FreeImage_GetPalette(dib);

					for (int i = 0; i < palette_entries; i++) {
						palette[i].red   = pal[i].rgbRed;
						palette[i].green = pal[i].rgbGreen;
						palette[i].blue  = pal[i].rgbBlue;
					}
					
					png_set_PLTE(png_ptr, info_ptr, palette, palette_entries);

					// You must not free palette here, because png_set_PLTE only makes a link to
					// the palette that you malloced.  Wait until you are about to destroy
					// the png structure.

					break;
				}

				case FIC_RGBALPHA :
					has_alpha_channel = TRUE;

					png_set_IHDR(png_ptr, info_ptr, width, height, bit_depth, 
						PNG_COLOR_TYPE_RGBA, interlace_type, 
						PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);

#if FREEIMAGE_COLORORDER == FREEIMAGE_COLORORDER_BGR
					// flip BGR pixels to RGB
					if(image_type == FIT_BITMAP) {
						png_set_bgr(png_ptr);
					}
#endif
					break;
	
				case FIC_RGB:
					png_set_IHDR(png_ptr, info_ptr, width, height, bit_depth, 
						PNG_COLOR_TYPE_RGB, interlace_type, 
						PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);

#if FREEIMAGE_COLORORDER == FREEIMAGE_COLORORDER_BGR
					// flip BGR pixels to RGB
					if(image_type == FIT_BITMAP) {
						png_set_bgr(png_ptr);
					}
#endif
					break;
					
				case FIC_CMYK:
					break;
			}

			// write possible ICC profile

			FIICCPROFILE *iccProfile = FreeImage_GetICCProfile(dib);
			if (iccProfile->size && iccProfile->data) {
				png_set_iCCP(png_ptr, info_ptr, "Embedded Profile", 0, (png_const_bytep)iccProfile->data, iccProfile->size);
			}

			// write metadata

			WriteMetadata(png_ptr, info_ptr, dib);

			// Optional gamma chunk is strongly suggested if you have any guess
			// as to the correct gamma of the image.
			// png_set_gAMA(png_ptr, info_ptr, gamma);

			// set the transparency table

			if (FreeImage_IsTransparent(dib) && (FreeImage_GetTransparencyCount(dib) > 0)) {
				png_set_tRNS(png_ptr, info_ptr, FreeImage_GetTransparencyTable(dib), FreeImage_GetTransparencyCount(dib), NULL);
			}

			// set the background color

			if(FreeImage_HasBackgroundColor(dib)) {
				png_color_16 image_background;
				RGBQUAD rgbBkColor;

				FreeImage_GetBackgroundColor(dib, &rgbBkColor);
				memset(&image_background, 0, sizeof(png_color_16));
				image_background.blue  = rgbBkColor.rgbBlue;
				image_background.green = rgbBkColor.rgbGreen;
				image_background.red   = rgbBkColor.rgbRed;
				image_background.index = rgbBkColor.rgbReserved;

				png_set_bKGD(png_ptr, info_ptr, &image_background);
			}
			
			// Write the file header information.

			png_write_info(png_ptr, info_ptr);

			// write out the image data

#ifndef FREEIMAGE_BIGENDIAN
			if (bit_depth == 16) {
				// turn on 16 bit byte swapping
				png_set_swap(png_ptr);
			}
#endif

			int number_passes = 1;
			if (bInterlaced) {
				number_passes = png_set_interlace_handling(png_ptr);
			}

			if ((pixel_depth == 32) && (!has_alpha_channel)) {
				BYTE *buffer = (BYTE *)malloc(width * 3);

				// transparent conversion to 24-bit
				// the number of passes is either 1 for non-interlaced images, or 7 for interlaced images
				for (int pass = 0; pass < number_passes; pass++) {
					for (png_uint_32 k = 0; k < height; k++) {
						FreeImage_ConvertLine32To24(buffer, FreeImage_GetScanLine(dib, height - k - 1), width);			
						png_write_row(png_ptr, buffer);
					}
				}
				free(buffer);
			} else {
				// the number of passes is either 1 for non-interlaced images, or 7 for interlaced images
				for (int pass = 0; pass < number_passes; pass++) {
					for (png_uint_32 k = 0; k < height; k++) {			
						png_write_row(png_ptr, FreeImage_GetScanLine(dib, height - k - 1));					
					}
				}
			}

			// It is REQUIRED to call this to finish writing the rest of the file
			// Bug with png_flush

			png_write_end(png_ptr, info_ptr);

			// clean up after the write, and free any memory allocated
			if (palette) {
				png_free(png_ptr, palette);
			}

			png_destroy_write_struct(&png_ptr, &info_ptr);

			return TRUE;
		} catch (const char *text) {
			FreeImage_OutputMessageProc(s_format_id, text);
		}
	}

	return FALSE;
}
Example #10
int FFmpegImportFileHandle::Import(TrackFactory *trackFactory,
              Track ***outTracks,
              int *outNumTracks,
              Tags *tags)
{

   CreateProgress();

   // Remove stream contexts which are not marked for importing and adjust mScs and mNumStreams accordingly
   for (int i = 0; i < mNumStreams;)
   {
      if (!mScs[i]->m_use)
      {
         delete mScs[i];
         for (int j = i; j < mNumStreams - 1; j++)
         {
            mScs[j] = mScs[j+1];
         }
         mNumStreams--;
      }
      else i++;
   }

   mChannels = new WaveTrack **[mNumStreams];
   mNumSamples = 0;

   for (int s = 0; s < mNumStreams; s++)
   {
      // As you can see, it's really a number of frames.
      // TODO: use something other than nb_frames for progress reporting (nb_frames is not available for some formats). Maybe something from the format context?
      mNumSamples += mScs[s]->m_stream->nb_frames;
      // There is a possibility that number of channels will change over time, but we do not have WaveTracks for new channels. Remember the number of channels and stick to it.
      mScs[s]->m_initialchannels = mScs[s]->m_stream->codec->channels;
      mChannels[s] = new WaveTrack *[mScs[s]->m_stream->codec->channels];
      int c;
      for (c = 0; c < mScs[s]->m_stream->codec->channels; c++)
      {
         mChannels[s][c] = trackFactory->NewWaveTrack(int16Sample, mScs[s]->m_stream->codec->sample_rate);

         if (mScs[s]->m_stream->codec->channels == 2)
         {
            switch (c)
            {
            case 0:
               mChannels[s][c]->SetChannel(Track::LeftChannel);
               mChannels[s][c]->SetLinked(true);
               break;
            case 1:
               mChannels[s][c]->SetChannel(Track::RightChannel);
               mChannels[s][c]->SetTeamed(true);
               break;
            }
         }
         else
         {
            mChannels[s][c]->SetChannel(Track::MonoChannel);
         }
      }
   }

   // Handles the start_time by creating silence. This may or may not be correct.
   // There is a possibility that we should ignore first N milliseconds of audio instead. I do not know.
   /// TODO: Nag FFmpeg devs about start_time until they finally say WHAT is this and HOW to handle it.
   for (int s = 0; s < mNumStreams; s++)
   {
      int64_t stream_delay = 0;
      if (mScs[s]->m_stream->start_time != int64_t(AV_NOPTS_VALUE))
      {
         stream_delay = mScs[s]->m_stream->start_time;
         wxLogMessage(wxT("Stream %d start_time = %d, that would be %f milliseconds."), s, mScs[s]->m_stream->start_time, double(mScs[s]->m_stream->start_time)/AV_TIME_BASE*1000);
      }
      if (stream_delay != 0)
      {
         for (int c = 0; c < mScs[s]->m_stream->codec->channels; c++)
         {
            WaveTrack *t = mChannels[s][c];
            t->InsertSilence(0,double(stream_delay)/AV_TIME_BASE);
         }
      }
   }

   // This is the heart of the importing process
   streamContext *sc = NULL;
   // The result of Import() to be returned. It will be non-zero if the user cancelled or an error occurred.
   int res = 0;
   // Read next frame.
   while ((sc = ReadNextFrame()) != NULL && (res == 0))
   {
      // ReadNextFrame returns 1 if stream is not to be imported
      if (sc != (streamContext*)1)
      {
         // Decode frame until it is not possible to decode any further
         while (sc->m_pktRemainingSiz > 0 && (res == 0))
         {
            if (DecodeFrame(sc,false) < 0)
               break;

            // If something useable was decoded - write it to mChannels
            if (sc->m_frameValid)
               res = WriteData(sc);
         }

         // Cleanup after frame decoding
         if (sc->m_pktValid)
         {
            av_free_packet(&sc->m_pkt);
            sc->m_pktValid = 0;
         }    
      }
   }

   // Flush the decoders.
   if ((mNumStreams != 0) && (res == 0))
   {
      for (int i = 0; i < mNumStreams; i++)
      {
         if (DecodeFrame(mScs[i], true) == 0)
         {
            WriteData(mScs[i]);

            if (mScs[i]->m_pktValid)
            {
               av_free_packet(&mScs[i]->m_pkt);
               mScs[i]->m_pktValid = 0;
            }				
         }
      }
   }

   // Something bad happened - destroy everything!
   if (res)
   {
      for (int s = 0; s < mNumStreams; s++)
      {
         delete[] mChannels[s];
      }
      delete[] mChannels;

      if (mCancelled)
         return eImportCancelled;
      else
         return eImportFailed;
   }

   *outNumTracks = 0;
   for (int s = 0; s < mNumStreams; s++)
   {
      *outNumTracks += mScs[s]->m_stream->codec->channels;
   }

   // Create new tracks
   *outTracks = new Track *[*outNumTracks];

   // Copy audio from mChannels to newly created tracks (destroying mChannels elements in process)
   int trackindex = 0;
   for (int s = 0; s < mNumStreams; s++)
   {
      for(int c = 0; c < mScs[s]->m_stream->codec->channels; c++)
      {
         mChannels[s][c]->Flush();
         (*outTracks)[trackindex++] = mChannels[s][c];
      }
      delete[] mChannels[s];
   }
   delete[] mChannels;

   // Save metadata
   WriteMetadata(mFormatContext,tags);

   return eImportSuccess;
}
Example #11
int FFmpegImportFileHandle::Import(TrackFactory *trackFactory,
              Track ***outTracks,
              int *outNumTracks,
              Tags *tags)
{

   CreateProgress();

   // Remove stream contexts which are not marked for importing and adjust mScs and mNumStreams accordingly
   for (int i = 0; i < mNumStreams;)
   {
      if (!mScs[i]->m_use)
      {
         delete mScs[i];
         for (int j = i; j < mNumStreams - 1; j++)
         {
            mScs[j] = mScs[j+1];
         }
         mNumStreams--;
      }
      else i++;
   }

   mChannels = new WaveTrack **[mNumStreams];

   for (int s = 0; s < mNumStreams; s++)
   {
      switch (mScs[s]->m_stream->codec->sample_fmt)
      {
         case SAMPLE_FMT_U8:
         case SAMPLE_FMT_S16:
            mScs[s]->m_osamplesize = sizeof(int16_t);
            mScs[s]->m_osamplefmt = int16Sample;
         break;
         default:
            mScs[s]->m_osamplesize = sizeof(float);
            mScs[s]->m_osamplefmt = floatSample;
         break;
      }

      // There is a possibility that number of channels will change over time, but we do not have WaveTracks for new channels. Remember the number of channels and stick to it.
      mScs[s]->m_initialchannels = mScs[s]->m_stream->codec->channels;
      mChannels[s] = new WaveTrack *[mScs[s]->m_stream->codec->channels];
      int c;
      for (c = 0; c < mScs[s]->m_stream->codec->channels; c++)
      {
         mChannels[s][c] = trackFactory->NewWaveTrack(mScs[s]->m_osamplefmt, mScs[s]->m_stream->codec->sample_rate);

         if (mScs[s]->m_stream->codec->channels == 2)
         {
            switch (c)
            {
            case 0:
               mChannels[s][c]->SetChannel(Track::LeftChannel);
               mChannels[s][c]->SetLinked(true);
               break;
            case 1:
               mChannels[s][c]->SetChannel(Track::RightChannel);
               break;
            }
         }
         else
         {
            mChannels[s][c]->SetChannel(Track::MonoChannel);
         }
      }
   }

   // Handles the start_time by creating silence. This may or may not be correct.
   // There is a possibility that we should ignore first N milliseconds of audio instead. I do not know.
   /// TODO: Nag FFmpeg devs about start_time until they finally say WHAT is this and HOW to handle it.
   for (int s = 0; s < mNumStreams; s++)
   {
      int64_t stream_delay = 0;
      if (mScs[s]->m_stream->start_time != int64_t(AV_NOPTS_VALUE) && mScs[s]->m_stream->start_time > 0)
      {
         stream_delay = mScs[s]->m_stream->start_time;
         wxLogDebug(wxT("Stream %d start_time = %d, that would be %f milliseconds."), s, mScs[s]->m_stream->start_time, double(mScs[s]->m_stream->start_time)/AV_TIME_BASE*1000);
      }
      if (stream_delay != 0)
      {
         for (int c = 0; c < mScs[s]->m_stream->codec->channels; c++)
         {
            WaveTrack *t = mChannels[s][c];
            t->InsertSilence(0,double(stream_delay)/AV_TIME_BASE);
         }
      }
   }
   // This is the heart of the importing process
   // The result of Import() to be returned. It will be non-zero if the user cancelled or an error occurred.
   int res = eProgressSuccess;

#ifdef EXPERIMENTAL_OD_FFMPEG
   mUsingOD = false;
   gPrefs->Read(wxT("/Library/FFmpegOnDemand"), &mUsingOD);
   //at this point we know the file is good and we know the number of channels from mScs[s]->m_stream->codec->channels,
   //so for OD loading we create the tracks and release the modal lock after starting the ODTask.
   if (mUsingOD) {
      std::vector<ODDecodeFFmpegTask*> tasks;
      //append blockfiles to each stream and add an individual ODDecodeTask for each one.
      for (int s = 0; s < mNumStreams; s++) {
         ODDecodeFFmpegTask* odTask=new ODDecodeFFmpegTask(mScs,mNumStreams,mChannels,mFormatContext, s);
         odTask->CreateFileDecoder(mFilename);

         //each stream has different duration.  We need to know it if seeking is to be allowed.
         sampleCount sampleDuration = 0;
         if (mScs[s]->m_stream->duration > 0)
            sampleDuration = ((sampleCount)mScs[s]->m_stream->duration * mScs[s]->m_stream->time_base.num) *mScs[s]->m_stream->codec->sample_rate / mScs[s]->m_stream->time_base.den;
         else
            sampleDuration = ((sampleCount)mFormatContext->duration *mScs[s]->m_stream->codec->sample_rate) / AV_TIME_BASE;

         //      printf(" OD duration samples %qi, sr %d, secs %d\n",sampleDuration, (int)mScs[s]->m_stream->codec->sample_rate,(int)sampleDuration/mScs[s]->m_stream->codec->sample_rate);
         
         //for each wavetrack within the stream add coded blockfiles
         for (int c = 0; c < mScs[s]->m_stream->codec->channels; c++) {
            WaveTrack *t = mChannels[s][c];
            odTask->AddWaveTrack(t);
         
            sampleCount maxBlockSize = t->GetMaxBlockSize();
            //use the maximum blockfile size to divide the sections (about 11secs per blockfile at 44.1khz)
            for (sampleCount i = 0; i < sampleDuration; i += maxBlockSize) {
               sampleCount blockLen = maxBlockSize;
               if (i + blockLen > sampleDuration)
                  blockLen = sampleDuration - i;
            
               t->AppendCoded(mFilename, i, blockLen, c,ODTask::eODFFMPEG);
            
               // This only works well for single streams since we assume 
               // each stream is of the same duration and channels
               res = mProgress->Update(i+sampleDuration*c+ sampleDuration*mScs[s]->m_stream->codec->channels*s, 
                                       sampleDuration*mScs[s]->m_stream->codec->channels*mNumStreams);
               if (res != eProgressSuccess)
                  break;
            }
         }
         tasks.push_back(odTask);
      }
      //Now we add the tasks and let them run, or delete them if the user cancelled
      for(int i=0; i < (int)tasks.size(); i++) {
         if(res==eProgressSuccess)
            ODManager::Instance()->AddNewTask(tasks[i]);
         else
            {
               delete tasks[i];
            }
      }
   } else {
#endif
   streamContext *sc = NULL;

   // Read next frame.
   while ((sc = ReadNextFrame()) != NULL && (res == eProgressSuccess))
   {
      // ReadNextFrame returns 1 if stream is not to be imported
      if (sc != (streamContext*)1)
      {
         // Decode frame until it is not possible to decode any further
         while (sc->m_pktRemainingSiz > 0 && (res == eProgressSuccess || res == eProgressStopped))
         {
            if (DecodeFrame(sc,false) < 0)
               break;

            // If something useable was decoded - write it to mChannels
            if (sc->m_frameValid)
               res = WriteData(sc);
         }

         // Cleanup after frame decoding
         if (sc->m_pktValid)
         {
            av_free_packet(&sc->m_pkt);
            sc->m_pktValid = 0;
         }    
      }
   }

   // Flush the decoders.
   if ((mNumStreams != 0) && (res == eProgressSuccess || res == eProgressStopped))
   {
      for (int i = 0; i < mNumStreams; i++)
      {
         if (DecodeFrame(mScs[i], true) == 0)
         {
            WriteData(mScs[i]);

            if (mScs[i]->m_pktValid)
            {
               av_free_packet(&mScs[i]->m_pkt);
               mScs[i]->m_pktValid = 0;
            }               
         }
      }
   }
#ifdef EXPERIMENTAL_OD_FFMPEG
   } // else -- !mUsingOD == true
#endif   //EXPERIMENTAL_OD_FFMPEG

   // Something bad happened - destroy everything!
   if (res == eProgressCancelled || res == eProgressFailed)
   {
      for (int s = 0; s < mNumStreams; s++)
      {
         delete[] mChannels[s];
      }
      delete[] mChannels;

      return res;
   }
   //else if (res == 2), we just stop the decoding as if the file has ended

   *outNumTracks = 0;
   for (int s = 0; s < mNumStreams; s++)
   {
      *outNumTracks += mScs[s]->m_initialchannels;
   }

   // Create new tracks
   *outTracks = new Track *[*outNumTracks];

   // Copy audio from mChannels to newly created tracks (destroying mChannels elements in process)
   int trackindex = 0;
   for (int s = 0; s < mNumStreams; s++)
   {
      for(int c = 0; c < mScs[s]->m_initialchannels; c++)
      {
         mChannels[s][c]->Flush();
         (*outTracks)[trackindex++] = mChannels[s][c];
      }
      delete[] mChannels[s];
   }
   delete[] mChannels;

   // Save metadata
   WriteMetadata(tags);

   return res;
}
Example #12
static BOOL DLL_CALLCONV
Save(FreeImageIO *io, FIBITMAP *dib, fi_handle handle, int page, int flags, void *data) {
	BOOL bIsFlipped = FALSE;		// FreeImage DIB are upside-down relative to usual graphic conventions
	PKPixelFormatGUID guid_format;	// image format
	PKPixelInfo pixelInfo;			// image specifications
	BOOL bHasAlpha = FALSE;			// is alpha layer present ?

	PKImageEncode *pEncoder = NULL;		// encoder interface
	ERR error_code = 0;					// error code as returned by the interface

	// get the I/O stream wrapper
	WMPStream *pEncodeStream = (WMPStream*)data;

	if(!dib || !handle || !pEncodeStream) {
		return FALSE;
	}

	try {
		// get image dimensions
		unsigned width = FreeImage_GetWidth(dib);
		unsigned height = FreeImage_GetHeight(dib);

		// check JPEG-XR limits
		if((width < MB_WIDTH_PIXEL) || (height < MB_HEIGHT_PIXEL)) {
			FreeImage_OutputMessageProc(s_format_id, "Unsupported image size: width x height = %d x %d", width, height);
			throw (const char*)NULL;
		}

		// get output pixel format
		error_code = GetOutputPixelFormat(dib, &guid_format, &bHasAlpha);
		JXR_CHECK(error_code);
		pixelInfo.pGUIDPixFmt = &guid_format;
		error_code = PixelFormatLookup(&pixelInfo, LOOKUP_FORWARD);
		JXR_CHECK(error_code);

		// create a JXR encoder interface and initialize function pointers with *_WMP functions
		error_code = PKImageEncode_Create_WMP(&pEncoder);
		JXR_CHECK(error_code);

		// attach the stream to the encoder and set all encoder parameters to zero ...
		error_code = pEncoder->Initialize(pEncoder, pEncodeStream, &pEncoder->WMP.wmiSCP, sizeof(CWMIStrCodecParam));
		JXR_CHECK(error_code);

		// ... then configure the encoder
		SetEncoderParameters(&pEncoder->WMP.wmiSCP, &pixelInfo, flags, bHasAlpha);

		// set pixel format
		pEncoder->SetPixelFormat(pEncoder, guid_format);

		// set image size
		pEncoder->SetSize(pEncoder, width, height);
		
		// set resolution (convert from universal units to English units)
		float resX = (float)(unsigned)(0.5F + 0.0254F * FreeImage_GetDotsPerMeterX(dib));
		float resY = (float)(unsigned)(0.5F + 0.0254F * FreeImage_GetDotsPerMeterY(dib));
		pEncoder->SetResolution(pEncoder, resX, resY);

		// set metadata
		WriteMetadata(pEncoder, dib);

		// write metadata & pixels
		// -----------------------

		// dib coordinates are upside-down relative to usual conventions
		bIsFlipped = FreeImage_FlipVertical(dib);

		// get a pointer to dst pixel data
		BYTE *dib_bits = FreeImage_GetBits(dib);

		// get dst pitch (count of BYTE for stride)
		const unsigned cbStride = FreeImage_GetPitch(dib);

		// write metadata + pixels on output
		error_code = pEncoder->WritePixels(pEncoder, height, dib_bits, cbStride);
		JXR_CHECK(error_code);

		// recover dib coordinates
		FreeImage_FlipVertical(dib);

		// free the encoder
		pEncoder->Release(&pEncoder);
		assert(pEncoder == NULL);
		
		return TRUE;

	} catch (const char *message) {
		if(bIsFlipped) {
			// recover dib coordinates
			FreeImage_FlipVertical(dib);
		}
		if(pEncoder) {
			// free the encoder
			pEncoder->Release(&pEncoder);
			assert(pEncoder == NULL);
		}
		if(NULL != message) {
			FreeImage_OutputMessageProc(s_format_id, message);
		}
	}

	return FALSE;
}