Example #1
///
/// \brief PLSData::Discriminate
/// \param data Observation matrix (usually spectra); used as the predictor block X, one spectrum per column
/// \param labels Response matrix Y, with one row per observation
/// \return true on success
/// Perform PLS-DA (partial least squares discriminant analysis)
bool PLSData::Discriminate(const mat &data, const mat &labels)
{
    //data (usually, but not necessarily, spectra) is X
    //labels are Y
    if (labels.n_rows != data.n_cols) return false;
    mat X_loadings, Y_loadings, X_scores, Y_scores, coefficients, percent_variance, fitted;
    bool success = Vespucci::Math::DimensionReduction::plsregress(data.t(), labels, labels.n_cols,
                                                                  X_loadings, Y_loadings,
                                                                  X_scores, Y_scores,
                                                                  coefficients, percent_variance,
                                                                  fitted);
    if (success){
        // Compute residuals only after a successful regression, so "fitted" has valid dimensions
        mat residuals = fitted - labels;
        AddMetadata("Type", "Calibration");
        AddMetadata("Components calculated", QString::number(labels.n_cols));
        AddMatrix("Percent Variance", percent_variance);
        AddMatrix("Predictor Loadings", X_loadings);
        AddMatrix("Response Loadings", Y_loadings);
        AddMatrix("Predictor Scores", X_scores);
        AddMatrix("Response Scores", Y_scores);
        AddMatrix("Coefficients", coefficients);
        AddMatrix("Fitted Data", fitted);
        AddMatrix("Residuals", residuals);
    }

    return success;
}
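
A minimal call-site sketch for Discriminate(), assuming the layout implied by the orientation check above (one spectrum per column of data, one observation per row of labels). The helper name, matrix sizes, and the one-hot encoding are illustrative, not part of the library.

#include <armadillo>

// Hypothetical helper: build a one-hot class matrix and run PLS-DA.
// PLSData's constructor is not shown in these examples, so the object
// is taken by reference.
bool RunDiscriminantExample(PLSData &pls_data)
{
    arma::mat spectra(1024, 60, arma::fill::randu); // 1024 abscissa points, one spectrum per column
    arma::mat classes(60, 3, arma::fill::zeros);    // one row per spectrum, one column per class
    classes.submat(0, 0, 19, 0).ones();             // spectra 0-19  -> class 1
    classes.submat(20, 1, 39, 1).ones();            // spectra 20-39 -> class 2
    classes.submat(40, 2, 59, 2).ones();            // spectra 40-59 -> class 3

    // classes.n_rows (60) matches spectra.n_cols (60), so the orientation check passes
    return pls_data.Discriminate(spectra, classes);
}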
Example #2
///
/// \brief PLSData::Classify
/// \param spectra Input matrix
/// \param wavelength Spectral abscissa
/// \param components Number of components to calculate
/// \return true on success
/// Performs PLS analysis on the spectra matrix, using the abscissa repeated
/// once per component as the response block.
bool PLSData::Classify(const mat &spectra, const vec &wavelength, int components)
{
    mat Y = repmat(wavelength, 1, components);
    mat X_loadings, Y_loadings, X_scores, Y_scores, coefficients, percent_variance, fitted;
    bool success = Vespucci::Math::DimensionReduction::plsregress(spectra, Y, components,
                                        X_loadings, Y_loadings,
                                        X_scores, Y_scores,
                                        coefficients, percent_variance,
                                        fitted);

    //mat residuals = fitted - spectra;
    if (success){
        AddMetadata("Type", "Classification (PCA)");
        AddMetadata("Components calculated", QString::number(components));
        AddMatrix("Percent Variance", percent_variance);
        AddMatrix("Predictor Loadings", X_loadings);
        AddMatrix("Response Loadings", Y_loadings);
        AddMatrix("Predictor Scores", X_scores);
        AddMatrix("Response Scores", Y_scores);
        AddMatrix("Coefficients", coefficients);
        AddMatrix("Fitted Data", fitted);
        //AddMatrix("Residuals", residuals);
    }

    return success;

}
Example #3
bool PLSData::Calibrate(const mat &spectra, const mat &controls)
{
    //spectra is Y
    //controls are X
    mat X_loadings, Y_loadings, X_scores, Y_scores, coefficients, percent_variance, fitted;
    bool success = Vespucci::Math::DimensionReduction::plsregress(controls, spectra, controls.n_cols,
                                                                  X_loadings, Y_loadings,
                                                                  X_scores, Y_scores,
                                                                  coefficients, percent_variance,
                                                                  fitted);

    inplace_trans(coefficients);
    //mat residuals = fitted - spectra;
    if (success){
        AddMetadata("Type", "Calibration");
        AddMetadata("Components calculated", QString::number(controls.n_cols));
        AddMatrix("Percent Variance", percent_variance);
        AddMatrix("Predictor Loadings", X_loadings);
        AddMatrix("Response Loadings", Y_loadings);
        AddMatrix("Predictor Scores", X_scores);
        AddMatrix("Response Scores", Y_scores);
        AddMatrix("Coefficients", coefficients);
        AddMatrix("Fitted Data", fitted);
        //AddMatrix("Residuals", residuals);
    }

    return success;
}
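
A comparable sketch for Calibrate(). Here controls is passed as the predictor block and spectra as the response, both with one row per observation; the layout and sizes are assumptions for illustration, not something stated in the excerpt.

#include <armadillo>

// Hypothetical calibration call: 50 measurements of 2 control variables
// (e.g. known concentrations) against 50 measured spectra. Calibrate()
// requests controls.n_cols = 2 components.
bool RunCalibrationExample(PLSData &pls_data)
{
    arma::mat spectra(50, 1024, arma::fill::randu); // assumed: one spectrum per row
    arma::mat controls(50, 2, arma::fill::randu);   // one row per observation, one column per control
    return pls_data.Calibrate(spectra, controls);
}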
Example #4
void Mpris1Player::CurrentSongChanged(const Song& song, const QString& art_uri,
                                      const QImage&) {
  last_metadata_ = Mpris1::GetMetadata(song);

  if (!art_uri.isEmpty()) {
    AddMetadata("arturl", art_uri, &last_metadata_);
  }

  emit TrackChange(last_metadata_);
  emit StatusChange(GetStatus());
  emit CapsChange(GetCaps());
}
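
The AddMetadata() overload used here is not included in these examples. A plausible minimal sketch (an assumption, not the project's actual definition), which would also explain why the art URI is guarded with isEmpty() before the call, is a helper that only inserts non-empty values into the map:

#include <QString>
#include <QVariantMap>

// Sketch only: store the value under "key", skipping empty strings so the
// metadata map carries only fields that are actually set.
static void AddMetadata(const QString &key, const QString &value, QVariantMap *map)
{
    if (!value.isEmpty())
        (*map)[key] = value;
}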
Example #5
ProgressResult QTImportFileHandle::Import(TrackFactory *trackFactory,
                               TrackHolders &outTracks,
                               Tags *tags)
{
   outTracks.clear();

   OSErr err = noErr;
   MovieAudioExtractionRef maer = NULL;
   auto updateResult = ProgressResult::Success;
   auto totSamples =
      (sampleCount) GetMovieDuration(mMovie); // convert from TimeValue
   decltype(totSamples) numSamples = 0;
   Boolean discrete = true;
   UInt32 quality = kQTAudioRenderQuality_Max;
   AudioStreamBasicDescription desc;
   UInt32 maxSampleSize;
   bool res = false;

   auto cleanup = finally( [&] {
      if (maer) {
         MovieAudioExtractionEnd(maer);
      }
   } );

   CreateProgress();

   do
   {
      err = MovieAudioExtractionBegin(mMovie, 0, &maer);
      if (err != noErr) {
         AudacityMessageBox(_("Unable to start QuickTime extraction"));
         break;
      }
   
      err = MovieAudioExtractionSetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Audio,
                                            kQTMovieAudioExtractionAudioPropertyID_RenderQuality,
                                            sizeof(quality),
                                            &quality);
      if (err != noErr) {
         AudacityMessageBox(_("Unable to set QuickTime render quality"));
         break;
      }
   
      err = MovieAudioExtractionSetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Movie,
                                            kQTMovieAudioExtractionMoviePropertyID_AllChannelsDiscrete,
                                            sizeof(discrete),
                                            &discrete);
      if (err != noErr) {
         AudacityMessageBox(_("Unable to set QuickTime discrete channels property"));
         break;
      }
   
      err = MovieAudioExtractionGetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Audio,
                                            kQTAudioPropertyID_MaxAudioSampleSize,
                                            sizeof(maxSampleSize),
                                            &maxSampleSize,
                                            NULL);
      if (err != noErr) {
         AudacityMessageBox(_("Unable to get QuickTime sample size property"));
         break;
      }
   
      err = MovieAudioExtractionGetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Audio,
                                            kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
                                            sizeof(desc),
                                            &desc,
                                            NULL);
      if (err != noErr) {
         AudacityMessageBox(_("Unable to retrieve stream description"));
         break;
      }
   
      auto numchan = desc.mChannelsPerFrame;
      const size_t bufsize = 5 * desc.mSampleRate;
   
      // determine sample format
      sampleFormat format;
      switch (maxSampleSize)
      {
         case 16:
            format = int16Sample;
            break;
   
         case 24:
            format = int24Sample;
            break;
   
         default:
            format = floatSample;
            break;
      }

      // Allocate an array of pointers, whose size is not known statically,
      // and prefixed with the AudioBufferList structure.
      MallocPtr< AudioBufferList > abl{
         static_cast< AudioBufferList * >(
            calloc( 1, offsetof( AudioBufferList, mBuffers ) +
               (sizeof(AudioBuffer) * numchan))) };
      abl->mNumberBuffers = numchan;
   
      TrackHolders channels{ numchan };

      const auto size = sizeof(float) * bufsize;
      ArraysOf<unsigned char> holders{ numchan, size };
      for (size_t c = 0; c < numchan; c++) {
         auto &buffer = abl->mBuffers[c];
         auto &holder = holders[c];
         auto &channel = channels[c];

         buffer.mNumberChannels = 1;
         buffer.mDataByteSize = size;

         buffer.mData = holder.get();
   
         channel = trackFactory->NewWaveTrack( format );
         channel->SetRate( desc.mSampleRate );
   
         if (numchan == 2) {
            if (c == 0) {
               channel->SetChannel(Track::LeftChannel);
               channel->SetLinked(true);
            }
            else if (c == 1) {
               channel->SetChannel(Track::RightChannel);
            }
         }
      }
   
      do {
         UInt32 flags = 0;
         UInt32 numFrames = bufsize;
   
         err = MovieAudioExtractionFillBuffer(maer,
                                              &numFrames,
                                              abl.get(),
                                              &flags);
         if (err != noErr) {
            AudacityMessageBox(_("Unable to get fill buffer"));
            break;
         }
   
         for (size_t c = 0; c < numchan; c++) {
            channels[c]->Append((char *) abl->mBuffers[c].mData, floatSample, numFrames);
         }
   
         numSamples += numFrames;
   
         updateResult = mProgress->Update(
            numSamples.as_long_long(),
            totSamples.as_long_long() );
   
         if (numFrames == 0 || flags & kQTMovieAudioExtractionComplete) {
            break;
         }
      } while (updateResult == ProgressResult::Success);
   
      res = (updateResult == ProgressResult::Success && err == noErr);
   
      if (res) {
         for (const auto &channel: channels) {
            channel->Flush();
         }
   
         outTracks.swap(channels);
      }

      //
      // Extract any metadata
      //
      if (res) {
         AddMetadata(tags);
      }
   } while (false);

// done:

   return (res ? ProgressResult::Success : ProgressResult::Failed);
}
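
The cleanup block at the top of Import() relies on a finally() scope guard whose definition lies outside this excerpt. A generic sketch of such a guard (an assumption, not the project's actual implementation) shows why MovieAudioExtractionEnd() runs on every exit path, including the early break statements:

#include <utility>

// Minimal scope-guard sketch: the stored callable runs when the guard is
// destroyed, so cleanup happens however the enclosing scope is left.
template <typename F>
struct Finally {
   explicit Finally(F f) : action(std::move(f)) {}
   Finally(Finally &&other) : action(std::move(other.action)), active(other.active)
      { other.active = false; }
   Finally(const Finally &) = delete;
   Finally &operator=(const Finally &) = delete;
   ~Finally() { if (active) action(); }

   F action;
   bool active = true;
};

template <typename F>
Finally<F> finally(F f) { return Finally<F>(std::move(f)); }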
Example #6
BOOL CLocalSearch::AddHitG1(CLibraryFile* pFile, int nIndex)
{
	// Check that the file is actually available. (We must not return ghost hits to G1!)
	if ( ! pFile->IsAvailable() ) return FALSE;

	// Check that a queue that can upload this file exists, and isn't insanely long.
	if ( UploadQueues.QueueRank( PROTOCOL_HTTP, pFile ) > Settings.Gnutella1.HitQueueLimit ) return FALSE;
	// Normally this isn't a problem - the default queue length is 8 to 10, so this check (50) will
	// never be activated. However, sometimes users configure bad settings, such as a 2000-user HTTP
	// queue. Although the remote client could/should handle this by itself, we really should give
	// Gnutella some protection against 'extreme' settings (if only to reduce unnecessary traffic).

	m_pPacket->WriteLongLE( pFile->m_nIndex );
	m_pPacket->WriteLongLE( (DWORD)min( pFile->GetSize(), QWORD(0xFFFFFFFF) ) );
	if ( Settings.Gnutella1.QueryHitUTF8 ) //Support UTF-8 Query
	{
		m_pPacket->WriteStringUTF8( pFile->m_sName );
	}
	else
	{
		m_pPacket->WriteString( pFile->m_sName );
	}

	if ( pFile->m_bSHA1 )
	{
		CString strHash = CSHA::HashToString( &pFile->m_pSHA1, TRUE );
		m_pPacket->WriteString( strHash );

		/*
		CGGEPBlock pBlock;

		CGGEPItem* pItem = pBlock.Add( _T("H") );
		pItem->WriteByte( 1 );
		pItem->Write( &pFile->m_pSHA1, 20 );

		pBlock.Write( m_pPacket );
		m_pPacket->WriteByte( 0 );
		*/
	}
	else if ( pFile->m_bTiger )
	{
		CString strHash = CTigerNode::HashToString( &pFile->m_pTiger, TRUE );
		m_pPacket->WriteString( strHash );
	}
	else if ( pFile->m_bED2K )
	{
		CString strHash = CED2K::HashToString( &pFile->m_pED2K, TRUE );
		m_pPacket->WriteString( strHash );
	}
	else
	{
		m_pPacket->WriteByte( 0 );
	}

	if ( pFile->m_pSchema != NULL && pFile->m_pMetadata != NULL && ( m_pSearch == NULL || m_pSearch->m_bWantXML ) )
	{
		AddMetadata( pFile->m_pSchema, pFile->m_pMetadata, nIndex );
	}

	return TRUE;
}
Example #7
int QTImportFileHandle::Import(TrackFactory *trackFactory,
                               Track ***outTracks,
                               int *outNumTracks,
                               Tags *tags)
{
   OSErr err = noErr;
   MovieAudioExtractionRef maer = NULL;
   int updateResult = eProgressSuccess;
   sampleCount totSamples = (sampleCount) GetMovieDuration(mMovie);
   sampleCount numSamples = 0;
   Boolean discrete = true;
   UInt32 quality = kQTAudioRenderQuality_Max;
   AudioStreamBasicDescription desc;
   UInt32 maxSampleSize;
   UInt32 numchan;
   UInt32 bufsize;
   bool res = false;

   CreateProgress();

   do
   {
      err = MovieAudioExtractionBegin(mMovie, 0, &maer);
      if (err != noErr) {
         wxMessageBox(_("Unable to start QuickTime extraction"));
         break;
      }
   
      err = MovieAudioExtractionSetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Audio,
                                            kQTMovieAudioExtractionAudioPropertyID_RenderQuality,
                                            sizeof(quality),
                                            &quality);
      if (err != noErr) {
         wxMessageBox(_("Unable to set QuickTime render quality"));
         break;
      }
   
      err = MovieAudioExtractionSetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Movie,
                                            kQTMovieAudioExtractionMoviePropertyID_AllChannelsDiscrete,
                                            sizeof(discrete),
                                            &discrete);
      if (err != noErr) {
         wxMessageBox(_("Unable to set QuickTime discrete channels property"));
         break;
      }
   
      err = MovieAudioExtractionGetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Audio,
                                            kQTAudioPropertyID_MaxAudioSampleSize,
                                            sizeof(maxSampleSize),
                                            &maxSampleSize,
                                            NULL);
      if (err != noErr) {
         wxMessageBox(_("Unable to get QuickTime sample size property"));
         break;
      }
   
      err = MovieAudioExtractionGetProperty(maer,
                                            kQTPropertyClass_MovieAudioExtraction_Audio,
                                            kQTMovieAudioExtractionAudioPropertyID_AudioStreamBasicDescription,
                                            sizeof(desc),
                                            &desc,
                                            NULL);
      if (err != noErr) {
         wxMessageBox(_("Unable to retrieve stream description"));
         break;
      }
   
      numchan = desc.mChannelsPerFrame;
      bufsize = 5 * desc.mSampleRate;
   
      // determine sample format
      sampleFormat format;
      switch (maxSampleSize)
      {
         case 16:
            format = int16Sample;
            break;
   
         case 24:
            format = int24Sample;
            break;
   
         default:
            format = floatSample;
            break;
      }
   
      AudioBufferList *abl = (AudioBufferList *)
         calloc(1, offsetof(AudioBufferList, mBuffers) + (sizeof(AudioBuffer) * numchan));
      abl->mNumberBuffers = numchan;
   
      WaveTrack **channels = new WaveTrack *[numchan];
   
      int c;
      for (c = 0; c < numchan; c++) {
         abl->mBuffers[c].mNumberChannels = 1;
         abl->mBuffers[c].mDataByteSize = sizeof(float) * bufsize;
         abl->mBuffers[c].mData = malloc(abl->mBuffers[c].mDataByteSize);
   
         channels[c] = trackFactory->NewWaveTrack(format);
         channels[c]->SetRate(desc.mSampleRate);
   
         if (numchan == 2) {
            if (c == 0) {
               channels[c]->SetChannel(Track::LeftChannel);
               channels[c]->SetLinked(true);
            }
            else if (c == 1) {
               channels[c]->SetChannel(Track::RightChannel);
            }
         }
      }
   
      do {
         UInt32 flags = 0;
         UInt32 numFrames = bufsize;
   
         err = MovieAudioExtractionFillBuffer(maer,
                                              &numFrames,
                                              abl,
                                              &flags);
         if (err != noErr) {
            wxMessageBox(_("Unable to get fill buffer"));
            break;
         }
   
         for (c = 0; c < numchan; c++) {
            channels[c]->Append((char *) abl->mBuffers[c].mData, floatSample, numFrames);
         }
   
         numSamples += numFrames;
   
         updateResult = mProgress->Update((wxULongLong_t)numSamples,
                                          (wxULongLong_t)totSamples);
   
         if (numFrames == 0 || flags & kQTMovieAudioExtractionComplete) {
            break;
         }
      } while (updateResult == eProgressSuccess);
   
      res = (updateResult == eProgressSuccess && err == noErr);
   
      if (res) {
         for (c = 0; c < numchan; c++) {
            channels[c]->Flush();
         }
   
         *outTracks = (Track **) channels;
         *outNumTracks = numchan;
      }
      else {
         for (c = 0; c < numchan; c++) {
            delete channels[c];
         }
   
         delete [] channels;
      }
   
      for (c = 0; c < numchan; c++) {
         free(abl->mBuffers[c].mData);
      }
      free(abl);
   
      //
      // Extract any metadata
      //
      if (res) {
         AddMetadata(tags);
      }
   } while (false);

done:

   if (maer) {
      MovieAudioExtractionEnd(maer);
   }

   return (res ? eProgressSuccess : eProgressFailed);
}
Example #8
bool QTImportFileHandle::Import(TrackFactory *trackFactory, Track ***outTracks,
                                  int *outNumTracks, Tags *tags)
{
   OSErr err = noErr;

   //
   // Determine the file format.
   //

   // GetMediaSampleDescription takes a SampleDescriptionHandle, but apparently
   // if the media is a sound (which presumably we know it is) then it will treat
   // it as a SoundDescriptionHandle (which in addition to the format of single
   // samples, also tells you sample rate, number of channels, etc.)
   // Pretty messed up interface, if you ask me.
   SoundDescriptionHandle soundDescription = (SoundDescriptionHandle)NewHandle(0);
   GetMediaSampleDescription(mMedia, 1, (SampleDescriptionHandle)soundDescription);

   // If this is a compressed format, it may have out-of-stream compression
   // parameters that need to be passed to the sound converter.  We retrieve
   // these in the form of an audio atom.  To do this, however we have to
   // get the data by way of a handle, then copy it manually from the handle to
   // the atom.  These interfaces get worse all the time!
   Handle decompressionParamsHandle = NewHandle(0);
   AudioFormatAtomPtr decompressionParamsAtom = NULL;
   err = GetSoundDescriptionExtension(soundDescription, &decompressionParamsHandle,
                                      siDecompressionParams);
   if(err == noErr)
   {
      // this stream has decompression parameters.  copy from the handle to the atom.
      int paramsSize = GetHandleSize(decompressionParamsHandle);
      HLock(decompressionParamsHandle);
      decompressionParamsAtom = (AudioFormatAtomPtr)NewPtr(paramsSize);
      //err = MemError();
      BlockMoveData(*decompressionParamsHandle, decompressionParamsAtom, paramsSize);
      HUnlock(decompressionParamsHandle);
   }

   if(decompressionParamsHandle)
      DisposeHandle(decompressionParamsHandle);

   //
   // Now we set up a sound converter to decompress the data if it is compressed.
   //

   SoundComponentData inputFormat;
   SoundComponentData outputFormat;
   SoundConverter     soundConverter = NULL;

   inputFormat.flags       = outputFormat.flags       = 0;
   inputFormat.sampleCount = outputFormat.sampleCount = 0;
   inputFormat.reserved    = outputFormat.reserved    = 0;
   inputFormat.buffer      = outputFormat.buffer      = NULL;
   inputFormat.numChannels = outputFormat.numChannels = (*soundDescription)->numChannels;
   inputFormat.sampleSize  = outputFormat.sampleSize  = (*soundDescription)->sampleSize;
   inputFormat.sampleRate  = outputFormat.sampleRate  = (*soundDescription)->sampleRate;

   inputFormat.format = (*soundDescription)->dataFormat;
   outputFormat.format = kSoundNotCompressed;

   err = SoundConverterOpen(&inputFormat, &outputFormat, &soundConverter);

   //
   // Create the Audacity WaveTracks to house the new data
   //

   WaveTrack **channels = new WaveTrack *[outputFormat.numChannels];

   // determine sample format

   sampleFormat format;
   int bytesPerSample;

   // TODO: do we know for sure that 24 and 32 bit samples are the same kind
   // of 24 and 32 bit samples we expect?
   switch(outputFormat.sampleSize) {
      case 16:
         format = int16Sample;
         bytesPerSample = 2;
         break;

      case 24:
         format = int24Sample;
         bytesPerSample = 3;
         break;

      case 32:
         format = floatSample;
         bytesPerSample = 4;
         break;

      default:
         printf("I can't import a %d-bit file!\n", outputFormat.sampleSize);
         return false;
   }

   int c;
   for (c = 0; c < outputFormat.numChannels; c++)
   {
      channels[c] = trackFactory->NewWaveTrack(format);
      channels[c]->SetRate(outputFormat.sampleRate / 65536.0);

      if(outputFormat.numChannels == 2)
      {
         if(c == 0)
         {
            channels[c]->SetChannel(Track::LeftChannel);
            channels[c]->SetLinked(true);
         }
         else if(c == 1)
         {
            channels[c]->SetChannel(Track::RightChannel);
         }
      }
   }

   //
   // Give the converter the decompression atom.
   //

   // (judging from the sample code, it's OK if the atom is NULL, which
   // it will be if there was no decompression information)

   err = SoundConverterSetInfo(soundConverter, siDecompressionParams, decompressionParamsAtom);
   if(err == siUnknownInfoType)
   {
      // the decompressor didn't need the decompression atom, but that's ok.
      err = noErr;
   }

   // Tell the converter we're cool with VBR audio
   SoundConverterSetInfo(soundConverter, siClientAcceptsVBR, Ptr(true));

   //
   // Determine buffer sizes and allocate output buffer
   //

   int inputBufferSize = 655360;
   int outputBufferSize = 524288;
   char *outputBuffer = new char[outputBufferSize];

   //
   // Populate the structure of data that is passed to the callback
   //

   CallbackData cbData;
   memset(&cbData.compData, 0, sizeof(ExtendedSoundComponentData));

   cbData.isSourceVBR        = ((*soundDescription)->compressionID == variableCompression);
   cbData.sourceMedia        = mMedia;
   cbData.getMediaAtThisTime = 0;
   cbData.sourceDuration     = GetMediaDuration(mMedia);
   cbData.isThereMoreSource  = true;
   cbData.maxBufferSize      = inputBufferSize;

   // allocate source media buffer
   cbData.hSource            = NewHandle((long)cbData.maxBufferSize);
   MoveHHi(cbData.hSource);
   HLock(cbData.hSource);

   cbData.compData.desc = inputFormat;
   cbData.compData.desc.buffer = (BytePtr)*cbData.hSource;

   cbData.compData.desc.flags = kExtendedSoundData;
   cbData.compData.extendedFlags = kExtendedSoundBufferSizeValid |
                                   kExtendedSoundSampleCountNotValid;
   if(cbData.isSourceVBR)
      cbData.compData.extendedFlags |= kExtendedSoundCommonFrameSizeValid;

   cbData.compData.bufferSize = 0; // filled in during callback

   // this doesn't make sense to me, but it is taken from sample code
   cbData.compData.recordSize = sizeof(ExtendedSoundComponentData);


   //
   // Begin the Conversion
   //

   err = SoundConverterBeginConversion(soundConverter);

   SoundConverterFillBufferDataUPP fillBufferUPP;
   fillBufferUPP = NewSoundConverterFillBufferDataUPP(SoundConverterFillBufferCallback);

   bool done = false;
   bool cancelled = false;
   sampleCount samplesSinceLastCallback = 0;
   UInt32 outputFrames;
   UInt32 outputBytes;
   UInt32 outputFlags;

#define SAMPLES_PER_CALLBACK 10000

   while(!done && !cancelled)
   {
      err = SoundConverterFillBuffer(soundConverter,    // a sound converter
                                     fillBufferUPP,     // the callback
                                     &cbData,           // refCon passed to FillDataProc
                                     outputBuffer,      // the buffer to decompress into
                                     outputBufferSize,  // size of that buffer
                                     &outputBytes,      // number of bytes actually output
                                     &outputFrames,     // number of frames actually output
                                     &outputFlags);     // fillbuffer retured advisor flags
      if (err)
         break;

      if((outputFlags & kSoundConverterHasLeftOverData) == false)
         done = true;

      for(c = 0; c < outputFormat.numChannels; c++)
         channels[c]->Append(outputBuffer + (c*bytesPerSample),
                             format,
                             outputFrames,
                             outputFormat.numChannels);

      samplesSinceLastCallback += outputFrames;
      if( samplesSinceLastCallback > SAMPLES_PER_CALLBACK )
      {
         if( mProgressCallback )
            cancelled = mProgressCallback(mUserData,
                                          (float)cbData.getMediaAtThisTime /
                                          cbData.sourceDuration);
         samplesSinceLastCallback -= SAMPLES_PER_CALLBACK;
      }
   }

   HUnlock(cbData.hSource);

   // Flush any remaining data to the output buffer.
   // It appears that we have no way of telling this routine how big the output
   // buffer is!  We had better hope that there isn't more data left than
   // the buffer is big.
   SoundConverterEndConversion(soundConverter, outputBuffer, &outputFrames, &outputBytes);

   for(c = 0; c < outputFormat.numChannels; c++)
   {
      channels[c]->Append(outputBuffer + (c*bytesPerSample),
                          format,
                          outputFrames,
                          outputFormat.numChannels);
      channels[c]->Flush();
   }

   bool res = (!cancelled && err == noErr);

   //
   // Extract any metadata
   //
   if (res) {
      AddMetadata(tags);
   }

   delete[] outputBuffer;
   DisposeHandle(cbData.hSource);
   SoundConverterClose(soundConverter);
   DisposeMovie(mMovie);

   if (!res) {
      for (c = 0; c < outputFormat.numChannels; c++)
         delete channels[c];
      delete[] channels;

      return false;
   }

   *outNumTracks = outputFormat.numChannels;
   *outTracks = new Track *[outputFormat.numChannels];
   for(c = 0; c < outputFormat.numChannels; c++)
      (*outTracks)[c] = channels[c];
   delete[] channels;

   return true;
}
Example #9
QVariantMap Mpris1::GetMetadata(const Song& song) {
  QVariantMap ret;

  AddMetadata("location", song.url().toString(), &ret);
  AddMetadata("title", song.PrettyTitle(), &ret);
  AddMetadata("artist", song.artist(), &ret);
  AddMetadata("album", song.album(), &ret);
  AddMetadata("time", song.length_nanosec() / kNsecPerSec, &ret);
  AddMetadata("mtime", song.length_nanosec() / kNsecPerMsec, &ret);
  AddMetadata("tracknumber", song.track(), &ret);
  AddMetadata("year", song.year(), &ret);
  AddMetadata("genre", song.genre(), &ret);
  AddMetadata("disc", song.disc(), &ret);
  AddMetadata("comment", song.comment(), &ret);
  AddMetadata("audio-bitrate", song.bitrate(), &ret);
  AddMetadata("audio-samplerate", song.samplerate(), &ret);
  AddMetadata("bpm", song.bpm(), &ret);
  AddMetadata("composer", song.composer(), &ret);
  AddMetadata("performer", song.performer(), &ret);
  AddMetadata("grouping", song.grouping(), &ret);
  if (song.rating() != -1.0) {
    AddMetadata("rating", song.rating() * 5, &ret);
  }

  return ret;
}
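
A hedged consumer-side sketch: reading a few fields back out of the map returned by GetMetadata(). The key names follow the calls above; the helper name and the Song instance are illustrative assumptions.

#include <QString>
#include <QVariantMap>

// Hypothetical read-back of the metadata map built above.
QString DescribeSong(const Song &song)
{
    QVariantMap metadata = Mpris1::GetMetadata(song);
    QString title  = metadata.value("title").toString();
    int bitrate    = metadata.value("audio-bitrate").toInt();
    qint64 seconds = metadata.value("time").toLongLong();
    return QString("%1 (%2 kbps, %3 s)").arg(title).arg(bitrate).arg(seconds);
}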