Example #1
bool
AppleMP3Reader::DecodeAudioData()
{
  MOZ_ASSERT(mDecoder->OnDecodeThread(), "Should be on decode thread");

  // Read AUDIO_READ_BYTES if we can
  char bytes[AUDIO_READ_BYTES];
  uint32_t numBytes = AUDIO_READ_BYTES;

  nsresult readrv = Read(&numBytes, bytes);

  // This function calls |AudioSampleCallback| above, synchronously, when it
  // finds a compressed MP3 frame.
  OSStatus rv = AudioFileStreamParseBytes(mAudioFileStream,
                                          numBytes,
                                          bytes,
                                          0 /* flags */);

  if (NS_FAILED(readrv)) {
    mAudioQueue.Finish();
    return false;
  }

  // DataUnavailable just means there wasn't enough data to demux anything.
  // We should have more to push into the demuxer next time we're called.
  if (rv && rv != kAudioFileStreamError_DataUnavailable) {
    LOGE("AudioFileStreamParseBytes returned unknown error %x", rv);
    return false;
  }

  return true;
}
Example #2
void Audio_Stream::streamHasBytesAvailable(UInt8 *data, UInt32 numBytes)
{
    AS_TRACE("%s: %u bytes\n", __FUNCTION__, (unsigned int)numBytes);
    
    if (!m_httpStreamRunning) {
        AS_TRACE("%s: stray callback detected!\n", __PRETTY_FUNCTION__);
        return;
    }
    
    if (m_fileOutput) {
        m_fileOutput->write(data, numBytes);
    }
	
    if (m_audioStreamParserRunning) {
        OSStatus result = AudioFileStreamParseBytes(m_audioFileStream, numBytes, data, 0);
        
        if (result != 0) {
            AS_TRACE("%s: AudioFileStreamParseBytes error %d\n", __PRETTY_FUNCTION__, (int)result);
            closeAndSignalError(AS_ERR_STREAM_PARSE);
        } else if (m_initializationError == kAudioConverterErr_FormatNotSupported) {
            AS_TRACE("Audio stream initialization failed due to unsupported format\n");
            closeAndSignalError(AS_ERR_UNSUPPORTED_FORMAT);
        } else if (m_initializationError != noErr) {
            AS_TRACE("Audio stream initialization failed due to unknown error\n");
            closeAndSignalError(AS_ERR_OPEN);
        }
    }
}
Example #3
OSStatus DZAudioQueuePlayer::parse(const void *data, UInt32 length)
{
    if (this->_parser != NULL && data != NULL) {
        return dzDebug(AudioFileStreamParseBytes(this->_parser, length, data, 0),
                       "Audio file stream parse error.");
    }
    return dzDebug(!noErr, "Null audio file stream or null data.");
}
Example #4
static WaitressCbReturn_t BarPlayerAACCb (void *ptr, size_t size, void *stream) {
	struct audioPlayer *player = stream;
	
	QUIT_PAUSE_CHECK;
    
    AudioFileStreamParseBytes(player->audioFileStream, size, ptr, 0);
    
    return WAITRESS_CB_RET_OK;
}
Example #5
nsresult
AppleMP3Reader::ReadMetadata(MediaInfo* aInfo,
                             MetadataTags** aTags)
{
    MOZ_ASSERT(mDecoder->OnDecodeThread(), "Should be on decode thread");

    *aTags = nullptr;

    /*
     * Feed bytes into the parser until we have all the metadata we need to
     * set up the decoder. When the parser has enough data, it will
     * synchronously call back to |AudioMetadataCallback| below.
     */
    OSStatus rv;
    nsresult readrv;
    do {
        char bytes[AUDIO_READ_BYTES];
        uint32_t numBytes = AUDIO_READ_BYTES;
        readrv = ReadAndNotify(&numBytes, bytes);

        rv = AudioFileStreamParseBytes(mAudioFileStream,
                                       numBytes,
                                       bytes,
                                       0 /* flags */);

        // We have to do our decoder setup from the callback. When it's done it will
        // set mStreamReady.
    } while (!mStreamReady && !rv && NS_SUCCEEDED(readrv));

    if (rv) {
        LOGE("Error decoding audio stream metadata\n");
        return NS_ERROR_FAILURE;
    }

    if (!mAudioConverter) {
        LOGE("Failed to setup the AudioToolbox audio decoder\n");
        return NS_ERROR_FAILURE;
    }

    aInfo->mAudio.mRate = mAudioSampleRate;
    aInfo->mAudio.mChannels = mAudioChannels;
    aInfo->mAudio.mHasAudio = mStreamReady;

    {
        ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
        mDecoder->SetMediaDuration(mDuration);
    }

    return NS_OK;
}
Example #6
nsresult
AppleATDecoder::GetImplicitAACMagicCookie(const MediaRawData* aSample)
{
  MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());

  // Prepend ADTS header to AAC audio.
  RefPtr<MediaRawData> adtssample(aSample->Clone());
  if (!adtssample) {
    return NS_ERROR_OUT_OF_MEMORY;
  }
  int8_t frequency_index =
    mp4_demuxer::Adts::GetFrequencyIndex(mConfig.mRate);

  bool rv = mp4_demuxer::Adts::ConvertSample(mConfig.mChannels,
                                             frequency_index,
                                             mConfig.mProfile,
                                             adtssample);
  if (!rv) {
    NS_WARNING("Failed to apply ADTS header");
    return NS_ERROR_FAILURE;
  }
  if (!mStream) {
    OSStatus rv = AudioFileStreamOpen(this,
                                      _MetadataCallback,
                                      _SampleCallback,
                                      kAudioFileAAC_ADTSType,
                                      &mStream);
    if (rv) {
      NS_WARNING("Couldn't open AudioFileStream");
      return NS_ERROR_FAILURE;
    }
  }

  OSStatus status = AudioFileStreamParseBytes(mStream,
                                              adtssample->Size(),
                                              adtssample->Data(),
                                              0 /* discontinuity */);
  if (status) {
    NS_WARNING("Couldn't parse sample");
  }

  if (status || mFileStreamError || mMagicCookie.Length()) {
    // We have decoded a magic cookie or an error occurred, so
    // we won't need the stream any longer.
    AudioFileStreamClose(mStream);
    mStream = nullptr;
  }

  return (mFileStreamError || status) ? NS_ERROR_FAILURE : NS_OK;
}
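Examples #6 and #7 show only the parsing side; the magic cookie itself is captured inside the _MetadataCallback passed to AudioFileStreamOpen. The sketch below is a hedged outline of how such a property-listener callback could copy out kAudioFileStreamProperty_MagicCookieData; only the AudioToolbox calls and constants are real API, while the callback body and the way the cookie is handed back to the decoder are placeholders.

#include <AudioToolbox/AudioToolbox.h>
#include <vector>

// Sketch of a property-listener callback that copies the magic cookie out of
// the stream parser once it becomes available.
static void _MetadataCallback(void* aClientData,
                              AudioFileStreamID aStream,
                              AudioFileStreamPropertyID aProperty,
                              AudioFileStreamPropertyFlags* aFlags)
{
  if (aProperty != kAudioFileStreamProperty_MagicCookieData) {
    return;
  }
  UInt32 size = 0;
  Boolean writable = false;
  if (AudioFileStreamGetPropertyInfo(aStream, aProperty, &size, &writable) || !size) {
    return;
  }
  std::vector<UInt8> cookie(size);
  if (AudioFileStreamGetProperty(aStream, aProperty, &size, cookie.data()) == noErr) {
    // Hand the cookie to the owning decoder via aClientData (placeholder).
  }
}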
Example #7
nsresult
AppleATDecoder::GetImplicitAACMagicCookie(const mp4_demuxer::MP4Sample* aSample)
{
  // Prepend ADTS header to AAC audio.
  nsAutoPtr<mp4_demuxer::MP4Sample> adtssample(aSample->Clone());
  if (!adtssample) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  bool rv = mp4_demuxer::Adts::ConvertSample(mConfig.channel_count,
                                             mConfig.frequency_index,
                                             mConfig.aac_profile,
                                             adtssample);
  if (!rv) {
    NS_WARNING("Failed to apply ADTS header");
    return NS_ERROR_FAILURE;
  }
  if (!mStream) {
    OSStatus rv = AudioFileStreamOpen(this,
                                      _MetadataCallback,
                                      _SampleCallback,
                                      kAudioFileAAC_ADTSType,
                                      &mStream);
    if (rv) {
      NS_WARNING("Couldn't open AudioFileStream");
      return NS_ERROR_FAILURE;
    }
  }

  OSStatus status = AudioFileStreamParseBytes(mStream,
                                              adtssample->size,
                                              adtssample->data,
                                              0 /* discontinuity */);
  if (status) {
    NS_WARNING("Couldn't parse sample");
  }

  if (status || mFileStreamError || mMagicCookie.Length()) {
    // We have decoded a magic cookie or an error occurred, so
    // we won't need the stream any longer.
    AudioFileStreamClose(mStream);
    mStream = nullptr;
  }

  return (mFileStreamError || status) ? NS_ERROR_FAILURE : NS_OK;
}
Example #8
void Audio_Stream::streamHasBytesAvailable(UInt8 *data, CFIndex numBytes)
{
    AS_TRACE("%s: %lu bytes\n", __FUNCTION__, numBytes);
    
    if (!m_httpStreamRunning) {
        AS_TRACE("%s: stray callback detected!\n", __PRETTY_FUNCTION__);
        return;
    }
	
    if (m_audioStreamParserRunning) {
        OSStatus result = AudioFileStreamParseBytes(m_audioFileStream, numBytes, data, 0);
        if (result != 0) {
            AS_TRACE("%s: AudioFileStreamParseBytes error %d\n", __PRETTY_FUNCTION__, (int)result);
            closeAndSignalError(AS_ERR_STREAM_PARSE);
        }
    }
}
Example #9
long AudioStreamDecoder::EnqueueData(const void* data, unsigned int length, bool discontinuous)
{
	unsigned int flags = discontinuous ? kAudioFileStreamParseFlag_Discontinuity : 0;

	long err = mStream ? 0 : ENOENT;
	BAIL_IF(err, "AudioFileStream not initialised\n", err);

	err = data ? 0 : EINVAL;
	BAIL_IF(err, "Invalid data pointer\n");

	err = length ? 0 : EINVAL;
	BAIL_IF(err, "Invalid length\n");

	err = AudioFileStreamParseBytes(mStream, length, data, flags);
	BAIL_IF(err, "AudioFileStreamParseBytes returned %ld\n", err);

bail:
	return err;
}
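Example #9 is the only one that sets kAudioFileStreamParseFlag_Discontinuity. A typical reason to do so is seeking: the packet offset is translated into a byte offset with AudioFileStreamSeek, the source is repositioned, and the next chunk fed to EnqueueData is marked discontinuous. The following is only a sketch under those assumptions; SeekToPacket is a hypothetical helper and the I/O repositioning is left as a comment.

#include <AudioToolbox/AudioToolbox.h>

// Hypothetical seek helper for the decoder in Example #9: map a packet offset
// to a byte offset, then flag the next parsed buffer as discontinuous.
long AudioStreamDecoder::SeekToPacket(SInt64 packetOffset)
{
	SInt64 byteOffset = 0;
	AudioFileStreamSeekFlags flags = (AudioFileStreamSeekFlags)0;
	OSStatus err = AudioFileStreamSeek(mStream, packetOffset, &byteOffset, &flags);
	if (err) {
		return err;
	}
	// kAudioFileStreamSeekFlag_OffsetIsEstimated in flags means byteOffset is
	// approximate. Reposition the underlying source here, then parse the next
	// buffer with the discontinuity flag:
	//   EnqueueData(buffer, bufferLength, true /* discontinuous */);
	return 0;
}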
Example #10
void
AppleATDecoder::SubmitSample(nsAutoPtr<mp4_demuxer::MP4Sample> aSample)
{
  mSamplePosition = aSample->byte_offset;
  OSStatus rv = AudioFileStreamParseBytes(mStream,
                                          aSample->size,
                                          aSample->data,
                                          0);
  if (rv != noErr) {
    LOG("Error %d parsing audio data", rv);
    mCallback->Error();
  }

  // Sometimes we need multiple input samples before AudioToolbox
  // starts decoding. If we haven't seen any output yet, ask for
  // more data here.
  if (!mHaveOutput) {
    mCallback->InputExhausted();
  }
}
Example #11
int main (int argc, char * const argv[]) 
{
	// allocate a struct for storing our state
	MyData* myData = (MyData*)calloc(1, sizeof(MyData));
	
	// initialize a mutex and condition so that we can block on buffers in use.
	pthread_mutex_init(&myData->mutex, NULL);
	pthread_cond_init(&myData->cond, NULL);
	pthread_cond_init(&myData->done, NULL);
	
	// get connected
	int connection_socket = MyConnectSocket();
	if (connection_socket < 0) return 1;
	printf("connected\n");

	// allocate a buffer for reading data from a socket
	const size_t kRecvBufSize = 40000;
	char* buf = (char*)malloc(kRecvBufSize * sizeof(char));

	// create an audio file stream parser
	OSStatus err = AudioFileStreamOpen(myData, MyPropertyListenerProc, MyPacketsProc, 
							kAudioFileAAC_ADTSType, &myData->audioFileStream);
	if (err) { PRINTERROR("AudioFileStreamOpen"); return 1; }
	
	while (!myData->failed) {
		// read data from the socket
		printf("->recv\n");
		ssize_t bytesRecvd = recv(connection_socket, buf, kRecvBufSize, 0);
		printf("bytesRecvd %d\n", bytesRecvd);
		if (bytesRecvd <= 0) break; // eof or failure
		
		// parse the data. this will call MyPropertyListenerProc and MyPacketsProc
		err = AudioFileStreamParseBytes(myData->audioFileStream, bytesRecvd, buf, 0);
		if (err) { PRINTERROR("AudioFileStreamParseBytes"); break; }
	}

	// enqueue last buffer
	MyEnqueueBuffer(myData);

	printf("flushing\n");
	err = AudioQueueFlush(myData->audioQueue);
	if (err) { PRINTERROR("AudioQueueFlush"); return 1; }	

	printf("stopping\n");
	err = AudioQueueStop(myData->audioQueue, false);
	if (err) { PRINTERROR("AudioQueueStop"); return 1; }	
	
	printf("waiting until finished playing..\n");
	pthread_mutex_lock(&myData->mutex); 
	pthread_cond_wait(&myData->done, &myData->mutex);
	pthread_mutex_unlock(&myData->mutex);
	
	
	printf("done\n");
	
	// cleanup
	free(buf);
	err = AudioFileStreamClose(myData->audioFileStream);
	err = AudioQueueDispose(myData->audioQueue, false);
	close(connection_socket);
	free(myData);
	
    return 0;
}
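Example #11 registers MyPropertyListenerProc and MyPacketsProc with AudioFileStreamOpen but does not include them. Their required shapes are fixed by the AudioFileStream_PropertyListenerProc and AudioFileStream_PacketsProc typedefs in AudioToolbox; the bodies below are only a hedged outline of what the original sample does (create the AudioQueue once the stream is ready to produce packets, then copy parsed packets into queue buffers), with the actual buffering left as comments.

#include <AudioToolbox/AudioToolbox.h>

// Outline of the property listener from Example #11: once the parser signals
// kAudioFileStreamProperty_ReadyToProducePackets, read the stream format and
// create the playback AudioQueue.
static void MyPropertyListenerProc(void* inClientData,
                                   AudioFileStreamID inAudioFileStream,
                                   AudioFileStreamPropertyID inPropertyID,
                                   AudioFileStreamPropertyFlags* ioFlags)
{
	if (inPropertyID == kAudioFileStreamProperty_ReadyToProducePackets) {
		AudioStreamBasicDescription asbd = {0};
		UInt32 asbdSize = sizeof(asbd);
		AudioFileStreamGetProperty(inAudioFileStream,
		                           kAudioFileStreamProperty_DataFormat,
		                           &asbdSize, &asbd);
		// AudioQueueNewOutput(&asbd, ...) and buffer allocation would follow,
		// storing the queue in the MyData state passed as inClientData.
	}
}

// Outline of the packets callback: the original sample copies the parsed
// packets from inInputData into AudioQueue buffers and enqueues each buffer
// (via MyEnqueueBuffer) when it fills up.
static void MyPacketsProc(void* inClientData,
                          UInt32 inNumberBytes,
                          UInt32 inNumberPackets,
                          const void* inInputData,
                          AudioStreamPacketDescription* inPacketDescriptions)
{
	// For each packet i: copy inPacketDescriptions[i].mDataByteSize bytes from
	// inInputData at mStartOffset into the current AudioQueue buffer, then
	// call AudioQueueEnqueueBuffer when the buffer is full.
}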
Example #12
nsresult
AppleMP3Reader::ReadMetadata(MediaInfo* aInfo, MetadataTags** aTags)
{
  MOZ_ASSERT(OnTaskQueue());

  *aTags = nullptr;

  /*
   * Feed bytes into the parser until we have all the metadata we need to
   * set up the decoder. When the parser has enough data, it will
   * synchronously call back to |AudioMetadataCallback| below.
   */
  OSStatus rv;
  nsresult readrv;
  uint32_t offset = 0;
  do {
    char bytes[AUDIO_READ_BYTES];
    uint32_t numBytes = AUDIO_READ_BYTES;
    readrv = Read(&numBytes, bytes);

    rv = AudioFileStreamParseBytes(mAudioFileStream,
                                   numBytes,
                                   bytes,
                                   0 /* flags */);

    mMP3FrameParser.Parse(reinterpret_cast<uint8_t*>(bytes), numBytes, offset);

    offset += numBytes;

    // We have to do our decoder setup from the callback. When it's done it will
    // set mStreamReady.
  } while (!mStreamReady && !rv && NS_SUCCEEDED(readrv));

  if (rv) {
    LOGE("Error decoding audio stream metadata\n");
    return NS_ERROR_FAILURE;
  }

  if (!mAudioConverter) {
    LOGE("Failed to setup the AudioToolbox audio decoder\n");
    return NS_ERROR_FAILURE;
  }

  if (!mMP3FrameParser.IsMP3()) {
    LOGE("Frame parser failed to parse MP3 stream\n");
    return NS_ERROR_FAILURE;
  }

  if (mStreamReady) {
    aInfo->mAudio.mRate = mAudioSampleRate;
    aInfo->mAudio.mChannels = mAudioChannels;
  }

  // This special snowflake reader doesn't seem to set *aInfo = mInfo like all
  // the others. Yuck.
  mDuration = mMP3FrameParser.GetDuration();
  mInfo.mMetadataDuration.emplace(TimeUnit::FromMicroseconds(mDuration));
  aInfo->mMetadataDuration.emplace(TimeUnit::FromMicroseconds(mDuration));

  return NS_OK;
}