// static
sp<SimpleDecodingSource> SimpleDecodingSource::Create(
        const sp<IMediaSource> &source, uint32_t flags, const sp<ANativeWindow> &nativeWindow,
        const char *desiredCodec) {
    sp<Surface> surface = static_cast<Surface*>(nativeWindow.get());
    const char *mime = NULL;
    sp<MetaData> meta = source->getFormat();
    CHECK(meta->findCString(kKeyMIMEType, &mime));

    sp<AMessage> format = new AMessage;
    if (convertMetaDataToMessage(source->getFormat(), &format) != OK) {
        return NULL;
    }

    Vector<AString> matchingCodecs;
    MediaCodecList::findMatchingCodecs(
            mime, false /* encoder */, flags, &matchingCodecs);

    sp<ALooper> looper = new ALooper;
    looper->setName("stagefright");
    looper->start();

    sp<MediaCodec> codec;

    for (size_t i = 0; i < matchingCodecs.size(); ++i) {
        const AString &componentName = matchingCodecs[i];
        if (desiredCodec != NULL && componentName.compare(desiredCodec)) {
            continue;
        }

        ALOGV("Attempting to allocate codec '%s'", componentName.c_str());

        codec = MediaCodec::CreateByComponentName(looper, componentName);
        if (codec != NULL) {
            ALOGI("Successfully allocated codec '%s'", componentName.c_str());

            status_t err = codec->configure(format, surface, NULL /* crypto */, 0 /* flags */);
            if (err == OK) {
                err = codec->getOutputFormat(&format);
            }
            if (err == OK) {
                return new SimpleDecodingSource(codec, source, looper,
                        surface != NULL,
                        strcmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS) == 0,
                        format);
            }

            ALOGD("Failed to configure codec '%s'", componentName.c_str());
            codec->release();
            codec = NULL;
        }
    }

    looper->stop();
    ALOGE("No matching decoder! (mime: %s)", mime);
    return NULL;
}
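A possible caller for the factory above, sketched here as an assumption rather than taken from the original listing: the track is presumed to come from a MediaExtractor, and the read loop follows the MediaBuffer convention used in the other examples.

// Hypothetical usage sketch: decode one already-extracted track to raw buffers.
static void decodeTrack(const sp<IMediaSource> &track) {
    sp<MediaSource> decoded = SimpleDecodingSource::Create(
            track, 0 /* flags */, NULL /* nativeWindow */, NULL /* desiredCodec */);
    if (decoded == NULL || decoded->start() != OK) {
        return;
    }

    MediaBuffer *buffer;
    // Stop on EOS or INFO_FORMAT_CHANGED; a real caller may prefer to handle
    // format changes instead of bailing out.
    while (decoded->read(&buffer) == OK) {
        if (buffer != NULL) {
            // Payload: buffer->data() + buffer->range_offset(),
            // buffer->range_length() bytes.
            buffer->release();
        }
    }
    decoded->stop();
}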
Example #2
status_t OggWriter::addSource(const sp<MediaSource> &source)
{
    LOGV("OggWriter::addSource");

    if (mInitCheck != OK)
    {
        return mInitCheck;
    }

    if (mSource != NULL)
    {
        // Ogg files only support a single track of audio.
        return UNKNOWN_ERROR;
    }

    sp<MetaData> meta = source->getFormat();

    const char *mime;
    CHECK(meta->findCString(kKeyMIMEType, &mime));

    if (strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS))
    {
        return ERROR_UNSUPPORTED;
    }

    CHECK(meta->findInt32(kKeySampleRate, &mSampleRate));
    mSource = source;
    return OK;
}
Example #3
sp<MediaSource> prepareVideoEncoder(const sp<ALooper>& looper,
                                    const sp<MediaSource>& source) {
  sp<MetaData> meta = source->getFormat();
  int32_t width, height, stride, sliceHeight, colorFormat;
  CHECK(meta->findInt32(kKeyWidth, &width));
  CHECK(meta->findInt32(kKeyHeight, &height));
  CHECK(meta->findInt32(kKeyStride, &stride));
  CHECK(meta->findInt32(kKeySliceHeight, &sliceHeight));
  CHECK(meta->findInt32(kKeyColorFormat, &colorFormat));

  sp<AMessage> format = new AMessage();
  format->setInt32("width", width);
  format->setInt32("height", height);
  format->setInt32("stride", stride);
  format->setInt32("slice-height", sliceHeight);
  format->setInt32("color-format", colorFormat);

  format->setString("mime", kMimeTypeAvc);
  format->setInt32("bitrate", sVideoBitRateInK * 1024);
  format->setInt32("bitrate-mode", OMX_Video_ControlRateVariable);
  format->setFloat("frame-rate", sFPS);
  format->setInt32("i-frame-interval-ms", sIFrameIntervalMs);

  return MediaCodecSource::Create(
    looper,
    format,
    source,
#ifdef TARGET_GE_MARSHMALLOW
    NULL,
#endif
    sUseMetaDataMode ? MediaCodecSource::FLAG_USE_METADATA_INPUT : 0
  );
}
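A hedged sketch of how the encoder built above might be consumed; feeding it into an MPEG4Writer is an assumption here, and fd/cameraSource are placeholders for an open output descriptor and a raw video source.

// Hypothetical wiring sketch: record the encoder output into an MP4 file.
static status_t recordEncodedVideo(int fd, const sp<MediaSource>& cameraSource) {
    sp<ALooper> looper = new ALooper;
    looper->setName("encoder_looper");
    looper->start();

    sp<MediaSource> encoder = prepareVideoEncoder(looper, cameraSource);
    if (encoder == NULL) {
        looper->stop();
        return UNKNOWN_ERROR;
    }

    sp<MPEG4Writer> writer = new MPEG4Writer(fd);  // fd assumed open for writing
    status_t err = writer->addSource(encoder);
    if (err == OK) {
        err = writer->start();
    }
    // ... record until done ...
    if (err == OK) {
        err = writer->stop();
    }
    looper->stop();
    return err;
}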
Example #4
int
CEncoderLame::syncEncode(
    const sp<IMediaSource>& pMediaSource_in,
    const sp<IAudioSink>&  pAudioSink_out,
    const sp<AMessage>&    pOption_in
)
{
    AUTO_LOG();

    CHECK_PTR_EXT(pMediaSource_in, BAD_VALUE);
    CHECK_PTR_EXT(pAudioSink_out,  BAD_VALUE);

    sp<MetaData> meta = pMediaSource_in->getFormat();

    const char *mime;
    CHECK(meta->findCString(kKeyMIMEType, &mime));

    if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
        int ret = prepare(pMediaSource_in, pAudioSink_out, pOption_in);

        if (OK == ret) {
            ret = encode( pMediaSource_in, pAudioSink_out);
        }

        finish();

        CHECK_IS_EXT((OK == ret), ret);
    } else {
        RETURN(INVALID_OPERATION);
    }

    RETURN(OK);
}
Example #5
status_t TimedTextDriver::addInBandTextSource(
        size_t trackIndex, const sp<MediaSource>& mediaSource) {
    sp<TimedTextSource> source =
            TimedTextSource::CreateTimedTextSource(mediaSource);
    if (source == NULL) {
        return ERROR_UNSUPPORTED;
    }

    const char *mime;
    uint32_t vobSubFlag = 0;
    if (mediaSource->getFormat()->findCString(kKeyMIMEType, &mime)) {

        if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_MATROSKA_VOBSUB) == 0) {
            vobSubFlag = 1;
        }

        if (mObserver) {
            mObserver->subtitleNotify(SUBTITLE_MSG_VOBSUB_FLAG, &vobSubFlag);
        }
    }

    Mutex::Autolock autoLock(mLock);
    mTextSourceVector.add(trackIndex, source);
    mTextSourceTypeVector.add(TEXT_SOURCE_TYPE_IN_BAND);
    return OK;
}
// static
sp<TimedTextSource> TimedTextSource::CreateTimedTextSource(
        const sp<MediaSource>& mediaSource) {
    const char *mime;
    CHECK(mediaSource->getFormat()->findCString(kKeyMIMEType, &mime));
    ALOGE("[PANDA] CreateTimedTextSource, type = %s\n", mime);
    if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0) {
        return new TimedText3GPPSource(mediaSource);
    }
    #ifdef MTK_SUBTITLE_SUPPORT
    else if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_ASS) == 0) {
        return new TimedTextASSSource(mediaSource);
    }
    else if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_SSA) == 0) {
        return new TimedTextSSASource(mediaSource);
    }
    else if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_VOBSUB) == 0) {
        return new TimedTextVOBSUBSource(mediaSource);
    }
    else if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_DVB) == 0) {
        return new TimedTextDVBSource(mediaSource);
    }
    else if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_TXT) == 0) {
        return new TimedTextTXTSource(mediaSource);
    }
    #endif
    ALOGE("Unsupported mime type for subtitle. : %s", mime);
    return NULL;
}
Example #7
status_t WebmWriter::addSource(const sp<MediaSource> &source) {
    Mutex::Autolock l(mLock);
    if (mStarted) {
        ALOGE("Attempt to add source AFTER recording is started");
        return UNKNOWN_ERROR;
    }

    // At most 2 tracks can be supported.
    if (mStreams[kVideoIndex].mTrackEntry != NULL
            && mStreams[kAudioIndex].mTrackEntry != NULL) {
        ALOGE("Too many tracks (2) to add");
        return ERROR_UNSUPPORTED;
    }

    CHECK(source != NULL);

    // A track of type other than video or audio is not supported.
    const char *mime;
    source->getFormat()->findCString(kKeyMIMEType, &mime);
    const char *vp8 = MEDIA_MIMETYPE_VIDEO_VP8;
    const char *vorbis = MEDIA_MIMETYPE_AUDIO_VORBIS;

    size_t streamIndex;
    if (!strncasecmp(mime, vp8, strlen(vp8))) {
        streamIndex = kVideoIndex;
    } else if (!strncasecmp(mime, vorbis, strlen(vorbis))) {
        streamIndex = kAudioIndex;
    } else {
        ALOGE("Track (%s) other than %s or %s is not supported", mime, vp8, vorbis);
        return ERROR_UNSUPPORTED;
    }

    // No more than one video or one audio track is supported.
    if (mStreams[streamIndex].mTrackEntry != NULL) {
        ALOGE("%s track already exists", mStreams[streamIndex].mName);
        return ERROR_UNSUPPORTED;
    }

    // This is the first track of either audio or video.
    // Go ahead to add the track.
    mStreams[streamIndex].mSource = source;
    mStreams[streamIndex].mTrackEntry = mStreams[streamIndex].mMakeTrack(source->getFormat());

    return OK;
}
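A brief usage sketch for the writer above, under the assumption that videoSource and audioSource are already-configured VP8 and Vorbis encoder outputs (for example MediaCodecSource instances) and fd is an open output descriptor.

// Hypothetical usage sketch: one VP8 video track plus one Vorbis audio track.
static status_t startWebmRecording(int fd,
                                   const sp<MediaSource>& videoSource,
                                   const sp<MediaSource>& audioSource) {
    sp<WebmWriter> writer = new WebmWriter(fd);

    status_t err = writer->addSource(videoSource);  // matched as MEDIA_MIMETYPE_VIDEO_VP8
    if (err == OK) {
        err = writer->addSource(audioSource);       // matched as MEDIA_MIMETYPE_AUDIO_VORBIS
    }
    if (err == OK) {
        err = writer->start();  // addSource() is rejected once recording has started
    }
    return err;
}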
bool OmxDecoder::SetVideoFormat() {
  const char *componentName;

  if (!mVideoSource->getFormat()->findInt32(kKeyWidth, &mVideoWidth) ||
      !mVideoSource->getFormat()->findInt32(kKeyHeight, &mVideoHeight) ||
      !mVideoSource->getFormat()->findCString(kKeyDecoderComponent, &componentName) ||
      !mVideoSource->getFormat()->findInt32(kKeyColorFormat, &mVideoColorFormat) ) {
    return false;
  }

  if (!mVideoSource->getFormat()->findInt32(kKeyStride, &mVideoStride)) {
    mVideoStride = mVideoWidth;
    LOG("stride not available, assuming width");
  }

  if (!mVideoSource->getFormat()->findInt32(kKeySliceHeight, &mVideoSliceHeight)) {
    mVideoSliceHeight = mVideoHeight;
    LOG("slice height not available, assuming height");
  }

  if (!mVideoSource->getFormat()->findInt32(kKeyRotation, &mVideoRotation)) {
    mVideoRotation = 0;
    LOG("rotation not available, assuming 0");
  }

  LOG("width: %d height: %d component: %s format: %d stride: %d sliceHeight: %d rotation: %d",
      mVideoWidth, mVideoHeight, componentName, mVideoColorFormat,
      mVideoStride, mVideoSliceHeight, mVideoRotation);

  return true;
}
Example #9
sp<MediaSource> OmxJpegImageDecoder::getDecoder(
        OMXClient *client, const sp<MediaSource>& source) {
    sp<MetaData> meta = source->getFormat();
    sp<MediaSource> decoder = OMXCodec::Create(
            client->interface(), meta, false /* createEncoder */, source);

    CHECK(decoder != NULL);
    return decoder;
}
MPEG4Writer::Track::Track(
        MPEG4Writer *owner, const sp<MediaSource> &source)
    : mOwner(owner),
      mMeta(source->getFormat()),
      mSource(source),
      mDone(false),
      mCodecSpecificData(NULL),
      mCodecSpecificDataSize(0),
      mReachedEOS(false) {
}
// static
sp<TimedTextSource> TimedTextSource::CreateTimedTextSource(
        const sp<MediaSource>& mediaSource) {
    const char *mime;
    CHECK(mediaSource->getFormat()->findCString(kKeyMIMEType, &mime));
    if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0) {
        return new TimedText3GPPSource(mediaSource);
    }
    ALOGE("Unsupported mime type for subtitle. : %s", mime);
    return NULL;
}
static void performSeekTest(const sp<MediaSource> &source) {
    CHECK_EQ((status_t)OK, source->start());

    int64_t durationUs;
    CHECK(source->getFormat()->findInt64(kKeyDuration, &durationUs));

    for (int64_t seekTimeUs = 0; seekTimeUs <= durationUs;
            seekTimeUs += 60000ll) {
        MediaSource::ReadOptions options;
        options.setSeekTo(
                seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);

        MediaBuffer *buffer;
        status_t err;
        for (;;) {
            err = source->read(&buffer, &options);

            options.clearSeekTo();

            if (err == INFO_FORMAT_CHANGED) {
                CHECK(buffer == NULL);
                continue;
            }

            if (err != OK) {
                CHECK(buffer == NULL);
                break;
            }

            CHECK(buffer != NULL);

            if (buffer->range_length() > 0) {
                break;
            }

            buffer->release();
            buffer = NULL;
        }

        if (err == OK) {
            int64_t timeUs;
            CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));

            printf("%lld\t%lld\t%lld\n", seekTimeUs, timeUs, seekTimeUs - timeUs);

            buffer->release();
            buffer = NULL;
        } else {
            printf("ERROR\n");
            break;
        }
    }

    CHECK_EQ((status_t)OK, source->stop());
}
sp<MetaData> MPEG2TSSource::getFormat() {
    sp<MetaData> meta = mImpl->getFormat();

    int64_t durationUs;
    if (mExtractor->mLiveSession != NULL
            && mExtractor->mLiveSession->getDuration(&durationUs) == OK) {
        meta->setInt64(kKeyDuration, durationUs);
    }

    return meta;
}
Example #14
bool OmxDecoder::SetAudioFormat() {
  // If the format changed, update our cached info.
  if (!mAudioSource->getFormat()->findInt32(kKeyChannelCount, &mAudioChannels) ||
      !mAudioSource->getFormat()->findInt32(kKeySampleRate, &mAudioSampleRate)) {
    return false;
  }

  LOG("channelCount: %d sampleRate: %d",
      mAudioChannels, mAudioSampleRate);

  return true;
}
sp<MetaData> MPEG2TSSource::getFormat() {
    sp<MetaData> meta = mImpl->getFormat();

    /* Duration is intentionally not set for a live source:
    int64_t durationUs;
    if (mExtractor->mLiveSource != NULL
            && mExtractor->mLiveSource->getDuration(&durationUs)) {
        meta->setInt64(kKeyDuration, durationUs);
    }
    */
    return meta;
}
MediaPuller::MediaPuller(
        const sp<MediaSource> &source, const sp<AMessage> &notify)
    : mSource(source),
      mNotify(notify),
      mPullGeneration(0),
      mIsAudio(false),
      mPaused(false) {
    sp<MetaData> meta = source->getFormat();
    const char *mime;
    CHECK(meta->findCString(kKeyMIMEType, &mime));

    mIsAudio = !strncasecmp(mime, "audio/", 6);
}
Example #17
status_t WAVEWriter::addSource(const sp<MediaSource> &source) {
    if (mInitCheck != OK) {
        LOGE("Init Check not OK, return");
        return mInitCheck;
    }

    if (mSource != NULL) {
        LOGE("A source already exists, return");
        return UNKNOWN_ERROR;
    }

    sp<MetaData> meta = source->getFormat();

    const char *mime;
    CHECK(meta->findCString(kKeyMIMEType, &mime));

    int32_t channelCount;
    int32_t sampleRate;
    CHECK(meta->findInt32(kKeyChannelCount, &channelCount));
    CHECK(meta->findInt32(kKeySampleRate, &sampleRate));

    memset(&hdr, 0, sizeof(struct wav_header));
    hdr.riff_id = ID_RIFF;
    hdr.riff_fmt = ID_WAVE;
    hdr.fmt_id = ID_FMT;
    hdr.fmt_sz = 16;
    hdr.audio_format = FORMAT_PCM;
    hdr.num_channels = channelCount;
    hdr.sample_rate = sampleRate;
    hdr.bits_per_sample = 16;
    hdr.byte_rate = (sampleRate * channelCount * hdr.bits_per_sample) / 8;
    hdr.block_align = ( hdr.bits_per_sample * channelCount ) / 8;
    hdr.data_id = ID_DATA;
    hdr.data_sz = 0;
    hdr.riff_sz = hdr.data_sz + 44 - 8;

    if (write(mFd, &hdr, sizeof(hdr)) != sizeof(hdr)) {
        LOGE("Write header error, return ERROR_IO");
        return ERROR_IO;
    }

    mSource = source;

    return OK;
}
Example #18
sp<MediaSource> prepareAudioEncoder(const sp<ALooper>& looper,
                                    const sp<MediaSource>& source) {
  sp<MetaData> meta = source->getFormat();
  int32_t maxInputSize, channels, sampleRate, bitrate;
  CHECK(meta->findInt32(kKeyMaxInputSize, &maxInputSize));
  CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
  CHECK(meta->findInt32(kKeyChannelCount, &channels));

  sp<AMessage> format = new AMessage();
  format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
  format->setInt32("aac-profile", OMX_AUDIO_AACObjectLC);
  format->setInt32("max-input-size", maxInputSize);
  format->setInt32("sample-rate", sampleRate);
  format->setInt32("channel-count", channels);
  format->setInt32("bitrate", sAudioBitRate);

  return MediaCodecSource::Create(looper, format, source);
}
bool OmxDecoder::SetAudioFormat() {
  // If the format changed, update our cached info.
  if (!mAudioSource->getFormat()->findInt32(kKeyChannelCount, &mAudioChannels) ||
      !mAudioSource->getFormat()->findInt32(kKeySampleRate, &mAudioSampleRate)) {
    return false;
  }

  LOG("channelCount: %d sampleRate: %d", mAudioChannels, mAudioSampleRate);

  if (mAudioChannels < 0) {
    LOG("audio channel count %d must be nonnegative", mAudioChannels);
    return false;
  }

  if (mAudioSampleRate < 0) {
    LOG("audio sample rate %d must be nonnegative", mAudioSampleRate);
    return false;
  }

  return true;
}
Example #20
status_t AMRWriter::addSource(const sp<MediaSource> &source) {
    if (mInitCheck != OK) {
        return mInitCheck;
    }

    if (mSource != NULL) {
        // AMR files only support a single track of audio.
        return UNKNOWN_ERROR;
    }

    sp<MetaData> meta = source->getFormat();

    const char *mime;
    CHECK(meta->findCString(kKeyMIMEType, &mime));

    bool isWide = false;
    if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB)) {
        isWide = true;
    } else if (strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) {
        return ERROR_UNSUPPORTED;
    }

    int32_t channelCount;
    int32_t sampleRate;
    CHECK(meta->findInt32(kKeyChannelCount, &channelCount));
    CHECK_EQ(channelCount, 1);
    CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
    CHECK_EQ(sampleRate, (isWide ? 16000 : 8000));

    mSource = source;

    const char *kHeader = isWide ? "#!AMR-WB\n" : "#!AMR\n";
    ssize_t n = strlen(kHeader);
    if (write(mFd, kHeader, n) != n) {
        return ERROR_IO;
    }

    return OK;
}
Example #21
/*!
*	\brief	Invoked when the first input data is decoded.
*/
void UMMediaPlayer::setVideoDecoder(const sp <UMMediaSource >  &source)
{
	UMLOG_ERR("setVideoSource entry point");
	Mutex::Autolock autoLock(mLock);
	mVideoSource = source;

	UMLOG_ERR("setVideoSource source->getFormat();");
	sp < MetaData > meta = source->getFormat();

	bool success = meta->findInt32(kKeyWidth, &mVideoWidth);
	CHECK(success);

	success = meta->findInt32(kKeyHeight, &mVideoHeight);
	CHECK(success);
	UMLOG_ERR("setVideoSource width=%d,height=%d", mVideoWidth, mVideoHeight);


	mVideoDecoder = OMXCodec::Create(
            mClient.interface(), meta, false, //!< using the stagefright's OMXIL
            source,
            NULL, 0, mNativeWindow);
 	

	if (mVideoDecoder == NULL)
	{
		UMLOG_ERR("UM_MediaPlayer::setVideoSource is NULL");
		mInitCheck = NO_INIT;
		return ;
	}
	UMLOG_ERR("setVideoSource OMXCodec::Create successful!");
	
	if (mNativeWindow != NULL)
	{
		initRenderer();
	}

}
status_t ExtendedWriter::addSource(const sp<MediaSource> &source) {
    if (mInitCheck != OK) {
        ALOGE("Init Check not OK, return");
        return mInitCheck;
    }

    if (mSource != NULL) {
        ALOGE("A source already exists, return");
        return UNKNOWN_ERROR;
    }

    sp<MetaData> meta = source->getFormat();

    const char *mime;
    CHECK(meta->findCString(kKeyMIMEType, &mime));

    if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_QCELP)) {
        mFormat = AUDIO_FORMAT_QCELP;
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_EVRC)) {
        mFormat = AUDIO_FORMAT_EVRC;
    } else {
        return UNKNOWN_ERROR;
    }

    int32_t channelCount;
    int32_t sampleRate;
    CHECK(meta->findInt32(kKeyChannelCount, &channelCount));
    CHECK_EQ(channelCount, 1);
    CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
    CHECK_EQ(sampleRate, 8000);

    mSource = source;

    return OK;
}
sp<MetaData> MPEG2TSSource::getFormat() {
    return mImpl->getFormat();
}
bool OmxDecoder::Init() {
  // Register sniffers if they are not already registered in this process.
  DataSource::RegisterDefaultSniffers();

  sp<DataSource> dataSource = new MediaStreamSource(mPluginHost, mDecoder);
  if (dataSource->initCheck()) {
    return false;
  }

  mPluginHost->SetMetaDataReadMode(mDecoder);

  sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
  if (extractor == NULL) {
    return false;
  }

  ssize_t audioTrackIndex = -1;
  ssize_t videoTrackIndex = -1;
  const char *audioMime = NULL;
  const char *videoMime = NULL;

  for (size_t i = 0; i < extractor->countTracks(); ++i) {
    sp<MetaData> meta = extractor->getTrackMetaData(i);

    const char *mime;
    if (!meta->findCString(kKeyMIMEType, &mime)) {
      continue;
    }

    if (videoTrackIndex == -1 && !strncasecmp(mime, "video/", 6)) {
      videoTrackIndex = i;
      videoMime = mime;
    } else if (audioTrackIndex == -1 && !strncasecmp(mime, "audio/", 6)) {
      audioTrackIndex = i;
      audioMime = mime;
    }
  }

  if (videoTrackIndex == -1 && audioTrackIndex == -1) {
    return false;
  }

  mPluginHost->SetPlaybackReadMode(mDecoder);

  int64_t totalDurationUs = 0;

#ifdef MOZ_WIDGET_GONK
  sp<IOMX> omx = GetOMX();
#else
  // OMXClient::connect() always returns OK and aborts fatally if
  // it can't connect. We may need to implement the connect functionality
  // ourselves if this proves to be an issue.
  if (mClient.connect() != OK) {
    LOG("OMXClient failed to connect");
  }
  sp<IOMX> omx = mClient.interface();
#endif

  sp<MediaSource> videoTrack;
  sp<MediaSource> videoSource;
  if (videoTrackIndex != -1 && (videoTrack = extractor->getTrack(videoTrackIndex)) != NULL) {
    uint32_t flags = GetVideoCreationFlags(mPluginHost);
    videoSource = OMXCodec::Create(omx,
                                   videoTrack->getFormat(),
                                   false, // decoder
                                   videoTrack,
                                   NULL,
                                   flags);
    if (videoSource == NULL) {
      LOG("OMXCodec failed to initialize video decoder for \"%s\"", videoMime);
      return false;
    }

    status_t status = videoSource->start();
    if (status != OK) {
      LOG("videoSource->start() failed with status %#x", status);
      return false;
    }

    int64_t durationUs;
    if (videoTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
      if (durationUs < 0)
        LOG("video duration %lld should be nonnegative", durationUs);
      if (durationUs > totalDurationUs)
        totalDurationUs = durationUs;
    }
  }

  sp<MediaSource> audioTrack;
  sp<MediaSource> audioSource;
  if (audioTrackIndex != -1 && (audioTrack = extractor->getTrack(audioTrackIndex)) != NULL)
  {
    if (!strcasecmp(audioMime, "audio/raw")) {
      audioSource = audioTrack;
    } else {
      audioSource = OMXCodec::Create(omx,
                                     audioTrack->getFormat(),
                                     false, // decoder
                                     audioTrack);
    }

    if (audioSource == NULL) {
      LOG("OMXCodec failed to initialize audio decoder for \"%s\"", audioMime);
      return false;
    }

    status_t status = audioSource->start();
    if (status != OK) {
      LOG("audioSource->start() failed with status %#x", status);
      return false;
    }

    int64_t durationUs;
    if (audioTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
      if (durationUs < 0)
        LOG("audio duration %lld should be nonnegative", durationUs);
      if (durationUs > totalDurationUs)
        totalDurationUs = durationUs;
    }
  }

  // set decoder state
  mVideoTrack = videoTrack;
  mVideoSource = videoSource;
  mAudioTrack = audioTrack;
  mAudioSource = audioSource;
  mDurationUs = totalDurationUs;

  if (mVideoSource.get() && !SetVideoFormat())
    return false;

  // To reliably get the channel and sample rate data we need to read from the
  // audio source until we get an INFO_FORMAT_CHANGED status.
  if (mAudioSource.get()) {
    if (mAudioSource->read(&mAudioBuffer) != INFO_FORMAT_CHANGED) {
      sp<MetaData> meta = mAudioSource->getFormat();
      if (!meta->findInt32(kKeyChannelCount, &mAudioChannels) ||
          !meta->findInt32(kKeySampleRate, &mAudioSampleRate)) {
        return false;
      }
      mAudioMetadataRead = true;

      if (mAudioChannels < 0) {
        LOG("audio channel count %d must be nonnegative", mAudioChannels);
        return false;
      }

      if (mAudioSampleRate < 0) {
        LOG("audio sample rate %d must be nonnegative", mAudioSampleRate);
        return false;
      }
    }
    else if (!SetAudioFormat()) {
        return false;
    }
  }
  return true;
}
sp<MetaData> DetectSyncSource::getFormat() {
    return mSource->getFormat();
}
static void playSource(OMXClient *client, sp<MediaSource> &source) {
    sp<MetaData> meta = source->getFormat();

    const char *mime;
    CHECK(meta->findCString(kKeyMIMEType, &mime));

    sp<MediaSource> rawSource;
    if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, mime)) {
        rawSource = source;
    } else {
        int flags = 0;
        if (gPreferSoftwareCodec) {
            flags |= OMXCodec::kPreferSoftwareCodecs;
        }
        if (gForceToUseHardwareCodec) {
            CHECK(!gPreferSoftwareCodec);
            flags |= OMXCodec::kHardwareCodecsOnly;
        }
        rawSource = OMXCodec::Create(
            client->interface(), meta, false /* createEncoder */, source,
            NULL /* matchComponentName */,
            flags,
            gSurface);

        if (rawSource == NULL) {
            fprintf(stderr, "Failed to instantiate decoder for '%s'.\n", mime);
            return;
        }
        displayAVCProfileLevelIfPossible(meta);
    }

    source.clear();

    status_t err = rawSource->start();

    if (err != OK) {
        fprintf(stderr, "rawSource returned error %d (0x%08x)\n", err, err);
        return;
    }

    if (gPlaybackAudio) {
        AudioPlayer *player = new AudioPlayer(NULL);
        player->setSource(rawSource);
        rawSource.clear();

        player->start(true /* sourceAlreadyStarted */);

        status_t finalStatus;
        while (!player->reachedEOS(&finalStatus)) {
            usleep(100000ll);
        }

        delete player;
        player = NULL;

        return;
    } else if (gReproduceBug >= 3 && gReproduceBug <= 5) {
        int64_t durationUs;
        CHECK(meta->findInt64(kKeyDuration, &durationUs));

        status_t err;
        MediaBuffer *buffer;
        MediaSource::ReadOptions options;
        int64_t seekTimeUs = -1;
        for (;;) {
            err = rawSource->read(&buffer, &options);
            options.clearSeekTo();

            bool shouldSeek = false;
            if (err == INFO_FORMAT_CHANGED) {
                CHECK(buffer == NULL);

                printf("format changed.\n");
                continue;
            } else if (err != OK) {
                printf("reached EOF.\n");

                shouldSeek = true;
            } else {
                int64_t timestampUs;
                CHECK(buffer->meta_data()->findInt64(kKeyTime, &timestampUs));

                bool failed = false;

                if (seekTimeUs >= 0) {
                    int64_t diff = timestampUs - seekTimeUs;

                    if (diff < 0) {
                        diff = -diff;
                    }

                    if ((gReproduceBug == 4 && diff > 500000)
                        || (gReproduceBug == 5 && timestampUs < 0)) {
                        printf("wanted: %.2f secs, got: %.2f secs\n",
                               seekTimeUs / 1E6, timestampUs / 1E6);

                        printf("ERROR: ");
                        failed = true;
                    }
                }

                printf("buffer has timestamp %lld us (%.2f secs)\n",
                       timestampUs, timestampUs / 1E6);

                buffer->release();
                buffer = NULL;

                if (failed) {
                    break;
                }

                shouldSeek = ((double)rand() / RAND_MAX) < 0.1;

                if (gReproduceBug == 3) {
                    shouldSeek = false;
                }
            }

            seekTimeUs = -1;

            if (shouldSeek) {
                seekTimeUs = (rand() * (float)durationUs) / RAND_MAX;
                options.setSeekTo(seekTimeUs);

                printf("seeking to %lld us (%.2f secs)\n",
                       seekTimeUs, seekTimeUs / 1E6);
            }
        }

        rawSource->stop();

        return;
    }

    int n = 0;
    int64_t startTime = getNowUs();

    long numIterationsLeft = gNumRepetitions;
    MediaSource::ReadOptions options;

    int64_t sumDecodeUs = 0;
    int64_t totalBytes = 0;

    Vector<int64_t> decodeTimesUs;

    while (numIterationsLeft-- > 0) {
        long numFrames = 0;

        MediaBuffer *buffer;

        for (;;) {
            int64_t startDecodeUs = getNowUs();
            status_t err = rawSource->read(&buffer, &options);
            int64_t delayDecodeUs = getNowUs() - startDecodeUs;

            options.clearSeekTo();

            if (err != OK) {
                CHECK(buffer == NULL);

                if (err == INFO_FORMAT_CHANGED) {
                    printf("format changed.\n");
                    continue;
                }

                break;
            }

            if (buffer->range_length() > 0) {
                if (gDisplayHistogram && n > 0) {
                    // Ignore the first time since it includes some setup
                    // cost.
                    decodeTimesUs.push(delayDecodeUs);
                }

                if ((n++ % 16) == 0) {
                    printf(".");
                    fflush(stdout);
                }
            }

            sumDecodeUs += delayDecodeUs;
            totalBytes += buffer->range_length();

            buffer->release();
            buffer = NULL;

            ++numFrames;
            if (gMaxNumFrames > 0 && numFrames == gMaxNumFrames) {
                break;
            }

            if (gReproduceBug == 1 && numFrames == 40) {
                printf("seeking past the end now.");
                options.setSeekTo(0x7fffffffL);
            } else if (gReproduceBug == 2 && numFrames == 40) {
                printf("seeking to 5 secs.");
                options.setSeekTo(5000000);
            }
        }

        printf("$");
        fflush(stdout);

        options.setSeekTo(0);
    }

    rawSource->stop();
    printf("\n");

    int64_t delay = getNowUs() - startTime;
    if (!strncasecmp("video/", mime, 6)) {
        printf("avg. %.2f fps\n", n * 1E6 / delay);

        printf("avg. time to decode one buffer %.2f usecs\n",
               (double)sumDecodeUs / n);

        printf("decoded a total of %d frame(s).\n", n);

        if (gDisplayHistogram) {
            displayDecodeHistogram(&decodeTimesUs);
        }
    } else if (!strncasecmp("audio/", mime, 6)) {
        // Frame count makes less sense for audio, as the output buffer
        // sizes may be different across decoders.
        printf("avg. %.2f KB/sec\n", totalBytes / 1024 * 1E6 / delay);

        printf("decoded a total of %lld bytes\n", totalBytes);
    }
}
bool OmxDecoder::SetVideoFormat() {
  sp<MetaData> format = mVideoSource->getFormat();

  // Stagefright's kKeyWidth and kKeyHeight are what MPAPI calls stride and
  // slice height. Stagefright only seems to use its kKeyStride and
  // kKeySliceHeight to initialize camera video formats.

#ifdef DEBUG
  int32_t unexpected;
  if (format->findInt32(kKeyStride, &unexpected))
    LOG("Expected kKeyWidth, but found kKeyStride %d", unexpected);
  if (format->findInt32(kKeySliceHeight, &unexpected))
    LOG("Expected kKeyHeight, but found kKeySliceHeight %d", unexpected);
#endif // DEBUG

  const char *componentName;

  if (!format->findInt32(kKeyWidth, &mVideoStride) ||
      !format->findInt32(kKeyHeight, &mVideoSliceHeight) ||
      !format->findCString(kKeyDecoderComponent, &componentName) ||
      !format->findInt32(kKeyColorFormat, &mVideoColorFormat) ) {
    return false;
  }

  if (mVideoStride <= 0) {
    LOG("stride %d must be positive", mVideoStride);
    return false;
  }

  if (mVideoSliceHeight <= 0) {
    LOG("slice height %d must be positive", mVideoSliceHeight);
    return false;
  }

  int32_t cropRight, cropBottom;
  if (!format->findRect(kKeyCropRect, &mVideoCropLeft, &mVideoCropTop,
                                      &cropRight, &cropBottom)) {
    mVideoCropLeft = 0;
    mVideoCropTop = 0;
    cropRight = mVideoStride - 1;
    cropBottom = mVideoSliceHeight - 1;
    LOG("crop rect not available, assuming no cropping");
  }

  if (mVideoCropLeft < 0 || mVideoCropLeft >= cropRight || cropRight >= mVideoStride ||
      mVideoCropTop < 0 || mVideoCropTop >= cropBottom || cropBottom >= mVideoSliceHeight) {
    LOG("invalid crop rect %d,%d-%d,%d", mVideoCropLeft, mVideoCropTop, cropRight, cropBottom);
    return false;
  }

  mVideoWidth = cropRight - mVideoCropLeft + 1;
  mVideoHeight = cropBottom - mVideoCropTop + 1;
  MOZ_ASSERT(mVideoWidth > 0 && mVideoWidth <= mVideoStride);
  MOZ_ASSERT(mVideoHeight > 0 && mVideoHeight <= mVideoSliceHeight);

  if (!format->findInt32(kKeyRotation, &mVideoRotation)) {
    mVideoRotation = 0;
    LOG("rotation not available, assuming 0");
  }

  if (mVideoRotation != 0 && mVideoRotation != 90 &&
      mVideoRotation != 180 && mVideoRotation != 270) {
    LOG("invalid rotation %d, assuming 0", mVideoRotation);
  }

  LOG("width: %d height: %d component: %s format: %#x stride: %d sliceHeight: %d rotation: %d crop: %d,%d-%d,%d",
      mVideoWidth, mVideoHeight, componentName, mVideoColorFormat,
      mVideoStride, mVideoSliceHeight, mVideoRotation,
      mVideoCropLeft, mVideoCropTop, cropRight, cropBottom);

  return true;
}
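Since the distinction between width/stride and height/sliceHeight is easy to trip over, here is a small hypothetical helper (assuming the planar YUV layout implied by the fields above) that locates the crop origin in the luma plane; it is illustrative only and not part of the original source.

// Hypothetical helper: byte address of the crop origin in the Y (luma) plane,
// assuming a planar layout where each row occupies 'stride' bytes and only the
// crop rectangle is visible.
static const uint8_t* lumaCropOrigin(const uint8_t* yPlane,
                                     int32_t stride,
                                     int32_t cropLeft,
                                     int32_t cropTop) {
    return yPlane + (size_t)cropTop * stride + cropLeft;
}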
sp<MetaData> DRMSource::getFormat() {
    return mOriginalMediaSource->getFormat();
}
Example #29
sp<MetaData> MPEG2TSSource::getFormat() {
    mFormat = mImpl->getFormat();
    return mFormat;
}
Example #30
bool OmxDecoder::Init() {
  // Register sniffers if they are not already registered in this process.
  DataSource::RegisterDefaultSniffers();

  sp<DataSource> dataSource = new MediaStreamSource(mPluginHost, mDecoder);
  if (dataSource->initCheck()) {
    return false;
  }

  mPluginHost->SetMetaDataReadMode(mDecoder);

  sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
  if (extractor == NULL) {
    return false;
  }

  ssize_t audioTrackIndex = -1;
  ssize_t videoTrackIndex = -1;
  const char *audioMime = NULL;

  for (size_t i = 0; i < extractor->countTracks(); ++i) {
    sp<MetaData> meta = extractor->getTrackMetaData(i);

    int32_t bitRate;
    if (!meta->findInt32(kKeyBitRate, &bitRate))
      bitRate = 0;

    const char *mime;
    if (!meta->findCString(kKeyMIMEType, &mime)) {
      continue;
    }

    if (videoTrackIndex == -1 && !strncasecmp(mime, "video/", 6)) {
      videoTrackIndex = i;
    } else if (audioTrackIndex == -1 && !strncasecmp(mime, "audio/", 6)) {
      audioTrackIndex = i;
      audioMime = mime;
    }
  }

  if (videoTrackIndex == -1 && audioTrackIndex == -1) {
    return false;
  }

  mPluginHost->SetPlaybackReadMode(mDecoder);

  int64_t totalDurationUs = 0;

  sp<MediaSource> videoTrack;
  sp<MediaSource> videoSource;
  if (videoTrackIndex != -1 && (videoTrack = extractor->getTrack(videoTrackIndex)) != NULL) {
    videoSource = OMXCodec::Create(GetOMX(),
                                   videoTrack->getFormat(),
                                   false, // decoder
                                   videoTrack,
                                   NULL,
                                   0); // flags (prefer hw codecs)
    if (videoSource == NULL) {
      return false;
    }

    if (videoSource->start() != OK) {
      return false;
    }

    int64_t durationUs;
    if (videoTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
      if (durationUs > totalDurationUs)
        totalDurationUs = durationUs;
    }
  }

  sp<MediaSource> audioTrack;
  sp<MediaSource> audioSource;
  if (audioTrackIndex != -1 && (audioTrack = extractor->getTrack(audioTrackIndex)) != NULL)
  {
    if (!strcasecmp(audioMime, "audio/raw")) {
      audioSource = audioTrack;
    } else {
      audioSource = OMXCodec::Create(GetOMX(),
                                     audioTrack->getFormat(),
                                     false, // decoder
                                     audioTrack);
    }
    if (audioSource == NULL) {
      return false;
    }
    if (audioSource->start() != OK) {
      return false;
    }

    int64_t durationUs;
    if (audioTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
      if (durationUs > totalDurationUs)
        totalDurationUs = durationUs;
    }
  }

  // set decoder state
  mVideoTrack = videoTrack;
  mVideoSource = videoSource;
  mAudioTrack = audioTrack;
  mAudioSource = audioSource;
  mDurationUs = totalDurationUs;

  if (mVideoSource.get() && !SetVideoFormat())
    return false;

  // To reliably get the channel and sample rate data we need to read from the
  // audio source until we get an INFO_FORMAT_CHANGED status.
  if (mAudioSource.get()) {
    if (mAudioSource->read(&mAudioBuffer) != INFO_FORMAT_CHANGED) {
      sp<MetaData> meta = mAudioSource->getFormat();
      if (!meta->findInt32(kKeyChannelCount, &mAudioChannels) ||
          !meta->findInt32(kKeySampleRate, &mAudioSampleRate)) {
        return false;
      }
      mAudioMetadataRead = true;
    }
    else if (!SetAudioFormat()) {
        return false;
    }
  }
  return true;
}