// Publishes a motion event (including every historical sample) on the input
// channel. Each sample is published under its own monotonically increasing
// internal sequence number; only the number assigned to the final sample is
// recorded in mPublishedSeqMap so the eventual "finished" signal can be
// correlated back to the caller-supplied seq.
status_t NativeInputEventSender::sendMotionEvent(uint32_t seq, const MotionEvent* event) {
    if (kDebugDispatchCycle) {
        ALOGD("channel '%s' ~ Sending motion event, seq=%u.", getInputChannelName(), seq);
    }

    const size_t sampleCount = event->getHistorySize() + 1;
    uint32_t publishedSeq = 0;
    for (size_t sample = 0; sample < sampleCount; sample++) {
        publishedSeq = mNextPublishedSeq++;
        const status_t status = mInputPublisher.publishMotionEvent(
                publishedSeq,
                event->getDeviceId(), event->getSource(), event->getAction(),
                event->getActionButton(), event->getFlags(), event->getEdgeFlags(),
                event->getMetaState(), event->getButtonState(),
                event->getXOffset(), event->getYOffset(),
                event->getXPrecision(), event->getYPrecision(),
                event->getDownTime(), event->getHistoricalEventTime(sample),
                event->getPointerCount(), event->getPointerProperties(),
                event->getHistoricalRawPointerCoords(0, sample));
        if (status) {
            ALOGW("Failed to send motion event sample on channel '%s'. status=%d",
                    getInputChannelName(), status);
            return status;
        }
    }

    mPublishedSeqMap.add(publishedSeq, seq);
    return OK;
}
// Establishes an HTTP connection to |uri|, augmenting the caller-supplied
// headers with a default User-Agent when none was provided. The effective
// headers and the URI are cached for later use and the cached content size
// is invalidated. The |offset| argument is ignored.
status_t MediaHTTP::connect(
        const char *uri,
        const KeyedVector<String8, String8> *headers,
        off64_t /* offset */) {
    if (mInitCheck != OK) {
        return mInitCheck;
    }

    KeyedVector<String8, String8> effectiveHeaders;
    if (headers != NULL) {
        effectiveHeaders = *headers;
    }

    // Only supply a default User-Agent if the caller did not set one.
    if (effectiveHeaders.indexOfKey(String8("User-Agent")) < 0) {
        effectiveHeaders.add(String8("User-Agent"), String8(MakeUserAgent().c_str()));
    }

    const bool connected = mHTTPConnection->connect(uri, &effectiveHeaders);

    mLastHeaders = effectiveHeaders;
    mLastURI = uri;
    mCachedSizeValid = false;

    return connected ? OK : UNKNOWN_ERROR;
}
// Allocates an OMX node, routing software components to a lazily created
// in-process OMX instance and everything else to the remote instance.
// Successfully allocated local nodes are remembered in mIsLocalNode so
// later calls can be routed to the right instance.
status_t MuxOMX::allocateNode(
        const char *name, const sp<IOMXObserver> &observer, node_id *node) {
    Mutex::Autolock autoLock(mLock);

    sp<IOMX> target;
    if (!IsSoftwareComponent(name)) {
        target = mRemoteOMX;
    } else {
        // Instantiate the local OMX the first time a software component
        // is requested.
        if (mLocalOMX == NULL) {
            mLocalOMX = new OMX;
        }
        target = mLocalOMX;
    }

    const status_t err = target->allocateNode(name, observer, node);
    if (err != OK) {
        return err;
    }

    if (target == mLocalOMX) {
        mIsLocalNode.add(*node, true);
    }
    return OK;
}
// Builds a lookup table mapping each tag id to its definition entry.
// |definitions| must point at an array of |length| entries that outlives
// the returned map, since the map stores raw pointers into the array.
KeyedVector<uint16_t, const TagDefinition_t*> TiffWriter::buildTagMap(
        const TagDefinition_t* definitions, size_t length) {
    KeyedVector<uint16_t, const TagDefinition_t*> tagMap;
    for (size_t idx = 0; idx < length; ++idx) {
        const TagDefinition_t* entry = &definitions[idx];
        tagMap.add(entry->tagId, entry);
    }
    return tagMap;
}
// Converts a Java HashMap<String, String> into a native KeyedVector.
//
// On success *pIsOK is set to true. If any key or value is not a String,
// an IllegalArgumentException is thrown on |env|, *pIsOK is set to false,
// and the partially filled vector is returned. Local references created
// while iterating are released eagerly so large maps cannot exhaust the
// JNI local reference table.
//
// Fix: the error paths previously leaked local references — the non-String
// key/value object itself, and |jkey| when the value check failed. They are
// now released before breaking out of the loop.
static KeyedVector<String8, String8> HashMapToKeyedVector(
        JNIEnv *env, jobject &hashMap, bool* pIsOK) {
    jclass clazz = gFields.stringClassId;
    KeyedVector<String8, String8> keyedVector;
    *pIsOK = true;

    jobject entrySet = env->CallObjectMethod(hashMap, gFields.hashmap.entrySet);
    if (entrySet) {
        jobject iterator = env->CallObjectMethod(entrySet, gFields.set.iterator);
        if (iterator) {
            jboolean hasNext = env->CallBooleanMethod(iterator, gFields.iterator.hasNext);
            while (hasNext) {
                jobject entry = env->CallObjectMethod(iterator, gFields.iterator.next);
                if (entry) {
                    jobject obj = env->CallObjectMethod(entry, gFields.entry.getKey);
                    if (obj == NULL || !env->IsInstanceOf(obj, clazz)) {
                        jniThrowException(env, "java/lang/IllegalArgumentException",
                                          "HashMap key is not a String");
                        // Release the non-String key object (if any) before bailing.
                        if (obj != NULL) {
                            env->DeleteLocalRef(obj);
                        }
                        env->DeleteLocalRef(entry);
                        *pIsOK = false;
                        break;
                    }
                    jstring jkey = static_cast<jstring>(obj);

                    obj = env->CallObjectMethod(entry, gFields.entry.getValue);
                    if (obj == NULL || !env->IsInstanceOf(obj, clazz)) {
                        jniThrowException(env, "java/lang/IllegalArgumentException",
                                          "HashMap value is not a String");
                        // Release the key and the non-String value before bailing.
                        if (obj != NULL) {
                            env->DeleteLocalRef(obj);
                        }
                        env->DeleteLocalRef(jkey);
                        env->DeleteLocalRef(entry);
                        *pIsOK = false;
                        break;
                    }
                    jstring jvalue = static_cast<jstring>(obj);

                    String8 key = JStringToString8(env, jkey);
                    String8 value = JStringToString8(env, jvalue);
                    keyedVector.add(key, value);

                    env->DeleteLocalRef(jkey);
                    env->DeleteLocalRef(jvalue);
                    hasNext = env->CallBooleanMethod(iterator, gFields.iterator.hasNext);
                }
                env->DeleteLocalRef(entry);
            }
            env->DeleteLocalRef(iterator);
        }
        env->DeleteLocalRef(entrySet);
    }
    return keyedVector;
}
// NDK entry point: builds a key request for the DRM session (or key-set id)
// identified by |scope|.
//
// Validates arguments, translates the NDK key type into the DrmPlugin
// enumeration, repackages |init| data and the optional parameters into the
// container types the Drm layer expects, and forwards the call to
// IDrm::getKeyRequest. On success the opaque request blob is stored in
// mObj->mKeyRequest and exposed via |keyRequest|/|keyRequestSize|; the
// pointer stays owned by |mObj| and is only valid until the next request.
// NOTE(review): the plugin-supplied defaultUrl is discarded here — confirm
// callers do not need it.
EXPORT
media_status_t AMediaDrm_getKeyRequest(AMediaDrm *mObj, const AMediaDrmScope *scope,
        const uint8_t *init, size_t initSize, const char *mimeType, AMediaDrmKeyType keyType,
        const AMediaDrmKeyValue *optionalParameters, size_t numOptionalParameters,
        const uint8_t **keyRequest, size_t *keyRequestSize) {
    if (!mObj || mObj->mDrm == NULL) {
        return AMEDIA_ERROR_INVALID_OBJECT;
    }
    if (!mimeType || !scope || !keyRequest || !keyRequestSize) {
        return AMEDIA_ERROR_INVALID_PARAMETER;
    }

    // |scope| must refer to a session previously opened on this AMediaDrm.
    List<idvec_t>::iterator iter;
    if (!findId(mObj, *scope, iter)) {
        return AMEDIA_DRM_SESSION_NOT_OPENED;
    }

    Vector<uint8_t> mdInit;
    mdInit.appendArray(init, initSize);

    // Map the NDK key-type enum onto the plugin enum; reject unknown values.
    DrmPlugin::KeyType mdKeyType;
    switch (keyType) {
        case KEY_TYPE_STREAMING:
            mdKeyType = DrmPlugin::kKeyType_Streaming;
            break;
        case KEY_TYPE_OFFLINE:
            mdKeyType = DrmPlugin::kKeyType_Offline;
            break;
        case KEY_TYPE_RELEASE:
            mdKeyType = DrmPlugin::kKeyType_Release;
            break;
        default:
            return AMEDIA_ERROR_INVALID_PARAMETER;
    }

    // Repackage the C-style key/value pairs for the native interface.
    KeyedVector<String8, String8> mdOptionalParameters;
    for (size_t i = 0; i < numOptionalParameters; i++) {
        mdOptionalParameters.add(String8(optionalParameters[i].mKey),
                String8(optionalParameters[i].mValue));
    }

    String8 defaultUrl;
    status_t status = mObj->mDrm->getKeyRequest(*iter, mdInit, String8(mimeType),
            mdKeyType, mdOptionalParameters, mObj->mKeyRequest, defaultUrl);
    if (status != OK) {
        return translateStatus(status);
    } else {
        *keyRequest = mObj->mKeyRequest.array();
        *keyRequestSize = mObj->mKeyRequest.size();
    }
    return AMEDIA_OK;
}
// Returns the per-process client record for |pid|, creating and registering
// a new one (bound to the shared SMI/EBI/REG heaps) when none exists yet.
// createClientHeaps() is invoked on every lookup, not just on creation.
// Caller must already hold the lock protecting mClients ("Locked" suffix).
GPUHardware::Client& GPUHardware::getClientLocked(pid_t pid) {
    ssize_t slot = mClients.indexOfKey(pid);
    if (slot < 0) {
        Client fresh;
        fresh.pid = pid;
        fresh.smi.heap = mSMIHeap;
        fresh.ebi.heap = mEBIHeap;
        fresh.reg.heap = mREGHeap;
        slot = mClients.add(pid, fresh);
    }
    Client& client = mClients.editValueAt(slot);
    client.createClientHeaps();
    return client;
}
// Generates a selection Rule for every split in every group. Each split's
// rule is produced by RuleGenerator and then simplified; splits whose rule
// simplifies away (NULL) are omitted from the returned map.
KeyedVector<SplitDescription, sp<Rule> > SplitSelector::getRules() const {
    KeyedVector<SplitDescription, sp<Rule> > ruleMap;
    const size_t groupCount = mGroups.size();
    for (size_t groupIdx = 0; groupIdx < groupCount; groupIdx++) {
        const SortedVector<SplitDescription>& splits = mGroups[groupIdx];
        const size_t splitCount = splits.size();
        for (size_t splitIdx = 0; splitIdx < splitCount; splitIdx++) {
            sp<Rule> simplified = Rule::simplify(RuleGenerator::generate(splits, splitIdx));
            if (simplified != NULL) {
                ruleMap.add(splits[splitIdx], simplified);
            }
        }
    }
    return ruleMap;
}
// Publishes a single key event on the input channel. The freshly assigned
// internal sequence number is mapped back to the caller's |seq| so the
// later "finished" acknowledgement can be correlated.
status_t NativeInputEventSender::sendKeyEvent(uint32_t seq, const KeyEvent* event) {
    if (kDebugDispatchCycle) {
        ALOGD("channel '%s' ~ Sending key event, seq=%u.", getInputChannelName(), seq);
    }

    const uint32_t publishedSeq = mNextPublishedSeq++;
    const status_t status = mInputPublisher.publishKeyEvent(
            publishedSeq,
            event->getDeviceId(), event->getSource(), event->getAction(),
            event->getFlags(), event->getKeyCode(), event->getScanCode(),
            event->getMetaState(), event->getRepeatCount(),
            event->getDownTime(), event->getEventTime());
    if (status) {
        ALOGW("Failed to send key event on channel '%s'. status=%d",
                getInputChannelName(), status);
        return status;
    }

    mPublishedSeqMap.add(publishedSeq, seq);
    return OK;
}
// Reads and parses one transport-stream packet from the data source.
//
// Returns the parser status; on a short read it signals EOS to the parser
// (for non-negative reads) and returns ERROR_END_OF_STREAM, or propagates
// the negative read error. When the packet completes a seekable access unit
// for one of our tracks, the (timeUs -> file offset) pair is recorded as a
// sync point for that track, and the table is pruned once it grows past its
// cap by dropping entries from whichever end is farther in time from the
// new sync point.
status_t MPEG2TSExtractor::feedMore() {
    Mutex::Autolock autoLock(mLock);

    uint8_t packet[kTSPacketSize];
    ssize_t n = mDataSource->readAt(mOffset, packet, kTSPacketSize);

    if (n < (ssize_t)kTSPacketSize) {
        if (n >= 0) {
            // Partial (or zero-length) read: treat as a clean end of stream.
            mParser->signalEOS(ERROR_END_OF_STREAM);
        }
        return (n < 0) ? (status_t)n : ERROR_END_OF_STREAM;
    }

    // The sync event captures the packet's file offset so a later seek can
    // resume parsing from here.
    ATSParser::SyncEvent event(mOffset);
    mOffset += n;
    status_t err = mParser->feedTSPacket(packet, kTSPacketSize, &event);
    if (event.isInit()) {
        // Locate the track this sync event belongs to and record its sync point.
        for (size_t i = 0; i < mSourceImpls.size(); ++i) {
            if (mSourceImpls[i].get() == event.getMediaSource().get()) {
                KeyedVector<int64_t, off64_t> *syncPoints = &mSyncPoints.editItemAt(i);
                syncPoints->add(event.getTimeUs(), event.getOffset());
                // We're keeping the size of the sync points at most 5mb per a track.
                size_t size = syncPoints->size();
                if (size >= 327680) {
                    int64_t firstTimeUs = syncPoints->keyAt(0);
                    int64_t lastTimeUs = syncPoints->keyAt(size - 1);
                    // Evict 4096 entries from the end that is farther (in
                    // time) from the just-added sync point.
                    if (event.getTimeUs() - firstTimeUs > lastTimeUs - event.getTimeUs()) {
                        syncPoints->removeItemsAt(0, 4096);
                    } else {
                        syncPoints->removeItemsAt(size - 4096, 4096);
                    }
                }
                break;
            }
        }
    }
    return err;
}
// HAL callback: a new TV input device has appeared.
//
// Registers an empty per-device connection table (under mLock), then builds
// a Java TvInputHardwareInfo through its Builder — device id, type, HDMI
// port id (HDMI inputs only), audio type and audio address (when audio is
// present) — and delivers it to the Java layer via
// gTvInputHalClassInfo.deviceAvailable on mThiz. Local JNI references
// created here are released before returning.
void JTvInputHal::onDeviceAvailable(const tv_input_device_info_t& info) {
    {
        Mutex::Autolock autoLock(&mLock);
        mConnections.add(info.device_id, KeyedVector<int, Connection>());
    }
    JNIEnv* env = AndroidRuntime::getJNIEnv();
    jobject builder = env->NewObject(
            gTvInputHardwareInfoBuilderClassInfo.clazz,
            gTvInputHardwareInfoBuilderClassInfo.constructor);
    env->CallObjectMethod(
            builder, gTvInputHardwareInfoBuilderClassInfo.deviceId, info.device_id);
    env->CallObjectMethod(
            builder, gTvInputHardwareInfoBuilderClassInfo.type, info.type);
    if (info.type == TV_INPUT_TYPE_HDMI) {
        // Only HDMI inputs carry a physical port id.
        env->CallObjectMethod(
                builder,
                gTvInputHardwareInfoBuilderClassInfo.hdmiPortId,
                info.hdmi.port_id);
    }
    env->CallObjectMethod(
            builder, gTvInputHardwareInfoBuilderClassInfo.audioType, info.audio_type);
    if (info.audio_type != AUDIO_DEVICE_NONE) {
        jstring audioAddress = env->NewStringUTF(info.audio_address);
        env->CallObjectMethod(
                builder,
                gTvInputHardwareInfoBuilderClassInfo.audioAddress,
                audioAddress);
        env->DeleteLocalRef(audioAddress);
    }
    jobject infoObject = env->CallObjectMethod(builder, gTvInputHardwareInfoBuilderClassInfo.build);
    env->CallVoidMethod(
            mThiz, gTvInputHalClassInfo.deviceAvailable, infoObject);
    env->DeleteLocalRef(builder);
    env->DeleteLocalRef(infoObject);
}
// Looks up (or creates) the cached IMemoryHeap proxy for |binder|.
// A cache hit bumps the reference count atomically; a miss wraps the binder
// in a new heap_info_t with count 1 and inserts it. The cache lock is held
// for the duration of the call.
sp<IMemoryHeap> HeapCache::find_heap(const sp<IBinder>& binder) {
    Mutex::Autolock _l(mHeapCacheLock);
    const ssize_t idx = mHeapCache.indexOfKey(binder);
    if (idx < 0) {
        // Not cached yet: create the proxy and remember it.
        heap_info_t info;
        info.heap = interface_cast<IMemoryHeap>(binder);
        info.count = 1;
        //ALOGD("adding binder=%p, heap=%p, count=%d",
        //      binder.get(), info.heap.get(), info.count);
        mHeapCache.add(binder, info);
        return info.heap;
    }
    heap_info_t& info = mHeapCache.editValueAt(idx);
    ALOGD_IF(VERBOSE, "found binder=%p, heap=%p, size=%d, fd=%d, count=%d",
            binder.get(), info.heap.get(),
            static_cast<BpMemoryHeap*>(info.heap.get())->mSize,
            static_cast<BpMemoryHeap*>(info.heap.get())->mHeapId,
            info.count);
    android_atomic_inc(&info.count);
    return info.heap;
}
// Demuxes |path| with NuMediaExtractor and decodes the first selected audio
// and/or video track with MediaCodec, discarding the decoded output (video
// is rendered into |surface| when one is configured). Prints per-track
// throughput statistics when all tracks reach EOS. Returns 0 on success,
// 1 if the extractor cannot be set up.
static int decode(
        const android::sp<android::ALooper> &looper,
        const char *path,
        bool useAudio,
        bool useVideo,
        const android::sp<android::Surface> &surface) {
    using namespace android;

    // Poll timeout (us) for dequeueing codec input/output buffers.
    static int64_t kTimeout = 500ll;

    sp<NuMediaExtractor> extractor = new NuMediaExtractor;
    if (extractor->setDataSource(path) != OK) {
        fprintf(stderr, "unable to instantiate extractor.\n");
        return 1;
    }

    // Per-track decode state, keyed by extractor track index.
    KeyedVector<size_t, CodecState> stateByTrack;

    bool haveAudio = false;
    bool haveVideo = false;
    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<AMessage> format;
        status_t err = extractor->getTrackFormat(i, &format);
        CHECK_EQ(err, (status_t)OK);

        AString mime;
        CHECK(format->findString("mime", &mime));

        bool isAudio = !strncasecmp(mime.c_str(), "audio/", 6);
        bool isVideo = !strncasecmp(mime.c_str(), "video/", 6);

        // Select at most one audio and one video track, per the use* flags.
        if (useAudio && !haveAudio && isAudio) {
            haveAudio = true;
        } else if (useVideo && !haveVideo && isVideo) {
            haveVideo = true;
        } else {
            continue;
        }

        ALOGV("selecting track %d", i);

        err = extractor->selectTrack(i);
        CHECK_EQ(err, (status_t)OK);

        // add() returns the insertion index; edit in place to initialize.
        CodecState *state =
            &stateByTrack.editValueAt(stateByTrack.add(i, CodecState()));

        state->mNumBytesDecoded = 0;
        state->mNumBuffersDecoded = 0;
        state->mIsAudio = isAudio;

        state->mCodec = MediaCodec::CreateByType(
                looper, mime.c_str(), false /* encoder */);

        CHECK(state->mCodec != NULL);

        err = state->mCodec->configure(
                format, isVideo ? surface : NULL,
                NULL /* crypto */,
                0 /* flags */);

        CHECK_EQ(err, (status_t)OK);

        state->mSignalledInputEOS = false;
        state->mSawOutputEOS = false;
    }

    CHECK(!stateByTrack.isEmpty());

    int64_t startTimeUs = ALooper::GetNowUs();

    // Start every codec and fetch its input/output buffer arrays.
    for (size_t i = 0; i < stateByTrack.size(); ++i) {
        CodecState *state = &stateByTrack.editValueAt(i);

        sp<MediaCodec> codec = state->mCodec;

        CHECK_EQ((status_t)OK, codec->start());

        CHECK_EQ((status_t)OK, codec->getInputBuffers(&state->mInBuffers));
        CHECK_EQ((status_t)OK, codec->getOutputBuffers(&state->mOutBuffers));

        ALOGV("got %d input and %d output buffers",
              state->mInBuffers.size(), state->mOutBuffers.size());
    }

    bool sawInputEOS = false;

    // Main loop: feed one input sample (or EOS) per iteration, then drain
    // available output from each codec, until every track saw output EOS.
    for (;;) {
        if (!sawInputEOS) {
            size_t trackIndex;
            status_t err = extractor->getSampleTrackIndex(&trackIndex);

            if (err != OK) {
                ALOGV("saw input eos");
                sawInputEOS = true;
            } else {
                CodecState *state = &stateByTrack.editValueFor(trackIndex);

                size_t index;
                err = state->mCodec->dequeueInputBuffer(&index, kTimeout);

                if (err == OK) {
                    ALOGV("filling input buffer %d", index);

                    const sp<ABuffer> &buffer = state->mInBuffers.itemAt(index);

                    err = extractor->readSampleData(buffer);
                    CHECK_EQ(err, (status_t)OK);

                    int64_t timeUs;
                    err = extractor->getSampleTime(&timeUs);
                    CHECK_EQ(err, (status_t)OK);

                    uint32_t bufferFlags = 0;

                    err = state->mCodec->queueInputBuffer(
                            index, 0 /* offset */, buffer->size(), timeUs, bufferFlags);

                    CHECK_EQ(err, (status_t)OK);

                    extractor->advance();
                } else {
                    // Only a timeout is acceptable here.
                    CHECK_EQ(err, -EAGAIN);
                }
            }
        } else {
            // Input exhausted: queue an EOS buffer on each codec that has
            // not been signalled yet.
            for (size_t i = 0; i < stateByTrack.size(); ++i) {
                CodecState *state = &stateByTrack.editValueAt(i);

                if (!state->mSignalledInputEOS) {
                    size_t index;
                    status_t err = state->mCodec->dequeueInputBuffer(&index, kTimeout);

                    if (err == OK) {
                        ALOGV("signalling input EOS on track %d", i);

                        err = state->mCodec->queueInputBuffer(
                                index, 0 /* offset */, 0 /* size */,
                                0ll /* timeUs */, MediaCodec::BUFFER_FLAG_EOS);

                        CHECK_EQ(err, (status_t)OK);

                        state->mSignalledInputEOS = true;
                    } else {
                        CHECK_EQ(err, -EAGAIN);
                    }
                }
            }
        }

        bool sawOutputEOSOnAllTracks = true;
        for (size_t i = 0; i < stateByTrack.size(); ++i) {
            CodecState *state = &stateByTrack.editValueAt(i);
            if (!state->mSawOutputEOS) {
                sawOutputEOSOnAllTracks = false;
                break;
            }
        }

        if (sawOutputEOSOnAllTracks) {
            break;
        }

        // Drain any available output from each still-active codec.
        for (size_t i = 0; i < stateByTrack.size(); ++i) {
            CodecState *state = &stateByTrack.editValueAt(i);

            if (state->mSawOutputEOS) {
                continue;
            }

            size_t index;
            size_t offset;
            size_t size;
            int64_t presentationTimeUs;
            uint32_t flags;
            status_t err = state->mCodec->dequeueOutputBuffer(
                    &index, &offset, &size, &presentationTimeUs, &flags,
                    kTimeout);

            if (err == OK) {
                ALOGV("draining output buffer %d, time = %lld us",
                      index, presentationTimeUs);

                ++state->mNumBuffersDecoded;
                state->mNumBytesDecoded += size;

                err = state->mCodec->releaseOutputBuffer(index);
                CHECK_EQ(err, (status_t)OK);

                if (flags & MediaCodec::BUFFER_FLAG_EOS) {
                    ALOGV("reached EOS on output.");
                    state->mSawOutputEOS = true;
                }
            } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
                // Output buffers were reallocated; refresh our copies.
                ALOGV("INFO_OUTPUT_BUFFERS_CHANGED");
                CHECK_EQ((status_t)OK,
                         state->mCodec->getOutputBuffers(&state->mOutBuffers));

                ALOGV("got %d output buffers", state->mOutBuffers.size());
            } else if (err == INFO_FORMAT_CHANGED) {
                sp<AMessage> format;
                CHECK_EQ((status_t)OK, state->mCodec->getOutputFormat(&format));

                ALOGV("INFO_FORMAT_CHANGED: %s", format->debugString().c_str());
            } else {
                CHECK_EQ(err, -EAGAIN);
            }
        }
    }

    int64_t elapsedTimeUs = ALooper::GetNowUs() - startTimeUs;

    // Release the codecs and report per-track throughput.
    for (size_t i = 0; i < stateByTrack.size(); ++i) {
        CodecState *state = &stateByTrack.editValueAt(i);

        CHECK_EQ((status_t)OK, state->mCodec->release());

        if (state->mIsAudio) {
            printf("track %zu: %" PRId64 " bytes received. %.2f KB/sec\n",
                   i,
                   state->mNumBytesDecoded,
                   state->mNumBytesDecoded * 1E6 / 1024 / elapsedTimeUs);
        } else {
            printf("track %zu: %" PRId64 " frames decoded, %.2f fps. %" PRId64
                   " bytes received. %.2f KB/sec\n",
                   i,
                   state->mNumBuffersDecoded,
                   state->mNumBuffersDecoded * 1E6 / elapsedTimeUs,
                   state->mNumBytesDecoded,
                   state->mNumBytesDecoded * 1E6 / 1024 / elapsedTimeUs);
        }
    }

    return 0;
}
// Default constructor: seed the list with the EGL_NONE terminator so the
// vector is a valid (empty) EGL attribute array from construction.
EGLAttributeVector() { mList.add(EGL_NONE, EGL_NONE); }
// Server-side dispatcher for IMediaPlayerService binder transactions.
// Unmarshals the arguments for each opcode from |data|, invokes the
// corresponding service method, and marshals the result into |reply|.
// Unknown opcodes fall through to BBinder::onTransact.
status_t BnMediaPlayerService::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case CREATE_URL: {
            CHECK_INTERFACE(IMediaPlayerService, data, reply);
            pid_t pid = data.readInt32();
            sp<IMediaPlayerClient> client =
                interface_cast<IMediaPlayerClient>(data.readStrongBinder());
            const char* url = data.readCString();

            // Optional HTTP headers: marshalled as a count followed by
            // key/value string pairs. NULL is passed when none were sent.
            KeyedVector<String8, String8> headers;
            int32_t numHeaders = data.readInt32();
            for (int i = 0; i < numHeaders; ++i) {
                String8 key = data.readString8();
                String8 value = data.readString8();
                headers.add(key, value);
            }

            sp<IMediaPlayer> player = create(
                    pid, client, url, numHeaders > 0 ? &headers : NULL);

            reply->writeStrongBinder(player->asBinder());
            return NO_ERROR;
        } break;
        case CREATE_FD: {
            CHECK_INTERFACE(IMediaPlayerService, data, reply);
            pid_t pid = data.readInt32();
            sp<IMediaPlayerClient> client =
                interface_cast<IMediaPlayerClient>(data.readStrongBinder());
            // dup() so we own the descriptor independently of the Parcel.
            int fd = dup(data.readFileDescriptor());
            int64_t offset = data.readInt64();
            int64_t length = data.readInt64();
            sp<IMediaPlayer> player = create(pid, client, fd, offset, length);
            reply->writeStrongBinder(player->asBinder());
            return NO_ERROR;
        } break;
        case DECODE_URL: {
            CHECK_INTERFACE(IMediaPlayerService, data, reply);
            const char* url = data.readCString();
            uint32_t sampleRate;
            int numChannels;
            int format;
            sp<IMemory> player = decode(url, &sampleRate, &numChannels, &format);
            reply->writeInt32(sampleRate);
            reply->writeInt32(numChannels);
            reply->writeInt32(format);
            reply->writeStrongBinder(player->asBinder());
            return NO_ERROR;
        } break;
        case DECODE_FD: {
            CHECK_INTERFACE(IMediaPlayerService, data, reply);
            int fd = dup(data.readFileDescriptor());
            int64_t offset = data.readInt64();
            int64_t length = data.readInt64();
            uint32_t sampleRate;
            int numChannels;
            int format;
            sp<IMemory> player = decode(fd, offset, length,
                    &sampleRate, &numChannels, &format);
            reply->writeInt32(sampleRate);
            reply->writeInt32(numChannels);
            reply->writeInt32(format);
            reply->writeStrongBinder(player->asBinder());
            return NO_ERROR;
        } break;
        case SNOOP: {
            CHECK_INTERFACE(IMediaPlayerService, data, reply);
            sp<IMemory> snooped_audio = snoop();
            reply->writeStrongBinder(snooped_audio->asBinder());
            return NO_ERROR;
        } break;
        case CREATE_MEDIA_RECORDER: {
            CHECK_INTERFACE(IMediaPlayerService, data, reply);
            pid_t pid = data.readInt32();
            sp<IMediaRecorder> recorder = createMediaRecorder(pid);
            reply->writeStrongBinder(recorder->asBinder());
            return NO_ERROR;
        } break;
        case CREATE_METADATA_RETRIEVER: {
            CHECK_INTERFACE(IMediaPlayerService, data, reply);
            pid_t pid = data.readInt32();
            sp<IMediaMetadataRetriever> retriever = createMetadataRetriever(pid);
            reply->writeStrongBinder(retriever->asBinder());
            return NO_ERROR;
        } break;
        case GET_OMX: {
            CHECK_INTERFACE(IMediaPlayerService, data, reply);
            sp<IOMX> omx = getOMX();
            reply->writeStrongBinder(omx->asBinder());
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}
// Remuxes the selected audio/video tracks of |path| into |outputFileName|
// using MediaMuxer (no re-encoding; MP4 container by default). When
// |enableTrim| is set, only samples between trimStartTimeMs and
// trimEndTimeMs are written, with trimming starting at the first sync frame
// inside the window; timestamps are rebased to that sync frame.
// |rotationDegrees| is recorded as an orientation hint. Returns 0 on
// success, non-zero (or the failed fd) on setup failure.
static int muxing(
        const char *path,
        bool useAudio,
        bool useVideo,
        const char *outputFileName,
        bool enableTrim,
        int trimStartTimeMs,
        int trimEndTimeMs,
        int rotationDegrees,
        MediaMuxer::OutputFormat container = MediaMuxer::OUTPUT_FORMAT_MPEG_4) {
    sp<NuMediaExtractor> extractor = new NuMediaExtractor;
    if (extractor->setDataSource(NULL /* httpService */, path) != OK) {
        fprintf(stderr, "unable to instantiate extractor. %s\n", path);
        return 1;
    }

    if (outputFileName == NULL) {
        outputFileName = "/sdcard/muxeroutput.mp4";
    }

    ALOGV("input file %s, output file %s", path, outputFileName);
    ALOGV("useAudio %d, useVideo %d", useAudio, useVideo);

    int fd = open(outputFileName, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR,
            S_IRUSR | S_IWUSR);
    if (fd < 0) {
        ALOGE("couldn't open file");
        return fd;
    }
    sp<MediaMuxer> muxer = new MediaMuxer(fd, container);
    // The local descriptor is closed right after handing it to the muxer;
    // presumably MediaMuxer keeps its own reference — TODO confirm.
    close(fd);

    size_t trackCount = extractor->countTracks();
    // Map the extractor's track index to the muxer's track index.
    KeyedVector<size_t, ssize_t> trackIndexMap;
    size_t bufferSize = 1 * 1024 * 1024;  // default buffer size is 1MB.

    bool haveAudio = false;
    bool haveVideo = false;

    int64_t trimStartTimeUs = trimStartTimeMs * 1000;
    int64_t trimEndTimeUs = trimEndTimeMs * 1000;
    bool trimStarted = false;
    int64_t trimOffsetTimeUs = 0;

    for (size_t i = 0; i < trackCount; ++i) {
        sp<AMessage> format;
        status_t err = extractor->getTrackFormat(i, &format);
        CHECK_EQ(err, (status_t)OK);
        ALOGV("extractor getTrackFormat: %s", format->debugString().c_str());

        AString mime;
        CHECK(format->findString("mime", &mime));

        bool isAudio = !strncasecmp(mime.c_str(), "audio/", 6);
        bool isVideo = !strncasecmp(mime.c_str(), "video/", 6);

        // Select at most one audio and one video track.
        if (useAudio && !haveAudio && isAudio) {
            haveAudio = true;
        } else if (useVideo && !haveVideo && isVideo) {
            haveVideo = true;
        } else {
            continue;
        }

        if (isVideo) {
            int width , height;
            CHECK(format->findInt32("width", &width));
            CHECK(format->findInt32("height", &height));
            bufferSize = width * height * 4;  // Assuming it is maximally 4BPP
        }

        int64_t duration;
        CHECK(format->findInt64("durationUs", &duration));

        // Since we got the duration now, correct the start time.
        if (enableTrim) {
            if (trimStartTimeUs > duration) {
                fprintf(stderr, "Warning: trimStartTimeUs > duration,"
                                " reset to 0\n");
                trimStartTimeUs = 0;
            }
        }

        ALOGV("selecting track %zu", i);

        err = extractor->selectTrack(i);
        CHECK_EQ(err, (status_t)OK);

        ssize_t newTrackIndex = muxer->addTrack(format);
        if (newTrackIndex < 0) {
            // Unsupported tracks are skipped (their samples are dropped below).
            fprintf(stderr, "%s track (%zu) unsupported by muxer\n",
                    isAudio ? "audio" : "video", i);
        } else {
            trackIndexMap.add(i, newTrackIndex);
        }
    }

    int64_t muxerStartTimeUs = ALooper::GetNowUs();

    bool sawInputEOS = false;

    size_t trackIndex = -1;
    sp<ABuffer> newBuffer = new ABuffer(bufferSize);

    muxer->setOrientationHint(rotationDegrees);
    muxer->start();

    while (!sawInputEOS) {
        status_t err = extractor->getSampleTrackIndex(&trackIndex);
        if (err != OK) {
            ALOGV("saw input eos, err %d", err);
            sawInputEOS = true;
            break;
        } else if (trackIndexMap.indexOfKey(trackIndex) < 0) {
            // Sample belongs to a track the muxer rejected; skip it.
            // ALOGV("skipping input from unsupported track %zu", trackIndex);
            extractor->advance();
            continue;
        } else {
            // ALOGV("reading sample from track index %zu\n", trackIndex);
            err = extractor->readSampleData(newBuffer);
            CHECK_EQ(err, (status_t)OK);

            int64_t timeUs;
            err = extractor->getSampleTime(&timeUs);
            CHECK_EQ(err, (status_t)OK);

            sp<MetaData> meta;
            err = extractor->getSampleMeta(&meta);
            CHECK_EQ(err, (status_t)OK);

            uint32_t sampleFlags = 0;
            int32_t val;
            if (meta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
                // We only support BUFFER_FLAG_SYNCFRAME in the flag for now.
                sampleFlags |= MediaCodec::BUFFER_FLAG_SYNCFRAME;

                // We turn on trimming at the sync frame.
                if (enableTrim && timeUs > trimStartTimeUs &&
                        timeUs <= trimEndTimeUs) {
                    if (trimStarted == false) {
                        // Timestamps are rebased to the first trimmed sync frame.
                        trimOffsetTimeUs = timeUs;
                    }
                    trimStarted = true;
                }
            }
            // Trim can end at any non-sync frame.
            if (enableTrim && timeUs > trimEndTimeUs) {
                trimStarted = false;
            }

            if (!enableTrim || (enableTrim && trimStarted)) {
                err = muxer->writeSampleData(newBuffer,
                        trackIndexMap.valueFor(trackIndex),
                        timeUs - trimOffsetTimeUs, sampleFlags);
            }

            extractor->advance();
        }
    }

    muxer->stop();
    newBuffer.clear();
    trackIndexMap.clear();

    int64_t elapsedTimeUs = ALooper::GetNowUs() - muxerStartTimeUs;
    fprintf(stderr, "SUCCESS: muxer generate the video in %" PRId64 " ms\n",
            elapsedTimeUs / 1000);

    return 0;
}
// Writes the complete TIFF file to |out| in the requested endianness:
// file header, the chain of IFDs, then the byte strips supplied by
// |sources|, word-aligned and zero-padded.
//
// Each IFD whose strip offsets are still uninitialized is laid out at the
// current end of file, and its key is paired with the end-of-strip offset
// in |offsetVector| so the written data can be validated afterwards. The
// number of such IFDs must match |sourcesCount|.
//
// Fix: the strip-writing loop matched a source by sources[j]->getIfd() but
// then wrote from sources[i] — the wrong source whenever |sources| was not
// ordered the same as the IFD layout. It now writes from sources[j].
status_t TiffWriter::write(Output* out, StripSource** sources, size_t sourcesCount,
        Endianness end) {
    status_t ret = OK;
    EndianOutput endOut(out, end);

    if (mIfd == NULL) {
        ALOGE("%s: Tiff header is empty.", __FUNCTION__);
        return BAD_VALUE;
    }

    uint32_t totalSize = getTotalSize();

    KeyedVector<uint32_t, uint32_t> offsetVector;

    // Assign strip offsets to every IFD that still needs them.
    for (size_t i = 0; i < mNamedIfds.size(); ++i) {
        if (mNamedIfds[i]->uninitializedOffsets()) {
            uint32_t stripSize = mNamedIfds[i]->getStripSize();
            if (mNamedIfds[i]->setStripOffset(totalSize) != OK) {
                ALOGE("%s: Could not set strip offsets.", __FUNCTION__);
                return BAD_VALUE;
            }
            totalSize += stripSize;
            WORD_ALIGN(totalSize);
            offsetVector.add(mNamedIfds.keyAt(i), totalSize);
        }
    }

    size_t offVecSize = offsetVector.size();
    if (offVecSize != sourcesCount) {
        ALOGE("%s: Mismatch between number of IFDs with uninitialized strips (%zu) and"
              " sources (%zu).", __FUNCTION__, offVecSize, sourcesCount);
        return BAD_VALUE;
    }

    BAIL_ON_FAIL(writeFileHeader(endOut), ret);

    // Write the IFD chain.
    uint32_t offset = FILE_HEADER_SIZE;
    sp<TiffIfd> ifd = mIfd;
    while(ifd != NULL) {
        BAIL_ON_FAIL(ifd->writeData(offset, &endOut), ret);
        offset += ifd->getSize();
        ifd = ifd->getNextIfd();
    }

    if (LOG_NDEBUG == 0) {
        log();
    }

    // Write each IFD's byte strips from its matching source, in layout order.
    for (size_t i = 0; i < offVecSize; ++i) {
        uint32_t ifdKey = offsetVector.keyAt(i);
        uint32_t sizeToWrite = mNamedIfds[ifdKey]->getStripSize();
        bool found = false;
        for (size_t j = 0; j < sourcesCount; ++j) {
            if (sources[j]->getIfd() == ifdKey) {
                // Write from the matched source (index j, not i).
                if ((ret = sources[j]->writeToStream(endOut, sizeToWrite)) != OK) {
                    ALOGE("%s: Could not write to stream, received %d.", __FUNCTION__, ret);
                    return ret;
                }
                ZERO_TILL_WORD(&endOut, sizeToWrite, ret);
                found = true;
                break;
            }
        }
        if (!found) {
            ALOGE("%s: No stream for byte strips for IFD %u", __FUNCTION__, ifdKey);
            return BAD_VALUE;
        }
        assert(offsetVector[i] == endOut.getCurrentOffset());
    }

    return ret;
}
// Builds a VendorTagDescriptor from a HAL vendor_tag_ops_t vtable.
//
// Queries the tag count, enumerates every tag, and records for each one its
// name, section and type; tags below CAMERA_METADATA_VENDOR_TAG_BOUNDARY or
// with missing name/section or an invalid type cause BAD_VALUE. A second
// pass builds the tag -> section-index map and the per-section reverse
// (name -> tag) maps. On success the new descriptor is returned through
// |descriptor|.
status_t VendorTagDescriptor::createDescriptorFromOps(const vendor_tag_ops_t* vOps,
            /*out*/
            sp<VendorTagDescriptor>& descriptor) {
    if (vOps == NULL) {
        ALOGE("%s: vendor_tag_ops argument was NULL.", __FUNCTION__);
        return BAD_VALUE;
    }

    int tagCount = vOps->get_tag_count(vOps);
    if (tagCount < 0 || tagCount > INT32_MAX) {
        ALOGE("%s: tag count %d from vendor ops is invalid.", __FUNCTION__, tagCount);
        return BAD_VALUE;
    }

    Vector<uint32_t> tagArray;
    LOG_ALWAYS_FATAL_IF(tagArray.resize(tagCount) != tagCount,
            "%s: too many (%u) vendor tags defined.", __FUNCTION__, tagCount);

    vOps->get_all_tags(vOps, /*out*/tagArray.editArray());

    sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
    desc->mTagCount = tagCount;

    SortedVector<String8> sections;
    KeyedVector<uint32_t, String8> tagToSectionMap;

    // First pass: validate each tag and record its name, section and type.
    for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
        uint32_t tag = tagArray[i];
        if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
            ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
            return BAD_VALUE;
        }
        const char *tagName = vOps->get_tag_name(vOps, tag);
        if (tagName == NULL) {
            ALOGE("%s: no tag name defined for vendor tag %d.", __FUNCTION__, tag);
            return BAD_VALUE;
        }
        desc->mTagToNameMap.add(tag, String8(tagName));
        const char *sectionName = vOps->get_section_name(vOps, tag);
        if (sectionName == NULL) {
            ALOGE("%s: no section name defined for vendor tag %d.", __FUNCTION__, tag);
            return BAD_VALUE;
        }

        String8 sectionString(sectionName);

        sections.add(sectionString);
        tagToSectionMap.add(tag, sectionString);

        int tagType = vOps->get_tag_type(vOps, tag);
        if (tagType < 0 || tagType >= NUM_TYPES) {
            ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
            return BAD_VALUE;
        }
        desc->mTagToTypeMap.add(tag, tagType);
    }

    desc->mSections = sections;

    // Second pass: resolve section indices and build the reverse maps.
    for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
        uint32_t tag = tagArray[i];
        String8 sectionString = tagToSectionMap.valueFor(tag);

        // Set up tag to section index map
        ssize_t index = sections.indexOf(sectionString);
        LOG_ALWAYS_FATAL_IF(index < 0, "index %zd must be non-negative", index);
        desc->mTagToSectionMap.add(tag, static_cast<uint32_t>(index));

        // Set up reverse mapping
        ssize_t reverseIndex = -1;
        if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
            // First tag seen for this section: allocate its name->tag map.
            KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
            reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
        }
        desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
    }

    descriptor = desc;
    return OK;
}
// Server-side dispatcher for IMediaPlayer binder transactions. Unmarshals
// the arguments for each opcode from |data|, invokes the corresponding
// player method, and marshals the result(s) into |reply|. Unknown opcodes
// fall through to BBinder::onTransact.
status_t BnMediaPlayer::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch(code) {
        case DISCONNECT: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            disconnect();
            return NO_ERROR;
        } break;
        case SET_DATA_SOURCE_URL: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            const char* url = data.readCString();
            // Optional HTTP headers: count followed by key/value string pairs.
            KeyedVector<String8, String8> headers;
            int32_t numHeaders = data.readInt32();
            for (int i = 0; i < numHeaders; ++i) {
                String8 key = data.readString8();
                String8 value = data.readString8();
                headers.add(key, value);
            }
            reply->writeInt32(setDataSource(url, numHeaders > 0 ? &headers : NULL));
            return NO_ERROR;
        } break;
        case SET_DATA_SOURCE_FD: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            int fd = data.readFileDescriptor();
            int64_t offset = data.readInt64();
            int64_t length = data.readInt64();
            reply->writeInt32(setDataSource(fd, offset, length));
            return NO_ERROR;
        }
        case SET_DATA_SOURCE_STREAM: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            sp<IStreamSource> source =
                interface_cast<IStreamSource>(data.readStrongBinder());
            reply->writeInt32(setDataSource(source));
            return NO_ERROR;
        }
        case SET_VIDEO_SURFACETEXTURE: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            sp<ISurfaceTexture> surfaceTexture =
                    interface_cast<ISurfaceTexture>(data.readStrongBinder());
            reply->writeInt32(setVideoSurfaceTexture(surfaceTexture));
            return NO_ERROR;
        } break;
        case PREPARE_ASYNC: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(prepareAsync());
            return NO_ERROR;
        } break;
        case START: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(start());
            return NO_ERROR;
        } break;
        case STOP: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(stop());
            return NO_ERROR;
        } break;
        case IS_PLAYING: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            bool state;
            status_t ret = isPlaying(&state);
            // Reply layout: playing flag first, then the status code.
            reply->writeInt32(state);
            reply->writeInt32(ret);
            return NO_ERROR;
        } break;
        case PAUSE: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(pause());
            return NO_ERROR;
        } break;
        case SEEK_TO: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(seekTo(data.readInt32()));
            return NO_ERROR;
        } break;
        case GET_CURRENT_POSITION: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            int msec;
            status_t ret = getCurrentPosition(&msec);
            reply->writeInt32(msec);
            reply->writeInt32(ret);
            return NO_ERROR;
        } break;
        case GET_DURATION: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            int msec;
            status_t ret = getDuration(&msec);
            reply->writeInt32(msec);
            reply->writeInt32(ret);
            return NO_ERROR;
        } break;
        case RESET: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(reset());
            return NO_ERROR;
        } break;
        case SET_AUDIO_STREAM_TYPE: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(setAudioStreamType(data.readInt32()));
            return NO_ERROR;
        } break;
        case SET_LOOPING: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(setLooping(data.readInt32()));
            return NO_ERROR;
        } break;
        case SET_VOLUME: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            float leftVolume = data.readFloat();
            float rightVolume = data.readFloat();
            reply->writeInt32(setVolume(leftVolume, rightVolume));
            return NO_ERROR;
        } break;
        case INVOKE: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            // invoke() writes its own reply; propagate its status directly.
            status_t result = invoke(data, reply);
            return result;
        } break;
        case SET_METADATA_FILTER: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(setMetadataFilter(data));
            return NO_ERROR;
        } break;
        case GET_METADATA: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            bool update_only = static_cast<bool>(data.readInt32());
            bool apply_filter = static_cast<bool>(data.readInt32());
            // getMetadata() fills |reply| with the metadata records; the
            // status code is then prepended at position 0 and the parcel
            // rewound so the caller reads status first.
            const status_t retcode = getMetadata(update_only, apply_filter, reply);
            reply->setDataPosition(0);
            reply->writeInt32(retcode);
            reply->setDataPosition(0);
            return NO_ERROR;
        } break;
        case SET_AUX_EFFECT_SEND_LEVEL: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(setAuxEffectSendLevel(data.readFloat()));
            return NO_ERROR;
        } break;
        case ATTACH_AUX_EFFECT: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            reply->writeInt32(attachAuxEffect(data.readInt32()));
            return NO_ERROR;
        } break;
        case SET_PARAMETER: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            int key = data.readInt32();

            // Copy the remainder of |data| into a fresh Parcel so the
            // implementation receives just the parameter payload.
            Parcel request;
            if (data.dataAvail() > 0) {
                request.appendFrom(
                        const_cast<Parcel *>(&data), data.dataPosition(), data.dataAvail());
            }
            request.setDataPosition(0);
            reply->writeInt32(setParameter(key, request));
            return NO_ERROR;
        } break;
        case GET_PARAMETER: {
            CHECK_INTERFACE(IMediaPlayer, data, reply);
            return getParameter(data.readInt32(), reply);
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}
// Server-side dispatcher for IMediaMetadataRetriever binder transactions.
// Unmarshals the arguments for each opcode, invokes the corresponding
// retriever method, and marshals results into |reply|. Binders (frames,
// album art) and strings are only written on success, preceded by a status
// word, so NULL never crosses the binder boundary. Unknown opcodes fall
// through to BBinder::onTransact.
status_t BnMediaMetadataRetriever::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch (code) {
        case DISCONNECT: {
            CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
            disconnect();
            return NO_ERROR;
        } break;
        case SET_DATA_SOURCE_URL: {
            CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);

            // Optional http service: a flag word followed by the binder.
            sp<IMediaHTTPService> httpService;
            if (data.readInt32()) {
                httpService =
                    interface_cast<IMediaHTTPService>(data.readStrongBinder());
            }

            const char* srcUrl = data.readCString();

            // Optional HTTP headers: count followed by key/value string pairs.
            KeyedVector<String8, String8> headers;
            size_t numHeaders = (size_t) data.readInt64();
            for (size_t i = 0; i < numHeaders; ++i) {
                String8 key = data.readString8();
                String8 value = data.readString8();
                headers.add(key, value);
            }

            reply->writeInt32(
                    setDataSource(
                        httpService, srcUrl, numHeaders > 0 ? &headers : NULL));

            return NO_ERROR;
        } break;
        case SET_DATA_SOURCE_FD: {
            CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
            int fd = data.readFileDescriptor();
            int64_t offset = data.readInt64();
            int64_t length = data.readInt64();
            reply->writeInt32(setDataSource(fd, offset, length));
            return NO_ERROR;
        } break;
        case SET_DATA_SOURCE_CALLBACK: {
            CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
            sp<IDataSource> source =
                interface_cast<IDataSource>(data.readStrongBinder());
            reply->writeInt32(setDataSource(source));
            return NO_ERROR;
        } break;
        case GET_FRAME_AT_TIME: {
            CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
            int64_t timeUs = data.readInt64();
            int option = data.readInt32();
            ALOGV("getTimeAtTime: time(%" PRId64 " us) and option(%d)", timeUs, option);
#ifndef DISABLE_GROUP_SCHEDULE_HACK
            setSchedPolicy(data);
#endif
            sp<IMemory> bitmap = getFrameAtTime(timeUs, option);
            if (bitmap != 0) {  // Don't send NULL across the binder interface
                reply->writeInt32(NO_ERROR);
                reply->writeStrongBinder(IInterface::asBinder(bitmap));
            } else {
                reply->writeInt32(UNKNOWN_ERROR);
            }
#ifndef DISABLE_GROUP_SCHEDULE_HACK
            restoreSchedPolicy();
#endif
            return NO_ERROR;
        } break;
        case EXTRACT_ALBUM_ART: {
            CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
#ifndef DISABLE_GROUP_SCHEDULE_HACK
            setSchedPolicy(data);
#endif
            sp<IMemory> albumArt = extractAlbumArt();
            if (albumArt != 0) {  // Don't send NULL across the binder interface
                reply->writeInt32(NO_ERROR);
                reply->writeStrongBinder(IInterface::asBinder(albumArt));
            } else {
                reply->writeInt32(UNKNOWN_ERROR);
            }
#ifndef DISABLE_GROUP_SCHEDULE_HACK
            restoreSchedPolicy();
#endif
            return NO_ERROR;
        } break;
        case EXTRACT_METADATA: {
            CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
#ifndef DISABLE_GROUP_SCHEDULE_HACK
            setSchedPolicy(data);
#endif
            int keyCode = data.readInt32();
            const char* value = extractMetadata(keyCode);
            if (value != NULL) {  // Don't send NULL across the binder interface
                reply->writeInt32(NO_ERROR);
                reply->writeCString(value);
            } else {
                reply->writeInt32(UNKNOWN_ERROR);
            }
#ifndef DISABLE_GROUP_SCHEDULE_HACK
            restoreSchedPolicy();
#endif
            return NO_ERROR;
        } break;
        default:
            return BBinder::onTransact(code, data, reply, flags);
    }
}