static int main(int argc, char** argv) {
    // Skip over the first argument.
    argc--;
    argv++;

    bool generateFlag = false;
    String8 targetConfigStr;
    Vector<String8> splitApkPaths;
    String8 baseApkPath;
    while (argc > 0) {
        const String8 arg(*argv);
        if (arg == "--target") {
            argc--;
            argv++;
            if (argc < 1) {
                fprintf(stderr, "error: missing parameter for --target.\n");
                usage();
                return 1;
            }
            targetConfigStr.setTo(*argv);
        } else if (arg == "--split") {
            argc--;
            argv++;
            if (argc < 1) {
                fprintf(stderr, "error: missing parameter for --split.\n");
                usage();
                return 1;
            }
            splitApkPaths.add(String8(*argv));
        } else if (arg == "--base") {
            argc--;
            argv++;
            if (argc < 1) {
                fprintf(stderr, "error: missing parameter for --base.\n");
                usage();
                return 1;
            }

            if (baseApkPath.size() > 0) {
                fprintf(stderr, "error: multiple --base flags not allowed.\n");
                usage();
                return 1;
            }
            baseApkPath.setTo(*argv);
        } else if (arg == "--generate") {
            generateFlag = true;
        } else if (arg == "--help") {
            help();
            return 0;
        } else {
            fprintf(stderr, "error: unknown argument '%s'.\n", arg.string());
            usage();
            return 1;
        }
        argc--;
        argv++;
    }

    if (!generateFlag && targetConfigStr == "") {
        usage();
        return 1;
    }

    if (baseApkPath.size() == 0) {
        fprintf(stderr, "error: missing --base argument.\n");
        usage();
        return 1;
    }

    // Find out some details about the base APK.
    AppInfo baseAppInfo;
    if (!getAppInfo(baseApkPath, baseAppInfo)) {
        fprintf(stderr, "error: unable to read base APK: '%s'.\n", baseApkPath.string());
        return 1;
    }

    SplitDescription targetSplit;
    if (!generateFlag) {
        if (!SplitDescription::parse(targetConfigStr, &targetSplit)) {
            fprintf(stderr, "error: invalid --target config: '%s'.\n",
                    targetConfigStr.string());
            usage();
            return 1;
        }

        // We don't want to match on things that will change at run-time
        // (orientation, w/h, etc.).
        removeRuntimeQualifiers(&targetSplit.config);
    }

    splitApkPaths.add(baseApkPath);

    KeyedVector<String8, Vector<SplitDescription> > apkPathSplitMap;
    KeyedVector<SplitDescription, String8> splitApkPathMap;
    Vector<SplitDescription> splitConfigs;
    const size_t splitCount = splitApkPaths.size();
    for (size_t i = 0; i < splitCount; i++) {
        Vector<SplitDescription> splits = extractSplitDescriptionsFromApk(splitApkPaths[i]);
        if (splits.isEmpty()) {
            fprintf(stderr, "error: invalid --split path: '%s'. No splits found.\n",
                    splitApkPaths[i].string());
            usage();
            return 1;
        }
        apkPathSplitMap.replaceValueFor(splitApkPaths[i], splits);
        const size_t apkSplitDescriptionCount = splits.size();
        for (size_t j = 0; j < apkSplitDescriptionCount; j++) {
            splitApkPathMap.replaceValueFor(splits[j], splitApkPaths[i]);
        }
        splitConfigs.appendVector(splits);
    }

    if (!generateFlag) {
        Vector<SplitDescription> matchingConfigs = select(targetSplit, splitConfigs);
        const size_t matchingConfigCount = matchingConfigs.size();
        SortedVector<String8> matchingSplitPaths;
        for (size_t i = 0; i < matchingConfigCount; i++) {
            matchingSplitPaths.add(splitApkPathMap.valueFor(matchingConfigs[i]));
        }

        const size_t matchingSplitApkPathCount = matchingSplitPaths.size();
        for (size_t i = 0; i < matchingSplitApkPathCount; i++) {
            if (matchingSplitPaths[i] != baseApkPath) {
                fprintf(stdout, "%s\n", matchingSplitPaths[i].string());
            }
        }
    } else {
        generate(apkPathSplitMap, baseApkPath);
    }
    return 0;
}
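
// The --target handling in main() above can be factored into a small helper.
// This is a hypothetical sketch, not part of the original tool: it reuses only
// SplitDescription::parse() and removeRuntimeQualifiers() exactly as they are
// called in main(), and assumes the same headers and types are in scope.
static bool parseTargetConfig(const String8& targetConfigStr, SplitDescription* outSplit) {
    if (!SplitDescription::parse(targetConfigStr, outSplit)) {
        fprintf(stderr, "error: invalid --target config: '%s'.\n",
                targetConfigStr.string());
        return false;
    }
    // Strip qualifiers that change at run-time (orientation, w/h, etc.),
    // mirroring the behavior in main().
    removeRuntimeQualifiers(&outSplit->config);
    return true;
}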
static int muxing(
        const char *path,
        bool useAudio,
        bool useVideo,
        const char *outputFileName,
        bool enableTrim,
        int trimStartTimeMs,
        int trimEndTimeMs,
        int rotationDegrees,
        MediaMuxer::OutputFormat container = MediaMuxer::OUTPUT_FORMAT_MPEG_4) {
    sp<NuMediaExtractor> extractor = new NuMediaExtractor;
    if (extractor->setDataSource(NULL /* httpService */, path) != OK) {
        fprintf(stderr, "unable to instantiate extractor. %s\n", path);
        return 1;
    }

    if (outputFileName == NULL) {
        outputFileName = "/sdcard/muxeroutput.mp4";
    }

    ALOGV("input file %s, output file %s", path, outputFileName);
    ALOGV("useAudio %d, useVideo %d", useAudio, useVideo);

    int fd = open(outputFileName, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR,
                  S_IRUSR | S_IWUSR);
    if (fd < 0) {
        ALOGE("couldn't open file");
        return fd;
    }
    sp<MediaMuxer> muxer = new MediaMuxer(fd, container);
    close(fd);

    size_t trackCount = extractor->countTracks();
    // Map the extractor's track index to the muxer's track index.
    KeyedVector<size_t, ssize_t> trackIndexMap;
    size_t bufferSize = 1 * 1024 * 1024;  // default buffer size is 1MB.

    bool haveAudio = false;
    bool haveVideo = false;

    int64_t trimStartTimeUs = trimStartTimeMs * 1000;
    int64_t trimEndTimeUs = trimEndTimeMs * 1000;
    bool trimStarted = false;
    int64_t trimOffsetTimeUs = 0;

    for (size_t i = 0; i < trackCount; ++i) {
        sp<AMessage> format;
        status_t err = extractor->getTrackFormat(i, &format);
        CHECK_EQ(err, (status_t)OK);
        ALOGV("extractor getTrackFormat: %s", format->debugString().c_str());

        AString mime;
        CHECK(format->findString("mime", &mime));

        bool isAudio = !strncasecmp(mime.c_str(), "audio/", 6);
        bool isVideo = !strncasecmp(mime.c_str(), "video/", 6);

        if (useAudio && !haveAudio && isAudio) {
            haveAudio = true;
        } else if (useVideo && !haveVideo && isVideo) {
            haveVideo = true;
        } else {
            continue;
        }

        if (isVideo) {
            int width, height;
            CHECK(format->findInt32("width", &width));
            CHECK(format->findInt32("height", &height));
            bufferSize = width * height * 4;  // Assuming it is maximally 4BPP
        }

        int64_t duration;
        CHECK(format->findInt64("durationUs", &duration));

        // Since we got the duration now, correct the start time.
        if (enableTrim) {
            if (trimStartTimeUs > duration) {
                fprintf(stderr, "Warning: trimStartTimeUs > duration,"
                        " reset to 0\n");
                trimStartTimeUs = 0;
            }
        }

        ALOGV("selecting track %zu", i);

        err = extractor->selectTrack(i);
        CHECK_EQ(err, (status_t)OK);

        ssize_t newTrackIndex = muxer->addTrack(format);
        if (newTrackIndex < 0) {
            fprintf(stderr, "%s track (%zu) unsupported by muxer\n",
                    isAudio ? "audio" : "video", i);
        } else {
            trackIndexMap.add(i, newTrackIndex);
        }
    }

    int64_t muxerStartTimeUs = ALooper::GetNowUs();

    bool sawInputEOS = false;

    size_t trackIndex = -1;
    sp<ABuffer> newBuffer = new ABuffer(bufferSize);

    muxer->setOrientationHint(rotationDegrees);
    muxer->start();

    while (!sawInputEOS) {
        status_t err = extractor->getSampleTrackIndex(&trackIndex);
        if (err != OK) {
            ALOGV("saw input eos, err %d", err);
            sawInputEOS = true;
            break;
        } else if (trackIndexMap.indexOfKey(trackIndex) < 0) {
            // ALOGV("skipping input from unsupported track %zu", trackIndex);
            extractor->advance();
            continue;
        } else {
            // ALOGV("reading sample from track index %zu\n", trackIndex);
            err = extractor->readSampleData(newBuffer);
            CHECK_EQ(err, (status_t)OK);

            int64_t timeUs;
            err = extractor->getSampleTime(&timeUs);
            CHECK_EQ(err, (status_t)OK);

            sp<MetaData> meta;
            err = extractor->getSampleMeta(&meta);
            CHECK_EQ(err, (status_t)OK);

            uint32_t sampleFlags = 0;
            int32_t val;
            if (meta->findInt32(kKeyIsSyncFrame, &val) && val != 0) {
                // We only support BUFFER_FLAG_SYNCFRAME in the flag for now.
                sampleFlags |= MediaCodec::BUFFER_FLAG_SYNCFRAME;

                // We turn on trimming at the sync frame.
                if (enableTrim && timeUs > trimStartTimeUs &&
                        timeUs <= trimEndTimeUs) {
                    if (trimStarted == false) {
                        trimOffsetTimeUs = timeUs;
                    }
                    trimStarted = true;
                }
            }
            // Trim can end at any non-sync frame.
            if (enableTrim && timeUs > trimEndTimeUs) {
                trimStarted = false;
            }

            if (!enableTrim || (enableTrim && trimStarted)) {
                err = muxer->writeSampleData(newBuffer,
                                             trackIndexMap.valueFor(trackIndex),
                                             timeUs - trimOffsetTimeUs,
                                             sampleFlags);
            }

            extractor->advance();
        }
    }

    muxer->stop();
    newBuffer.clear();
    trackIndexMap.clear();

    int64_t elapsedTimeUs = ALooper::GetNowUs() - muxerStartTimeUs;
    fprintf(stderr, "SUCCESS: muxer generate the video in %" PRId64 " ms\n",
            elapsedTimeUs / 1000);

    return 0;
}
status_t VendorTagDescriptor::createDescriptorFromOps(const vendor_tag_ops_t* vOps,
            /*out*/
            sp<VendorTagDescriptor>& descriptor) {
    if (vOps == NULL) {
        ALOGE("%s: vendor_tag_ops argument was NULL.", __FUNCTION__);
        return BAD_VALUE;
    }

    int tagCount = vOps->get_tag_count(vOps);
    if (tagCount < 0 || tagCount > INT32_MAX) {
        ALOGE("%s: tag count %d from vendor ops is invalid.", __FUNCTION__, tagCount);
        return BAD_VALUE;
    }

    Vector<uint32_t> tagArray;
    LOG_ALWAYS_FATAL_IF(tagArray.resize(tagCount) != tagCount,
            "%s: too many (%u) vendor tags defined.", __FUNCTION__, tagCount);

    vOps->get_all_tags(vOps, /*out*/tagArray.editArray());

    sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
    desc->mTagCount = tagCount;

    SortedVector<String8> sections;
    KeyedVector<uint32_t, String8> tagToSectionMap;

    for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
        uint32_t tag = tagArray[i];
        if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
            ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
            return BAD_VALUE;
        }
        const char *tagName = vOps->get_tag_name(vOps, tag);
        if (tagName == NULL) {
            ALOGE("%s: no tag name defined for vendor tag %d.", __FUNCTION__, tag);
            return BAD_VALUE;
        }
        desc->mTagToNameMap.add(tag, String8(tagName));
        const char *sectionName = vOps->get_section_name(vOps, tag);
        if (sectionName == NULL) {
            ALOGE("%s: no section name defined for vendor tag %d.", __FUNCTION__, tag);
            return BAD_VALUE;
        }

        String8 sectionString(sectionName);

        sections.add(sectionString);
        tagToSectionMap.add(tag, sectionString);

        int tagType = vOps->get_tag_type(vOps, tag);
        if (tagType < 0 || tagType >= NUM_TYPES) {
            ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
            return BAD_VALUE;
        }
        desc->mTagToTypeMap.add(tag, tagType);
    }

    desc->mSections = sections;

    for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
        uint32_t tag = tagArray[i];
        String8 sectionString = tagToSectionMap.valueFor(tag);

        // Set up tag to section index map
        ssize_t index = sections.indexOf(sectionString);
        LOG_ALWAYS_FATAL_IF(index < 0, "index %zd must be non-negative", index);
        desc->mTagToSectionMap.add(tag, static_cast<uint32_t>(index));

        // Set up reverse mapping
        ssize_t reverseIndex = -1;
        if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
            KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
            reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
        }
        desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
    }

    descriptor = desc;
    return OK;
}
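
// A hypothetical, illustrative way to drive createDescriptorFromOps() with a
// fake vendor_tag_ops_t. The callback field names and argument shapes are
// taken from the calls made in the function above (get_tag_count,
// get_all_tags, get_tag_name, get_section_name, get_tag_type); the single
// example tag, its section name, and its type are made up, and the sketch
// assumes the camera_metadata type enum (TYPE_INT32, NUM_TYPES) is in scope,
// as the NUM_TYPES check above suggests.
namespace {

const uint32_t kFakeTag = CAMERA_METADATA_VENDOR_TAG_BOUNDARY + 1;

int fakeGetTagCount(const vendor_tag_ops_t*) { return 1; }
void fakeGetAllTags(const vendor_tag_ops_t*, uint32_t* tagArray) { tagArray[0] = kFakeTag; }
const char* fakeGetSectionName(const vendor_tag_ops_t*, uint32_t) { return "com.example.fake"; }
const char* fakeGetTagName(const vendor_tag_ops_t*, uint32_t) { return "exampleTag"; }
int fakeGetTagType(const vendor_tag_ops_t*, uint32_t) { return TYPE_INT32; }

// Builds a descriptor for the single fake tag defined above.
status_t buildFakeDescriptor(/*out*/ sp<VendorTagDescriptor>& descriptor) {
    vendor_tag_ops_t ops = vendor_tag_ops_t();  // zero-initialize unused fields
    ops.get_tag_count = fakeGetTagCount;
    ops.get_all_tags = fakeGetAllTags;
    ops.get_section_name = fakeGetSectionName;
    ops.get_tag_name = fakeGetTagName;
    ops.get_tag_type = fakeGetTagType;
    return VendorTagDescriptor::createDescriptorFromOps(&ops, descriptor);
}

}  // namespace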