Code Example #1
void mpeg_video_recorder::process_frame(const void* data, uints size, uint64 timestamp_ns, uint nbatchframes, bool video_end){

	if (!m_IsRecording){
		// this shouldn't be here, but the last frame isn't unique, so we must skip frames sent after the "last" frame
		return;
	}

	if (m_iLastFramesize > 0 && m_uiFrameIndex > 0){
		coid::uint64 timeMs = (timestamp_ns - m_uiLastTimestampNs) / 1000000.0;
		MP4Duration frameDuration = timeMs * 90; // 90,000 ticks per second in the MP4 container
		MP4WriteSample(m_hMp4FileHandle, m_iVideoTrackID, m_pLastNals[0].p_payload, m_iLastFramesize, frameDuration);
		m_uiLastTimestampNs = timestamp_ns;
	}
	else if(m_uiFrameIndex == 0){
		m_uiLastTimestampNs = timestamp_ns;
	}
	else{
		log(WARNINGMESSAGE("Frameskip detected!"));
	}

	uchar * ucharData = (uchar*)data;

	uint width4 = GetDivisibleBy4(m_iWidth);
	uint halfWidth4 = GetDivisibleBy4(m_iWidth >> 1);

	x264_picture_t pic_in,pic_out;
	x264_picture_init(&pic_in);
	pic_in.img.i_csp = X264_CSP_I420;
	pic_in.img.i_plane = 3;
	pic_in.img.plane[0] = ucharData;
	pic_in.img.plane[1] = ucharData + width4;
	pic_in.img.plane[2] = ucharData + 2 * width4 + halfWidth4;
	pic_in.img.i_stride[0] = width4 + halfWidth4;
	pic_in.img.i_stride[2] = pic_in.img.i_stride[1] = (width4 + halfWidth4) * 2;
	
	int i_nals;
	
	m_iLastFramesize = x264_encoder_encode(m_pEncoder, &m_pLastNals, &i_nals, &pic_in, &pic_out);

	m_uiFrameIndex++;

	if (video_end){
		if (m_iLastFramesize > 0){
			MP4Duration frameDuration = 33 * 90;  // 90,000 ticks per second in the MP4 container
			MP4WriteSample(m_hMp4FileHandle, m_iVideoTrackID, m_pLastNals[0].p_payload, m_iLastFramesize, frameDuration);
		}
		StopRecording();
	}	
}
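
A note on the timestamp math in example #1: with the track time scale at 90,000 ticks per second, a nanosecond frame delta maps to ticks by multiplying by 90,000 and dividing by 10^9; the code above goes through milliseconds first (timeMs * 90), which gives the same result at millisecond precision. A minimal sketch of that conversion, independent of the recorder class above (the helper name is ours, not from the project):

#include <stdint.h>

/* Sketch: convert a frame-to-frame delta in nanoseconds into ticks of a
 * 90 kHz MP4 track time scale (90,000 ticks per second), the same value
 * example #1 derives via milliseconds. Multiplying before dividing keeps
 * sub-millisecond precision. */
static uint64_t ns_delta_to_90khz_ticks(uint64_t delta_ns)
{
    return (delta_ns * 90000ULL) / 1000000000ULL;
}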
Code Example #2
File: urltrack.cpp Project: acassis/emlinux-ssd1935
main(int argc, char** argv)
{
	if (argc < 2) {
		fprintf(stderr, "Usage: %s <file>\n", argv[0]);
		exit(1);
	}

	u_int32_t verbosity = 0 /* MP4_DETAILS_ALL */;

	MP4FileHandle mp4File = MP4Create(argv[1], verbosity);

	if (!mp4File) {
		exit(1);
	}

	MP4TrackId urlTrackId = 
		MP4AddTrack(mp4File, "URLF");
	printf("urlTrackId %d\n", urlTrackId);

	u_int8_t i;
	char url[128];

	for (i = 1; i <= 5; i++) {
		sprintf(url, "http://server.com/foo/bar%u.html", i);

		MP4WriteSample(mp4File, urlTrackId, 
			(u_int8_t*)url, strlen(url) + 1, (MP4Duration)i);
	}

	MP4Close(mp4File);

	mp4File = MP4Read(argv[1], verbosity);

	// check that we can find the track again
	urlTrackId = MP4FindTrackId(mp4File, 0, "URLF");
	printf("urlTrackId %d\n", urlTrackId);
	
	for (i = 1; i <= 5; i++) {
		u_int8_t* pSample = NULL;
		u_int32_t sampleSize = 0;
		MP4Duration duration;
		bool rc;

		rc = MP4ReadSample(mp4File, urlTrackId, i,
			&pSample, &sampleSize, NULL, &duration);

		if (rc) {
			printf("Sample %i duration "D64": %s\n", 
				i, duration, pSample);
			free(pSample);
		} else {
			printf("Couldn't read sample %i\n", i);
		}
	}

	MP4Close(mp4File);

	exit(0);
}
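
The examples above call MP4WriteSample with either five arguments (relying on defaults, as in example #2) or the full seven-argument form used in the examples that follow. As a reference, here is a minimal wrapper sketch of the seven-argument form; the exact header name and default arguments vary between libmp4v2 and mpeg4ip releases, and the wrapper name is ours, so treat this as a sketch rather than the canonical prototype:

#include <stdint.h>
#include <stdbool.h>
#include <mp4v2/mp4v2.h>   /* older mpeg4ip-era trees expose this API via mp4.h instead */

/* Sketch of the seven-argument MP4WriteSample form: sample data, sample
 * duration in track time-scale ticks, rendering (composition) offset, and
 * the sync-sample flag that marks key frames. */
static bool write_one_sample(MP4FileHandle file, MP4TrackId track,
                             uint8_t *bytes, uint32_t numBytes,
                             MP4Duration duration, bool isKeyFrame)
{
    return MP4WriteSample(file, track, bytes, numBytes,
                          duration,    /* sample duration */
                          0,           /* rendering offset; non-zero only when B-frames are present */
                          isKeyFrame); /* sync-sample flag */
}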
Code Example #3
/**
    \fn writeAudioBlock
*/
bool muxerMp4v2::writeAudioBlock(int index,mp4v2AudioPacket::mp4v2AudioBlock *block,uint64_t nbSamples)
{
    aprintf("Writting audio block : size=%d, samples=%d nbSamples=%d \n",block->sizeInBytes,block->nbSamples,(int)nbSamples);
    bool r=MP4WriteSample(handle,audioTrackIds[index],
                            block->buffer,
                            block->sizeInBytes,
                            nbSamples,
                            0,1);
    encoding->pushAudioFrame(block->sizeInBytes);
    if(false==r)
                        {
                            ADM_error("Cannot write audio sample for track %d\n",index);
                            return false;
                        }
    return true;
}
Code Example #4
File: Mp4Encoder.cpp Project: RobotsBrain/robrain
int Mp4Encoder::WriteAacData(u_char * pData, int size)
{
	if (m_audioId == MP4_INVALID_TRACK_ID) {
		m_audioId =
			MP4AddAudioTrack(m_hMp4File, 48000, 1024, MP4_MPEG4_AUDIO_TYPE);
		if (m_audioId == MP4_INVALID_TRACK_ID) {
			printf("add audio track failed.\n");
			return 0;
		}

		MP4SetAudioProfileLevel(m_hMp4File, 0x2);
	}

	MP4WriteSample(m_hMp4File, m_audioId, pData, size, MP4_INVALID_DURATION, 0,
				   1);

	return 0;
}
Code Example #5
File: Mp4Encoder.cpp Project: RobotsBrain/robrain
int Mp4Encoder::WriteH264Data(u_char * pData, int size)
{
	int type = pData[4] & 0x1f;

	if (type == 0x07) {			// sps

		// add the H.264 track
		if (m_videoId == MP4_INVALID_TRACK_ID) {
			m_videoId = MP4AddH264VideoTrack(m_hMp4File, m_nVTimeScale, m_nVTimeScale / m_nFrameRate, m_nWidth,	// width
											 m_nHeight,	// height
											 pData[5],	// sps[1] AVCProfileIndication
											 pData[6],	// sps[2] profile_compat
											 pData[7],	// sps[3] AVCLevelIndication
											 3);	// 4 bytes length before each NAL unit
			if (m_videoId == MP4_INVALID_TRACK_ID) {
				printf("add video track failed.\n");
				return 0;
			}

			MP4SetVideoProfileLevel(m_hMp4File, 0x7F);	//  Simple Profile @ Level
		}

		MP4AddH264SequenceParameterSet(m_hMp4File, m_videoId, pData + 4,
									   size - 4);
	} else if (type == 0x08) {	// pps
		MP4AddH264PictureParameterSet(m_hMp4File, m_videoId, pData + 4,
									  size - 4);
	} else {
		// the first 4 bytes of an MP4 NALU hold the NALU length (no 00 00 00 01 start code)
		pData[0] = (size - 4) >> 24;
		pData[1] = (size - 4) >> 16;
		pData[2] = (size - 4) >> 8;
		pData[3] = (size - 4) & 0xff;

		if (!MP4WriteSample
			(m_hMp4File, m_videoId, pData, size, MP4_INVALID_DURATION, 0, 1)) {
			return 0;
		}
	}

	return 0;
}
Code Example #6
File: audio encoder.c Project: hownam/fennec
int local_write_doublebuffer(unsigned long id, double *dbuff, unsigned long scount, int bytesencoded, int framesize)
{
	unsigned int   i;

	if(pestreams[id].fb_size <= scount * sizeof(float) || !pestreams[id].floatbuffer)
	{
		pestreams[id].fb_size = scount * sizeof(float);
		pestreams[id].floatbuffer = (float*) sys_mem_realloc(pestreams[id].floatbuffer, pestreams[id].fb_size);
	}


	for(i=0; i<scount; i++)
		pestreams[id].floatbuffer[i] = (float)(dbuff[i] * 32767.0);


	bytesencoded = faacEncEncode(pestreams[id].enchandle,
								(int32_t *)pestreams[id].floatbuffer,
								scount,
								pestreams[id].obuffer,
								pestreams[id].maxbytesout);

	if(pestreams[id].ismp4)
	{
		if(bytesencoded)
		{
			unsigned int samples_left = pestreams[id].totalsamples - pestreams[id].encodedsamples + framesize;
			MP4Duration dur           = samples_left > framesize ? framesize : samples_left;
			MP4Duration ofs           = pestreams[id].encodedsamples > 0 ? 0 : framesize;


			MP4WriteSample(pestreams[id].mp4file, pestreams[id].mp4track, pestreams[id].obuffer, bytesencoded, dur, ofs, 1);

			pestreams[id].encodedsamples += (unsigned int)dur;
		}

	}else{
		if(bytesencoded)
			sys_file_write(pestreams[id].fhandle, pestreams[id].obuffer, bytesencoded);
	}
	return bytesencoded;
}
Code Example #7
File: main.c Project: 12307/PushRTMPStreamSync
int main(int argc, char *argv[])
{
    int frames, currentFrame;
    faacEncHandle hEncoder;
    pcmfile_t *infile = NULL;

    unsigned long samplesInput, maxBytesOutput, totalBytesWritten=0;

    faacEncConfigurationPtr myFormat;
    unsigned int mpegVersion = MPEG2;
    unsigned int objectType = LOW;
    unsigned int useMidSide = 1;
    static unsigned int useTns = DEFAULT_TNS;
    enum container_format container = NO_CONTAINER;
    int optimizeFlag = 0;
    enum stream_format stream = ADTS_STREAM;
    int cutOff = -1;
    int bitRate = 0;
    unsigned long quantqual = 0;
    int chanC = 3;
    int chanLF = 4;

    char *audioFileName = NULL;
    char *aacFileName = NULL;
    char *aacFileExt = NULL;
    int aacFileNameGiven = 0;

    float *pcmbuf;
    int *chanmap = NULL;

    unsigned char *bitbuf;
    int samplesRead = 0;
    const char *dieMessage = NULL;

    int rawChans = 0; // disabled by default
    int rawBits = 16;
    int rawRate = 44100;
    int rawEndian = 1;

    int shortctl = SHORTCTL_NORMAL;

    FILE *outfile = NULL;

#ifdef HAVE_LIBMP4V2
    MP4FileHandle MP4hFile = MP4_INVALID_FILE_HANDLE;
    MP4TrackId MP4track = 0;
    unsigned int ntracks = 0, trackno = 0;
    unsigned int ndiscs = 0, discno = 0;
    u_int8_t compilation = 0;
    const char *artist = NULL, *title = NULL, *album = NULL, *year = NULL,
      *genre = NULL, *comment = NULL, *writer = NULL;
    u_int8_t *art = NULL;
    u_int64_t artSize = 0;
    u_int64_t total_samples = 0;
    u_int64_t encoded_samples = 0;
    unsigned int delay_samples;
    unsigned int frameSize;
#endif
    char *faac_id_string;
    char *faac_copyright_string;

#ifndef _WIN32
    // install signal handler
    signal(SIGINT, signal_handler);
    signal(SIGTERM, signal_handler);
#endif

    // get faac version
    if (faacEncGetVersion(&faac_id_string, &faac_copyright_string) == FAAC_CFG_VERSION)
    {
        fprintf(stderr, "Freeware Advanced Audio Coder\nFAAC %s\n\n", faac_id_string);
    }
    else
    {
        fprintf(stderr, __FILE__ "(%d): wrong libfaac version\n", __LINE__);
        return 1;
    }

    /* begin process command line */
    progName = argv[0];
    while (1) {
        static struct option long_options[] = {
            { "help", 0, 0, 'h'},
            { "long-help", 0, 0, 'H'},
            { "raw", 0, 0, 'r'},
            { "no-midside", 0, 0, NO_MIDSIDE_FLAG},
            { "cutoff", 1, 0, 'c'},
            { "quality", 1, 0, 'q'},
            { "pcmraw", 0, 0, 'P'},
            { "pcmsamplerate", 1, 0, 'R'},
            { "pcmsamplebits", 1, 0, 'B'},
            { "pcmchannels", 1, 0, 'C'},
            { "shortctl", 1, 0, SHORTCTL_FLAG},
            { "tns", 0, 0, TNS_FLAG},
            { "no-tns", 0, 0, NO_TNS_FLAG},
            { "mpeg-version", 1, 0, MPEGVERS_FLAG},
            { "obj-type", 1, 0, OBJTYPE_FLAG},
            { "license", 0, 0, 'L'},
#ifdef HAVE_LIBMP4V2
            { "createmp4", 0, 0, 'w'},
            { "optimize", 0, 0, 's'},
            { "artist", 1, 0, ARTIST_FLAG},
            { "title", 1, 0, TITLE_FLAG},
            { "album", 1, 0, ALBUM_FLAG},
            { "track", 1, 0, TRACK_FLAG},
            { "disc", 1, 0, DISC_FLAG},
            { "genre", 1, 0, GENRE_FLAG},
            { "year", 1, 0, YEAR_FLAG},
            { "cover-art", 1, 0, COVER_ART_FLAG},
            { "comment", 1, 0, COMMENT_FLAG},
        { "writer", 1, 0, WRITER_FLAG},
        { "compilation", 0, 0, COMPILATION_FLAG},
#endif
        { "pcmswapbytes", 0, 0, 'X'},
            { 0, 0, 0, 0}
        };
        int c = -1;
        int option_index = 0;

        c = getopt_long(argc, argv, "Hhb:m:o:rnc:q:PR:B:C:I:X"
#ifdef HAVE_LIBMP4V2
                        "ws"
#endif
            ,long_options, &option_index);

        if (c == -1)
            break;

        if (!c)
        {
          dieMessage = usage;
          break;
        }

        switch (c) {
    case 'o':
        {
            int l = strlen(optarg);
        aacFileName = malloc(l+1);
        memcpy(aacFileName, optarg, l);
        aacFileName[l] = '\0';
        aacFileNameGiven = 1;
        }
        break;
        case 'r': {
            stream = RAW_STREAM;
            break;
        }
        case NO_MIDSIDE_FLAG: {
            useMidSide = 0;
            break;
        }
        case 'c': {
            unsigned int i;
            if (sscanf(optarg, "%u", &i) > 0) {
                cutOff = i;
            }
            break;
        }
        case 'b': {
            unsigned int i;
            if (sscanf(optarg, "%u", &i) > 0)
            {
                bitRate = 1000 * i;
            }
            break;
        }
        case 'q':
        {
            unsigned int i;
            if (sscanf(optarg, "%u", &i) > 0)
            {
                if (i > 0 && i < 1000)
                    quantqual = i;
            }
            break;
        }
        case 'I':
            sscanf(optarg, "%d,%d", &chanC, &chanLF);
            break;
        case 'P':
            rawChans = 2; // enable raw input
            break;
        case 'R':
        {
            unsigned int i;
            if (sscanf(optarg, "%u", &i) > 0)
            {
                rawRate = i;
                rawChans = (rawChans > 0) ? rawChans : 2;
            }
            break;
        }
        case 'B':
        {
            unsigned int i;
            if (sscanf(optarg, "%u", &i) > 0)
            {
                if (i > 32)
                    i = 32;
                if (i < 8)
                    i = 8;
                rawBits = i;
                rawChans = (rawChans > 0) ? rawChans : 2;
            }
            break;
        }
        case 'C':
        {
            unsigned int i;
            if (sscanf(optarg, "%u", &i) > 0)
                rawChans = i;
            break;
        }
#ifdef HAVE_LIBMP4V2
        case 'w':
        container = MP4_CONTAINER;
            break;
        case 's':
        optimizeFlag = 1;
            break;
    case ARTIST_FLAG:
        artist = optarg;
        break;
    case WRITER_FLAG:
        writer = optarg;
        break;
    case TITLE_FLAG:
        title = optarg;
        break;
    case ALBUM_FLAG:
        album = optarg;
        break;
    case TRACK_FLAG:
        sscanf(optarg, "%d/%d", &trackno, &ntracks);
        break;
    case DISC_FLAG:
        sscanf(optarg, "%d/%d", &discno, &ndiscs);
        break;
    case COMPILATION_FLAG:
        compilation = 0x1;
        break;
    case GENRE_FLAG:
        genre = optarg;
        break;
    case YEAR_FLAG:
        year = optarg;
        break;
    case COMMENT_FLAG:
        comment = optarg;
        break;
    case COVER_ART_FLAG: {
        FILE *artFile = fopen(optarg, "rb");

        if(artFile) {
            u_int64_t r;

            fseek(artFile, 0, SEEK_END);
        artSize = ftell(artFile);

        art = malloc(artSize);

            fseek(artFile, 0, SEEK_SET);
        clearerr(artFile);

        r = fread(art, artSize, 1, artFile);

        if (r != 1) {
            dieMessage = "Error reading cover art file!\n";
            free(art);
            art = NULL;
        } else if (artSize < 12 || !check_image_header(art)) {
            /* the above expression checks the image signature */
            dieMessage = "Unsupported cover image file format!\n";
            free(art);
            art = NULL;
        }

        fclose(artFile);
        } else {
            dieMessage = "Error opening cover art file!\n";
        }

        break;
    }
#endif
        case SHORTCTL_FLAG:
            shortctl = atoi(optarg);
            break;
        case TNS_FLAG:
            useTns = 1;
            break;
        case NO_TNS_FLAG:
            useTns = 0;
            break;
    case MPEGVERS_FLAG:
            mpegVersion = atoi(optarg);
            switch(mpegVersion)
            {
            case 2:
                mpegVersion = MPEG2;
                break;
            case 4:
                mpegVersion = MPEG4;
                break;
            default:
            dieMessage = "Unrecognised MPEG version!\n";
            }
            break;
#if 0
    case OBJTYPE_FLAG:
        if (!strcasecmp(optarg, "LC"))
                objectType = LOW;
        else if (!strcasecmp(optarg, "Main"))
            objectType = MAIN;
        else if (!strcasecmp(optarg, "LTP")) {
            mpegVersion = MPEG4;
        objectType = LTP;
        } else
            dieMessage = "Unrecognised object type!\n";
        break;
#endif
        case 'L':
        fprintf(stderr, faac_copyright_string);
        dieMessage = license;
        break;
    case 'X':
      rawEndian = 0;
      break;
    case 'H':
      dieMessage = long_help;
      break;
    case 'h':
          dieMessage = short_help;
      break;
    case '?':
        default:
      dieMessage = usage;
          break;
        }
    }

    /* check that we have at least one non-option argument */
    if (!dieMessage && (argc - optind) > 1 && aacFileNameGiven)
        dieMessage = "Cannot encode several input files to one output file.\n";

    if (argc - optind < 1 || dieMessage)
    {
        fprintf(stderr, dieMessage ? dieMessage : usage,
           progName, progName, progName, progName);
        return 1;
    }

    while (argc - optind > 0) {

    /* get the input file name */
    audioFileName = argv[optind++];

    /* generate the output file name, if necessary */
    if (!aacFileNameGiven) {
        char *t = strrchr(audioFileName, '.');
    int l = t ? strlen(audioFileName) - strlen(t) : strlen(audioFileName);

#ifdef HAVE_LIBMP4V2
    aacFileExt = container == MP4_CONTAINER ? ".m4a" : ".aac";
#else
    aacFileExt = ".aac";
#endif

    aacFileName = malloc(l+1+4);
    memcpy(aacFileName, audioFileName, l);
    memcpy(aacFileName + l, aacFileExt, 4);
    aacFileName[l+4] = '\0';
    } else {
        aacFileExt = strrchr(aacFileName, '.');

        if (aacFileExt && (!strcmp(".m4a", aacFileExt) || !strcmp(".m4b", aacFileExt) || !strcmp(".mp4", aacFileExt)))
#ifndef HAVE_LIBMP4V2
        fprintf(stderr, "WARNING: MP4 support unavailable!\n");
#else
        container = MP4_CONTAINER;
#endif
    }

    /* open the audio input file */
    if (rawChans > 0) // use raw input
    {
        infile = wav_open_read(audioFileName, 1);
    if (infile)
    {
        infile->bigendian = rawEndian;
        infile->channels = rawChans;
        infile->samplebytes = rawBits / 8;
        infile->samplerate = rawRate;
        infile->samples /= (infile->channels * infile->samplebytes);
    }
    }
    else // header input
        infile = wav_open_read(audioFileName, 0);

    if (infile == NULL)
    {
        fprintf(stderr, "Couldn't open input file %s\n", audioFileName);
    return 1;
    }


    /* open the encoder library */
    hEncoder = faacEncOpen(infile->samplerate, infile->channels,
        &samplesInput, &maxBytesOutput);

#ifdef HAVE_LIBMP4V2
    if (container != MP4_CONTAINER && (ntracks || trackno || artist ||
                       title ||  album || year || art ||
                       genre || comment || discno || ndiscs ||
                       writer || compilation))
    {
        fprintf(stderr, "Metadata requires MP4 output!\n");
    return 1;
    }

    if (container == MP4_CONTAINER)
    {
        mpegVersion = MPEG4;
    stream = RAW_STREAM;
    }

    frameSize = samplesInput/infile->channels;
    delay_samples = frameSize; // encoder delay 1024 samples
#endif
    pcmbuf = (float *)malloc(samplesInput*sizeof(float));
    bitbuf = (unsigned char*)malloc(maxBytesOutput*sizeof(unsigned char));
    chanmap = mkChanMap(infile->channels, chanC, chanLF);
    if (chanmap)
    {
        fprintf(stderr, "Remapping input channels: Center=%d, LFE=%d\n",
            chanC, chanLF);
    }

    if (cutOff <= 0)
    {
        if (cutOff < 0) // default
            cutOff = 0;
        else // disabled
            cutOff = infile->samplerate / 2;
    }
    if (cutOff > (infile->samplerate / 2))
        cutOff = infile->samplerate / 2;

    /* put the options in the configuration struct */
    myFormat = faacEncGetCurrentConfiguration(hEncoder);
    myFormat->aacObjectType = objectType;
    myFormat->mpegVersion = mpegVersion;
    myFormat->useTns = useTns;
    switch (shortctl)
    {
    case SHORTCTL_NOSHORT:
      fprintf(stderr, "disabling short blocks\n");
      myFormat->shortctl = shortctl;
      break;
    case SHORTCTL_NOLONG:
      fprintf(stderr, "disabling long blocks\n");
      myFormat->shortctl = shortctl;
      break;
    }
    if (infile->channels >= 6)
        myFormat->useLfe = 1;
    myFormat->allowMidside = useMidSide;
    if (bitRate)
        myFormat->bitRate = bitRate / infile->channels;
    myFormat->bandWidth = cutOff;
    if (quantqual > 0)
        myFormat->quantqual = quantqual;
    myFormat->outputFormat = stream;
    myFormat->inputFormat = FAAC_INPUT_FLOAT;
    if (!faacEncSetConfiguration(hEncoder, myFormat)) {
        fprintf(stderr, "Unsupported output format!\n");
#ifdef HAVE_LIBMP4V2
        if (container == MP4_CONTAINER) MP4Close(MP4hFile);
#endif
        return 1;
    }

#ifdef HAVE_LIBMP4V2
    /* initialize MP4 creation */
    if (container == MP4_CONTAINER) {
        unsigned char *ASC = 0;
        unsigned long ASCLength = 0;
    char *version_string;

#ifdef MP4_CREATE_EXTENSIBLE_FORMAT
    /* hack to compile against libmp4v2 >= 1.0RC3
     * why is there no version identifier in mp4.h? */
        MP4hFile = MP4Create(aacFileName, MP4_DETAILS_ERROR, 0);
#else
    MP4hFile = MP4Create(aacFileName, MP4_DETAILS_ERROR, 0, 0);
#endif
        if (!MP4_IS_VALID_FILE_HANDLE(MP4hFile)) {
            fprintf(stderr, "Couldn't create output file %s\n", aacFileName);
            return 1;
        }

        MP4SetTimeScale(MP4hFile, 90000);
        MP4track = MP4AddAudioTrack(MP4hFile, infile->samplerate, MP4_INVALID_DURATION, MP4_MPEG4_AUDIO_TYPE);
        MP4SetAudioProfileLevel(MP4hFile, 0x0F);
        faacEncGetDecoderSpecificInfo(hEncoder, &ASC, &ASCLength);
        MP4SetTrackESConfiguration(MP4hFile, MP4track, ASC, ASCLength);
    free(ASC);

    /* set metadata */
    version_string = malloc(strlen(faac_id_string) + 6);
    strcpy(version_string, "FAAC ");
    strcpy(version_string + 5, faac_id_string);
    MP4SetMetadataTool(MP4hFile, version_string);
    free(version_string);

    if (artist) MP4SetMetadataArtist(MP4hFile, artist);
    if (writer) MP4SetMetadataWriter(MP4hFile, writer);
    if (title) MP4SetMetadataName(MP4hFile, title);
    if (album) MP4SetMetadataAlbum(MP4hFile, album);
    if (trackno > 0) MP4SetMetadataTrack(MP4hFile, trackno, ntracks);
    if (discno > 0) MP4SetMetadataDisk(MP4hFile, discno, ndiscs);
    if (compilation) MP4SetMetadataCompilation(MP4hFile, compilation);
    if (year) MP4SetMetadataYear(MP4hFile, year);
    if (genre) MP4SetMetadataGenre(MP4hFile, genre);
    if (comment) MP4SetMetadataComment(MP4hFile, comment);
        if (artSize) {
        MP4SetMetadataCoverArt(MP4hFile, art, artSize);
        free(art);
    }
    }
    else
    {
#endif
        /* open the aac output file */
        if (!strcmp(aacFileName, "-"))
        {
            outfile = stdout;
        }
        else
        {
            outfile = fopen(aacFileName, "wb");
        }
        if (!outfile)
        {
            fprintf(stderr, "Couldn't create output file %s\n", aacFileName);
            return 1;
        }
#ifdef HAVE_LIBMP4V2
    }
#endif

    cutOff = myFormat->bandWidth;
    quantqual = myFormat->quantqual;
    bitRate = myFormat->bitRate;
    if (bitRate)
      fprintf(stderr, "Average bitrate: %d kbps\n",
          (bitRate + 500)/1000*infile->channels);
    fprintf(stderr, "Quantization quality: %ld\n", quantqual);
    fprintf(stderr, "Bandwidth: %d Hz\n", cutOff);
    fprintf(stderr, "Object type: ");
    switch(objectType)
    {
    case LOW:
        fprintf(stderr, "Low Complexity");
        break;
    case MAIN:
        fprintf(stderr, "Main");
        break;
    case LTP:
        fprintf(stderr, "LTP");
        break;
    }
    fprintf(stderr, "(MPEG-%d)", (mpegVersion == MPEG4) ? 4 : 2);
    if (myFormat->useTns)
        fprintf(stderr, " + TNS");
    if (myFormat->allowMidside)
        fprintf(stderr, " + M/S");
    fprintf(stderr, "\n");

    fprintf(stderr, "Container format: ");
    switch(container)
    {
    case NO_CONTAINER:
      switch(stream)
    {
    case RAW_STREAM:
      fprintf(stderr, "Headerless AAC (RAW)\n");
      break;
    case ADTS_STREAM:
      fprintf(stderr, "Transport Stream (ADTS)\n");
      break;
    }
        break;
#ifdef HAVE_LIBMP4V2
    case MP4_CONTAINER:
        fprintf(stderr, "MPEG-4 File Format (MP4)\n");
        break;
#endif
    }

    if (outfile
#ifdef HAVE_LIBMP4V2
        || MP4hFile != MP4_INVALID_FILE_HANDLE
#endif
       )
    {
        int showcnt = 0;
#ifdef _WIN32
        long begin = GetTickCount();
#endif
        if (infile->samples)
            frames = ((infile->samples + 1023) / 1024) + 1;
        else
            frames = 0;
        currentFrame = 0;

        fprintf(stderr, "Encoding %s to %s\n", audioFileName, aacFileName);
        if (frames != 0)
            fprintf(stderr, "   frame          | bitrate | elapsed/estim | "
            "play/CPU | ETA\n");
        else
            fprintf(stderr, " frame | elapsed | play/CPU\n");

        /* encoding loop */
#ifdef _WIN32
    for (;;)
#else
        while (running)
#endif
        {
            int bytesWritten;

            samplesRead = wav_read_float32(infile, pcmbuf, samplesInput, chanmap);

#ifdef HAVE_LIBMP4V2
            total_samples += samplesRead / infile->channels;
#endif

            /* call the actual encoding routine */
            bytesWritten = faacEncEncode(hEncoder,
                (int32_t *)pcmbuf,
                samplesRead,
                bitbuf,
                maxBytesOutput);

            if (bytesWritten)
            {
                currentFrame++;
                showcnt--;
        totalBytesWritten += bytesWritten;
            }

            if ((showcnt <= 0) || !bytesWritten)
            {
                double timeused;
#ifdef __unix__
                struct rusage usage;
#endif
#ifdef _WIN32
                char percent[MAX_PATH + 20];
                timeused = (GetTickCount() - begin) * 1e-3;
#else
#ifdef __unix__
                if (getrusage(RUSAGE_SELF, &usage) == 0) {
                    timeused = (double)usage.ru_utime.tv_sec +
                        (double)usage.ru_utime.tv_usec * 1e-6;
                }
                else
                    timeused = 0;
#else
                timeused = (double)clock() * (1.0 / CLOCKS_PER_SEC);
#endif
#endif
                if (currentFrame && (timeused > 0.1))
                {
                    showcnt += 50;

                    if (frames != 0)
                        fprintf(stderr,
                            "\r%5d/%-5d (%3d%%)|  %5.1f  | %6.1f/%-6.1f | %7.2fx | %.1f ",
                            currentFrame, frames, currentFrame*100/frames,
                ((double)totalBytesWritten * 8.0 / 1000.0) /
                ((double)infile->samples / infile->samplerate * currentFrame / frames),
                            timeused,
                            timeused * frames / currentFrame,
                            (1024.0 * currentFrame / infile->samplerate) / timeused,
                            timeused  * (frames - currentFrame) / currentFrame);
                    else
                        fprintf(stderr,
                            "\r %5d |  %6.1f | %7.2fx ",
                            currentFrame,
                            timeused,
                            (1024.0 * currentFrame / infile->samplerate) / timeused);

                    fflush(stderr);
#ifdef _WIN32
                    if (frames != 0)
                    {
                        sprintf(percent, "%.2f%% encoding %s",
                            100.0 * currentFrame / frames, audioFileName);
                        SetConsoleTitle(percent);
                    }
#endif
                }
            }

            /* all done, bail out */
            if (!samplesRead && !bytesWritten)
                break ;

            if (bytesWritten < 0)
            {
                fprintf(stderr, "faacEncEncode() failed\n");
                break ;
            }

            if (bytesWritten > 0)
            {
#ifdef HAVE_LIBMP4V2
                u_int64_t samples_left = total_samples - encoded_samples + delay_samples;
                MP4Duration dur = samples_left > frameSize ? frameSize : samples_left;
                MP4Duration ofs = encoded_samples > 0 ? 0 : delay_samples;

                if (container == MP4_CONTAINER)
                {
                    /* write bitstream to mp4 file */
                    MP4WriteSample(MP4hFile, MP4track, bitbuf, bytesWritten, dur, ofs, 1);
                }
                else
                {
#endif
                    /* write bitstream to aac file */
                    fwrite(bitbuf, 1, bytesWritten, outfile);
#ifdef HAVE_LIBMP4V2
                }

                encoded_samples += dur;
#endif
            }
        }

#ifdef HAVE_LIBMP4V2
        /* clean up */
        if (container == MP4_CONTAINER)
        {
            MP4Close(MP4hFile);
            if (optimizeFlag == 1)
            {
                fprintf(stderr, "\n\nMP4 format optimization... ");
                MP4Optimize(aacFileName, NULL, 0);
                fprintf(stderr, "Done!");
            }
        } else
#endif
            fclose(outfile);

        fprintf(stderr, "\n\n");
    }

    faacEncClose(hEncoder);

    wav_close(infile);

    if (pcmbuf) free(pcmbuf);
    if (bitbuf) free(bitbuf);
    if (aacFileNameGiven) free(aacFileName);

    }

    return 0;
}
Code Example #8
File: mp4v.cpp Project: BluePandaLi/mpeg4ip
MP4TrackId Mp4vCreator(MP4FileHandle mp4File, FILE* inFile, bool doEncrypt,
		       bool allowVariableFrameRate)
{
    bool rc;

    u_int8_t sampleBuffer[256 * 1024 * 2];
    u_int8_t* pCurrentSample = sampleBuffer;
    u_int32_t maxSampleSize = sizeof(sampleBuffer) / 2;
    u_int32_t prevSampleSize = 0;

    // the current syntactical object
    // typically 1:1 with a sample
    // but not always, i.e. non-VOP's
    u_int8_t* pObj = pCurrentSample;
    u_int32_t objSize;
    u_int8_t objType;

    // the current sample
    MP4SampleId sampleId = 1;
    MP4Timestamp currentSampleTime = 0;

    // the last reference VOP
    MP4SampleId refVopId = 1;
    MP4Timestamp refVopTime = 0;

    // track configuration info
    u_int8_t videoProfileLevel = MPEG4_SP_L3;
    u_int8_t timeBits = 15;
    u_int16_t timeTicks = 30000;
    u_int16_t frameDuration = 3000;
    u_int16_t frameWidth = 320;
    u_int16_t frameHeight = 240;
    u_int32_t esConfigSize = 0;
    int vopType = 0;
    int prevVopType = 0;
    bool foundVOSH = false, foundVO = false, foundVOL = false;
    u_int32_t lastVopTimeIncrement = 0;
    bool variableFrameRate = false;
    bool lastFrame = false;
    bool haveBframes = false;
    mpeg4_frame_t *head = NULL, *tail = NULL;

    // start reading objects until we get the first VOP
    while (LoadNextObject(inFile, pObj, &objSize, &objType)) {
        // guard against buffer overflow
        if (pObj + objSize >= pCurrentSample + maxSampleSize) {
            fprintf(stderr,
                    "%s: buffer overflow, invalid video stream?\n", ProgName);
            return MP4_INVALID_TRACK_ID;
        }
#ifdef DEBUG_MP4V
        if (Verbosity & MP4_DETAILS_SAMPLE) {
            printf("MP4V type %x size %u\n",
                    objType, objSize);
        }
#endif

        if (objType == MP4AV_MPEG4_VOSH_START) {
            MP4AV_Mpeg4ParseVosh(pObj, objSize,
                    &videoProfileLevel);
            foundVOSH = true;
        } else if (objType == MP4AV_MPEG4_VO_START) {
            foundVO = true;
        } else if (objType == MP4AV_MPEG4_VOL_START) {
            MP4AV_Mpeg4ParseVol(pObj, objSize,
                    &timeBits, &timeTicks, &frameDuration,
                    &frameWidth, &frameHeight);

            foundVOL = true;
#ifdef DEBUG_MP4V
            printf("ParseVol: timeBits %u timeTicks %u frameDuration %u\n",
                    timeBits, timeTicks, frameDuration);
#endif

        } else if (foundVOL == true || objType == MP4AV_MPEG4_VOP_START) {
            esConfigSize = pObj - pCurrentSample;
            // ready to set up mp4 track
            break;
        }
        /* XXX why do we need this if ?
         * It looks like it will remove this object ... XXX */
	// It does.  On Purpose.  wmay 6/2004
        if (objType != MP4AV_MPEG4_USER_DATA_START) {
            pObj += objSize;
        }
    }

    if (foundVOSH == false) {
        fprintf(stderr,
                "%s: no VOSH header found in MPEG-4 video.\n"
                "This can cause problems with players other than mp4player. \n",
                ProgName);
    } else {
        if (VideoProfileLevelSpecified &&
                videoProfileLevel != VideoProfileLevel) {
            fprintf(stderr,
                    "%s: You have specified a different video profile level than was detected in the VOSH header\n"
                    "The level you specified was %d and %d was read from the VOSH\n",
                    ProgName, VideoProfileLevel, videoProfileLevel);
        }
    }
    if (foundVO == false) {
        fprintf(stderr,
                "%s: No VO header found in mpeg-4 video.\n"
                "This can cause problems with players other than mp4player\n",
                ProgName);
    }
    if (foundVOL == false) {
        fprintf(stderr,
                "%s: fatal: No VOL header found in mpeg-4 video stream\n",
                ProgName);
        return MP4_INVALID_TRACK_ID;
    }

    // convert frame duration to canonical time scale
    // note zero value for frame duration signals variable rate video
    if (timeTicks == 0) {
        timeTicks = 1;
    }
    u_int32_t mp4FrameDuration = 0;

    if (VideoFrameRate) {
      mp4FrameDuration = (u_int32_t)(((double)Mp4TimeScale) / VideoFrameRate);    
    } else if (frameDuration) {
	  VideoFrameRate = frameDuration;
	  VideoFrameRate /= timeTicks;
	  mp4FrameDuration = (Mp4TimeScale * frameDuration) / timeTicks;
    } else {
      if (allowVariableFrameRate == false ) {
	fprintf(stderr,
		"%s: variable rate video stream signalled,"
		" please specify average frame rate with -r option\n"
		" or --variable-frame-rate argument\n",
		ProgName);
	return MP4_INVALID_TRACK_ID;
      }

        variableFrameRate = true;
    }

    ismacryp_session_id_t ismaCrypSId;
    mp4v2_ismacrypParams *icPp =  (mp4v2_ismacrypParams *) malloc(sizeof(mp4v2_ismacrypParams));
    memset(icPp, 0, sizeof(mp4v2_ismacrypParams));


    // initialize ismacryp session if encrypting
    if (doEncrypt) {

        if (ismacrypInitSession(&ismaCrypSId,KeyTypeVideo) != 0) {
            fprintf(stderr, "%s: could not initialize the ISMAcryp session\n",
                    ProgName);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetScheme(ismaCrypSId, &(icPp->scheme_type)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp scheme type. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetSchemeVersion(ismaCrypSId, &(icPp->scheme_version)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp scheme ver. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetKMSUri(ismaCrypSId, &(icPp->kms_uri)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp kms uri. sid %d\n",
                    ProgName, ismaCrypSId);
            CHECK_AND_FREE(icPp->kms_uri);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if ( ismacrypGetSelectiveEncryption(ismaCrypSId, &(icPp->selective_enc)) != ismacryp_rc_ok ) {
            fprintf(stderr, "%s: could not get ismacryp selec enc. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetKeyIndicatorLength(ismaCrypSId, &(icPp->key_ind_len)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp key ind len. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetIVLength(ismaCrypSId, &(icPp->iv_len)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp iv len. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
    }

    // create the new video track
    MP4TrackId trackId;
    if (doEncrypt) {
        trackId =
            MP4AddEncVideoTrack(
                    mp4File,
                    Mp4TimeScale,
                    mp4FrameDuration,
                    frameWidth,
                    frameHeight,
                    icPp,
                    MP4_MPEG4_VIDEO_TYPE);
    } else {
        trackId =
            MP4AddVideoTrack(
                    mp4File,
                    Mp4TimeScale,
                    mp4FrameDuration,
                    frameWidth,
                    frameHeight,
                    MP4_MPEG4_VIDEO_TYPE);
    }

    if (trackId == MP4_INVALID_TRACK_ID) {
        fprintf(stderr,
                "%s: can't create video track\n", ProgName);
        return MP4_INVALID_TRACK_ID;
    }

    if (VideoProfileLevelSpecified) {
        videoProfileLevel = VideoProfileLevel;
    }
    if (MP4GetNumberOfTracks(mp4File, MP4_VIDEO_TRACK_TYPE) == 1) {
        MP4SetVideoProfileLevel(mp4File, videoProfileLevel);
    }
    printf("es config size is %d\n", esConfigSize);
    if (esConfigSize) {
        MP4SetTrackESConfiguration(mp4File, trackId,
                pCurrentSample, esConfigSize);

        // move past ES config, so it doesn't go into first sample
        pCurrentSample += esConfigSize;
    }
    // Move the current frame to the beginning of the
    // buffer
    memmove(sampleBuffer, pCurrentSample, pObj - pCurrentSample + objSize);
    pObj = sampleBuffer + (pObj - pCurrentSample);
    pCurrentSample = sampleBuffer;
    MP4Timestamp prevFrameTimestamp = 0;

    // now process the rest of the video stream
    while ( true ) {
        if ( objType != MP4AV_MPEG4_VOP_START ) {
	  // keep it in the buffer until a VOP comes along
	  // Actually, do nothing, since we only want VOP
	  // headers in the stream - wmay 6/2004
	  //pObj += objSize;

        } else { // we have VOP
            u_int32_t sampleSize = (pObj + objSize) - pCurrentSample;

            vopType = MP4AV_Mpeg4GetVopType(pObj, objSize);

	    mpeg4_frame_t *fr = MALLOC_STRUCTURE(mpeg4_frame_t);
	    if (head == NULL) {
	      head = tail = fr;
	    } else {
	      tail->next = fr;
	      tail = fr;
	    }
	    fr->vopType = vopType;
	    fr->frameTimestamp = currentSampleTime;
	    fr->next = NULL;
            if ( variableFrameRate ) {
                // variable frame rate:  recalculate "mp4FrameDuration"
                if ( lastFrame ) {
                    // last frame
                    mp4FrameDuration = Mp4TimeScale / timeTicks;
                } else {
                    // not the last frame
                    u_int32_t vopTimeIncrement;
                    MP4AV_Mpeg4ParseVop(pObj, objSize, &vopType, timeBits, timeTicks, &vopTimeIncrement);
                    u_int32_t vopTime = vopTimeIncrement - lastVopTimeIncrement;
                    mp4FrameDuration = (Mp4TimeScale * vopTime) / timeTicks;
                    lastVopTimeIncrement = vopTimeIncrement % timeTicks;
                }
	    }
            if ( prevSampleSize > 0 ) { // not the first time
                // fill sample data & length to write
                u_int8_t* sampleData2Write = NULL;
                u_int32_t sampleLen2Write = 0;
                if ( doEncrypt ) {
                    if ( ismacrypEncryptSampleAddHeader(ismaCrypSId,
                                sampleSize,
                                sampleBuffer,
                                &sampleLen2Write,
                                &sampleData2Write) != 0 ) {
                        fprintf(stderr,
                                "%s: can't encrypt video sample and add header %u\n",
                                ProgName, sampleId);
                    }
                } else {
                    sampleData2Write = sampleBuffer;
                    sampleLen2Write = prevSampleSize;
                }

		
            if (variableFrameRate == false) {
	      double now_calc;
	      now_calc = sampleId;
	      now_calc *= Mp4TimeScale;
	      now_calc /= VideoFrameRate;
	      MP4Timestamp now_ts = (MP4Timestamp)now_calc;
	      mp4FrameDuration = now_ts - prevFrameTimestamp;
	      prevFrameTimestamp = now_ts;
	      currentSampleTime = now_ts;
	    }
                // Write the previous sample
                rc = MP4WriteSample(mp4File, trackId,
                        sampleData2Write, sampleLen2Write,
                        mp4FrameDuration, 0, prevVopType == VOP_TYPE_I);

                if ( doEncrypt && sampleData2Write ) {
                    // buffer allocated by encrypt function.
                    // must free it!
                    free(sampleData2Write);
                }

                if ( !rc ) {
                    fprintf(stderr,
                            "%s: can't write video frame %u\n",
                            ProgName, sampleId);
                    MP4DeleteTrack(mp4File, trackId);
                    return MP4_INVALID_TRACK_ID;
                }

                // deal with rendering time offsets
                // that can occur when B frames are being used
                // which is the case for all profiles except Simple Profile
		haveBframes |= (prevVopType == VOP_TYPE_B);

		if ( lastFrame ) {
		  // finish read frames
		  break;
		}
                sampleId++;
            } // not the first time

            currentSampleTime += mp4FrameDuration;

            // Move the current frame to the beginning of the
            // buffer
            memmove(sampleBuffer, pCurrentSample, sampleSize);
            prevSampleSize = sampleSize;
            prevVopType = vopType;
            // reset pointers
            pObj = pCurrentSample = sampleBuffer + sampleSize;
        } // we have VOP

        // load next object from bitstream
        if (!LoadNextObject(inFile, pObj, &objSize, &objType)) {
            if (objType != MP4AV_MPEG4_VOP_START)
                break;
            lastFrame = true;
            objSize = 0;
            continue;
        }
        // guard against buffer overflow
        if (pObj + objSize >= pCurrentSample + maxSampleSize) {
            fprintf(stderr,
                    "%s: buffer overflow, invalid video stream?\n", ProgName);
            MP4DeleteTrack(mp4File, trackId);
            return MP4_INVALID_TRACK_ID;
        }
#ifdef DEBUG_MP4V
        if (Verbosity & MP4_DETAILS_SAMPLE) {
            printf("MP4V type %x size %u\n",
                    objType, objSize);
        }
#endif
    }
    bool doRenderingOffset = false;
    switch (videoProfileLevel) {
    case MPEG4_SP_L0:
    case MPEG4_SP_L1:
    case MPEG4_SP_L2:
    case MPEG4_SP_L3:
      break;
    default:
      doRenderingOffset = true;
      break;
    }
   
    if (doRenderingOffset && haveBframes) {
      // only generate ctts (with rendering offset for I, P frames) when
      // we need one.  We saved all the frames types and timestamps above - 
      // we can't use MP4ReadSample, because the end frames might not have
      // been written 
      refVopId = 1;
      refVopTime = 0;
      MP4SampleId maxSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);
      // start with sample 2 - we know the first one is a I frame
      mpeg4_frame_t *fr = head->next; // skip the first one
      for (MP4SampleId ix = 2; ix <= maxSamples; ix++) {
	if (fr->vopType != VOP_TYPE_B) {
#ifdef DEBUG_MP4V_TS
            printf("sample %u %u renderingOffset "U64"\n",
		   refVopId, fr->vopType, fr->frameTimestamp - refVopTime);
#endif
	  MP4SetSampleRenderingOffset(mp4File, trackId, refVopId, 
				      fr->frameTimestamp - refVopTime);
	  refVopId = ix;
	  refVopTime = fr->frameTimestamp;
	}
	fr = fr->next;
      }
      
#ifdef DEBUG_MP4V_TS
      printf("sample %u %u renderingOffset "U64"\n",
	     refVopId, fr->vopType, fr->frameTimestamp - refVopTime);
#endif
      MP4SetSampleRenderingOffset(mp4File, trackId, refVopId, 
				  fr->frameTimestamp - refVopTime);
    }

    while (head != NULL) {
      tail = head->next;
      free(head);
      head = tail;
    }
    // terminate session if encrypting
    if (doEncrypt) {
        if (ismacrypEndSession(ismaCrypSId) != 0) {
            fprintf(stderr,
                    "%s: could not end the ISMAcryp session\n",
                    ProgName);
        }
    }

    return trackId;
}
Code Example #9
File: mod_mp4v2.c Project: odmanV2/freecenter
static switch_status_t do_write_video(switch_file_handle_t *handle, switch_frame_t *frame)
{
	uint32_t datalen = frame->datalen;
	switch_status_t status = SWITCH_STATUS_SUCCESS;
	int is_iframe = 0;
	uint32_t size;
	uint8_t *hdr = NULL;
	uint8_t fragment_type;
	uint8_t nal_type;
	uint8_t start_bit;
	mp4_file_context_t *context = handle->private_info;

	hdr = (uint8_t *)frame->data;
	fragment_type = hdr[0] & 0x1f;
	nal_type = hdr[1] & 0x1f;
	start_bit = hdr[1] & 0x80;
	is_iframe = (((fragment_type == 28 || fragment_type == 29) && nal_type == 5 && start_bit == 128) || fragment_type == 5 || fragment_type ==7 || fragment_type ==8) ? 1 : 0;

	// switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "%02x %02x %02x | len:%d m:%d st:%d i:%d\n", hdr[0], hdr[1], hdr[2], datalen, frame->m, start_bit, is_iframe);

	size = htonl(datalen);
	switch_buffer_write(context->buf, &size, 4);
	switch_buffer_write(context->buf, hdr, datalen);

	switch_mutex_lock(context->mutex);

	if (fragment_type == 7 && !context->sps_set) { //sps
		context->sps_set = 1;

		init_video_track(context->fd, &context->video, frame);
		if (context->video == MP4_INVALID_TRACK_ID) {
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "Error add video track!\n");
			switch_goto_status(SWITCH_STATUS_FALSE, end);
		}
	} else if (fragment_type == 8 && context->sps_set && !context->pps_set) { //pps
		MP4AddH264PictureParameterSet(context->fd, context->video, hdr, datalen);
		context->pps_set = 1;
	}

	if (nal_type == 7 || nal_type == 8 || frame->m == 0) {
	} else if (context->sps_set && context->pps_set) {
		uint32_t used = switch_buffer_inuse(context->buf);
		const void *data;
		int duration = 0;

		if (!context->timer.interval) {
			switch_core_timer_init(&context->timer, "soft", 1, 1, context->pool);
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "init timer\n");
		} else {
			switch_core_timer_sync(&context->timer);
		}

		duration = context->timer.samplecount - context->last_pts;

		// switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "samplecount: %u, duration: %u\n", context->timer.samplecount, duration);
		switch_buffer_peek_zerocopy(context->buf, &data);

		if (context->last_pts == 0) { // first image, write at the very beginning so we don't see a blank screen
			duration /= 2;
			MP4WriteSample(context->fd, context->video, data, used, duration, 0, is_iframe);

			if (duration > context->offset) {
				duration -= context->offset;
			} else {
				duration = 0;
			}
		}

		context->last_pts = context->timer.samplecount;

		// switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "samplecount: %u, duration: %u\n", context->timer.samplecount, duration);

		if (duration) {
			MP4WriteSample(context->fd, context->video, data, used, duration, 0, is_iframe);
		}
		switch_buffer_zero(context->buf);
	}

end:
	switch_mutex_unlock(context->mutex);

	return status;
}
Code Example #10
File: mod_mp4v2.c Project: odmanV2/freecenter
static switch_status_t mp4_file_write(switch_file_handle_t *handle, void *data, size_t *len)
{
	uint32_t datalen = *len * 2 * handle->channels;
	switch_status_t status = SWITCH_STATUS_SUCCESS;
	uint8_t buf[SWITCH_RECOMMENDED_BUFFER_SIZE];
	uint32_t encoded_rate;
	mp4_file_context_t *context = handle->private_info;
	uint32_t size = 0;

	context->audio_duration += *len;


	if (context->audio_type == MP4_PCM16_LITTLE_ENDIAN_AUDIO_TYPE) {
		size = datalen;
		memcpy(buf, data, datalen);
	} else {
		switch_core_codec_encode(&context->audio_codec, NULL,
								data, datalen,
								handle->samplerate,
								buf, &size, &encoded_rate, NULL);
	}

	switch_mutex_lock(context->mutex);

	if (!context->timer.interval) {
		switch_core_timer_init(&context->timer, "soft", 1, 1, context->pool);
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "init timer\n");
	} else if(!context->audio_start) { // try to make up some samples if the video has already started
		int i, count;
		uint8_t buf0[SWITCH_RECOMMENDED_BUFFER_SIZE] = { 0 };

		context->audio_start++;
		switch_core_timer_sync(&context->timer);

		count = context->timer.samplecount - context->offset;

		if (count > 0) {
			count /= *len;
		}

		if (context->audio_type != MP4_ULAW_AUDIO_TYPE) {
			count = 0; // todo: make this feature work for mp3/aac
		}

		if (count){
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_NOTICE, "video is short, make up %lu samples\n", count * (*len));
			MP4WriteSample(context->fd, context->audio, buf0, size, 0, 0, 1);
		}

		for (i = 1; i < count; i++) {
			MP4WriteSample(context->fd, context->audio, buf0, size, *len, 0, 1);
		}
	}

	if (context->audio_type == MP4_MPEG4_AUDIO_TYPE && size == 0) {
		// don't write 0
	} else {
		MP4WriteSample(context->fd, context->audio, buf, size, context->audio_duration, 0, 1);
		context->audio_duration = 0;
	}

	switch_mutex_unlock(context->mutex);

	return status;
}
Code Example #11
File: mod_mp4v2.c Project: odmanV2/freecenter
static void record_video_thread(switch_core_session_t *session, void *obj)
{
	struct record_helper *eh = obj;
	switch_channel_t *channel = switch_core_session_get_channel(session);
	switch_status_t status;
	switch_frame_t *read_frame;
	uint bytes;
	MP4FileHandle mp4;
	MP4TrackId video;
	unsigned char buf[40960];
	int len = 0;
	uint8_t iframe = 0;
	uint32_t *size = (uint32_t *)buf;
	uint8_t *hdr = NULL;
	uint8_t fragment_type;
	uint8_t nal_type;
	uint8_t start_bit;
	uint8_t *sps = NULL;
	// uint8_t *pps = NULL;
	int sps_set = 0;
	int pps_set = 0;

	eh->up = 1;
	mp4 = eh->fd;

	/* Tell the channel to request a fresh vid frame */
	switch_core_session_request_video_refresh(session);

	len = 0;
	while (switch_channel_ready(channel) && eh->up) {
		status = switch_core_session_read_video_frame(session, &read_frame, SWITCH_IO_FLAG_NONE, 0);

		if (!SWITCH_READ_ACCEPTABLE(status)) {
			break;
		}

		if (switch_test_flag(read_frame, SFF_CNG)) {
			continue;
		}

		bytes = read_frame->datalen;

		if (bytes > 2000) {
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "xxxxxxxx buffer overflow\n");
			continue;
		}

		hdr = read_frame->data;
		fragment_type = hdr[0] & 0x1f;
		nal_type = hdr[1] & 0x1f;
		start_bit = hdr[1] & 0x80;
		iframe = (((fragment_type == 28 || fragment_type == 29) && nal_type == 5 && start_bit == 128) || fragment_type == 5 || fragment_type ==7 || fragment_type ==8) ? 1 : 0;

#if 0
		switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "%02x %02x %02x | len:%d m:%d st:%d i:%d\n", hdr[0], hdr[1], hdr[2], bytes, read_frame->m, start_bit, iframe);
#endif

		// echo back
		switch_core_session_write_video_frame(session, read_frame, SWITCH_IO_FLAG_NONE, 0);

		if (fragment_type == 7 && !sps_set) { //sps
			sps = malloc(bytes);
			memcpy(sps, read_frame->data, bytes);
			sps_set = 1;

			switch_mutex_lock(eh->mutex);

			init_video_track(mp4, &video, read_frame);
			if (video == MP4_INVALID_TRACK_ID) {
				switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "Error add video track!\n");
				switch_mutex_unlock(eh->mutex);
				goto end;
			}

			switch_mutex_unlock(eh->mutex);
			continue;
		} else if (fragment_type == 8 && !pps_set) { //pps
			switch_mutex_lock(eh->mutex);
			MP4AddH264PictureParameterSet(mp4, video, read_frame->data, bytes);
			switch_mutex_unlock(eh->mutex);
			pps_set = 1;
			// continue;
		}

		if ((!sps_set) && (!pps_set)) {
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "Waiting for SPS/PPS\n");
			// continue;
		}

		len += 4 + read_frame->datalen;

		if (len > 40960) {
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "buffer overflow!!!! %d\n", len);
			len = 0;
			size = (uint32_t *)buf;
			continue;
		}

		*size = htonl(read_frame->datalen);
		memcpy(size + 1, read_frame->data, read_frame->datalen);

		size = (uint32_t *)((uint8_t *)size + 4 + read_frame->datalen);

		if (read_frame->m) {
			int duration = 0;

			switch_mutex_lock(eh->mutex);
			if (!eh->timer.interval) {
				switch_core_timer_init(&eh->timer, "soft", 1, 1, switch_core_session_get_pool(session));
				switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "init timer\n");
			} else {
				switch_core_timer_sync(&eh->timer);
			}
			switch_mutex_unlock(eh->mutex);

			if (eh->last_pts) {
				duration = eh->timer.samplecount - eh->last_pts;
			}
			eh->last_pts = eh->timer.samplecount;

			switch_mutex_lock(eh->mutex);
			MP4WriteSample(mp4, video, buf, len, duration, 0, iframe);
			switch_mutex_unlock(eh->mutex);
			len = 0;
			size = (uint32_t *)buf;
		}

	}

end:
	eh->up = 0;
	return;
}
Code Example #12
/**
    \fn save
*/
bool muxerMp4v2::save(void)
{
    bool result=true;
    int nbFrame=0;
    printf("[Mp4v2Muxer] Saving\n");
   

    initUI("Saving MP4V2");
    encoding->setContainer("MP4 (libmp4v2)");
    uint64_t lastSentDts=0;
    
    while(loadNextVideoFrame((&(in[nextWrite])))) 
    {
        bool kf=false;
        int other=!nextWrite;
        if(in[other].flags & AVI_KEY_FRAME) kf=true;

        ADM_assert(in[nextWrite].dts!=ADM_NO_PTS);
        ADM_assert(in[nextWrite].pts!=ADM_NO_PTS);
        if(in[other].pts==ADM_NO_PTS || in[other].dts==ADM_NO_PTS)
        {
            GUI_Error_HIG("Video","Video does not have enough timing information. Are you copying from AVI ?");
            goto theEnd;
        }

        

        uint64_t nextDts=in[nextWrite].dts;   // Delta between dts=duration of the frame (sort of)     
        uint64_t myDts=in[other].dts;
        uint64_t myPts=in[other].pts;        
        cprintf(">>next DTS=%"PRIu64", last DTS=%"PRIu64"delta=%"PRIu64"\n",nextDts,lastSentDts,nextDts-lastSentDts);

        encoding->pushVideoFrame(in[other].len,in[other].out_quantizer,myDts);
        uint64_t delta=myPts-lastSentDts; // composition time...
        delta=timeScale(delta);
        uint64_t duration=nextDts-lastSentDts;

        uint64_t scaled_duration=timeScale(duration);
        duration=inverseTimeScale(scaled_duration); // handle rounding error
        nbFrame++;
        cprintf("Sending frame duration=%"PRIu64", pts/dts=%"PRIu64"\n",lastSentDts,delta);
        if(false==MP4WriteSample(handle,videoTrackId,in[other].data,in[other].len,
                        scaled_duration, // duration
                        delta, // pts/dts offset
                        kf // Sync Sample
                        ))
        {
            ADM_error("Cannot write video sample\n");
            result=false;
            goto theEnd;
        }
        // update lastSentDts
        lastSentDts+=duration; // beginning of next frame...
        //
        cprintf("lastSendDts=%"PRIu64", next Dts=%"PRIu64", skew=%"PRId64"\n",lastSentDts,nextDts,
                        (int64_t)nextDts-(int64_t)lastSentDts);
        //
        fillAudio(lastSentDts);
        // toggle
        nextWrite=other;
        if(updateUI()==false)
            {  
                result=false;
                break;
            }
    }
    // Write last frame
    nextWrite=!nextWrite;
    int scale;
    if(videoIncrement>5000) scale=(1000000.0/videoIncrement);
        else       scale=100; // 10 ms
    MP4WriteSample(handle,videoTrackId,in[nextWrite].data,in[nextWrite].len,
                        (MP4Duration)90000/scale, 
                        0, // pts/dts offset
                        0 // Sync Sample
                        );
theEnd:
    close();
    
    
    if(muxerConfig.optimize && result==true)
    {
        encoding->setPhasis("Optimizing");
        string tmpTargetFileName=targetFileName+string(".tmp");
        if(!ADM_renameFile(targetFileName.c_str(),tmpTargetFileName.c_str()))
        {
            GUI_Error_HIG("","Cannot rename file (optimize)");
            return false;
        }
        // Optimize
        ADM_info("Optimizing...\n");
        MP4Optimize( tmpTargetFileName.c_str(), targetFileName.c_str() );
        // delete
        unlink(tmpTargetFileName.c_str());
    }
    closeUI();
    return result;
}
Code Example #13
int MP4Encoder::WriteH264Data(MP4FileHandle hMp4File,const unsigned char* pData,int size)
{
    if(hMp4File == NULL)
    {
        return -1;
    }
    if(pData == NULL)
    {
        return -1;
    }
    MP4ENC_NaluUnit nalu;
    int pos = 0, len = 0;
    while (len = ReadOneNaluFromBuf(pData,size,pos,nalu))
    {
        if(nalu.type == 0x07) // sps
        {
            // add the H.264 track
            m_videoId = MP4AddH264VideoTrack
                (hMp4File, 
                m_nTimeScale, 
                m_nTimeScale / m_nFrameRate, 
                m_nWidth,     // width
                m_nHeight,    // height
                nalu.data[1], // sps[1] AVCProfileIndication
                nalu.data[2], // sps[2] profile_compat
                nalu.data[3], // sps[3] AVCLevelIndication
                3);           // 4 bytes length before each NAL unit
            if (m_videoId == MP4_INVALID_TRACK_ID)
            {
                printf("add video track failed.\n");
                return 0;
            }
            MP4SetVideoProfileLevel(hMp4File, 1); //  Simple Profile @ Level 3

            MP4AddH264SequenceParameterSet(hMp4File,m_videoId,nalu.data,nalu.size);
        }
        else if(nalu.type == 0x08) // pps
        {
            MP4AddH264PictureParameterSet(hMp4File,m_videoId,nalu.data,nalu.size);
        }
        else
        {
            int datalen = nalu.size+4;
            unsigned char *data = new unsigned char[datalen];
            // in MP4, each NALU is preceded by 4 bytes holding its length (big-endian)
            data[0] = nalu.size>>24;
            data[1] = nalu.size>>16;
            data[2] = nalu.size>>8;
            data[3] = nalu.size&0xff;
            memcpy(data+4,nalu.data,nalu.size);
            if(!MP4WriteSample(hMp4File, m_videoId, data, datalen, MP4_INVALID_DURATION, 0, 1))
            {
                delete[] data;
                return 0;
            }
            delete[] data;
        }

        pos += len;
    }
    return pos;
}
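WriteH264Data() depends on ReadOneNaluFromBuf(), which is not shown here. The sketch below is a plausible Annex-B start-code scanner with the signature implied by the call site; the MP4ENC_NaluUnit layout and the exact behaviour of the real helper are assumptions, not the original implementation.

struct MP4ENC_NaluUnit
{
    int type;                   // nal_unit_type (low 5 bits of the first NALU byte)
    int size;                   // payload size, start code excluded
    const unsigned char *data;  // points into the caller's buffer
};

// Returns the number of bytes consumed from 'pos' (start code + payload),
// or 0 when no further NALU starts at or after 'pos'. (Sketch only.)
static int ReadOneNaluFromBuf(const unsigned char *buf, int size, int pos,
                              MP4ENC_NaluUnit &nalu)
{
    int i = pos;
    // locate the next 00 00 01 start code
    while (i + 3 < size && !(buf[i] == 0 && buf[i+1] == 0 && buf[i+2] == 1))
        ++i;
    if (i + 3 >= size)
        return 0;
    int payload = i + 3;        // first byte after the start code
    // the NALU ends at the next start code or at the end of the buffer
    int j = payload;
    while (j + 3 < size && !(buf[j] == 0 && buf[j+1] == 0 && buf[j+2] == 1))
        ++j;
    int end = (j + 3 < size) ? j : size;
    // drop a trailing 00 that actually belongs to a 4-byte start code
    if (end < size && end > payload && buf[end-1] == 0)
        --end;
    nalu.data = buf + payload;
    nalu.size = end - payload;
    nalu.type = buf[payload] & 0x1f;
    return end - pos;
}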
Code example #14
File: mpeg.cpp Project: acassis/emlinux-ssd1935
static MP4TrackId VideoCreate (MP4FileHandle mp4file, 
			       mpeg2ps_t *file, 
			       int vstream,
			       bool doEncrypt)
{
  double frame_rate = mpeg2ps_get_video_stream_framerate(file, vstream);
  uint8_t video_type;
  uint16_t w, h;
  MP4TrackId id;
  ismacryp_session_id_t ismaCrypSId;
  mp4v2_ismacrypParams *icPp =  (mp4v2_ismacrypParams *) malloc(sizeof(mp4v2_ismacrypParams));
  memset(icPp, 0, sizeof(mp4v2_ismacrypParams));


#ifdef _WIN32
  MP4Duration mp4FrameDuration;
  mp4FrameDuration = 
    (MP4Duration)((double)Mp4TimeScale / frame_rate);
#else
  MP4Duration mp4FrameDuration = 
    (MP4Duration)(Mp4TimeScale / frame_rate);
#endif

  h = mpeg2ps_get_video_stream_height(file, vstream);
  w = mpeg2ps_get_video_stream_width(file, vstream);

  video_type = mpeg2ps_get_video_stream_type(file, vstream) == MPEG_VIDEO_MPEG2 ?
    MP4_MPEG2_MAIN_VIDEO_TYPE : MP4_MPEG1_VIDEO_TYPE;

  if (doEncrypt) {
    // initialize session
    if (ismacrypInitSession(&ismaCrypSId,KeyTypeVideo) != 0) {
      fprintf(stderr, "%s: could not initialize the ISMAcryp session\n",
	      ProgName);
      return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetScheme(ismaCrypSId, &(icPp->scheme_type)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp scheme type. sid %d\n", 
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetSchemeVersion(ismaCrypSId, &(icPp->scheme_version)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp scheme ver. sid %d\n",
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetKMSUri(ismaCrypSId, &(icPp->kms_uri)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp kms uri. sid %d\n",
               ProgName, ismaCrypSId);
       if (icPp->kms_uri != NULL) free(icPp->kms_uri);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if ( ismacrypGetSelectiveEncryption(ismaCrypSId, &(icPp->selective_enc)) != ismacryp_rc_ok ) {
       fprintf(stderr, "%s: could not get ismacryp selec enc. sid %d\n",
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetKeyIndicatorLength(ismaCrypSId, &(icPp->key_ind_len)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp key ind len. sid %d\n",
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetIVLength(ismaCrypSId, &(icPp->iv_len)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp iv len. sid %d\n",
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    id = MP4AddEncVideoTrack(mp4file, 
			     Mp4TimeScale, 
			     mp4FrameDuration,
			     w, 
			     h,
                             icPp,
			     video_type);
  } else {
    id = MP4AddVideoTrack(mp4file, 
			  Mp4TimeScale, 
			  mp4FrameDuration,
			  w, 
			  h, 
			  video_type);
  }

  //printf("duration "U64" w %d h %d type %x\n", mp4FrameDuration, w, h, video_type);
  if (MP4GetNumberOfTracks(mp4file, MP4_VIDEO_TRACK_TYPE) == 1) {
    MP4SetVideoProfileLevel(mp4file, 0xfe); // undefined profile
  }

  if (id == MP4_INVALID_TRACK_ID) {
    fprintf(stderr, "%s:Couldn't add video track %d", ProgName, vstream);
    return MP4_INVALID_TRACK_ID;
  }
  uint8_t *buf;
  uint32_t blen;
  uint32_t frames = 1;
#if 0
  printf("Processing %lu video frames\n", frames_max);
#endif
  uint32_t refFrame = 1;
  uint8_t frame_type;
  while (mpeg2ps_get_video_frame(file, 
				 vstream,
				 &buf, 
				 &blen, 
				 &frame_type,
				 TS_90000,
				 NULL)) {
    if (buf[blen - 4] == 0 &&
	buf[blen - 3] == 0 &&
	buf[blen - 2] == 1) blen -= 4;
    
    // encrypt the sample if neeed
    if (doEncrypt) {
      u_int8_t* encSampleData = NULL;
      u_int32_t encSampleLen = 0;
      if (ismacrypEncryptSampleAddHeader(ismaCrypSId, blen, buf,
				&encSampleLen, &encSampleData) != 0) {
	fprintf(stderr,	
		"%s: can't encrypt video sample and add header %u\n", ProgName, id);
      }
      MP4WriteSample(mp4file, id, encSampleData, encSampleLen, 
		     mp4FrameDuration, 0, 
		     frame_type == 1 ? true : false);
      if (encSampleData != NULL) {
	free(encSampleData);
      }
    } else {
      MP4WriteSample(mp4file, id, buf, blen, mp4FrameDuration, 0, 
		     frame_type == 1 ? true : false);
#if 0
      printf("frame %d len %d duration "U64" ftype %d\n",
	     frames, blen, mp4FrameDuration, frame_type);
#endif
    }
    if (frame_type != 3) {
      // I or P frame
      MP4SetSampleRenderingOffset(mp4file, id, refFrame, 
				  (frames - refFrame) * mp4FrameDuration);
      refFrame = frames;
    }
    frames++;
#if 0
    if ((frames % 100) == 0) printf("%d frames\n", frames);
#endif
  }

  // if encrypting, terminate the ismacryp session
  if (doEncrypt) {
    if (ismacrypEndSession(ismaCrypSId) != 0) {
      fprintf(stderr, 
	      "%s: could not end the ISMAcryp session\n",
	      ProgName);
      return MP4_INVALID_TRACK_ID;
    }
  }
  return id;
}
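The refFrame bookkeeping in VideoCreate handles MPEG-1/2 B-frame reordering: frames arrive from mpeg2ps_get_video_frame() in decode order and are written immediately, and each time a new I or P frame shows up, the *previous* reference frame is given a rendering offset of (frames - refFrame) * mp4FrameDuration, i.e. it is pushed back in presentation by exactly the frames decoded since it was written. A small self-contained illustration of that bookkeeping follows; the frame sequence and duration are made-up example data, not library output.

#include <cstdio>
#include <vector>

int main()
{
    // 1 = I, 2 = P, 3 = B, mirroring the frame_type convention above
    const std::vector<int> decodeOrder = {1, 2, 3, 3, 2, 3, 3};   // I P B B P B B
    const unsigned frameDuration = 3000;                          // ticks, arbitrary

    unsigned frames = 1, refFrame = 1;
    for (int type : decodeOrder) {
        // (the real code calls MP4WriteSample here, sync sample iff type == 1)
        if (type != 3) {            // I or P: fix up the previous reference frame
            std::printf("sample %u -> rendering offset %u ticks\n",
                        refFrame, (frames - refFrame) * frameDuration);
            refFrame = frames;
        }
        ++frames;
    }
    // Output: sample 1 first gets offset 0, then is corrected to 3000 when the
    // P frame arrives (one-frame reordering delay); sample 2 gets 9000, i.e. it
    // is displayed after the two B frames that decode later but present earlier.
    return 0;
}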
Code example #15
File: mpeg.cpp Project: acassis/emlinux-ssd1935
static MP4TrackId AudioCreate (MP4FileHandle mp4file, 
			       mpeg2ps_t *file, 
			       int astream,
			       bool doEncrypt)
{
  uint16_t freq;
  int type;
  MP4TrackId id;
  uint16_t samples_per_frame;
  uint8_t *buf = NULL;
  uint32_t blen = 0;
  uint32_t frame_num = 1;
  ismacryp_session_id_t ismaCrypSId;
  mp4v2_ismacrypParams *icPp =  (mp4v2_ismacrypParams *) malloc(sizeof(mp4v2_ismacrypParams));
  MP4AV_Mp3Header hdr;
  u_int8_t mpegVersion;
  memset(icPp, 0, sizeof(mp4v2_ismacrypParams));

  type = mpeg2ps_get_audio_stream_type(file, astream);

  if (type != MPEG_AUDIO_MPEG) {
    fprintf(stderr, "Unsupported audio format %d in audio stream %d\n", 
	    type, astream);
    return MP4_INVALID_TRACK_ID;
  }

  freq = mpeg2ps_get_audio_stream_sample_freq(file, astream);

  if (mpeg2ps_get_audio_frame(file, 
			      astream,
			      &buf, 
			      &blen,
			      TS_90000,
			      NULL, 
			      NULL) == false) {
    fprintf(stderr, "No audio tracks in audio stream %d\n", astream);
    return MP4_INVALID_TRACK_ID;
  }
  
  hdr = MP4AV_Mp3HeaderFromBytes(buf);
  mpegVersion = MP4AV_Mp3GetHdrVersion(hdr);
  samples_per_frame = MP4AV_Mp3GetHdrSamplingWindow(hdr);

  u_int8_t audioType = MP4AV_Mp3ToMp4AudioType(mpegVersion);
  
  if (audioType == MP4_INVALID_AUDIO_TYPE
      || samples_per_frame == 0) {
    fprintf(stderr,	
	    "%s: data in file doesn't appear to be valid audio\n",
	    ProgName);
    return MP4_INVALID_TRACK_ID;
  }

  MP4Duration duration = (90000 * samples_per_frame) / freq;

  if (doEncrypt) {
    // initialize the ismacryp session
    if (ismacrypInitSession(&ismaCrypSId,KeyTypeAudio) != 0) {
      fprintf(stderr, 
	      "%s: could not initialize the ISMAcryp session\n",
	      ProgName);
      return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetScheme(ismaCrypSId, &(icPp->scheme_type)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp scheme type. sid %d\n", 
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetSchemeVersion(ismaCrypSId, &(icPp->scheme_version)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp scheme ver. sid %d\n",
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetKMSUri(ismaCrypSId, &(icPp->kms_uri)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp kms uri. sid %d\n",
               ProgName, ismaCrypSId);
       if (icPp->kms_uri != NULL) free(icPp->kms_uri);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if ( ismacrypGetSelectiveEncryption(ismaCrypSId, &(icPp->selective_enc)) != ismacryp_rc_ok ) {
       fprintf(stderr, "%s: could not get ismacryp selec enc. sid %d\n",
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetKeyIndicatorLength(ismaCrypSId, &(icPp->key_ind_len)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp key ind len. sid %d\n",
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetIVLength(ismaCrypSId, &(icPp->iv_len)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp iv len. sid %d\n",
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    id = MP4AddEncAudioTrack(mp4file, 
			     90000, 
			     duration,
                             icPp,
			     audioType);
  } else {
    id = MP4AddAudioTrack(mp4file, 
			  90000, 
			  duration,
			  audioType);
  }
  
  if (id == MP4_INVALID_TRACK_ID) {
    fprintf(stderr, 
	    "%s: can't create audio track from stream %d\n", 
	    ProgName, astream);
    return MP4_INVALID_TRACK_ID;
  }

  if (MP4GetNumberOfTracks(mp4file, MP4_AUDIO_TRACK_TYPE) == 1) {
    MP4SetAudioProfileLevel(mp4file, 0xFE);
  }

  do {
    // encrypt if needed
     if (doEncrypt) {
       u_int8_t* encSampleData = NULL;
       u_int32_t encSampleLen = 0;
       if (ismacrypEncryptSampleAddHeader(ismaCrypSId, blen, buf,
					  &encSampleLen, &encSampleData) != 0) {
	 fprintf(stderr,	
		 "%s: can't encrypt audio sample and add header %u\n", ProgName, id);
       }
       // now write the sample
       if (!MP4WriteSample(mp4file, id, encSampleData, encSampleLen)) {
         fprintf(stderr, "%s: can't write audio frame %u, stream %d\n",
                 ProgName, frame_num, astream);
         if (encSampleData != NULL) free(encSampleData);
         MP4DeleteTrack(mp4file, id);
         return MP4_INVALID_TRACK_ID;
       }
       if (encSampleData != NULL) {
         free(encSampleData);
       }
     } else {
       // not encrypting: write the plain sample
       if (!MP4WriteSample(mp4file, id, buf, blen)) {
         fprintf(stderr, "%s: can't write audio frame %u, stream %d\n",
                 ProgName, frame_num, astream);
         MP4DeleteTrack(mp4file, id);
         return MP4_INVALID_TRACK_ID;
       }
     }
    frame_num++;
#if 0
    if ((frame_num % 100) == 0) printf("Audio frame %d\n", frame_num);
#endif
  }  while (mpeg2ps_get_audio_frame(file, 
				    astream, 
				    &buf, 
				    &blen,
				    TS_90000,
				    NULL, NULL));
  
  // if encrypting, terminate the ismacryp session
  if (doEncrypt) {
    if (ismacrypEndSession(ismaCrypSId) != 0) {
      fprintf(stderr, 
	      "%s: could not end the ISMAcryp session\n",
	      ProgName);
      return MP4_INVALID_TRACK_ID;
    }
  }

  return id;
}
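AudioCreate relies on MPEG audio frames carrying a fixed number of PCM samples, so a single duration value, expressed in the 90 kHz track timescale, covers every sample in the track and MP4WriteSample can be called without an explicit per-sample duration. A minimal illustration of that calculation; the samples-per-frame and sample-rate figures below are the standard MPEG audio values, not numbers read from a file.

#include <cstdio>

// duration of one MPEG audio frame in 90 kHz ticks, as computed in AudioCreate
static unsigned frameDuration90k(unsigned samplesPerFrame, unsigned freq)
{
    return (90000u * samplesPerFrame) / freq;
}

int main()
{
    std::printf("MPEG-1 Layer III, 44.1 kHz:  %u ticks\n", frameDuration90k(1152, 44100)); // 2351
    std::printf("MPEG-1 Layer III, 48 kHz:    %u ticks\n", frameDuration90k(1152, 48000)); // 2160
    std::printf("MPEG-2 Layer III, 22.05 kHz: %u ticks\n", frameDuration90k(576, 22050));  // 2351
    return 0;
}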