Example No. 1
MP4Duration MP4AV_GetAudioSampleDuration(
	MP4FileHandle mp4File, 
	MP4TrackId mediaTrackId)
{
	MP4SampleId sampleId = 1;
	MP4SampleId numSamples = 
		MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);

	// find first non-zero size sample
	// we need to search in case an empty audio sample has been added
	// at the beginning of the track to achieve sync with video
	for (; sampleId <= numSamples; sampleId++) {
		if (MP4GetSampleSize(mp4File, mediaTrackId, sampleId) > 0) {
			break;
		}
	}
	// no non-empty sample was found
	if (sampleId > numSamples) {
		return MP4_INVALID_DURATION;
	}

	// get sample duration
	return MP4GetSampleDuration(mp4File, mediaTrackId, sampleId);

	// OPTION may want to scan all non-zero sized samples
	// and check that sample durations are +/-1 the same value
}
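The OPTION note above can be made concrete: scan every non-empty sample and confirm the durations agree to within one tick. Below is a minimal sketch under that reading; the helper name is invented here, but the mp4v2 calls are the same ones used in the example.

static bool MP4AV_CheckConstantAudioDuration(
	MP4FileHandle mp4File,
	MP4TrackId mediaTrackId,
	MP4Duration expectedDuration)
{
	MP4SampleId numSamples =
		MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);

	for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {
		// skip the empty padding samples described above
		if (MP4GetSampleSize(mp4File, mediaTrackId, sampleId) == 0) {
			continue;
		}
		MP4Duration duration =
			MP4GetSampleDuration(mp4File, mediaTrackId, sampleId);

		// accept +/- 1 tick of jitter around the expected duration
		if (duration + 1 < expectedDuration || duration > expectedDuration + 1) {
			return false;
		}
	}
	return true;
}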
Example No. 2
extern "C" bool MP4AV_Rfc3016Hinter(
    MP4FileHandle mp4File,
    MP4TrackId mediaTrackId,
    u_int16_t maxPayloadSize)
{
    u_int32_t numSamples = MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);
    u_int32_t maxSampleSize = MP4GetTrackMaxSampleSize(mp4File, mediaTrackId);

    if (numSamples == 0 || maxSampleSize == 0) {
        return false;
    }

    MP4TrackId hintTrackId =
        MP4AV_Rfc3016_HintTrackCreate(mp4File, mediaTrackId);

    if (hintTrackId == MP4_INVALID_TRACK_ID) {
        return false;
    }

    u_int8_t* pSampleBuffer = (u_int8_t*)malloc(maxSampleSize);
    if (pSampleBuffer == NULL) {
        MP4DeleteTrack(mp4File, hintTrackId);
        return false;
    }

    for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {
        u_int32_t sampleSize = maxSampleSize;
        MP4Timestamp startTime;
        MP4Duration duration;
        MP4Duration renderingOffset;
        bool isSyncSample;

        bool rc = MP4ReadSample(
                      mp4File, mediaTrackId, sampleId,
                      &pSampleBuffer, &sampleSize,
                      &startTime, &duration,
                      &renderingOffset, &isSyncSample);

        if (rc == false ||
                MP4AV_Rfc3016_HintAddSample(mp4File,
                                            hintTrackId,
                                            sampleId,
                                            pSampleBuffer,
                                            sampleSize,
                                            duration,
                                            renderingOffset,
                                            isSyncSample,
                                            maxPayloadSize) == false) {
            MP4DeleteTrack(mp4File, hintTrackId);
            CHECK_AND_FREE(pSampleBuffer);
            return false;
        }
    }
    CHECK_AND_FREE(pSampleBuffer);

    return true;
}
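A hedged sketch of how this hinter might be driven follows: hint tracks require the file to be opened writable, so this assumes MP4Modify with its default flags (the exact signature differs between mp4v2 releases), and the 1460-byte RTP payload limit and file name are illustrative only.

    // outline only: error handling trimmed; payload size of 1460 is illustrative
    MP4FileHandle mp4File = MP4Modify("movie.mp4");
    MP4TrackId videoTrackId = MP4FindTrackId(mp4File, 0, MP4_VIDEO_TRACK_TYPE);

    if (videoTrackId != MP4_INVALID_TRACK_ID) {
        if (MP4AV_Rfc3016Hinter(mp4File, videoTrackId, 1460) == false) {
            fprintf(stderr, "hinting failed for track %u\n", videoTrackId);
        }
    }
    MP4Close(mp4File);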
Example No. 3
    MP4Reader(const std::string &file_path)
            : time_scale(9 * MP4_MSECS_TIME_SCALE)
            , file_path(file_path)
            , handle(MP4_INVALID_FILE_HANDLE)
            , video_track_id(MP4_INVALID_TRACK_ID)
            , next_video_sample_idx(1)
            , video_sample(nullptr)
            , video_timescale(0)
            , video_sample_max_size(0)
            , video_sample_number(0)
            , video_duration(0)
            , pSeqHeaders(nullptr)
            , pSeqHeaderSize(nullptr)
            , pPictHeaders(nullptr)
            , pPictHeaderSize(nullptr)
    {
        handle = MP4Read(this->file_path.c_str());

        video_track_id = MP4FindTrackId(handle, 0, MP4_VIDEO_TRACK_TYPE);
        if (video_track_id != MP4_INVALID_TRACK_ID) {
            video_timescale = MP4GetTrackTimeScale(handle, video_track_id);
            video_sample_max_size = MP4GetTrackMaxSampleSize(handle, video_track_id) * 2;
            video_duration = MP4GetTrackDuration(handle, video_track_id);
            video_sample = new unsigned char[video_sample_max_size];
            video_sample_number = MP4GetTrackNumberOfSamples(handle, video_track_id);

            if (MP4GetTrackH264SeqPictHeaders(handle,
                                              video_track_id,
                                              &pSeqHeaders,
                                              &pSeqHeaderSize,
                                              &pPictHeaders,
                                              &pPictHeaderSize))
            {
                printf("Get SPS(%d) and PPS(%d)\n", *pSeqHeaderSize, *pPictHeaderSize);

                for(int i = 0; (pSeqHeaders[i] && pSeqHeaderSize[i]); i++) {
                    printf("SPS(%d): %02x %02x %02x %02x %02x\n", i,
                           pSeqHeaders[i][0], pSeqHeaders[i][1], pSeqHeaders[i][2],
                           pSeqHeaders[i][3], pSeqHeaders[i][4]);
                }
                for(int i = 0; (pPictHeaders[i] && pPictHeaderSize[i]); i++) {
                    printf("PPS(%d): %02x %02x %02x %02x %02x\n", i,
                           pPictHeaders[i][0], pPictHeaders[i][1], pPictHeaders[i][2],
                           pPictHeaders[i][3], pPictHeaders[i][4]);
                }
            }
        }
    }
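The constructor above never releases what it acquires. A minimal matching destructor sketch follows, assuming the SPS/PPS arrays returned by MP4GetTrackH264SeqPictHeaders are heap-allocated by the library and NULL-terminated exactly as the printing loops assume.

    ~MP4Reader()
    {
        // assumption: entries and arrays come from the library's malloc and the
        // arrays are NULL-terminated, mirroring the printing loops above
        if (pSeqHeaders) {
            for (int i = 0; pSeqHeaders[i]; i++) free(pSeqHeaders[i]);
            free(pSeqHeaders);
            free(pSeqHeaderSize);
        }
        if (pPictHeaders) {
            for (int i = 0; pPictHeaders[i]; i++) free(pPictHeaders[i]);
            free(pPictHeaders);
            free(pPictHeaderSize);
        }
        delete[] video_sample;
        if (handle != MP4_INVALID_FILE_HANDLE) {
            MP4Close(handle);
        }
    }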
Example No. 4
/**************************************************************************
 * Quicktime stream base class functions
 **************************************************************************/
CMp4ByteStream::CMp4ByteStream (CMp4File *parent,
				MP4TrackId track,
				const char *type,
				bool has_video)
  : COurInByteStream(type)
{
#ifdef ISMACRYP_DEBUG
  my_enc_file = fopen("encbuffer.raw", "w");
  my_unenc_file = fopen("unencbuffer.raw", "w");
  my_unenc_file2 = fopen("unencbuffer2.raw", "w");
#endif
#ifdef OUTPUT_TO_FILE
  char buffer[80];
  strcpy(buffer, type);
  strcat(buffer, ".raw");
  m_output_file = fopen(buffer, "w");
#endif
  m_track = track;
  m_frame_on = 1;
  m_parent = parent;
  m_eof = false;
  MP4FileHandle fh = parent->get_file();
  m_frames_max = MP4GetTrackNumberOfSamples(fh, m_track);
  mp4f_message(LOG_DEBUG, "%s - %u samples", type, m_frames_max);
  m_max_frame_size = MP4GetTrackMaxSampleSize(fh, m_track) + 4; 
  m_sample_freq = MP4GetTrackTimeScale(fh, m_track);
  m_buffer = (u_int8_t *) malloc(m_max_frame_size * sizeof(u_int8_t));
  m_has_video = has_video;
  m_frame_in_buffer = 0xffffffff;
  MP4Duration trackDuration;
  trackDuration = MP4GetTrackDuration(fh, m_track);
  uint64_t max_ts;
  max_ts = MP4ConvertFromTrackDuration(fh, 
				       m_track, 
				       trackDuration,
				       MP4_MSECS_TIME_SCALE);
  m_max_time = UINT64_TO_DOUBLE(max_ts);
  m_max_time /= 1000.0;
  mp4f_message(LOG_DEBUG, 
	       "MP4 %s max time is "U64" %g", type, max_ts, m_max_time);
  read_frame(1, NULL);
}
Example No. 5
static int
aac_init (DB_fileinfo_t *_info, DB_playItem_t *it) {
    aac_info_t *info = (aac_info_t *)_info;

    info->file = deadbeef->fopen (deadbeef->pl_find_meta (it, ":URI"));
    if (!info->file) {
        return -1;
    }

    // probe
    float duration = -1;
    int samplerate = -1;
    int channels = -1;
    int totalsamples = -1;

    info->junk = deadbeef->junk_get_leading_size (info->file);
    if (!info->file->vfs->is_streaming ()) {
        if (info->junk >= 0) {
            deadbeef->fseek (info->file, info->junk, SEEK_SET);
        }
        else {
            info->junk = 0;
        }
    }
    else {
        deadbeef->fset_track (info->file, it);
    }

    info->mp4track = -1;
#if USE_MP4FF
    info->mp4reader.read = aac_fs_read;
    info->mp4reader.write = NULL;
    info->mp4reader.seek = aac_fs_seek;
    info->mp4reader.truncate = NULL;
    info->mp4reader.user_data = info;
#else
    info->mp4reader.open = aac_fs_open;
    info->mp4reader.seek = aac_fs_seek;
    info->mp4reader.read = aac_fs_read;
    info->mp4reader.write = NULL;
    info->mp4reader.close = aac_fs_close;
#endif

    if (!info->file->vfs->is_streaming ()) {
#ifdef USE_MP4FF
        trace ("aac_init: mp4ff_open_read %s\n", deadbeef->pl_find_meta (it, ":URI"));
        info->mp4file = mp4ff_open_read (&info->mp4reader);
        if (info->mp4file) {
            int ntracks = mp4ff_total_tracks (info->mp4file);
            if (ntracks > 0) {
                trace ("m4a container detected, ntracks=%d\n", ntracks);
                int i = -1;
                unsigned char*  buff = 0;
                unsigned int    buff_size = 0;
                for (i = 0; i < ntracks; i++) {
                    mp4AudioSpecificConfig mp4ASC;
                    mp4ff_get_decoder_config (info->mp4file, i, &buff, &buff_size);
                    if(buff){
                        int rc = AudioSpecificConfig(buff, buff_size, &mp4ASC);
                        if(rc < 0)
                            continue;
                        break;
                    }
                }
                trace ("mp4 probe-buffer size: %d\n", buff_size);

                if (i != ntracks && buff) 
                {
                    trace ("mp4 track: %d\n", i);
                    int samples = mp4ff_num_samples(info->mp4file, i);
                    info->mp4samples = samples;
                    info->mp4track = i;

                    // init mp4 decoding
                    info->dec = NeAACDecOpen ();
                    unsigned long srate;
                    unsigned char ch;
                    if (NeAACDecInit2(info->dec, buff, buff_size, &srate, &ch) < 0) {
                        trace ("NeAACDecInit2 returned error\n");
                        free (buff);
                        return -1;
                    }
                    samplerate = srate;
                    channels = ch;
                    samples = (int64_t)samples * srate / mp4ff_time_scale (info->mp4file, i);
                    totalsamples = samples;
                    NeAACDecConfigurationPtr conf = NeAACDecGetCurrentConfiguration (info->dec);
                    conf->dontUpSampleImplicitSBR = 1;
                    NeAACDecSetConfiguration (info->dec, conf);
                    mp4AudioSpecificConfig mp4ASC;
                    if (NeAACDecAudioSpecificConfig(buff, buff_size, &mp4ASC) >= 0)
                    {
                        info->mp4framesize = 1024;
                        if (mp4ASC.frameLengthFlag == 1) {
                            info->mp4framesize = 960;
                        }
//                        if (mp4ASC.sbr_present_flag == 1) {
//                            info->mp4framesize *= 2;
//                        }
                    }
                    totalsamples *= info->mp4framesize;
                    duration = (float)totalsamples  / samplerate;
                }
                else {
                    mp4ff_close (info->mp4file);
                    info->mp4file = NULL;
                }
                if (buff) {
                    free (buff);
                }
            }
            else {
                mp4ff_close (info->mp4file);
                info->mp4file = NULL;
            }
        }
// {{{ libmp4v2 code
#else
        trace ("aac_init: MP4ReadProvider %s\n", deadbeef->pl_find_meta (it, ":URI"));
        info->mp4file = MP4ReadProvider (deadbeef->pl_find_meta (it, ":URI"), 0, &info->mp4reader);
        info->mp4track = MP4FindTrackId(info->mp4file, 0, "audio", 0);
        trace ("aac_init: MP4FindTrackId returned %d\n", info->mp4track);
        if (info->mp4track >= 0) {
            info->timescale = MP4GetTrackTimeScale(info->mp4file, info->mp4track);

            u_int8_t* pConfig;
            uint32_t configSize = 0;
            bool res = MP4GetTrackESConfiguration(info->mp4file, info->mp4track, &pConfig, &configSize);

            mp4AudioSpecificConfig mp4ASC;
            int rc = AudioSpecificConfig(pConfig, configSize, &mp4ASC);
            if (rc >= 0) {
                _info->samplerate = mp4ASC.samplingFrequency;
                _info->channels = MP4GetTrackAudioChannels (info->mp4file, info->mp4track);
                totalsamples = MP4GetTrackNumberOfSamples (info->mp4file, info->mp4track) * 1024 * _info->channels;

                // init mp4 decoding
                info->dec = NeAACDecOpen ();
                unsigned long srate;
                unsigned char ch;
                if (NeAACDecInit2(info->dec, pConfig, configSize, &srate, &ch) < 0) {
                    trace ("NeAACDecInit2 returned error\n");
                    return -1;
                }
                samplerate = srate;
                channels = ch;
                NeAACDecConfigurationPtr conf = NeAACDecGetCurrentConfiguration (info->dec);
                conf->dontUpSampleImplicitSBR = 1;
                NeAACDecSetConfiguration (info->dec, conf);
                mp4AudioSpecificConfig mp4ASC;
                if (NeAACDecAudioSpecificConfig(pConfig, configSize, &mp4ASC) >= 0)
                {
                    info->mp4framesize = 1024;
                    if (mp4ASC.frameLengthFlag == 1) {
                        info->mp4framesize = 960;
                    }
//                    if (mp4ASC.sbr_present_flag == 1) {
//                        info->mp4framesize *= 2;
//                    }
                }
                //totalsamples *= info->mp4framesize;
                free (pConfig);
                info->maxSampleSize = MP4GetTrackMaxSampleSize(info->mp4file, info->mp4track);
                info->samplebuffer = malloc (info->maxSampleSize);
                info->mp4sample = 1;
            }
            else {
                MP4Close (info->mp4file);
                info->mp4file = NULL;
            }
        }
        else {
            MP4Close (info->mp4file);
            info->mp4file = NULL;
        }
#endif
// }}}
        if (!info->mp4file) {
            trace ("mp4 track not found, looking for aac stream...\n");

            if (info->junk >= 0) {
                deadbeef->fseek (info->file, info->junk, SEEK_SET);
            }
            else {
                deadbeef->rewind (info->file);
            }
            int offs = parse_aac_stream (info->file, &samplerate, &channels, &duration, &totalsamples);
            if (offs == -1) {
                trace ("aac stream not found\n");
                return -1;
            }
            if (offs > info->junk) {
                info->junk = offs;
            }
            if (info->junk >= 0) {
                deadbeef->fseek (info->file, info->junk, SEEK_SET);
            }
            else {
                deadbeef->rewind (info->file);
            }
            trace ("found aac stream (junk: %d, offs: %d)\n", info->junk, offs);
        }

        _info->fmt.channels = channels;
        _info->fmt.samplerate = samplerate;
    }
    else {
        // sync before attempting to init
        int samplerate, channels;
        float duration;
        int offs = parse_aac_stream (info->file, &samplerate, &channels, &duration, NULL);
        if (offs < 0) {
            trace ("aac: parse_aac_stream failed\n");
            return -1;
        }
        if (offs > info->junk) {
            info->junk = offs;
        }
        trace("parse_aac_stream returned %x\n", offs);
        deadbeef->pl_replace_meta (it, "!FILETYPE", "AAC");
    }

//    duration = (float)totalsamples / samplerate;
//    deadbeef->pl_set_item_duration (it, duration);

    _info->fmt.bps = 16;
    _info->plugin = &plugin;

    if (!info->mp4file) {
        //trace ("NeAACDecGetCapabilities\n");
        //unsigned long caps = NeAACDecGetCapabilities();

        trace ("NeAACDecOpen\n");
        info->dec = NeAACDecOpen ();

        trace ("prepare for NeAACDecInit: fread %d from offs %lld\n", AAC_BUFFER_SIZE, deadbeef->ftell (info->file));
        info->remaining = deadbeef->fread (info->buffer, 1, AAC_BUFFER_SIZE, info->file);

        NeAACDecConfigurationPtr conf = NeAACDecGetCurrentConfiguration (info->dec);
//        conf->dontUpSampleImplicitSBR = 1;
        NeAACDecSetConfiguration (info->dec, conf);
        unsigned long srate;
        unsigned char ch;
        trace ("NeAACDecInit (%d bytes)\n", info->remaining);
        int consumed = NeAACDecInit (info->dec, info->buffer, info->remaining, &srate, &ch);
        trace ("NeAACDecInit returned samplerate=%d, channels=%d, consumed: %d\n", (int)srate, (int)ch, consumed);
        if (consumed < 0) {
            trace ("NeAACDecInit returned %d\n", consumed);
            return -1;
        }
        if (consumed > info->remaining) {
            trace ("NeAACDecInit consumed more than available! wtf?\n");
            return -1;
        }
        if (consumed == info->remaining) {
            info->remaining = 0;
        }
        else if (consumed > 0) {
            memmove (info->buffer, info->buffer + consumed, info->remaining - consumed);
            info->remaining -= consumed;
        }
        _info->fmt.channels = ch;
        _info->fmt.samplerate = srate;
    }

    if (!info->file->vfs->is_streaming ()) {
        if (it->endsample > 0) {
            info->startsample = it->startsample;
            info->endsample = it->endsample;
            plugin.seek_sample (_info, 0);
        }
        else {
            info->startsample = 0;
            info->endsample = totalsamples-1;
        }
    }
    trace ("totalsamples: %d, endsample: %d, samples-from-duration: %d\n", totalsamples-1, info->endsample, (int)deadbeef->pl_get_item_duration (it)*44100);

    for (int i = 0; i < _info->fmt.channels; i++) {
        _info->fmt.channelmask |= 1 << i;
    }
    info->noremap = 0;
    info->remap[0] = -1;
    trace ("init success\n");

    return 0;
}
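The duration math in the mp4ff branch (container samples rescaled to the decoder output rate, then multiplied by the AAC frame length) is compact enough to factor out. Here is a small standalone sketch of the same calculation; the function name and the numbers in the comments are hypothetical.

// hypothetical inputs: 2580 container samples, 44100 Hz decoder output,
// mp4 time scale 44100, AAC frame length 1024
static float
m4a_duration_seconds (int64_t nsamples, int samplerate, int time_scale, int frame_size) {
    int64_t samples = nsamples * samplerate / time_scale; // rescale to output rate
    int64_t totalsamples = samples * frame_size;          // PCM sample frames
    return (float)totalsamples / samplerate;              // seconds
}
// m4a_duration_seconds (2580, 44100, 44100, 1024) is roughly 59.9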
Example No. 6
// returns -1 for error, 0 for mp4, 1 for aac
int
aac_probe (DB_FILE *fp, const char *fname, MP4FILE_CB *cb, float *duration, int *samplerate, int *channels, int *totalsamples, int *mp4track, MP4FILE *pmp4) {
    // try mp4
    trace ("aac_probe: pos=%lld, junk=%d\n", deadbeef->ftell (fp), ((aac_info_t*)cb->user_data)->junk);

    if (mp4track) {
        *mp4track = -1;
    }
    if (*pmp4) {
        *pmp4 = NULL;
    }
    *duration = -1;
#ifdef USE_MP4FF
    trace ("mp4ff_open_read\n");
    mp4ff_t *mp4 = mp4ff_open_read (cb);
#else
    MP4FileHandle mp4 = MP4ReadProvider (fname, 0, cb);
#endif
    if (!mp4) {
        trace ("not an mp4 file\n");
        return -1;
    }
    if (pmp4) {
        *pmp4 = mp4;
    }
#ifdef USE_MP4FF
    int ntracks = mp4ff_total_tracks (mp4);
    if (ntracks > 0) {
        trace ("m4a container detected, ntracks=%d\n", ntracks);
        int i = -1;
        trace ("looking for mp4 data...\n");
        int sr = -1;
        unsigned char*  buff = 0;
        unsigned int    buff_size = 0;
        for (i = 0; i < ntracks; i++) {
            mp4AudioSpecificConfig mp4ASC;
            mp4ff_get_decoder_config(mp4, i, &buff, &buff_size);
            if (buff) {
                int rc = AudioSpecificConfig(buff, buff_size, &mp4ASC);
                sr = mp4ASC.samplingFrequency;
                if(rc < 0) {
                    free (buff);
                    buff = 0;
                    continue;
                }
                break;
            }
        }
        if (i != ntracks && buff) 
        {
            trace ("found audio track (%d)\n", i);
            // init mp4 decoding
            NeAACDecHandle dec = NeAACDecOpen ();
            unsigned long srate;
            unsigned char ch;
            if (NeAACDecInit2(dec, buff, buff_size, &srate, &ch) < 0) {
                trace ("NeAACDecInit2 returned error\n");
                goto error;
            }
            *samplerate = srate;
            *channels = ch;
            int samples = mp4ff_num_samples(mp4, i);
            samples = (int64_t)samples * srate / mp4ff_time_scale (mp4, i);
            int tsamples = samples;
            NeAACDecConfigurationPtr conf = NeAACDecGetCurrentConfiguration (dec);
            conf->dontUpSampleImplicitSBR = 1;
            NeAACDecSetConfiguration (dec, conf);
            mp4AudioSpecificConfig mp4ASC;
            int mp4framesize;
            if (NeAACDecAudioSpecificConfig(buff, buff_size, &mp4ASC) >= 0)
            {
                mp4framesize = 1024;
                if (mp4ASC.frameLengthFlag == 1) {
                    mp4framesize = 960;
                }
                // commented this out, since it fixes double-duration bug on
                // some mp4 files
                //if (mp4ASC.sbr_present_flag == 1) {
                //    mp4framesize *= 2;
                //}
            }
            else {
                trace ("NeAACDecAudioSpecificConfig failed, can't get mp4framesize\n");
                goto error;
            }
            tsamples *= mp4framesize;

            trace ("mp4 nsamples=%d, samplerate=%d, timescale=%d, duration=%lld\n", samples, *samplerate, mp4ff_time_scale(mp4, i), mp4ff_get_track_duration(mp4, i));
            *duration = (float)tsamples / (*samplerate);
            trace ("mp4 duration: %f (tsamples %d/samplerate %d)\n", *duration, tsamples, *samplerate);
            
            NeAACDecClose (dec);

            if (totalsamples) {
                *totalsamples = tsamples;
            }
            if (mp4track) {
                *mp4track = i;
            }
            if (!*pmp4) {
                mp4ff_close (mp4);
            }
            return 0;
error:
            NeAACDecClose (dec);
            free (buff);
            if (!*pmp4) {
                mp4ff_close (mp4);
            }
            return -1;
        }
        else {
            trace ("audio track not found\n");
            mp4ff_close (mp4);
            mp4 = NULL;
        }
        if (buff) {
            free (buff);
            buff = NULL;
        }

    }
#else
    MP4FileHandle mp4File = mp4;
    MP4TrackId trackId = MP4FindTrackId(mp4File, 0, "audio", 0);
    trace ("trackid: %d\n", trackId);
    uint32_t timeScale = MP4GetTrackTimeScale(mp4File, trackId);
    MP4Duration trackDuration = MP4GetTrackDuration(mp4File, trackId);
    MP4SampleId numSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);
    u_int8_t* pConfig;
    uint32_t configSize = 0;
    bool res = MP4GetTrackESConfiguration(mp4File, trackId, &pConfig, &configSize);
    if (res && pConfig) {
        mp4AudioSpecificConfig mp4ASC;
        int rc = AudioSpecificConfig(pConfig, configSize, &mp4ASC);
        free (pConfig);
        if (rc >= 0) {
            *samplerate = mp4ASC.samplingFrequency;
            *channels = MP4GetTrackAudioChannels (mp4File, trackId);
//            int64_t duration = MP4ConvertFromTrackDuration (mp4File, trackId, trackDuration, timeScale);
            int samples = MP4GetTrackNumberOfSamples (mp4File, trackId) * 1024 * (*channels);
            trace ("mp4 nsamples=%d, timescale=%d, samplerate=%d\n", samples, timeScale, *samplerate);
            *duration = (float)samples / (*samplerate);

            if (totalsamples) {
                *totalsamples = samples;
            }
            if (mp4track) {
                *mp4track = trackId;
            }
            if (!*pmp4) {
                MP4Close (mp4);
            }
            return 0;
        }
    }
#endif
    if (*pmp4) {
        *pmp4 = NULL;
    }

    if (mp4) {
#if USE_MP4FF
        mp4ff_close (mp4);
#else
        MP4Close (mp4);
#endif
        mp4 = NULL;
    }
    trace ("mp4 track not found, looking for aac stream...\n");

    // not an mp4, try raw aac
#if USE_MP4FF
    deadbeef->rewind (fp);
#endif
    if (parse_aac_stream (fp, samplerate, channels, duration, totalsamples) == -1) {
        trace ("aac stream not found\n");
        return -1;
    }
    trace ("found aac stream\n");
    return 1;
}
Example No. 7
void ExtractTrack(MP4FileHandle mp4File, MP4TrackId trackId, 
	bool sampleMode, MP4SampleId sampleId, char* dstFileName)
{
	char outFileName[PATH_MAX];
	int outFd = -1;
	int openFlags = O_WRONLY | O_TRUNC | OPEN_CREAT;

	if (!sampleMode) {
		if (dstFileName == NULL) {
			snprintf(outFileName, sizeof(outFileName), 
				"%s.t%u", Mp4FileName, trackId);
		} else {
			snprintf(outFileName, sizeof(outFileName), 
				"%s", dstFileName);
		}

		outFd = open(outFileName, openFlags, 0644);
		if (outFd == -1) {
			fprintf(stderr, "%s: can't open %s: %s\n",
				ProgName, outFileName, strerror(errno));
			return;
		}
	}

	MP4SampleId numSamples;

	if (sampleMode && sampleId != MP4_INVALID_SAMPLE_ID) {
		numSamples = sampleId;
	} else {
		sampleId = 1;
		numSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);
	}

	u_int8_t* pSample;
	u_int32_t sampleSize;

	for ( ; sampleId <= numSamples; sampleId++) {
		int rc;

		// signals to ReadSample() that it should malloc a buffer for us
		pSample = NULL;
		sampleSize = 0;

		rc = MP4ReadSample(mp4File, trackId, sampleId, &pSample, &sampleSize);
		if (rc == 0) {
			fprintf(stderr, "%s: read sample %u for %s failed\n",
				ProgName, sampleId, outFileName);
			break;
		}

		if (sampleMode) {
			snprintf(outFileName, sizeof(outFileName), "%s.t%u.s%u",
				Mp4FileName, trackId, sampleId);

			outFd = open(outFileName, openFlags, 0644);

			if (outFd == -1) {
				fprintf(stderr, "%s: can't open %s: %s\n",
					ProgName, outFileName, strerror(errno));
				break;
			}
		}

		rc = write(outFd, pSample, sampleSize);
		if (rc == -1 || (u_int32_t)rc != sampleSize) {
			fprintf(stderr, "%s: write to %s failed: %s\n",
				ProgName, outFileName, strerror(errno));
			break;
		}

		free(pSample);

		if (sampleMode) {
			close(outFd);
			outFd = -1;
		}
	}

	if (outFd != -1) {
		close(outFd);
	}
}
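A hedged call sketch for ExtractTrack: ProgName and Mp4FileName are globals in the original tool, so this only outlines the whole-track extraction path, and the output file name is illustrative.

	// sketch: dump the first track of the file to "track.raw"
	MP4FileHandle mp4File = MP4Read(Mp4FileName);
	if (mp4File != MP4_INVALID_FILE_HANDLE) {
		MP4TrackId trackId = MP4FindTrackId(mp4File, 0);
		char dstName[] = "track.raw";
		ExtractTrack(mp4File, trackId, false, MP4_INVALID_SAMPLE_ID, dstName);
		MP4Close(mp4File);
	}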
Example No. 8
MP4TrackId Mp4vCreator(MP4FileHandle mp4File, FILE* inFile, bool doEncrypt,
		       bool allowVariableFrameRate)
{
    bool rc;

    u_int8_t sampleBuffer[256 * 1024 * 2];
    u_int8_t* pCurrentSample = sampleBuffer;
    u_int32_t maxSampleSize = sizeof(sampleBuffer) / 2;
    u_int32_t prevSampleSize = 0;

    // the current syntactical object
    // typically 1:1 with a sample
    // but not always, i.e. non-VOP's
    u_int8_t* pObj = pCurrentSample;
    u_int32_t objSize;
    u_int8_t objType;

    // the current sample
    MP4SampleId sampleId = 1;
    MP4Timestamp currentSampleTime = 0;

    // the last reference VOP
    MP4SampleId refVopId = 1;
    MP4Timestamp refVopTime = 0;

    // track configuration info
    u_int8_t videoProfileLevel = MPEG4_SP_L3;
    u_int8_t timeBits = 15;
    u_int16_t timeTicks = 30000;
    u_int16_t frameDuration = 3000;
    u_int16_t frameWidth = 320;
    u_int16_t frameHeight = 240;
    u_int32_t esConfigSize = 0;
    int vopType = 0;
    int prevVopType = 0;
    bool foundVOSH = false, foundVO = false, foundVOL = false;
    u_int32_t lastVopTimeIncrement = 0;
    bool variableFrameRate = false;
    bool lastFrame = false;
    bool haveBframes = false;
    mpeg4_frame_t *head = NULL, *tail = NULL;

    // start reading objects until we get the first VOP
    while (LoadNextObject(inFile, pObj, &objSize, &objType)) {
        // guard against buffer overflow
        if (pObj + objSize >= pCurrentSample + maxSampleSize) {
            fprintf(stderr,
                    "%s: buffer overflow, invalid video stream?\n", ProgName);
            return MP4_INVALID_TRACK_ID;
        }
#ifdef DEBUG_MP4V
        if (Verbosity & MP4_DETAILS_SAMPLE) {
            printf("MP4V type %x size %u\n",
                    objType, objSize);
        }
#endif

        if (objType == MP4AV_MPEG4_VOSH_START) {
            MP4AV_Mpeg4ParseVosh(pObj, objSize,
                    &videoProfileLevel);
            foundVOSH = true;
        } else if (objType == MP4AV_MPEG4_VO_START) {
            foundVO = true;
        } else if (objType == MP4AV_MPEG4_VOL_START) {
            MP4AV_Mpeg4ParseVol(pObj, objSize,
                    &timeBits, &timeTicks, &frameDuration,
                    &frameWidth, &frameHeight);

            foundVOL = true;
#ifdef DEBUG_MP4V
            printf("ParseVol: timeBits %u timeTicks %u frameDuration %u\n",
                    timeBits, timeTicks, frameDuration);
#endif

        } else if (foundVOL == true || objType == MP4AV_MPEG4_VOP_START) {
            esConfigSize = pObj - pCurrentSample;
            // ready to set up mp4 track
            break;
        }
        /* XXX why do we need this if ?
         * It looks like it will remove this object ... XXX */
	// It does.  On Purpose.  wmay 6/2004
        if (objType != MP4AV_MPEG4_USER_DATA_START) {
            pObj += objSize;
        }
    }

    if (foundVOSH == false) {
        fprintf(stderr,
                "%s: no VOSH header found in MPEG-4 video.\n"
                "This can cause problems with players other than mp4player. \n",
                ProgName);
    } else {
        if (VideoProfileLevelSpecified &&
                videoProfileLevel != VideoProfileLevel) {
            fprintf(stderr,
                    "%s: You have specified a different video profile level than was detected in the VOSH header\n"
                    "The level you specified was %d and %d was read from the VOSH\n",
                    ProgName, VideoProfileLevel, videoProfileLevel);
        }
    }
    if (foundVO == false) {
        fprintf(stderr,
                "%s: No VO header found in mpeg-4 video.\n"
                "This can cause problems with players other than mp4player\n",
                ProgName);
    }
    if (foundVOL == false) {
        fprintf(stderr,
                "%s: fatal: No VOL header found in mpeg-4 video stream\n",
                ProgName);
        return MP4_INVALID_TRACK_ID;
    }

    // convert frame duration to canonical time scale
    // note zero value for frame duration signals variable rate video
    if (timeTicks == 0) {
        timeTicks = 1;
    }
    u_int32_t mp4FrameDuration = 0;

    if (VideoFrameRate) {
      mp4FrameDuration = (u_int32_t)(((double)Mp4TimeScale) / VideoFrameRate);    
    } else if (frameDuration) {
	  VideoFrameRate = frameDuration;
	  VideoFrameRate /= timeTicks;
	  mp4FrameDuration = (Mp4TimeScale * frameDuration) / timeTicks;
    } else {
      if (allowVariableFrameRate == false ) {
	fprintf(stderr,
		"%s: variable rate video stream signalled,"
		" please specify average frame rate with -r option\n"
		" or --variable-frame-rate argument\n",
		ProgName);
	return MP4_INVALID_TRACK_ID;
      }

        variableFrameRate = true;
    }

    ismacryp_session_id_t ismaCrypSId;
    mp4v2_ismacrypParams *icPp =  (mp4v2_ismacrypParams *) malloc(sizeof(mp4v2_ismacrypParams));
    memset(icPp, 0, sizeof(mp4v2_ismacrypParams));


    // initialize ismacryp session if encrypting
    if (doEncrypt) {

        if (ismacrypInitSession(&ismaCrypSId,KeyTypeVideo) != 0) {
            fprintf(stderr, "%s: could not initialize the ISMAcryp session\n",
                    ProgName);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetScheme(ismaCrypSId, &(icPp->scheme_type)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp scheme type. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetSchemeVersion(ismaCrypSId, &(icPp->scheme_version)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp scheme ver. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetKMSUri(ismaCrypSId, &(icPp->kms_uri)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp kms uri. sid %d\n",
                    ProgName, ismaCrypSId);
            CHECK_AND_FREE(icPp->kms_uri);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if ( ismacrypGetSelectiveEncryption(ismaCrypSId, &(icPp->selective_enc)) != ismacryp_rc_ok ) {
            fprintf(stderr, "%s: could not get ismacryp selec enc. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetKeyIndicatorLength(ismaCrypSId, &(icPp->key_ind_len)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp key ind len. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetIVLength(ismaCrypSId, &(icPp->iv_len)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp iv len. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
    }

    // create the new video track
    MP4TrackId trackId;
    if (doEncrypt) {
        trackId =
            MP4AddEncVideoTrack(
                    mp4File,
                    Mp4TimeScale,
                    mp4FrameDuration,
                    frameWidth,
                    frameHeight,
                    icPp,
                    MP4_MPEG4_VIDEO_TYPE);
    } else {
        trackId =
            MP4AddVideoTrack(
                    mp4File,
                    Mp4TimeScale,
                    mp4FrameDuration,
                    frameWidth,
                    frameHeight,
                    MP4_MPEG4_VIDEO_TYPE);
    }

    if (trackId == MP4_INVALID_TRACK_ID) {
        fprintf(stderr,
                "%s: can't create video track\n", ProgName);
        return MP4_INVALID_TRACK_ID;
    }

    if (VideoProfileLevelSpecified) {
        videoProfileLevel = VideoProfileLevel;
    }
    if (MP4GetNumberOfTracks(mp4File, MP4_VIDEO_TRACK_TYPE) == 1) {
        MP4SetVideoProfileLevel(mp4File, videoProfileLevel);
    }
    printf("es config size is %d\n", esConfigSize);
    if (esConfigSize) {
        MP4SetTrackESConfiguration(mp4File, trackId,
                pCurrentSample, esConfigSize);

        // move past ES config, so it doesn't go into first sample
        pCurrentSample += esConfigSize;
    }
    // Move the current frame to the beginning of the
    // buffer
    memmove(sampleBuffer, pCurrentSample, pObj - pCurrentSample + objSize);
    pObj = sampleBuffer + (pObj - pCurrentSample);
    pCurrentSample = sampleBuffer;
    MP4Timestamp prevFrameTimestamp = 0;

    // now process the rest of the video stream
    while ( true ) {
        if ( objType != MP4AV_MPEG4_VOP_START ) {
	  // keep it in the buffer until a VOP comes along
	  // Actually, do nothing, since we only want VOP
	  // headers in the stream - wmay 6/2004
	  //pObj += objSize;

        } else { // we have VOP
            u_int32_t sampleSize = (pObj + objSize) - pCurrentSample;

            vopType = MP4AV_Mpeg4GetVopType(pObj, objSize);

	    mpeg4_frame_t *fr = MALLOC_STRUCTURE(mpeg4_frame_t);
	    if (head == NULL) {
	      head = tail = fr;
	    } else {
	      tail->next = fr;
	      tail = fr;
	    }
	    fr->vopType = vopType;
	    fr->frameTimestamp = currentSampleTime;
	    fr->next = NULL;
            if ( variableFrameRate ) {
                // variable frame rate:  recalculate "mp4FrameDuration"
                if ( lastFrame ) {
                    // last frame
                    mp4FrameDuration = Mp4TimeScale / timeTicks;
                } else {
                    // not the last frame
                    u_int32_t vopTimeIncrement;
                    MP4AV_Mpeg4ParseVop(pObj, objSize, &vopType, timeBits, timeTicks, &vopTimeIncrement);
                    u_int32_t vopTime = vopTimeIncrement - lastVopTimeIncrement;
                    mp4FrameDuration = (Mp4TimeScale * vopTime) / timeTicks;
                    lastVopTimeIncrement = vopTimeIncrement % timeTicks;
                }
	    }
            if ( prevSampleSize > 0 ) { // not the first time
                // fill sample data & length to write
                u_int8_t* sampleData2Write = NULL;
                u_int32_t sampleLen2Write = 0;
                if ( doEncrypt ) {
                    if ( ismacrypEncryptSampleAddHeader(ismaCrypSId,
                                sampleSize,
                                sampleBuffer,
                                &sampleLen2Write,
                                &sampleData2Write) != 0 ) {
                        fprintf(stderr,
                                "%s: can't encrypt video sample and add header %u\n",
                                ProgName, sampleId);
                    }
                } else {
                    sampleData2Write = sampleBuffer;
                    sampleLen2Write = prevSampleSize;
                }

		
            if (variableFrameRate == false) {
	      double now_calc;
	      now_calc = sampleId;
	      now_calc *= Mp4TimeScale;
	      now_calc /= VideoFrameRate;
	      MP4Timestamp now_ts = (MP4Timestamp)now_calc;
	      mp4FrameDuration = now_ts - prevFrameTimestamp;
	      prevFrameTimestamp = now_ts;
	      currentSampleTime = now_ts;
	    }
                // Write the previous sample
                rc = MP4WriteSample(mp4File, trackId,
                        sampleData2Write, sampleLen2Write,
                        mp4FrameDuration, 0, prevVopType == VOP_TYPE_I);

                if ( doEncrypt && sampleData2Write ) {
                    // buffer allocated by encrypt function.
                    // must free it!
                    free(sampleData2Write);
                }

                if ( !rc ) {
                    fprintf(stderr,
                            "%s: can't write video frame %u\n",
                            ProgName, sampleId);
                    MP4DeleteTrack(mp4File, trackId);
                    return MP4_INVALID_TRACK_ID;
                }

                // deal with rendering time offsets
                // that can occur when B frames are being used
                // which is the case for all profiles except Simple Profile
		haveBframes |= (prevVopType == VOP_TYPE_B);

		if ( lastFrame ) {
		  // finish read frames
		  break;
		}
                sampleId++;
            } // not the first time

            currentSampleTime += mp4FrameDuration;

            // Move the current frame to the beginning of the
            // buffer
            memmove(sampleBuffer, pCurrentSample, sampleSize);
            prevSampleSize = sampleSize;
            prevVopType = vopType;
            // reset pointers
            pObj = pCurrentSample = sampleBuffer + sampleSize;
        } // we have VOP

        // load next object from bitstream
        if (!LoadNextObject(inFile, pObj, &objSize, &objType)) {
            if (objType != MP4AV_MPEG4_VOP_START)
                break;
            lastFrame = true;
            objSize = 0;
            continue;
        }
        // guard against buffer overflow
        if (pObj + objSize >= pCurrentSample + maxSampleSize) {
            fprintf(stderr,
                    "%s: buffer overflow, invalid video stream?\n", ProgName);
            MP4DeleteTrack(mp4File, trackId);
            return MP4_INVALID_TRACK_ID;
        }
#ifdef DEBUG_MP4V
        if (Verbosity & MP4_DETAILS_SAMPLE) {
            printf("MP4V type %x size %u\n",
                    objType, objSize);
        }
#endif
    }
    bool doRenderingOffset = false;
    switch (videoProfileLevel) {
    case MPEG4_SP_L0:
    case MPEG4_SP_L1:
    case MPEG4_SP_L2:
    case MPEG4_SP_L3:
      break;
    default:
      doRenderingOffset = true;
      break;
    }
   
    if (doRenderingOffset && haveBframes) {
      // only generate ctts (with rendering offset for I, P frames) when
      // we need one.  We saved all the frames types and timestamps above - 
      // we can't use MP4ReadSample, because the end frames might not have
      // been written 
      refVopId = 1;
      refVopTime = 0;
      MP4SampleId maxSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);
      // start with sample 2 - we know the first one is a I frame
      mpeg4_frame_t *fr = head->next; // skip the first one
      for (MP4SampleId ix = 2; ix <= maxSamples; ix++) {
	if (fr->vopType != VOP_TYPE_B) {
#ifdef DEBUG_MP4V_TS
            printf("sample %u %u renderingOffset "U64"\n",
		   refVopId, fr->vopType, fr->frameTimestamp - refVopTime);
#endif
	  MP4SetSampleRenderingOffset(mp4File, trackId, refVopId, 
				      fr->frameTimestamp - refVopTime);
	  refVopId = ix;
	  refVopTime = fr->frameTimestamp;
	}
	fr = fr->next;
      }
      
#ifdef DEBUG_MP4V_TS
      printf("sample %u %u renderingOffset "U64"\n",
	     refVopId, fr->vopType, fr->frameTimestamp - refVopTime);
#endif
      MP4SetSampleRenderingOffset(mp4File, trackId, refVopId, 
				  fr->frameTimestamp - refVopTime);
    }

    while (head != NULL) {
      tail = head->next;
      free(head);
      head = tail;
    }
    // terminate session if encrypting
    if (doEncrypt) {
        if (ismacrypEndSession(ismaCrypSId) != 0) {
            fprintf(stderr,
                    "%s: could not end the ISMAcryp session\n",
                    ProgName);
        }
    }

    return trackId;
}
Example No. 9
extern "C" bool MP4AV_AVSMHinter(
				 MP4FileHandle mp4File, 
				 MP4TrackId mediaTrackId, 
				 u_int16_t maxPayloadSize)
{
  u_int32_t numSamples = MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);
  u_int32_t maxSampleSize = MP4GetTrackMaxSampleSize(mp4File, mediaTrackId);
	
  uint32_t sizeLength;

  if (numSamples == 0 || maxSampleSize == 0) {
    return false;
  }

  /*if (MP4GetTrackAVSMLengthSize(mp4File, mediaTrackId, &sizeLength) == false) {
    return false;
  }*/
  sizeLength = 4;	// assume a 4-byte NALU length field, since the MP4GetTrackAVSMLengthSize query above is disabled

  MP4TrackId hintTrackId = 
    MP4AV_AVSM_HintTrackCreate(mp4File, mediaTrackId);				//****AVSMspecial****

  if (hintTrackId == MP4_INVALID_TRACK_ID) {
    return false;
  }

  u_int8_t* pSampleBuffer = (u_int8_t*)malloc(maxSampleSize);
  if (pSampleBuffer == NULL) {
    MP4DeleteTrack(mp4File, hintTrackId);
    return false;
  }
  for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {
    u_int32_t sampleSize = maxSampleSize;
    MP4Timestamp startTime;
    MP4Duration duration;
    MP4Duration renderingOffset;
    bool isSyncSample;	// sync sample (key frame), as marked in the stss box

    bool rc = MP4ReadSample(
			    mp4File, mediaTrackId, sampleId, 
			    &pSampleBuffer, &sampleSize, 
			    &startTime, &duration, 
			    &renderingOffset, &isSyncSample);

    if (!rc) {
      MP4DeleteTrack(mp4File, hintTrackId);
      CHECK_AND_FREE(pSampleBuffer);
      return false;
    }

    MP4AV_AVSM_HintAddSample(mp4File,								//****AVSMspecial****
			     hintTrackId,
			     sampleId,
			     pSampleBuffer,
			     sampleSize,
			     sizeLength,
			     duration,
			     renderingOffset,
			     isSyncSample,
			     maxPayloadSize);
	
  }
   CHECK_AND_FREE(pSampleBuffer);

  return true;
}
Example No. 10
bool MP4AV_AudioInterleaveHinter( 
	MP4FileHandle mp4File, 
	MP4TrackId mediaTrackId, 
	MP4TrackId hintTrackId,
	MP4Duration sampleDuration, 
	u_int8_t stride, 
	u_int8_t bundle,
	u_int16_t maxPayloadSize,
	MP4AV_AudioConcatenator pConcatenator)
{
	bool rc;
	u_int32_t numSamples = 
		MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);

	MP4SampleId* pSampleIds = new MP4SampleId[bundle];

	uint32_t sampleIds = 0;
	for (u_int32_t i = 1; i <= numSamples; i += stride * bundle) {
		for (u_int32_t j = 0; j < stride; j++) {
			u_int32_t k;
			for (k = 0; k < bundle; k++) {

				MP4SampleId sampleId = i + j + (k * stride);

				// out of samples for this bundle
				if (sampleId > numSamples) {
					break;
				}

				// add sample to this hint
				pSampleIds[k] = sampleId;
				sampleIds++;
			}

			if (k == 0) {
				break;
			}

			// compute hint duration
			// note this is used to control the RTP timestamps 
			// that are emitted for the packet,
			// it isn't the actual duration of the samples in the packet
			MP4Duration hintDuration;
			if (j + 1 == stride) {
				// at the end of the track
				if (i + (stride * bundle) > numSamples) {
					hintDuration = ((numSamples - i) - j) * sampleDuration;
					if (hintDuration == 0) {
					  hintDuration = sampleDuration;
					}
				} else {
					hintDuration = ((stride * bundle) - j) * sampleDuration;
				}
			} else {
				hintDuration = sampleDuration;
			}

			// write hint
			rc = (*pConcatenator)(mp4File, mediaTrackId, hintTrackId,
				k, pSampleIds, hintDuration, maxPayloadSize);
			sampleIds = 0;

			if (!rc) {
				delete [] pSampleIds;
				return false;
			}
		}
	}

	delete [] pSampleIds;

	return true;
}
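The stride/bundle indexing above is easier to see with concrete numbers: with stride = 3 and bundle = 2, the first outer iteration groups samples (1,4), (2,5) and (3,6) into three hints. The sketch below is illustrative only and simply prints that grouping.

// illustrative only: prints the sample ids that each hint would bundle together
static void DumpInterleavePattern(u_int32_t numSamples, u_int8_t stride, u_int8_t bundle)
{
	for (u_int32_t i = 1; i <= numSamples; i += stride * bundle) {
		for (u_int32_t j = 0; j < stride; j++) {
			printf("hint:");
			for (u_int32_t k = 0; k < bundle; k++) {
				MP4SampleId sampleId = i + j + (k * stride);
				if (sampleId > numSamples) {
					break;
				}
				printf(" %u", sampleId);
			}
			printf("\n");
		}
	}
}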
Example No. 11
extern "C" bool MP4AV_Rfc3016LatmHinter (MP4FileHandle mp4File,
        MP4TrackId mediaTrackId,
        u_int16_t maxPayloadSize)
{
    u_int32_t numSamples = MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);
    u_int32_t maxSampleSize = MP4GetTrackMaxSampleSize(mp4File, mediaTrackId);
    MP4Duration sampleDuration =
        MP4AV_GetAudioSampleDuration(mp4File, mediaTrackId);

    if (sampleDuration == MP4_INVALID_DURATION) {
        return false;
    }

    if (numSamples == 0 || maxSampleSize == 0) {
        return false;
    }


    /* get the MPEG-4 audio configuration (AudioSpecificConfig) */
    u_int8_t* pAudioSpecificConfig;
    u_int32_t AudioSpecificConfigSize;

    if (MP4GetTrackESConfiguration(mp4File, mediaTrackId,
                                   &pAudioSpecificConfig,
                                   &AudioSpecificConfigSize) == false)
        return false;

    if (pAudioSpecificConfig == NULL ||
            AudioSpecificConfigSize == 0) return false;

    uint8_t channels = MP4AV_AacConfigGetChannels(pAudioSpecificConfig);
    uint32_t freq = MP4AV_AacConfigGetSamplingRate(pAudioSpecificConfig);
    uint8_t type = MP4AV_AacConfigGetAudioObjectType(pAudioSpecificConfig);

    uint8_t *pConfig;
    uint32_t configSize;

    MP4AV_LatmGetConfiguration(&pConfig, &configSize,
                               pAudioSpecificConfig, AudioSpecificConfigSize);
    free(pAudioSpecificConfig);

    if (pConfig == NULL || configSize == 0) {
        CHECK_AND_FREE(pConfig);
        return false;
    }

    MP4TrackId hintTrackId =
        MP4AddHintTrack(mp4File, mediaTrackId);

    if (hintTrackId == MP4_INVALID_TRACK_ID) {
        free(pConfig);
        return false;
    }
    u_int8_t payloadNumber = MP4_SET_DYNAMIC_PAYLOAD;

    char buffer[10];
    if (channels != 1) {
        snprintf(buffer, sizeof(buffer), "%u", channels);
    }

    /* convert it into ASCII form */
    char* sConfig = MP4BinaryToBase16(pConfig, configSize);
    free(pConfig);
    if (sConfig == NULL ||
            MP4SetHintTrackRtpPayload(mp4File, hintTrackId,
                                      "MP4A-LATM", &payloadNumber, 0,
                                      channels != 1 ? buffer : NULL) == false) {
        MP4DeleteTrack(mp4File, hintTrackId);
        return false;
    }

    uint32_t profile_level;
    // from gpac code
    switch (type) {
    case 2:
        if (channels <= 2) profile_level = freq <= 24000 ? 0x28 : 0x29;
        else profile_level = freq <= 48000 ? 0x2a : 0x2b;
        break;
    case 5:
        if (channels <= 2) profile_level = freq < 24000 ? 0x2c : 0x2d;
        else profile_level = freq <= 48000 ? 0x2e : 0x2f;
        break;
    default:
        if (channels <= 2) profile_level = freq < 24000 ? 0x0e : 0x0f;
        else profile_level = 0x10;
        break;
    }

    /* create the appropriate SDP attribute */
    char* sdpBuf = (char*)malloc(strlen(sConfig) + 128);

    if (sdpBuf == NULL) {
        free(sConfig);
        MP4DeleteTrack(mp4File, hintTrackId);
        return false;
    }
    snprintf(sdpBuf,
             strlen(sConfig) + 128,
             "a=fmtp:%u profile-level-id=%u; cpresent=0; config=%s;\015\012",
             payloadNumber,
             profile_level,
             sConfig);

    /* add this to the track's sdp */
    bool val = MP4AppendHintTrackSdp(mp4File, hintTrackId, sdpBuf);

    free(sConfig);
    free(sdpBuf);
    if (val == false) {
        MP4DeleteTrack(mp4File, hintTrackId);
        return false;
    }

    for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {
        uint8_t buffer[32];
        uint32_t offset = 0;
        uint32_t sampleSize =
            MP4GetSampleSize(mp4File, mediaTrackId, sampleId);
        uint32_t size_left = sampleSize;

        while (size_left > 0) {
            if (size_left > 0xff) {
                size_left -= 0xff;
                buffer[offset] = 0xff;
            } else {
                buffer[offset] = size_left;
                size_left = 0;
            }
            offset++;
        }
        if (MP4AddRtpHint(mp4File, hintTrackId) == false ||
                MP4AddRtpPacket(mp4File, hintTrackId, true) == false ||
                MP4AddRtpImmediateData(mp4File, hintTrackId,
                                       buffer, offset) == false ||
                MP4AddRtpSampleData(mp4File, hintTrackId,
                                    sampleId, 0, sampleSize) == false ||
                MP4WriteRtpHint(mp4File, hintTrackId, sampleDuration) == false) {
            MP4DeleteTrack(mp4File, hintTrackId);
            return false;
        }
    }
    return true;

}
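The size_left loop above emits the LATM PayloadLengthInfo prefix: the sample length is written as a run of 0xFF bytes followed by a terminating remainder byte. As an illustrative check, a 600-byte sample yields the three prefix bytes 0xFF 0xFF 0x5A (255 + 255 + 90); the standalone sketch below reproduces just that encoding, under a hypothetical helper name.

// illustrative only: reproduces the PayloadLengthInfo prefix for one sample size
static uint32_t EncodeLatmLengthPrefix(uint32_t sampleSize, uint8_t* prefix)
{
    uint32_t offset = 0;
    uint32_t size_left = sampleSize;
    while (size_left > 0) {
        if (size_left > 0xff) {
            size_left -= 0xff;
            prefix[offset] = 0xff;
        } else {
            prefix[offset] = (uint8_t)size_left;
            size_left = 0;
        }
        offset++;
    }
    return offset;
}
// EncodeLatmLengthPrefix(600, prefix) writes 0xff 0xff 0x5a and returns 3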
Example No. 12
static void DumpTrack (MP4FileHandle mp4file, MP4TrackId tid, 
		       bool dump_off, bool dump_rend)
{
  uint32_t numSamples;
  MP4SampleId sid;
  uint8_t *buffer;
  uint32_t max_frame_size;
  uint32_t timescale;
  uint64_t msectime;
  const char *media_data_name;
  uint32_t len_size = 0;
  uint8_t video_type = 0;
  numSamples = MP4GetTrackNumberOfSamples(mp4file, tid);
  max_frame_size = MP4GetTrackMaxSampleSize(mp4file, tid) + 4;
  media_data_name = MP4GetTrackMediaDataName(mp4file, tid);
  if (strcasecmp(media_data_name, "avc1") == 0) {
    MP4GetTrackH264LengthSize(mp4file, tid, &len_size);
  } else if (strcasecmp(media_data_name, "mp4v") == 0) {
    video_type = MP4GetTrackEsdsObjectTypeId(mp4file, tid);
  }
  buffer = (uint8_t *)malloc(max_frame_size);
  if (buffer == NULL) {
    printf("couldn't get buffer\n");
    return;
  }

  timescale = MP4GetTrackTimeScale(mp4file, tid);
  printf("mp4file %s, track %d, samples %d, timescale %d\n", 
	 Mp4FileName, tid, numSamples, timescale);

  for (sid = 1; sid <= numSamples; sid++) {
    MP4Timestamp sampleTime;
    MP4Duration sampleDuration, sampleRenderingOffset;
    bool isSyncSample = FALSE;
    bool ret;
    u_int8_t *temp;
    uint32_t this_frame_size = max_frame_size;
    temp = buffer;
    ret = MP4ReadSample(mp4file, 
			tid,
			sid,
			&temp,
			&this_frame_size,
			&sampleTime,
			&sampleDuration,
			&sampleRenderingOffset,
			&isSyncSample);

    msectime = sampleTime;
    msectime *= TO_U64(1000);
    msectime /= timescale;

    printf("sampleId %6d, size %5u time "U64"("U64")",
	  sid,  MP4GetSampleSize(mp4file, tid, sid), 
	   sampleTime, msectime);
    if (dump_rend) printf(" %6"U64F, sampleRenderingOffset);
    if (strcasecmp(media_data_name, "mp4v") == 0) {
      if (MP4_IS_MPEG4_VIDEO_TYPE(video_type))
	ParseMpeg4(temp, this_frame_size, dump_off);
    } else if (strcasecmp(media_data_name, "avc1") == 0) {
      ParseH264(temp, this_frame_size, len_size, dump_off);
    }
    printf("\n");
  }
  free(buffer);
}
Example No. 13
// returns -1 for error, 0 for mp4, 1 for aac
int
aac_probe (DB_FILE *fp, const char *fname, MP4FILE_CB *cb, float *duration, int *samplerate, int *channels, int *totalsamples, int *mp4track, MP4FILE *pmp4) {
    // try mp4

    if (mp4track) {
        *mp4track = -1;
    }
    if (*pmp4) {
        *pmp4 = NULL;
    }
    *duration = -1;
#ifdef USE_MP4FF
    mp4ff_t *mp4 = mp4ff_open_read (cb);
#else
    MP4FileHandle mp4 = MP4ReadProvider (fname, 0, cb);
#endif
    if (!mp4) {
        trace ("not an mp4 file\n");
        return -1;
    }
    if (pmp4) {
        *pmp4 = mp4;
    }
#ifdef USE_MP4FF
    int ntracks = mp4ff_total_tracks (mp4);
    if (ntracks > 0) {
        trace ("m4a container detected, ntracks=%d\n", ntracks);
        int i = -1;
        trace ("looking for mp4 data...\n");
        int sr = -1;
        for (i = 0; i < ntracks; i++) {
            unsigned char*  buff = 0;
            unsigned int    buff_size = 0;
            mp4AudioSpecificConfig mp4ASC;
            mp4ff_get_decoder_config(mp4, i, &buff, &buff_size);
            if(buff){
                int rc = AudioSpecificConfig(buff, buff_size, &mp4ASC);
                sr = mp4ASC.samplingFrequency;
                free(buff);
                if(rc < 0)
                    continue;
                break;
            }
        }

        if (i != ntracks) 
        {
            trace ("mp4 track: %d\n", i);
            if (sr != -1) {
                *samplerate = sr;
            }
            else {
                *samplerate = mp4ff_get_sample_rate (mp4, i);
            }
            *channels = mp4ff_get_channel_count (mp4, i);
            int samples = mp4ff_num_samples(mp4, i) * 1024;
            samples = (int64_t)samples * (*samplerate) / mp4ff_time_scale (mp4, i);

            trace ("mp4 nsamples=%d, samplerate=%d, timescale=%d, duration=%lld\n", samples, *samplerate, mp4ff_time_scale(mp4, i), mp4ff_get_track_duration(mp4, i));
            *duration = (float)samples / (*samplerate);

            if (totalsamples) {
                *totalsamples = samples;
            }
            if (mp4track) {
                *mp4track = i;
            }
            if (!*pmp4) {
                mp4ff_close (mp4);
            }
            return 0;
        }
    }
#else
    MP4FileHandle mp4File = mp4;
    MP4TrackId trackId = MP4FindTrackId(mp4File, 0, "audio", 0);
    trace ("trackid: %d\n", trackId);
    uint32_t timeScale = MP4GetTrackTimeScale(mp4File, trackId);
    MP4Duration trackDuration = MP4GetTrackDuration(mp4File, trackId);
    MP4SampleId numSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);
    u_int8_t* pConfig;
    uint32_t configSize = 0;
    bool res = MP4GetTrackESConfiguration(mp4File, trackId, &pConfig, &configSize);
    if (res && pConfig) {
        mp4AudioSpecificConfig mp4ASC;
        int rc = AudioSpecificConfig(pConfig, configSize, &mp4ASC);
        free (pConfig);
        if (rc >= 0) {
            *samplerate = mp4ASC.samplingFrequency;
            *channels = MP4GetTrackAudioChannels (mp4File, trackId);
//            int64_t duration = MP4ConvertFromTrackDuration (mp4File, trackId, trackDuration, timeScale);
            int samples = MP4GetTrackNumberOfSamples (mp4File, trackId) * 1024 * (*channels);
            trace ("mp4 nsamples=%d, timescale=%d, samplerate=%d\n", samples, timeScale, *samplerate);
            *duration = (float)samples / (*samplerate);

            if (totalsamples) {
                *totalsamples = samples;
            }
            if (mp4track) {
                *mp4track = trackId;
            }
            if (!*pmp4) {
                MP4Close (mp4);
            }
            return 0;
        }
    }
#endif
    if (*pmp4) {
        *pmp4 = NULL;
    }

    if (mp4) {
#if USE_MP4FF
        mp4ff_close (mp4);
#else
        MP4Close (mp4);
#endif
        mp4 = NULL;
    }
    trace ("mp4 track not found, looking for aac stream...\n");

    // not an mp4, try raw aac
#if USE_MP4FF
    deadbeef->rewind (fp);
#endif
    if (parse_aac_stream (fp, samplerate, channels, duration, totalsamples) == -1) {
        trace ("aac stream not found\n");
        return -1;
    }
    trace ("found aac stream\n");
    return 1;
}
Example No. 14
int main(int argc, char** argv)
{


	if (argc < 2) {
		fprintf(stderr, "Usage: %s <file>\n", argv[0]);
		exit(1);
	}

	//u_int32_t verbosity = MP4_DETAILS_ALL;
	char* fileName = argv[1];

	// open the mp4 file, and read meta-info
	MP4FileHandle mp4File = MP4Read(fileName);

	uint8_t profileLevel = MP4GetVideoProfileLevel(mp4File);

	// get a handle on the first video track
	MP4TrackId trackId = MP4FindTrackId(mp4File, 0, "video");

	// gather the crucial track information 

	uint32_t timeScale = MP4GetTrackTimeScale(mp4File, trackId);

	// note all times and durations 
	// are in units of the track time scale

	MP4Duration trackDuration = MP4GetTrackDuration(mp4File, trackId);

	MP4SampleId numSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);

	uint32_t maxSampleSize = MP4GetTrackMaxSampleSize(mp4File, trackId);

	uint8_t* pConfig;
	uint32_t configSize = 0;

	MP4GetTrackESConfiguration(mp4File, trackId, &pConfig, &configSize);

	// initialize decoder with Elementary Stream (ES) configuration

	// done with our copy of ES configuration
	free(pConfig);


	// now consecutively read and display the track samples

	uint8_t* pSample = (uint8_t*)malloc(maxSampleSize);
	uint32_t sampleSize;

	MP4Timestamp sampleTime;
	MP4Duration sampleDuration;
	MP4Duration sampleRenderingOffset;
	bool isSyncSample;

	for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {

		// give ReadSample our own buffer, and let it know how big it is
		sampleSize = maxSampleSize;

		// read next sample from video track
		MP4ReadSample(mp4File, trackId, sampleId, 
			&pSample, &sampleSize,
			&sampleTime, &sampleDuration, &sampleRenderingOffset, 
			&isSyncSample);

		// convert timestamp and duration from track time to milliseconds
		uint64_t myTime = MP4ConvertFromTrackTimestamp(mp4File, trackId, 
			sampleTime, MP4_MSECS_TIME_SCALE);

		uint64_t myDuration = MP4ConvertFromTrackDuration(mp4File, trackId,
			sampleDuration, MP4_MSECS_TIME_SCALE);

		// decode frame and display it
	}

	// close mp4 file
	MP4Close(mp4File);


	// Note to seek to time 'when' in the track
	// use MP4GetSampleIdFromTime(MP4FileHandle hFile, MP4TrackId trackId,
	//		MP4Timestamp when, bool wantSyncSample)
	// 'wantSyncSample' determines if a sync sample is desired or not
	// e.g.
	// MP4Timestamp when = 
	//	MP4ConvertToTrackTimestamp(mp4File, trackId, 30, MP4_SECS_TIME_SCALE);
	// MP4SampleId newSampleId = MP4GetSampleIdFromTime(mp4File, trackId, when, true);
	// MP4ReadSample(mp4File, trackId, newSampleId, ...);
	// 
	// Note that start time for sample may be later than 'when'

	return 0;
}
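The seek comment above is only a prose outline. The following is a minimal, self-contained sketch (not taken from any of the projects quoted here) of that seek-then-read sequence; the file name "example.mp4", the 30-second target, and the single-argument MP4Read() call are illustrative assumptions, and error checking is omitted for brevity.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <mp4.h>    // mpeg4ip header; newer builds use <mp4v2/mp4v2.h> instead

int main(void)
{
	// sketch: jump to t = 30 s in the first video track and read one sample
	MP4FileHandle hFile = MP4Read("example.mp4");    // hypothetical input file

	MP4TrackId trackId = MP4FindTrackId(hFile, 0, MP4_VIDEO_TRACK_TYPE);

	// convert 30 seconds into the track's own time scale
	MP4Timestamp when =
		MP4ConvertToTrackTimestamp(hFile, trackId, 30, MP4_SECS_TIME_SCALE);

	// ask for a sync (key) sample so decoding can restart cleanly;
	// its start time need not coincide exactly with 'when'
	MP4SampleId syncId = MP4GetSampleIdFromTime(hFile, trackId, when, true);

	// passing a NULL pointer lets MP4ReadSample allocate the buffer
	uint8_t* pBytes = NULL;
	uint32_t numBytes = 0;
	if (MP4ReadSample(hFile, trackId, syncId, &pBytes, &numBytes,
			NULL, NULL, NULL, NULL)) {
		printf("sample %u: %u bytes\n", syncId, numBytes);
		free(pBytes);    // buffer was allocated by the library
	}

	MP4Close(hFile);
	return 0;
}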
Exemplo n.º 15
0
int AacPcm::getInfos(MediaInfo *infos)
{
	if(!infos)
		return 1;

	if(hDecoder)
	{
		SHOW_INFO()
	    return 0;
	}

	IsAAC=strcmpi(infos->getFilename()+lstrlen(infos->getFilename())-4,".aac")==0;

	if(!IsAAC) // MP4 file ---------------------------------------------------------------------
	{
	MP4Duration			length;
	unsigned __int32	buffer_size;
    mp4AudioSpecificConfig mp4ASC;

		if(!(mp4File=MP4Read(infos->getFilename(), 0)))
			ERROR_getInfos("Error opening file");

		if((track=GetAACTrack(mp4File))<0)
			ERROR_getInfos(0); //"Unable to find correct AAC sound track");

		if(!(hDecoder=faacDecOpen()))
			ERROR_getInfos("Error initializing decoder library");

		MP4GetTrackESConfiguration(mp4File, track, (unsigned __int8 **)&buffer, &buffer_size);
		if(!buffer)
			ERROR_getInfos("MP4GetTrackESConfiguration");
		AudioSpecificConfig(buffer, buffer_size, &mp4ASC);
        Channels=mp4ASC.channelsConfiguration;

		if(faacDecInit2(hDecoder, buffer, buffer_size, &Samplerate, &Channels) < 0)
			ERROR_getInfos("Error initializing decoder library");
		FREE_ARRAY(buffer);

		length=MP4GetTrackDuration(mp4File, track);
		len_ms=(DWORD)MP4ConvertFromTrackDuration(mp4File, track, length, MP4_MSECS_TIME_SCALE);
		file_info.bitrate=MP4GetTrackBitRate(mp4File, track);
		file_info.version=MP4GetTrackAudioType(mp4File, track)==MP4_MPEG4_AUDIO_TYPE ? 4 : 2;
		numSamples=MP4GetTrackNumberOfSamples(mp4File, track);
		sampleId=1;
	}
	else // AAC file ------------------------------------------------------------------------------
	{   
	DWORD			read,
					tmp;
	BYTE			Channels4Raw=0;

		if(!(aacFile=fopen(infos->getFilename(),"rb")))
			ERROR_getInfos("Error opening file"); 

		// use bufferized stream
		setvbuf(aacFile,NULL,_IOFBF,32767);

		// get size of file
		fseek(aacFile, 0, SEEK_END);
		src_size=ftell(aacFile);
		fseek(aacFile, 0, SEEK_SET);

		if(!(buffer=(BYTE *)malloc(FAAD_STREAMSIZE)))
			ERROR_getInfos("Memory allocation error: buffer")

		tmp=src_size<FAAD_STREAMSIZE ? src_size : FAAD_STREAMSIZE;
		read=fread(buffer, 1, tmp, aacFile);
		if(read==tmp)
		{
			bytes_read=read;
			bytes_into_buffer=read;
		}
		else
			ERROR_getInfos("Read failed!")

		if(tagsize=id3v2_tag(buffer))
		{
			if(tagsize>(long)src_size)
				ERROR_getInfos("Corrupt stream!");
			if(tagsize<bytes_into_buffer)
			{
				bytes_into_buffer-=tagsize;
				memcpy(buffer,buffer+tagsize,bytes_into_buffer);
			}
			else
			{
				bytes_read=tagsize;
				bytes_into_buffer=0;
				if(tagsize>bytes_into_buffer)
					fseek(aacFile, tagsize, SEEK_SET);
			}
			if(src_size<bytes_read+FAAD_STREAMSIZE-bytes_into_buffer)
				tmp=src_size-bytes_read;
			else
				tmp=FAAD_STREAMSIZE-bytes_into_buffer;
			read=fread(buffer+bytes_into_buffer, 1, tmp, aacFile);
			if(read==tmp)
			{
				bytes_read+=read;
				bytes_into_buffer+=read;
			}
			else
				ERROR_getInfos("Read failed!");
		}

		if(get_AAC_format((char *)infos->getFilename(), &file_info, &seek_table, &seek_table_length, 0))
			ERROR_getInfos("get_AAC_format");
		IsSeekable=file_info.headertype==ADTS && seek_table && seek_table_length>0;
		BlockSeeking=!IsSeekable;

		if(!(hDecoder=faacDecOpen()))
			ERROR_getInfos("Can't open library");

		if(file_info.headertype==RAW)
		{
		faacDecConfiguration	config;

			config.defSampleRate=atoi(cfg_samplerate);
			switch(cfg_profile[1])
			{
			case 'a':
				config.defObjectType=MAIN;
				break;
			case 'o':
				config.defObjectType=LOW;
				break;
			case 'S':
				config.defObjectType=SSR;
				break;
			case 'T':
				config.defObjectType=LTP;
				break;
			}
			switch(cfg_bps[0])
			{
			case '1':
				config.outputFormat=FAAD_FMT_16BIT;
				break;
			case '2':
				config.outputFormat=FAAD_FMT_24BIT;
				break;
			case '3':
				config.outputFormat=FAAD_FMT_32BIT;
				break;
			case 'F':
				config.outputFormat=FAAD_FMT_FLOAT;
				break;
			}
			faacDecSetConfiguration(hDecoder, &config);

			if(!FindBitrate)
			{
			AacPcm *NewInst;
				if(!(NewInst=new AacPcm()))
					ERROR_getInfos("Memory allocation error: NewInst");

				NewInst->FindBitrate=TRUE;
				if(NewInst->getInfos(infos))
					ERROR_getInfos(0);
				Channels4Raw=NewInst->frameInfo.channels;
				file_info.bitrate=NewInst->file_info.bitrate*Channels4Raw;
				delete NewInst;
			}
			else
			{
			DWORD	Samples,
					BytesConsumed;

				if((bytes_consumed=faacDecInit(hDecoder,buffer,bytes_into_buffer,&Samplerate,&Channels))<0)
					ERROR_getInfos("Can't init library");
				bytes_into_buffer-=bytes_consumed;
				if(!processData(infos,0,0))
					ERROR_getInfos(0);
				Samples=frameInfo.samples/sizeof(short);
				BytesConsumed=frameInfo.bytesconsumed;
				processData(infos,0,0);
				if(BytesConsumed<frameInfo.bytesconsumed)
					BytesConsumed=frameInfo.bytesconsumed;
				file_info.bitrate=(BytesConsumed*8*Samplerate)/Samples;
				if(!file_info.bitrate)
					file_info.bitrate=1000; // try to continue decoding
				return 0;
			}
		}

		if((bytes_consumed=faacDecInit(hDecoder, buffer, bytes_into_buffer, &Samplerate, &Channels))<0)
			ERROR_getInfos("faacDecInit failed!")
		bytes_into_buffer-=bytes_consumed;

		if(Channels4Raw)
			Channels=Channels4Raw;

		len_ms=(DWORD)((1000*((float)src_size*8))/file_info.bitrate);
	}

	SHOW_INFO();
    return 0;
}
Exemplo n.º 16
0
Result SoundSourceM4A::tryOpen(const AudioSourceConfig& audioSrcCfg) {
    DEBUG_ASSERT(MP4_INVALID_FILE_HANDLE == m_hFile);
    /* open MP4 file, check for >= ver 1.9.1 */
#if MP4V2_PROJECT_version_hex <= 0x00010901
    m_hFile = MP4Read(getLocalFileNameBytes().constData(), 0);
#else
    m_hFile = MP4Read(getLocalFileNameBytes().constData());
#endif
    if (MP4_INVALID_FILE_HANDLE == m_hFile) {
        qWarning() << "Failed to open file for reading:" << getUrlString();
        return ERR;
    }

    m_trackId = findFirstAudioTrackId(m_hFile);
    if (MP4_INVALID_TRACK_ID == m_trackId) {
        qWarning() << "No AAC track found:" << getUrlString();
        return ERR;
    }

    const MP4SampleId numberOfSamples =
            MP4GetTrackNumberOfSamples(m_hFile, m_trackId);
    if (0 >= numberOfSamples) {
        qWarning() << "Failed to read number of samples from file:" << getUrlString();
        return ERR;
    }
    m_maxSampleBlockId = kSampleBlockIdMin + (numberOfSamples - 1);

    // Determine the maximum input size (in bytes) of a
    // sample block for the selected track.
    const u_int32_t maxSampleBlockInputSize = MP4GetTrackMaxSampleSize(m_hFile,
            m_trackId);
    m_inputBuffer.resize(maxSampleBlockInputSize, 0);

    DEBUG_ASSERT(NULL == m_hDecoder); // not already opened
    m_hDecoder = NeAACDecOpen();
    if (!m_hDecoder) {
        qWarning() << "Failed to open the AAC decoder!";
        return ERR;
    }
    NeAACDecConfigurationPtr pDecoderConfig = NeAACDecGetCurrentConfiguration(
            m_hDecoder);
    pDecoderConfig->outputFormat = FAAD_FMT_FLOAT;
    if ((kChannelCountMono == audioSrcCfg.channelCountHint) ||
            (kChannelCountStereo == audioSrcCfg.channelCountHint)) {
        pDecoderConfig->downMatrix = 1;
    } else {
        pDecoderConfig->downMatrix = 0;
    }

    pDecoderConfig->defObjectType = LC;
    if (!NeAACDecSetConfiguration(m_hDecoder, pDecoderConfig)) {
        qWarning() << "Failed to configure AAC decoder!";
        return ERR;
    }

    u_int8_t* configBuffer = NULL;
    u_int32_t configBufferSize = 0;
    if (!MP4GetTrackESConfiguration(m_hFile, m_trackId, &configBuffer,
            &configBufferSize)) {
        /* failed to get mpeg-4 audio config... this is ok.
         * NeAACDecInit2() will simply use default values instead.
         */
        qWarning() << "Failed to read the MP4 audio configuration."
                << "Continuing with default values.";
    }

    SAMPLERATE_TYPE sampleRate;
    unsigned char channelCount;
    if (0 > NeAACDecInit2(m_hDecoder, configBuffer, configBufferSize,
                    &sampleRate, &channelCount)) {
        free(configBuffer);
        qWarning() << "Failed to initialize the AAC decoder!";
        return ERR;
    } else {
        free(configBuffer);
    }

    setChannelCount(channelCount);
    setFrameRate(sampleRate);
    setFrameCount(getFrameCountForSampleBlockId(m_maxSampleBlockId));

    // Resize temporary buffer for decoded sample data
    const SINT sampleBufferCapacity =
            frames2samples(kFramesPerSampleBlock);
    m_sampleBuffer.resetCapacity(sampleBufferCapacity);

    // Invalidate current position to enforce the following
    // seek operation
    m_curFrameIndex = getMaxFrameIndex();

    // (Re-)Start decoding at the beginning of the file
    seekSampleFrame(getMinFrameIndex());

    return OK;
}
Exemplo n.º 17
0
bool MP4AV_AudioConsecutiveHinter( 
	MP4FileHandle mp4File, 
	MP4TrackId mediaTrackId, 
	MP4TrackId hintTrackId,
	MP4Duration sampleDuration, 
	u_int8_t perPacketHeaderSize,
	u_int8_t perSampleHeaderSize,
	u_int8_t maxSamplesPerPacket,
	u_int16_t maxPayloadSize,
	MP4AV_AudioSampleSizer pSizer,
	MP4AV_AudioConcatenator pConcatenator,
	MP4AV_AudioFragmenter pFragmenter)
{
	bool rc;
	u_int32_t numSamples = 
		MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);

	u_int16_t bytesThisHint = perPacketHeaderSize;
	u_int16_t samplesThisHint = 0;
	MP4SampleId* pSampleIds = 
		new MP4SampleId[maxSamplesPerPacket];

	for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {

		u_int32_t sampleSize = 
			(*pSizer)(mp4File, mediaTrackId, sampleId);

		// sample won't fit in this packet
		// or we've reached the limit on samples per packet
		if ((int16_t)(sampleSize + perSampleHeaderSize) 
		    > maxPayloadSize - bytesThisHint 
		  || samplesThisHint == maxSamplesPerPacket) {

			if (samplesThisHint > 0) {
				rc = (*pConcatenator)(mp4File, mediaTrackId, hintTrackId,
					samplesThisHint, pSampleIds,
					samplesThisHint * sampleDuration,
					maxPayloadSize);

				if (!rc) {
					delete [] pSampleIds;
					return false;
				}
			}

			// start a new hint 
			samplesThisHint = 0;
			bytesThisHint = perPacketHeaderSize;

			// fall thru
		}

		// sample is less than remaining payload size
		if ((int16_t)(sampleSize + perSampleHeaderSize)
		  <= maxPayloadSize - bytesThisHint) {

			// add it to this hint
			bytesThisHint += (sampleSize + perSampleHeaderSize);
			pSampleIds[samplesThisHint++] = sampleId;

		} else { 
			// jumbo frame, need to fragment it
			rc = (*pFragmenter)(mp4File, mediaTrackId, hintTrackId,
				sampleId, sampleSize, sampleDuration, maxPayloadSize);

			if (!rc) {
				delete [] pSampleIds;
				return false;
			}

			// start a new hint 
			samplesThisHint = 0;
			bytesThisHint = perPacketHeaderSize;
		}
	}

	if (samplesThisHint > 0) {
	  rc = (*pConcatenator)(mp4File, mediaTrackId, hintTrackId,
				samplesThisHint, pSampleIds,
				samplesThisHint * sampleDuration,
				maxPayloadSize);
	  
	  if (!rc) {
	    delete [] pSampleIds;
	    return false;
	  }
	}

	delete [] pSampleIds;

	return true;
}
Exemplo n.º 18
0
extern "C" bool MP4AV_RfcIsmaHinter(
	MP4FileHandle mp4File, 
	MP4TrackId mediaTrackId, 
	bool interleave,
	u_int16_t maxPayloadSize)
{
	// gather information, and check for validity

	u_int32_t numSamples =
		MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);

	if (numSamples == 0) {
		return false;
	}

	u_int32_t timeScale =
		MP4GetTrackTimeScale(mp4File, mediaTrackId);

	if (timeScale == 0) {
		return false;
	}

	u_int8_t audioType =
		MP4GetTrackEsdsObjectTypeId(mp4File, mediaTrackId);

	if (audioType != MP4_MPEG4_AUDIO_TYPE
	  && !MP4_IS_AAC_AUDIO_TYPE(audioType)) {
		return false;
	}

	u_int8_t mpeg4AudioType =
		MP4GetTrackAudioMpeg4Type(mp4File, mediaTrackId);

	if (audioType == MP4_MPEG4_AUDIO_TYPE) {
		// check that track contains either MPEG-4 AAC or CELP
		if (!MP4_IS_MPEG4_AAC_AUDIO_TYPE(mpeg4AudioType) 
		  && mpeg4AudioType != MP4_MPEG4_CELP_AUDIO_TYPE) {
			return false;
		}
	}

	MP4Duration sampleDuration = 
		MP4AV_GetAudioSampleDuration(mp4File, mediaTrackId);

	if (sampleDuration == MP4_INVALID_DURATION) {
		return false;
	}

	/* get the ES configuration */
	u_int8_t* pConfig = NULL;
	u_int32_t configSize;
	uint8_t channels;

	if (MP4GetTrackESConfiguration(mp4File, mediaTrackId, 
				       &pConfig, &configSize) == false)
	  return false;

	if (!pConfig) {
		return false;
	}
     
	channels = MP4AV_AacConfigGetChannels(pConfig);

	/* convert ES Config into ASCII form */
	char* sConfig = 
		MP4BinaryToBase16(pConfig, configSize);

	free(pConfig);

	if (!sConfig) {
		return false;
	}

	/* create the appropriate SDP attribute */
	uint sdpBufLen = strlen(sConfig) + 256;
	char* sdpBuf = 
	  (char*)malloc(sdpBufLen);

	if (!sdpBuf) {
		free(sConfig);
		return false;
	}


	// now add the hint track
	MP4TrackId hintTrackId =
		MP4AddHintTrack(mp4File, mediaTrackId);

	if (hintTrackId == MP4_INVALID_TRACK_ID) {
		free(sConfig);
		free(sdpBuf);
		return false;
	}

	u_int8_t payloadNumber = MP4_SET_DYNAMIC_PAYLOAD;
	char buffer[10];
	if (channels != 1) {
	  snprintf(buffer, sizeof(buffer), "%u", channels);
	}
	if (MP4SetHintTrackRtpPayload(mp4File, hintTrackId, 
				      "mpeg4-generic", &payloadNumber, 0,
				      channels != 1 ? buffer : NULL) == false) {
	  MP4DeleteTrack(mp4File, hintTrackId);
	  free(sConfig);
	  free(sdpBuf);
	  return false;
	}

	MP4Duration maxLatency;
	bool OneByteHeader = false;
	if (mpeg4AudioType == MP4_MPEG4_CELP_AUDIO_TYPE) {
	  snprintf(sdpBuf, sdpBufLen,
			"a=fmtp:%u "
			"streamtype=5; profile-level-id=15; mode=CELP-vbr; config=%s; "
			"SizeLength=6; IndexLength=2; IndexDeltaLength=2; Profile=0;"
			"\015\012",
				payloadNumber,
				sConfig); 

		// 200 ms max latency for ISMA profile 1
		maxLatency = timeScale / 5;
		OneByteHeader = true;
	} else { // AAC
	  snprintf(sdpBuf, sdpBufLen,
			"a=fmtp:%u "
			"streamtype=5; profile-level-id=15; mode=AAC-hbr; config=%s; "
			"SizeLength=13; IndexLength=3; IndexDeltaLength=3;"
			"\015\012",
				payloadNumber,
				sConfig); 

		// 500 ms max latency for ISMA profile 1
		maxLatency = timeScale / 2;
	}

	/* add this to the track's sdp */
	bool val = MP4AppendHintTrackSdp(mp4File, hintTrackId, sdpBuf);

	free(sConfig);
	free(sdpBuf);
	if (val == false) {
	  MP4DeleteTrack(mp4File, hintTrackId);
	  return false;
	}

	u_int32_t samplesPerPacket = 0;
 
	if (interleave) {
		u_int32_t maxSampleSize =
			MP4GetTrackMaxSampleSize(mp4File, mediaTrackId);

		// compute how many maximum size samples would fit in a packet
		samplesPerPacket = 
			(maxPayloadSize - 2) / (maxSampleSize + 2);
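		// e.g. (hypothetical numbers) maxPayloadSize = 1460 and maxSampleSize = 480:
		// (1460 - 2) / (480 + 2) = 3 samples per RTP packet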

		// can't interleave if this number is 0 or 1
		if (samplesPerPacket < 2) {
			interleave = false;
		}
	}

	bool rc;

	if (interleave) {
		u_int32_t samplesPerGroup = maxLatency / sampleDuration;
		u_int32_t stride;
		stride = samplesPerGroup / samplesPerPacket;

		if (OneByteHeader && stride > 3) stride = 3;
		if (!OneByteHeader && stride > 7) stride = 7;
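		// the stride must fit in the AU-Index-delta field advertised in the SDP
		// above (IndexDeltaLength = 2 bits for CELP, 3 bits for AAC), hence 3 and 7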

#if 0
		printf("max latency %llu sampleDuration %llu spg %u spp %u strid %u\n",
		       maxLatency, sampleDuration, samplesPerGroup,
		       samplesPerPacket, stride);
#endif
		rc = MP4AV_AudioInterleaveHinter(
			mp4File, 
			mediaTrackId, 
			hintTrackId,
			sampleDuration, 
			stride,		// stride
			samplesPerPacket,						// bundle
			maxPayloadSize,
			MP4AV_RfcIsmaConcatenator);

	} else {
		rc = MP4AV_AudioConsecutiveHinter(
			mp4File, 
			mediaTrackId, 
			hintTrackId,
			sampleDuration, 
			2,										// perPacketHeaderSize
			2,										// perSampleHeaderSize
			maxLatency / sampleDuration,			// maxSamplesPerPacket
			maxPayloadSize,
			MP4GetSampleSize,
			MP4AV_RfcIsmaConcatenator,
			MP4AV_RfcIsmaFragmenter);
	}

	if (!rc) {
		MP4DeleteTrack(mp4File, hintTrackId);
		return false;
	}

	return true;
}
Exemplo n.º 19
0
static void *mp4Decode(void *args)
{
  MP4FileHandle mp4file;

  pthread_mutex_lock(&mutex);
  seekPosition = -1;
  bPlaying = TRUE;
  if(!(mp4file = MP4Read(args, 0))){
    mp4cfg.file_type = FILE_AAC;
  }else{
    mp4cfg.file_type = FILE_MP4;
  }

  if(mp4cfg.file_type == FILE_MP4){
    // We are reading a MP4 file
    gint		mp4track;

    if((mp4track = getAACTrack(mp4file)) < 0){
      //TODO: check here for other audio formats
      g_print("Unsupported Audio track type\n");
      g_free(args);
      MP4Close(mp4file);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }else{
      faacDecHandle	decoder;
      unsigned char	*buffer	= NULL;
      guint		bufferSize = 0;
      gulong		samplerate;
      guchar		channels;
      guint		avgBitrate;
      MP4Duration	duration;
      gulong		msDuration;
      MP4SampleId	numSamples;
      MP4SampleId	sampleID = 1;

      decoder = faacDecOpen();
      MP4GetTrackESConfiguration(mp4file, mp4track, &buffer, &bufferSize);
      if(!buffer){
	g_free(args);
	faacDecClose(decoder);
	MP4Close(mp4file);
	bPlaying = FALSE;
	pthread_mutex_unlock(&mutex);
	pthread_exit(NULL);
      }
      if(faacDecInit2(decoder, buffer, bufferSize, &samplerate, &channels)<0){
	g_free(args);
	faacDecClose(decoder);
	MP4Close(mp4file);
	bPlaying = FALSE;
	pthread_mutex_unlock(&mutex);
	pthread_exit(NULL);
      }
      g_free(buffer);
      if(channels == 0){
	g_print("Number of Channels not supported\n");
	g_free(args);
	faacDecClose(decoder);
	MP4Close(mp4file);
	bPlaying = FALSE;
	pthread_mutex_unlock(&mutex);
	pthread_exit(NULL);
      }
      duration = MP4GetTrackDuration(mp4file, mp4track);
      msDuration = MP4ConvertFromTrackDuration(mp4file, mp4track, duration,
					       MP4_MSECS_TIME_SCALE);
      numSamples = MP4GetTrackNumberOfSamples(mp4file, mp4track);
      mp4_ip.output->open_audio(FMT_S16_NE, samplerate, channels);
      mp4_ip.output->flush(0);
      mp4_ip.set_info(args, msDuration, -1, samplerate/1000, channels);
      g_print("MP4 - %d channels @ %d Hz\n", channels, samplerate);

      while(bPlaying){
	void*			sampleBuffer;
	faacDecFrameInfo	frameInfo;    
	gint			rc;

	if(seekPosition!=-1){
	  duration = MP4ConvertToTrackDuration(mp4file,
					       mp4track,
					       seekPosition*1000,
					       MP4_MSECS_TIME_SCALE);
	  sampleID = MP4GetSampleIdFromTime(mp4file, mp4track, duration, 0);
	  mp4_ip.output->flush(seekPosition*1000);
	  seekPosition = -1;
	}
	buffer=NULL;
	bufferSize=0;
	if(sampleID > numSamples){
	  mp4_ip.output->close_audio();
	  g_free(args);
	  faacDecClose(decoder);
	  MP4Close(mp4file);
	  bPlaying = FALSE;
	  pthread_mutex_unlock(&mutex);
	  pthread_exit(NULL);
	}
	rc = MP4ReadSample(mp4file, mp4track, sampleID++, &buffer, &bufferSize,
			   NULL, NULL, NULL, NULL);
	//g_print("%d/%d\n", sampleID-1, numSamples);
	if((rc==0) || (buffer== NULL)){
	  g_print("MP4: read error\n");
	  sampleBuffer = NULL;
	  sampleID=0;
	  mp4_ip.output->buffer_free();
	  mp4_ip.output->close_audio();
	  g_free(args);
	  faacDecClose(decoder);
	  MP4Close(mp4file);
	  bPlaying = FALSE;
	  pthread_mutex_unlock(&mutex);
	  pthread_exit(NULL);
	}else{
	  sampleBuffer = faacDecDecode(decoder, &frameInfo, buffer, bufferSize);
	  if(frameInfo.error > 0){
	    g_print("MP4: %s\n",
		    faacDecGetErrorMessage(frameInfo.error));
	    mp4_ip.output->close_audio();
	    g_free(args);
	    faacDecClose(decoder);
	    MP4Close(mp4file);
	    bPlaying = FALSE;
	    pthread_mutex_unlock(&mutex);
	    pthread_exit(NULL);
	  }
	  if(buffer){
	    g_free(buffer); buffer=NULL; bufferSize=0;
	  }
	  while(bPlaying && mp4_ip.output->buffer_free()<frameInfo.samples<<1)
	    xmms_usleep(30000);
	}
	mp4_ip.add_vis_pcm(mp4_ip.output->written_time(),
			   FMT_S16_NE,
			   channels,
			   frameInfo.samples<<1,
			   sampleBuffer);
	mp4_ip.output->write_audio(sampleBuffer, frameInfo.samples<<1);
      }
      while(bPlaying && mp4_ip.output->buffer_free()){
	xmms_usleep(10000);
      }
      mp4_ip.output->close_audio();
      g_free(args);
      faacDecClose(decoder);
      MP4Close(mp4file);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
  } else{
    // WE ARE READING AN AAC FILE
    FILE		*file = NULL;
    faacDecHandle	decoder = 0;
    guchar		*buffer = 0;
    gulong		bufferconsumed = 0;
    gulong		samplerate = 0;
    guchar		channels;
    gulong		buffervalid = 0;
    TitleInput*		input;
    gchar		*temp = g_strdup(args);
    gchar		*ext  = strrchr(temp, '.');
    gchar		*xmmstitle = NULL;
    faacDecConfigurationPtr config;

    if((file = fopen(args, "rb")) == 0){
      g_print("AAC: can't find file %s\n", args);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    if((decoder = faacDecOpen()) == NULL){
      g_print("AAC: Open Decoder Error\n");
      fclose(file);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    config = faacDecGetCurrentConfiguration(decoder);
    config->useOldADTSFormat = 0;
    faacDecSetConfiguration(decoder, config);
    if((buffer = g_malloc(BUFFER_SIZE)) == NULL){
      g_print("AAC: error g_malloc\n");
      fclose(file);
      bPlaying = FALSE;
      faacDecClose(decoder);
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    if((buffervalid = fread(buffer, 1, BUFFER_SIZE, file))==0){
      g_print("AAC: Error reading file\n");
      g_free(buffer);
      fclose(file);
      bPlaying = FALSE;
      faacDecClose(decoder);
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    XMMS_NEW_TITLEINPUT(input);
    input->file_name = g_basename(temp);
    input->file_ext = ext ? ext+1 : NULL;
    input->file_path = temp;
    if(!strncmp(buffer, "ID3", 3)){
      gint size = 0;
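      // ID3v2 stores the tag length as a 28-bit "syncsafe" integer, 7 bits per
      // byte; the 10-byte tag header itself is not included, hence the += 10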

      size = (buffer[6]<<21) | (buffer[7]<<14) | (buffer[8]<<7) | buffer[9];
      size+=10;
      fseek(file, size, SEEK_SET);
      buffervalid = fread(buffer, 1, BUFFER_SIZE, file);
    }
    xmmstitle = xmms_get_titlestring(xmms_get_gentitle_format(), input);
    if(xmmstitle == NULL)
      xmmstitle = g_strdup(input->file_name);
    if(temp) g_free(temp);
    if(input->performer) g_free(input->performer);
    if(input->album_name) g_free(input->album_name);
    if(input->track_name) g_free(input->track_name);
    if(input->genre) g_free(input->genre);
    g_free(input);
    bufferconsumed = faacDecInit(decoder,
				 buffer,
				 buffervalid,
				 &samplerate,
				 &channels);
    if(mp4_ip.output->open_audio(FMT_S16_NE,samplerate,channels) == FALSE){
      g_print("AAC: Output Error\n");
      g_free(buffer); buffer=0;
      faacDecClose(decoder);
      fclose(file);
      mp4_ip.output->close_audio();
      /*
      if(positionTable){
	g_free(positionTable); positionTable=0;
      }
      */
      g_free(xmmstitle);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    //if(bSeek){
    //mp4_ip.set_info(xmmstitle, lenght*1000, -1, samplerate, channels);
      //}else{
    mp4_ip.set_info(xmmstitle, -1, -1, samplerate, channels);
      //}
    mp4_ip.output->flush(0);

    while(bPlaying && buffervalid > 0){
      faacDecFrameInfo	finfo;
      unsigned long	samplesdecoded;
      char*		sample_buffer = NULL;
      /*
	if(bSeek && seekPosition!=-1){
	fseek(file, positionTable[seekPosition], SEEK_SET);
	bufferconsumed=0;
	buffervalid = fread(buffer, 1, BUFFER_SIZE, file);
	aac_ip.output->flush(seekPosition*1000);
	seekPosition=-1;
	}
      */
      if(bufferconsumed > 0){
	memmove(buffer, &buffer[bufferconsumed], buffervalid-bufferconsumed);
	buffervalid -= bufferconsumed;
	buffervalid += fread(&buffer[buffervalid], 1,
			     BUFFER_SIZE-buffervalid, file);
	bufferconsumed = 0;
      }
      sample_buffer = faacDecDecode(decoder, &finfo, buffer, buffervalid);
      if(finfo.error){
	config = faacDecGetCurrentConfiguration(decoder);
	if(config->useOldADTSFormat != 1){
	  faacDecClose(decoder);
	  decoder = faacDecOpen();
	  config = faacDecGetCurrentConfiguration(decoder);
	  config->useOldADTSFormat = 1;
	  faacDecSetConfiguration(decoder, config);
	  finfo.bytesconsumed=0;
	  finfo.samples = 0;
	  faacDecInit(decoder,
		      buffer,
		      buffervalid,
		      &samplerate,
		      &channels);
	}else{
	  g_print("FAAD2 Warning %s\n", faacDecGetErrorMessage(finfo.error));
	  buffervalid = 0;
	}
      }
      bufferconsumed += finfo.bytesconsumed;
      samplesdecoded = finfo.samples;
      if((samplesdecoded<=0) && !sample_buffer){
	g_print("AAC: error sample decoding\n");
	continue;
      }
      while(bPlaying && mp4_ip.output->buffer_free() < (samplesdecoded<<1)){
	xmms_usleep(10000);
      }
      mp4_ip.add_vis_pcm(mp4_ip.output->written_time(),
			 FMT_S16_LE, channels,
			 samplesdecoded<<1, sample_buffer);
      mp4_ip.output->write_audio(sample_buffer, samplesdecoded<<1);
    }
    while(bPlaying && mp4_ip.output->buffer_playing()){
      xmms_usleep(10000);
    }
    mp4_ip.output->buffer_free();
    mp4_ip.output->close_audio();
    bPlaying = FALSE;
    g_free(buffer);
    faacDecClose(decoder);
    g_free(xmmstitle);
    fclose(file);
    seekPosition = -1;
    /*
    if(positionTable){
      g_free(positionTable); positionTable=0;
    }
    */
    bPlaying = FALSE;
    pthread_mutex_unlock(&mutex);
    pthread_exit(NULL);
    
  }
}
Exemplo n.º 20
0
/** Action for importing chapters into the <b>job.file</b>
 *
 *  
 *  @param job the job to process
 *  @return mp4v2::util::SUCCESS if successful, mp4v2::util::FAILURE otherwise
 */
bool
ChapterUtility::actionImport( JobContext& job )
{
    vector<MP4Chapter_t> chapters;
    Timecode::Format format;

    // create the chapter file name
    string inName = job.file;
    if( _ChapterFile.empty() )
    {
        FileSystem::pathnameStripExtension( inName );
        inName.append( ".chapters.txt" );
    }
    else
    {
        inName = _ChapterFile;
    }

    if( parseChapterFile( inName, chapters, format ) )
    {
        return FAILURE;
    }

    ostringstream oss;
    oss << "Importing " << chapters.size() << " " << getChapterTypeName( _ChapterType );
    oss << " chapters from file " << inName << " into file " << '"' << job.file << '"' << endl;

    verbose1f( "%s", oss.str().c_str() );
    if( dryrunAbort() )
    {
        return SUCCESS;
    }

    if( 0 == chapters.size() )
    {
        return herrf( "No chapters found in file %s\n", inName.c_str() );
    }

    job.fileHandle = MP4Modify( job.file.c_str() );
    if( job.fileHandle == MP4_INVALID_FILE_HANDLE )
    {
        return herrf( "unable to open for write: %s\n", job.file.c_str() );
    }

    bool isVideoTrack = false;
    MP4TrackId refTrackId = getReferencingTrack( job.fileHandle, isVideoTrack );
    if( !MP4_IS_VALID_TRACK_ID(refTrackId) )
    {
        return herrf( "unable to find a video or audio track in file %s\n", job.file.c_str() );
    }
    if( Timecode::FRAME == format && !isVideoTrack )
    {
        // we need a video track for this
        return herrf( "unable to find a video track in file %s but chapter file contains frame timestamps\n", job.file.c_str() );
    }

    // get duration and recalculate scale
    Timecode refTrackDuration( MP4GetTrackDuration( job.fileHandle, refTrackId ),
                               MP4GetTrackTimeScale( job.fileHandle, refTrackId ) );
    refTrackDuration.setScale( CHAPTERTIMESCALE );

    // check for chapters starting after duration of reftrack
    for( vector<MP4Chapter_t>::iterator it = chapters.begin(); it != chapters.end(); )
    {
        Timecode curr( (*it).duration, CHAPTERTIMESCALE );
        if( refTrackDuration <= curr )
        {
            hwarnf( "Chapter '%s' start: %s, playlength of file: %s, chapter cannot be set\n",
                    (*it).title, curr.svalue.c_str(), refTrackDuration.svalue.c_str() );
            it = chapters.erase( it );
        }
        else
        {
            ++it;
        }
    }
    if( 0 == chapters.size() )
    {
        return SUCCESS;
    }

    // convert start time into duration
	uint32_t framerate = static_cast<uint32_t>( CHAPTERTIMESCALE );
    if( Timecode::FRAME == format )
    {
        // get the framerate
        MP4SampleId sampleCount = MP4GetTrackNumberOfSamples( job.fileHandle, refTrackId );
        Timecode tmpcd( refTrackDuration.svalue, CHAPTERTIMESCALE );
		framerate = static_cast<uint32_t>( std::ceil( ((double)sampleCount / (double)tmpcd.duration) * CHAPTERTIMESCALE ) );
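        // sampleCount / tmpcd.duration is samples per CHAPTERTIMESCALE tick, so the
        // product is frames per second, e.g. (hypothetical) 1500 samples over a
        // 60 second reference track gives ceil(25.0) = 25 fps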
    }

    for( vector<MP4Chapter_t>::iterator it = chapters.begin(); it != chapters.end(); ++it )
    {
        MP4Duration currDur = (*it).duration;
        MP4Duration nextDur =  chapters.end() == it+1 ? refTrackDuration.duration : (*(it+1)).duration;

        if( Timecode::FRAME == format )
        {
            // convert from frame nr to milliseconds
            currDur = convertFrameToMillis( (*it).duration, framerate );

            if( chapters.end() != it+1 )
            {
                nextDur = convertFrameToMillis( (*(it+1)).duration, framerate );
            }
        }

        (*it).duration = nextDur - currDur;
    }

    // now set the chapters
    MP4SetChapters( job.fileHandle, &chapters[0], (uint32_t)chapters.size(), _ChapterType );

    fixQtScale( job.fileHandle );
    job.optimizeApplicable = true;

    return SUCCESS;
}
Exemplo n.º 21
0
int decodeMP4file(char *sndfile, aac_dec_opt *opt)
{
    int track;
    unsigned long samplerate;
    unsigned char channels;
    void *sample_buffer;

    MP4FileHandle infile;
    MP4SampleId sampleId, numSamples;

    audio_file *aufile;

    faacDecHandle hDecoder;
    faacDecFrameInfo frameInfo;

    unsigned char *buffer;
    unsigned int buffer_size;

    int first_time = 1;

    hDecoder = faacDecOpen();

    infile = MP4Read(opt->filename, 0);
    if (!infile)
    {
        /* unable to open file */
        error_handler("Error opening file: %s\n", opt->filename);
        return 1;
    }

    if ((track = GetAACTrack(infile)) < 0)
    {
        error_handler("Unable to find correct AAC sound track in the MP4 file.\n");
        MP4Close(infile);
        return 1;
    }

    buffer = NULL;
    buffer_size = 0;
    MP4GetTrackESConfiguration(infile, track, &buffer, &buffer_size);

    if(faacDecInit2(hDecoder, buffer, buffer_size, &samplerate, &channels) < 0)
    {
        /* If some error initializing occured, skip the file */
        error_handler("Error initializing decoder library.\n");
        faacDecClose(hDecoder);
        MP4Close(infile);
        return 1;
    }
    if (buffer)
        free(buffer);

    numSamples = MP4GetTrackNumberOfSamples(infile, track);

    for (sampleId = 1; sampleId <= numSamples; sampleId++)
    {
        int rc;

        /* get access unit from MP4 file */
        buffer = NULL;
        buffer_size = 0;

        rc = MP4ReadSample(infile, track, sampleId, &buffer, &buffer_size, NULL, NULL, NULL, NULL);
        if (rc == 0)
        {
            error_handler("Reading from MP4 file failed.\n");
            faacDecClose(hDecoder);
            MP4Close(infile);
            return 1;
        }

        sample_buffer = faacDecDecode(hDecoder, &frameInfo, buffer, buffer_size);

        if (buffer)
            free(buffer);

        opt->progress_update((long)numSamples, sampleId);

        /* open the sound file now that the number of channels are known */
        if (first_time && !frameInfo.error)
        {
            if(opt->decode_mode == 0)
            {
                if (Set_WIN_Params (INVALID_FILEDESC, samplerate, SAMPLE_SIZE,
                                frameInfo.channels) < 0)
                {
                    error_handler("\nCan't access %s\n", "WAVE OUT");
                    faacDecClose(hDecoder);
                    MP4Close(infile);
                    return (0);
                }
            }
            else
            {
                aufile = open_audio_file(sndfile, samplerate, frameInfo.channels,
                     opt->output_format, opt->file_type, aacChannelConfig2wavexChannelMask(&frameInfo));

                if (aufile == NULL)
                {
                    faacDecClose(hDecoder);
                    MP4Close(infile);
                    return 0;
                }
            }
            first_time = 0;
        }

        if ((frameInfo.error == 0) && (frameInfo.samples > 0))
        {
            if(opt->decode_mode == 0)
                WIN_Play_Samples((short*)sample_buffer, frameInfo.channels*frameInfo.samples);
            else
                write_audio_file(aufile, sample_buffer, frameInfo.samples, 0);
        }

        if (frameInfo.error > 0)
        {
            error_handler("Error: %s\n",
            faacDecGetErrorMessage(frameInfo.error));
            break;
        }
        if(stop_decoding)
            break;
    }


    faacDecClose(hDecoder);


    MP4Close(infile);

    if(opt->decode_mode == 0)
        WIN_Audio_close();
    else
    {
        if (!first_time)
            close_audio_file(aufile);
    }

    return frameInfo.error;
}
Exemplo n.º 22
0
extern "C" bool MP4AV_Rfc2429Hinter (MP4FileHandle file,
				     MP4TrackId mediaTrackId,
				     uint16_t maxPayloadSize)
{
  uint32_t numSamples, maxSampleSize;
  MP4TrackId hid;
  MP4Duration duration;

  numSamples = MP4GetTrackNumberOfSamples(file, mediaTrackId);
  if (numSamples == 0) {
    return false;
  }
  maxSampleSize = MP4GetTrackMaxSampleSize(file, mediaTrackId);
  u_int8_t* pSampleBuffer = (u_int8_t*)malloc(maxSampleSize);
  if (pSampleBuffer == NULL) {
    return false;
  }

  hid = MP4AddHintTrack(file, mediaTrackId);
  if (hid == MP4_INVALID_TRACK_ID) {
    free(pSampleBuffer);
    return false;
  }

  uint8_t payloadNumber = MP4_SET_DYNAMIC_PAYLOAD;
  MP4SetHintTrackRtpPayload(file,
                            hid,
                            "H263-2000",
                            &payloadNumber,
                            0,
                            NULL,
                            true,
                            false);

  // strictly speaking, this is not required for H.263 - it's a quicktime
  // thing.
  u_int16_t videoWidth = MP4GetTrackVideoWidth(file, mediaTrackId);
  u_int16_t videoHeight = MP4GetTrackVideoHeight(file, mediaTrackId);
  
  char sdpString[80];
  sprintf(sdpString, "a=cliprect:0,0,%d,%d\015\012", videoHeight, videoWidth);
  
  MP4AppendHintTrackSdp(file, 
 			hid,
 			sdpString);

  for (uint32_t sid = 1; sid <= numSamples; sid++) {

    duration = MP4GetSampleDuration(file, mediaTrackId, sid);

    MP4AddRtpVideoHint(file, hid, false, 0);

    u_int32_t sampleSize = maxSampleSize;
    MP4Timestamp startTime;
    MP4Duration renderingOffset;
    bool isSyncSample;

    bool rc = MP4ReadSample(file, mediaTrackId, sid,
                            &pSampleBuffer, &sampleSize,
                            &startTime, &duration,
                            &renderingOffset, &isSyncSample);

    if (!rc) {
      MP4DeleteTrack(file, hid);
      free(pSampleBuffer);
      return false;
    }

    // need to skip the first 2 bytes of the packet - it is the
    //start code
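    // the 16-bit RFC 2429 payload header sent instead (0x0400) has only the P bit
    // set, telling the receiver that the two zero start-code bytes were stripped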
    uint16_t payload_head = htons(0x400);
    uint32_t offset = sizeof(payload_head);
    uint32_t remaining = sampleSize - sizeof(payload_head);
    while (remaining) {
      bool last_pak = false;
      uint32_t len;

      if (remaining + 2 <= maxPayloadSize) {
        len = remaining;
        last_pak = true;
      } else {
        len = maxPayloadSize - 2;
      }
      MP4AddRtpPacket(file, hid, last_pak);

      MP4AddRtpImmediateData(file, hid,
                            (u_int8_t*)&payload_head, sizeof(payload_head));
      payload_head = 0;
      MP4AddRtpSampleData(file, hid, sid,
                          offset, len);
      offset += len;
      remaining -= len;
    }
    MP4WriteRtpHint(file, hid, duration, true);
  }

  free(pSampleBuffer);

  return true;
}
Exemplo n.º 23
0
Result SoundSourceM4A::tryOpen(const AudioSourceConfig& audioSrcCfg) {
    DEBUG_ASSERT(MP4_INVALID_FILE_HANDLE == m_hFile);
    // open MP4 file, check for >= ver 1.9.1
    // From mp4v2/file.h:
    //  * On Windows, this should be a UTF-8 encoded string.
    //  * On other platforms, it should be an 8-bit encoding that is
    //  * appropriate for the platform, locale, file system, etc.
    //  * (prefer to use UTF-8 when possible).
#if MP4V2_PROJECT_version_hex <= 0x00010901
    m_hFile = MP4Read(getLocalFileName().toUtf8().constData(), 0);
#else
    m_hFile = MP4Read(getLocalFileName().toUtf8().constData());
#endif
    if (MP4_INVALID_FILE_HANDLE == m_hFile) {
        qWarning() << "Failed to open file for reading:" << getUrlString();
        return ERR;
    }

    m_trackId = findFirstAudioTrackId(m_hFile);
    if (MP4_INVALID_TRACK_ID == m_trackId) {
        qWarning() << "No AAC track found:" << getUrlString();
        return ERR;
    }

    // Read fixed sample duration.  If the sample duration is not
    // fixed (that is, if the number of frames per sample block varies
    // through the file), the call returns MP4_INVALID_DURATION. We
    // can't currently handle these.
    m_framesPerSampleBlock = MP4GetTrackFixedSampleDuration(m_hFile, m_trackId);
    if (MP4_INVALID_DURATION == m_framesPerSampleBlock) {
      qWarning() << "Unable to decode tracks with non-fixed sample durations: " << getUrlString();
      return ERR;
    }

    const MP4SampleId numberOfSamples =
            MP4GetTrackNumberOfSamples(m_hFile, m_trackId);
    if (0 >= numberOfSamples) {
        qWarning() << "Failed to read number of samples from file:" << getUrlString();
        return ERR;
    }
    m_maxSampleBlockId = kSampleBlockIdMin + (numberOfSamples - 1);

    // Determine the maximum input size (in bytes) of a
    // sample block for the selected track.
    const u_int32_t maxSampleBlockInputSize = MP4GetTrackMaxSampleSize(m_hFile,
            m_trackId);
    m_inputBuffer.resize(maxSampleBlockInputSize, 0);

    DEBUG_ASSERT(nullptr == m_hDecoder); // not already opened
    m_hDecoder = NeAACDecOpen();
    if (!m_hDecoder) {
        qWarning() << "Failed to open the AAC decoder!";
        return ERR;
    }
    NeAACDecConfigurationPtr pDecoderConfig = NeAACDecGetCurrentConfiguration(
            m_hDecoder);
    pDecoderConfig->outputFormat = FAAD_FMT_FLOAT;
    if ((kChannelCountMono == audioSrcCfg.channelCountHint) ||
            (kChannelCountStereo == audioSrcCfg.channelCountHint)) {
        pDecoderConfig->downMatrix = 1;
    } else {
        pDecoderConfig->downMatrix = 0;
    }

    pDecoderConfig->defObjectType = LC;
    if (!NeAACDecSetConfiguration(m_hDecoder, pDecoderConfig)) {
        qWarning() << "Failed to configure AAC decoder!";
        return ERR;
    }

    u_int8_t* configBuffer = nullptr;
    u_int32_t configBufferSize = 0;
    if (!MP4GetTrackESConfiguration(m_hFile, m_trackId, &configBuffer,
            &configBufferSize)) {
        /* failed to get mpeg-4 audio config... this is ok.
         * NeAACDecInit2() will simply use default values instead.
         */
        qWarning() << "Failed to read the MP4 audio configuration."
                << "Continuing with default values.";
    }

    SAMPLERATE_TYPE sampleRate;
    unsigned char channelCount;
    if (0 > NeAACDecInit2(m_hDecoder, configBuffer, configBufferSize,
                    &sampleRate, &channelCount)) {
        free(configBuffer);
        qWarning() << "Failed to initialize the AAC decoder!";
        return ERR;
    } else {
        free(configBuffer);
    }

    // Calculate how many sample blocks we need to decode in advance
    // of a random seek in order to get the recommended number of
    // prefetch frames
    m_numberOfPrefetchSampleBlocks  = (kNumberOfPrefetchFrames +
                                      (m_framesPerSampleBlock - 1)) / m_framesPerSampleBlock;
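    // rounded-up integer division, e.g. (hypothetical numbers) 2112 prefetch frames
    // with 1024 frames per block: (2112 + 1023) / 1024 = 3 blocks of look-ahead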

    setChannelCount(channelCount);
    setFrameRate(sampleRate);
    setFrameCount(((m_maxSampleBlockId - kSampleBlockIdMin) + 1) * m_framesPerSampleBlock);

    // Resize temporary buffer for decoded sample data
    const SINT sampleBufferCapacity =
            frames2samples(m_framesPerSampleBlock);
    m_sampleBuffer.resetCapacity(sampleBufferCapacity);

    // Invalidate current position to enforce the following
    // seek operation
    m_curFrameIndex = getMaxFrameIndex();

    // (Re-)Start decoding at the beginning of the file
    seekSampleFrame(getMinFrameIndex());

    return OK;
}
Exemplo n.º 24
0
//#define DEBUG_G711 1
extern "C" bool G711Hinter (MP4FileHandle mp4file, 
			    MP4TrackId trackid,
			    uint16_t maxPayloadSize)
{
  uint32_t numSamples;
  uint8_t audioType;
  MP4SampleId sampleId;
  uint32_t sampleSize;
  MP4TrackId hintTrackId;
  uint8_t payload;
  uint32_t bytes_this_hint;
  uint32_t sampleOffset;

  numSamples = MP4GetTrackNumberOfSamples(mp4file, trackid);

  if (numSamples == 0) return false;

  audioType = MP4GetTrackEsdsObjectTypeId(mp4file, trackid);

  if (audioType != MP4_ALAW_AUDIO_TYPE &&
      audioType != MP4_ULAW_AUDIO_TYPE) return false;

  hintTrackId = MP4AddHintTrack(mp4file, trackid);

  if (hintTrackId == MP4_INVALID_TRACK_ID) {
    return false;
  }
  const char *type;

  if (audioType == MP4_ALAW_AUDIO_TYPE) {
    payload = 8;
    type = "PCMA";
  } else {
    payload = 0;
    type = "PCMU";
  }

  MP4SetHintTrackRtpPayload(mp4file, hintTrackId, type, &payload, 0,NULL,
			    false);

  MP4Duration sampleDuration;
  bool have_skip;
  sampleId = 1;
  sampleSize = MP4GetSampleSize(mp4file, trackid, sampleId);
  sampleDuration = MP4GetSampleDuration(mp4file, trackid, sampleId);
  have_skip = sampleDuration != sampleSize;
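  // with G.711 the track timescale equals the sample rate and each sample is one
  // byte, so duration normally equals size; a mismatch indicates a gap in the track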
  sampleOffset = 0;
  bytes_this_hint = 0;

  if (maxPayloadSize > 160) maxPayloadSize = 160;
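  // 160 bytes of G.711 (8000 samples/s, 1 byte per sample) is 20 ms of audio,
  // the customary RTP packet duration for PCMA/PCMU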

  while (1) {
    if (bytes_this_hint == 0) {
#ifdef DEBUG_G711
      printf("Adding hint/packet\n");
#endif
      MP4AddRtpHint(mp4file, hintTrackId);
      MP4AddRtpPacket(mp4file, hintTrackId, false); // marker bit 0
    }
    uint16_t bytes_left_this_packet;
    bytes_left_this_packet = maxPayloadSize - bytes_this_hint;
    if (sampleSize >= bytes_left_this_packet) {
      MP4AddRtpSampleData(mp4file, hintTrackId, 
			  sampleId, sampleOffset, bytes_left_this_packet);
      bytes_this_hint += bytes_left_this_packet;
      sampleSize -= bytes_left_this_packet;
      sampleOffset += bytes_left_this_packet;
#ifdef DEBUG_G711
      printf("Added sample with %u bytes\n", bytes_left_this_packet);
#endif
    } else {
      MP4AddRtpSampleData(mp4file, hintTrackId, 
			  sampleId, sampleOffset, sampleSize);
      bytes_this_hint += sampleSize;
#ifdef DEBUG_G711
      printf("Added sample with %u bytes\n", sampleSize);
#endif
      sampleSize = 0;
    }

    if (bytes_this_hint >= maxPayloadSize) {
      // Write the hint
      // duration is bytes written
      MP4WriteRtpHint(mp4file, hintTrackId, bytes_this_hint);
#ifdef DEBUG_G711
      printf("Finished packet - bytes %u\n", bytes_this_hint);
#endif
      bytes_this_hint = 0;
    }
    if (sampleSize == 0) {
      // next sample
      if (have_skip && bytes_this_hint != 0) {
#ifdef DEBUG_G711
	printf("duration - ending packet - bytes %u\n", bytes_this_hint);
#endif
	MP4WriteRtpHint(mp4file, hintTrackId, bytes_this_hint);
	bytes_this_hint = 0;
      }
      sampleId++;
      if (sampleId > numSamples) {
	// finish it and exit
	if (bytes_this_hint != 0) {
	  MP4WriteRtpHint(mp4file, hintTrackId, bytes_this_hint);
	}
	return true;
      }
      sampleSize = MP4GetSampleSize(mp4file, trackid, sampleId);
      sampleDuration = MP4GetSampleDuration(mp4file, trackid, sampleId);
      have_skip = sampleDuration != sampleSize;
#ifdef DEBUG_G711
      printf("Next sample %u - size %u %u\n", sampleId, sampleSize,
	     have_skip);
#endif
      sampleOffset = 0;
    }
  }
	
  return true; // will never reach here
}
Exemplo n.º 25
0
QWORD MP4TextTrack::ReadPrevious(QWORD time,Listener *listener)
{
	//Check it is the first
	if (sampleId==1)
	{
		//Set empty frame
		frame.SetFrame(time,(wchar_t*)NULL,0);
		//call listener
		if (listener)
			//Call it
			listener->onTextFrame(frame);
		//Exit
		return 1;
	}

	//The previous one
	MP4SampleId prevId = sampleId-1;

	//If it was not found
	if (sampleId==MP4_INVALID_SAMPLE_ID)
		//The latest
		prevId = MP4GetTrackNumberOfSamples(mp4,track);

	// Get size of sample
	frameSize = MP4GetSampleSize(mp4, track, prevId);

	// Get data pointer
	BYTE *data = (BYTE*)malloc(frameSize);
	//Get max data length
	DWORD dataLen = frameSize;

	MP4Timestamp	startTime;
	MP4Duration	duration;
	MP4Duration	renderingOffset;

	// Read the previous text sample
	if (!MP4ReadSample(
				mp4,				// MP4FileHandle hFile
				track,				// MP4TrackId hintTrackId
				prevId,				// MP4SampleId sampleId,
				(u_int8_t **) &data,		// u_int8_t** ppBytes
				(u_int32_t *) &dataLen,		// u_int32_t* pNumBytes
				&startTime,			// MP4Timestamp* pStartTime
				&duration,			// MP4Duration* pDuration
				&renderingOffset,		// MP4Duration* pRenderingOffset
				NULL				// bool* pIsSyncSample
	))
	{
		//Free the sample buffer and exit
		free(data);
		return MP4_INVALID_TIMESTAMP;
	}

	//Get length
	if (dataLen>2)
	{
		//Get string length
		DWORD len = data[0]<<8 | data[1];
		//Set frame
		frame.SetFrame(time,data+2+renderingOffset,len-renderingOffset-2);
		//call listener
		if (listener)
			//Call it
			listener->onTextFrame(frame);
	}

	// exit next send time
	return 1;
}