void CMp4ByteStream::play (uint64_t start) { m_play_start_time = start; MP4Timestamp mp4_ts; MP4SampleId mp4_sampleId; m_parent->lock_file_mutex(); mp4_ts = MP4ConvertToTrackTimestamp(m_parent->get_file(), m_track, start, MP4_MSECS_TIME_SCALE); mp4_sampleId = MP4GetSampleIdFromTime(m_parent->get_file(), m_track, mp4_ts, TRUE); uint64_t ts; MP4Timestamp sampleTime; sampleTime = MP4GetSampleTime(m_parent->get_file(), m_track, mp4_sampleId); ts = MP4ConvertFromTrackTimestamp(m_parent->get_file(), m_track, sampleTime, MP4_MSECS_TIME_SCALE); m_parent->unlock_file_mutex(); #ifdef DEBUG_MP4_FRAME mp4f_message(LOG_DEBUG, "%s searching timestamp "U64" gives "U64, m_name, start, mp4_ts); mp4f_message(LOG_DEBUG, "%s values are sample time "U64" ts "U64, m_name, sampleTime, ts); #endif set_timebase(mp4_sampleId); }
/*
 * Read - read the next text sample, deliver it to the listener as a
 * TextFrame, and return the timestamp of the following frame (msec).
 *
 * @param listener  callback target for decoded text frames (may be NULL)
 * @return next frame time in msec, or MP4_INVALID_TIMESTAMP on error
 *
 * Fixes vs. previous revision:
 *  - the malloc'd sample buffer was never freed (leaked on every call,
 *    on both the error and success paths) - now freed on all paths
 *  - malloc result was not checked
 *  - the embedded length field could underflow len-renderingOffset-2
 *  - removed unused locals (next/last/first)
 */
QWORD MP4TextTrack::Read(Listener *listener)
{
	// Get number of samples for this sample
	frameSamples = MP4GetSampleDuration(mp4, track, sampleId);
	// Get size of sample
	frameSize = MP4GetSampleSize(mp4, track, sampleId);
	// Get sample timestamp
	frameTime = MP4GetSampleTime(mp4, track, sampleId);
	//Convert to miliseconds
	frameTime = MP4ConvertFromTrackTimestamp(mp4, track, frameTime, 1000);

	// Allocate the sample buffer
	BYTE *data = (BYTE*)malloc(frameSize);
	//Check allocation
	if (!data)
		//Error
		return MP4_INVALID_TIMESTAMP;
	//Get max data lenght
	DWORD dataLen = frameSize;

	MP4Timestamp startTime;
	MP4Duration duration;
	MP4Duration renderingOffset;

	// Read next sample (sampleId is advanced whether or not the read
	// succeeds, matching the previous behavior)
	if (!MP4ReadSample(
		mp4,				// MP4FileHandle hFile
		track,				// MP4TrackId hintTrackId
		sampleId++,			// MP4SampleId sampleId,
		(u_int8_t **) &data,		// u_int8_t** ppBytes
		(u_int32_t *) &dataLen,		// u_int32_t* pNumBytes
		&startTime,			// MP4Timestamp* pStartTime
		&duration,			// MP4Duration* pDuration
		&renderingOffset,		// MP4Duration* pRenderingOffset
		NULL				// bool* pIsSyncSample
	))
	{
		//Free buffer - do not leak on error
		free(data);
		//Last
		return MP4_INVALID_TIMESTAMP;
	}

	//Log("Got text frame [time:%d,start:%d,duration:%d,lenght:%d,offset:%d\n",frameTime,startTime,duration,dataLen,renderingOffset);
	//Dump(data,dataLen);
	//Get length
	if (dataLen>2)
	{
		//Get string length from the 2-byte big-endian prefix
		DWORD len = data[0]<<8 | data[1];
		//Guard: payload must be long enough to strip prefix+offset
		if (len >= renderingOffset + 2 && 2 + len <= dataLen)
		{
			//Set frame
			frame.SetFrame(startTime,data+2+renderingOffset,len-renderingOffset-2);
			//call listener
			if (listener)
				//Call it
				listener->onTextFrame(frame);
		}
	}

	//Release the sample buffer
	free(data);

	// exit next send time
	return GetNextFrameTime();
}
/*
 * GetNextFrameTime - timestamp of the current hint sample, in msec.
 * Returns MP4_INVALID_TIMESTAMP unchanged when there is no such sample.
 */
QWORD MP4RtpTrack::GetNextFrameTime()
{
	//Fetch the current sample's timestamp in track units
	QWORD sampleTs = MP4GetSampleTime(mp4, hint, sampleId);
	//Propagate the invalid marker without converting it
	if (sampleTs == MP4_INVALID_TIMESTAMP)
		return MP4_INVALID_TIMESTAMP;
	//Translate track units into milliseconds and hand it back
	return MP4ConvertFromTrackTimestamp(mp4, hint, sampleTs, 1000);
}
/* * read_frame for video - this will try to read the next frame - it * tries to be smart about reading it 1 time if we've already read it * while bookmarking */ void CMp4ByteStream::read_frame (uint32_t frame_to_read, frame_timestamp_t *pts) { #ifdef DEBUG_MP4_FRAME mp4f_message(LOG_DEBUG, "%s - Reading frame %d", m_name, frame_to_read); #endif if (m_frame_in_buffer == frame_to_read) { #ifdef DEBUG_MP4_FRAME mp4f_message(LOG_DEBUG, "%s - frame in buffer %u %u "U64, m_name, m_byte_on, m_this_frame_size, m_frame_on_ts); #endif m_byte_on = 0; m_frame_on_ts = m_frame_in_buffer_ts; m_frame_on_has_sync = m_frame_in_buffer_has_sync; if (pts != NULL) { pts->msec_timestamp = m_frame_on_ts; pts->audio_freq_timestamp = m_frame_on_sample_ts; pts->audio_freq = m_sample_freq; pts->timestamp_is_pts = false; } return; } // Haven't already read the next frame, so - get the size, see if // it fits, then read it into the appropriate buffer m_parent->lock_file_mutex(); m_frame_in_buffer = frame_to_read; MP4Timestamp sampleTime; MP4Duration sampleDuration, sampleRenderingOffset; bool isSyncSample = FALSE; bool ret; u_int8_t *temp; m_this_frame_size = m_max_frame_size; temp = m_buffer; ret = MP4ReadSample(m_parent->get_file(), m_track, frame_to_read, &temp, &m_this_frame_size, &sampleTime, &sampleDuration, &sampleRenderingOffset, &isSyncSample); if (ret == FALSE) { mp4f_message(LOG_ALERT, "Couldn't read frame from mp4 file - frame %d %d", frame_to_read, m_track); m_eof = true; m_parent->unlock_file_mutex(); return; } memset(m_buffer + m_this_frame_size, 0, sizeof(uint32_t)); //*(uint32_t *)(m_buffer + m_this_frame_size) = 0; // add some 0's #ifdef OUTPUT_TO_FILE fwrite(m_buffer, m_this_frame_size, 1, m_output_file); #endif uint64_t ts; ts = MP4ConvertFromTrackTimestamp(m_parent->get_file(), m_track, sampleTime, MP4_MSECS_TIME_SCALE); //if (isSyncSample == TRUE && m_has_video != 0 ) player_debug_message("%s has sync sample "U64, m_name, ts); #if 0 mp4f_message(LOG_DEBUG, "%s frame %u 
sample time "U64 " converts to time "U64, m_name, frame_to_read, sampleTime, ts); #endif if (pts != NULL) { pts->msec_timestamp = ts; pts->audio_freq_timestamp = sampleTime; pts->audio_freq = m_sample_freq; pts->timestamp_is_pts = false; } m_frame_on_sample_ts = sampleTime; m_frame_in_buffer_ts = ts; m_frame_on_ts = ts; m_frame_in_buffer_has_sync = m_frame_on_has_sync = isSyncSample; m_parent->unlock_file_mutex(); m_byte_on = 0; }
/*
 * aac_read - deadbeef decoder read callback: fill `bytes` with up to
 * `size` bytes of interleaved 16-bit PCM decoded from the AAC stream.
 * Returns the number of bytes produced (0 at end of stream).
 *
 * Structure of the main loop:
 *  1. drop samples still pending from a seek (skipsamples),
 *  2. drain previously-decoded PCM from out_buffer (with optional
 *     channel remapping),
 *  3. when out_buffer is empty, decode the next AAC frame - either an
 *     mp4 sample (mp4ff or libmp4v2 path) or raw ADTS data from the
 *     stream buffer.
 */
static int aac_read (DB_fileinfo_t *_info, char *bytes, int size) {
    aac_info_t *info = (aac_info_t *)_info;
    if (info->eof) {
        trace ("aac_read: received call after eof\n");
        return 0;
    }
    /* bytes per interleaved PCM frame (all channels) */
    int samplesize = _info->fmt.channels * _info->fmt.bps / 8;
    /* for seekable sources, clamp the request to the selected range */
    if (!info->file->vfs->is_streaming ()) {
        if (info->currentsample + size / samplesize > info->endsample) {
            size = (info->endsample - info->currentsample + 1) * samplesize;
            if (size <= 0) {
                trace ("aac_read: eof (current=%d, total=%d)\n", info->currentsample, info->endsample);
                return 0;
            }
        }
    }
    int initsize = size;
    while (size > 0) {
        /* 1. discard post-seek samples straight out of out_buffer */
        if (info->skipsamples > 0 && info->out_remaining > 0) {
            int skip = min (info->out_remaining, info->skipsamples);
            if (skip < info->out_remaining) {
                memmove (info->out_buffer, info->out_buffer + skip * samplesize, (info->out_remaining - skip) * samplesize);
            }
            info->out_remaining -= skip;
            info->skipsamples -= skip;
        }
        /* 2. copy already-decoded PCM to the caller */
        if (info->out_remaining > 0) {
            int n = size / samplesize;
            n = min (info->out_remaining, n);
            char *src = info->out_buffer;
            if (info->noremap) {
                /* decoder channel order already matches output order */
                memcpy (bytes, src, n * samplesize);
                bytes += n * samplesize;
                src += n * samplesize;
            }
            else {
                int i, j;
                if (info->remap[0] == -1) {
                    /* build remap mtx: remap[output slot] = decoder channel index,
                     * derived from faad2's reported channel positions */
                    // FIXME: should build channelmask 1st; then remap based on channelmask
                    for (i = 0; i < _info->fmt.channels; i++) {
                        switch (info->frame_info.channel_position[i]) {
                        case FRONT_CHANNEL_CENTER:
                            trace ("FC->%d\n", i);
                            info->remap[2] = i;
                            break;
                        case FRONT_CHANNEL_LEFT:
                            trace ("FL->%d\n", i);
                            info->remap[0] = i;
                            break;
                        case FRONT_CHANNEL_RIGHT:
                            trace ("FR->%d\n", i);
                            info->remap[1] = i;
                            break;
                        case SIDE_CHANNEL_LEFT:
                            trace ("SL->%d\n", i);
                            info->remap[6] = i;
                            break;
                        case SIDE_CHANNEL_RIGHT:
                            trace ("SR->%d\n", i);
                            info->remap[7] = i;
                            break;
                        case BACK_CHANNEL_LEFT:
                            trace ("RL->%d\n", i);
                            info->remap[4] = i;
                            break;
                        case BACK_CHANNEL_RIGHT:
                            trace ("RR->%d\n", i);
                            info->remap[5] = i;
                            break;
                        case BACK_CHANNEL_CENTER:
                            trace ("BC->%d\n", i);
                            info->remap[8] = i;
                            break;
                        case LFE_CHANNEL:
                            trace ("LFE->%d\n", i);
                            info->remap[3] = i;
                            break;
                        default:
                            trace ("aac: unknown ch(%d)->%d\n", info->frame_info.channel_position[i], i);
                            break;
                        }
                    }
                    for (i = 0; i < _info->fmt.channels; i++) {
                        trace ("%d ", info->remap[i]);
                    }
                    trace ("\n");
                    if (info->remap[0] == -1) {
                        info->remap[0] = 0;
                    }
                    /* NOTE(review): this compares remap[] *indices* against
                     * channel-position enum values - looks suspicious, but it
                     * is kept as-is; confirm against the faad2 enum values */
                    if ((_info->fmt.channels == 1 && info->remap[0] == FRONT_CHANNEL_CENTER) || (_info->fmt.channels == 2 && info->remap[0] == FRONT_CHANNEL_LEFT && info->remap[1] == FRONT_CHANNEL_RIGHT)) {
                        info->noremap = 1;
                    }
                }
                /* reorder 16-bit samples channel-by-channel per frame */
                for (i = 0; i < n; i++) {
                    for (j = 0; j < _info->fmt.channels; j++) {
                        ((int16_t *)bytes)[j] = ((int16_t *)src)[info->remap[j]];
                    }
                    src += samplesize;
                    bytes += samplesize;
                }
            }
            size -= n * samplesize;
            if (n == info->out_remaining) {
                info->out_remaining = 0;
            }
            else {
                /* keep the unconsumed tail at the front of out_buffer */
                memmove (info->out_buffer, src, (info->out_remaining - n) * samplesize);
                info->out_remaining -= n;
            }
            continue;
        }
        /* 3. out_buffer is empty - decode the next AAC frame */
        char *samples = NULL;
        if (info->mp4file) {
            if (info->mp4sample >= info->mp4samples) {
                break;
            }
            unsigned char *buffer = NULL;
            int buffer_size = 0;
#ifdef USE_MP4FF
            int rc = mp4ff_read_sample (info->mp4file, info->mp4track, info->mp4sample, &buffer, &buffer_size);
            if (rc == 0) {
                trace ("mp4ff_read_sample failed\n");
                info->eof = 1;
                break;
            }
#else
            /* libmp4v2 path: read into the preallocated sample buffer */
            buffer = info->samplebuffer;
            buffer_size = info->maxSampleSize;
            MP4Timestamp sampleTime;
            MP4Duration sampleDuration;
            MP4Duration sampleRenderingOffset;
            bool isSyncSample;
            MP4ReadSample (info->mp4file, info->mp4track, info->mp4sample, &buffer, &buffer_size, &sampleTime, &sampleDuration, &sampleRenderingOffset, &isSyncSample);
            // convert timestamp and duration from track time to milliseconds
            u_int64_t myTime = MP4ConvertFromTrackTimestamp (info->mp4file, info->mp4track, sampleTime, MP4_MSECS_TIME_SCALE);
            u_int64_t myDuration = MP4ConvertFromTrackDuration (info->mp4file, info->mp4track, sampleDuration, MP4_MSECS_TIME_SCALE);
#endif
            info->mp4sample++;
            samples = NeAACDecDecode(info->dec, &info->frame_info, buffer, buffer_size);
            /* NOTE(review): on the libmp4v2 path `buffer` points at
             * info->samplebuffer unless MP4ReadSample reallocated it -
             * freeing it unconditionally assumes mp4ff ownership; confirm
             * which path this build actually uses */
            if (buffer) {
                free (buffer);
            }
            if (!samples) {
                break;
            }
        }
        else {
            /* raw/ADTS streaming path: keep the input buffer topped up */
            if (info->remaining < AAC_BUFFER_SIZE) {
                trace ("fread from offs %lld\n", deadbeef->ftell (info->file));
                size_t res = deadbeef->fread (info->buffer + info->remaining, 1, AAC_BUFFER_SIZE-info->remaining, info->file);
                info->remaining += res;
                trace ("remain: %d\n", info->remaining);
                if (!info->remaining) {
                    break;
                }
            }
            /* no trailing ';' here - relies on trace() expanding to a
             * braced block when enabled; confirm the macro definition */
            trace ("NeAACDecDecode %d bytes\n", info->remaining)
            samples = NeAACDecDecode (info->dec, &info->frame_info, info->buffer, info->remaining);
            trace ("samples =%p\n", samples);
            if (!samples) {
                trace ("NeAACDecDecode failed with error %s (%d), consumed=%d\n", NeAACDecGetErrorMessage(info->frame_info.error), (int)info->frame_info.error, info->frame_info.bytesconsumed);
                /* tolerate up to 10 consecutive decode errors before giving up */
                if (info->num_errors > 10) {
                    trace ("NeAACDecDecode failed %d times, interrupting\n", info->num_errors);
                    break;
                }
                info->num_errors++;
                info->remaining = 0;
                continue;
            }
            info->num_errors=0;
            int consumed = info->frame_info.bytesconsumed;
            if (consumed > info->remaining) {
                trace ("NeAACDecDecode consumed more than available! wtf?\n");
                break;
            }
            /* shift unconsumed input to the front of the buffer */
            if (consumed == info->remaining) {
                info->remaining = 0;
            }
            else if (consumed > 0) {
                memmove (info->buffer, info->buffer + consumed, info->remaining - consumed);
                info->remaining -= consumed;
            }
        }
        /* stash decoded PCM (16-bit, hence *2) for step 2 on the next pass */
        if (info->frame_info.samples > 0) {
            memcpy (info->out_buffer, samples, info->frame_info.samples * 2);
            info->out_remaining = info->frame_info.samples / info->frame_info.channels;
        }
    }
    info->currentsample += (initsize-size) / samplesize;
    return initsize-size;
}
void main(int argc, char** argv) { if (argc < 2) { fprintf(stderr, "Usage: %s <file>\n", argv[0]); exit(1); } //u_int32_t verbosity = MP4_DETAILS_ALL; char* fileName = argv[1]; // open the mp4 file, and read meta-info MP4FileHandle mp4File = MP4Read(fileName ); uint8_t profileLevel = MP4GetVideoProfileLevel(mp4File); // get a handle on the first video track MP4TrackId trackId = MP4FindTrackId(mp4File, 0, "video"); // gather the crucial track information uint32_t timeScale = MP4GetTrackTimeScale(mp4File, trackId); // note all times and durations // are in units of the track time scale MP4Duration trackDuration = MP4GetTrackDuration(mp4File, trackId); MP4SampleId numSamples = MP4GetTrackNumberOfSamples(mp4File, trackId); uint32_t maxSampleSize = MP4GetTrackMaxSampleSize(mp4File, trackId); uint8_t* pConfig; uint32_t configSize = 0; MP4GetTrackESConfiguration(mp4File, trackId, &pConfig, &configSize); // initialize decoder with Elementary Stream (ES) configuration // done with our copy of ES configuration free(pConfig); // now consecutively read and display the track samples uint8_t* pSample = (uint8_t*)malloc(maxSampleSize); uint32_t sampleSize; MP4Timestamp sampleTime; MP4Duration sampleDuration; MP4Duration sampleRenderingOffset; bool isSyncSample; for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) { // give ReadSample our own buffer, and let it know how big it is sampleSize = maxSampleSize; // read next sample from video track MP4ReadSample(mp4File, trackId, sampleId, &pSample, &sampleSize, &sampleTime, &sampleDuration, &sampleRenderingOffset, &isSyncSample); // convert timestamp and duration from track time to milliseconds uint64_t myTime = MP4ConvertFromTrackTimestamp(mp4File, trackId, sampleTime, MP4_MSECS_TIME_SCALE); uint64_t myDuration = MP4ConvertFromTrackDuration(mp4File, trackId, sampleDuration, MP4_MSECS_TIME_SCALE); // decode frame and display it } // close mp4 file MP4Close(mp4File); // Note to seek to time 'when' in the track // use 
MP4GetSampleIdFromTime(MP4FileHandle hFile, // MP4Timestamp when, bool wantSyncSample) // 'wantSyncSample' determines if a sync sample is desired or not // e.g. // MP4Timestamp when = // MP4ConvertToTrackTimestamp(mp4File, trackId, 30, MP4_SECS_TIME_SCALE); // MP4SampleId newSampleId = MP4GetSampleIdFromTime(mp4File, when, true); // MP4ReadSample(mp4File, trackId, newSampleId, ...); // // Note that start time for sample may be later than 'when' exit(0); }
/*
 * Read - emit the next RTP packet of the current hint sample.  On the
 * first packet of a frame the whole media sample is also read and
 * delivered to the listener as a media frame.
 *
 * @param listener  callback target for media frames / rtp packets (may be NULL)
 * @return msec time of the current frame, next frame time when the last
 *         packet of a hint was sent, or MP4_INVALID_TIMESTAMP on error
 *
 * Fixes vs. previous revision:
 *  - listener was null-checked for onMediaFrame but NOT for
 *    onRTPPacket (potential null dereference) - now checked everywhere
 *  - inner BYTE* shadowed the outer `data` pointer - renamed
 *  - typo in error message ("hintt")
 */
QWORD MP4RtpTrack::Read(Listener *listener)
{
	int last = 0;
	uint8_t* data;
	bool isSyncSample;

	// If it's first packet of a frame
	if (!numHintSamples)
	{
		// Get number of rtp packets for this sample
		if (!MP4ReadRtpHint(mp4, hint, sampleId, &numHintSamples))
		{
			//Print error
			Error("Error reading hint");
			//Exit
			return MP4_INVALID_TIMESTAMP;
		}

		// Get number of samples for this sample
		frameSamples = MP4GetSampleDuration(mp4, hint, sampleId);
		// Get size of sample
		frameSize = MP4GetSampleSize(mp4, hint, sampleId);
		// Get sample timestamp
		frameTime = MP4GetSampleTime(mp4, hint, sampleId);
		//Convert to miliseconds
		frameTime = MP4ConvertFromTrackTimestamp(mp4, hint, frameTime, 1000);

		// Check if it is H264 and it is a Sync frame
		// NOTE(review): helper is named SendH263SEI but guarded by H264 - confirm
		if (codec==VideoCodec::H264 && MP4GetSampleSync(mp4,track,sampleId))
			// Send SEI info
			SendH263SEI(listener);

		//Media sample buffer (renamed: previously shadowed outer `data`)
		BYTE *frameData = NULL;
		DWORD frameDataLen = 0;
		MP4Timestamp startTime;
		MP4Duration duration;
		MP4Duration renderingOffset;

		//Get values
		frameData = frame->GetData();
		frameDataLen = frame->GetMaxMediaLength();

		// Read the whole media sample
		if (!MP4ReadSample(
			mp4,				// MP4FileHandle hFile
			track,				// MP4TrackId hintTrackId
			sampleId,			// MP4SampleId sampleId,
			(u_int8_t **) &frameData,	// u_int8_t** ppBytes
			(u_int32_t *) &frameDataLen,	// u_int32_t* pNumBytes
			&startTime,			// MP4Timestamp* pStartTime
			&duration,			// MP4Duration* pDuration
			&renderingOffset,		// MP4Duration* pRenderingOffset
			&isSyncSample			// bool* pIsSyncSample
		))
		{
			Error("Error reading sample");
			//Last
			return MP4_INVALID_TIMESTAMP;
		}

		//Check type
		if (media == MediaFrame::Video)
		{
			//Get video frame
			VideoFrame *video = (VideoFrame*)frame;
			//Set lenght
			video->SetLength(frameDataLen);
			//Timestamp in 90kHz clock
			video->SetTimestamp(startTime*90000/timeScale);
			//Set intra
			video->SetIntra(isSyncSample);
		} else {
			//Get Audio frame
			AudioFrame *audio = (AudioFrame*)frame;
			//Set lenght
			audio->SetLength(frameDataLen);
			//Timestamp in 8kHz clock
			audio->SetTimestamp(startTime*8000/timeScale);
		}

		//Check listener
		if (listener)
			//Frame callback
			listener->onMediaFrame(*frame);
	}

	// if it's the last packet of the hint, set the RTP marker
	if (packetIndex + 1 == numHintSamples)
		//Set last mark
		last = 1;

	// Set mark bit
	rtp.SetMark(last);

	// Get data pointer
	data = rtp.GetMediaData();
	//Get max data lenght
	DWORD dataLen = rtp.GetMaxMediaLength();

	// Read next rtp packet
	if (!MP4ReadRtpPacket(
		mp4,				// MP4FileHandle hFile
		hint,				// MP4TrackId hintTrackId
		packetIndex++,			// u_int16_t packetIndex
		(u_int8_t **) &data,		// u_int8_t** ppBytes
		(u_int32_t *) &dataLen,		// u_int32_t* pNumBytes
		0,				// u_int32_t ssrc DEFAULT(0)
		0,				// bool includeHeader DEFAULT(true)
		1				// bool includePayload DEFAULT(true)
	))
	{
		//Error
		Error("Error reading packet [%d,%d,%d]\n", hint, track, packetIndex);
		//Exit
		return MP4_INVALID_TIMESTAMP;
	}

	//Check it fits in the packet buffer
	if (dataLen>rtp.GetMaxMediaLength())
	{
		//Error
		Error("RTP packet too big [%u,%u]\n",dataLen,rtp.GetMaxMediaLength());
		//Exit
		return MP4_INVALID_TIMESTAMP;
	}

	//Set lenght
	rtp.SetMediaLength(dataLen);

	// Write frame (null-check added - matches the onMediaFrame path)
	if (listener)
		listener->onRTPPacket(rtp);

	// Are we the last packet in a hint?
	if (last)
	{
		// The first hint
		packetIndex = 0;
		// Go for next sample
		sampleId++;
		numHintSamples = 0;
		//Return next frame time
		return GetNextFrameTime();
	}

	// This packet is this one
	return frameTime;
}