Example #1
int main(int argc, char** argv)
{
	if (argc < 2) {
		fprintf(stderr, "Usage: %s <file>\n", argv[0]);
		exit(1);
	}

	u_int32_t verbosity = 0 /* MP4_DETAILS_ALL */;

	MP4FileHandle mp4File = MP4Create(argv[1], verbosity);

	if (!mp4File) {
		exit(1);
	}

	MP4TrackId urlTrackId = 
		MP4AddTrack(mp4File, "URLF");
	printf("urlTrackId %d\n", urlTrackId);

	u_int8_t i;
	char url[128];

	for (i = 1; i <= 5; i++) {
		sprintf(url, "http://server.com/foo/bar%u.html", i);

		MP4WriteSample(mp4File, urlTrackId, 
			(u_int8_t*)url, strlen(url) + 1, (MP4Duration)i);
	}

	MP4Close(mp4File);

	mp4File = MP4Read(argv[1], verbosity);

	// check that we can find the track again
	urlTrackId = MP4FindTrackId(mp4File, 0, "URLF");
	printf("urlTrackId %d\n", urlTrackId);
	
	for (i = 1; i <= 5; i++) {
		u_int8_t* pSample = NULL;
		u_int32_t sampleSize = 0;
		MP4Duration duration;
		bool rc;

		rc = MP4ReadSample(mp4File, urlTrackId, i,
			&pSample, &sampleSize, NULL, &duration);

		if (rc) {
			printf("Sample %i duration "D64": %s\n", 
				i, duration, pSample);
			free(pSample);
		} else {
			printf("Couldn't read sample %i\n", i);
		}
	}

	MP4Close(mp4File);

	exit(0);
}
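MP4ReadSample is used in two ways across these examples: pass a NULL sample pointer and the library allocates the buffer, which the caller must then free (as in Example #1 and Example #14), or pass a caller-owned buffer and supply its capacity in the size argument, which comes back holding the actual sample size (as in Example #12). Below is a minimal sketch of both modes; the header name and the helper read_both_ways are assumptions for illustration, not part of any example here.

#include <stdio.h>
#include <stdlib.h>
#include "mp4.h" /* assumed mpeg4ip-style header; newer mp4v2 installs <mp4v2/mp4v2.h> */

/* Illustrative helper: read sample `sid` of track `tid` both ways. */
static void read_both_ways(MP4FileHandle file, MP4TrackId tid, MP4SampleId sid)
{
	/* Mode 1: NULL pointer in, library-allocated buffer out; caller frees it. */
	u_int8_t* pBytes = NULL;
	u_int32_t numBytes = 0;
	if (MP4ReadSample(file, tid, sid, &pBytes, &numBytes,
			NULL, NULL, NULL, NULL)) {
		printf("sample %u: %u bytes (library-allocated)\n", sid, numBytes);
		free(pBytes);
	}

	/* Mode 2: caller-owned buffer; numBytes carries the buffer capacity in
	   and the actual sample size out. */
	u_int32_t maxSize = MP4GetTrackMaxSampleSize(file, tid);
	u_int8_t* pBuffer = (u_int8_t*)malloc(maxSize);
	if (pBuffer == NULL)
		return;
	numBytes = maxSize;
	if (MP4ReadSample(file, tid, sid, &pBuffer, &numBytes,
			NULL, NULL, NULL, NULL)) {
		printf("sample %u: %u bytes (caller-allocated)\n", sid, numBytes);
	}
	free(pBuffer);
}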
Example #2
QWORD MP4TextTrack::Read(Listener *listener)
{
	int next = 0;
	int last = 0;
	int first = 0;

	// Get number of samples for this sample
	frameSamples = MP4GetSampleDuration(mp4, track, sampleId);

	// Get size of sample
	frameSize = MP4GetSampleSize(mp4, track, sampleId);

	// Get sample timestamp
	frameTime = MP4GetSampleTime(mp4, track, sampleId);
	//Convert to milliseconds
	frameTime = MP4ConvertFromTrackTimestamp(mp4, track, frameTime, 1000);

	// Get data pointer
	BYTE *data = (BYTE*)malloc(frameSize);
	//Get max data length
	DWORD dataLen = frameSize;

	MP4Timestamp	startTime;
	MP4Duration	duration;
	MP4Duration	renderingOffset;

	// Read next rtp packet
	if (!MP4ReadSample(
				mp4,				// MP4FileHandle hFile
				track,				// MP4TrackId hintTrackId
				sampleId++,			// MP4SampleId sampleId,
				(u_int8_t **) &data,		// u_int8_t** ppBytes
				(u_int32_t *) &dataLen,		// u_int32_t* pNumBytes
				&startTime,			// MP4Timestamp* pStartTime
				&duration,			// MP4Duration* pDuration
				&renderingOffset,		// MP4Duration* pRenderingOffset
				NULL				// bool* pIsSyncSample
	))
		//Last
		return MP4_INVALID_TIMESTAMP;

	//Log("Got text frame [time:%d,start:%d,duration:%d,length:%d,offset:%d\n",frameTime,startTime,duration,dataLen,renderingOffset);
	//Dump(data,dataLen);
	//Get length
	if (dataLen>2)
	{
		//Get string length
		DWORD len = data[0]<<8 | data[1];
		//Set frame
		frame.SetFrame(startTime,data+2+renderingOffset,len-renderingOffset-2);
		//call listener
		if (listener)
			//Call it
			listener->onTextFrame(frame);
	}
	
	// exit next send time
	return GetNextFrameTime();
}
Example #3
    MP4ReadStatus GetNextH264VideoSample(unsigned char **sample,
                                         unsigned int &sample_size,
                                         unsigned long long int &duration,
                                         bool &is_key_frame)
    {
        if (next_video_sample_idx > video_sample_number) {
            return MP4_READ_EOS;
        }

        unsigned int video_sample_offset = 0;
        if(MP4GetSampleSync(handle, video_track_id, next_video_sample_idx)) {
            /*
             * If current sample has key frame, we need to put SPS/PPS in front of key frame.
             */
            if (pSeqHeaders && pSeqHeaderSize) {
                for(int i = 0; (pSeqHeaders[i] && pSeqHeaderSize[i]); i++) {
                    (*(unsigned int *)(video_sample + video_sample_offset)) = htonl(1);
                    video_sample_offset += 4;
                    memcpy(video_sample + video_sample_offset, pSeqHeaders[i], pSeqHeaderSize[i]);
                    video_sample_offset += pSeqHeaderSize[i];
                }
            }
            if (pPictHeaders && pPictHeaderSize) {
                for(int i = 0; (pPictHeaders[i] && pPictHeaderSize[i]); i++) {
                    (*(unsigned int *)(video_sample + video_sample_offset)) = htonl(1);
                    video_sample_offset += 4;
                    memcpy(video_sample + video_sample_offset, pPictHeaders[i], pPictHeaderSize[i]);
                    video_sample_offset += pPictHeaderSize[i];
                }
            }
        }

        MP4Duration mp4_duration = 0;
        unsigned char *video_sample_start_addr = video_sample + video_sample_offset;
        sample_size = video_sample_max_size - video_sample_offset;
        if (!MP4ReadSample(handle, video_track_id, next_video_sample_idx,
                           &video_sample_start_addr, &sample_size,
                           NULL,
                           &mp4_duration,
                           NULL,
                           &is_key_frame)) {
            printf("Failed to read video sample (%d)\n", next_video_sample_idx);
            return MP4_READ_ERR;
        }

        // Convert AVC1 format to AnnexB
        if (sample_size >= 4) {
            unsigned int *p = (unsigned int *) video_sample_start_addr;
            *p = htonl(1);
        }
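        // Note: only the first 4-byte AVC length prefix is rewritten above; a
        // sample containing several NAL units would need every length field
        // replaced with a start code for a complete Annex B conversion.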

        *sample = video_sample;
        sample_size += video_sample_offset;
        duration = (1000 * mp4_duration) / time_scale;
        next_video_sample_idx++;
        return MP4_READ_OK;
    }
Example #4
extern "C" bool MP4AV_Rfc3016Hinter(
    MP4FileHandle mp4File,
    MP4TrackId mediaTrackId,
    u_int16_t maxPayloadSize)
{
    u_int32_t numSamples = MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);
    u_int32_t maxSampleSize = MP4GetTrackMaxSampleSize(mp4File, mediaTrackId);

    if (numSamples == 0 || maxSampleSize == 0) {
        return false;
    }

    MP4TrackId hintTrackId =
        MP4AV_Rfc3016_HintTrackCreate(mp4File, mediaTrackId);

    if (hintTrackId == MP4_INVALID_TRACK_ID) {
        return false;
    }

    u_int8_t* pSampleBuffer = (u_int8_t*)malloc(maxSampleSize);
    if (pSampleBuffer == NULL) {
        MP4DeleteTrack(mp4File, hintTrackId);
        return false;
    }

    for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {
        u_int32_t sampleSize = maxSampleSize;
        MP4Timestamp startTime;
        MP4Duration duration;
        MP4Duration renderingOffset;
        bool isSyncSample;

        bool rc = MP4ReadSample(
                      mp4File, mediaTrackId, sampleId,
                      &pSampleBuffer, &sampleSize,
                      &startTime, &duration,
                      &renderingOffset, &isSyncSample);

        if (rc == false ||
                MP4AV_Rfc3016_HintAddSample(mp4File,
                                            hintTrackId,
                                            sampleId,
                                            pSampleBuffer,
                                            sampleSize,
                                            duration,
                                            renderingOffset,
                                            isSyncSample,
                                            maxPayloadSize) == false) {
            MP4DeleteTrack(mp4File, hintTrackId);
            CHECK_AND_FREE(pSampleBuffer);
            return false;
        }
    }
    CHECK_AND_FREE(pSampleBuffer);

    return true;
}
Example #5
int AacPcm::processData(MediaInfo *infos, ChunkList *chunk_list, bool *killswitch)
{
DWORD			BytesDecoded=0;
char			*bufout=0;
ChunkInfosI		*ci=0;
svc_fileReader	*reader=0;

	if(!FindBitrate && !chunk_list)
		ERROR_processData("chunk_list==NULL"); // is this case possible?

	if(!(reader=infos->getReader()))
		ERROR_processData("File doesn't exist");

	if(chunk_list)
	{
		if(!(ci=new ChunkInfosI()))
			ERROR_processData("Memory allocation error: ci");

		ci->addInfo("srate", Samplerate);
		ci->addInfo("bps", bps);
		ci->addInfo("nch", Channels);
	}

	if(!IsAAC) // MP4 file --------------------------------------------------------------------------
	{   
	unsigned __int32 buffer_size=0;
    int rc;

		if(newpos_ms>-1)
		{
		MP4Duration duration=MP4ConvertToTrackDuration(mp4File,track,newpos_ms,MP4_MSECS_TIME_SCALE);
            sampleId=MP4GetSampleIdFromTime(mp4File,track,duration,0);
			bytes_read=(DWORD)(((float)newpos_ms*file_info.bitrate)/(8*1000));
			reader->seek(bytes_read);  // updates slider
			newpos_ms=-1;
		}
		do
		{
			buffer=NULL;
			if(sampleId>=numSamples)
				ERROR_processData(0);

			rc=MP4ReadSample(mp4File, track, sampleId++, (unsigned __int8 **)&buffer, &buffer_size, NULL, NULL, NULL, NULL);
			if(rc==0 || buffer==NULL)
			{
				FREE_ARRAY(buffer);
				ERROR_processData("MP4ReadSample")
			}

			bufout=(char *)faacDecDecode(hDecoder,&frameInfo,buffer,buffer_size);
			BytesDecoded=frameInfo.samples*sizeof(short);
			FREE_ARRAY(buffer);
			// to update the slider
			bytes_read+=buffer_size;
			reader->seek(bytes_read);
		}while(!BytesDecoded && !frameInfo.error);
Example #6
static MP4AV_Mp3Header GetMp3Header(
	MP4FileHandle mp4File, 
	MP4TrackId audioTrackId)
{
	u_int8_t* pMp3Frame = NULL;
	u_int32_t mp3FrameLength = 0;

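	// Only five arguments are passed here; the remaining out-parameters
	// (timestamp, duration, rendering offset, sync flag) appear to default
	// to NULL in the C++ declaration, so they are simply omitted.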
	bool rc = MP4ReadSample(
		mp4File,
		audioTrackId,
		1,
		&pMp3Frame,
		&mp3FrameLength);

	if (!rc || mp3FrameLength < 4) {
		return 0;
	}

	MP4AV_Mp3Header mp3Hdr =
		MP4AV_Mp3HeaderFromBytes(pMp3Frame);
	free(pMp3Frame);

	return mp3Hdr;
}
Example #7
extern "C" bool MP4AV_AVSMHinter(
				 MP4FileHandle mp4File, 
				 MP4TrackId mediaTrackId, 
				 u_int16_t maxPayloadSize)
{
  u_int32_t numSamples = MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);
  u_int32_t maxSampleSize = MP4GetTrackMaxSampleSize(mp4File, mediaTrackId);
	
  uint32_t sizeLength;

  if (numSamples == 0 || maxSampleSize == 0) {
    return false;
  }

  /*if (MP4GetTrackAVSMLengthSize(mp4File, mediaTrackId, &sizeLength) == false) {
    return false;
  }*/
  sizeLength=4;						//why?

  MP4TrackId hintTrackId = 
    MP4AV_AVSM_HintTrackCreate(mp4File, mediaTrackId);				//****AVSMspecial****

  if (hintTrackId == MP4_INVALID_TRACK_ID) {
    return false;
  }

  u_int8_t* pSampleBuffer = (u_int8_t*)malloc(maxSampleSize);
  if (pSampleBuffer == NULL) {
    MP4DeleteTrack(mp4File, hintTrackId);
    return false;
  }
  for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {
    u_int32_t sampleSize = maxSampleSize;
    MP4Timestamp startTime;
    MP4Duration duration;
    MP4Duration renderingOffset;
    bool isSyncSample; // the stss table marks sync (key) frames

    bool rc = MP4ReadSample(
			    mp4File, mediaTrackId, sampleId, 
			    &pSampleBuffer, &sampleSize, 
			    &startTime, &duration, 
			    &renderingOffset, &isSyncSample);

    if (!rc) {
      MP4DeleteTrack(mp4File, hintTrackId);
      CHECK_AND_FREE(pSampleBuffer);
      return false;
    }

    MP4AV_AVSM_HintAddSample(mp4File,								//****AVSMspecial****
			     hintTrackId,
			     sampleId,
			     pSampleBuffer,
			     sampleSize,
			     sizeLength,
			     duration,
			     renderingOffset,
			     isSyncSample,
			     maxPayloadSize);
	
  }
   CHECK_AND_FREE(pSampleBuffer);

  return true;
}
Example #8
SINT SoundSourceM4A::readSampleFrames(
        SINT numberOfFrames, CSAMPLE* sampleBuffer) {
    DEBUG_ASSERT(isValidFrameIndex(m_curFrameIndex));

    const SINT numberOfFramesTotal = math_min(
            numberOfFrames, getMaxFrameIndex() - m_curFrameIndex);
    const SINT numberOfSamplesTotal = frames2samples(numberOfFramesTotal);

    CSAMPLE* pSampleBuffer = sampleBuffer;
    SINT numberOfSamplesRemaining = numberOfSamplesTotal;
    while (0 < numberOfSamplesRemaining) {

        if (!m_sampleBuffer.isEmpty()) {
            // Consume previously decoded sample data
            const SampleBuffer::ReadableChunk readableChunk(
                    m_sampleBuffer.readFromHead(numberOfSamplesRemaining));
            if (pSampleBuffer) {
                SampleUtil::copy(pSampleBuffer, readableChunk.data(), readableChunk.size());
                pSampleBuffer += readableChunk.size();
            }
            m_curFrameIndex += samples2frames(readableChunk.size());
            DEBUG_ASSERT(isValidFrameIndex(m_curFrameIndex));
            DEBUG_ASSERT(numberOfSamplesRemaining >= readableChunk.size());
            numberOfSamplesRemaining -= readableChunk.size();
            if (0 == numberOfSamplesRemaining) {
                break; // exit loop
            }
        }
        // All previously decoded sample data has been consumed now
        DEBUG_ASSERT(m_sampleBuffer.isEmpty());

        if (0 == m_inputBufferLength) {
            // Fill input buffer from file
            if (isValidSampleBlockId(m_curSampleBlockId)) {
                // Read data for next sample block into input buffer
                u_int8_t* pInputBuffer = &m_inputBuffer[0];
                u_int32_t inputBufferLength = m_inputBuffer.size(); // in/out parameter
                if (!MP4ReadSample(m_hFile, m_trackId, m_curSampleBlockId,
                        &pInputBuffer, &inputBufferLength,
                        NULL, NULL, NULL, NULL)) {
                    qWarning()
                            << "Failed to read MP4 input data for sample block"
                            << m_curSampleBlockId << "(" << "min ="
                            << kSampleBlockIdMin << "," << "max ="
                            << m_maxSampleBlockId << ")";
                    break; // abort
                }
                ++m_curSampleBlockId;
                m_inputBufferLength = inputBufferLength;
                m_inputBufferOffset = 0;
            }
        }
        DEBUG_ASSERT(0 <= m_inputBufferLength);
        if (0 == m_inputBufferLength) {
            break; // EOF
        }

        // NOTE(uklotzde): The sample buffer for NeAACDecDecode2 has to
        // be big enough for a whole block of decoded samples, which
        // contains up to kFramesPerSampleBlock frames. Otherwise
        // we need to use a temporary buffer.
        CSAMPLE* pDecodeBuffer; // in/out parameter
        SINT decodeBufferCapacity;
        const SINT decodeBufferCapacityMin = frames2samples(kFramesPerSampleBlock);
        if (pSampleBuffer && (decodeBufferCapacityMin <= numberOfSamplesRemaining)) {
            // Decode samples directly into sampleBuffer
            pDecodeBuffer = pSampleBuffer;
            decodeBufferCapacity = numberOfSamplesRemaining;
        } else {
            // Decode next sample block into temporary buffer
            const SINT writeToTailCount = math_max(
                    numberOfSamplesRemaining, decodeBufferCapacityMin);
            const SampleBuffer::WritableChunk writableChunk(
                    m_sampleBuffer.writeToTail(writeToTailCount));
            pDecodeBuffer = writableChunk.data();
            decodeBufferCapacity = writableChunk.size();
        }
        DEBUG_ASSERT(decodeBufferCapacityMin <= decodeBufferCapacity);

        NeAACDecFrameInfo decFrameInfo;
        void* pDecodeResult = NeAACDecDecode2(
                m_hDecoder, &decFrameInfo,
                &m_inputBuffer[m_inputBufferOffset],
                m_inputBufferLength,
                reinterpret_cast<void**>(&pDecodeBuffer),
                decodeBufferCapacity * sizeof(*pDecodeBuffer));
        // Verify the decoding result
        if (0 != decFrameInfo.error) {
            qWarning() << "AAC decoding error:"
                    << decFrameInfo.error
                    << NeAACDecGetErrorMessage(decFrameInfo.error)
                    << getUrlString();
            break; // abort
        }
        DEBUG_ASSERT(pDecodeResult == pDecodeBuffer); // verify the in/out parameter

        // Verify the decoded sample data for consistency
        if (getChannelCount() != decFrameInfo.channels) {
            qWarning() << "Corrupt or unsupported AAC file:"
                    << "Unexpected number of channels" << decFrameInfo.channels
                    << "<>" << getChannelCount();
            break; // abort
        }
        if (getFrameRate() != SINT(decFrameInfo.samplerate)) {
            qWarning() << "Corrupt or unsupported AAC file:"
                    << "Unexpected sample rate" << decFrameInfo.samplerate
                    << "<>" << getFrameRate();
            break; // abort
        }

        // Consume input data
        m_inputBufferLength -= decFrameInfo.bytesconsumed;
        m_inputBufferOffset += decFrameInfo.bytesconsumed;

        // Consume decoded output data
        const SINT numberOfSamplesDecoded = decFrameInfo.samples;
        DEBUG_ASSERT(numberOfSamplesDecoded <= decodeBufferCapacity);
        SINT numberOfSamplesRead;
        if (pDecodeBuffer == pSampleBuffer) {
            numberOfSamplesRead = math_min(numberOfSamplesDecoded, numberOfSamplesRemaining);
            pSampleBuffer += numberOfSamplesRead;
        } else {
            m_sampleBuffer.readFromTail(decodeBufferCapacity - numberOfSamplesDecoded);
            const SampleBuffer::ReadableChunk readableChunk(
                    m_sampleBuffer.readFromHead(numberOfSamplesRemaining));
            numberOfSamplesRead = readableChunk.size();
            if (pSampleBuffer) {
                SampleUtil::copy(pSampleBuffer, readableChunk.data(), numberOfSamplesRead);
                pSampleBuffer += numberOfSamplesRead;
            }
        }
        // The decoder might decode more samples than actually needed
        // at the end of the file! When the end of the file has been
        // reached decoding can be restarted by seeking to a new
        // position.
        DEBUG_ASSERT(numberOfSamplesDecoded >= numberOfSamplesRead);
        m_curFrameIndex += samples2frames(numberOfSamplesRead);
        DEBUG_ASSERT(isValidFrameIndex(m_curFrameIndex));
        DEBUG_ASSERT(numberOfSamplesRemaining >= numberOfSamplesRead);
        numberOfSamplesRemaining -= numberOfSamplesRead;
    }

    DEBUG_ASSERT(isValidFrameIndex(m_curFrameIndex));
    DEBUG_ASSERT(numberOfSamplesTotal >= numberOfSamplesRemaining);
    return samples2frames(numberOfSamplesTotal - numberOfSamplesRemaining);
}
Example #9
static int
aac_read (DB_fileinfo_t *_info, char *bytes, int size) {
    aac_info_t *info = (aac_info_t *)_info;
    if (info->eof) {
        trace ("aac_read: received call after eof\n");
        return 0;
    }
    int samplesize = _info->fmt.channels * _info->fmt.bps / 8;
    if (!info->file->vfs->is_streaming ()) {
        if (info->currentsample + size / samplesize > info->endsample) {
            size = (info->endsample - info->currentsample + 1) * samplesize;
            if (size <= 0) {
                trace ("aac_read: eof (current=%d, total=%d)\n", info->currentsample, info->endsample);
                return 0;
            }
        }
    }

    int initsize = size;

    while (size > 0) {
        if (info->skipsamples > 0 && info->out_remaining > 0) {
            int skip = min (info->out_remaining, info->skipsamples);
            if (skip < info->out_remaining) {
                memmove (info->out_buffer, info->out_buffer + skip * samplesize, (info->out_remaining - skip) * samplesize);
            }
            info->out_remaining -= skip;
            info->skipsamples -= skip;
        }
        if (info->out_remaining > 0) {
            int n = size / samplesize;
            n = min (info->out_remaining, n);

            char *src = info->out_buffer;
            if (info->noremap) {
                memcpy (bytes, src, n * samplesize);
                bytes += n * samplesize;
                src += n * samplesize;
            }
            else {
                int i, j;
                if (info->remap[0] == -1) {
                    // build remap mtx

                    // FIXME: should build channelmask 1st; then remap based on channelmask
                    for (i = 0; i < _info->fmt.channels; i++) {
                        switch (info->frame_info.channel_position[i]) {
                        case FRONT_CHANNEL_CENTER:
                            trace ("FC->%d\n", i);
                            info->remap[2] = i;
                            break;
                        case FRONT_CHANNEL_LEFT:
                            trace ("FL->%d\n", i);
                            info->remap[0] = i;
                            break;
                        case FRONT_CHANNEL_RIGHT:
                            trace ("FR->%d\n", i);
                            info->remap[1] = i;
                            break;
                        case SIDE_CHANNEL_LEFT:
                            trace ("SL->%d\n", i);
                            info->remap[6] = i;
                            break;
                        case SIDE_CHANNEL_RIGHT:
                            trace ("SR->%d\n", i);
                            info->remap[7] = i;
                            break;
                        case BACK_CHANNEL_LEFT:
                            trace ("RL->%d\n", i);
                            info->remap[4] = i;
                            break;
                        case BACK_CHANNEL_RIGHT:
                            trace ("RR->%d\n", i);
                            info->remap[5] = i;
                            break;
                        case BACK_CHANNEL_CENTER:
                            trace ("BC->%d\n", i);
                            info->remap[8] = i;
                            break;
                        case LFE_CHANNEL:
                            trace ("LFE->%d\n", i);
                            info->remap[3] = i;
                            break;
                        default:
                            trace ("aac: unknown ch(%d)->%d\n", info->frame_info.channel_position[i], i);
                            break;
                        }
                    }
                    for (i = 0; i < _info->fmt.channels; i++) {
                        trace ("%d ", info->remap[i]);
                    }
                    trace ("\n");
                    if (info->remap[0] == -1) {
                        info->remap[0] = 0;
                    }
                    if ((_info->fmt.channels == 1 && info->remap[0] == FRONT_CHANNEL_CENTER)
                        || (_info->fmt.channels == 2 && info->remap[0] == FRONT_CHANNEL_LEFT && info->remap[1] == FRONT_CHANNEL_RIGHT)) {
                        info->noremap = 1;
                    }
                }

                for (i = 0; i < n; i++) {
                    for (j = 0; j < _info->fmt.channels; j++) {
                        ((int16_t *)bytes)[j] = ((int16_t *)src)[info->remap[j]];
                    }
                    src += samplesize;
                    bytes += samplesize;
                }
            }
            size -= n * samplesize;

            if (n == info->out_remaining) {
                info->out_remaining = 0;
            }
            else {
                memmove (info->out_buffer, src, (info->out_remaining - n) * samplesize);
                info->out_remaining -= n;
            }
            continue;
        }

        char *samples = NULL;

        if (info->mp4file) {
            if (info->mp4sample >= info->mp4samples) {
                break;
            }
            
            unsigned char *buffer = NULL;
            int buffer_size = 0;
#ifdef USE_MP4FF
            int rc = mp4ff_read_sample (info->mp4file, info->mp4track, info->mp4sample, &buffer, &buffer_size);
            if (rc == 0) {
                trace ("mp4ff_read_sample failed\n");
                info->eof = 1;
                break;
            }
#else

            buffer = info->samplebuffer;
            buffer_size = info->maxSampleSize;
            MP4Timestamp sampleTime;
            MP4Duration sampleDuration;
            MP4Duration sampleRenderingOffset;
            bool isSyncSample;
            MP4ReadSample (info->mp4file, info->mp4track, info->mp4sample, &buffer, &buffer_size, &sampleTime, &sampleDuration, &sampleRenderingOffset, &isSyncSample);
            // convert timestamp and duration from track time to milliseconds
            u_int64_t myTime = MP4ConvertFromTrackTimestamp (info->mp4file, info->mp4track, 
                    sampleTime, MP4_MSECS_TIME_SCALE);

            u_int64_t myDuration = MP4ConvertFromTrackDuration (info->mp4file, info->mp4track,
                    sampleDuration, MP4_MSECS_TIME_SCALE);
#endif
            info->mp4sample++;
            samples = NeAACDecDecode(info->dec, &info->frame_info, buffer, buffer_size);

            if (buffer) {
                free (buffer);
            }
            if (!samples) {
                break;
            }
        }
        else {
            if (info->remaining < AAC_BUFFER_SIZE) {
                trace ("fread from offs %lld\n", deadbeef->ftell (info->file));
                size_t res = deadbeef->fread (info->buffer + info->remaining, 1, AAC_BUFFER_SIZE-info->remaining, info->file);
                info->remaining += res;
                trace ("remain: %d\n", info->remaining);
                if (!info->remaining) {
                    break;
                }
            }

            trace ("NeAACDecDecode %d bytes\n", info->remaining);
            samples = NeAACDecDecode (info->dec, &info->frame_info, info->buffer, info->remaining);
            trace ("samples =%p\n", samples);
            if (!samples) {
                trace ("NeAACDecDecode failed with error %s (%d), consumed=%d\n", NeAACDecGetErrorMessage(info->frame_info.error), (int)info->frame_info.error, info->frame_info.bytesconsumed);

                if (info->num_errors > 10) {
                    trace ("NeAACDecDecode failed %d times, interrupting\n", info->num_errors);
                    break;
                }
                info->num_errors++;
                info->remaining = 0;
                continue;
            }
            info->num_errors=0;
            int consumed = info->frame_info.bytesconsumed;
            if (consumed > info->remaining) {
                trace ("NeAACDecDecode consumed more than available! wtf?\n");
                break;
            }
            if (consumed == info->remaining) {
                info->remaining = 0;
            }
            else if (consumed > 0) {
                memmove (info->buffer, info->buffer + consumed, info->remaining - consumed);
                info->remaining -= consumed;
            }
        }

        if (info->frame_info.samples > 0) {
            memcpy (info->out_buffer, samples, info->frame_info.samples * 2);
            info->out_remaining = info->frame_info.samples / info->frame_info.channels;
        }
    }

    info->currentsample += (initsize-size) / samplesize;
    return initsize-size;
}
Example #10
QWORD MP4TextTrack::ReadPrevious(QWORD time,Listener *listener)
{
	//Check it is the first
	if (sampleId==1)
	{
		//Set empty frame
		frame.SetFrame(time,(wchar_t*)NULL,0);
		//call listener
		if (listener)
			//Call it
			listener->onTextFrame(frame);
		//Exit
		return 1;
	}

	//The previous one
	MP4SampleId prevId = sampleId-1;

	//If it was not found
	if (sampleId==MP4_INVALID_SAMPLE_ID)
		//The latest
		prevId = MP4GetTrackNumberOfSamples(mp4,track);

	// Get size of sample
	frameSize = MP4GetSampleSize(mp4, track, prevId);

	// Get data pointer
	BYTE *data = (BYTE*)malloc(frameSize);
	//Get max data length
	DWORD dataLen = frameSize;

	MP4Timestamp	startTime;
	MP4Duration	duration;
	MP4Duration	renderingOffset;

	// Read next rtp packet
	if (!MP4ReadSample(
				mp4,				// MP4FileHandle hFile
				track,				// MP4TrackId hintTrackId
				prevId,				// MP4SampleId sampleId,
				(u_int8_t **) &data,		// u_int8_t** ppBytes
				(u_int32_t *) &dataLen,		// u_int32_t* pNumBytes
				&startTime,			// MP4Timestamp* pStartTime
				&duration,			// MP4Duration* pDuration
				&renderingOffset,		// MP4Duration* pRenderingOffset
				NULL				// bool* pIsSyncSample
	))
		//Last
		return MP4_INVALID_TIMESTAMP;

	//Get length
	if (dataLen>2)
	{
		//Get string length
		DWORD len = data[0]<<8 | data[1];
		//Set frame
		frame.SetFrame(time,data+2+renderingOffset,len-renderingOffset-2);
		//call listener
		if (listener)
			//Call it
			listener->onTextFrame(frame);
	}

	// exit next send time
	return 1;
}
Example #11
static void *mp4Decode(void *args)
{
  MP4FileHandle mp4file;

  pthread_mutex_lock(&mutex);
  seekPosition = -1;
  bPlaying = TRUE;
  if(!(mp4file = MP4Read(args, 0))){
    mp4cfg.file_type = FILE_AAC;
    MP4Close(mp4file);
  }else{
    mp4cfg.file_type = FILE_MP4;
  }

  if(mp4cfg.file_type == FILE_MP4){
    // We are reading a MP4 file
    gint		mp4track;

    if((mp4track = getAACTrack(mp4file)) < 0){
      //TODO: check here for others Audio format.....
      g_print("Unsupported Audio track type\n");
      g_free(args);
      MP4Close(mp4file);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }else{
      faacDecHandle	decoder;
      unsigned char	*buffer	= NULL;
      guint		bufferSize = 0;
      gulong		samplerate;
      guchar		channels;
      guint		avgBitrate;
      MP4Duration	duration;
      gulong		msDuration;
      MP4SampleId	numSamples;
      MP4SampleId	sampleID = 1;

      decoder = faacDecOpen();
      MP4GetTrackESConfiguration(mp4file, mp4track, &buffer, &bufferSize);
      if(!buffer){
	g_free(args);
	faacDecClose(decoder);
	MP4Close(mp4file);
	bPlaying = FALSE;
	pthread_mutex_unlock(&mutex);
	pthread_exit(NULL);
      }
      if(faacDecInit2(decoder, buffer, bufferSize, &samplerate, &channels)<0){
	g_free(args);
	faacDecClose(decoder);
	MP4Close(mp4file);
	bPlaying = FALSE;
	pthread_mutex_unlock(&mutex);
	pthread_exit(NULL);
      }
      g_free(buffer);
      if(channels == 0){
	g_print("Number of Channels not supported\n");
	g_free(args);
	faacDecClose(decoder);
	MP4Close(mp4file);
	bPlaying = FALSE;
	pthread_mutex_unlock(&mutex);
	pthread_exit(NULL);
      }
      duration = MP4GetTrackDuration(mp4file, mp4track);
      msDuration = MP4ConvertFromTrackDuration(mp4file, mp4track, duration,
					       MP4_MSECS_TIME_SCALE);
      numSamples = MP4GetTrackNumberOfSamples(mp4file, mp4track);
      mp4_ip.output->open_audio(FMT_S16_NE, samplerate, channels);
      mp4_ip.output->flush(0);
      mp4_ip.set_info(args, msDuration, -1, samplerate/1000, channels);
      g_print("MP4 - %d channels @ %lu Hz\n", channels, samplerate);

      while(bPlaying){
	void*			sampleBuffer;
	faacDecFrameInfo	frameInfo;    
	gint			rc;

	if(seekPosition!=-1){
	  duration = MP4ConvertToTrackDuration(mp4file,
					       mp4track,
					       seekPosition*1000,
					       MP4_MSECS_TIME_SCALE);
	  sampleID = MP4GetSampleIdFromTime(mp4file, mp4track, duration, 0);
	  mp4_ip.output->flush(seekPosition*1000);
	  seekPosition = -1;
	}
	buffer=NULL;
	bufferSize=0;
	if(sampleID > numSamples){
	  mp4_ip.output->close_audio();
	  g_free(args);
	  faacDecClose(decoder);
	  MP4Close(mp4file);
	  bPlaying = FALSE;
	  pthread_mutex_unlock(&mutex);
	  pthread_exit(NULL);
	}
	rc = MP4ReadSample(mp4file, mp4track, sampleID++, &buffer, &bufferSize,
			   NULL, NULL, NULL, NULL);
	//g_print("%d/%d\n", sampleID-1, numSamples);
	if((rc==0) || (buffer== NULL)){
	  g_print("MP4: read error\n");
	  sampleBuffer = NULL;
	  sampleID=0;
	  mp4_ip.output->buffer_free();
	  mp4_ip.output->close_audio();
	  g_free(args);
	  faacDecClose(decoder);
	  MP4Close(mp4file);
	  bPlaying = FALSE;
	  pthread_mutex_unlock(&mutex);
	  pthread_exit(NULL);
	}else{
	  sampleBuffer = faacDecDecode(decoder, &frameInfo, buffer, bufferSize);
	  if(frameInfo.error > 0){
	    g_print("MP4: %s\n",
		    faacDecGetErrorMessage(frameInfo.error));
	    mp4_ip.output->close_audio();
	    g_free(args);
	    faacDecClose(decoder);
	    MP4Close(mp4file);
	    bPlaying = FALSE;
	    pthread_mutex_unlock(&mutex);
	    pthread_exit(NULL);
	  }
	  if(buffer){
	    g_free(buffer); buffer=NULL; bufferSize=0;
	  }
	  while(bPlaying && mp4_ip.output->buffer_free()<frameInfo.samples<<1)
	    xmms_usleep(30000);
	}
	mp4_ip.add_vis_pcm(mp4_ip.output->written_time(),
			   FMT_S16_NE,
			   channels,
			   frameInfo.samples<<1,
			   sampleBuffer);
	mp4_ip.output->write_audio(sampleBuffer, frameInfo.samples<<1);
      }
      while(bPlaying && mp4_ip.output->buffer_free()){
	xmms_usleep(10000);
      }
      mp4_ip.output->close_audio();
      g_free(args);
      faacDecClose(decoder);
      MP4Close(mp4file);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
  } else{
    // WE ARE READING AN AAC FILE
    FILE		*file = NULL;
    faacDecHandle	decoder = 0;
    guchar		*buffer = 0;
    gulong		bufferconsumed = 0;
    gulong		samplerate = 0;
    guchar		channels;
    gulong		buffervalid = 0;
    TitleInput*		input;
    gchar		*temp = g_strdup(args);
    gchar		*ext  = strrchr(temp, '.');
    gchar		*xmmstitle = NULL;
    faacDecConfigurationPtr config;

    if((file = fopen(args, "rb")) == 0){
      g_print("AAC: can't find file %s\n", args);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    if((decoder = faacDecOpen()) == NULL){
      g_print("AAC: Open Decoder Error\n");
      fclose(file);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    config = faacDecGetCurrentConfiguration(decoder);
    config->useOldADTSFormat = 0;
    faacDecSetConfiguration(decoder, config);
    if((buffer = g_malloc(BUFFER_SIZE)) == NULL){
      g_print("AAC: error g_malloc\n");
      fclose(file);
      bPlaying = FALSE;
      faacDecClose(decoder);
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    if((buffervalid = fread(buffer, 1, BUFFER_SIZE, file))==0){
      g_print("AAC: Error reading file\n");
      g_free(buffer);
      fclose(file);
      bPlaying = FALSE;
      faacDecClose(decoder);
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    XMMS_NEW_TITLEINPUT(input);
    input->file_name = g_basename(temp);
    input->file_ext = ext ? ext+1 : NULL;
    input->file_path = temp;
    if(!strncmp(buffer, "ID3", 3)){
      gint size = 0;

      fseek(file, 0, SEEK_SET);
      size = (buffer[6]<<21) | (buffer[7]<<14) | (buffer[8]<<7) | buffer[9];
      size+=10;
      fread(buffer, 1, size, file);
      buffervalid = fread(buffer, 1, BUFFER_SIZE, file);
    }
    xmmstitle = xmms_get_titlestring(xmms_get_gentitle_format(), input);
    if(xmmstitle == NULL)
      xmmstitle = g_strdup(input->file_name);
    if(temp) g_free(temp);
    if(input->performer) g_free(input->performer);
    if(input->album_name) g_free(input->album_name);
    if(input->track_name) g_free(input->track_name);
    if(input->genre) g_free(input->genre);
    g_free(input);
    bufferconsumed = faacDecInit(decoder,
				 buffer,
				 buffervalid,
				 &samplerate,
				 &channels);
    if(mp4_ip.output->open_audio(FMT_S16_NE,samplerate,channels) == FALSE){
      g_print("AAC: Output Error\n");
      g_free(buffer); buffer=0;
      faacDecClose(decoder);
      fclose(file);
      mp4_ip.output->close_audio();
      /*
      if(positionTable){
	g_free(positionTable); positionTable=0;
      }
      */
      g_free(xmmstitle);
      bPlaying = FALSE;
      pthread_mutex_unlock(&mutex);
      pthread_exit(NULL);
    }
    //if(bSeek){
    //mp4_ip.set_info(xmmstitle, lenght*1000, -1, samplerate, channels);
      //}else{
    mp4_ip.set_info(xmmstitle, -1, -1, samplerate, channels);
      //}
    mp4_ip.output->flush(0);

    while(bPlaying && buffervalid > 0){
      faacDecFrameInfo	finfo;
      unsigned long	samplesdecoded;
      char*		sample_buffer = NULL;
      /*
	if(bSeek && seekPosition!=-1){
	fseek(file, positionTable[seekPosition], SEEK_SET);
	bufferconsumed=0;
	buffervalid = fread(buffer, 1, BUFFER_SIZE, file);
	aac_ip.output->flush(seekPosition*1000);
	seekPosition=-1;
	}
      */
      if(bufferconsumed > 0){
	memmove(buffer, &buffer[bufferconsumed], buffervalid-bufferconsumed);
	buffervalid -= bufferconsumed;
	buffervalid += fread(&buffer[buffervalid], 1,
			     BUFFER_SIZE-buffervalid, file);
	bufferconsumed = 0;
      }
      sample_buffer = faacDecDecode(decoder, &finfo, buffer, buffervalid);
      if(finfo.error){
	config = faacDecGetCurrentConfiguration(decoder);
	if(config->useOldADTSFormat != 1){
	  faacDecClose(decoder);
	  decoder = faacDecOpen();
	  config = faacDecGetCurrentConfiguration(decoder);
	  config->useOldADTSFormat = 1;
	  faacDecSetConfiguration(decoder, config);
	  finfo.bytesconsumed=0;
	  finfo.samples = 0;
	  faacDecInit(decoder,
		      buffer,
		      buffervalid,
		      &samplerate,
		      &channels);
	}else{
	  g_print("FAAD2 Warning %s\n", faacDecGetErrorMessage(finfo.error));
	  buffervalid = 0;
	}
      }
      bufferconsumed += finfo.bytesconsumed;
      samplesdecoded = finfo.samples;
      if((samplesdecoded<=0) && !sample_buffer){
	g_print("AAC: error sample decoding\n");
	continue;
      }
      while(bPlaying && mp4_ip.output->buffer_free() < (samplesdecoded<<1)){
	xmms_usleep(10000);
      }
      mp4_ip.add_vis_pcm(mp4_ip.output->written_time(),
			 FMT_S16_LE, channels,
			 samplesdecoded<<1, sample_buffer);
      mp4_ip.output->write_audio(sample_buffer, samplesdecoded<<1);
    }
    while(bPlaying && mp4_ip.output->buffer_playing()){
      xmms_usleep(10000);
    }
    mp4_ip.output->buffer_free();
    mp4_ip.output->close_audio();
    bPlaying = FALSE;
    g_free(buffer);
    faacDecClose(decoder);
    g_free(xmmstitle);
    fclose(file);
    seekPosition = -1;
    /*
    if(positionTable){
      g_free(positionTable); positionTable=0;
    }
    */
    bPlaying = FALSE;
    pthread_mutex_unlock(&mutex);
    pthread_exit(NULL);
    
  }
}
Example #12
static void DumpTrack (MP4FileHandle mp4file, MP4TrackId tid, 
		       bool dump_off, bool dump_rend)
{
  uint32_t numSamples;
  MP4SampleId sid;
  uint8_t *buffer;
  uint32_t max_frame_size;
  uint32_t timescale;
  uint64_t msectime;
  const char *media_data_name;
  uint32_t len_size = 0;
  uint8_t video_type = 0;
  numSamples = MP4GetTrackNumberOfSamples(mp4file, tid);
  max_frame_size = MP4GetTrackMaxSampleSize(mp4file, tid) + 4;
  media_data_name = MP4GetTrackMediaDataName(mp4file, tid);
  if (strcasecmp(media_data_name, "avc1") == 0) {
    MP4GetTrackH264LengthSize(mp4file, tid, &len_size);
  } else if (strcasecmp(media_data_name, "mp4v") == 0) {
    video_type = MP4GetTrackEsdsObjectTypeId(mp4file, tid);
  }
  buffer = (uint8_t *)malloc(max_frame_size);
  if (buffer == NULL) {
    printf("couldn't get buffer\n");
    return;
  }

  timescale = MP4GetTrackTimeScale(mp4file, tid);
  printf("mp4file %s, track %d, samples %d, timescale %d\n", 
	 Mp4FileName, tid, numSamples, timescale);

  for (sid = 1; sid <= numSamples; sid++) {
    MP4Timestamp sampleTime;
    MP4Duration sampleDuration, sampleRenderingOffset;
    bool isSyncSample = FALSE;
    bool ret;
    u_int8_t *temp;
    uint32_t this_frame_size = max_frame_size;
    temp = buffer;
    ret = MP4ReadSample(mp4file, 
			tid,
			sid,
			&temp,
			&this_frame_size,
			&sampleTime,
			&sampleDuration,
			&sampleRenderingOffset,
			&isSyncSample);

    msectime = sampleTime;
    msectime *= TO_U64(1000);
    msectime /= timescale;

    printf("sampleId %6d, size %5u time "U64"("U64")",
	  sid,  MP4GetSampleSize(mp4file, tid, sid), 
	   sampleTime, msectime);
    if (dump_rend) printf(" %6"U64F, sampleRenderingOffset);
    if (strcasecmp(media_data_name, "mp4v") == 0) {
      if (MP4_IS_MPEG4_VIDEO_TYPE(video_type))
	ParseMpeg4(temp, this_frame_size, dump_off);
    } else if (strcasecmp(media_data_name, "avc1") == 0) {
      ParseH264(temp, this_frame_size, len_size, dump_off);
    }
    printf("\n");
  }
}
int main(int argc, char** argv)
{


	if (argc < 2) {
		fprintf(stderr, "Usage: %s <file>\n", argv[0]);
		exit(1);
	}

	//u_int32_t verbosity = MP4_DETAILS_ALL;
	char* fileName = argv[1];

	// open the mp4 file, and read meta-info
	MP4FileHandle mp4File = MP4Read(fileName);

	uint8_t profileLevel = MP4GetVideoProfileLevel(mp4File);

	// get a handle on the first video track
	MP4TrackId trackId = MP4FindTrackId(mp4File, 0, "video");

	// gather the crucial track information 

	uint32_t timeScale = MP4GetTrackTimeScale(mp4File, trackId);

	// note all times and durations 
	// are in units of the track time scale

	MP4Duration trackDuration = MP4GetTrackDuration(mp4File, trackId);

	MP4SampleId numSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);

	uint32_t maxSampleSize = MP4GetTrackMaxSampleSize(mp4File, trackId);

	uint8_t* pConfig;
	uint32_t configSize = 0;

	MP4GetTrackESConfiguration(mp4File, trackId, &pConfig, &configSize);

	// initialize decoder with Elementary Stream (ES) configuration

	// done with our copy of ES configuration
	free(pConfig);


	// now consecutively read and display the track samples

	uint8_t* pSample = (uint8_t*)malloc(maxSampleSize);
	uint32_t sampleSize;

	MP4Timestamp sampleTime;
	MP4Duration sampleDuration;
	MP4Duration sampleRenderingOffset;
	bool isSyncSample;

	for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {

		// give ReadSample our own buffer, and let it know how big it is
		sampleSize = maxSampleSize;

		// read next sample from video track
		MP4ReadSample(mp4File, trackId, sampleId, 
			&pSample, &sampleSize,
			&sampleTime, &sampleDuration, &sampleRenderingOffset, 
			&isSyncSample);

		// convert timestamp and duration from track time to milliseconds
		uint64_t myTime = MP4ConvertFromTrackTimestamp(mp4File, trackId, 
			sampleTime, MP4_MSECS_TIME_SCALE);

		uint64_t myDuration = MP4ConvertFromTrackDuration(mp4File, trackId,
			sampleDuration, MP4_MSECS_TIME_SCALE);

		// decode frame and display it
	}

	// close mp4 file
	MP4Close(mp4File);


	// Note to seek to time 'when' in the track
	// use MP4GetSampleIdFromTime(MP4FileHandle hFile, MP4TrackId trackId,
	//		MP4Timestamp when, bool wantSyncSample)
	// 'wantSyncSample' determines if a sync sample is desired or not
	// e.g.
	// MP4Timestamp when = 
	//	MP4ConvertToTrackTimestamp(mp4File, trackId, 30, MP4_SECS_TIME_SCALE);
	// MP4SampleId newSampleId = MP4GetSampleIdFromTime(mp4File, trackId, when, true);
	// MP4ReadSample(mp4File, trackId, newSampleId, ...);
	// 
	// Note that start time for sample may be later than 'when'

	exit(0);
}
Example #14
void ExtractTrack(MP4FileHandle mp4File, MP4TrackId trackId, 
	bool sampleMode, MP4SampleId sampleId, char* dstFileName)
{
	char outFileName[PATH_MAX];
	int outFd = -1;
	int openFlags = O_WRONLY | O_TRUNC | OPEN_CREAT;

	if (!sampleMode) {
		if (dstFileName == NULL) {
			snprintf(outFileName, sizeof(outFileName), 
				"%s.t%u", Mp4FileName, trackId);
		} else {
			snprintf(outFileName, sizeof(outFileName), 
				"%s", dstFileName);
		}

		outFd = open(outFileName, openFlags, 0644);
		if (outFd == -1) {
			fprintf(stderr, "%s: can't open %s: %s\n",
				ProgName, outFileName, strerror(errno));
			return;
		}
	}

	MP4SampleId numSamples;

	if (sampleMode && sampleId != MP4_INVALID_SAMPLE_ID) {
		numSamples = sampleId;
	} else {
		sampleId = 1;
		numSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);
	}

	u_int8_t* pSample;
	u_int32_t sampleSize;

	for ( ; sampleId <= numSamples; sampleId++) {
		int rc;

		// signals to ReadSample() that it should malloc a buffer for us
		pSample = NULL;
		sampleSize = 0;

		rc = MP4ReadSample(mp4File, trackId, sampleId, &pSample, &sampleSize);
		if (rc == 0) {
			fprintf(stderr, "%s: read sample %u for %s failed\n",
				ProgName, sampleId, outFileName);
			break;
		}

		if (sampleMode) {
			snprintf(outFileName, sizeof(outFileName), "%s.t%u.s%u",
				Mp4FileName, trackId, sampleId);

			outFd = open(outFileName, openFlags, 0644);

			if (outFd == -1) {
				fprintf(stderr, "%s: can't open %s: %s\n",
					ProgName, outFileName, strerror(errno));
				break;
			}
		}

		rc = write(outFd, pSample, sampleSize);
		if (rc == -1 || (u_int32_t)rc != sampleSize) {
			fprintf(stderr, "%s: write to %s failed: %s\n",
				ProgName, outFileName, strerror(errno));
			break;
		}

		free(pSample);

		if (sampleMode) {
			close(outFd);
			outFd = -1;
		}
	}

	if (outFd != -1) {
		close(outFd);
	}
}
Example #15
extern "C" bool MP4AV_Rfc2429Hinter (MP4FileHandle file,
				     MP4TrackId mediaTrackId,
				     uint16_t maxPayloadSize)
{
  uint32_t numSamples, maxSampleSize;
  MP4TrackId hid;
  MP4Duration duration;

  numSamples = MP4GetTrackNumberOfSamples(file, mediaTrackId);
  if (numSamples == 0) {
    return false;
  }
  maxSampleSize = MP4GetTrackMaxSampleSize(file, mediaTrackId);
  u_int8_t* pSampleBuffer = (u_int8_t*)malloc(maxSampleSize);
  if (pSampleBuffer == NULL) {
    return false;
  }

  hid = MP4AddHintTrack(file, mediaTrackId);
  if (hid == MP4_INVALID_TRACK_ID) {
    free(pSampleBuffer);
    return false;
  }

  uint8_t payloadNumber = MP4_SET_DYNAMIC_PAYLOAD;
  MP4SetHintTrackRtpPayload(file,
                            hid,
                            "H263-2000",
                            &payloadNumber,
                            0,
                            NULL,
                            true,
                            false);

  // strictly speaking, this is not required for H.263 - it's a quicktime
  // thing.
  u_int16_t videoWidth = MP4GetTrackVideoWidth(file, mediaTrackId);
  u_int16_t videoHeight = MP4GetTrackVideoHeight(file, mediaTrackId);
  
  char sdpString[80];
  sprintf(sdpString, "a=cliprect:0,0,%d,%d\015\012", videoHeight, videoWidth);
  
  MP4AppendHintTrackSdp(file, 
 			hid,
 			sdpString);

  for (uint32_t sid = 1; sid <= numSamples; sid++) {

    duration = MP4GetSampleDuration(file, mediaTrackId, sid);

    MP4AddRtpVideoHint(file, hid, false, 0);

    u_int32_t sampleSize = maxSampleSize;
    MP4Timestamp startTime;
    MP4Duration duration;
    MP4Duration renderingOffset;
    bool isSyncSample;

    bool rc = MP4ReadSample(file, mediaTrackId, sid,
                            &pSampleBuffer, &sampleSize,
                            &startTime, &duration,
                            &renderingOffset, &isSyncSample);

    if (!rc) {
      MP4DeleteTrack(file, hid);
      free(pSampleBuffer);
      return false;
    }

    // need to skip the first 2 bytes of the packet - it is the
    //start code
    uint16_t payload_head = htons(0x400);
    uint32_t offset = sizeof(payload_head);
    uint32_t remaining = sampleSize - sizeof(payload_head);
    while (remaining) {
      bool last_pak = false;
      uint32_t len;

      if (remaining + 2 <= maxPayloadSize) {
        len = remaining;
        last_pak = true;
      } else {
        len = maxPayloadSize - 2;
      }
      MP4AddRtpPacket(file, hid, last_pak);

      MP4AddRtpImmediateData(file, hid,
                            (u_int8_t*)&payload_head, sizeof(payload_head));
      payload_head = 0;
      MP4AddRtpSampleData(file, hid, sid,
                          offset, len);
      offset += len;
      remaining -= len;
    }
    MP4WriteRtpHint(file, hid, duration, true);
  }

  free(pSampleBuffer);

  return true;
}
Example #16
int decodeMP4file(char *sndfile, aac_dec_opt *opt)
{
    int track;
    unsigned long samplerate;
    unsigned char channels;
    void *sample_buffer;

    MP4FileHandle infile;
    MP4SampleId sampleId, numSamples;

    audio_file *aufile;

    faacDecHandle hDecoder;
    faacDecFrameInfo frameInfo;

    unsigned char *buffer;
    int buffer_size;

    int first_time = 1;

    hDecoder = faacDecOpen();

    infile = MP4Read(opt->filename, 0);
    if (!infile)
    {
        /* unable to open file */
        error_handler("Error opening file: %s\n", opt->filename);
        return 1;
    }

    if ((track = GetAACTrack(infile)) < 0)
    {
        error_handler("Unable to find correct AAC sound track in the MP4 file.\n");
        MP4Close(infile);
        return 1;
    }

    buffer = NULL;
    buffer_size = 0;
    MP4GetTrackESConfiguration(infile, track, &buffer, &buffer_size);

    if(faacDecInit2(hDecoder, buffer, buffer_size, &samplerate, &channels) < 0)
    {
        /* If some error initializing occured, skip the file */
        error_handler("Error initializing decoder library.\n");
        faacDecClose(hDecoder);
        MP4Close(infile);
        return 1;
    }
    if (buffer)
        free(buffer);

    numSamples = MP4GetTrackNumberOfSamples(infile, track);

    for (sampleId = 1; sampleId <= numSamples; sampleId++)
    {
        int rc;

        /* get access unit from MP4 file */
        buffer = NULL;
        buffer_size = 0;

        rc = MP4ReadSample(infile, track, sampleId, &buffer, &buffer_size, NULL, NULL, NULL, NULL);
        if (rc == 0)
        {
            error_handler("Reading from MP4 file failed.\n");
            faacDecClose(hDecoder);
            MP4Close(infile);
            return 1;
        }

        sample_buffer = faacDecDecode(hDecoder, &frameInfo, buffer, buffer_size);

        if (buffer)
            free(buffer);

        opt->progress_update((long)numSamples, sampleId);

        /* open the sound file now that the number of channels are known */
        if (first_time && !frameInfo.error)
        {
            if(opt->decode_mode == 0)
            {
                if (Set_WIN_Params (INVALID_FILEDESC, samplerate, SAMPLE_SIZE,
                                frameInfo.channels) < 0)
                {
                    error_handler("\nCan't access %s\n", "WAVE OUT");
                    faacDecClose(hDecoder);
                    MP4Close(infile);
                    return (0);
                }
            }
            else
            {
                aufile = open_audio_file(sndfile, samplerate, frameInfo.channels,
                     opt->output_format, opt->file_type, aacChannelConfig2wavexChannelMask(&frameInfo));

                if (aufile == NULL)
                {
                    faacDecClose(hDecoder);
                    MP4Close(infile);
                    return 0;
                }
            }
            first_time = 0;
        }

        if ((frameInfo.error == 0) && (frameInfo.samples > 0))
        {
            if(opt->decode_mode == 0)
                WIN_Play_Samples((short*)sample_buffer, frameInfo.channels*frameInfo.samples);
            else
                write_audio_file(aufile, sample_buffer, frameInfo.samples, 0);
        }

        if (frameInfo.error > 0)
        {
            error_handler("Error: %s\n",
            faacDecGetErrorMessage(frameInfo.error));
            break;
        }
        if(stop_decoding)
            break;
    }


    faacDecClose(hDecoder);


    MP4Close(infile);

    if(opt->decode_mode == 0)
        WIN_Audio_close();
    else
    {
        if (!first_time)
            close_audio_file(aufile);
    }

    return frameInfo.error;
}
Example #17
/*
 * read_frame for video - this will try to read the next frame - it
 * tries to be smart about reading it 1 time if we've already read it
 * while bookmarking
 */
void CMp4ByteStream::read_frame (uint32_t frame_to_read,
				 frame_timestamp_t *pts)
{
#ifdef DEBUG_MP4_FRAME 
  mp4f_message(LOG_DEBUG, "%s - Reading frame %d", m_name, frame_to_read);
#endif
  if (m_frame_in_buffer == frame_to_read) {
#ifdef DEBUG_MP4_FRAME
    mp4f_message(LOG_DEBUG, 
		 "%s - frame in buffer %u %u "U64, m_name, 
		 m_byte_on, m_this_frame_size, m_frame_on_ts);
#endif
    m_byte_on = 0;
    m_frame_on_ts = m_frame_in_buffer_ts;
    m_frame_on_has_sync = m_frame_in_buffer_has_sync;
    if (pts != NULL) {
      pts->msec_timestamp = m_frame_on_ts;
      pts->audio_freq_timestamp = m_frame_on_sample_ts;
      pts->audio_freq = m_sample_freq;
      pts->timestamp_is_pts = false;
    }
    return;
  }
  // Haven't already read the next frame,  so - get the size, see if
  // it fits, then read it into the appropriate buffer
  m_parent->lock_file_mutex();

  m_frame_in_buffer = frame_to_read;

  MP4Timestamp sampleTime;
  MP4Duration sampleDuration, sampleRenderingOffset;
  bool isSyncSample = FALSE;
  bool ret;
  u_int8_t *temp;
  m_this_frame_size = m_max_frame_size;
  temp = m_buffer;
  ret = MP4ReadSample(m_parent->get_file(),
		      m_track,
		      frame_to_read,
		      &temp,
		      &m_this_frame_size,
		      &sampleTime,
		      &sampleDuration,
		      &sampleRenderingOffset,
		      &isSyncSample);
  if (ret == FALSE) {
    mp4f_message(LOG_ALERT, "Couldn't read frame from mp4 file - frame %d %d", 
		 frame_to_read, m_track);
    m_eof = true;
    m_parent->unlock_file_mutex();
    return;
  }
  memset(m_buffer + m_this_frame_size, 0, sizeof(uint32_t));
  //*(uint32_t *)(m_buffer + m_this_frame_size) = 0; // add some 0's
#ifdef OUTPUT_TO_FILE
  fwrite(m_buffer, m_this_frame_size, 1, m_output_file);
#endif
  uint64_t ts;

  ts = MP4ConvertFromTrackTimestamp(m_parent->get_file(),
				    m_track,
				    sampleTime,
				    MP4_MSECS_TIME_SCALE);
  //if (isSyncSample == TRUE && m_has_video != 0 ) player_debug_message("%s has sync sample "U64, m_name, ts);
#if 0
  mp4f_message(LOG_DEBUG, "%s frame %u sample time "U64 " converts to time "U64, 
	       m_name, frame_to_read, sampleTime, ts);
#endif
  if (pts != NULL) {
    pts->msec_timestamp = ts;
    pts->audio_freq_timestamp = sampleTime;
    pts->audio_freq = m_sample_freq;
    pts->timestamp_is_pts = false;
  }
  m_frame_on_sample_ts = sampleTime;
  m_frame_in_buffer_ts = ts;
  m_frame_on_ts = ts;
  m_frame_in_buffer_has_sync = m_frame_on_has_sync = isSyncSample;
		
  m_parent->unlock_file_mutex();
  m_byte_on = 0;
}
Example #18
QWORD MP4RtpTrack::Read(Listener *listener)
{
	int last = 0;
	uint8_t* data;
	bool isSyncSample;

	// If it's first packet of a frame
	if (!numHintSamples)
	{
		// Get number of rtp packets for this sample
		if (!MP4ReadRtpHint(mp4, hint, sampleId, &numHintSamples))
		{
			//Print error
			Error("Error reading hint");
			//Exit
			return MP4_INVALID_TIMESTAMP;
		}

		// Get number of samples for this sample
		frameSamples = MP4GetSampleDuration(mp4, hint, sampleId);

		// Get size of sample
		frameSize = MP4GetSampleSize(mp4, hint, sampleId);

		// Get sample timestamp
		frameTime = MP4GetSampleTime(mp4, hint, sampleId);
		//Convert to milliseconds
		frameTime = MP4ConvertFromTrackTimestamp(mp4, hint, frameTime, 1000);

		// Check if it is H264 and it is a Sync frame
		if (codec==VideoCodec::H264 && MP4GetSampleSync(mp4,track,sampleId))
			// Send SEI info
			SendH263SEI(listener);

		//Get max data length
		BYTE *data = NULL;
		DWORD dataLen = 0;
		MP4Timestamp	startTime;
		MP4Duration	duration;
		MP4Duration	renderingOffset;

		//Get values
		data	= frame->GetData();
		dataLen = frame->GetMaxMediaLength();
		
		// Read next rtp packet
		if (!MP4ReadSample(
			mp4,				// MP4FileHandle hFile
			track,				// MP4TrackId hintTrackId
			sampleId,			// MP4SampleId sampleId,
			(u_int8_t **) &data,		// u_int8_t** ppBytes
			(u_int32_t *) &dataLen,		// u_int32_t* pNumBytes
			&startTime,			// MP4Timestamp* pStartTime
			&duration,			// MP4Duration* pDuration
			&renderingOffset,		// MP4Duration* pRenderingOffset
			&isSyncSample			// bool* pIsSyncSample
			))
		{
			Error("Error reading sample");
			//Last
			return MP4_INVALID_TIMESTAMP;
		}

		//Check type
		if (media == MediaFrame::Video)
		{
			//Get video frame
			VideoFrame *video = (VideoFrame*)frame;
			//Set length
			video->SetLength(dataLen);
			//Timestamp
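			// (rescale track-time units to the 90 kHz RTP video clock)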
			video->SetTimestamp(startTime*90000/timeScale);
			//Set intra
			video->SetIntra(isSyncSample);
		} else {
			//Get Audio frame
			AudioFrame *audio = (AudioFrame*)frame;
			//Set length
			audio->SetLength(dataLen);
			//Timestamp
			audio->SetTimestamp(startTime*8000/timeScale);
		}

		//Check listener
		if (listener)
			//Frame callback
			listener->onMediaFrame(*frame);
	}

	// if it's the last
	if (packetIndex + 1 == numHintSamples)
		//Set last mark
		last = 1;
	
	// Set mark bit
	rtp.SetMark(last);

	// Get data pointer
	data = rtp.GetMediaData();
	//Get max data length
	DWORD dataLen = rtp.GetMaxMediaLength();

	// Read next rtp packet
	if (!MP4ReadRtpPacket(
				mp4,				// MP4FileHandle hFile
				hint,				// MP4TrackId hintTrackId
				packetIndex++,			// u_int16_t packetIndex
				(u_int8_t **) &data,		// u_int8_t** ppBytes
				(u_int32_t *) &dataLen,		// u_int32_t* pNumBytes
				0,				// u_int32_t ssrc DEFAULT(0)
				0,				// bool includeHeader DEFAULT(true)
				1				// bool includePayload DEFAULT(true)
	))
	{
		//Error
		Error("Error reading packet [%d,%d,%d]\n", hint, track,packetIndex);
		//Exit
		return MP4_INVALID_TIMESTAMP;
	}
		

	//Check
	if (dataLen>rtp.GetMaxMediaLength())
	{
		//Error
		Error("RTP packet too big [%u,%u]\n",dataLen,rtp.GetMaxMediaLength());
		//Exit
		return MP4_INVALID_TIMESTAMP;
	}
	
	//Set length
	rtp.SetMediaLength(dataLen);
	// Write frame
	listener->onRTPPacket(rtp);

	// Are we the last packet in a hint?
	if (last)
	{
		// The first hint
		packetIndex = 0;
		// Go for next sample
		sampleId++;
		numHintSamples = 0;
		//Return next frame time
		return GetNextFrameTime();
	}

	// This packet is this one
	return frameTime;
}