Example #1
/** Action for setting chapters every n seconds in <b>job.file</b>
 *
 *  
 *  @param job the job to process
 *  @return mp4v2::util::SUCCESS if successful, mp4v2::util::FAILURE otherwise
 */
bool
ChapterUtility::actionEvery( JobContext& job )
{
    ostringstream oss;
    oss << "Setting " << getChapterTypeName( _ChapterType ) << " chapters every "
        << _ChaptersEvery << " seconds in file " << '"' << job.file << '"' << endl;

    verbose1f( "%s", oss.str().c_str() );
    if( dryrunAbort() )
    {
        return SUCCESS;
    }

    job.fileHandle = MP4Modify( job.file.c_str() );
    if( job.fileHandle == MP4_INVALID_FILE_HANDLE )
    {
        return herrf( "unable to open for write: %s\n", job.file.c_str() );
    }

    bool isVideoTrack = false;
    MP4TrackId refTrackId = getReferencingTrack( job.fileHandle, isVideoTrack );
    if( !MP4_IS_VALID_TRACK_ID(refTrackId) )
    {
        return herrf( "unable to find a video or audio track in file %s\n", job.file.c_str() );
    }

    Timecode refTrackDuration( MP4GetTrackDuration( job.fileHandle, refTrackId ), MP4GetTrackTimeScale( job.fileHandle, refTrackId ) );
    refTrackDuration.setScale( CHAPTERTIMESCALE );

    Timecode chapterDuration( _ChaptersEvery * 1000, CHAPTERTIMESCALE );
    chapterDuration.setFormat( Timecode::DECIMAL );
    vector<MP4Chapter_t> chapters;

    do
    {
        MP4Chapter_t chap;
        chap.duration = refTrackDuration.duration > chapterDuration.duration ? chapterDuration.duration : refTrackDuration.duration;
        sprintf(chap.title, "Chapter %lu", (unsigned long)chapters.size()+1);

        chapters.push_back( chap );
        refTrackDuration -= chapterDuration;
    }
    while( refTrackDuration.duration > 0 );

    if( 0 < chapters.size() )
    {
        MP4SetChapters(job.fileHandle, &chapters[0], (uint32_t)chapters.size(), _ChapterType);
    }

    fixQtScale( job.fileHandle );
    job.optimizeApplicable = true;

    return SUCCESS;
}
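
The slicing above works because both the reference track duration and the per-chapter interval are first rescaled to the shared CHAPTERTIMESCALE. A minimal standalone sketch of the same count against the raw mp4v2 calls, assuming fh and track are a valid file handle and track id (not part of the utility above):

// Sketch: how many fixed-length chapters fit into a track.
#include <mp4v2/mp4v2.h>
#include <cstdint>
#include <cstdio>

void printChapterCount(MP4FileHandle fh, MP4TrackId track, uint64_t chapterSeconds)
{
    MP4Duration duration = MP4GetTrackDuration(fh, track);   // in track time-scale units
    uint32_t timeScale   = MP4GetTrackTimeScale(fh, track);  // units per second
    if (timeScale == 0 || chapterSeconds == 0)
        return;

    uint64_t durationSecs = duration / timeScale;
    uint64_t chapterCount = (durationSecs + chapterSeconds - 1) / chapterSeconds; // round up
    std::printf("%llu chapters of %llu seconds each\n",
                (unsigned long long)chapterCount, (unsigned long long)chapterSeconds);
}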
Example #2
/**
    \fn setMaxDurationPerChunk
    \brief Max chunk duration is 1 sec by default; set it to ~4 frames
*/
bool muxerMp4v2::setMaxDurationPerChunk(MP4TrackId track, uint32_t samples)
{
    uint32_t trackScale=MP4GetTrackTimeScale(handle,track);
    uint32_t   mx;
    mx=4*samples;
    ADM_info("Setting max chunk duration =%d; scale=%d for track %d\n",(int)mx,(int)trackScale,(int)track);
    if(!MP4SetTrackDurationPerChunk(handle,track,mx))
    {
        ADM_error("Cannot set TrackDurationPerChunk\n");
        return false;
    }
    return true;
}
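
MP4SetTrackDurationPerChunk expects the chunk duration in track time-scale ticks, which is why the value above is a multiple of the per-frame sample count rather than a number of seconds. A hedged sketch of a hypothetical helper that derives the tick count from a frame count and frame rate instead:

// Sketch: express "n frames" as a duration in track time-scale ticks.
// setChunkDurationInFrames is a hypothetical name, not part of the muxer above.
#include <mp4v2/mp4v2.h>
#include <cstdint>

bool setChunkDurationInFrames(MP4FileHandle handle, MP4TrackId track,
                              uint32_t frames, double frameRate)
{
    uint32_t timeScale = MP4GetTrackTimeScale(handle, track); // ticks per second
    if (timeScale == 0 || frameRate <= 0.0)
        return false;

    MP4Duration ticks = (MP4Duration)(frames * (timeScale / frameRate));
    return MP4SetTrackDurationPerChunk(handle, track, ticks);
}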
Example #3
    MP4Reader(const std::string &file_path)
            : time_scale(9 * MP4_MSECS_TIME_SCALE)
            , file_path(file_path)
            , handle(MP4_INVALID_FILE_HANDLE)
            , video_track_id(MP4_INVALID_TRACK_ID)
            , next_video_sample_idx(1)
            , video_sample(nullptr)
            , video_timescale(0)
            , video_sample_max_size(0)
            , video_sample_number(0)
            , video_duration(0)
            , pSeqHeaders(nullptr)
            , pSeqHeaderSize(nullptr)
            , pPictHeaders(nullptr)
            , pPictHeaderSize(nullptr)
    {
        handle = MP4Read(this->file_path.c_str());

        video_track_id = MP4FindTrackId(handle, 0, MP4_VIDEO_TRACK_TYPE);
        if (video_track_id != MP4_INVALID_TRACK_ID) {
            video_timescale = MP4GetTrackTimeScale(handle, video_track_id);
            video_sample_max_size = MP4GetTrackMaxSampleSize(handle, video_track_id) * 2;
            video_duration = MP4GetTrackDuration(handle, video_track_id);
            video_sample = new unsigned char[video_sample_max_size];
            video_sample_number = MP4GetTrackNumberOfSamples(handle, video_track_id);

            if (MP4GetTrackH264SeqPictHeaders(handle,
                                              video_track_id,
                                              &pSeqHeaders,
                                              &pSeqHeaderSize,
                                              &pPictHeaders,
                                              &pPictHeaderSize))
            {
                printf("Get SPS(%d) and PPS(%d)\n", *pSeqHeaderSize, *pPictHeaderSize);

                for(int i = 0; (pSeqHeaders[i] && pSeqHeaderSize[i]); i++) {
                    printf("SPS(%d): %02x %02x %02x %02x %02x\n", i,
                           pSeqHeaders[i][0], pSeqHeaders[i][1], pSeqHeaders[i][2],
                           pSeqHeaders[i][3], pSeqHeaders[i][4]);
                }
                for(int i = 0; (pPictHeaders[i] && pPictHeaderSize[i]); i++) {
                    printf("PPS(%d): %02x %02x %02x %02x %02x\n", i,
                           pPictHeaders[i][0], pPictHeaders[i][1], pPictHeaders[i][2],
                           pPictHeaders[i][3], pPictHeaders[i][4]);
                }
            }
        }
    }
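
The SPS/PPS arrays filled in by MP4GetTrackH264SeqPictHeaders are allocated by the library and must be released by the caller. A cleanup sketch under the common assumption that they are malloc()-allocated and NULL-terminated (the convention the print loops above rely on); freeSeqPictHeaders is a hypothetical helper, not part of the reader:

// Sketch: release the parameter-set arrays from MP4GetTrackH264SeqPictHeaders.
#include <cstdint>
#include <cstdlib>

static void freeSeqPictHeaders(uint8_t **seqHeaders, uint32_t *seqHeaderSize,
                               uint8_t **pictHeaders, uint32_t *pictHeaderSize)
{
    if (seqHeaders) {
        for (uint32_t i = 0; seqHeaders[i]; ++i)
            std::free(seqHeaders[i]);      // each SPS buffer
        std::free(seqHeaders);
    }
    std::free(seqHeaderSize);
    if (pictHeaders) {
        for (uint32_t i = 0; pictHeaders[i]; ++i)
            std::free(pictHeaders[i]);     // each PPS buffer
        std::free(pictHeaders);
    }
    std::free(pictHeaderSize);
}

A destructor for the reader would call this with pSeqHeaders, pSeqHeaderSize, pPictHeaders and pPictHeaderSize, then delete[] video_sample and close the handle with MP4Close.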
Example #4
	void Context::getTracks(const char * file)
	{
		int i = 0;
		bool audioTrack = false, videoTrack = false;

		if (!isOpen()) throw Exception(file, "File is closed.");

		for (;;)
		{
			TrackProperties track;
			if((track.hint = MP4FindTrackId(fh, i++, MP4_HINT_TRACK_TYPE, 0)) == MP4_INVALID_TRACK_ID) break;

			MP4GetHintTrackRtpPayload(fh, track.hint, &track.codecName, &track.payload, NULL, NULL);

			track.track = MP4GetHintTrackReferenceTrackId(fh, track.hint);
			if(track.track == MP4_INVALID_TRACK_ID) continue;
			track.clock = MP4GetTrackTimeScale(fh, track.hint);

			if (!strcmp(MP4GetTrackType(fh, track.track), MP4_AUDIO_TRACK_TYPE)) {
				audioTrack = true;

				if(!strncmp(track.codecName, "PCM", 3))
					track.packetLength = 20;
				else
					track.packetLength = track.clock = 0;

				audio = track;
			} else if (!strcmp(MP4GetTrackType(fh, track.track), MP4_VIDEO_TRACK_TYPE)) {
				videoTrack = true;

				const char * sdp = MP4GetHintTrackSdp(fh, track.hint);
				const char * fmtp = strstr(sdp, "fmtp");

				if (fmtp) {
					// finds beginning of 'fmtp' value;
					for(fmtp += 5; *fmtp != ' '; ++fmtp);
					++fmtp;

					const char * eol = fmtp;
					for(;*eol != '\r' && *eol != '\n'; ++eol);
					video.fmtp = std::string(fmtp, eol);
				}
				video.track = track;
			}
		}

		if (!audioTrack || !videoTrack) throw Exception(file, "Missing audio/video track.");
	}
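
The fmtp value is extracted from the SDP with raw pointer walking above. Purely as an illustration, the same extraction using std::string (extractFmtp is a hypothetical name):

// Sketch: pull the value of the "a=fmtp:" attribute out of an SDP blob.
#include <string>

static std::string extractFmtp(const std::string &sdp)
{
    std::string::size_type pos = sdp.find("a=fmtp:");
    if (pos == std::string::npos)
        return std::string();

    pos = sdp.find(' ', pos);          // skip "a=fmtp:<payload number>"
    if (pos == std::string::npos)
        return std::string();
    ++pos;

    std::string::size_type eol = sdp.find_first_of("\r\n", pos);
    return eol == std::string::npos ? sdp.substr(pos) : sdp.substr(pos, eol - pos);
}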
Example #5
extern "C" u_int32_t MP4AV_AudioGetSamplingRate(
	MP4FileHandle mp4File, 
	MP4TrackId audioTrackId)
{
	u_int8_t audioType = 
		MP4GetTrackEsdsObjectTypeId(mp4File, audioTrackId);

	if (audioType == MP4_INVALID_AUDIO_TYPE) {
		return 0;
	}

	if (MP4_IS_MP3_AUDIO_TYPE(audioType)) {
		MP4AV_Mp3Header mp3Hdr =
			GetMp3Header(mp4File, audioTrackId);

		if (mp3Hdr == 0) {
			return 0;
		}
		return MP4AV_Mp3GetHdrSamplingRate(mp3Hdr);

	} else if (MP4_IS_AAC_AUDIO_TYPE(audioType)) {
		u_int8_t* pAacConfig = NULL;
		u_int32_t aacConfigLength;

		MP4GetTrackESConfiguration(
			mp4File, 
			audioTrackId,
			&pAacConfig,
			&aacConfigLength);

		if (pAacConfig == NULL || aacConfigLength < 2) {
			return 0;
		}

		u_int32_t samplingRate =
			MP4AV_AacConfigGetSamplingRate(pAacConfig);

		free(pAacConfig);

		return samplingRate;

	} else if ((audioType == MP4_PCM16_LITTLE_ENDIAN_AUDIO_TYPE)||
	(audioType == MP4_PCM16_BIG_ENDIAN_AUDIO_TYPE)) {
		return MP4GetTrackTimeScale(mp4File, audioTrackId);
	}

	return 0;
}
Example #6
/**************************************************************************
 * Quicktime stream base class functions
 **************************************************************************/
CMp4ByteStream::CMp4ByteStream (CMp4File *parent,
				MP4TrackId track,
				const char *type,
				bool has_video)
  : COurInByteStream(type)
{
#ifdef ISMACRYP_DEBUG
  my_enc_file = fopen("encbuffer.raw", "w");
  my_unenc_file = fopen("unencbuffer.raw", "w");
  my_unenc_file2 = fopen("unencbuffer2.raw", "w");
#endif
#ifdef OUTPUT_TO_FILE
  char buffer[80];
  strcpy(buffer, type);
  strcat(buffer, ".raw");
  m_output_file = fopen(buffer, "w");
#endif
  m_track = track;
  m_frame_on = 1;
  m_parent = parent;
  m_eof = false;
  MP4FileHandle fh = parent->get_file();
  m_frames_max = MP4GetTrackNumberOfSamples(fh, m_track);
  mp4f_message(LOG_DEBUG, "%s - %u samples", type, m_frames_max);
  m_max_frame_size = MP4GetTrackMaxSampleSize(fh, m_track) + 4; 
  m_sample_freq = MP4GetTrackTimeScale(fh, m_track);
  m_buffer = (u_int8_t *) malloc(m_max_frame_size * sizeof(u_int8_t));
  m_has_video = has_video;
  m_frame_in_buffer = 0xffffffff;
  MP4Duration trackDuration;
  trackDuration = MP4GetTrackDuration(fh, m_track);
  uint64_t max_ts;
  max_ts = MP4ConvertFromTrackDuration(fh, 
				       m_track, 
				       trackDuration,
				       MP4_MSECS_TIME_SCALE);
  m_max_time = UINT64_TO_DOUBLE(max_ts);
  m_max_time /= 1000.0;
  mp4f_message(LOG_DEBUG, 
	       "MP4 %s max time is "U64" %g", type, max_ts, m_max_time);
  read_frame(1, NULL);
}
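
The constructor converts the track duration to milliseconds with MP4ConvertFromTrackDuration and then divides by 1000 by hand. The same conversion as a free-standing sketch, assuming a valid handle and track id:

// Sketch: a track's total duration in floating-point seconds.
#include <mp4v2/mp4v2.h>
#include <cstdint>

static double trackDurationSeconds(MP4FileHandle fh, MP4TrackId track)
{
    MP4Duration dur = MP4GetTrackDuration(fh, track);
    uint64_t msecs  = MP4ConvertFromTrackDuration(fh, track, dur, MP4_MSECS_TIME_SCALE);
    return (double)msecs / 1000.0;
}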
Example #7
/** Action for importing chapters into the <b>job.file</b>
 *
 *  
 *  @param job the job to process
 *  @return mp4v2::util::SUCCESS if successful, mp4v2::util::FAILURE otherwise
 */
bool
ChapterUtility::actionImport( JobContext& job )
{
    vector<MP4Chapter_t> chapters;
    Timecode::Format format;

    // create the chapter file name
    string inName = job.file;
    if( _ChapterFile.empty() )
    {
        FileSystem::pathnameStripExtension( inName );
        inName.append( ".chapters.txt" );
    }
    else
    {
        inName = _ChapterFile;
    }

    if( parseChapterFile( inName, chapters, format ) )
    {
        return FAILURE;
    }

    ostringstream oss;
    oss << "Importing " << chapters.size() << " " << getChapterTypeName( _ChapterType );
    oss << " chapters from file " << inName << " into file " << '"' << job.file << '"' << endl;

    verbose1f( "%s", oss.str().c_str() );
    if( dryrunAbort() )
    {
        return SUCCESS;
    }

    if( 0 == chapters.size() )
    {
        return herrf( "No chapters found in file %s\n", inName.c_str() );
    }

    job.fileHandle = MP4Modify( job.file.c_str() );
    if( job.fileHandle == MP4_INVALID_FILE_HANDLE )
    {
        return herrf( "unable to open for write: %s\n", job.file.c_str() );
    }

    bool isVideoTrack = false;
    MP4TrackId refTrackId = getReferencingTrack( job.fileHandle, isVideoTrack );
    if( !MP4_IS_VALID_TRACK_ID(refTrackId) )
    {
        return herrf( "unable to find a video or audio track in file %s\n", job.file.c_str() );
    }
    if( Timecode::FRAME == format && !isVideoTrack )
    {
        // we need a video track for this
        return herrf( "unable to find a video track in file %s but chapter file contains frame timestamps\n", job.file.c_str() );
    }

    // get duration and recalculate scale
    Timecode refTrackDuration( MP4GetTrackDuration( job.fileHandle, refTrackId ),
                               MP4GetTrackTimeScale( job.fileHandle, refTrackId ) );
    refTrackDuration.setScale( CHAPTERTIMESCALE );

    // check for chapters starting after duration of reftrack
    for( vector<MP4Chapter_t>::iterator it = chapters.begin(); it != chapters.end(); )
    {
        Timecode curr( (*it).duration, CHAPTERTIMESCALE );
        if( refTrackDuration <= curr )
        {
            hwarnf( "Chapter '%s' start: %s, playlength of file: %s, chapter cannot be set\n",
                    (*it).title, curr.svalue.c_str(), refTrackDuration.svalue.c_str() );
            it = chapters.erase( it );
        }
        else
        {
            ++it;
        }
    }
    if( 0 == chapters.size() )
    {
        return SUCCESS;
    }

    // convert start time into duration
	uint32_t framerate = static_cast<uint32_t>( CHAPTERTIMESCALE );
    if( Timecode::FRAME == format )
    {
        // get the framerate
        MP4SampleId sampleCount = MP4GetTrackNumberOfSamples( job.fileHandle, refTrackId );
        Timecode tmpcd( refTrackDuration.svalue, CHAPTERTIMESCALE );
		framerate = static_cast<uint32_t>( std::ceil( ((double)sampleCount / (double)tmpcd.duration) * CHAPTERTIMESCALE ) );
    }

    for( vector<MP4Chapter_t>::iterator it = chapters.begin(); it != chapters.end(); ++it )
    {
        MP4Duration currDur = (*it).duration;
        MP4Duration nextDur =  chapters.end() == it+1 ? refTrackDuration.duration : (*(it+1)).duration;

        if( Timecode::FRAME == format )
        {
            // convert from frame nr to milliseconds
            currDur = convertFrameToMillis( (*it).duration, framerate );

            if( chapters.end() != it+1 )
            {
                nextDur = convertFrameToMillis( (*(it+1)).duration, framerate );
            }
        }

        (*it).duration = nextDur - currDur;
    }

    // now set the chapters
    MP4SetChapters( job.fileHandle, &chapters[0], (uint32_t)chapters.size(), _ChapterType );

    fixQtScale( job.fileHandle );
    job.optimizeApplicable = true;

    return SUCCESS;
}
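
The frame-rate estimate above is simply the sample count divided by the duration once both are expressed in CHAPTERTIMESCALE units. Against the raw mp4v2 calls the same idea reduces to the sketch below (assuming a valid video track; not part of the utility):

// Sketch: estimate a video track's frame rate as samples per second.
#include <mp4v2/mp4v2.h>

static double estimateFrameRate(MP4FileHandle fh, MP4TrackId videoTrack)
{
    MP4SampleId samples  = MP4GetTrackNumberOfSamples(fh, videoTrack);
    MP4Duration duration = MP4GetTrackDuration(fh, videoTrack);   // track time-scale units
    uint32_t timeScale   = MP4GetTrackTimeScale(fh, videoTrack);  // units per second
    if (duration == 0)
        return 0.0;
    return (double)samples * (double)timeScale / (double)duration;
}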
Example #8
extern "C" bool MP4AV_RfcIsmaHinter(
	MP4FileHandle mp4File, 
	MP4TrackId mediaTrackId, 
	bool interleave,
	u_int16_t maxPayloadSize)
{
	// gather information, and check for validity

	u_int32_t numSamples =
		MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);

	if (numSamples == 0) {
		return false;
	}

	u_int32_t timeScale =
		MP4GetTrackTimeScale(mp4File, mediaTrackId);

	if (timeScale == 0) {
		return false;
	}

	u_int8_t audioType =
		MP4GetTrackEsdsObjectTypeId(mp4File, mediaTrackId);

	if (audioType != MP4_MPEG4_AUDIO_TYPE
	  && !MP4_IS_AAC_AUDIO_TYPE(audioType)) {
		return false;
	}

	u_int8_t mpeg4AudioType =
		MP4GetTrackAudioMpeg4Type(mp4File, mediaTrackId);

	if (audioType == MP4_MPEG4_AUDIO_TYPE) {
		// check that track contains either MPEG-4 AAC or CELP
		if (!MP4_IS_MPEG4_AAC_AUDIO_TYPE(mpeg4AudioType) 
		  && mpeg4AudioType != MP4_MPEG4_CELP_AUDIO_TYPE) {
			return false;
		}
	}

	MP4Duration sampleDuration = 
		MP4AV_GetAudioSampleDuration(mp4File, mediaTrackId);

	if (sampleDuration == MP4_INVALID_DURATION) {
		return false;
	}

	/* get the ES configuration */
	u_int8_t* pConfig = NULL;
	u_int32_t configSize;
	uint8_t channels;

	if (MP4GetTrackESConfiguration(mp4File, mediaTrackId, 
				       &pConfig, &configSize) == false)
	  return false;

	if (!pConfig) {
		return false;
	}
     
	channels = MP4AV_AacConfigGetChannels(pConfig);

	/* convert ES Config into ASCII form */
	char* sConfig = 
		MP4BinaryToBase16(pConfig, configSize);

	free(pConfig);

	if (!sConfig) {
		return false;
	}

	/* create the appropriate SDP attribute */
	uint sdpBufLen = strlen(sConfig) + 256;
	char* sdpBuf = 
	  (char*)malloc(sdpBufLen);

	if (!sdpBuf) {
		free(sConfig);
		return false;
	}


	// now add the hint track
	MP4TrackId hintTrackId =
		MP4AddHintTrack(mp4File, mediaTrackId);

	if (hintTrackId == MP4_INVALID_TRACK_ID) {
		free(sConfig);
		free(sdpBuf);
		return false;
	}

	u_int8_t payloadNumber = MP4_SET_DYNAMIC_PAYLOAD;
	char buffer[10];
	if (channels != 1) {
	  snprintf(buffer, sizeof(buffer), "%u", channels);
	}
	if (MP4SetHintTrackRtpPayload(mp4File, hintTrackId, 
				      "mpeg4-generic", &payloadNumber, 0,
				      channels != 1 ? buffer : NULL) == false) {
	  MP4DeleteTrack(mp4File, hintTrackId);
	  free(sConfig);
	  free(sdpBuf);
	  return false;
	}

	MP4Duration maxLatency;
	bool OneByteHeader = false;
	if (mpeg4AudioType == MP4_MPEG4_CELP_AUDIO_TYPE) {
	  snprintf(sdpBuf, sdpBufLen,
			"a=fmtp:%u "
			"streamtype=5; profile-level-id=15; mode=CELP-vbr; config=%s; "
			"SizeLength=6; IndexLength=2; IndexDeltaLength=2; Profile=0;"
			"\015\012",
				payloadNumber,
				sConfig); 

		// 200 ms max latency for ISMA profile 1
		maxLatency = timeScale / 5;
		OneByteHeader = true;
	} else { // AAC
	  snprintf(sdpBuf, sdpBufLen,
			"a=fmtp:%u "
			"streamtype=5; profile-level-id=15; mode=AAC-hbr; config=%s; "
			"SizeLength=13; IndexLength=3; IndexDeltaLength=3;"
			"\015\012",
				payloadNumber,
				sConfig); 

		// 500 ms max latency for ISMA profile 1
		maxLatency = timeScale / 2;
	}

	/* add this to the track's sdp */
	bool val = MP4AppendHintTrackSdp(mp4File, hintTrackId, sdpBuf);

	free(sConfig);
	free(sdpBuf);
	if (val == false) {
	  MP4DeleteTrack(mp4File, hintTrackId);
	  return false;
	}

	u_int32_t samplesPerPacket = 0;
 
	if (interleave) {
		u_int32_t maxSampleSize =
			MP4GetTrackMaxSampleSize(mp4File, mediaTrackId);

		// compute how many maximum size samples would fit in a packet
		samplesPerPacket = 
			(maxPayloadSize - 2) / (maxSampleSize + 2);

		// can't interleave if this number is 0 or 1
		if (samplesPerPacket < 2) {
			interleave = false;
		}
	}

	bool rc;

	if (interleave) {
		u_int32_t samplesPerGroup = maxLatency / sampleDuration;
		u_int32_t stride;
		stride = samplesPerGroup / samplesPerPacket;

		if (OneByteHeader && stride > 3) stride = 3;
		if (!OneByteHeader && stride > 7) stride = 7;

#if 0
		printf("max latency %llu sampleDuration %llu spg %u spp %u strid %u\n",
		       maxLatency, sampleDuration, samplesPerGroup,
		       samplesPerPacket, stride);
#endif
		rc = MP4AV_AudioInterleaveHinter(
			mp4File, 
			mediaTrackId, 
			hintTrackId,
			sampleDuration, 
			stride,		// stride
			samplesPerPacket,						// bundle
			maxPayloadSize,
			MP4AV_RfcIsmaConcatenator);

	} else {
		rc = MP4AV_AudioConsecutiveHinter(
			mp4File, 
			mediaTrackId, 
			hintTrackId,
			sampleDuration, 
			2,										// perPacketHeaderSize
			2,										// perSampleHeaderSize
			maxLatency / sampleDuration,			// maxSamplesPerPacket
			maxPayloadSize,
			MP4GetSampleSize,
			MP4AV_RfcIsmaConcatenator,
			MP4AV_RfcIsmaFragmenter);
	}

	if (!rc) {
		MP4DeleteTrack(mp4File, hintTrackId);
		return false;
	}

	return true;
}
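
The interleave setup reduces to a few divisions on the audio time scale. A worked sketch with assumed inputs (48 kHz AAC, 1024-tick frame duration, 1460-byte payloads, 384-byte largest frame); the numbers are illustrative, not taken from the hinter above:

// Sketch: the stride arithmetic from the interleaved branch, with assumed inputs.
#include <cstdint>
#include <cstdio>

int main()
{
    const uint32_t timeScale      = 48000;  // assumed AAC sampling rate
    const uint64_t sampleDuration = 1024;   // assumed AAC frame length in time-scale ticks
    const uint32_t maxPayloadSize = 1460;
    const uint32_t maxSampleSize  = 384;    // assumed largest AAC frame in bytes

    uint64_t maxLatency       = timeScale / 2;                          // 500 ms for AAC
    uint32_t samplesPerPacket = (maxPayloadSize - 2) / (maxSampleSize + 2);
    uint32_t samplesPerGroup  = (uint32_t)(maxLatency / sampleDuration);
    uint32_t stride           = samplesPerPacket ? samplesPerGroup / samplesPerPacket : 0;
    if (stride > 7) stride = 7;                                         // two-byte AU header limit

    std::printf("samplesPerPacket=%u samplesPerGroup=%u stride=%u\n",
                samplesPerPacket, samplesPerGroup, stride);
    return 0;
}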
Example #9
File: aac.c  Project: vadmium/deadbeef
static int
aac_init (DB_fileinfo_t *_info, DB_playItem_t *it) {
    aac_info_t *info = (aac_info_t *)_info;

    info->file = deadbeef->fopen (deadbeef->pl_find_meta (it, ":URI"));
    if (!info->file) {
        return -1;
    }

    // probe
    float duration = -1;
    int samplerate = -1;
    int channels = -1;
    int totalsamples = -1;

    info->junk = deadbeef->junk_get_leading_size (info->file);
    if (!info->file->vfs->is_streaming ()) {
        if (info->junk >= 0) {
            deadbeef->fseek (info->file, info->junk, SEEK_SET);
        }
        else {
            info->junk = 0;
        }
    }
    else {
        deadbeef->fset_track (info->file, it);
    }

    info->mp4track = -1;
#if USE_MP4FF
    info->mp4reader.read = aac_fs_read;
    info->mp4reader.write = NULL;
    info->mp4reader.seek = aac_fs_seek;
    info->mp4reader.truncate = NULL;
    info->mp4reader.user_data = info;
#else
    info->mp4reader.open = aac_fs_open;
    info->mp4reader.seek = aac_fs_seek;
    info->mp4reader.read = aac_fs_read;
    info->mp4reader.write = NULL;
    info->mp4reader.close = aac_fs_close;
#endif

    if (!info->file->vfs->is_streaming ()) {
#ifdef USE_MP4FF
        trace ("aac_init: mp4ff_open_read %s\n", deadbeef->pl_find_meta (it, ":URI"));
        info->mp4file = mp4ff_open_read (&info->mp4reader);
        if (info->mp4file) {
            int ntracks = mp4ff_total_tracks (info->mp4file);
            if (ntracks > 0) {
                trace ("m4a container detected, ntracks=%d\n", ntracks);
                int i = -1;
                unsigned char*  buff = 0;
                unsigned int    buff_size = 0;
                for (i = 0; i < ntracks; i++) {
                    mp4AudioSpecificConfig mp4ASC;
                    mp4ff_get_decoder_config (info->mp4file, i, &buff, &buff_size);
                    if(buff){
                        int rc = AudioSpecificConfig(buff, buff_size, &mp4ASC);
                        if(rc < 0)
                            continue;
                        break;
                    }
                }
                trace ("mp4 probe-buffer size: %d\n", buff_size);

                if (i != ntracks && buff) 
                {
                    trace ("mp4 track: %d\n", i);
                    int samples = mp4ff_num_samples(info->mp4file, i);
                    info->mp4samples = samples;
                    info->mp4track = i;

                    // init mp4 decoding
                    info->dec = NeAACDecOpen ();
                    unsigned long srate;
                    unsigned char ch;
                    if (NeAACDecInit2(info->dec, buff, buff_size, &srate, &ch) < 0) {
                        trace ("NeAACDecInit2 returned error\n");
                        free (buff);
                        return -1;
                    }
                    samplerate = srate;
                    channels = ch;
                    samples = (int64_t)samples * srate / mp4ff_time_scale (info->mp4file, i);
                    totalsamples = samples;
                    NeAACDecConfigurationPtr conf = NeAACDecGetCurrentConfiguration (info->dec);
                    conf->dontUpSampleImplicitSBR = 1;
                    NeAACDecSetConfiguration (info->dec, conf);
                    mp4AudioSpecificConfig mp4ASC;
                    if (NeAACDecAudioSpecificConfig(buff, buff_size, &mp4ASC) >= 0)
                    {
                        info->mp4framesize = 1024;
                        if (mp4ASC.frameLengthFlag == 1) {
                            info->mp4framesize = 960;
                        }
//                        if (mp4ASC.sbr_present_flag == 1) {
//                            info->mp4framesize *= 2;
//                        }
                    }
                    totalsamples *= info->mp4framesize;
                    duration = (float)totalsamples  / samplerate;
                }
                else {
                    mp4ff_close (info->mp4file);
                    info->mp4file = NULL;
                }
                if (buff) {
                    free (buff);
                }
            }
            else {
                mp4ff_close (info->mp4file);
                info->mp4file = NULL;
            }
        }
// {{{ libmp4v2 code
#else
        trace ("aac_init: MP4ReadProvider %s\n", deadbeef->pl_find_meta (it, ":URI"));
        info->mp4file = MP4ReadProvider (deadbeef->pl_find_meta (it, ":URI"), 0, &info->mp4reader);
        info->mp4track = MP4FindTrackId(info->mp4file, 0, "audio", 0);
        trace ("aac_init: MP4FindTrackId returned %d\n", info->mp4track);
        if (info->mp4track >= 0) {
            info->timescale = MP4GetTrackTimeScale(info->mp4file, info->mp4track);

            u_int8_t* pConfig;
            uint32_t configSize = 0;
            bool res = MP4GetTrackESConfiguration(info->mp4file, info->mp4track, &pConfig, &configSize);

            mp4AudioSpecificConfig mp4ASC;
            int rc = AudioSpecificConfig(pConfig, configSize, &mp4ASC);
            if (rc >= 0) {
                _info->samplerate = mp4ASC.samplingFrequency;
                _info->channels = MP4GetTrackAudioChannels (info->mp4file, info->mp4track);
                totalsamples = MP4GetTrackNumberOfSamples (info->mp4file, info->mp4track) * 1024 * _info->channels;

                // init mp4 decoding
                info->dec = NeAACDecOpen ();
                unsigned long srate;
                unsigned char ch;
                if (NeAACDecInit2(info->dec, pConfig, configSize, &srate, &ch) < 0) {
                    trace ("NeAACDecInit2 returned error\n");
                    return -1;
                }
                samplerate = srate;
                channels = ch;
                NeAACDecConfigurationPtr conf = NeAACDecGetCurrentConfiguration (info->dec);
                conf->dontUpSampleImplicitSBR = 1;
                NeAACDecSetConfiguration (info->dec, conf);
                mp4AudioSpecificConfig mp4ASC;
                if (NeAACDecAudioSpecificConfig(pConfig, configSize, &mp4ASC) >= 0)
                {
                    info->mp4framesize = 1024;
                    if (mp4ASC.frameLengthFlag == 1) {
                        info->mp4framesize = 960;
                    }
//                    if (mp4ASC.sbr_present_flag == 1) {
//                        info->mp4framesize *= 2;
//                    }
                }
                //totalsamples *= info->mp4framesize;
                free (pConfig);
                info->maxSampleSize = MP4GetTrackMaxSampleSize(info->mp4file, info->mp4track);
                info->samplebuffer = malloc (info->maxSampleSize);
                info->mp4sample = 1;
            }
            else {
                MP4Close (info->mp4file);
                info->mp4file = NULL;
            }
        }
        else {
            MP4Close (info->mp4file);
            info->mp4file = NULL;
        }
#endif
// }}}
        if (!info->mp4file) {
            trace ("mp4 track not found, looking for aac stream...\n");

            if (info->junk >= 0) {
                deadbeef->fseek (info->file, info->junk, SEEK_SET);
            }
            else {
                deadbeef->rewind (info->file);
            }
            int offs = parse_aac_stream (info->file, &samplerate, &channels, &duration, &totalsamples);
            if (offs == -1) {
                trace ("aac stream not found\n");
                return -1;
            }
            if (offs > info->junk) {
                info->junk = offs;
            }
            if (info->junk >= 0) {
                deadbeef->fseek (info->file, info->junk, SEEK_SET);
            }
            else {
                deadbeef->rewind (info->file);
            }
            trace ("found aac stream (junk: %d, offs: %d)\n", info->junk, offs);
        }

        _info->fmt.channels = channels;
        _info->fmt.samplerate = samplerate;
    }
    else {
        // sync before attempting to init
        int samplerate, channels;
        float duration;
        int offs = parse_aac_stream (info->file, &samplerate, &channels, &duration, NULL);
        if (offs < 0) {
            trace ("aac: parse_aac_stream failed\n");
            return -1;
        }
        if (offs > info->junk) {
            info->junk = offs;
        }
        trace("parse_aac_stream returned %x\n", offs);
        deadbeef->pl_replace_meta (it, "!FILETYPE", "AAC");
    }

//    duration = (float)totalsamples / samplerate;
//    deadbeef->pl_set_item_duration (it, duration);

    _info->fmt.bps = 16;
    _info->plugin = &plugin;

    if (!info->mp4file) {
        //trace ("NeAACDecGetCapabilities\n");
        //unsigned long caps = NeAACDecGetCapabilities();

        trace ("NeAACDecOpen\n");
        info->dec = NeAACDecOpen ();

        trace ("prepare for NeAACDecInit: fread %d from offs %lld\n", AAC_BUFFER_SIZE, deadbeef->ftell (info->file));
        info->remaining = deadbeef->fread (info->buffer, 1, AAC_BUFFER_SIZE, info->file);

        NeAACDecConfigurationPtr conf = NeAACDecGetCurrentConfiguration (info->dec);
//        conf->dontUpSampleImplicitSBR = 1;
        NeAACDecSetConfiguration (info->dec, conf);
        unsigned long srate;
        unsigned char ch;
        trace ("NeAACDecInit (%d bytes)\n", info->remaining);
        int consumed = NeAACDecInit (info->dec, info->buffer, info->remaining, &srate, &ch);
        trace ("NeAACDecInit returned samplerate=%d, channels=%d, consumed: %d\n", (int)srate, (int)ch, consumed);
        if (consumed < 0) {
            trace ("NeAACDecInit returned %d\n", consumed);
            return -1;
        }
        if (consumed > info->remaining) {
            trace ("NeAACDecInit consumed more than available! wtf?\n");
            return -1;
        }
        if (consumed == info->remaining) {
            info->remaining = 0;
        }
        else if (consumed > 0) {
            memmove (info->buffer, info->buffer + consumed, info->remaining - consumed);
            info->remaining -= consumed;
        }
        _info->fmt.channels = ch;
        _info->fmt.samplerate = srate;
    }

    if (!info->file->vfs->is_streaming ()) {
        if (it->endsample > 0) {
            info->startsample = it->startsample;
            info->endsample = it->endsample;
            plugin.seek_sample (_info, 0);
        }
        else {
            info->startsample = 0;
            info->endsample = totalsamples-1;
        }
    }
    trace ("totalsamples: %d, endsample: %d, samples-from-duration: %d\n", totalsamples-1, info->endsample, (int)deadbeef->pl_get_item_duration (it)*44100);

    for (int i = 0; i < _info->fmt.channels; i++) {
        _info->fmt.channelmask |= 1 << i;
    }
    info->noremap = 0;
    info->remap[0] = -1;
    trace ("init success\n");

    return 0;
}
Example #10
File: aac.c  Project: vadmium/deadbeef
// returns -1 for error, 0 for mp4, 1 for aac
int
aac_probe (DB_FILE *fp, const char *fname, MP4FILE_CB *cb, float *duration, int *samplerate, int *channels, int *totalsamples, int *mp4track, MP4FILE *pmp4) {
    // try mp4
    trace ("aac_probe: pos=%lld, junk=%d\n", deadbeef->ftell (fp), ((aac_info_t*)cb->user_data)->junk);

    if (mp4track) {
        *mp4track = -1;
    }
    if (*pmp4) {
        *pmp4 = NULL;
    }
    *duration = -1;
#ifdef USE_MP4FF
    trace ("mp4ff_open_read\n");
    mp4ff_t *mp4 = mp4ff_open_read (cb);
#else
    MP4FileHandle mp4 = MP4ReadProvider (fname, 0, cb);
#endif
    if (!mp4) {
        trace ("not an mp4 file\n");
        return -1;
    }
    if (pmp4) {
        *pmp4 = mp4;
    }
#ifdef USE_MP4FF
    int ntracks = mp4ff_total_tracks (mp4);
    if (ntracks > 0) {
        trace ("m4a container detected, ntracks=%d\n", ntracks);
        int i = -1;
        trace ("looking for mp4 data...\n");
        int sr = -1;
        unsigned char*  buff = 0;
        unsigned int    buff_size = 0;
        for (i = 0; i < ntracks; i++) {
            mp4AudioSpecificConfig mp4ASC;
            mp4ff_get_decoder_config(mp4, i, &buff, &buff_size);
            if (buff) {
                int rc = AudioSpecificConfig(buff, buff_size, &mp4ASC);
                sr = mp4ASC.samplingFrequency;
                if(rc < 0) {
                    free (buff);
                    buff = 0;
                    continue;
                }
                break;
            }
        }
        if (i != ntracks && buff) 
        {
            trace ("found audio track (%d)\n", i);
            // init mp4 decoding
            NeAACDecHandle dec = NeAACDecOpen ();
            unsigned long srate;
            unsigned char ch;
            if (NeAACDecInit2(dec, buff, buff_size, &srate, &ch) < 0) {
                trace ("NeAACDecInit2 returned error\n");
                goto error;
            }
            *samplerate = srate;
            *channels = ch;
            int samples = mp4ff_num_samples(mp4, i);
            samples = (int64_t)samples * srate / mp4ff_time_scale (mp4, i);
            int tsamples = samples;
            NeAACDecConfigurationPtr conf = NeAACDecGetCurrentConfiguration (dec);
            conf->dontUpSampleImplicitSBR = 1;
            NeAACDecSetConfiguration (dec, conf);
            mp4AudioSpecificConfig mp4ASC;
            int mp4framesize;
            if (NeAACDecAudioSpecificConfig(buff, buff_size, &mp4ASC) >= 0)
            {
                mp4framesize = 1024;
                if (mp4ASC.frameLengthFlag == 1) {
                    mp4framesize = 960;
                }
                // commented this out, since it fixes double-duration bug on
                // some mp4 files
                //if (mp4ASC.sbr_present_flag == 1) {
                //    mp4framesize *= 2;
                //}
            }
            else {
                trace ("NeAACDecAudioSpecificConfig failed, can't get mp4framesize\n");
                goto error;
            }
            tsamples *= mp4framesize;

            trace ("mp4 nsamples=%d, samplerate=%d, timescale=%d, duration=%lld\n", samples, *samplerate, mp4ff_time_scale(mp4, i), mp4ff_get_track_duration(mp4, i));
            *duration = (float)tsamples / (*samplerate);
            trace ("mp4 duration: %f (tsamples %d/samplerate %d)\n", *duration, tsamples, *samplerate);
            
            NeAACDecClose (dec);

            if (totalsamples) {
                *totalsamples = tsamples;
            }
            if (mp4track) {
                *mp4track = i;
            }
            if (!*pmp4) {
                mp4ff_close (mp4);
            }
            return 0;
error:
            NeAACDecClose (dec);
            free (buff);
            if (!*pmp4) {
                mp4ff_close (mp4);
            }
            return -1;
        }
        else {
            trace ("audio track not found\n");
            mp4ff_close (mp4);
            mp4 = NULL;
        }
        if (buff) {
            free (buff);
            buff = NULL;
        }

    }
#else
    MP4FileHandle mp4File = mp4;
    MP4TrackId trackId = MP4FindTrackId(mp4File, 0, "audio", 0);
    trace ("trackid: %d\n", trackId);
    uint32_t timeScale = MP4GetTrackTimeScale(mp4File, trackId);
    MP4Duration trackDuration = MP4GetTrackDuration(mp4File, trackId);
    MP4SampleId numSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);
    u_int8_t* pConfig;
    uint32_t configSize = 0;
    bool res = MP4GetTrackESConfiguration(mp4File, trackId, &pConfig, &configSize);
    if (res && pConfig) {
        mp4AudioSpecificConfig mp4ASC;
        int rc = AudioSpecificConfig(pConfig, configSize, &mp4ASC);
        free (pConfig);
        if (rc >= 0) {
            *samplerate = mp4ASC.samplingFrequency;
            *channels = MP4GetTrackAudioChannels (mp4File, trackId);
//            int64_t duration = MP4ConvertFromTrackDuration (mp4File, trackId, trackDuration, timeScale);
            int samples = MP4GetTrackNumberOfSamples (mp4File, trackId) * 1024 * (*channels);
            trace ("mp4 nsamples=%d, timescale=%d, samplerate=%d\n", samples, timeScale, *samplerate);
            *duration = (float)samples / (*samplerate);

            if (totalsamples) {
                *totalsamples = samples;
            }
            if (mp4track) {
                *mp4track = trackId;
            }
            if (!*pmp4) {
                MP4Close (mp4);
            }
            return 0;
        }
    }
#endif
    if (*pmp4) {
        *pmp4 = NULL;
    }

    if (mp4) {
#if USE_MP4FF
        mp4ff_close (mp4);
#else
        MP4Close (mp4);
#endif
        mp4 = NULL;
    }
    trace ("mp4 track not found, looking for aac stream...\n");

    // not an mp4, try raw aac
#if USE_MP4FF
    deadbeef->rewind (fp);
#endif
    if (parse_aac_stream (fp, samplerate, channels, duration, totalsamples) == -1) {
        trace ("aac stream not found\n");
        return -1;
    }
    trace ("found aac stream\n");
    return 1;
}
Example #11
int main(int argc, char** argv)
{


	if (argc < 2) {
		fprintf(stderr, "Usage: %s <file>\n", argv[0]);
		exit(1);
	}

	//u_int32_t verbosity = MP4_DETAILS_ALL;
	char* fileName = argv[1];

	// open the mp4 file, and read meta-info
	MP4FileHandle mp4File = MP4Read(fileName  );

	uint8_t profileLevel = MP4GetVideoProfileLevel(mp4File);

	// get a handle on the first video track
	MP4TrackId trackId = MP4FindTrackId(mp4File, 0, "video");

	// gather the crucial track information 

	uint32_t timeScale = MP4GetTrackTimeScale(mp4File, trackId);

	// note all times and durations 
	// are in units of the track time scale

	MP4Duration trackDuration = MP4GetTrackDuration(mp4File, trackId);

	MP4SampleId numSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);

	uint32_t maxSampleSize = MP4GetTrackMaxSampleSize(mp4File, trackId);

	uint8_t* pConfig;
	uint32_t configSize = 0;

	MP4GetTrackESConfiguration(mp4File, trackId, &pConfig, &configSize);

	// initialize decoder with Elementary Stream (ES) configuration

	// done with our copy of ES configuration
	free(pConfig);


	// now consecutively read and display the track samples

	uint8_t* pSample = (uint8_t*)malloc(maxSampleSize);
	uint32_t sampleSize;

	MP4Timestamp sampleTime;
	MP4Duration sampleDuration;
	MP4Duration sampleRenderingOffset;
	bool isSyncSample;

	for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {

		// give ReadSample our own buffer, and let it know how big it is
		sampleSize = maxSampleSize;

		// read next sample from video track
		MP4ReadSample(mp4File, trackId, sampleId, 
			&pSample, &sampleSize,
			&sampleTime, &sampleDuration, &sampleRenderingOffset, 
			&isSyncSample);

		// convert timestamp and duration from track time to milliseconds
		uint64_t myTime = MP4ConvertFromTrackTimestamp(mp4File, trackId, 
			sampleTime, MP4_MSECS_TIME_SCALE);

		uint64_t myDuration = MP4ConvertFromTrackDuration(mp4File, trackId,
			sampleDuration, MP4_MSECS_TIME_SCALE);

		// decode frame and display it
	}

	// close mp4 file
	MP4Close(mp4File);


	// Note to seek to time 'when' in the track
	// use MP4GetSampleIdFromTime(MP4FileHandle hFile, 
	//		MP4Timestamp when, bool wantSyncSample)
	// 'wantSyncSample' determines if a sync sample is desired or not
	// e.g.
	// MP4Timestamp when = 
	//	MP4ConvertToTrackTimestamp(mp4File, trackId, 30, MP4_SECS_TIME_SCALE);
	// MP4SampleId newSampleId = MP4GetSampleIdFromTime(mp4File, when, true);
	// MP4ReadSample(mp4File, trackId, newSampleId, ...);
	// 
	// Note that start time for sample may be later than 'when'

	exit(0);
}
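
The closing comment sketches seeking by timestamp. Put into code, using the current mp4v2 signature of MP4GetSampleIdFromTime (which also takes the track id) and letting MP4ReadSample allocate the buffer:

// Sketch: seek to a given second offset and read the nearest sync sample.
#include <mp4v2/mp4v2.h>
#include <cstdint>
#include <cstdlib>

static bool readSampleAtSecond(MP4FileHandle mp4File, MP4TrackId trackId,
                               uint64_t seconds)
{
    MP4Timestamp when =
        MP4ConvertToTrackTimestamp(mp4File, trackId, seconds, MP4_SECS_TIME_SCALE);
    MP4SampleId sid = MP4GetSampleIdFromTime(mp4File, trackId, when, true);
    if (sid == MP4_INVALID_SAMPLE_ID)
        return false;

    uint8_t *pSample    = NULL;   // NULL asks MP4ReadSample to allocate the buffer
    uint32_t sampleSize = 0;
    bool ok = MP4ReadSample(mp4File, trackId, sid, &pSample, &sampleSize,
                            NULL, NULL, NULL, NULL);
    std::free(pSample);           // caller releases the library-allocated buffer
    return ok;
}

Note that, as the comment above points out, the returned sample's start time may be later than the requested timestamp.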
Example #12
static char* PrintAudioInfo(
	MP4FileHandle mp4File,
	MP4TrackId trackId)
{
	static const char* mpeg4AudioNames[] = {
		"MPEG-4 AAC main",
		"MPEG-4 AAC LC",
		"MPEG-4 AAC SSR",
		"MPEG-4 AAC LTP",
		"MPEG-4 AAC HE",
		"MPEG-4 AAC Scalable",
		"MPEG-4 TwinVQ",
		"MPEG-4 CELP",
		"MPEG-4 HVXC",
		NULL, NULL,
		"MPEG-4 TTSI",
		"MPEG-4 Main Synthetic",
		"MPEG-4 Wavetable Syn",
		"MPEG-4 General MIDI",
		"MPEG-4 Algo Syn and Audio FX",
		"MPEG-4 ER AAC LC",
		NULL,
		"MPEG-4 ER AAC LTP",
		"MPEG-4 ER AAC Scalable",
		"MPEG-4 ER TwinVQ",
		"MPEG-4 ER BSAC",
		"MPEG-4 ER ACC LD",
		"MPEG-4 ER CELP",
		"MPEG-4 ER HVXC",
		"MPEG-4 ER HILN",
		"MPEG-4 ER Parametric",
		"MPEG-4 SSC",
		"MPEG-4 PS",
		"MPEG-4 MPEG Surround",
		NULL,
		"MPEG-4 Layer-1",
		"MPEG-4 Layer-2",
		"MPEG-4 Layer-3",
		"MPEG-4 DST",
		"MPEG-4 Audio Lossless",
		"MPEG-4 SLS",
		"MPEG-4 SLS non-core", 
	};

	static const u_int8_t mpegAudioTypes[] = {
		MP4_MPEG2_AAC_MAIN_AUDIO_TYPE,	// 0x66
		MP4_MPEG2_AAC_LC_AUDIO_TYPE,	// 0x67
		MP4_MPEG2_AAC_SSR_AUDIO_TYPE,	// 0x68
		MP4_MPEG2_AUDIO_TYPE,			// 0x69
		MP4_MPEG1_AUDIO_TYPE,			// 0x6B
		// private types
		MP4_PCM16_LITTLE_ENDIAN_AUDIO_TYPE,
		MP4_VORBIS_AUDIO_TYPE,
		MP4_ALAW_AUDIO_TYPE,
		MP4_ULAW_AUDIO_TYPE,
		MP4_G723_AUDIO_TYPE,
		MP4_PCM16_BIG_ENDIAN_AUDIO_TYPE,
	};
	static const char* mpegAudioNames[] = {
		"MPEG-2 AAC Main",
		"MPEG-2 AAC LC",
		"MPEG-2 AAC SSR",
		"MPEG-2 Audio (13818-3)",
		"MPEG-1 Audio (11172-3)",
		// private types
		"PCM16 (little endian)",
		"Vorbis",
		"G.711 aLaw",
		"G.711 uLaw",
		"G.723.1",
		"PCM16 (big endian)",
	};
	u_int8_t numMpegAudioTypes =
		sizeof(mpegAudioTypes) / sizeof(u_int8_t);

	const char* typeName = "Unknown";
	bool foundType = false;
	u_int8_t type = 0;
	const char *media_data_name;

	media_data_name = MP4GetTrackMediaDataName(mp4File, trackId);

	if (media_data_name == NULL) {
	  typeName = "Unknown - no media data name";
	} else if (strcasecmp(media_data_name, "samr") == 0) {
	    typeName = "AMR";
	    foundType = true;
	} else if (strcasecmp(media_data_name, "sawb") == 0) {
	    typeName = "AMR-WB";
	    foundType = true;
	} else if (strcasecmp(media_data_name, "mp4a") == 0) {
	    
	  type = MP4GetTrackEsdsObjectTypeId(mp4File, trackId);
	  switch (type) {
	  case MP4_INVALID_AUDIO_TYPE:
	    typeName = "AAC from .mov";
	    foundType = true;
	    break;
	  case MP4_MPEG4_AUDIO_TYPE:  {
	
	    type = MP4GetTrackAudioMpeg4Type(mp4File, trackId);
	    if (type == MP4_MPEG4_INVALID_AUDIO_TYPE ||
		type > NUM_ELEMENTS_IN_ARRAY(mpeg4AudioNames) || 
		mpeg4AudioNames[type - 1] == NULL) {
	      typeName = "MPEG-4 Unknown Profile";
	    } else {
	      typeName = mpeg4AudioNames[type - 1];
	      foundType = true;
	    }
	    break;
	  }
	    // fall through
	  default:
	    for (u_int8_t i = 0; i < numMpegAudioTypes; i++) {
	      if (type == mpegAudioTypes[i]) {
		typeName = mpegAudioNames[i];
		foundType = true;
		break;
	      }
	    }
	  }
	} else {
	  typeName = media_data_name;
	  foundType = true;
	}

	u_int32_t timeScale =
		MP4GetTrackTimeScale(mp4File, trackId);

	MP4Duration trackDuration =
		MP4GetTrackDuration(mp4File, trackId);

	double msDuration =
		UINT64_TO_DOUBLE(MP4ConvertFromTrackDuration(mp4File, trackId,
			trackDuration, MP4_MSECS_TIME_SCALE));

	u_int32_t avgBitRate =
		MP4GetTrackBitRate(mp4File, trackId);

	char *sInfo = (char*)MP4Malloc(256);

	// type duration avgBitrate samplingFrequency
	if (foundType)
	  snprintf(sInfo, 256, 
		  "%u\taudio\t%s%s, %.3f secs, %u kbps, %u Hz\n",
		  trackId,
		  MP4IsIsmaCrypMediaTrack(mp4File, trackId) ? "enca - " : "",
		  typeName,
		  msDuration / 1000.0,
		  (avgBitRate + 500) / 1000,
		  timeScale);
	else
	  snprintf(sInfo, 256,
		  "%u\taudio\t%s%s(%u), %.3f secs, %u kbps, %u Hz\n",
		  trackId,
		  MP4IsIsmaCrypMediaTrack(mp4File, trackId) ? "enca - " : "",
		  typeName,
		  type,
		  msDuration / 1000.0,
		  (avgBitRate + 500) / 1000,
		  timeScale);

	return sInfo;
}
Example #13
static void DumpTrack (MP4FileHandle mp4file, MP4TrackId tid, 
		       bool dump_off, bool dump_rend)
{
  uint32_t numSamples;
  MP4SampleId sid;
  uint8_t *buffer;
  uint32_t max_frame_size;
  uint32_t timescale;
  uint64_t msectime;
  const char *media_data_name;
  uint32_t len_size = 0;
  uint8_t video_type = 0;
  numSamples = MP4GetTrackNumberOfSamples(mp4file, tid);
  max_frame_size = MP4GetTrackMaxSampleSize(mp4file, tid) + 4;
  media_data_name = MP4GetTrackMediaDataName(mp4file, tid);
  if (strcasecmp(media_data_name, "avc1") == 0) {
    MP4GetTrackH264LengthSize(mp4file, tid, &len_size);
  } else if (strcasecmp(media_data_name, "mp4v") == 0) {
    video_type = MP4GetTrackEsdsObjectTypeId(mp4file, tid);
  }
  buffer = (uint8_t *)malloc(max_frame_size);
  if (buffer == NULL) {
    printf("couldn't get buffer\n");
    return;
  }

  timescale = MP4GetTrackTimeScale(mp4file, tid);
  printf("mp4file %s, track %d, samples %d, timescale %d\n", 
	 Mp4FileName, tid, numSamples, timescale);

  for (sid = 1; sid <= numSamples; sid++) {
    MP4Timestamp sampleTime;
    MP4Duration sampleDuration, sampleRenderingOffset;
    bool isSyncSample = FALSE;
    bool ret;
    u_int8_t *temp;
    uint32_t this_frame_size = max_frame_size;
    temp = buffer;
    ret = MP4ReadSample(mp4file, 
			tid,
			sid,
			&temp,
			&this_frame_size,
			&sampleTime,
			&sampleDuration,
			&sampleRenderingOffset,
			&isSyncSample);

    msectime = sampleTime;
    msectime *= TO_U64(1000);
    msectime /= timescale;

    printf("sampleId %6d, size %5u time "U64"("U64")",
	  sid,  MP4GetSampleSize(mp4file, tid, sid), 
	   sampleTime, msectime);
    if (dump_rend) printf(" %6"U64F, sampleRenderingOffset);
    if (strcasecmp(media_data_name, "mp4v") == 0) {
      if (MP4_IS_MPEG4_VIDEO_TYPE(video_type))
	ParseMpeg4(temp, this_frame_size, dump_off);
    } else if (strcasecmp(media_data_name, "avc1") == 0) {
      ParseH264(temp, this_frame_size, len_size, dump_off);
    }
    printf("\n");
  }
  free(buffer);
}
Example #14
File: aac.c  Project: Koss64/deadbeef
// returns -1 for error, 0 for mp4, 1 for aac
int
aac_probe (DB_FILE *fp, const char *fname, MP4FILE_CB *cb, float *duration, int *samplerate, int *channels, int *totalsamples, int *mp4track, MP4FILE *pmp4) {
    // try mp4

    if (mp4track) {
        *mp4track = -1;
    }
    if (*pmp4) {
        *pmp4 = NULL;
    }
    *duration = -1;
#ifdef USE_MP4FF
    mp4ff_t *mp4 = mp4ff_open_read (cb);
#else
    MP4FileHandle mp4 = MP4ReadProvider (fname, 0, cb);
#endif
    if (!mp4) {
        trace ("not an mp4 file\n");
        return -1;
    }
    if (pmp4) {
        *pmp4 = mp4;
    }
#ifdef USE_MP4FF
    int ntracks = mp4ff_total_tracks (mp4);
    if (ntracks > 0) {
        trace ("m4a container detected, ntracks=%d\n", ntracks);
        int i = -1;
        trace ("looking for mp4 data...\n");
        int sr = -1;
        for (i = 0; i < ntracks; i++) {
            unsigned char*  buff = 0;
            unsigned int    buff_size = 0;
            mp4AudioSpecificConfig mp4ASC;
            mp4ff_get_decoder_config(mp4, i, &buff, &buff_size);
            if(buff){
                int rc = AudioSpecificConfig(buff, buff_size, &mp4ASC);
                sr = mp4ASC.samplingFrequency;
                free(buff);
                if(rc < 0)
                    continue;
                break;
            }
        }

        if (i != ntracks) 
        {
            trace ("mp4 track: %d\n", i);
            if (sr != -1) {
                *samplerate = sr;
            }
            else {
                *samplerate = mp4ff_get_sample_rate (mp4, i);
            }
            *channels = mp4ff_get_channel_count (mp4, i);
            int samples = mp4ff_num_samples(mp4, i) * 1024;
            samples = (int64_t)samples * (*samplerate) / mp4ff_time_scale (mp4, i);

            trace ("mp4 nsamples=%d, samplerate=%d, timescale=%d, duration=%lld\n", samples, *samplerate, mp4ff_time_scale(mp4, i), mp4ff_get_track_duration(mp4, i));
            *duration = (float)samples / (*samplerate);

            if (totalsamples) {
                *totalsamples = samples;
            }
            if (mp4track) {
                *mp4track = i;
            }
            if (!*pmp4) {
                mp4ff_close (mp4);
            }
            return 0;
        }
    }
#else
    MP4FileHandle mp4File = mp4;
    MP4TrackId trackId = MP4FindTrackId(mp4File, 0, "audio", 0);
    trace ("trackid: %d\n", trackId);
    uint32_t timeScale = MP4GetTrackTimeScale(mp4File, trackId);
    MP4Duration trackDuration = MP4GetTrackDuration(mp4File, trackId);
    MP4SampleId numSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);
    u_int8_t* pConfig;
    uint32_t configSize = 0;
    bool res = MP4GetTrackESConfiguration(mp4File, trackId, &pConfig, &configSize);
    if (res && pConfig) {
        mp4AudioSpecificConfig mp4ASC;
        int rc = AudioSpecificConfig(pConfig, configSize, &mp4ASC);
        free (pConfig);
        if (rc >= 0) {
            *samplerate = mp4ASC.samplingFrequency;
            *channels = MP4GetTrackAudioChannels (mp4File, trackId);
//            int64_t duration = MP4ConvertFromTrackDuration (mp4File, trackId, trackDuration, timeScale);
            int samples = MP4GetTrackNumberOfSamples (mp4File, trackId) * 1024 * (*channels);
            trace ("mp4 nsamples=%d, timescale=%d, samplerate=%d\n", samples, timeScale, *samplerate);
            *duration = (float)samples / (*samplerate);

            if (totalsamples) {
                *totalsamples = samples;
            }
            if (mp4track) {
                *mp4track = trackId;
            }
            if (!*pmp4) {
                MP4Close (mp4);
            }
            return 0;
        }
    }
#endif
    if (*pmp4) {
        *pmp4 = NULL;
    }

    if (mp4) {
#if USE_MP4FF
        mp4ff_close (mp4);
#else
        MP4Close (mp4);
#endif
        mp4 = NULL;
    }
    trace ("mp4 track not found, looking for aac stream...\n");

    // not an mp4, try raw aac
#if USE_MP4FF
    deadbeef->rewind (fp);
#endif
    if (parse_aac_stream (fp, samplerate, channels, duration, totalsamples) == -1) {
        trace ("aac stream not found\n");
        return -1;
    }
    trace ("found aac stream\n");
    return 1;
}
Example #15
bool MP4Metadata::ReadMetadata(CFErrorRef *error)
{
	// Start from scratch
	CFDictionaryRemoveAllValues(mMetadata);
	CFDictionaryRemoveAllValues(mChangedMetadata);

	UInt8 buf [PATH_MAX];
	if(!CFURLGetFileSystemRepresentation(mURL, FALSE, buf, PATH_MAX))
		return false;
	
	// Open the file for reading
	MP4FileHandle file = MP4Read(reinterpret_cast<const char *>(buf));
	
	if(MP4_INVALID_FILE_HANDLE == file) {
		if(error) {
			CFMutableDictionaryRef errorDictionary = CFDictionaryCreateMutable(kCFAllocatorDefault, 
																			   32,
																			   &kCFTypeDictionaryKeyCallBacks,
																			   &kCFTypeDictionaryValueCallBacks);
			
			CFStringRef displayName = CreateDisplayNameForURL(mURL);
			CFStringRef errorString = CFStringCreateWithFormat(kCFAllocatorDefault, 
															   NULL, 
															   CFCopyLocalizedString(CFSTR("The file “%@” is not a valid MPEG-4 file."), ""), 
															   displayName);
			
			CFDictionarySetValue(errorDictionary, 
								 kCFErrorLocalizedDescriptionKey, 
								 errorString);
			
			CFDictionarySetValue(errorDictionary, 
								 kCFErrorLocalizedFailureReasonKey, 
								 CFCopyLocalizedString(CFSTR("Not an MPEG-4 file"), ""));
			
			CFDictionarySetValue(errorDictionary, 
								 kCFErrorLocalizedRecoverySuggestionKey, 
								 CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), ""));
			
			CFRelease(errorString), errorString = NULL;
			CFRelease(displayName), displayName = NULL;
			
			*error = CFErrorCreate(kCFAllocatorDefault, 
								   AudioMetadataErrorDomain, 
								   AudioMetadataFileFormatNotRecognizedError, 
								   errorDictionary);
			
			CFRelease(errorDictionary), errorDictionary = NULL;				
		}
		
		return false;
	}

	// Read the properties
	if(0 < MP4GetNumberOfTracks(file)) {
		// Should be type 'soun', media data name'mp4a'
		MP4TrackId trackID = MP4FindTrackId(file, 0);

		// Verify this is an MPEG-4 audio file
		if(MP4_INVALID_TRACK_ID == trackID || strncmp("soun", MP4GetTrackType(file, trackID), 4)) {
			MP4Close(file), file = NULL;
			
			if(error) {
				CFMutableDictionaryRef errorDictionary = CFDictionaryCreateMutable(kCFAllocatorDefault, 
																				   32,
																				   &kCFTypeDictionaryKeyCallBacks,
																				   &kCFTypeDictionaryValueCallBacks);
				
				CFStringRef displayName = CreateDisplayNameForURL(mURL);
				CFStringRef errorString = CFStringCreateWithFormat(kCFAllocatorDefault, 
																   NULL, 
																   CFCopyLocalizedString(CFSTR("The file “%@” is not a valid MPEG-4 file."), ""), 
																   displayName);
				
				CFDictionarySetValue(errorDictionary, 
									 kCFErrorLocalizedDescriptionKey, 
									 errorString);
				
				CFDictionarySetValue(errorDictionary, 
									 kCFErrorLocalizedFailureReasonKey, 
									 CFCopyLocalizedString(CFSTR("Not an MPEG-4 file"), ""));
				
				CFDictionarySetValue(errorDictionary, 
									 kCFErrorLocalizedRecoverySuggestionKey, 
									 CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), ""));
				
				CFRelease(errorString), errorString = NULL;
				CFRelease(displayName), displayName = NULL;
				
				*error = CFErrorCreate(kCFAllocatorDefault, 
									   AudioMetadataErrorDomain, 
									   AudioMetadataFileFormatNotSupportedError, 
									   errorDictionary);
				
				CFRelease(errorDictionary), errorDictionary = NULL;				
			}
			
			return false;
		}
		
		MP4Duration mp4Duration = MP4GetTrackDuration(file, trackID);
		uint32_t mp4TimeScale = MP4GetTrackTimeScale(file, trackID);
		
		CFNumberRef totalFrames = CFNumberCreate(kCFAllocatorDefault, kCFNumberLongLongType, &mp4Duration);
		CFDictionarySetValue(mMetadata, kPropertiesTotalFramesKey, totalFrames);
		CFRelease(totalFrames), totalFrames = NULL;
		
		CFNumberRef sampleRate = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &mp4TimeScale);
		CFDictionarySetValue(mMetadata, kPropertiesSampleRateKey, sampleRate);
		CFRelease(sampleRate), sampleRate = NULL;
		
		double length = static_cast<double>(mp4Duration) / mp4TimeScale;
		CFNumberRef duration = CFNumberCreate(kCFAllocatorDefault, kCFNumberDoubleType, &length);
		CFDictionarySetValue(mMetadata, kPropertiesDurationKey, duration);
		CFRelease(duration), duration = NULL;

		// "mdia.minf.stbl.stsd.*[0].channels"
		int channels = MP4GetTrackAudioChannels(file, trackID);
		CFNumberRef channelsPerFrame = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &channels);
		CFDictionaryAddValue(mMetadata, kPropertiesChannelsPerFrameKey, channelsPerFrame);
		CFRelease(channelsPerFrame), channelsPerFrame = NULL;

		// ALAC files
		if(MP4HaveTrackAtom(file, trackID, "mdia.minf.stbl.stsd.alac")) {
			CFDictionarySetValue(mMetadata, kPropertiesFormatNameKey, CFSTR("Apple Lossless"));
			
			uint64_t sampleSize;
			uint8_t *decoderConfig;
			uint32_t decoderConfigSize;
			if(MP4GetTrackBytesProperty(file, trackID, "mdia.minf.stbl.stsd.alac.alac.decoderConfig", &decoderConfig, &decoderConfigSize) && 28 <= decoderConfigSize) {
				// The ALAC magic cookie seems to have the following layout (28 bytes, BE):
				// Byte 10: Sample size
				// Bytes 25-28: Sample rate
				CFNumberRef bitsPerChannel = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt8Type, decoderConfig + 9);
				CFDictionaryAddValue(mMetadata, kPropertiesBitsPerChannelKey, bitsPerChannel);
				CFRelease(bitsPerChannel), bitsPerChannel = NULL;

				double losslessBitrate = static_cast<double>(mp4TimeScale * channels * decoderConfig[9]) / 1000;
				CFNumberRef bitrate = CFNumberCreate(kCFAllocatorDefault, kCFNumberDoubleType, &losslessBitrate);
				CFDictionarySetValue(mMetadata, kPropertiesBitrateKey, bitrate);
				CFRelease(bitrate), bitrate = NULL;

				free(decoderConfig), decoderConfig = NULL;
			}			
			else if(MP4GetTrackIntegerProperty(file, trackID, "mdia.minf.stbl.stsd.alac.sampleSize", &sampleSize)) {
				CFNumberRef bitsPerChannel = CFNumberCreate(kCFAllocatorDefault, kCFNumberLongLongType, &sampleSize);
				CFDictionaryAddValue(mMetadata, kPropertiesBitsPerChannelKey, bitsPerChannel);
				CFRelease(bitsPerChannel), bitsPerChannel = NULL;

				double losslessBitrate = static_cast<double>(mp4TimeScale * channels * sampleSize) / 1000;
				CFNumberRef bitrate = CFNumberCreate(kCFAllocatorDefault, kCFNumberDoubleType, &losslessBitrate);
				CFDictionarySetValue(mMetadata, kPropertiesBitrateKey, bitrate);
				CFRelease(bitrate), bitrate = NULL;
			}
		}

		// AAC files
		if(MP4HaveTrackAtom(file, trackID, "mdia.minf.stbl.stsd.mp4a")) {
			CFDictionarySetValue(mMetadata, kPropertiesFormatNameKey, CFSTR("AAC"));

			// "mdia.minf.stbl.stsd.*.esds.decConfigDescr.avgBitrate"
			uint32_t trackBitrate = MP4GetTrackBitRate(file, trackID) / 1000;
			CFNumberRef bitrate = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &trackBitrate);
			CFDictionaryAddValue(mMetadata, kPropertiesBitrateKey, bitrate);
			CFRelease(bitrate), bitrate = NULL;
			
		}
	}
	// No valid tracks in file
	else {
		MP4Close(file), file = NULL;
		
		if(error) {
			CFMutableDictionaryRef errorDictionary = CFDictionaryCreateMutable(kCFAllocatorDefault, 
																			   32,
																			   &kCFTypeDictionaryKeyCallBacks,
																			   &kCFTypeDictionaryValueCallBacks);
			
			CFStringRef displayName = CreateDisplayNameForURL(mURL);
			CFStringRef errorString = CFStringCreateWithFormat(kCFAllocatorDefault, 
															   NULL, 
															   CFCopyLocalizedString(CFSTR("The file “%@” is not a valid MPEG-4 file."), ""), 
															   displayName);
			
			CFDictionarySetValue(errorDictionary, 
								 kCFErrorLocalizedDescriptionKey, 
								 errorString);
			
			CFDictionarySetValue(errorDictionary, 
								 kCFErrorLocalizedFailureReasonKey, 
								 CFCopyLocalizedString(CFSTR("Not an MPEG-4 file"), ""));
			
			CFDictionarySetValue(errorDictionary, 
								 kCFErrorLocalizedRecoverySuggestionKey, 
								 CFCopyLocalizedString(CFSTR("The file's extension may not match the file's type."), ""));
			
			CFRelease(errorString), errorString = NULL;
			CFRelease(displayName), displayName = NULL;
			
			*error = CFErrorCreate(kCFAllocatorDefault, 
								   AudioMetadataErrorDomain, 
								   AudioMetadataFileFormatNotSupportedError, 
								   errorDictionary);
			
			CFRelease(errorDictionary), errorDictionary = NULL;				
		}
		
		return false;
	}

	// Read the tags
	const MP4Tags *tags = MP4TagsAlloc();

	if(NULL == tags) {
		MP4Close(file), file = NULL;

		if(error)
			*error = CFErrorCreate(kCFAllocatorDefault, kCFErrorDomainPOSIX, ENOMEM, NULL);

		return false;
	}
	
	MP4TagsFetch(tags, file);
	
	// Album title
	if(tags->album) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->album, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataAlbumTitleKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Artist
	if(tags->artist) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->artist, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataArtistKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Album Artist
	if(tags->albumArtist) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->albumArtist, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataAlbumArtistKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Genre
	if(tags->genre) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->genre, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataGenreKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Release date
	if(tags->releaseDate) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->releaseDate, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataReleaseDateKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Composer
	if(tags->composer) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->composer, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataComposerKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Comment
	if(tags->comments) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->comments, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataCommentKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Track title
	if(tags->name) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->name, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataTitleKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Track number
	if(tags->track) {
		if(tags->track->index) {
			CFNumberRef num = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt16Type, &tags->track->index);
			CFDictionarySetValue(mMetadata, kMetadataTrackNumberKey, num);
			CFRelease(num), num = NULL;
		}
		
		if(tags->track->total) {
			CFNumberRef num = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt16Type, &tags->track->total);
			CFDictionarySetValue(mMetadata, kMetadataTrackTotalKey, num);
			CFRelease(num), num = NULL;
		}
	}
	
	// Disc number
	if(tags->disk) {
		if(tags->disk->index) {
			CFNumberRef num = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt16Type, &tags->disk->index);
			CFDictionarySetValue(mMetadata, kMetadataDiscNumberKey, num);
			CFRelease(num), num = NULL;
		}
		
		if(tags->disk->total) {
			CFNumberRef num = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt16Type, &tags->disk->total);
			CFDictionarySetValue(mMetadata, kMetadataDiscTotalKey, num);
			CFRelease(num), num = NULL;
		}
	}
	
	// Compilation
	if(tags->compilation)
		CFDictionarySetValue(mMetadata, kMetadataCompilationKey, *(tags->compilation) ? kCFBooleanTrue : kCFBooleanFalse);
	
	// BPM
	if(tags->tempo) {
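		// tags->tempo is checked here, but the BPM value is not copied into mMetadata in this example.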
		
	}
	
	// Lyrics
	if(tags->lyrics) {
		CFStringRef str = CFStringCreateWithCString(kCFAllocatorDefault, tags->lyrics, kCFStringEncodingUTF8);
		CFDictionarySetValue(mMetadata, kMetadataLyricsKey, str);
		CFRelease(str), str = NULL;
	}
	
	// Album art
	if(tags->artworkCount) {
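		// Each artwork entry is written to the same front-cover key, so only the last image is retained.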
		for(uint32_t i = 0; i < tags->artworkCount; ++i) {
			CFDataRef data = CFDataCreate(kCFAllocatorDefault, reinterpret_cast<const UInt8 *>(tags->artwork[i].data), tags->artwork[i].size);
			CFDictionarySetValue(mMetadata, kAlbumArtFrontCoverKey, data);
			CFRelease(data), data = NULL;
		}
	}
	
	// ReplayGain
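	// ReplayGain values are stored as iTunes freeform ("----") atoms; MP4ItmfGetItemsByMeaning looks them up
	// by meaning and name, and the string payload is parsed with sscanf.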
	// Reference loudness
	MP4ItmfItemList *items = MP4ItmfGetItemsByMeaning(file, "com.apple.iTunes", "replaygain_reference_loudness");
	if(NULL != items) {
		float referenceLoudnessValue;
		if(1 <= items->size && 1 <= items->elements[0].dataList.size && sscanf(reinterpret_cast<const char *>(items->elements[0].dataList.elements[0].value), "%f", &referenceLoudnessValue)) {
			CFNumberRef referenceLoudness = CFNumberCreate(kCFAllocatorDefault, kCFNumberFloatType, &referenceLoudnessValue);
			CFDictionaryAddValue(mMetadata, kReplayGainReferenceLoudnessKey, referenceLoudness);
			CFRelease(referenceLoudness), referenceLoudness = NULL;
		}
		
		MP4ItmfItemListFree(items), items = NULL;
	}
	
	// Track gain
	items = MP4ItmfGetItemsByMeaning(file, "com.apple.iTunes", "replaygain_track_gain");
	if(NULL != items) {
		float trackGainValue;
		if(1 <= items->size && 1 <= items->elements[0].dataList.size && sscanf(reinterpret_cast<const char *>(items->elements[0].dataList.elements[0].value), "%f", &trackGainValue)) {
			CFNumberRef trackGain = CFNumberCreate(kCFAllocatorDefault, kCFNumberFloatType, &trackGainValue);
			CFDictionaryAddValue(mMetadata, kReplayGainTrackGainKey, trackGain);
			CFRelease(trackGain), trackGain = NULL;
		}
		
		MP4ItmfItemListFree(items), items = NULL;
	}		
	
	// Track peak
	items = MP4ItmfGetItemsByMeaning(file, "com.apple.iTunes", "replaygain_track_peak");
	if(NULL != items) {
		float trackPeakValue;
		if(1 <= items->size && 1 <= items->elements[0].dataList.size && sscanf(reinterpret_cast<const char *>(items->elements[0].dataList.elements[0].value), "%f", &trackPeakValue)) {
			CFNumberRef trackPeak = CFNumberCreate(kCFAllocatorDefault, kCFNumberFloatType, &trackPeakValue);
			CFDictionaryAddValue(mMetadata, kReplayGainTrackPeakKey, trackPeak);
			CFRelease(trackPeak), trackPeak = NULL;
		}
		
		MP4ItmfItemListFree(items), items = NULL;
	}		
	
	// Album gain
	items = MP4ItmfGetItemsByMeaning(file, "com.apple.iTunes", "replaygain_album_gain");
	if(NULL != items) {
		float albumGainValue;
		if(1 <= items->size && 1 <= items->elements[0].dataList.size && sscanf(reinterpret_cast<const char *>(items->elements[0].dataList.elements[0].value), "%f", &albumGainValue)) {
			CFNumberRef albumGain = CFNumberCreate(kCFAllocatorDefault, kCFNumberFloatType, &albumGainValue);
			CFDictionaryAddValue(mMetadata, kReplayGainAlbumGainKey, albumGain);
			CFRelease(albumGain), albumGain = NULL;
		}
		
		MP4ItmfItemListFree(items), items = NULL;
	}		
	
	// Album peak
	items = MP4ItmfGetItemsByMeaning(file, "com.apple.iTunes", "replaygain_album_peak");
	if(NULL != items) {
		float albumPeakValue;
		if(1 <= items->size && 1 <= items->elements[0].dataList.size && sscanf(reinterpret_cast<const char *>(items->elements[0].dataList.elements[0].value), "%f", &albumPeakValue)) {
			CFNumberRef albumPeak = CFNumberCreate(kCFAllocatorDefault, kCFNumberFloatType, &albumPeakValue);
			CFDictionaryAddValue(mMetadata, kReplayGainAlbumPeakKey, albumPeak);
			CFRelease(albumPeak), albumPeak = NULL;
		}
		
		MP4ItmfItemListFree(items), items = NULL;
	}

	// Clean up
	MP4TagsFree(tags), tags = NULL;
	MP4Close(file), file = NULL;

	return true;
}
Example #16
0
int CMp4File::create_media (CPlayerSession *psptr,
			    int have_audio_driver,
			    control_callback_vft_t *cc_vft)
{
  uint video_count, video_offset;
  uint text_count, text_offset;
  uint audio_count, audio_offset;
  MP4TrackId trackId;
  video_query_t *vq;
  audio_query_t *aq;
  text_query_t *tq;
  uint ix;
  codec_plugin_t *plugin;
  int ret_value = 0;
  uint8_t *foo;
  u_int32_t bufsize;
  
  uint32_t verb = MP4GetVerbosity(m_mp4file);
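  // temporarily silence mp4v2 error output while querying the file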
  MP4SetVerbosity(m_mp4file, verb & ~(MP4_DETAILS_ERROR));
  video_count = MP4GetNumberOfTracks(m_mp4file, MP4_VIDEO_TRACK_TYPE);
  audio_count = MP4GetNumberOfTracks(m_mp4file, MP4_AUDIO_TRACK_TYPE);
  text_count = MP4GetNumberOfTracks(m_mp4file, MP4_CNTL_TRACK_TYPE);
  mp4f_message(LOG_DEBUG, "cntl tracks %u", text_count);
  MP4SetVerbosity(m_mp4file, verb);

  if (video_count == 0 && audio_count == 0 && text_count == 0) {
    psptr->set_message("No audio, video or control tracks in file");
    return -1;
  }

  if (video_count > 0) {
    vq = (video_query_t *)malloc(sizeof(video_query_t) * video_count);
    memset(vq, 0, sizeof(video_query_t) * video_count);
  } else {
    vq = NULL;
  }
  if (have_audio_driver && audio_count > 0) {
    aq = (audio_query_t *)malloc(sizeof(audio_query_t) * audio_count);
    memset(aq, 0, sizeof(audio_query_t) * audio_count);
  } else {
    aq = NULL;
  }

  if (text_count > 0) {
    tq = (text_query_t *)malloc(sizeof(text_query_t) * text_count);
    memset(tq, 0, sizeof(text_query_t) * text_count);
  } else {
    tq = NULL;
  }
  for (ix = 0, video_offset = 0; ix < video_count; ix++) {
    trackId = MP4FindTrackId(m_mp4file, ix, MP4_VIDEO_TRACK_TYPE);
    const char *media_data_name;
    media_data_name = MP4GetTrackMediaDataName(m_mp4file, trackId);
    // for now, treat mp4v and encv the same
    vq[video_offset].track_id = trackId;
    vq[video_offset].stream_type = STREAM_TYPE_MP4_FILE;
    vq[video_offset].compressor = media_data_name;
    if (strcasecmp(media_data_name, "mp4v") == 0 ||
	strcasecmp(media_data_name, "encv") == 0) {
      uint8_t video_type = MP4GetTrackEsdsObjectTypeId(m_mp4file, trackId);
      uint8_t profileID = MP4GetVideoProfileLevel(m_mp4file, trackId);
      mp4f_message(LOG_DEBUG, "MP4 - got track %x profile ID %d", 
		 trackId, profileID);
      MP4SetVerbosity(m_mp4file, verb & ~(MP4_DETAILS_ERROR));
      MP4GetTrackESConfiguration(m_mp4file, trackId, &foo, &bufsize);
      MP4SetVerbosity(m_mp4file, verb);
      vq[video_offset].type = video_type;
      vq[video_offset].profile = profileID;
      vq[video_offset].fptr = NULL;
      vq[video_offset].config = foo;
      vq[video_offset].config_len = bufsize;
    } else if (strcasecmp(media_data_name, "avc1") == 0) {
      uint8_t profile, level;
      uint8_t **seqheader, **pictheader;
      uint32_t *pictheadersize, *seqheadersize;
      uint32_t ix;
      MP4GetTrackH264ProfileLevel(m_mp4file, trackId, &profile, &level);
      MP4GetTrackH264SeqPictHeaders(m_mp4file, trackId, 
				    &seqheader, &seqheadersize,
				    &pictheader, &pictheadersize);
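      // total size of all SPS/PPS headers, each prefixed with a 4-byte start code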
      bufsize = 0;
      for (ix = 0; seqheadersize[ix] != 0; ix++) {
	bufsize += seqheadersize[ix] + 4;
      }
      for (ix = 0; pictheadersize[ix] != 0; ix++) {
	bufsize += pictheadersize[ix] + 4;
      }
      foo = (uint8_t *)malloc(bufsize + 4);
      memset(foo, 0, bufsize + 4);
      uint32_t copied = 0;
      // headers do not have the byte stream start code stored in the file
      for (ix = 0; seqheadersize[ix] != 0; ix++) {
	foo[copied] = 0;
	foo[copied + 1] = 0;
	foo[copied + 2] = 0;
	foo[copied + 3] = 1;
	copied += 4; // add header
	memcpy(foo + copied, 
	       seqheader[ix], 
	       seqheadersize[ix]);
	copied += seqheadersize[ix];
	free(seqheader[ix]);
      }
      free(seqheader);
      free(seqheadersize);
      for (ix = 0; pictheadersize[ix] != 0; ix++) {
	foo[copied] = 0;
	foo[copied + 1] = 0;
	foo[copied + 2] = 0;
	foo[copied + 3] = 1;
	copied += 4; // add header
	memcpy(foo + copied, 
	       pictheader[ix], 
	       pictheadersize[ix]);
	copied += pictheadersize[ix];
	free(pictheader[ix]);
      }
      free(pictheader);
      free(pictheadersize);
	
      vq[video_offset].type = level;
      vq[video_offset].profile = profile;
      vq[video_offset].fptr = NULL;
      vq[video_offset].config = foo;
      vq[video_offset].config_len = bufsize;
    } else {
      MP4GetTrackVideoMetadata(m_mp4file, trackId, &foo, &bufsize);
      vq[video_offset].config = foo;
      vq[video_offset].config_len = bufsize;
    }

      
    plugin = check_for_video_codec(vq[video_offset].stream_type,
				   vq[video_offset].compressor,
				   NULL,
				   vq[video_offset].type,
				   vq[video_offset].profile,
				   vq[video_offset].config,
				   vq[video_offset].config_len,
				   &config);
    if (plugin == NULL) {
      psptr->set_message("Can't find plugin for video %s type %d, profile %d",
			 vq[video_offset].compressor,
			 vq[video_offset].type, 
			 vq[video_offset].profile);
      m_illegal_video_codec++;
      ret_value = 1;
      // possibly memleak for foo here
    } else {
      vq[video_offset].h = MP4GetTrackVideoHeight(m_mp4file, trackId);
      vq[video_offset].w = MP4GetTrackVideoWidth(m_mp4file, trackId);
      vq[video_offset].frame_rate = MP4GetTrackVideoFrameRate(m_mp4file, trackId);
      vq[video_offset].enabled = 0;
      vq[video_offset].reference = NULL;
      video_offset++;
    }
  }

  audio_offset = 0;
  if (have_audio_driver) {
    for (ix = 0; ix < audio_count; ix++) {
      trackId = MP4FindTrackId(m_mp4file, ix, MP4_AUDIO_TRACK_TYPE);
      const char *media_data_name;
      media_data_name = MP4GetTrackMediaDataName(m_mp4file, trackId);

      aq[audio_offset].track_id = trackId;
      aq[audio_offset].stream_type = STREAM_TYPE_MP4_FILE;
      aq[audio_offset].compressor = media_data_name;
      if (strcasecmp(media_data_name, "mp4a") == 0 ||
	  strcasecmp(media_data_name, "enca") == 0) {
	uint8_t *userdata = NULL;
	u_int32_t userdata_size;
	aq[audio_offset].type = MP4GetTrackEsdsObjectTypeId(m_mp4file, trackId);
	MP4SetVerbosity(m_mp4file, verb & ~(MP4_DETAILS_ERROR));
	aq[audio_offset].profile = MP4GetAudioProfileLevel(m_mp4file);
	MP4GetTrackESConfiguration(m_mp4file, 
				   trackId, 
				   &userdata, 
				   &userdata_size);
	MP4SetVerbosity(m_mp4file, verb);
	aq[audio_offset].config = userdata;
	aq[audio_offset].config_len = userdata_size;
      }
      plugin = check_for_audio_codec(aq[audio_offset].stream_type,
				     aq[audio_offset].compressor,
				     NULL,
				     aq[audio_offset].type,
				     aq[audio_offset].profile,
				     aq[audio_offset].config,
				     aq[audio_offset].config_len,
				     &config);
      if (plugin != NULL) {
	aq[audio_offset].fptr = NULL;
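	// for MP4 audio tracks the media time scale is normally the sampling rate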
	aq[audio_offset].sampling_freq = 
	  MP4GetTrackTimeScale(m_mp4file, trackId);
	MP4SetVerbosity(m_mp4file, verb & ~(MP4_DETAILS_ERROR));
	aq[audio_offset].chans = MP4GetTrackAudioChannels(m_mp4file, trackId);
	MP4SetVerbosity(m_mp4file, verb);
	aq[audio_offset].enabled = 0;
	aq[audio_offset].reference = NULL;
	audio_offset++;
	m_have_audio = true;
      } else {
	m_illegal_audio_codec++;
	ret_value = 1;
      }
    }
  } else {
    if (audio_count)
      ret_value = 1;
  }
  text_offset = 0;
  for (ix = 0; ix < text_count; ix++) {
    trackId = MP4FindTrackId(m_mp4file, ix, MP4_CNTL_TRACK_TYPE);
    const char *media_data_name;
    media_data_name = MP4GetTrackMediaDataName(m_mp4file, trackId);

    tq[text_offset].track_id = trackId;
    tq[text_offset].stream_type = STREAM_TYPE_MP4_FILE;
    tq[text_offset].compressor = media_data_name;
    plugin = check_for_text_codec(tq[text_offset].stream_type,
				  tq[text_offset].compressor,
				  NULL,
				  NULL,
				  0, 
				  &config);
    if (plugin != NULL) {
      tq[text_offset].fptr = NULL;
      tq[text_offset].enabled = 0;
      tq[text_offset].reference = NULL;
      text_offset++;
    } else {
      m_illegal_text_codec++;
      ret_value = 1;
    }
  }

  if (video_offset == 0 && audio_offset == 0 && text_offset == 0) {
    psptr->set_message("No playable codecs in mp4 file");
    return -1;
  }
  if (cc_vft && cc_vft->media_list_query != NULL) {
    (cc_vft->media_list_query)(psptr, video_offset, vq, audio_offset, aq, text_offset, tq);
  } else {
    if (video_offset > 0) {
      vq[0].enabled = 1;
    }
    if (audio_offset > 0) {
      aq[0].enabled = 1;
    }
    if (text_offset > 0) {
      tq[0].enabled = 1;
    }
  }

  int vidret, audret, textret;
  uint start_desc = 1;
  vidret = create_video(psptr, vq, video_offset, start_desc);
  free(vq);

  if (vidret < 0) {
    free(aq);
    free(tq);
    return -1;
  }
 
  audret = create_audio(psptr, aq, audio_offset, start_desc);
  free(aq);

  textret = create_text(psptr, tq, text_offset, start_desc);
  free(tq);

  if (audret < 0 || textret < 0) ret_value = -1;

  char *name;
  verb = MP4GetVerbosity(m_mp4file);
  MP4SetVerbosity(m_mp4file, verb & ~(MP4_DETAILS_ERROR));
  if (MP4GetMetadataName(m_mp4file, &name) &&
      name != NULL) {
    psptr->set_session_desc(0, name);
    free(name);
  }
  MP4SetVerbosity(m_mp4file, verb);
  
  return (ret_value);
}
Example #17
0
int MP4Streamer::Open(const char *filename)
{
	//Log
	Log(">MP4 opening [%s]\n",filename);

	//Lock
	pthread_mutex_lock(&mutex);

	//If already opened
	if (opened)
	{
		//Unlock
		pthread_mutex_unlock(&mutex);
		//Return error
		return Error("Already opened\n");
	}
	
	// Open mp4 file
	mp4 = MP4Read(filename);

	// If not valid
	if (mp4 == MP4_INVALID_FILE_HANDLE)
	{
		//Unlock
		pthread_mutex_unlock(&mutex);
		//Return error
		return Error("Invalid file handle for %s\n",filename);
	}
	
	//No tracks
	audio = NULL;
	video = NULL;
	text = NULL;

	//Iterate through the tracks
	DWORD i = 0;

	// Get the first hint track
	MP4TrackId hintId = MP4_INVALID_TRACK_ID;

	// Iterate hint tracks
	do
	{
		// Get the next hint track
		hintId = MP4FindTrackId(mp4, i++, MP4_HINT_TRACK_TYPE, 0);
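		// MP4FindTrackId returns MP4_INVALID_TRACK_ID once the index runs past the last hint track,
		// which terminates the do/while below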

		Log("-Found hint track [hintId:%d]\n", hintId);

		// Get associated track
		MP4TrackId trackId = MP4GetHintTrackReferenceTrackId(mp4, hintId);

		// Check it's good
		if (trackId != MP4_INVALID_TRACK_ID)
		{
			// Get track type
			const char *type = MP4GetTrackType(mp4, trackId);

			// Get rtp track
			char *name;
			BYTE payload;
			MP4GetHintTrackRtpPayload(mp4, hintId, &name, &payload, NULL, NULL);

			Log("-Streaming media [trackId:%d,type:\"%s\",name:\"%s\",payload:%d]\n", trackId, type, name, payload);

			// Check track type
			if ((strcmp(type, MP4_AUDIO_TRACK_TYPE) == 0) && !audio)
			{
				// Depending on the name
				if (strcmp("PCMU", name) == 0)
					//Create new audio track
					audio = new MP4RtpTrack(MediaFrame::Audio,AudioCodec::PCMU,payload);
				else if (strcmp("PCMA", name) == 0)
					//Create new audio track
					audio = new MP4RtpTrack(MediaFrame::Audio,AudioCodec::PCMA,payload);
				else
					//Skip
					continue;

				// Get time scale
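				// (for hinted files this typically matches the RTP timestamp clock rate)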
				audio->timeScale = MP4GetTrackTimeScale(mp4, hintId);

				//Store the other values
				audio->mp4 = mp4;
				audio->hint = hintId;
				audio->track = trackId;
				audio->sampleId = 1;
				audio->packetIndex = 0;

			} else if ((strcmp(type, MP4_VIDEO_TRACK_TYPE) == 0) && !video) {
				// Depending on the name
				if (strcmp("H263", name) == 0)
					//Create new video track
					video = new MP4RtpTrack(MediaFrame::Video,VideoCodec::H263_1996,payload);
				else if (strcmp("H263-1998", name) == 0)
					//Create new video track
					video = new MP4RtpTrack(MediaFrame::Video,VideoCodec::H263_1998,payload);
				else if (strcmp("H263-2000", name) == 0)
					//Create new video track
					video = new MP4RtpTrack(MediaFrame::Video,VideoCodec::H263_1998,payload);
				else if (strcmp("H264", name) == 0)
					//Create new video track
					video = new MP4RtpTrack(MediaFrame::Video,VideoCodec::H264,payload);
				else
					continue;
					
				// Get time scale
				video->timeScale = MP4GetTrackTimeScale(mp4, hintId);
				// it's video
				video->mp4 = mp4;
				video->hint = hintId;
				video->track = trackId;
				video->sampleId = 1;
				video->packetIndex = 0;
			}
		} 
	} while (hintId != MP4_INVALID_TRACK_ID);

	// Get the first text
	MP4TrackId textId = MP4FindTrackId(mp4, 0, MP4_TEXT_TRACK_TYPE, 0);

	Log("-Found text track [%d]\n",textId);

	// Check whether the file has a text track
	if (textId != MP4_INVALID_TRACK_ID)
	{
		//We have it
		text = new MP4TextTrack();
		//Set values
		text->mp4 = mp4;
		text->track = textId;
		text->sampleId = 1;
		// Get time scale
		text->timeScale = MP4GetTrackTimeScale(mp4, textId);
	}

	//We are opened
	opened = true;

	//Unlock
	pthread_mutex_unlock(&mutex);
	
	return 1;
}