Code example #1
File: rfc3016.cpp  Project: BluePandaLi/mpeg4ip
extern "C" bool MP4AV_Rfc3016Hinter(
    MP4FileHandle mp4File,
    MP4TrackId mediaTrackId,
    u_int16_t maxPayloadSize)
{
    u_int32_t numSamples = MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);
    u_int32_t maxSampleSize = MP4GetTrackMaxSampleSize(mp4File, mediaTrackId);

    if (numSamples == 0 || maxSampleSize == 0) {
        return false;
    }

    MP4TrackId hintTrackId =
        MP4AV_Rfc3016_HintTrackCreate(mp4File, mediaTrackId);

    if (hintTrackId == MP4_INVALID_TRACK_ID) {
        return false;
    }

    u_int8_t* pSampleBuffer = (u_int8_t*)malloc(maxSampleSize);
    if (pSampleBuffer == NULL) {
        MP4DeleteTrack(mp4File, hintTrackId);
        return false;
    }

    for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {
        u_int32_t sampleSize = maxSampleSize;
        MP4Timestamp startTime;
        MP4Duration duration;
        MP4Duration renderingOffset;
        bool isSyncSample;

        bool rc = MP4ReadSample(
                      mp4File, mediaTrackId, sampleId,
                      &pSampleBuffer, &sampleSize,
                      &startTime, &duration,
                      &renderingOffset, &isSyncSample);

        if (rc == false ||
                MP4AV_Rfc3016_HintAddSample(mp4File,
                                            hintTrackId,
                                            sampleId,
                                            pSampleBuffer,
                                            sampleSize,
                                            duration,
                                            renderingOffset,
                                            isSyncSample,
                                            maxPayloadSize) == false) {
            MP4DeleteTrack(mp4File, hintTrackId);
            CHECK_AND_FREE(pSampleBuffer);
            return false;
        }
    }
    CHECK_AND_FREE(pSampleBuffer);

    return true;
}
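
For context, a hinter like this runs against a file that already contains the media track and has been opened for modification. The driver below is a minimal sketch, not taken from the project, assuming the mp4v2/mp4av headers bundled with mpeg4ip; the file name and the 1460-byte payload size are made up.

// Minimal driver sketch (not part of the project): hint the first video
// track of an existing file with MP4AV_Rfc3016Hinter. Assumes the mp4v2
// and mp4av headers shipped with mpeg4ip; "movie.mp4" and the 1460-byte
// payload size are arbitrary.
#include <mp4.h>
#include <mp4av.h>

int main()
{
    MP4FileHandle mp4File = MP4Modify("movie.mp4");
    if (mp4File == MP4_INVALID_FILE_HANDLE) return 1;

    MP4TrackId videoTrackId =
        MP4FindTrackId(mp4File, 0, MP4_VIDEO_TRACK_TYPE);

    bool ok = false;
    if (videoTrackId != MP4_INVALID_TRACK_ID) {
        ok = MP4AV_Rfc3016Hinter(mp4File, videoTrackId, 1460);
    }

    MP4Close(mp4File);
    return ok ? 0 : 1;
}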
Code example #2
File: rfc2429.cpp  Project: dazzle-multimedia/mpeg4ip
extern "C" bool MP4AV_Rfc2429Hinter (MP4FileHandle file,
				     MP4TrackId mediaTrackId,
				     uint16_t maxPayloadSize)
{
  uint32_t numSamples, maxSampleSize;
  MP4TrackId hid;

  numSamples = MP4GetTrackNumberOfSamples(file, mediaTrackId);
  if (numSamples == 0) {
    return false;
  }
  maxSampleSize = MP4GetTrackMaxSampleSize(file, mediaTrackId);
  u_int8_t* pSampleBuffer = (u_int8_t*)malloc(maxSampleSize);
  if (pSampleBuffer == NULL) {
    return false;
  }

  hid = MP4AddHintTrack(file, mediaTrackId);
  if (hid == MP4_INVALID_TRACK_ID) {
    free(pSampleBuffer);
    return false;
  }

  uint8_t payloadNumber = MP4_SET_DYNAMIC_PAYLOAD;
  MP4SetHintTrackRtpPayload(file,
                            hid,
                            "H263-2000",
                            &payloadNumber,
                            0,
                            NULL,
                            true,
                            false);

  // strictly speaking, this is not required for H.263 - it's a quicktime
  // thing.
  u_int16_t videoWidth = MP4GetTrackVideoWidth(file, mediaTrackId);
  u_int16_t videoHeight = MP4GetTrackVideoHeight(file, mediaTrackId);
  
  char sdpString[80];
  snprintf(sdpString, sizeof(sdpString),
           "a=cliprect:0,0,%d,%d\015\012", videoHeight, videoWidth);
  
  MP4AppendHintTrackSdp(file, 
 			hid,
 			sdpString);

  for (uint32_t sid = 1; sid <= numSamples; sid++) {

    MP4AddRtpVideoHint(file, hid, false, 0);

    u_int32_t sampleSize = maxSampleSize;
    MP4Timestamp startTime;
    MP4Duration duration;
    MP4Duration renderingOffset;
    bool isSyncSample;

    bool rc = MP4ReadSample(file, mediaTrackId, sid,
                            &pSampleBuffer, &sampleSize,
                            &startTime, &duration,
                            &renderingOffset, &isSyncSample);

    if (!rc) {
      MP4DeleteTrack(file, hid);
      free(pSampleBuffer);
      return false;
    }

    // skip the first 2 bytes of the sample - they are the zero bytes of the
    // picture start code, which the RFC 2429 payload header (P bit set)
    // stands in for in the first packet
    uint16_t payload_head = htons(0x400);
    uint32_t offset = sizeof(payload_head);
    uint32_t remaining = sampleSize - sizeof(payload_head);
    while (remaining) {
      bool last_pak = false;
      uint32_t len;

      if (remaining + 2 <= maxPayloadSize) {
        len = remaining;
        last_pak = true;
      } else {
        len = maxPayloadSize - 2;
      }
      MP4AddRtpPacket(file, hid, last_pak);

      MP4AddRtpImmediateData(file, hid,
                            (u_int8_t*)&payload_head, sizeof(payload_head));
      payload_head = 0;
      MP4AddRtpSampleData(file, hid, sid,
                          offset, len);
      offset += len;
      remaining -= len;
    }
    MP4WriteRtpHint(file, hid, duration, true);
  }

  free(pSampleBuffer);

  return true;
}
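
The packetization loop above splits each H.263 frame into RTP packets that each carry a 2-byte RFC 2429 payload header plus at most maxPayloadSize - 2 bytes of frame data, with the header of the first packet standing in for the 2-byte picture start code. The standalone sketch below reproduces just that arithmetic with made-up sizes.

// Standalone sketch of the packetization arithmetic used above; the
// 1460-byte payload budget and 4000-byte frame size are hypothetical.
#include <cstdio>

int main()
{
    const unsigned maxPayloadSize = 1460;
    const unsigned sampleSize = 4000;   // hypothetical H.263 frame size

    unsigned offset = 2;                // skip the 2 start-code bytes
    unsigned remaining = sampleSize - 2;
    unsigned packets = 0;

    while (remaining) {
        unsigned len;
        if (remaining + 2 <= maxPayloadSize) {
            len = remaining;            // last packet for this frame
        } else {
            len = maxPayloadSize - 2;
        }
        printf("packet %u: 2-byte payload header + sample bytes [%u, %u)\n",
               packets, offset, offset + len);
        offset += len;
        remaining -= len;
        packets++;
    }
    printf("%u packets for a %u-byte frame\n", packets, sampleSize);
    return 0;
}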
Code example #3
File: mp4v.cpp  Project: BluePandaLi/mpeg4ip
MP4TrackId Mp4vCreator(MP4FileHandle mp4File, FILE* inFile, bool doEncrypt,
		       bool allowVariableFrameRate)
{
    bool rc;

    u_int8_t sampleBuffer[256 * 1024 * 2];
    u_int8_t* pCurrentSample = sampleBuffer;
    u_int32_t maxSampleSize = sizeof(sampleBuffer) / 2;
    u_int32_t prevSampleSize = 0;

    // the current syntactical object
    // typically 1:1 with a sample
    // but not always, i.e. non-VOP's
    u_int8_t* pObj = pCurrentSample;
    u_int32_t objSize;
    u_int8_t objType;

    // the current sample
    MP4SampleId sampleId = 1;
    MP4Timestamp currentSampleTime = 0;

    // the last reference VOP
    MP4SampleId refVopId = 1;
    MP4Timestamp refVopTime = 0;

    // track configuration info
    u_int8_t videoProfileLevel = MPEG4_SP_L3;
    u_int8_t timeBits = 15;
    u_int16_t timeTicks = 30000;
    u_int16_t frameDuration = 3000;
    u_int16_t frameWidth = 320;
    u_int16_t frameHeight = 240;
    u_int32_t esConfigSize = 0;
    int vopType = 0;
    int prevVopType = 0;
    bool foundVOSH = false, foundVO = false, foundVOL = false;
    u_int32_t lastVopTimeIncrement = 0;
    bool variableFrameRate = false;
    bool lastFrame = false;
    bool haveBframes = false;
    mpeg4_frame_t *head = NULL, *tail = NULL;

    // start reading objects until we get the first VOP
    while (LoadNextObject(inFile, pObj, &objSize, &objType)) {
        // guard against buffer overflow
        if (pObj + objSize >= pCurrentSample + maxSampleSize) {
            fprintf(stderr,
                    "%s: buffer overflow, invalid video stream?\n", ProgName);
            return MP4_INVALID_TRACK_ID;
        }
#ifdef DEBUG_MP4V
        if (Verbosity & MP4_DETAILS_SAMPLE) {
            printf("MP4V type %x size %u\n",
                    objType, objSize);
        }
#endif

        if (objType == MP4AV_MPEG4_VOSH_START) {
            MP4AV_Mpeg4ParseVosh(pObj, objSize,
                    &videoProfileLevel);
            foundVOSH = true;
        } else if (objType == MP4AV_MPEG4_VO_START) {
            foundVO = true;
        } else if (objType == MP4AV_MPEG4_VOL_START) {
            MP4AV_Mpeg4ParseVol(pObj, objSize,
                    &timeBits, &timeTicks, &frameDuration,
                    &frameWidth, &frameHeight);

            foundVOL = true;
#ifdef DEBUG_MP4V
            printf("ParseVol: timeBits %u timeTicks %u frameDuration %u\n",
                    timeBits, timeTicks, frameDuration);
#endif

        } else if (foundVOL == true || objType == MP4AV_MPEG4_VOP_START) {
            esConfigSize = pObj - pCurrentSample;
            // ready to set up mp4 track
            break;
        }
        /* XXX why do we need this if ?
         * It looks like it will remove this object ... XXX */
	// It does.  On Purpose.  wmay 6/2004
        if (objType != MP4AV_MPEG4_USER_DATA_START) {
            pObj += objSize;
        }
    }

    if (foundVOSH == false) {
        fprintf(stderr,
                "%s: no VOSH header found in MPEG-4 video.\n"
                "This can cause problems with players other than mp4player. \n",
                ProgName);
    } else {
        if (VideoProfileLevelSpecified &&
                videoProfileLevel != VideoProfileLevel) {
            fprintf(stderr,
                    "%s: You have specified a different video profile level than was detected in the VOSH header\n"
                    "The level you specified was %d and %d was read from the VOSH\n",
                    ProgName, VideoProfileLevel, videoProfileLevel);
        }
    }
    if (foundVO == false) {
        fprintf(stderr,
                "%s: No VO header found in mpeg-4 video.\n"
                "This can cause problems with players other than mp4player\n",
                ProgName);
    }
    if (foundVOL == false) {
        fprintf(stderr,
                "%s: fatal: No VOL header found in mpeg-4 video stream\n",
                ProgName);
        return MP4_INVALID_TRACK_ID;
    }

    // convert frame duration to canonical time scale
    // note zero value for frame duration signals variable rate video
    if (timeTicks == 0) {
        timeTicks = 1;
    }
    u_int32_t mp4FrameDuration = 0;

    if (VideoFrameRate) {
      mp4FrameDuration = (u_int32_t)(((double)Mp4TimeScale) / VideoFrameRate);    
    } else if (frameDuration) {
	  VideoFrameRate = frameDuration;
	  VideoFrameRate /= timeTicks;
	  mp4FrameDuration = (Mp4TimeScale * frameDuration) / timeTicks;
    } else {
      if (allowVariableFrameRate == false ) {
	fprintf(stderr,
		"%s: variable rate video stream signalled,"
		" please specify average frame rate with -r option\n"
		" or --variable-frame-rate argument\n",
		ProgName);
	return MP4_INVALID_TRACK_ID;
      }

        variableFrameRate = true;
    }

    ismacryp_session_id_t ismaCrypSId;
    mp4v2_ismacrypParams *icPp =  (mp4v2_ismacrypParams *) malloc(sizeof(mp4v2_ismacrypParams));
    memset(icPp, 0, sizeof(mp4v2_ismacrypParams));


    // initialize ismacryp session if encrypting
    if (doEncrypt) {

        if (ismacrypInitSession(&ismaCrypSId,KeyTypeVideo) != 0) {
            fprintf(stderr, "%s: could not initialize the ISMAcryp session\n",
                    ProgName);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetScheme(ismaCrypSId, &(icPp->scheme_type)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp scheme type. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetSchemeVersion(ismaCrypSId, &(icPp->scheme_version)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp scheme ver. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetKMSUri(ismaCrypSId, &(icPp->kms_uri)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp kms uri. sid %d\n",
                    ProgName, ismaCrypSId);
            CHECK_AND_FREE(icPp->kms_uri);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if ( ismacrypGetSelectiveEncryption(ismaCrypSId, &(icPp->selective_enc)) != ismacryp_rc_ok ) {
            fprintf(stderr, "%s: could not get ismacryp selec enc. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetKeyIndicatorLength(ismaCrypSId, &(icPp->key_ind_len)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp key ind len. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetIVLength(ismaCrypSId, &(icPp->iv_len)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp iv len. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            return MP4_INVALID_TRACK_ID;
        }
    }

    // create the new video track
    MP4TrackId trackId;
    if (doEncrypt) {
        trackId =
            MP4AddEncVideoTrack(
                    mp4File,
                    Mp4TimeScale,
                    mp4FrameDuration,
                    frameWidth,
                    frameHeight,
                    icPp,
                    MP4_MPEG4_VIDEO_TYPE);
    } else {
        trackId =
            MP4AddVideoTrack(
                    mp4File,
                    Mp4TimeScale,
                    mp4FrameDuration,
                    frameWidth,
                    frameHeight,
                    MP4_MPEG4_VIDEO_TYPE);
    }

    if (trackId == MP4_INVALID_TRACK_ID) {
        fprintf(stderr,
                "%s: can't create video track\n", ProgName);
        return MP4_INVALID_TRACK_ID;
    }

    if (VideoProfileLevelSpecified) {
        videoProfileLevel = VideoProfileLevel;
    }
    if (MP4GetNumberOfTracks(mp4File, MP4_VIDEO_TRACK_TYPE) == 1) {
        MP4SetVideoProfileLevel(mp4File, videoProfileLevel);
    }
    printf("es config size is %d\n", esConfigSize);
    if (esConfigSize) {
        MP4SetTrackESConfiguration(mp4File, trackId,
                pCurrentSample, esConfigSize);

        // move past ES config, so it doesn't go into first sample
        pCurrentSample += esConfigSize;
    }
    // Move the current frame to the beginning of the
    // buffer
    memmove(sampleBuffer, pCurrentSample, pObj - pCurrentSample + objSize);
    pObj = sampleBuffer + (pObj - pCurrentSample);
    pCurrentSample = sampleBuffer;
    MP4Timestamp prevFrameTimestamp = 0;

    // now process the rest of the video stream
    while ( true ) {
        if ( objType != MP4AV_MPEG4_VOP_START ) {
	  // keep it in the buffer until a VOP comes along
          // Actually, do nothing, since we only want VOP
	  // headers in the stream - wmay 6/2004
	  //pObj += objSize;

        } else { // we have VOP
            u_int32_t sampleSize = (pObj + objSize) - pCurrentSample;

            vopType = MP4AV_Mpeg4GetVopType(pObj, objSize);

	    mpeg4_frame_t *fr = MALLOC_STRUCTURE(mpeg4_frame_t);
	    if (head == NULL) {
	      head = tail = fr;
	    } else {
	      tail->next = fr;
	      tail = fr;
	    }
	    fr->vopType = vopType;
	    fr->frameTimestamp = currentSampleTime;
	    fr->next = NULL;
            if ( variableFrameRate ) {
                // variable frame rate:  recalculate "mp4FrameDuration"
                if ( lastFrame ) {
                    // last frame
                    mp4FrameDuration = Mp4TimeScale / timeTicks;
                } else {
                    // not the last frame
                    u_int32_t vopTimeIncrement;
                    MP4AV_Mpeg4ParseVop(pObj, objSize, &vopType, timeBits, timeTicks, &vopTimeIncrement);
                    u_int32_t vopTime = vopTimeIncrement - lastVopTimeIncrement;
                    mp4FrameDuration = (Mp4TimeScale * vopTime) / timeTicks;
                    lastVopTimeIncrement = vopTimeIncrement % timeTicks;
                }
	    }
            if ( prevSampleSize > 0 ) { // not the first time
                // fill sample data & length to write
                u_int8_t* sampleData2Write = NULL;
                u_int32_t sampleLen2Write = 0;
                if ( doEncrypt ) {
                    // encrypt the previous sample - prevSampleSize bytes at
                    // the start of sampleBuffer - since that is what gets
                    // written below
                    if ( ismacrypEncryptSampleAddHeader(ismaCrypSId,
                                prevSampleSize,
                                sampleBuffer,
                                &sampleLen2Write,
                                &sampleData2Write) != 0 ) {
                        fprintf(stderr,
                                "%s: can't encrypt video sample and add header %u\n",
                                ProgName, sampleId);
                    }
                } else {
                    sampleData2Write = sampleBuffer;
                    sampleLen2Write = prevSampleSize;
                }

		
                if (variableFrameRate == false) {
                    double now_calc;
                    now_calc = sampleId;
                    now_calc *= Mp4TimeScale;
                    now_calc /= VideoFrameRate;
                    MP4Timestamp now_ts = (MP4Timestamp)now_calc;
                    mp4FrameDuration = now_ts - prevFrameTimestamp;
                    prevFrameTimestamp = now_ts;
                    currentSampleTime = now_ts;
                }
                // Write the previous sample
                rc = MP4WriteSample(mp4File, trackId,
                        sampleData2Write, sampleLen2Write,
                        mp4FrameDuration, 0, prevVopType == VOP_TYPE_I);

                if ( doEncrypt && sampleData2Write ) {
                    // buffer allocated by encrypt function.
                    // must free it!
                    free(sampleData2Write);
                }

                if ( !rc ) {
                    fprintf(stderr,
                            "%s: can't write video frame %u\n",
                            ProgName, sampleId);
                    MP4DeleteTrack(mp4File, trackId);
                    return MP4_INVALID_TRACK_ID;
                }

                // deal with rendering time offsets
                // that can occur when B frames are being used
                // which is the case for all profiles except Simple Profile
		haveBframes |= (prevVopType == VOP_TYPE_B);

		if ( lastFrame ) {
		  // finish read frames
		  break;
		}
                sampleId++;
            } // not the first time

            currentSampleTime += mp4FrameDuration;

            // Move the current frame to the beginning of the
            // buffer
            memmove(sampleBuffer, pCurrentSample, sampleSize);
            prevSampleSize = sampleSize;
            prevVopType = vopType;
            // reset pointers
            pObj = pCurrentSample = sampleBuffer + sampleSize;
        } // we have VOP

        // load next object from bitstream
        if (!LoadNextObject(inFile, pObj, &objSize, &objType)) {
            if (objType != MP4AV_MPEG4_VOP_START)
                break;
            lastFrame = true;
            objSize = 0;
            continue;
        }
        // guard against buffer overflow
        if (pObj + objSize >= pCurrentSample + maxSampleSize) {
            fprintf(stderr,
                    "%s: buffer overflow, invalid video stream?\n", ProgName);
            MP4DeleteTrack(mp4File, trackId);
            return MP4_INVALID_TRACK_ID;
        }
#ifdef DEBUG_MP4V
        if (Verbosity & MP4_DETAILS_SAMPLE) {
            printf("MP4V type %x size %u\n",
                    objType, objSize);
        }
#endif
    }
    bool doRenderingOffset = false;
    switch (videoProfileLevel) {
    case MPEG4_SP_L0:
    case MPEG4_SP_L1:
    case MPEG4_SP_L2:
    case MPEG4_SP_L3:
      break;
    default:
      doRenderingOffset = true;
      break;
    }
   
    if (doRenderingOffset && haveBframes) {
      // only generate ctts (with rendering offset for I, P frames) when
      // we need one.  We saved all the frames types and timestamps above - 
      // we can't use MP4ReadSample, because the end frames might not have
      // been written 
      refVopId = 1;
      refVopTime = 0;
      MP4SampleId maxSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);
      // start with sample 2 - we know the first one is an I frame
      mpeg4_frame_t *fr = head->next; // skip the first one
      for (MP4SampleId ix = 2; ix <= maxSamples; ix++) {
	if (fr->vopType != VOP_TYPE_B) {
#ifdef DEBUG_MP4V_TS
            printf("sample %u %u renderingOffset "U64"\n",
		   refVopId, fr->vopType, fr->frameTimestamp - refVopTime);
#endif
	  MP4SetSampleRenderingOffset(mp4File, trackId, refVopId, 
				      fr->frameTimestamp - refVopTime);
	  refVopId = ix;
	  refVopTime = fr->frameTimestamp;
	}
	fr = fr->next;
      }
      
#ifdef DEBUG_MP4V_TS
      printf("sample %u %u renderingOffset "U64"\n",
	     refVopId, fr->vopType, fr->frameTimestamp - refVopTime);
#endif
      MP4SetSampleRenderingOffset(mp4File, trackId, refVopId, 
				  fr->frameTimestamp - refVopTime);
    }

    while (head != NULL) {
      tail = head->next;
      free(head);
      head = tail;
    }
    // terminate session if encrypting
    if (doEncrypt) {
        if (ismacrypEndSession(ismaCrypSId) != 0) {
            fprintf(stderr,
                    "%s: could not end the ISMAcryp session\n",
                    ProgName);
        }
    }

    return trackId;
}
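
Near the end, the saved frame list is walked to generate composition offsets (ctts): when B-VOPs are present, a reference VOP (I or P) is displayed when the next reference VOP is decoded, so its offset is the decode-time gap to that next reference frame. The standalone sketch below reproduces that arithmetic for a hypothetical decode-order pattern and frame duration.

// Standalone sketch of the ctts arithmetic used above. The VOP pattern and
// frame duration are made up; the original code also assigns an offset to
// the final reference frame after its loop.
#include <cstdio>
#include <cstring>

int main()
{
    const char decodeOrder[] = "IPBBPBB";   // hypothetical decode-order VOP types
    const unsigned frameDuration = 3000;    // e.g. 90000-tick timescale at 30 fps
    const unsigned n = (unsigned)strlen(decodeOrder);

    unsigned refIndex = 0;                  // last reference VOP seen (sample 1)
    unsigned refTime  = 0;                  // its decode timestamp
    for (unsigned i = 1; i < n; i++) {
        unsigned decodeTime = i * frameDuration;
        if (decodeOrder[i] != 'B') {
            // the previous reference VOP is rendered when this one is decoded
            printf("sample %u (%c): renderingOffset %u\n",
                   refIndex + 1, decodeOrder[refIndex], decodeTime - refTime);
            refIndex = i;
            refTime  = decodeTime;
        }
    }
    return 0;
}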
Code example #4
File: rfcavsm.cpp  Project: hellowangh/avs-transcoder
extern "C" bool MP4AV_AVSMHinter(
				 MP4FileHandle mp4File, 
				 MP4TrackId mediaTrackId, 
				 u_int16_t maxPayloadSize)
{
  u_int32_t numSamples = MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);
  u_int32_t maxSampleSize = MP4GetTrackMaxSampleSize(mp4File, mediaTrackId);
	
  uint32_t sizeLength;

  if (numSamples == 0 || maxSampleSize == 0) {
    return false;
  }

  /*if (MP4GetTrackAVSMLengthSize(mp4File, mediaTrackId, &sizeLength) == false) {
    return false;
  }*/
  sizeLength = 4;  // why?

  MP4TrackId hintTrackId = 
    MP4AV_AVSM_HintTrackCreate(mp4File, mediaTrackId);				//****AVSMspecial****

  if (hintTrackId == MP4_INVALID_TRACK_ID) {
    return false;
  }

  u_int8_t* pSampleBuffer = (u_int8_t*)malloc(maxSampleSize);
  if (pSampleBuffer == NULL) {
    MP4DeleteTrack(mp4File, hintTrackId);
    return false;
  }
  for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {
    u_int32_t sampleSize = maxSampleSize;
    MP4Timestamp startTime;
    MP4Duration duration;
    MP4Duration renderingOffset;
    bool isSyncSample; // sync frames are indicated by stss

    bool rc = MP4ReadSample(
			    mp4File, mediaTrackId, sampleId, 
			    &pSampleBuffer, &sampleSize, 
			    &startTime, &duration, 
			    &renderingOffset, &isSyncSample);

    if (!rc) {
      MP4DeleteTrack(mp4File, hintTrackId);
      CHECK_AND_FREE(pSampleBuffer);
      return false;
    }

    MP4AV_AVSM_HintAddSample(mp4File,								//****AVSMspecial****
			     hintTrackId,
			     sampleId,
			     pSampleBuffer,
			     sampleSize,
			     sizeLength,
			     duration,
			     renderingOffset,
			     isSyncSample,
			     maxPayloadSize);
	
  }
   CHECK_AND_FREE(pSampleBuffer);

  return true;
}
Code example #5
File: rfcisma.cpp  Project: BluePandaLi/mpeg4ip
extern "C" bool MP4AV_RfcIsmaHinter(
	MP4FileHandle mp4File, 
	MP4TrackId mediaTrackId, 
	bool interleave,
	u_int16_t maxPayloadSize)
{
	// gather information, and check for validity

	u_int32_t numSamples =
		MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);

	if (numSamples == 0) {
		return false;
	}

	u_int32_t timeScale =
		MP4GetTrackTimeScale(mp4File, mediaTrackId);

	if (timeScale == 0) {
		return false;
	}

	u_int8_t audioType =
		MP4GetTrackEsdsObjectTypeId(mp4File, mediaTrackId);

	if (audioType != MP4_MPEG4_AUDIO_TYPE
	  && !MP4_IS_AAC_AUDIO_TYPE(audioType)) {
		return false;
	}

	u_int8_t mpeg4AudioType =
		MP4GetTrackAudioMpeg4Type(mp4File, mediaTrackId);

	if (audioType == MP4_MPEG4_AUDIO_TYPE) {
		// check that track contains either MPEG-4 AAC or CELP
		if (!MP4_IS_MPEG4_AAC_AUDIO_TYPE(mpeg4AudioType) 
		  && mpeg4AudioType != MP4_MPEG4_CELP_AUDIO_TYPE) {
			return false;
		}
	}

	MP4Duration sampleDuration = 
		MP4AV_GetAudioSampleDuration(mp4File, mediaTrackId);

	if (sampleDuration == MP4_INVALID_DURATION) {
		return false;
	}

	/* get the ES configuration */
	u_int8_t* pConfig = NULL;
	u_int32_t configSize;
	uint8_t channels;

	if (MP4GetTrackESConfiguration(mp4File, mediaTrackId, 
				       &pConfig, &configSize) == false)
	  return false;

	if (!pConfig) {
		return false;
	}
     
	channels = MP4AV_AacConfigGetChannels(pConfig);

	/* convert ES Config into ASCII form */
	char* sConfig = 
		MP4BinaryToBase16(pConfig, configSize);

	free(pConfig);

	if (!sConfig) {
		return false;
	}

	/* create the appropriate SDP attribute */
	uint sdpBufLen = strlen(sConfig) + 256;
	char* sdpBuf = 
	  (char*)malloc(sdpBufLen);

	if (!sdpBuf) {
		free(sConfig);
		return false;
	}


	// now add the hint track
	MP4TrackId hintTrackId =
		MP4AddHintTrack(mp4File, mediaTrackId);

	if (hintTrackId == MP4_INVALID_TRACK_ID) {
		free(sConfig);
		free(sdpBuf);
		return false;
	}

	u_int8_t payloadNumber = MP4_SET_DYNAMIC_PAYLOAD;
	char buffer[10];
	if (channels != 1) {
	  snprintf(buffer, sizeof(buffer), "%u", channels);
	}
	if (MP4SetHintTrackRtpPayload(mp4File, hintTrackId, 
				      "mpeg4-generic", &payloadNumber, 0,
				      channels != 1 ? buffer : NULL) == false) {
	  MP4DeleteTrack(mp4File, hintTrackId);
	  free(sConfig);
	  free(sdpBuf);
	  return false;
	}

	MP4Duration maxLatency;
	bool OneByteHeader = false;
	if (mpeg4AudioType == MP4_MPEG4_CELP_AUDIO_TYPE) {
	  snprintf(sdpBuf, sdpBufLen,
			"a=fmtp:%u "
			"streamtype=5; profile-level-id=15; mode=CELP-vbr; config=%s; "
			"SizeLength=6; IndexLength=2; IndexDeltaLength=2; Profile=0;"
			"\015\012",
				payloadNumber,
				sConfig); 

		// 200 ms max latency for ISMA profile 1
		maxLatency = timeScale / 5;
		OneByteHeader = true;
	} else { // AAC
	  snprintf(sdpBuf, sdpBufLen,
			"a=fmtp:%u "
			"streamtype=5; profile-level-id=15; mode=AAC-hbr; config=%s; "
			"SizeLength=13; IndexLength=3; IndexDeltaLength=3;"
			"\015\012",
				payloadNumber,
				sConfig); 

		// 500 ms max latency for ISMA profile 1
		maxLatency = timeScale / 2;
	}

	/* add this to the track's sdp */
	bool val = MP4AppendHintTrackSdp(mp4File, hintTrackId, sdpBuf);

	free(sConfig);
	free(sdpBuf);
	if (val == false) {
	  MP4DeleteTrack(mp4File, hintTrackId);
	  return false;
	}

	u_int32_t samplesPerPacket = 0;
 
	if (interleave) {
		u_int32_t maxSampleSize =
			MP4GetTrackMaxSampleSize(mp4File, mediaTrackId);

		// compute how many maximum size samples would fit in a packet
		samplesPerPacket = 
			(maxPayloadSize - 2) / (maxSampleSize + 2);

		// can't interleave if this number is 0 or 1
		if (samplesPerPacket < 2) {
			interleave = false;
		}
	}

	bool rc;

	if (interleave) {
		u_int32_t samplesPerGroup = maxLatency / sampleDuration;
		u_int32_t stride;
		stride = samplesPerGroup / samplesPerPacket;

		if (OneByteHeader && stride > 3) stride = 3;
		if (!OneByteHeader && stride > 7) stride = 7;

#if 0
		printf("max latency %llu sampleDuration %llu spg %u spp %u strid %u\n",
		       maxLatency, sampleDuration, samplesPerGroup,
		       samplesPerPacket, stride);
#endif
		rc = MP4AV_AudioInterleaveHinter(
			mp4File, 
			mediaTrackId, 
			hintTrackId,
			sampleDuration, 
			stride,		// stride
			samplesPerPacket,						// bundle
			maxPayloadSize,
			MP4AV_RfcIsmaConcatenator);

	} else {
		rc = MP4AV_AudioConsecutiveHinter(
			mp4File, 
			mediaTrackId, 
			hintTrackId,
			sampleDuration, 
			2,										// perPacketHeaderSize
			2,										// perSampleHeaderSize
			maxLatency / sampleDuration,			// maxSamplesPerPacket
			maxPayloadSize,
			MP4GetSampleSize,
			MP4AV_RfcIsmaConcatenator,
			MP4AV_RfcIsmaFragmenter);
	}

	if (!rc) {
		MP4DeleteTrack(mp4File, hintTrackId);
		return false;
	}

	return true;
}
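
The interleave branch derives its parameters from the payload budget and the ISMA latency bound: samplesPerPacket from the worst-case frame size plus its 2-byte AU header, samplesPerGroup from the latency cap, and the stride from their ratio, clamped to what the index-delta field can carry. The sketch below reruns that arithmetic for the AAC case with made-up numbers.

// Standalone sketch of the interleave arithmetic used above (AAC case).
// All inputs are hypothetical: 44.1 kHz track timescale, 1024-tick AAC
// frames, a 300-byte worst-case frame and a 1460-byte RTP payload.
#include <cstdio>

int main()
{
    const unsigned timeScale      = 44100;
    const unsigned sampleDuration = 1024;   // ticks per AAC frame
    const unsigned maxSampleSize  = 300;
    const unsigned maxPayloadSize = 1460;

    // 2-byte per-packet header plus a 2-byte AU header per sample
    unsigned samplesPerPacket = (maxPayloadSize - 2) / (maxSampleSize + 2);

    unsigned maxLatency      = timeScale / 2;            // 500 ms, ISMA profile 1
    unsigned samplesPerGroup = maxLatency / sampleDuration;
    unsigned stride          = samplesPerGroup / samplesPerPacket;
    if (stride > 7) stride = 7;                          // 3-bit index-delta limit

    printf("samples/packet %u, samples/group %u, stride %u\n",
           samplesPerPacket, samplesPerGroup, stride);
    return 0;
}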
Code example #6
File: rfc3016.cpp  Project: BluePandaLi/mpeg4ip
extern "C" MP4TrackId MP4AV_Rfc3016_HintTrackCreate (MP4FileHandle mp4File,
        MP4TrackId mediaTrackId)
{
    MP4TrackId hintTrackId =
        MP4AddHintTrack(mp4File, mediaTrackId);

    if (hintTrackId == MP4_INVALID_TRACK_ID) {
        return MP4_INVALID_TRACK_ID;
    }

    u_int8_t payloadNumber = MP4_SET_DYNAMIC_PAYLOAD;

    if (MP4SetHintTrackRtpPayload(mp4File, hintTrackId,
                                  "MP4V-ES", &payloadNumber, 0) == false) {
        MP4DeleteTrack(mp4File, hintTrackId);
        return MP4_INVALID_TRACK_ID;
    }

    /* get the mpeg4 video configuration */
    u_int8_t* pConfig;
    u_int32_t configSize;
    u_int8_t systemsProfileLevel = 0xFE;

    if (MP4GetTrackESConfiguration(mp4File, mediaTrackId, &pConfig, &configSize) == false) {
        MP4DeleteTrack(mp4File, hintTrackId);
        return MP4_INVALID_TRACK_ID;
    }

    if (pConfig) {
        // attempt to get a valid profile-level
        static u_int8_t voshStartCode[4] = {
            0x00, 0x00, 0x01, MP4AV_MPEG4_VOSH_START
        };
        if (configSize >= 5 && !memcmp(pConfig, voshStartCode, 4)) {
            systemsProfileLevel = pConfig[4];
        }
        if (systemsProfileLevel == 0xFE) {
            u_int8_t iodProfileLevel = MP4GetVideoProfileLevel(mp4File);
            if (iodProfileLevel > 0 && iodProfileLevel < 0xFE) {
                systemsProfileLevel = iodProfileLevel;
            } else {
                systemsProfileLevel = 1;
            }
        }

        /* convert it into ASCII form */
        char* sConfig = MP4BinaryToBase16(pConfig, configSize);
        free(pConfig);
        if (sConfig == NULL) {
            MP4DeleteTrack(mp4File, hintTrackId);
            return MP4_INVALID_TRACK_ID;
        }

        /* create the appropriate SDP attribute */
        char* sdpBuf = (char*)malloc(strlen(sConfig) + 128);

        if (sdpBuf == NULL) {
            free(sConfig);
            MP4DeleteTrack(mp4File, hintTrackId);
            return MP4_INVALID_TRACK_ID;
        }
        snprintf(sdpBuf,
                 strlen(sConfig) + 128,
                 "a=fmtp:%u profile-level-id=%u; config=%s;\015\012",
                 payloadNumber,
                 systemsProfileLevel,
                 sConfig);
        free(sConfig);

        /* add this to the track's sdp */
        if (MP4AppendHintTrackSdp(mp4File, hintTrackId, sdpBuf) == false) {
            MP4DeleteTrack(mp4File, hintTrackId);
            hintTrackId = MP4_INVALID_TRACK_ID;
        }

        free(sdpBuf);
    }
    return hintTrackId;
}
Code example #7
File: rfc3016.cpp  Project: BluePandaLi/mpeg4ip
extern "C" bool MP4AV_Rfc3016LatmHinter (MP4FileHandle mp4File,
        MP4TrackId mediaTrackId,
        u_int16_t maxPayloadSize)
{
    u_int32_t numSamples = MP4GetTrackNumberOfSamples(mp4File, mediaTrackId);
    u_int32_t maxSampleSize = MP4GetTrackMaxSampleSize(mp4File, mediaTrackId);
    MP4Duration sampleDuration =
        MP4AV_GetAudioSampleDuration(mp4File, mediaTrackId);

    if (sampleDuration == MP4_INVALID_DURATION) {
        return false;
    }

    if (numSamples == 0 || maxSampleSize == 0) {
        return false;
    }


    /* get the mpeg4 audio configuration */
    u_int8_t* pAudioSpecificConfig;
    u_int32_t AudioSpecificConfigSize;

    if (MP4GetTrackESConfiguration(mp4File, mediaTrackId,
                                   &pAudioSpecificConfig,
                                   &AudioSpecificConfigSize) == false)
        return false;

    if (pAudioSpecificConfig == NULL ||
            AudioSpecificConfigSize == 0) return false;

    uint8_t channels = MP4AV_AacConfigGetChannels(pAudioSpecificConfig);
    uint32_t freq = MP4AV_AacConfigGetSamplingRate(pAudioSpecificConfig);
    uint8_t type = MP4AV_AacConfigGetAudioObjectType(pAudioSpecificConfig);

    uint8_t *pConfig;
    uint32_t configSize;

    MP4AV_LatmGetConfiguration(&pConfig, &configSize,
                               pAudioSpecificConfig, AudioSpecificConfigSize);
    free(pAudioSpecificConfig);

    if (pConfig == NULL || configSize == 0) {
        CHECK_AND_FREE(pConfig);
        return false;
    }

    MP4TrackId hintTrackId =
        MP4AddHintTrack(mp4File, mediaTrackId);

    if (hintTrackId == MP4_INVALID_TRACK_ID) {
        free(pConfig);
        return false;
    }
    u_int8_t payloadNumber = MP4_SET_DYNAMIC_PAYLOAD;

    char buffer[10];
    if (channels != 1) {
        snprintf(buffer, sizeof(buffer), "%u", channels);
    }

    /* convert it into ASCII form */
    char* sConfig = MP4BinaryToBase16(pConfig, configSize);
    free(pConfig);
    if (sConfig == NULL ||
            MP4SetHintTrackRtpPayload(mp4File, hintTrackId,
                                      "MP4A-LATM", &payloadNumber, 0,
                                      channels != 1 ? buffer : NULL) == false) {
        CHECK_AND_FREE(sConfig);
        MP4DeleteTrack(mp4File, hintTrackId);
        return false;
    }

    uint32_t profile_level;
    // from gpac code
    switch (type) {
    case 2:
        if (channels <= 2) profile_level = freq <= 24000 ? 0x28 : 0x29;
        else profile_level = freq <= 48000 ? 0x2a : 0x2b;
        break;
    case 5:
        if (channels <= 2) profile_level = freq < 24000 ? 0x2c : 0x2d;
        else profile_level = freq <= 48000 ? 0x2e : 0x2f;
        break;
    default:
        if (channels <= 2) profile_level = freq < 24000 ? 0x0e : 0x0f;
        else profile_level = 0x10;
        break;
    }

    /* create the appropriate SDP attribute */
    char* sdpBuf = (char*)malloc(strlen(sConfig) + 128);

    if (sdpBuf == NULL) {
        free(sConfig);
        MP4DeleteTrack(mp4File, hintTrackId);
        return false;
    }
    snprintf(sdpBuf,
             strlen(sConfig) + 128,
             "a=fmtp:%u profile-level-id=%u; cpresent=0; config=%s;\015\012",
             payloadNumber,
             profile_level,
             sConfig);

    /* add this to the track's sdp */
    bool val = MP4AppendHintTrackSdp(mp4File, hintTrackId, sdpBuf);

    free(sConfig);
    free(sdpBuf);
    if (val == false) {
        MP4DeleteTrack(mp4File, hintTrackId);
        return false;
    }

    for (MP4SampleId sampleId = 1; sampleId <= numSamples; sampleId++) {
        uint8_t buffer[32];
        uint32_t offset = 0;
        uint32_t sampleSize =
            MP4GetSampleSize(mp4File, mediaTrackId, sampleId);
        uint32_t size_left = sampleSize;

        while (size_left > 0) {
            if (size_left > 0xff) {
                size_left -= 0xff;
                buffer[offset] = 0xff;
            } else {
                buffer[offset] = size_left;
                size_left = 0;
            }
            offset++;
        }
        if (MP4AddRtpHint(mp4File, hintTrackId) == false ||
                MP4AddRtpPacket(mp4File, hintTrackId, true) == false ||
                MP4AddRtpImmediateData(mp4File, hintTrackId,
                                       buffer, offset) == false ||
                MP4AddRtpSampleData(mp4File, hintTrackId,
                                    sampleId, 0, sampleSize) == false ||
                MP4WriteRtpHint(mp4File, hintTrackId, sampleDuration) == false) {
            MP4DeleteTrack(mp4File, hintTrackId);
            return false;
        }
    }
    return true;
}
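
Each packet built in the per-sample loop starts with the LATM PayloadLengthInfo: the frame length coded as a run of 0xFF bytes followed by one byte smaller than 0xFF. The standalone sketch below mirrors that loop for an arbitrary frame size.

// Standalone sketch mirroring the PayloadLengthInfo coding in the loop
// above; the 700-byte frame size is arbitrary.
#include <cstdio>
#include <cstdint>

int main()
{
    uint32_t sampleSize = 700;      // hypothetical AAC frame size in bytes
    uint8_t  header[32];
    uint32_t offset = 0;
    uint32_t left = sampleSize;

    while (left > 0) {
        if (left > 0xff) {
            header[offset] = 0xff;
            left -= 0xff;
        } else {
            header[offset] = (uint8_t)left;
            left = 0;
        }
        offset++;
    }
    printf("%u bytes of length info for a %u-byte frame:", offset, sampleSize);
    for (uint32_t i = 0; i < offset; i++) printf(" %02x", (unsigned)header[i]);
    printf("\n");
    return 0;
}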
Code example #8
File: mpeg.cpp  Project: acassis/emlinux-ssd1935
static MP4TrackId AudioCreate (MP4FileHandle mp4file, 
			       mpeg2ps_t *file, 
			       int astream,
			       bool doEncrypt)
{
  uint16_t freq;
  int type;
  MP4TrackId id;
  uint16_t samples_per_frame;
  uint8_t *buf = NULL;
  uint32_t blen = 0;
  uint32_t frame_num = 1;
  ismacryp_session_id_t ismaCrypSId;
  mp4v2_ismacrypParams *icPp =  (mp4v2_ismacrypParams *) malloc(sizeof(mp4v2_ismacrypParams));
  MP4AV_Mp3Header hdr;
  u_int8_t mpegVersion;
  memset(icPp, 0, sizeof(mp4v2_ismacrypParams));

  type = mpeg2ps_get_audio_stream_type(file, astream);

  if (type != MPEG_AUDIO_MPEG) {
    fprintf(stderr, "Unsupported audio format %d in audio stream %d\n", 
	    type, astream);
    return MP4_INVALID_TRACK_ID;
  }

  freq = mpeg2ps_get_audio_stream_sample_freq(file, astream);

  if (mpeg2ps_get_audio_frame(file, 
			      astream,
			      &buf, 
			      &blen,
			      TS_90000,
			      NULL, 
			      NULL) == false) {
    fprintf(stderr, "No audio tracks in audio stream %d\n", astream);
    return MP4_INVALID_TRACK_ID;
  }
  
  hdr = MP4AV_Mp3HeaderFromBytes(buf);
  mpegVersion = MP4AV_Mp3GetHdrVersion(hdr);
  samples_per_frame = MP4AV_Mp3GetHdrSamplingWindow(hdr);

  u_int8_t audioType = MP4AV_Mp3ToMp4AudioType(mpegVersion);
  
  if (audioType == MP4_INVALID_AUDIO_TYPE
      || samples_per_frame == 0) {
    fprintf(stderr,	
	    "%s: data in file doesn't appear to be valid audio\n",
	    ProgName);
    return MP4_INVALID_TRACK_ID;
  }

  MP4Duration duration = (90000 * samples_per_frame) / freq;

  if (doEncrypt) {
    // initialize the ismacryp session
    if (ismacrypInitSession(&ismaCrypSId,KeyTypeAudio) != 0) {
      fprintf(stderr, 
	      "%s: could not initialize the ISMAcryp session\n",
	      ProgName);
      return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetScheme(ismaCrypSId, &(icPp->scheme_type)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp scheme type. sid %d\n", 
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetSchemeVersion(ismaCrypSId, &(icPp->scheme_version)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp scheme ver. sid %d\n",
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetKMSUri(ismaCrypSId, &(icPp->kms_uri)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp kms uri. sid %d\n",
               ProgName, ismaCrypSId);
       if (icPp->kms_uri != NULL) free(icPp->kms_uri);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if ( ismacrypGetSelectiveEncryption(ismaCrypSId, &(icPp->selective_enc)) != ismacryp_rc_ok ) {
       fprintf(stderr, "%s: could not get ismacryp selec enc. sid %d\n",
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetKeyIndicatorLength(ismaCrypSId, &(icPp->key_ind_len)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp key ind len. sid %d\n",
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    if (ismacrypGetIVLength(ismaCrypSId, &(icPp->iv_len)) != ismacryp_rc_ok) {
       fprintf(stderr, "%s: could not get ismacryp iv len. sid %d\n",
               ProgName, ismaCrypSId);
       ismacrypEndSession(ismaCrypSId);
       return MP4_INVALID_TRACK_ID;
    }
    id = MP4AddEncAudioTrack(mp4file, 
			     90000, 
			     duration,
                             icPp,
			     audioType);
  } else {
    id = MP4AddAudioTrack(mp4file, 
			  90000, 
			  duration,
			  audioType);
  }
  
  if (id == MP4_INVALID_TRACK_ID) {
    fprintf(stderr, 
	    "%s: can't create audio track from stream %d\n", 
	    ProgName, astream);
    return MP4_INVALID_TRACK_ID;
  }

  if (MP4GetNumberOfTracks(mp4file, MP4_AUDIO_TRACK_TYPE) == 1) {
    MP4SetAudioProfileLevel(mp4file, 0xFE);
  }

  do {
    // encrypt if needed
     if (doEncrypt) {
       u_int8_t* encSampleData = NULL;
       u_int32_t encSampleLen = 0;
       if (ismacrypEncryptSampleAddHeader(ismaCrypSId, blen, buf,
					  &encSampleLen, &encSampleData) != 0) {
	 fprintf(stderr,	
		 "%s: can't encrypt audio sample and add header %u\n", ProgName, id);
       }
       // now write the sample
       if (!MP4WriteSample(mp4file, id, encSampleData, encSampleLen)) {
	 fprintf(stderr, "%s: can't write audio track %u, stream %d",
		 ProgName, frame_num, astream);
	 MP4DeleteTrack(mp4file, id);
	 return MP4_INVALID_TRACK_ID;
       }
       if (encSampleData != NULL) {
	 free(encSampleData);
       }
    } else {
      // not encrypting - write the raw frame
      if (!MP4WriteSample(mp4file, id, buf, blen)) {
        fprintf(stderr, "%s: can't write audio track %u, stream %d",
                ProgName, frame_num, astream);
        MP4DeleteTrack(mp4file, id);
        return MP4_INVALID_TRACK_ID;
      }
    }
    frame_num++;
#if 0
    if ((frame_num % 100) == 0) printf("Audio frame %d\n", frame_num);
#endif
  }  while (mpeg2ps_get_audio_frame(file, 
				    astream, 
				    &buf, 
				    &blen,
				    TS_90000,
				    NULL, NULL));
  
  // if encrypting, terminate the ismacryp session
  if (doEncrypt) {
    if (ismacrypEndSession(ismaCrypSId) != 0) {
      fprintf(stderr, 
	      "%s: could not end the ISMAcryp session\n",
	      ProgName);
      return MP4_INVALID_TRACK_ID;
    }
  }

  return id;
}
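
The track uses a fixed 90 kHz timescale, so each MPEG audio frame spans 90000 * samples_per_frame / freq ticks. A standalone sketch of that arithmetic with a made-up sampling rate:

// Standalone sketch of the duration arithmetic above; the 44100 Hz sampling
// rate is a hypothetical input. MPEG-1 Layer III frames carry 1152 samples.
#include <cstdio>

int main()
{
    const unsigned timeScale       = 90000;  // ticks per second
    const unsigned samplesPerFrame = 1152;   // MPEG-1 Layer III
    const unsigned samplingFreq    = 44100;  // Hz

    unsigned duration = (timeScale * samplesPerFrame) / samplingFreq;
    printf("each frame spans %u ticks (about %.2f ms)\n",
           duration, 1000.0 * samplesPerFrame / samplingFreq);
    return 0;
}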