Beispiel #1
0
/*
 * Read an MPEG-4 (Part 2) elementary video stream from inFile and add it
 * to mp4File as a new video track, optionally ISMAcryp-encrypting every
 * sample when doEncrypt is set.
 *
 * The stream is consumed object-by-object: the configuration headers
 * (VOSH, VO, VOL) seen before the first VOP become the track's ES
 * configuration, then each VOP becomes one MP4 sample.  Frame types and
 * timestamps are recorded in a list so that B-frame rendering offsets
 * (ctts) can be generated after all samples are written.
 *
 * Returns the new track id, or MP4_INVALID_TRACK_ID on any failure.
 */
MP4TrackId Mp4vCreator(MP4FileHandle mp4File, FILE* inFile, bool doEncrypt,
                       bool allowVariableFrameRate)
{
    bool rc;

    // double-size buffer: the previous (complete) sample stays at the
    // front while the next sample's objects accumulate behind it
    u_int8_t sampleBuffer[256 * 1024 * 2];
    u_int8_t* pCurrentSample = sampleBuffer;
    u_int32_t maxSampleSize = sizeof(sampleBuffer) / 2;
    u_int32_t prevSampleSize = 0;

    // the current syntactical object
    // typically 1:1 with a sample
    // but not always, i.e. non-VOP's
    u_int8_t* pObj = pCurrentSample;
    u_int32_t objSize;
    u_int8_t objType;

    // the current sample
    MP4SampleId sampleId = 1;
    MP4Timestamp currentSampleTime = 0;

    // the last reference (I or P) VOP, used for ctts generation below
    MP4SampleId refVopId = 1;
    MP4Timestamp refVopTime = 0;

    // track configuration info; defaults are overwritten by the VOL header
    u_int8_t videoProfileLevel = MPEG4_SP_L3;
    u_int8_t timeBits = 15;
    u_int16_t timeTicks = 30000;
    u_int16_t frameDuration = 3000;
    u_int16_t frameWidth = 320;
    u_int16_t frameHeight = 240;
    u_int32_t esConfigSize = 0;
    int vopType = 0;
    int prevVopType = 0;
    bool foundVOSH = false, foundVO = false, foundVOL = false;
    u_int32_t lastVopTimeIncrement = 0;
    bool variableFrameRate = false;
    bool lastFrame = false;
    bool haveBframes = false;
    mpeg4_frame_t *head = NULL, *tail = NULL;

    // start reading objects until we get the first VOP
    while (LoadNextObject(inFile, pObj, &objSize, &objType)) {
        // guard against buffer overflow
        if (pObj + objSize >= pCurrentSample + maxSampleSize) {
            fprintf(stderr,
                    "%s: buffer overflow, invalid video stream?\n", ProgName);
            return MP4_INVALID_TRACK_ID;
        }
#ifdef DEBUG_MP4V
        if (Verbosity & MP4_DETAILS_SAMPLE) {
            printf("MP4V type %x size %u\n",
                    objType, objSize);
        }
#endif

        if (objType == MP4AV_MPEG4_VOSH_START) {
            MP4AV_Mpeg4ParseVosh(pObj, objSize,
                    &videoProfileLevel);
            foundVOSH = true;
        } else if (objType == MP4AV_MPEG4_VO_START) {
            foundVO = true;
        } else if (objType == MP4AV_MPEG4_VOL_START) {
            MP4AV_Mpeg4ParseVol(pObj, objSize,
                    &timeBits, &timeTicks, &frameDuration,
                    &frameWidth, &frameHeight);

            foundVOL = true;
#ifdef DEBUG_MP4V
            printf("ParseVol: timeBits %u timeTicks %u frameDuration %u\n",
                    timeBits, timeTicks, frameDuration);
#endif

        } else if (foundVOL == true || objType == MP4AV_MPEG4_VOP_START) {
            // everything collected so far is the ES configuration
            esConfigSize = pObj - pCurrentSample;
            // ready to set up mp4 track
            break;
        }
        /* XXX why do we need this if ?
         * It looks like it will remove this object ... XXX */
        // It does.  On Purpose.  wmay 6/2004
        if (objType != MP4AV_MPEG4_USER_DATA_START) {
            pObj += objSize;
        }
    }

    if (foundVOSH == false) {
        fprintf(stderr,
                "%s: no VOSH header found in MPEG-4 video.\n"
                "This can cause problems with players other than mp4player. \n",
                ProgName);
    } else {
        if (VideoProfileLevelSpecified &&
                videoProfileLevel != VideoProfileLevel) {
            fprintf(stderr,
                    "%s: You have specified a different video profile level than was detected in the VOSH header\n"
                    "The level you specified was %d and %d was read from the VOSH\n",
                    ProgName, VideoProfileLevel, videoProfileLevel);
        }
    }
    if (foundVO == false) {
        fprintf(stderr,
                "%s: No VO header found in mpeg-4 video.\n"
                "This can cause problems with players other than mp4player\n",
                ProgName);
    }
    if (foundVOL == false) {
        fprintf(stderr,
                "%s: fatal: No VOL header found in mpeg-4 video stream\n",
                ProgName);
        return MP4_INVALID_TRACK_ID;
    }

    // convert frame duration to canonical time scale
    // note zero value for frame duration signals variable rate video
    if (timeTicks == 0) {
        timeTicks = 1;
    }
    u_int32_t mp4FrameDuration = 0;

    if (VideoFrameRate) {
        mp4FrameDuration = (u_int32_t)(((double)Mp4TimeScale) / VideoFrameRate);
    } else if (frameDuration) {
        // derive the frame rate from the VOL timing fields
        VideoFrameRate = frameDuration;
        VideoFrameRate /= timeTicks;
        mp4FrameDuration = (Mp4TimeScale * frameDuration) / timeTicks;
    } else {
        if (allowVariableFrameRate == false ) {
            fprintf(stderr,
                    "%s: variable rate video stream signalled,"
                    " please specify average frame rate with -r option\n"
                    " or --variable-frame-rate argument\n",
                    ProgName);
            return MP4_INVALID_TRACK_ID;
        }

        variableFrameRate = true;
    }

    ismacryp_session_id_t ismaCrypSId;
    mp4v2_ismacrypParams *icPp =  (mp4v2_ismacrypParams *) malloc(sizeof(mp4v2_ismacrypParams));
    // FIX: the original dereferenced the malloc result unchecked
    if (icPp == NULL) {
        fprintf(stderr, "%s: out of memory\n", ProgName);
        return MP4_INVALID_TRACK_ID;
    }
    memset(icPp, 0, sizeof(mp4v2_ismacrypParams));


    // initialize ismacryp session if encrypting
    // FIX: each failure path now frees icPp (the original leaked it)
    if (doEncrypt) {

        if (ismacrypInitSession(&ismaCrypSId,KeyTypeVideo) != 0) {
            fprintf(stderr, "%s: could not initialize the ISMAcryp session\n",
                    ProgName);
            free(icPp);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetScheme(ismaCrypSId, &(icPp->scheme_type)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp scheme type. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            free(icPp);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetSchemeVersion(ismaCrypSId, &(icPp->scheme_version)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp scheme ver. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            free(icPp);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetKMSUri(ismaCrypSId, &(icPp->kms_uri)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp kms uri. sid %d\n",
                    ProgName, ismaCrypSId);
            CHECK_AND_FREE(icPp->kms_uri);
            ismacrypEndSession(ismaCrypSId);
            free(icPp);
            return MP4_INVALID_TRACK_ID;
        }
        if ( ismacrypGetSelectiveEncryption(ismaCrypSId, &(icPp->selective_enc)) != ismacryp_rc_ok ) {
            fprintf(stderr, "%s: could not get ismacryp selec enc. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            CHECK_AND_FREE(icPp->kms_uri);
            free(icPp);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetKeyIndicatorLength(ismaCrypSId, &(icPp->key_ind_len)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp key ind len. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            CHECK_AND_FREE(icPp->kms_uri);
            free(icPp);
            return MP4_INVALID_TRACK_ID;
        }
        if (ismacrypGetIVLength(ismaCrypSId, &(icPp->iv_len)) != ismacryp_rc_ok) {
            fprintf(stderr, "%s: could not get ismacryp iv len. sid %d\n",
                    ProgName, ismaCrypSId);
            ismacrypEndSession(ismaCrypSId);
            CHECK_AND_FREE(icPp->kms_uri);
            free(icPp);
            return MP4_INVALID_TRACK_ID;
        }
    }

    // create the new video track
    MP4TrackId trackId;
    if (doEncrypt) {
        trackId =
            MP4AddEncVideoTrack(
                    mp4File,
                    Mp4TimeScale,
                    mp4FrameDuration,
                    frameWidth,
                    frameHeight,
                    icPp,
                    MP4_MPEG4_VIDEO_TYPE);
    } else {
        trackId =
            MP4AddVideoTrack(
                    mp4File,
                    Mp4TimeScale,
                    mp4FrameDuration,
                    frameWidth,
                    frameHeight,
                    MP4_MPEG4_VIDEO_TYPE);
    }

    if (trackId == MP4_INVALID_TRACK_ID) {
        fprintf(stderr,
                "%s: can't create video track\n", ProgName);
        // FIX: free the params block on this failure path too
        CHECK_AND_FREE(icPp->kms_uri);
        free(icPp);
        return MP4_INVALID_TRACK_ID;
    }
    // NOTE(review): icPp is never freed on the success path or on the
    // later error paths; presumably MP4AddEncVideoTrack copies the values
    // rather than taking ownership -- confirm against the mp4v2 sources
    // before freeing it here as well.

    if (VideoProfileLevelSpecified) {
        videoProfileLevel = VideoProfileLevel;
    }
    if (MP4GetNumberOfTracks(mp4File, MP4_VIDEO_TRACK_TYPE) == 1) {
        MP4SetVideoProfileLevel(mp4File, videoProfileLevel);
    }
    // FIX: esConfigSize is unsigned, use %u (was %d)
    printf("es config size is %u\n", esConfigSize);
    if (esConfigSize) {
        MP4SetTrackESConfiguration(mp4File, trackId,
                pCurrentSample, esConfigSize);

        // move past ES config, so it doesn't go into first sample
        pCurrentSample += esConfigSize;
    }
    // Move the current frame to the beginning of the
    // buffer
    memmove(sampleBuffer, pCurrentSample, pObj - pCurrentSample + objSize);
    pObj = sampleBuffer + (pObj - pCurrentSample);
    pCurrentSample = sampleBuffer;
    MP4Timestamp prevFrameTimestamp = 0;

    // now process the rest of the video stream
    while ( true ) {
        if ( objType != MP4AV_MPEG4_VOP_START ) {
            // keep it in the buffer until a VOP comes along
            // Actually, do nothings, since we only want VOP
            // headers in the stream - wmay 6/2004
            //pObj += objSize;

        } else { // we have VOP
            u_int32_t sampleSize = (pObj + objSize) - pCurrentSample;

            vopType = MP4AV_Mpeg4GetVopType(pObj, objSize);

            // remember type and timestamp for ctts generation below
            mpeg4_frame_t *fr = MALLOC_STRUCTURE(mpeg4_frame_t);
            if (head == NULL) {
                head = tail = fr;
            } else {
                tail->next = fr;
                tail = fr;
            }
            fr->vopType = vopType;
            fr->frameTimestamp = currentSampleTime;
            fr->next = NULL;
            if ( variableFrameRate ) {
                // variable frame rate:  recalculate "mp4FrameDuration"
                if ( lastFrame ) {
                    // last frame
                    mp4FrameDuration = Mp4TimeScale / timeTicks;
                } else {
                    // not the last frame
                    u_int32_t vopTimeIncrement;
                    MP4AV_Mpeg4ParseVop(pObj, objSize, &vopType, timeBits, timeTicks, &vopTimeIncrement);
                    u_int32_t vopTime = vopTimeIncrement - lastVopTimeIncrement;
                    mp4FrameDuration = (Mp4TimeScale * vopTime) / timeTicks;
                    lastVopTimeIncrement = vopTimeIncrement % timeTicks;
                }
            }
            if ( prevSampleSize > 0 ) { // not the first time
                // fill sample data & length to write
                u_int8_t* sampleData2Write = NULL;
                u_int32_t sampleLen2Write = 0;
                if ( doEncrypt ) {
                    if ( ismacrypEncryptSampleAddHeader(ismaCrypSId,
                                sampleSize,
                                sampleBuffer,
                                &sampleLen2Write,
                                &sampleData2Write) != 0 ) {
                        fprintf(stderr,
                                "%s: can't encrypt video sample and add header %u\n",
                                ProgName, sampleId);
                    }
                } else {
                    sampleData2Write = sampleBuffer;
                    sampleLen2Write = prevSampleSize;
                }


                // fixed frame rate: derive each duration from the ideal
                // timestamp so rounding errors don't accumulate
                if (variableFrameRate == false) {
                    double now_calc;
                    now_calc = sampleId;
                    now_calc *= Mp4TimeScale;
                    now_calc /= VideoFrameRate;
                    MP4Timestamp now_ts = (MP4Timestamp)now_calc;
                    mp4FrameDuration = now_ts - prevFrameTimestamp;
                    prevFrameTimestamp = now_ts;
                    currentSampleTime = now_ts;
                }
                // Write the previous sample
                rc = MP4WriteSample(mp4File, trackId,
                        sampleData2Write, sampleLen2Write,
                        mp4FrameDuration, 0, prevVopType == VOP_TYPE_I);

                if ( doEncrypt && sampleData2Write ) {
                    // buffer allocated by encrypt function.
                    // must free it!
                    free(sampleData2Write);
                }

                if ( !rc ) {
                    fprintf(stderr,
                            "%s: can't write video frame %u\n",
                            ProgName, sampleId);
                    MP4DeleteTrack(mp4File, trackId);
                    return MP4_INVALID_TRACK_ID;
                }

                // deal with rendering time offsets
                // that can occur when B frames are being used
                // which is the case for all profiles except Simple Profile
                haveBframes |= (prevVopType == VOP_TYPE_B);

                if ( lastFrame ) {
                    // finish read frames
                    break;
                }
                sampleId++;
            } // not the first time

            currentSampleTime += mp4FrameDuration;

            // Move the current frame to the beginning of the
            // buffer
            memmove(sampleBuffer, pCurrentSample, sampleSize);
            prevSampleSize = sampleSize;
            prevVopType = vopType;
            // reset pointers
            pObj = pCurrentSample = sampleBuffer + sampleSize;
        } // we have VOP

        // load next object from bitstream
        if (!LoadNextObject(inFile, pObj, &objSize, &objType)) {
            // EOF: if the last object seen was a VOP, loop once more with
            // lastFrame set so the pending sample gets written
            if (objType != MP4AV_MPEG4_VOP_START)
                break;
            lastFrame = true;
            objSize = 0;
            continue;
        }
        // guard against buffer overflow
        if (pObj + objSize >= pCurrentSample + maxSampleSize) {
            fprintf(stderr,
                    "%s: buffer overflow, invalid video stream?\n", ProgName);
            MP4DeleteTrack(mp4File, trackId);
            return MP4_INVALID_TRACK_ID;
        }
#ifdef DEBUG_MP4V
        if (Verbosity & MP4_DETAILS_SAMPLE) {
            printf("MP4V type %x size %u\n",
                    objType, objSize);
        }
#endif
    }
    // rendering offsets are only needed for profiles that allow B-VOPs
    // (i.e. everything except Simple Profile)
    bool doRenderingOffset = false;
    switch (videoProfileLevel) {
    case MPEG4_SP_L0:
    case MPEG4_SP_L1:
    case MPEG4_SP_L2:
    case MPEG4_SP_L3:
      break;
    default:
      doRenderingOffset = true;
      break;
    }

    if (doRenderingOffset && haveBframes) {
      // only generate ctts (with rendering offset for I, P frames) when
      // we need one.  We saved all the frames types and timestamps above -
      // we can't use MP4ReadSample, because the end frames might not have
      // been written
      refVopId = 1;
      refVopTime = 0;
      MP4SampleId maxSamples = MP4GetTrackNumberOfSamples(mp4File, trackId);
      // start with sample 2 - we know the first one is a I frame
      mpeg4_frame_t *fr = head->next; // skip the first one
      for (MP4SampleId ix = 2; ix <= maxSamples; ix++) {
	if (fr->vopType != VOP_TYPE_B) {
#ifdef DEBUG_MP4V_TS
            printf("sample %u %u renderingOffset "U64"\n",
		   refVopId, fr->vopType, fr->frameTimestamp - refVopTime);
#endif
	  MP4SetSampleRenderingOffset(mp4File, trackId, refVopId,
				      fr->frameTimestamp - refVopTime);
	  refVopId = ix;
	  refVopTime = fr->frameTimestamp;
	}
	fr = fr->next;
      }

#ifdef DEBUG_MP4V_TS
      printf("sample %u %u renderingOffset "U64"\n",
	     refVopId, fr->vopType, fr->frameTimestamp - refVopTime);
#endif
      MP4SetSampleRenderingOffset(mp4File, trackId, refVopId,
				  fr->frameTimestamp - refVopTime);
    }

    // release the recorded frame list
    while (head != NULL) {
      tail = head->next;
      free(head);
      head = tail;
    }
    // terminate session if encrypting
    if (doEncrypt) {
        if (ismacrypEndSession(ismaCrypSId) != 0) {
            fprintf(stderr,
                    "%s: could not end the ISMAcryp session\n",
                    ProgName);
        }
    }

    return trackId;
}
Beispiel #2
0
/*
 * Locate the VOL header in the buffer, create the xvid decoder with the
 * dimensions parsed from it, configure the renderer (including the pixel
 * aspect ratio) and finally feed the VOL itself through the decoder so
 * the codec picks up the header state.  Returns 0 on success, -1 on any
 * failure.
 */
static int look_and_parse_vol (xvid_codec_t *xvid,
			       uint8_t *bufptr,
			       uint32_t len)
{
  // find the start of the VOL inside the supplied bitstream
  uint8_t *vol_start = MP4AV_Mpeg4FindVol(bufptr, len);
  if (vol_start == NULL) {
    return -1;
  }
  uint32_t vol_len = len - (vol_start - bufptr);

  // pull timing, geometry and pixel-aspect-ratio fields out of the VOL
  uint8_t time_bits;
  uint16_t time_ticks, frame_dur, frame_w, frame_h;
  uint8_t par_code, par_w, par_h;
  if (MP4AV_Mpeg4ParseVol(vol_start,
			  vol_len,
			  &time_bits,
			  &time_ticks,
			  &frame_dur,
			  &frame_w,
			  &frame_h,
			  &par_code,
			  &par_w,
			  &par_h) == false) {
    return -1;
  }

  xvid_message(LOG_DEBUG, "xvid", "aspect ratio %x %d %d", 
	       par_code, par_w, par_h);

  // create the decoder instance with the dimensions from the VOL
  XVID_DEC_PARAM param;
  param.width = frame_w;
  param.height = frame_h;
  int ret = xvid_decore(NULL, XVID_DEC_CREATE,
			&param, NULL);
  if (ret != XVID_ERR_OK) {
    return -1;
  }

  // map the MPEG-4 PAR code onto a numerator/denominator pair;
  // 0xf (extended PAR) keeps the values read from the VOL
  switch (par_code) {
  case 2:   par_w = 12; par_h = 11; break;
  case 3:   par_w = 10; par_h = 11; break;
  case 4:   par_w = 16; par_h = 11; break;
  case 5:   par_w = 40; par_h = 33; break;
  case 0xf: break;
  default:  par_h = 0; break;   // unknown/square: no explicit ratio
  }
  double ar = 0.0;
  if (par_h != 0) {
    ar = (double)par_w;
    ar *= (double) param.width;
    ar /= (double)par_h;
    ar /= (double)param.height;
  }

  xvid->m_xvid_handle = param.handle;
  xvid->m_vft->video_configure(xvid->m_ifptr, 
			       param.width,
			       param.height,
			       VIDEO_FORMAT_YUV,
			       ar);

  // we need to then run the VOL through the decoder.
  XVID_DEC_FRAME frame;
  XVID_DEC_PICTURE decpict;
  frame.bitstream = (void *)vol_start;
  frame.length = vol_len;
  frame.colorspace = XVID_CSP_USER;
  frame.image = &decpict;
  ret = xvid_decore(xvid->m_xvid_handle, XVID_DEC_DECODE, &frame, NULL);
  xvid_message(LOG_NOTICE, "xvidif", "decoded vol ret %d", ret);

  return ret == XVID_ERR_OK ? 0 : -1;
}
Beispiel #3
0
/*
 * Allocate and initialize an ffmpeg-backed codec instance for the given
 * stream.  Depending on the codec id, height/width are recovered from
 * SDP sprop parameter sets (H.264), from the VOL inside the fmtp config
 * (MPEG-4), or from vinfo (SVQ3); when enough information is available
 * the codec is opened and any config blob is run through the decoder.
 *
 * Returns the new codec instance, or NULL if the codec could not be
 * opened.  (Callers that need lazy opening get open_codec == false and a
 * still-valid instance.)
 */
static codec_data_t *ffmpeg_create (const char *stream_type,
                                    const char *compressor,
                                    int type,
                                    int profile,
                                    format_list_t *media_fmt,
                                    video_info_t *vinfo,
                                    const uint8_t *userdata,
                                    uint32_t ud_size,
                                    video_vft_t *vft,
                                    void *ifptr)
{
    ffmpeg_codec_t *ffmpeg;

    ffmpeg = MALLOC_STRUCTURE(ffmpeg_codec_t);
    memset(ffmpeg, 0, sizeof(*ffmpeg));

    ffmpeg->m_vft = vft;
    ffmpeg->m_ifptr = ifptr;
    avcodec_init();
    avcodec_register_all();
    av_log_set_level(AV_LOG_QUIET);

    ffmpeg->m_codecId = ffmpeg_find_codec(stream_type, compressor, type,
                                          profile, media_fmt, userdata, ud_size);

    // must have a codecID - we checked it earlier
    ffmpeg->m_codec = avcodec_find_decoder(ffmpeg->m_codecId);
    ffmpeg->m_c = avcodec_alloc_context();
    ffmpeg->m_picture = avcodec_alloc_frame();
    bool open_codec = true;       // open the codec before returning?
    bool run_userdata = false;    // feed userdata through the decoder?
    bool free_userdata = false;   // did we allocate userdata ourselves?

    switch (ffmpeg->m_codecId) {
    case CODEC_ID_MJPEG:
        break;
    case CODEC_ID_H264:
        // need to find height and width from the sprop parameter sets
        if (media_fmt != NULL && media_fmt->fmt_param != NULL) {
            userdata = h264_sdp_parse_sprop_param_sets(media_fmt->fmt_param,
                       &ud_size,
                       ffmpeg->m_vft->log_msg);
            if (userdata != NULL) free_userdata = true;
            ffmpeg_message(LOG_DEBUG, "ffmpeg", "sprop len %d", ud_size);
        }
        if (ud_size > 0) {
            ffmpeg_message(LOG_DEBUG, "ffmpeg", "userdata len %d", ud_size);
            open_codec = ffmpeg_find_h264_size(ffmpeg, userdata, ud_size);
            ffmpeg_message(LOG_DEBUG, "ffmpeg", "open codec is %d", open_codec);
            run_userdata = true;
        } else {
            open_codec = false;
        }
        break;
    case CODEC_ID_MPEG4: {
        fmtp_parse_t *fmtp = NULL;
        open_codec = false;
        if (media_fmt != NULL) {
            fmtp = parse_fmtp_for_mpeg4(media_fmt->fmt_param,
                                        ffmpeg->m_vft->log_msg);
            // FIX: guard against a NULL parse result before dereferencing
            if (fmtp != NULL && fmtp->config_binary != NULL) {
                // take ownership of the config blob
                userdata = fmtp->config_binary;
                ud_size = fmtp->config_binary_len;
                fmtp->config_binary = NULL;
                free_userdata = true;
            }
        }

        if (ud_size > 0) {
            // recover width/height from the VOL header in the config
            uint8_t *vol = MP4AV_Mpeg4FindVol((uint8_t *)userdata, ud_size);
            u_int8_t TimeBits;
            u_int16_t TimeTicks;
            u_int16_t FrameDuration;
            u_int16_t FrameWidth;
            u_int16_t FrameHeight;
            u_int8_t  aspectRatioDefine;
            u_int8_t  aspectRatioWidth;
            u_int8_t  aspectRatioHeight;
            if (vol) {
                if (MP4AV_Mpeg4ParseVol(vol,
                                        ud_size - (vol - userdata),
                                        &TimeBits,
                                        &TimeTicks,
                                        &FrameDuration,
                                        &FrameWidth,
                                        &FrameHeight,
                                        &aspectRatioDefine,
                                        &aspectRatioWidth,
                                        &aspectRatioHeight)) {
                    ffmpeg->m_c->width = FrameWidth;
                    ffmpeg->m_c->height = FrameHeight;
                    open_codec = true;
                    run_userdata = true;
                }
            }
        }
        if (fmtp != NULL) {
            free_fmtp_parse(fmtp);
        }
    }
    break;
    case CODEC_ID_SVQ3:
        ffmpeg->m_c->extradata = (void *)userdata;
        ffmpeg->m_c->extradata_size = ud_size;
        if (vinfo != NULL) {
            ffmpeg->m_c->width = vinfo->width;
            ffmpeg->m_c->height = vinfo->height;
        }
        break;
    default:
        break;
    }
    if (open_codec) {
        if (avcodec_open(ffmpeg->m_c, ffmpeg->m_codec) < 0) {
            ffmpeg_message(LOG_CRIT, "ffmpeg", "failed to open codec");
            // FIX: the original leaked ffmpeg, m_c, m_picture and any
            // userdata we allocated when returning NULL here
            if (free_userdata) {
                CHECK_AND_FREE(userdata);
            }
            av_free(ffmpeg->m_c);
            av_free(ffmpeg->m_picture);
            free(ffmpeg);
            return NULL;
        }
        ffmpeg_message(LOG_DEBUG, "ffmpeg", "pixel format is %d",
                       ffmpeg->m_c->pix_fmt);
        ffmpeg->m_codec_opened = true;
        if (run_userdata) {
            uint32_t offset = 0;
            do {
                int got_picture;
                int bytes_used = avcodec_decode_video(ffmpeg->m_c,
                                                      ffmpeg->m_picture,
                                                      &got_picture,
                                                      (uint8_t *)userdata + offset,
                                                      ud_size - offset);
                // FIX: a non-positive return previously stalled or walked
                // "offset" backwards, looping forever on bad config data
                if (bytes_used <= 0)
                    break;
                offset += bytes_used;
            } while (offset < ud_size);
        }

    }

    if (free_userdata) {
        CHECK_AND_FREE(userdata);
    }
    ffmpeg->m_did_pause = 1;
    return ((codec_data_t *)ffmpeg);
}
Beispiel #4
0
/*
 * xvid 1.x variant of the VOL bootstrap: find the VOL header, create the
 * decoder from its dimensions, configure the renderer (with pixel aspect
 * ratio) and run the VOL bytes through the decoder until a real frame
 * type is reported.  Returns 0 on success, -1 on failure.
 */
static int look_for_vol (xvid_codec_t *xvid, 
			 uint8_t *bufptr, 
			 uint32_t len)
{
  uint8_t *volptr;
  int vollen;
  int ret;
  volptr = MP4AV_Mpeg4FindVol(bufptr, len);
  if (volptr == NULL) {
    return -1 ;
  }
  vollen = len - (volptr - bufptr);

  // parse timing, geometry and pixel-aspect-ratio fields from the VOL
  uint8_t timeBits;
  uint16_t timeTicks, dur, width, height;
  uint8_t aspect_ratio, aspect_ratio_w, aspect_ratio_h;
  if (MP4AV_Mpeg4ParseVol(volptr, 
			  vollen, 
			  &timeBits, 
			  &timeTicks,
			  &dur,
			  &width,
			  &height,
			  &aspect_ratio,
			  &aspect_ratio_w,
			  &aspect_ratio_h) == false) {
    return -1;
  }

  xvid_message(LOG_DEBUG, "xvid", "aspect ratio %x %d %d", 
	       aspect_ratio, aspect_ratio_w, aspect_ratio_h);

  // create the decoder instance with the dimensions from the VOL
  xvid_dec_create_t create;
  create.version = XVID_VERSION;
  create.width = width;
  create.height = height;

  ret = xvid_decore(NULL, XVID_DEC_CREATE,
		    &create, NULL);
  // FIX: the original never checked this; on failure create.handle is
  // uninitialized and would be stored and used below
  if (ret < 0) {
    xvid_message(LOG_NOTICE, "xvidif", "decoder create failed %d", ret);
    return -1;
  }

  // map the MPEG-4 PAR code onto a numerator/denominator pair;
  // 0xf (extended PAR) keeps the values parsed from the VOL
  double ar = 0.0;
  switch (aspect_ratio) {
  default:
    aspect_ratio_h = 0;   // unknown/square: no explicit ratio
    break;
  case 2:
    aspect_ratio_w = 12;
    aspect_ratio_h = 11;
    break;
  case 3:
    aspect_ratio_w = 10;
    aspect_ratio_h = 11;
    break;
  case 4:
    aspect_ratio_w = 16;
    aspect_ratio_h = 11;
    break;
  case 5:
    aspect_ratio_w = 40;
    aspect_ratio_h = 33;
    break;
  case 0xf:
    break;
  }
  if (aspect_ratio_h != 0) {
    ar = (double)aspect_ratio_w;
    ar *= (double) create.width;
    ar /= (double)aspect_ratio_h;
    ar /= (double)create.height;
  }
  xvid->m_xvid_handle = create.handle;
  xvid->m_vft->video_configure(xvid->m_ifptr, 
			       create.width,
			       create.height,
			       VIDEO_FORMAT_YUV,
			       ar);
  // we need to then run the VOL through the decoder.
  xvid_dec_frame_t dec;
  xvid_dec_stats_t stats;
  do {
    dec.version = XVID_VERSION;
    dec.bitstream = volptr;
    dec.length = vollen;
    dec.output.csp = XVID_CSP_INTERNAL;

    stats.version = XVID_VERSION;

    ret = xvid_decore(xvid->m_xvid_handle, 
		      XVID_DEC_DECODE, 
		      &dec, 
		      &stats);
    if (ret < 0) {
      xvid_message(LOG_NOTICE, "xvidif", "decoded vol ret %d", ret);
    }

    // FIX: also stop on ret == 0 - zero bytes consumed means no progress
    // and the original could loop here forever
    if (ret <= 0 || ret > vollen) {
      vollen = 0;
    } else {
      vollen -= ret;
      volptr += ret;
    }
    // we could check for vol changes, etc here, if we wanted.
  } while (vollen > 4 && stats.type == 0);
  return 0;
}