Code example #1
File: webmdec.c  Project: Brainiarc7/libvpx
int webm_read_frame(struct WebmInputContext *webm_ctx,
                    uint8_t **buffer,
                    size_t *bytes_in_buffer,
                    size_t *buffer_size) {
  if (webm_ctx->chunk >= webm_ctx->chunks) {
    uint32_t track;

    do {
      /* End of this packet, get another. */
      if (webm_ctx->pkt) {
        nestegg_free_packet(webm_ctx->pkt);
        webm_ctx->pkt = NULL;
      }

      if (nestegg_read_packet(webm_ctx->nestegg_ctx, &webm_ctx->pkt) <= 0 ||
          nestegg_packet_track(webm_ctx->pkt, &track)) {
        return 1;
      }
    } while (track != webm_ctx->video_track);

    if (nestegg_packet_count(webm_ctx->pkt, &webm_ctx->chunks))
      return 1;

    webm_ctx->chunk = 0;
  }

  if (nestegg_packet_data(webm_ctx->pkt, webm_ctx->chunk,
                          buffer, bytes_in_buffer)) {
    return 1;
  }

  webm_ctx->chunk++;
  return 0;
}
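
A minimal sketch of how this reader might be driven from a decode loop, assuming a WebmInputContext that has already been opened and had its video track selected elsewhere; decode_frame() is a hypothetical decoder hook, not part of the API above:

/* Hypothetical driver loop for webm_read_frame(). */
uint8_t *buffer = NULL;
size_t bytes_in_buffer = 0;
size_t buffer_size = 0;

while (webm_read_frame(&webm_ctx, &buffer, &bytes_in_buffer, &buffer_size) == 0) {
  /* buffer points at one compressed frame of bytes_in_buffer bytes, owned
   * by the current nestegg packet; consume it before the next call. */
  decode_frame(buffer, bytes_in_buffer);  /* hypothetical decoder hook */
}
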
Code example #2
static int audio_decode_frame(audio_context *audio_ctx, uint8_t *audio_buf, int buf_size)
{
    vorbis_context *vorbis_ctx = &audio_ctx->vorbis_ctx;
    //audio_clock += 1000000000LL * audio_ctx->last_samples / audio_ctx->frequency;
    int samples = buf_size / (vorbis_ctx->channels * 2);
    audio_ctx->last_samples = samples;

    while (samples)
    {
        if (audio_ctx->avail_samples == 0)
        {
            nestegg_packet *pkt;
            uint64_t timestamp;
            unsigned chunk, num_chunks;

            debug_printf("audio queue size=%i\n", audio_ctx->packet_queue->size);
            if ((pkt = queue_get(audio_ctx->packet_queue)) == NULL)
                return -1;
            nestegg_packet_tstamp(pkt, &timestamp);
            //audio_clock = timestamp;
            nestegg_packet_count(pkt, &num_chunks);
            for (chunk=0; chunk<num_chunks; chunk++)
            {
                unsigned char *data;
                size_t data_size;
                nestegg_packet_data(pkt, chunk, &data, &data_size);
                audio_ctx->avail_samples = vorbis_packet(vorbis_ctx, data, data_size);
            }
            nestegg_free_packet(pkt);
        }

        int samples_read = MIN(audio_ctx->avail_samples, samples);
        vorbis_getpcm(vorbis_ctx, audio_buf, samples_read);
        audio_buf += 2 * vorbis_ctx->channels * samples_read;
        audio_ctx->avail_samples -= samples_read;
        samples -= samples_read;
    }
    return buf_size;
}
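
This example leans on a MIN helper that is not shown. A conventional definition, assumed here since the call sites pass simple expressions, would be:

/* Typical MIN macro assumed by the example above; it evaluates its
 * arguments twice, so avoid side effects in them. */
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
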
Code example #3
static int read_frame(struct input_ctx      *input,
                      uint8_t               **buf,
                      size_t                *buf_sz,
                      size_t                *buf_alloc_sz) {
  char            raw_hdr[IVF_FRAME_HDR_SZ];
  size_t          new_buf_sz;
  FILE           *infile = input->infile;
  enum file_kind  kind = input->kind;
  if (kind == WEBM_FILE) {
    if (input->chunk >= input->chunks) {
      unsigned int track;

      do {
        /* End of this packet, get another. */
        if (input->pkt)
          nestegg_free_packet(input->pkt);

        if (nestegg_read_packet(input->nestegg_ctx, &input->pkt) <= 0
            || nestegg_packet_track(input->pkt, &track))
          return 1;

      } while (track != input->video_track);

      if (nestegg_packet_count(input->pkt, &input->chunks))
        return 1;
      input->chunk = 0;
    }

    if (nestegg_packet_data(input->pkt, input->chunk, buf, buf_sz))
      return 1;
    input->chunk++;

    return 0;
  }
  /* For both the raw and ivf formats, the frame size is the first 4 bytes
   * of the frame header. We just need to special case on the header
   * size.
   */
  else if (fread(raw_hdr, kind == IVF_FILE
                 ? IVF_FRAME_HDR_SZ : RAW_FRAME_HDR_SZ, 1, infile) != 1) {
    if (!feof(infile))
      fprintf(stderr, "Failed to read frame size\n");

    new_buf_sz = 0;
  } else {
    new_buf_sz = mem_get_le32(raw_hdr);

    if (new_buf_sz > 256 * 1024 * 1024) {
      fprintf(stderr, "Error: Read invalid frame size (%u)\n",
              (unsigned int)new_buf_sz);
      new_buf_sz = 0;
    }

    if (kind == RAW_FILE && new_buf_sz > 256 * 1024)
      fprintf(stderr, "Warning: Read invalid frame size (%u)"
              " - not a raw file?\n", (unsigned int)new_buf_sz);

    if (new_buf_sz > *buf_alloc_sz) {
      uint8_t *new_buf = realloc(*buf, 2 * new_buf_sz);

      if (new_buf) {
        *buf = new_buf;
        *buf_alloc_sz = 2 * new_buf_sz;
      } else {
        fprintf(stderr, "Failed to allocate compressed data buffer\n");
        new_buf_sz = 0;
      }
    }
  }

  *buf_sz = new_buf_sz;

  if (!feof(infile)) {
    if (fread(*buf, 1, *buf_sz, infile) != *buf_sz) {
      fprintf(stderr, "Failed to read full frame\n");
      return 1;
    }

    return 0;
  }

  return 1;
}
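
The IVF/raw path depends on mem_get_le32() to parse the 4-byte little-endian frame size out of the header. A sketch of what that helper must do, reading byte by byte so it works regardless of host endianness or alignment:

/* Sketch of a little-endian 32-bit load, matching how the frame size
 * is laid out in IVF and raw frame headers. */
static unsigned int mem_get_le32(const void *vmem) {
  const unsigned char *mem = (const unsigned char *)vmem;
  return ((unsigned int)mem[3] << 24) | ((unsigned int)mem[2] << 16) |
         ((unsigned int)mem[1] << 8)  |  (unsigned int)mem[0];
}
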
Code example #4
bool
SoftwareWebMVideoDecoder::DecodeVideoFrame(bool &aKeyframeSkip,
                                           int64_t aTimeThreshold)
{
  MOZ_ASSERT(mReader->OnTaskQueue());

  // Record number of frames decoded and parsed. Automatically update the
  // stats counters using the AutoNotifyDecoded stack-based class.
  AbstractMediaDecoder::AutoNotifyDecoded a(mReader->GetDecoder());

  nsAutoRef<NesteggPacketHolder> holder(mReader->NextPacket(WebMReader::VIDEO));
  if (!holder) {
    return false;
  }

  nestegg_packet* packet = holder->mPacket;
  unsigned int track = 0;
  int r = nestegg_packet_track(packet, &track);
  if (r == -1) {
    return false;
  }

  unsigned int count = 0;
  r = nestegg_packet_count(packet, &count);
  if (r == -1) {
    return false;
  }

  uint64_t tstamp = 0;
  r = nestegg_packet_tstamp(packet, &tstamp);
  if (r == -1) {
    return false;
  }

  // The end time of this frame is the start time of the next frame.  Fetch
  // the timestamp of the next packet for this track.  If we've reached the
  // end of the resource, use the file's duration as the end time of this
  // video frame.
  uint64_t next_tstamp = 0;
  nsAutoRef<NesteggPacketHolder> next_holder(mReader->NextPacket(WebMReader::VIDEO));
  if (next_holder) {
    r = nestegg_packet_tstamp(next_holder->mPacket, &next_tstamp);
    if (r == -1) {
      return false;
    }
    mReader->PushVideoPacket(next_holder.disown());
  } else {
    next_tstamp = tstamp;
    next_tstamp += tstamp - mReader->GetLastVideoFrameTime();
  }
  mReader->SetLastVideoFrameTime(tstamp);

  int64_t tstamp_usecs = tstamp / NS_PER_USEC;
  for (uint32_t i = 0; i < count; ++i) {
    unsigned char* data;
    size_t length;
    r = nestegg_packet_data(packet, i, &data, &length);
    if (r == -1) {
      return false;
    }

    vpx_codec_stream_info_t si;
    memset(&si, 0, sizeof(si));
    si.sz = sizeof(si);
    if (mReader->GetVideoCodec() == NESTEGG_CODEC_VP8) {
      vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), data, length, &si);
    } else if (mReader->GetVideoCodec() == NESTEGG_CODEC_VP9) {
      vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), data, length, &si);
    }
    if (aKeyframeSkip && (!si.is_kf || tstamp_usecs < aTimeThreshold)) {
      // Skipping to next keyframe...
      a.mParsed++; // Assume 1 frame per chunk.
      a.mDropped++;
      continue;
    }

    if (aKeyframeSkip && si.is_kf) {
      aKeyframeSkip = false;
    }

    if (vpx_codec_decode(&mVPX, data, length, nullptr, 0)) {
      return false;
    }

    // If the timestamp of the video frame is less than
    // the time threshold required then it is not added
    // to the video queue and won't be displayed.
    if (tstamp_usecs < aTimeThreshold) {
      a.mParsed++; // Assume 1 frame per chunk.
      a.mDropped++;
      continue;
    }

    vpx_codec_iter_t  iter = nullptr;
    vpx_image_t      *img;

    while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
      NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420, "WebM image format not I420");

      // Chroma shifts are rounded down as per the decoding examples in the SDK
      VideoData::YCbCrBuffer b;
      b.mPlanes[0].mData = img->planes[0];
      b.mPlanes[0].mStride = img->stride[0];
      b.mPlanes[0].mHeight = img->d_h;
      b.mPlanes[0].mWidth = img->d_w;
      b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;

      b.mPlanes[1].mData = img->planes[1];
      b.mPlanes[1].mStride = img->stride[1];
      b.mPlanes[1].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[1].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
      b.mPlanes[1].mOffset = b.mPlanes[1].mSkip = 0;

      b.mPlanes[2].mData = img->planes[2];
      b.mPlanes[2].mStride = img->stride[2];
      b.mPlanes[2].mHeight = (img->d_h + 1) >> img->y_chroma_shift;
      b.mPlanes[2].mWidth = (img->d_w + 1) >> img->x_chroma_shift;
      b.mPlanes[2].mOffset = b.mPlanes[2].mSkip = 0;

      nsIntRect pictureRect = mReader->GetPicture();
      IntRect picture = pictureRect;
      nsIntSize initFrame = mReader->GetInitialFrame();
      if (img->d_w != static_cast<uint32_t>(initFrame.width) ||
          img->d_h != static_cast<uint32_t>(initFrame.height)) {
        // Frame size is different from what the container reports. This is
        // legal in WebM, and we will preserve the ratio of the crop rectangle
        // as it was reported relative to the picture size reported by the
        // container.
        picture.x = (pictureRect.x * img->d_w) / initFrame.width;
        picture.y = (pictureRect.y * img->d_h) / initFrame.height;
        picture.width = (img->d_w * pictureRect.width) / initFrame.width;
        picture.height = (img->d_h * pictureRect.height) / initFrame.height;
      }

      VideoInfo videoInfo = mReader->GetMediaInfo().mVideo;
      nsRefPtr<VideoData> v = VideoData::Create(videoInfo,
                                                mReader->GetDecoder()->GetImageContainer(),
                                                holder->mOffset,
                                                tstamp_usecs,
                                                (next_tstamp / NS_PER_USEC) - tstamp_usecs,
                                                b,
                                                si.is_kf,
                                                -1,
                                                picture);
      if (!v) {
        return false;
      }
      a.mParsed++;
      a.mDecoded++;
      NS_ASSERTION(a.mDecoded <= a.mParsed,
        "Expect only 1 frame per chunk per packet in WebM...");
      mReader->VideoQueue().Push(v);
    }
  }

  return true;
}
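
The chroma plane setup above computes each chroma dimension as (d + 1) >> shift. For 4:2:0 content both chroma shifts are 1, so the chroma planes cover half of each luma dimension rounded up (e.g. 5 luma pixels map to 3 chroma samples). A standalone restatement of that arithmetic, as a minimal sketch:

/* Chroma plane dimension for 4:2:0, matching the (d + 1) >> shift
 * pattern above with shift == 1: e.g. 5 luma pixels -> 3 chroma. */
static unsigned int chroma_dim_i420(unsigned int luma_dim) {
  return (luma_dim + 1) >> 1;
}
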
Code example #5
File: dump.c  Project: SingingTree/nestegg
int
main(int argc, char * argv[])
{
  FILE * fp;
  int r, type;
  nestegg * ctx;
  nestegg_audio_params aparams;
  nestegg_packet * pkt;
  nestegg_video_params vparams;
  size_t length, size;
  uint64_t duration, tstamp, pkt_tstamp;
  unsigned char * codec_data, * ptr;
  unsigned int cnt, i, j, track, tracks, pkt_cnt, pkt_track;
  unsigned int data_items = 0;
  nestegg_io io = {
    stdio_read,
    stdio_seek,
    stdio_tell,
    NULL
  };

  if (argc != 2)
    return EXIT_FAILURE;

  fp = fopen(argv[1], "rb");
  if (!fp)
    return EXIT_FAILURE;

  io.userdata = fp;

  ctx = NULL;
  r = nestegg_init(&ctx, io, log_callback, -1);
  if (r != 0)
    return EXIT_FAILURE;

  nestegg_track_count(ctx, &tracks);
  r = nestegg_duration(ctx, &duration);
  if (r == 0) {
#if defined(DEBUG)
    fprintf(stderr, "media has %u tracks and duration %fs\n", tracks, duration / 1e9);
#endif
  } else {
#if defined(DEBUG)
    fprintf(stderr, "media has %u tracks and unknown duration, using 10s default\n", tracks);
#endif
    duration = 10000000000;
  }

  for (i = 0; i < tracks; ++i) {
    type = nestegg_track_type(ctx, i);
#if defined(DEBUG)
    fprintf(stderr, "track %u: type: %d codec: %d", i,
            type, nestegg_track_codec_id(ctx, i));
#endif
    nestegg_track_codec_data_count(ctx, i, &data_items);
    for (j = 0; j < data_items; ++j) {
      nestegg_track_codec_data(ctx, i, j, &codec_data, &length);
#if defined(DEBUG)
      fprintf(stderr, " (%p, %u)", codec_data, (unsigned int) length);
#endif
    }
    if (type == NESTEGG_TRACK_VIDEO) {
      nestegg_track_video_params(ctx, i, &vparams);
#if defined(DEBUG)
      fprintf(stderr, " video: %ux%u (d: %ux%u %ux%ux%ux%u)",
              vparams.width, vparams.height,
              vparams.display_width, vparams.display_height,
              vparams.crop_top, vparams.crop_left, vparams.crop_bottom, vparams.crop_right);
#endif
    } else if (type == NESTEGG_TRACK_AUDIO) {
      nestegg_track_audio_params(ctx, i, &aparams);
#if defined(DEBUG)
      fprintf(stderr, " audio: %.2fhz %u bit %u channels",
              aparams.rate, aparams.depth, aparams.channels);
#endif
    }
#if defined(DEBUG)
    fprintf(stderr, "\n");
#endif
  }

#if defined(SEEK_TEST)
#if defined(DEBUG)
  fprintf(stderr, "seek to middle\n");
#endif
  r = nestegg_track_seek(ctx, 0, duration / 2);
  if (r == 0) {
#if defined(DEBUG)
    fprintf(stderr, "middle ");
#endif
    r = nestegg_read_packet(ctx, &pkt);
    if (r == 1) {
      nestegg_packet_track(pkt, &track);
      nestegg_packet_count(pkt, &cnt);
      nestegg_packet_tstamp(pkt, &tstamp);
#if defined(DEBUG)
      fprintf(stderr, "* t %u pts %f frames %u\n", track, tstamp / 1e9, cnt);
#endif
      nestegg_free_packet(pkt);
    } else {
#if defined(DEBUG)
      fprintf(stderr, "middle seek failed\n");
#endif
    }
  }

#if defined(DEBUG)
  fprintf(stderr, "seek to ~end\n");
#endif
  r = nestegg_track_seek(ctx, 0, duration - (duration / 10));
  if (r == 0) {
#if defined(DEBUG)
    fprintf(stderr, "end ");
#endif
    r = nestegg_read_packet(ctx, &pkt);
    if (r == 1) {
      nestegg_packet_track(pkt, &track);
      nestegg_packet_count(pkt, &cnt);
      nestegg_packet_tstamp(pkt, &tstamp);
#if defined(DEBUG)
      fprintf(stderr, "* t %u pts %f frames %u\n", track, tstamp / 1e9, cnt);
#endif
      nestegg_free_packet(pkt);
    } else {
#if defined(DEBUG)
      fprintf(stderr, "end seek failed\n");
#endif
    }
  }

#if defined(DEBUG)
  fprintf(stderr, "seek to ~start\n");
#endif
  r = nestegg_track_seek(ctx, 0, duration / 10);
  if (r == 0) {
#if defined(DEBUG)
    fprintf(stderr, "start ");
#endif
    r = nestegg_read_packet(ctx, &pkt);
    if (r == 1) {
      nestegg_packet_track(pkt, &track);
      nestegg_packet_count(pkt, &cnt);
      nestegg_packet_tstamp(pkt, &tstamp);
#if defined(DEBUG)
      fprintf(stderr, "* t %u pts %f frames %u\n", track, tstamp / 1e9, cnt);
#endif
      nestegg_free_packet(pkt);
    } else {
#if defined(DEBUG)
      fprintf(stderr, "start seek failed\n");
#endif
    }
  }
#endif

  while (nestegg_read_packet(ctx, &pkt) > 0) {
    nestegg_packet_track(pkt, &pkt_track);
    nestegg_packet_count(pkt, &pkt_cnt);
    nestegg_packet_tstamp(pkt, &pkt_tstamp);

#if defined(DEBUG)
    fprintf(stderr, "t %u pts %f frames %u: ", pkt_track, pkt_tstamp / 1e9, pkt_cnt);
#endif

    for (i = 0; i < pkt_cnt; ++i) {
      nestegg_packet_data(pkt, i, &ptr, &size);
#if defined(DEBUG)
      fprintf(stderr, "%u ", (unsigned int) size);
#endif
    }
#if defined(DEBUG)
    fprintf(stderr, "\n");
#endif

    nestegg_free_packet(pkt);
  }

  nestegg_destroy(ctx);
  fclose(fp);

  return EXIT_SUCCESS;
}
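
main() wires stdio_read, stdio_seek and stdio_tell into the nestegg_io table without showing them. A sketch of stdio-backed callbacks consistent with nestegg's I/O contract, under which the read callback returns 1 on success, 0 at a clean end of stream, and -1 on error:

/* Sketch of stdio-backed nestegg_io callbacks; userdata is the FILE*
 * stored in io.userdata above. */
static int stdio_read(void *buffer, size_t length, void *userdata) {
  FILE *fp = userdata;
  size_t r = fread(buffer, length, 1, fp);
  if (r == 0 && feof(fp))
    return 0;               /* clean end of stream */
  return r == 1 ? 1 : -1;   /* short read without EOF is an error */
}

static int stdio_seek(int64_t offset, int whence, void *userdata) {
  return fseek(userdata, (long)offset, whence);
}

static int64_t stdio_tell(void *userdata) {
  return ftell(userdata);
}
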
Code example #6
static int video_thread(void *data)
{
    video_context *ctx = (video_context*) data;
    uint64_t timestamp;

    while(!quit_video)
    {
        unsigned int chunk, chunks;
        nestegg_packet *pkt;

        debug_printf("video queue size=%i\n", ctx->packet_queue->size);
        pkt = queue_get(ctx->packet_queue);
        if (quit_video || pkt == NULL) break;
        nestegg_packet_count(pkt, &chunks);
        nestegg_packet_tstamp(pkt, &timestamp);

        for (chunk = 0; chunk < chunks; ++chunk)
        {
            unsigned char *data;
            size_t data_size;
            nestegg_packet_data(pkt, chunk, &data, &data_size);

            vpx_image_t *img;
            vpx_codec_iter_t iter = NULL;
            if (vpx_codec_decode(&ctx->vpx_ctx, data, data_size, NULL, 0))
            {
                printf("Error: libvpx failed to decode chunk\n");
                quit_video = 1;
                break;
            }
            while((img = vpx_codec_get_frame(&ctx->vpx_ctx, &iter)))
            {
                assert(img->d_w == ctx->width);
                assert(img->d_h == ctx->height);
                yuv_frame *frame = yuv_frame_create(img->d_w, img->d_h);
                frame->timestamp = timestamp;

                int y;
                for(y = 0; y < img->d_h; y++)
                    memcpy(frame->lum+(y*img->d_w), img->planes[0]+(y*img->stride[0]), img->d_w);
                for(y = 0; y < img->d_h / 2; y++)
                {
                    memcpy(frame->cr+(y*img->d_w/2), img->planes[1]+(y*img->stride[1]), img->d_w / 2);
                    memcpy(frame->cb+(y*img->d_w/2), img->planes[2]+(y*img->stride[2]), img->d_w / 2);
                }

                if (queue_insert(ctx->frame_queue, (void *)frame) < 0)
                {
                    debug_printf("destroying last frame\n");
                    yuv_frame_destroy(frame);
                    break;
                }
                timestamp += ctx->frame_delay;
            }
        }
        nestegg_free_packet(pkt);
    }

    queue_insert(ctx->frame_queue, NULL);
    return 0;
}
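
The copy loops above imply the yuv_frame layout: a full-resolution luma plane plus two half-resolution chroma planes, each stored contiguously with no row padding. A hypothetical yuv_frame_create consistent with that usage (the real project's definition may differ; stdlib.h and stdint.h are assumed):

/* Hypothetical I420 frame allocator matching the copy loops above:
 * lum is w*h bytes, cr and cb are (w/2)*(h/2) bytes each, no padding. */
typedef struct yuv_frame {
    unsigned int width, height;
    uint64_t timestamp;
    uint8_t *lum, *cr, *cb;
} yuv_frame;

static yuv_frame *yuv_frame_create(unsigned int w, unsigned int h) {
    yuv_frame *f = malloc(sizeof(*f) + w * h + 2 * ((w / 2) * (h / 2)));
    if (!f)
        return NULL;
    f->width = w;
    f->height = h;
    f->lum = (uint8_t *)(f + 1);         /* planes packed after the header */
    f->cr = f->lum + w * h;
    f->cb = f->cr + (w / 2) * (h / 2);
    return f;
}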