Example #1
static hb_buffer_t* Encode(hb_work_object_t *w)
{
    hb_work_private_t *pv = w->private_data;
    hb_audio_t *audio = w->audio;
    uint64_t pts, pos;

    if (hb_list_bytes(pv->list) < pv->input_samples * sizeof(float))
    {
        return NULL;
    }

    hb_list_getbytes(pv->list, pv->input_buf, pv->input_samples * sizeof(float),
                     &pts, &pos);

    // Prepare input frame
    int out_linesize;
    int out_size = av_samples_get_buffer_size(&out_linesize,
                                              pv->context->channels,
                                              pv->samples_per_frame,
                                              pv->context->sample_fmt, 1);
    AVFrame frame = { .nb_samples = pv->samples_per_frame, };
    avcodec_fill_audio_frame(&frame,
                             pv->context->channels, pv->context->sample_fmt,
                             pv->output_buf, out_size, 1);
    if (pv->avresample != NULL)
    {
        int in_linesize;
        av_samples_get_buffer_size(&in_linesize, pv->context->channels,
                                   frame.nb_samples, AV_SAMPLE_FMT_FLT, 1);
        int out_samples = avresample_convert(pv->avresample,
                                             frame.extended_data, out_linesize,
                                             frame.nb_samples,
                                             &pv->input_buf,       in_linesize,
                                             frame.nb_samples);
        if (out_samples != pv->samples_per_frame)
        {
            // we're not doing sample rate conversion, so this shouldn't happen
            hb_log("encavcodecaWork: avresample_convert() failed");
            return NULL;
        }
    }

    // Libav requires that timebase of audio frames be in sample_rate units
    frame.pts = pts + (90000 * pos / (sizeof(float) *
                                      pv->out_discrete_channels *
                                      audio->config.out.samplerate));
    frame.pts = av_rescale(frame.pts, pv->context->sample_rate, 90000);

    // Prepare output packet
    AVPacket pkt;
    int got_packet;
    hb_buffer_t *out = hb_buffer_init(pv->max_output_bytes);
    av_init_packet(&pkt);
    pkt.data = out->data;
    pkt.size = out->alloc;

    // Encode
    int ret = avcodec_encode_audio2(pv->context, &pkt, &frame, &got_packet);
    if (ret < 0)
    {
        hb_log("encavcodeca: avcodec_encode_audio failed");
        hb_buffer_close(&out);
        return NULL;
    }

    if (got_packet && pkt.size)
    {
        out->size = pkt.size;
        // The output pts from libav is in context->time_base. Convert it back
        // to our timebase.
        out->s.start     = av_rescale_q(pkt.pts, pv->context->time_base,
                                        (AVRational){1, 90000});
        out->s.duration  = (double)90000 * pv->samples_per_frame /
                                           audio->config.out.samplerate;
        out->s.stop      = out->s.start + out->s.duration;
        out->s.type      = AUDIO_BUF;
        out->s.frametype = HB_FRAME_AUDIO;
    }
    else
    {
        hb_buffer_close(&out);
        return Encode(w);
    }

    return out;
}

static hb_buffer_t * Flush( hb_work_object_t * w )
{
    hb_buffer_t *first, *buf, *last;

    first = last = buf = Encode( w );
    while( buf )
    {
        last = buf;
        buf->next = Encode( w );
        buf = buf->next;
    }

    if( last )
    {
        last->next = hb_buffer_init( 0 );
    }
    else
    {
        first = hb_buffer_init( 0 );
    }

    return first;
}
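
Example #1 above derives the frame pts in two steps: the byte position in the input fifo is first converted into ticks on HandBrake's 90 kHz clock, and the result is then rescaled into the encoder's 1/sample_rate time base with av_rescale(). A minimal sketch of those two conversions, using hypothetical channel-count and sample-rate values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int64_t  pts_90k     = 450000; /* 5 s on the 90 kHz clock (hypothetical) */
    uint64_t pos         = 4096;   /* byte offset into the sample fifo */
    int      channels    = 2;      /* hypothetical */
    int      sample_rate = 48000;  /* hypothetical */

    /* bytes -> frames -> seconds -> 90 kHz ticks, as in Encode() */
    pts_90k += 90000 * pos / (sizeof(float) * channels * sample_rate);

    /* 90 kHz ticks -> 1/sample_rate units; av_rescale() computes
     * a*b/c without intermediate overflow */
    int64_t frame_pts = av_rescale(pts_90k, sample_rate, 90000);

    printf("frame.pts = %" PRId64 " (in 1/%d units)\n", frame_pts, sample_rate);
    return 0;
}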
Example #2
static bool encode_audio(ffemu_t *handle, AVPacket *pkt, bool dry)
{
   av_init_packet(pkt);
   pkt->data = handle->audio.outbuf;
   pkt->size = handle->audio.outbuf_size;

#ifdef HAVE_FFMPEG_AVCODEC_ENCODE_AUDIO2
   AVFrame frame;
   avcodec_get_frame_defaults(&frame);

   frame.nb_samples = handle->audio.frames_in_buffer;
   frame.pts = handle->audio.frame_cnt;

   int samples_size = frame.nb_samples *
      handle->audio.codec->channels *
      sizeof(int16_t);

   avcodec_fill_audio_frame(&frame, handle->audio.codec->channels,
         handle->audio.codec->sample_fmt, (const uint8_t*)handle->audio.buffer,
         samples_size, 1);

   int got_packet = 0;
   if (avcodec_encode_audio2(handle->audio.codec,
            pkt, dry ? NULL : &frame, &got_packet) < 0)
      return false;

   if (!got_packet)
   {
      pkt->size = 0;
      pkt->pts = AV_NOPTS_VALUE;
      pkt->dts = AV_NOPTS_VALUE;
      return true;
   }

   if (pkt->pts != (int64_t)AV_NOPTS_VALUE)
   {
      pkt->pts = av_rescale_q(pkt->pts,
            handle->audio.codec->time_base,
            handle->muxer.astream->time_base);
   }

   if (pkt->dts != (int64_t)AV_NOPTS_VALUE)
   {
      pkt->dts = av_rescale_q(pkt->dts,
            handle->audio.codec->time_base,
            handle->muxer.astream->time_base);
   }

#else
   if (dry)
      return false;

   memset(handle->audio.buffer + handle->audio.frames_in_buffer * handle->audio.codec->channels, 0,
         (handle->audio.codec->frame_size - handle->audio.frames_in_buffer) *
         handle->audio.codec->channels * sizeof(int16_t));

   int out_size = avcodec_encode_audio(handle->audio.codec,
         handle->audio.outbuf, handle->audio.outbuf_size, handle->audio.buffer);

   if (out_size < 0)
      return false;

   if (out_size == 0)
   {
      pkt->size = 0;
      return true;
   }

   pkt->size = out_size;

   if (handle->audio.codec->coded_frame->pts != (int64_t)AV_NOPTS_VALUE)
   {
      pkt->pts = av_rescale_q(handle->audio.codec->coded_frame->pts,
            handle->audio.codec->time_base,
            handle->muxer.astream->time_base);
   }
   else
      pkt->pts = AV_NOPTS_VALUE;

   if (handle->audio.codec->coded_frame->key_frame)
      pkt->flags |= AV_PKT_FLAG_KEY;
#endif

   pkt->stream_index = handle->muxer.astream->index;
   return true;
}
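
Both branches of Example #2 end with the same pattern: packet timestamps produced in the codec time base are rescaled into the muxer stream's time base, skipping values that are AV_NOPTS_VALUE. A minimal sketch of that guard-and-rescale step, with enc_tb and st_tb standing in for the two time bases; newer libavcodec versions also provide av_packet_rescale_ts(), which handles pts, dts and duration in one call:

#include <libavcodec/avcodec.h>
#include <libavutil/mathematics.h>

/* Rescale a packet's timestamps from the encoder time base to the
 * stream time base, leaving unset timestamps untouched. */
static void rescale_packet_ts(AVPacket *pkt, AVRational enc_tb, AVRational st_tb)
{
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts = av_rescale_q(pkt->pts, enc_tb, st_tb);
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts = av_rescale_q(pkt->dts, enc_tb, st_tb);
}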
Example #3
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    AVFilterContext  *ctx = inlink->dst;
    ResampleContext    *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret;

    if (s->avr) {
        AVFilterBufferRef *buf_out;
        int delay, nb_samples;

        /* maximum possible samples lavr can output */
        delay      = avresample_get_delay(s->avr);
        nb_samples = av_rescale_rnd(buf->audio->nb_samples + delay,
                                    outlink->sample_rate, inlink->sample_rate,
                                    AV_ROUND_UP);

        buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
        if (!buf_out) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret     = avresample_convert(s->avr, (void**)buf_out->extended_data,
                                     buf_out->linesize[0], nb_samples,
                                     (void**)buf->extended_data, buf->linesize[0],
                                     buf->audio->nb_samples);
        if (ret < 0) {
            avfilter_unref_buffer(buf_out);
            goto fail;
        }

        av_assert0(!avresample_available(s->avr));

        if (s->next_pts == AV_NOPTS_VALUE) {
            if (buf->pts == AV_NOPTS_VALUE) {
                av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
                       "assuming 0.\n");
                s->next_pts = 0;
            } else
                s->next_pts = av_rescale_q(buf->pts, inlink->time_base,
                                           outlink->time_base);
        }

        if (ret > 0) {
            buf_out->audio->nb_samples = ret;
            if (buf->pts != AV_NOPTS_VALUE) {
                buf_out->pts = av_rescale_q(buf->pts, inlink->time_base,
                                            outlink->time_base) -
                               av_rescale(delay, outlink->sample_rate,
                                          inlink->sample_rate);
            } else
                buf_out->pts = s->next_pts;

            s->next_pts = buf_out->pts + buf_out->audio->nb_samples;

            ret = ff_filter_samples(outlink, buf_out);
            s->got_output = 1;
        }

fail:
        avfilter_unref_buffer(buf);
    } else {
        ret = ff_filter_samples(outlink, buf);
        s->got_output = 1;
    }

    return ret;
}
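
The buffer sizing in Example #3 relies on one identity: a resampler can never emit more than its buffered delay plus the rescaled input count, rounded up. A minimal sketch of that bound, with hypothetical arguments:

#include <libavutil/mathematics.h>

/* Upper bound on the number of output samples for one convert call:
 * rescale (input + buffered delay) from the input to the output rate,
 * rounding up so the output buffer is never too small. */
static int max_output_samples(int in_samples, int delay,
                              int out_rate, int in_rate)
{
    return (int)av_rescale_rnd(in_samples + delay,
                               out_rate, in_rate, AV_ROUND_UP);
}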
Example #4
void AVIDump::AddFrame(const u8* data, int width, int height, int stride, const Frame& state)
{
  // Assume that the timing is valid, if the savestate id of the new frame
  // doesn't match the last one.
  if (state.savestate_index != s_last_savestate_index)
  {
    s_last_savestate_index = state.savestate_index;
    s_last_frame_is_valid = false;
  }

  CheckResolution(width, height);
  s_src_frame->data[0] = const_cast<u8*>(data);
  s_src_frame->linesize[0] = stride;
  s_src_frame->format = s_pix_fmt;
  s_src_frame->width = s_width;
  s_src_frame->height = s_height;

  // Convert image from {BGR24, RGBA} to desired pixel format
  if ((s_sws_context =
           sws_getCachedContext(s_sws_context, width, height, s_pix_fmt, s_width, s_height,
                                s_stream->codec->pix_fmt, SWS_BICUBIC, nullptr, nullptr, nullptr)))
  {
    sws_scale(s_sws_context, s_src_frame->data, s_src_frame->linesize, 0, height,
              s_scaled_frame->data, s_scaled_frame->linesize);
  }

  // Encode and write the image.
  AVPacket pkt;
  PreparePacket(&pkt);
  int got_packet = 0;
  int error = 0;
  u64 delta;
  s64 last_pts;
  // Check to see if the first frame being dumped is the first frame of output from the emulator.
  // This prevents an issue with starting dumping later in emulation from placing the frames
  // incorrectly.
  if (!s_last_frame_is_valid)
  {
    s_last_frame = state.ticks;
    s_last_frame_is_valid = true;
  }
  if (!s_start_dumping && state.first_frame)
  {
    delta = state.ticks;
    last_pts = AV_NOPTS_VALUE;
    s_start_dumping = true;
  }
  else
  {
    delta = state.ticks - s_last_frame;
    last_pts = (s_last_pts * s_stream->codec->time_base.den) / state.ticks_per_second;
  }
  u64 pts_in_ticks = s_last_pts + delta;
  s_scaled_frame->pts = (pts_in_ticks * s_stream->codec->time_base.den) / state.ticks_per_second;
  if (s_scaled_frame->pts != last_pts)
  {
    s_last_frame = state.ticks;
    s_last_pts = pts_in_ticks;
    error = avcodec_encode_video2(s_stream->codec, &pkt, s_scaled_frame, &got_packet);
  }
  while (!error && got_packet)
  {
    // Write the compressed frame in the media file.
    if (pkt.pts != (s64)AV_NOPTS_VALUE)
    {
      pkt.pts = av_rescale_q(pkt.pts, s_stream->codec->time_base, s_stream->time_base);
    }
    if (pkt.dts != (s64)AV_NOPTS_VALUE)
    {
      pkt.dts = av_rescale_q(pkt.dts, s_stream->codec->time_base, s_stream->time_base);
    }
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(56, 60, 100)
    if (s_stream->codec->coded_frame->key_frame)
      pkt.flags |= AV_PKT_FLAG_KEY;
#endif
    pkt.stream_index = s_stream->index;
    av_interleaved_write_frame(s_format_context, &pkt);

    // Handle delayed frames.
    PreparePacket(&pkt);
    error = avcodec_encode_video2(s_stream->codec, &pkt, nullptr, &got_packet);
  }
  if (error)
    ERROR_LOG(VIDEO, "Error while encoding video: %d", error);
}
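
Example #4 converts emulator ticks into codec time-base units with plain 64-bit arithmetic, (ticks * time_base.den) / ticks_per_second, which can overflow for very long dumps. A minimal sketch of the same conversion done with av_rescale(), which computes a*b/c without intermediate overflow; it assumes time_base.num == 1, as the code above implicitly does:

#include <stdint.h>
#include <libavutil/mathematics.h>

/* Ticks on the emulated clock -> pts in a 1/tb_den codec time base.
 * tb_den and ticks_per_second are hypothetical inputs. */
static int64_t ticks_to_pts(uint64_t ticks, int tb_den,
                            uint64_t ticks_per_second)
{
    return av_rescale(ticks, tb_den, ticks_per_second);
}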
Example #5
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext  *ctx = inlink->dst;
    ASyncContext       *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int nb_channels = av_get_channel_layout_nb_channels(buf->channel_layout);
    int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? buf->pts :
                  av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
    int out_size, ret;
    int64_t delta;
    int64_t new_pts;

    /* buffer data until we get the next timestamp */
    if (s->pts == AV_NOPTS_VALUE || pts == AV_NOPTS_VALUE) {
        if (pts != AV_NOPTS_VALUE) {
            s->pts = pts - get_delay(s);
        }
        return write_to_fifo(s, buf);
    }

    if (s->first_pts != AV_NOPTS_VALUE) {
        handle_trimming(ctx);
        if (!avresample_available(s->avr))
            return write_to_fifo(s, buf);
    }

    /* when we have two timestamps, compute how many samples would we have
     * to add/remove to get proper sync between data and timestamps */
    delta    = pts - s->pts - get_delay(s);
    out_size = avresample_available(s->avr);

    if (llabs(delta) > s->min_delta ||
        (s->first_frame && delta && s->first_pts != AV_NOPTS_VALUE)) {
        av_log(ctx, AV_LOG_VERBOSE, "Discontinuity - %"PRId64" samples.\n", delta);
        out_size = av_clipl_int32((int64_t)out_size + delta);
    } else {
        if (s->resample) {
            // adjust the compensation if delta is non-zero
            int delay = get_delay(s);
            int comp = s->comp + av_clip(delta * inlink->sample_rate / delay,
                                         -s->max_comp, s->max_comp);
            if (comp != s->comp) {
                av_log(ctx, AV_LOG_VERBOSE, "Compensating %d samples per second.\n", comp);
                if (avresample_set_compensation(s->avr, comp, inlink->sample_rate) == 0) {
                    s->comp = comp;
                }
            }
        }
        // adjust PTS to avoid monotonicity errors with input PTS jitter
        pts -= delta;
        delta = 0;
    }

    if (out_size > 0) {
        AVFrame *buf_out = ff_get_audio_buffer(outlink, out_size);
        if (!buf_out) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        if (s->first_frame && delta > 0) {
            int planar = av_sample_fmt_is_planar(buf_out->format);
            int planes = planar ?  nb_channels : 1;
            int block_size = av_get_bytes_per_sample(buf_out->format) *
                             (planar ? 1 : nb_channels);

            int ch;

            av_samples_set_silence(buf_out->extended_data, 0, delta,
                                   nb_channels, buf->format);

            for (ch = 0; ch < planes; ch++)
                buf_out->extended_data[ch] += delta * block_size;

            avresample_read(s->avr, buf_out->extended_data, out_size);

            for (ch = 0; ch < planes; ch++)
                buf_out->extended_data[ch] -= delta * block_size;
        } else {
            avresample_read(s->avr, buf_out->extended_data, out_size);

            if (delta > 0) {
                av_samples_set_silence(buf_out->extended_data, out_size - delta,
                                       delta, nb_channels, buf->format);
            }
        }
        buf_out->pts = s->pts;
        ret = ff_filter_frame(outlink, buf_out);
        if (ret < 0)
            goto fail;
        s->got_output = 1;
    } else if (avresample_available(s->avr)) {
        av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping "
               "whole buffer.\n");
    }

    /* drain any remaining buffered data */
    avresample_read(s->avr, NULL, avresample_available(s->avr));

    new_pts = pts - avresample_get_delay(s->avr);
    /* check for s->pts monotonicity */
    if (new_pts > s->pts) {
        s->pts = new_pts;
        ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
                                 buf->linesize[0], buf->nb_samples);
    } else {
        av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping "
               "whole buffer.\n");
        ret = 0;
    }

    s->first_frame = 0;
fail:
    av_frame_free(&buf);

    return ret;
}
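
Example #5 works because the filter's output time base is 1/sample_rate: once an incoming pts has been rescaled into that base, the difference between the expected and actual pts is directly a sample count that can be inserted or dropped. A minimal sketch of that conversion (in_tb and sample_rate are hypothetical):

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

/* Express a timestamp in samples by rescaling into a 1/sample_rate
 * time base; AV_NOPTS_VALUE is passed through unchanged. */
static int64_t pts_to_samples(int64_t pts, AVRational in_tb, int sample_rate)
{
    if (pts == AV_NOPTS_VALUE)
        return AV_NOPTS_VALUE;
    return av_rescale_q(pts, in_tb, (AVRational){ 1, sample_rate });
}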
Example #6
static int omx_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *frame, int *got_packet)
{
    OMXCodecContext *s = avctx->priv_data;
    int ret = 0;
    OMX_BUFFERHEADERTYPE* buffer;
    OMX_ERRORTYPE err;

    if (frame) {
        uint8_t *dst[4];
        int linesize[4];
        int need_copy;
        buffer = get_buffer(&s->input_mutex, &s->input_cond,
                            &s->num_free_in_buffers, s->free_in_buffers, 1);

        buffer->nFilledLen = av_image_fill_arrays(dst, linesize, buffer->pBuffer, avctx->pix_fmt, s->stride, s->plane_size, 1);

        if (s->input_zerocopy) {
            uint8_t *src[4] = { NULL };
            int src_linesize[4];
            av_image_fill_arrays(src, src_linesize, frame->data[0], avctx->pix_fmt, s->stride, s->plane_size, 1);
            if (frame->linesize[0] == src_linesize[0] &&
                frame->linesize[1] == src_linesize[1] &&
                frame->linesize[2] == src_linesize[2] &&
                frame->data[1] == src[1] &&
                frame->data[2] == src[2]) {
                // If the input frame happens to have all planes stored contiguously,
                // with the right strides, just clone the frame and set the OMX
                // buffer header to point to it
                AVFrame *local = av_frame_clone(frame);
                if (!local) {
                    // Return the buffer to the queue so it's not lost
                    append_buffer(&s->input_mutex, &s->input_cond, &s->num_free_in_buffers, s->free_in_buffers, buffer);
                    return AVERROR(ENOMEM);
                } else {
                    buffer->pAppPrivate = local;
                    buffer->pOutputPortPrivate = NULL;
                    buffer->pBuffer = local->data[0];
                    need_copy = 0;
                }
            } else {
                // If not, we need to allocate a new buffer with the right
                // size and copy the input frame into it.
                uint8_t *buf = NULL;
                int image_buffer_size = av_image_get_buffer_size(avctx->pix_fmt, s->stride, s->plane_size, 1);
                if (image_buffer_size >= 0)
                    buf = av_malloc(image_buffer_size);
                if (!buf) {
                    // Return the buffer to the queue so it's not lost
                    append_buffer(&s->input_mutex, &s->input_cond, &s->num_free_in_buffers, s->free_in_buffers, buffer);
                    return AVERROR(ENOMEM);
                } else {
                    buffer->pAppPrivate = buf;
                    // Mark that pAppPrivate is an av_malloc'ed buffer, not an AVFrame
                    buffer->pOutputPortPrivate = (void*) 1;
                    buffer->pBuffer = buf;
                    need_copy = 1;
                    buffer->nFilledLen = av_image_fill_arrays(dst, linesize, buffer->pBuffer, avctx->pix_fmt, s->stride, s->plane_size, 1);
                }
            }
        } else {
            need_copy = 1;
        }
        if (need_copy)
            av_image_copy(dst, linesize, (const uint8_t**) frame->data, frame->linesize, avctx->pix_fmt, avctx->width, avctx->height);
        buffer->nFlags = OMX_BUFFERFLAG_ENDOFFRAME;
        buffer->nOffset = 0;
        // Convert the timestamps to microseconds; some encoders can ignore
        // the framerate and do VFR bit allocation based on timestamps.
        buffer->nTimeStamp = to_omx_ticks(av_rescale_q(frame->pts, avctx->time_base, AV_TIME_BASE_Q));
        err = OMX_EmptyThisBuffer(s->handle, buffer);
        if (err != OMX_ErrorNone) {
            append_buffer(&s->input_mutex, &s->input_cond, &s->num_free_in_buffers, s->free_in_buffers, buffer);
            av_log(avctx, AV_LOG_ERROR, "OMX_EmptyThisBuffer failed: %x\n", err);
            return AVERROR_UNKNOWN;
        }
        s->num_in_frames++;
    }

    while (!*got_packet && ret == 0) {
        // Only wait for output if flushing and not all frames have been output
        buffer = get_buffer(&s->output_mutex, &s->output_cond,
                            &s->num_done_out_buffers, s->done_out_buffers,
                            !frame && s->num_out_frames < s->num_in_frames);
        if (!buffer)
            break;

        if (buffer->nFlags & OMX_BUFFERFLAG_CODECCONFIG && avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
            if ((ret = av_reallocp(&avctx->extradata, avctx->extradata_size + buffer->nFilledLen + AV_INPUT_BUFFER_PADDING_SIZE)) < 0) {
                avctx->extradata_size = 0;
                goto end;
            }
            memcpy(avctx->extradata + avctx->extradata_size, buffer->pBuffer + buffer->nOffset, buffer->nFilledLen);
            avctx->extradata_size += buffer->nFilledLen;
            memset(avctx->extradata + avctx->extradata_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        } else {
            if (buffer->nFlags & OMX_BUFFERFLAG_ENDOFFRAME)
                s->num_out_frames++;
            if (!(buffer->nFlags & OMX_BUFFERFLAG_ENDOFFRAME) || !pkt->data) {
                // If the output packet isn't preallocated, just concatenate everything in our
                // own buffer
                int newsize = s->output_buf_size + buffer->nFilledLen + AV_INPUT_BUFFER_PADDING_SIZE;
                if ((ret = av_reallocp(&s->output_buf, newsize)) < 0) {
                    s->output_buf_size = 0;
                    goto end;
                }
                memcpy(s->output_buf + s->output_buf_size, buffer->pBuffer + buffer->nOffset, buffer->nFilledLen);
                s->output_buf_size += buffer->nFilledLen;
                if (buffer->nFlags & OMX_BUFFERFLAG_ENDOFFRAME) {
                    if ((ret = av_packet_from_data(pkt, s->output_buf, s->output_buf_size)) < 0) {
                        av_freep(&s->output_buf);
                        s->output_buf_size = 0;
                        goto end;
                    }
                    s->output_buf = NULL;
                    s->output_buf_size = 0;
                }
            } else {
                // End of frame, and the caller provided a preallocated frame
                if ((ret = ff_alloc_packet2(avctx, pkt, s->output_buf_size + buffer->nFilledLen, 0)) < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n",
                           (int)(s->output_buf_size + buffer->nFilledLen));
                    goto end;
                }
                memcpy(pkt->data, s->output_buf, s->output_buf_size);
                memcpy(pkt->data + s->output_buf_size, buffer->pBuffer + buffer->nOffset, buffer->nFilledLen);
                av_freep(&s->output_buf);
                s->output_buf_size = 0;
            }
            if (buffer->nFlags & OMX_BUFFERFLAG_ENDOFFRAME) {
                pkt->pts = av_rescale_q(from_omx_ticks(buffer->nTimeStamp), AV_TIME_BASE_Q, avctx->time_base);
                // We don't currently enable B-frames for the encoders, so set
                // pkt->dts = pkt->pts. (The calling code behaves worse if the encoder
                // doesn't set the dts).
                pkt->dts = pkt->pts;
                if (buffer->nFlags & OMX_BUFFERFLAG_SYNCFRAME)
                    pkt->flags |= AV_PKT_FLAG_KEY;
                *got_packet = 1;
            }
        }
end:
        err = OMX_FillThisBuffer(s->handle, buffer);
        if (err != OMX_ErrorNone) {
            append_buffer(&s->output_mutex, &s->output_cond, &s->num_done_out_buffers, s->done_out_buffers, buffer);
            av_log(avctx, AV_LOG_ERROR, "OMX_FillThisBuffer failed: %x\n", err);
            ret = AVERROR_UNKNOWN;
        }
    }
    return ret;
}
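
Example #6 crosses the codec/OMX boundary by rescaling every timestamp to and from AV_TIME_BASE_Q, i.e. microseconds, which is what OMX ticks represent. A minimal sketch of the two directions (tb is a hypothetical codec time base):

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

/* Codec time base -> microseconds, as done before OMX_EmptyThisBuffer. */
static int64_t pts_to_usec(int64_t pts, AVRational tb)
{
    return av_rescale_q(pts, tb, AV_TIME_BASE_Q);
}

/* Microseconds -> codec time base, as done when the packet comes back. */
static int64_t usec_to_pts(int64_t usec, AVRational tb)
{
    return av_rescale_q(usec, AV_TIME_BASE_Q, tb);
}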
Example #7
/*
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket pkt;
    int ret, i, j;

    is  = ifile->ctx;
    // ****<<  Capture a frame: audio/video/subtitle
    ret = get_input_packet(ifile, &pkt); // ****<<  Call stack: av_read_frame() --> read_frame_internal() --> ff_read_packet() --> s->iformat->read_packet(s, pkt); --> dshow_read_frame();

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->filename, ret);
            if (exit_on_error)
                exit_program(1);
        }
        ifile->eof_reached = 1;

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed)
                output_packet(ist, NULL);

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                        (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    close_output_stream(ost);
            }
        }

        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
                         is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt.stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, &pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt.stream_index];

    ist->data_size += pkt.size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64) {
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
                && ifile->ts_offset == -is->start_time
                && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1)
        if (ist->st->nb_side_data)
            av_packet_split_side_data(&pkt);
    for (i = 0; i < ist->st->nb_side_data; i++) {
        AVPacketSideData *src_sd = &ist->st->side_data[i];
        uint8_t *dst_data;

        if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
            continue;

        dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
        if (!dst_data)
            exit_program(1);

        memcpy(dst_data, src_sd->data, src_sd->size);
    }

    // ****<<  pkt.pts:   capture timestamp (usually derived from the system clock)
    // ****<<  ts_offset: start offset in AV_TIME_BASE units (usually the negative of the first frame's capture timestamp)
    // ****<<  resulting pts: timestamp relative to the first frame, in the stream's own time base
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;

    if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
            && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
        int64_t delta   = pkt_dts - ifile->last_ts;
        if(delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
                 ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
            ifile->ts_offset -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
            !copy_ts) {
        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
        int64_t delta   = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                    (delta > 1LL*dts_delta_threshold*AV_TIME_BASE &&
                     ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE) ||
                    pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                       delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                    (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                pkt.dts = AV_NOPTS_VALUE;
            }
            if (pkt.pts != AV_NOPTS_VALUE) {
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta   = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                        (delta > 1LL*dts_error_threshold*AV_TIME_BASE && ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                    pkt.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    sub2video_heartbeat(ist, pkt.pts);

    ret = output_packet(ist, &pkt); // ****<<  see output_packet.c
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
               ist->file_index, ist->st->index, av_err2str(ret));
        if (exit_on_error)
            exit_program(1);
    }

discard_packet:
    av_free_packet(&pkt);

    return 0;
}
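
A recurring step in Example #7 is applying ifile->ts_offset, which is tracked in AV_TIME_BASE units, to packet timestamps that live in the stream's own time base, so the offset has to be rescaled per stream. A minimal sketch of that step (ts_offset and st_tb are hypothetical inputs):

#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

/* Add a global offset, kept in AV_TIME_BASE_Q, to the timestamps of a
 * packet expressed in its stream's time base. */
static void apply_ts_offset(AVPacket *pkt, int64_t ts_offset, AVRational st_tb)
{
    int64_t off = av_rescale_q(ts_offset, AV_TIME_BASE_Q, st_tb);
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += off;
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts += off;
}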
Example #8
jint perfetch_start(JNIEnv *pEnv, jobject pObj, jobject pMainAct, jstring pFileName, jint video_fps) {
	AVFormatContext *pFormatCtx = NULL;
	int             i, videoStream;
	AVCodecContext  *pCodecCtx = NULL;
	AVCodec         *pCodec = NULL;
	AVFrame         *pFrame = NULL;
	AVFrame         *pFrameRGBA = NULL;
	AVPacket        packet;
	int             frameFinished;
	jobject			bitmap;
	void* 			buffer;

	AVDictionary    *optionsDict = NULL;
	struct SwsContext      *sws_ctx = NULL;
	char *videoFileName;

	// Register all formats and codecs
	av_register_all();

	//get C string from JNI jstring
	videoFileName = (char *)(*pEnv)->GetStringUTFChars(pEnv, pFileName, NULL);

	// Open video file
	if(avformat_open_input(&pFormatCtx, videoFileName, NULL, NULL)!=0)
		return -1; // Couldn't open file

	// Retrieve stream information
	if(avformat_find_stream_info(pFormatCtx, NULL)<0)
		return -1; // Couldn't find stream information

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, videoFileName, 0);

	// Find the first video stream
	videoStream=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++) {
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
			videoStream=i;
			break;
		}
	}
	if(videoStream==-1)
		return -1; // Didn't find a video stream

	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; // Codec not found
	}
	// Open codec
	if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
		return -1; // Could not open codec

	// Allocate video frame
	pFrame=avcodec_alloc_frame();

	// Allocate an AVFrame structure
	pFrameRGBA=avcodec_alloc_frame();
	if(pFrameRGBA==NULL)
		return -1;

	//create a bitmap as the buffer for pFrameRGBA
	bitmap = createBitmap(pEnv, pCodecCtx->width, pCodecCtx->height);
	if (AndroidBitmap_lockPixels(pEnv, bitmap, &buffer) < 0)
		return -1;
	//get the scaling context
	sws_ctx = sws_getContext
    (
        pCodecCtx->width,
        pCodecCtx->height,
        pCodecCtx->pix_fmt,
        pCodecCtx->width,
        pCodecCtx->height,
        AV_PIX_FMT_RGBA,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
    );

	// Assign appropriate parts of bitmap to image planes in pFrameRGBA
	// Note that pFrameRGBA is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *)pFrameRGBA, buffer, AV_PIX_FMT_RGBA,
		 pCodecCtx->width, pCodecCtx->height);

	// Read frames and pass every Nth decoded frame to the Java layer
	i=0;
	int ret;
	int fps = 0;
	int previous_pts = 0;
	int current_pts = 0;
	int prefetch_frame_index = 100;
	finish = 0;
	while(finish == 0) {
//		LOGI("av_read_frame start");
		ret = av_read_frame(pFormatCtx, &packet);
//		LOGI("av_read_frame end");
		if (ret <0){
			av_free_packet(&packet);
			break;
		}
		// Is this a packet from the video stream?
		if(packet.stream_index==videoStream) {

			// Decode video frame
			LOGI("avcodec_decode_video2 start");
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
					&packet);
			LOGI("avcodec_decode_video2 end");
			// Did we get a video frame?
			if(frameFinished) {
				if (fps == 0){
					if (i == 0){
						previous_pts = av_frame_get_best_effort_timestamp ( pFrame );
					}else if (i == 8){
						current_pts = av_frame_get_best_effort_timestamp ( pFrame );
						//fps = 800000/(current_pts - previous_pts);
						fps = video_fps;
						LOGI("video fps %d", fps);
						prefetch_frame_index = fps*16/60;
						LOGI("prefetch_frame_index %d", prefetch_frame_index);
					}
				}


				if (i++%prefetch_frame_index == 0 && i < 1500){
					// Convert the image from its native format to RGBA
					sws_scale
					(
							sws_ctx,
							(uint8_t const * const *)pFrame->data,
							pFrame->linesize,
							0,
							pCodecCtx->height,
							pFrameRGBA->data,
							pFrameRGBA->linesize
					);

					// return the frame to java layer
					int64_t pts = av_frame_get_best_effort_timestamp ( pFrame );
					pts = av_rescale_q ( pts,  pFormatCtx->streams[videoStream]->time_base, AV_TIME_BASE_Q );
					LOGI("save frame %d, pts: %d", i, (int)pts);
					SaveFrame(pEnv, pMainAct, bitmap, pCodecCtx->width, pCodecCtx->height, i, pts);
				}
			}
		}
		// Free the packet that was allocated by av_read_frame
		av_free_packet(&packet);
	}
	LOGI("final frame %d", i);
	//unlock the bitmap
	AndroidBitmap_unlockPixels(pEnv, bitmap);

	// Free the RGB image
	av_free(pFrameRGBA);

	// Free the YUV frame
	av_free(pFrame);

	// Close the codec
	avcodec_close(pCodecCtx);

	// Close the video file
	avformat_close_input(&pFormatCtx);

	return 0;
}
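
Example #8 normalizes each decoded frame's best-effort timestamp from the stream time base into AV_TIME_BASE_Q before handing it to the Java layer. A minimal sketch of the same normalization, extended to seconds for display (st_tb is a hypothetical stream time base):

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

/* Stream time base -> seconds, via the AV_TIME_BASE_Q intermediate
 * used in the example above. Returns a negative value if unset. */
static double pts_to_seconds(int64_t pts, AVRational st_tb)
{
    if (pts == AV_NOPTS_VALUE)
        return -1.0;
    return av_rescale_q(pts, st_tb, AV_TIME_BASE_Q) / (double)AV_TIME_BASE;
}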
Example #9
int main(int argc, char **argv)
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    const char *in_filename, *out_filename;
    int ret, i;
    if (argc < 3) {
        printf("usage: %s input output\n"
               "API example program to remux a media file with libavformat and libavcodec.\n"
               "The output format is guessed according to the file extension.\n"
               "\n", argv[0]);
        return 1;
    }
    in_filename  = argv[1];
    out_filename = argv[2];
    av_register_all();
    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }
    av_dump_format(ifmt_ctx, 0, in_filename, 0);
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    ofmt = ofmt_ctx->oformat;
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }
    while (1) {
        AVStream *in_stream, *out_stream;
        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;
        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
        log_packet(ifmt_ctx, &pkt, "in");
        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(ofmt_ctx, &pkt, "out");
        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);
    }
    av_write_trailer(ofmt_ctx);
end:
    avformat_close_input(&ifmt_ctx);
    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }
    return 0;
}
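
The packet-copy step of Example #9 is the canonical remux rescale: AV_ROUND_NEAR_INF picks the nearest representable tick in the output time base, and AV_ROUND_PASS_MINMAX lets AV_NOPTS_VALUE pass through without an explicit guard. A minimal sketch of that step in isolation (in_tb and out_tb are hypothetical):

#include <libavcodec/avcodec.h>
#include <libavutil/mathematics.h>

/* Rescale a stream-copied packet from the input to the output stream
 * time base; pos is unknown in the new file, hence -1. */
static void remux_rescale(AVPacket *pkt, AVRational in_tb, AVRational out_tb)
{
    pkt->pts = av_rescale_q_rnd(pkt->pts, in_tb, out_tb,
                                AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    pkt->dts = av_rescale_q_rnd(pkt->dts, in_tb, out_tb,
                                AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    pkt->duration = av_rescale_q(pkt->duration, in_tb, out_tb);
    pkt->pos = -1;
}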
Example #10
static int oggvorbis_encode_frame(AVCodecContext *avccontext,
                                  unsigned char *packets,
                           int buf_size, void *data)
{
    OggVorbisContext *context = avccontext->priv_data ;
    ogg_packet op ;
    signed short *audio = data ;
    int l;

    if(data) {
        int samples = OGGVORBIS_FRAME_SIZE;
        float **buffer ;

        buffer = vorbis_analysis_buffer(&context->vd, samples) ;
        if(context->vi.channels == 1) {
            for(l = 0 ; l < samples ; l++)
                buffer[0][l]=audio[l]/32768.f;
        } else {
            for(l = 0 ; l < samples ; l++){
                buffer[0][l]=audio[l*2]/32768.f;
                buffer[1][l]=audio[l*2+1]/32768.f;
            }
        }
        vorbis_analysis_wrote(&context->vd, samples) ;
    } else {
        if(!context->eof)
            vorbis_analysis_wrote(&context->vd, 0) ;
        context->eof = 1;
    }

    while(vorbis_analysis_blockout(&context->vd, &context->vb) == 1) {
        vorbis_analysis(&context->vb, NULL);
        vorbis_bitrate_addblock(&context->vb) ;

        while(vorbis_bitrate_flushpacket(&context->vd, &op)) {
            /* I'd love to say the following line is a hack, but sadly it's
             * not; apparently the end-of-stream decision is in libogg. */
            if(op.bytes==1)
                continue;
            memcpy(context->buffer + context->buffer_index, &op, sizeof(ogg_packet));
            context->buffer_index += sizeof(ogg_packet);
            memcpy(context->buffer + context->buffer_index, op.packet, op.bytes);
            context->buffer_index += op.bytes;
//            av_log(avccontext, AV_LOG_DEBUG, "e%d / %d\n", context->buffer_index, op.bytes);
        }
    }

    l=0;
    if(context->buffer_index){
        ogg_packet *op2= (ogg_packet*)context->buffer;
        op2->packet = context->buffer + sizeof(ogg_packet);

        l=  op2->bytes;
        avccontext->coded_frame->pts= av_rescale_q(op2->granulepos, (AVRational){1, avccontext->sample_rate}, avccontext->time_base);
        //FIXME we should reorder the user supplied pts and not assume that they are spaced by 1/sample_rate

        memcpy(packets, op2->packet, l);
        context->buffer_index -= l + sizeof(ogg_packet);
        memcpy(context->buffer, context->buffer + l + sizeof(ogg_packet), context->buffer_index);
//        av_log(avccontext, AV_LOG_DEBUG, "E%d\n", l);
    }

    return l;
}
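
The pts in Example #10 comes from a Vorbis granule position, which counts samples, so it lives in a 1/sample_rate time base and needs only one rescale into the codec time base. A minimal sketch (sample_rate and tb are hypothetical):

#include <libavutil/mathematics.h>

/* Vorbis granulepos (sample count) -> pts in the codec time base. */
static int64_t granulepos_to_pts(int64_t granulepos, int sample_rate,
                                 AVRational tb)
{
    return av_rescale_q(granulepos, (AVRational){ 1, sample_rate }, tb);
}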
Example #11
static void* avplay_thread(struct avplay_t* avplay)
{
  AVFormatContext *fmt_ctx = NULL;
  AVPacket pkt;
  int video_stream_idx = -1, audio_stream_idx = -1;
  int ret = 0;
  struct packet_t* packet;
  int msg;

  /* register all formats and codecs */
  av_register_all();

restart:
  /* Wait for a MSG_PLAY message */
  while (((msg = msgqueue_get(&avplay->msgqueue,10000)) != MSG_PLAY) && (!avplay->next_url));

  fprintf(stderr,"avplay: waiting for playback mutex\n");
  pthread_mutex_lock(&avplay->codecs->playback_mutex);
  fprintf(stderr,"avplay: gotplayback mutex\n");

  /* Resume sending packets to codecs */
  codec_new_channel(&avplay->codecs->vcodec);
  codec_new_channel(&avplay->codecs->acodec);
  avplay->codecs->acodec.first_packet = 1;
  avplay->codecs->vcodec.first_packet = 1;

  avplay->codecs->vcodec.is_running = 1;
  avplay->codecs->acodec.is_running = 1;

  avplay->url = strdup(avplay->next_url);
  avplay->next_url = NULL;

  /* open input file, and allocate format context */
  if (avformat_open_input(&fmt_ctx, avplay->url, NULL, NULL) < 0) {
    fprintf(stderr, "Could not open source file %s\n", avplay->url);
    return NULL; /* thread function returns void* */
  }

  /* retrieve stream information */
  if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
    fprintf(stderr, "Could not find stream information\n");
    ret = 2;
    goto end;
  }

  /* dump input information to stderr */
  av_dump_format(fmt_ctx, 0, avplay->url, 0);

  if ((ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0)) >= 0) {
    video_stream_idx = ret;
  }

  if ((ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0)) >= 0) {
    audio_stream_idx = ret;
  }

  if (audio_stream_idx == -1 && video_stream_idx == -1) {
    fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
    ret = 3;
    goto end;
  }

  fprintf(stderr,"video_stream_idx=%d\n",video_stream_idx);
  fprintf(stderr,"audio_stream_idx=%d\n",audio_stream_idx);

  if ((avplay->codecs->vcodec.vcodectype = map_vcodec(fmt_ctx->streams[video_stream_idx]->codec->codec_id)) == -1) {
    fprintf(stderr,"Unsupported video codec\n");
    exit(1);
  }
  if ((avplay->codecs->acodec.acodectype = map_acodec(fmt_ctx->streams[audio_stream_idx]->codec->codec_id)) == -1) {
    fprintf(stderr,"Unsupported audio codec\n");
    exit(1);
  }

  if (fmt_ctx->streams[audio_stream_idx]->codec->codec_id == CODEC_ID_AAC) {
    AVCodecContext* c =  fmt_ctx->streams[audio_stream_idx]->codec;
    if (c->extradata_size != 2) {
      fprintf(stderr,"Unexpected AAC extradata size %d, aborting\n",c->extradata_size);
      exit(1);
    }

    packet = malloc(sizeof(*packet));
    packet->packet = malloc(2);
    packet->packetlength = 2;
    memcpy(packet->packet, c->extradata, 2);

    codec_queue_add_item(&avplay->codecs->acodec,packet,MSG_CODECDATA);
  }


  AVCodecContext* c =  fmt_ctx->streams[video_stream_idx]->codec;

  unsigned char* annexb_extradata = NULL;
  int annexb_extradata_size = 0;
  int nalsize = 0;

  if ((avplay->codecs->vcodec.vcodectype == OMX_VIDEO_CodingAVC) && (c->extradata)) {
    if ((c->extradata[0]==0) && (c->extradata[1]==0) && (c->extradata[2]==0) && (c->extradata[3]==1)) {
      fprintf(stderr,"Extradata is already in annexb format.\n");
      annexb_extradata = c->extradata;
      annexb_extradata_size = c->extradata_size;
    } else {
      int i,j,k;

      nalsize = (c->extradata[4] & 0x3) + 1;
      if (nalsize != 4) {
         fprintf(stderr,"Unsupported nalsize %d, aborting\n",nalsize);
         exit(1);
      }
      int sps_len = (c->extradata[6] << 8) | c->extradata[7];
      fprintf(stderr,"sps_len=%d\n",sps_len);
      annexb_extradata_size = sps_len + 4;

      int pps_count = c->extradata[8+sps_len];
      i = 9 + sps_len;
      for (j=0;j<pps_count;j++) {
        int pps_len = (c->extradata[i] << 8) | c->extradata[i+1];
        i += 2;
        fprintf(stderr,"pps_len=%d\n",pps_len);
        i += pps_len;
        annexb_extradata_size += 4 + pps_len;
      }

      fprintf(stderr,"annexb_extradata_size = %d\n",annexb_extradata_size);
      annexb_extradata = malloc(annexb_extradata_size);

      annexb_extradata[0] = 0;
      annexb_extradata[1] = 0;
      annexb_extradata[2] = 0;
      annexb_extradata[3] = 1;
      memcpy(annexb_extradata + 4, c->extradata+8, sps_len);
      k = sps_len + 4;
      i = 9 + sps_len;
      for (j=0;j<pps_count;j++) {
        int pps_len = (c->extradata[i] << 8) | c->extradata[i+1];
        i += 2;
        annexb_extradata[k] = 0;
        annexb_extradata[k+1] = 0;
        annexb_extradata[k+2] = 0;
        annexb_extradata[k+3] = 1;
        k += 4;
        memcpy(annexb_extradata + k, c->extradata+i, pps_len);
        i += pps_len;
      }
    }
  }

  /* initialize packet, set data to NULL, let the demuxer fill it */
  av_init_packet(&pkt);
  pkt.data = NULL;
  pkt.size = 0;

#ifdef DUMP_VIDEO
  int fd;
  static int track = 0;
  char filename[32];

  sprintf(filename,"video%03d.dump",++track);
  fd = open(filename,O_CREAT|O_TRUNC|O_RDWR,0666);
  if (fd < 0) {
    fprintf(stderr,"Could not create video.dump\n");
    exit(1);
  }
#endif

  int first_video = 1; 
  AVRational omx_timebase = {1,1000000};
  /* read frames from the file */
  while (1) {
    msg = msgqueue_get(&avplay->msgqueue,0);

    if (msg == MSG_STOP) {
      break;
    }

    if (av_read_frame(fmt_ctx, &pkt) < 0) {
      fprintf(stderr,"Error reading stream, ending\n");
      break;
    }

    //fprintf(stderr,"Read pkt - index=%d\n",pkt.stream_index);

    if ((pkt.stream_index == video_stream_idx) || (pkt.stream_index == audio_stream_idx)) {
      packet = malloc(sizeof(*packet));
      packet->PTS = av_rescale_q(pkt.pts, fmt_ctx->streams[pkt.stream_index]->time_base, omx_timebase);
      packet->DTS = -1;

      packet->packetlength = pkt.size;

      if ((pkt.stream_index == video_stream_idx) && (first_video) && (annexb_extradata_size > 0)) {
        /* Add extradata to first video frame */
        packet->packetlength += annexb_extradata_size;
        packet->packet = malloc(packet->packetlength);
        memcpy(packet->packet, annexb_extradata, annexb_extradata_size);
        if (nalsize > 0) {
          convert4(packet->packet+annexb_extradata_size,pkt.data,pkt.size);
        } else {
          memcpy(packet->packet+annexb_extradata_size,pkt.data,pkt.size);
        }
      } else {
        packet->packet = malloc(packet->packetlength);
        if ((pkt.stream_index == video_stream_idx) && (nalsize > 0)) {
          convert4(packet->packet,pkt.data,pkt.size);
        } else {
          memcpy(packet->packet,pkt.data,pkt.size);
        }
      }
      packet->buf = packet->packet;  /* This is what is free()ed */
      
      if (pkt.stream_index == video_stream_idx) {
#ifdef DUMP_VIDEO
        write(fd,packet->packet,packet->packetlength);
#endif
        //fprintf(stderr,"Adding video packet - PTS=%lld, size=%d\n",packet->PTS, packet->packetlength);
        first_video = 0;
        while (avplay->codecs->vcodec.queue_count > 100) { usleep(100000); }  // FIXME
        codec_queue_add_item(&avplay->codecs->vcodec,packet,MSG_PACKET);
      } else {
	  //fprintf(stderr,"Adding audio packet - PTS=%lld, size=%d\n",packet->PTS, packet->packetlength);
        while (avplay->codecs->acodec.queue_count > 1000) { usleep(100000); }  // FIXME
        codec_queue_add_item(&avplay->codecs->acodec,packet,MSG_PACKET);
      }
    }
  
    av_free_packet(&pkt);
  }

#ifdef DUMP_VIDEO
  close(fd);
#endif

end:
  codec_stop(&avplay->codecs->vcodec);
  codec_stop(&avplay->codecs->acodec);
  if (avplay->url) free(avplay->url);  /* This should never be null */
  pthread_mutex_unlock(&avplay->codecs->playback_mutex);
  avformat_close_input(&fmt_ctx);
  goto restart;

  return NULL;
}
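
The convert4() helper that Example #11 calls is not shown; what such a routine typically does is rewrite each 4-byte big-endian NAL length prefix as an Annex B start code while copying the payload, matching the nalsize == 4 check above. A minimal sketch of that conversion, under that assumption:

#include <stdint.h>
#include <string.h>

/* Copy size-prefixed H.264 NAL units to dst, replacing each 4-byte
 * big-endian length field with a 00 00 00 01 start code. dst must be
 * at least size bytes. This is a sketch of what convert4() is assumed
 * to do, not the project's actual implementation. */
static void nal_lengths_to_startcodes(uint8_t *dst, const uint8_t *src, int size)
{
    int i = 0;
    while (i + 4 <= size) {
        uint32_t nal_len = ((uint32_t)src[i]   << 24) |
                           ((uint32_t)src[i+1] << 16) |
                           ((uint32_t)src[i+2] <<  8) |
                            (uint32_t)src[i+3];
        if (nal_len > (uint32_t)(size - i - 4))
            break; /* malformed input: stop rather than overrun */
        dst[i] = 0; dst[i+1] = 0; dst[i+2] = 0; dst[i+3] = 1;
        memcpy(dst + i + 4, src + i + 4, nal_len);
        i += 4 + nal_len;
    }
}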
Example #12
int decode_thread(void *arg)
{

    VideoState *is = (VideoState *)arg;
    AVFormatContext *pFormatCtx;
    AVPacket pkt1, *packet = &pkt1;

    int video_index = -1;
    int audio_index = -1;
    int i;

    is->videoStream=-1;
    is->audioStream=-1;

    global_video_state = is;
    // will interrupt blocking functions if we quit!
    url_set_interrupt_cb(decode_interrupt_cb);

    // Open video file
    if(av_open_input_file(&pFormatCtx, is->filename, NULL, 0, NULL)!=0)
        return -1; // Couldn't open file

    is->pFormatCtx = pFormatCtx;

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    dump_format(pFormatCtx, 0, is->filename, 0);

    // Find the first video stream

    for(i=0; i<pFormatCtx->nb_streams; i++)
    {
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO &&
            video_index < 0)
        {
            video_index=i;
        }
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO &&
            audio_index < 0)
        {
            audio_index=i;
        }
    }
    if(audio_index >= 0)
    {
        stream_component_open(is, audio_index);
    }
    if(video_index >= 0)
    {
        stream_component_open(is, video_index);
    }

    if(is->videoStream < 0 || is->audioStream < 0)
    {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        goto fail;
    }

    // main decode loop
    av_init_packet(&flush_pkt);
    flush_pkt.data = (uint8_t *)"FLUSH";
    for(;;)
    {
        if(is->quit)
        {
            break;
        }

        // seek stuff goes here
        if(is->seek_req)
        {
            int stream_index= -1;
            int64_t seek_target = is->seek_pos;

            if(is->videoStream >= 0)
                stream_index = is->videoStream;
            else if(is->audioStream >= 0)
                stream_index = is->audioStream;

            if(stream_index>=0)
            {
                AVRational base_q;
                base_q.num = 1;
                base_q.den = AV_TIME_BASE; /* AV_TIME_BASE_Q: seek_pos is in AV_TIME_BASE units */
                seek_target = av_rescale_q(seek_target, base_q, pFormatCtx->streams[stream_index]->time_base);
            }
            if(av_seek_frame(is->pFormatCtx, stream_index, seek_target, is->seek_flags) < 0)
            {
                fprintf(stderr, "%s: error while seeking\n", is->pFormatCtx->filename);
            }
            else
            {
                if(is->audioStream >= 0)
                {
                    packet_queue_flush(&is->audioq);
                    //	packet_queue_put(&is->audioq, &flush_pkt);
                }
                if(is->videoStream >= 0)
                {
                    packet_queue_flush(&is->videoq);
                    //	packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            printf("av_seek_frame called\n");
        }

        if(is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE)
        {
            SDL_Delay(10);
            continue;
        }
        if(av_read_frame(is->pFormatCtx, packet) < 0)
        {
            if(url_ferror(pFormatCtx->pb) == 0)
            {
                SDL_Delay(100); /* no error; wait for user input */
                continue;
            }
            else
            {
                break;
            }
        }
        // Is this a packet from the video stream?
        if(packet->stream_index == is->videoStream)
        {
            packet_queue_put(&is->videoq, packet);
        }
        else if(packet->stream_index == is->audioStream)
        {
            packet_queue_put(&is->audioq, packet);
        }
        else
        {
            av_free_packet(packet);
        }
    }
    /* all done - wait for it */
    while(!is->quit)
    {
        SDL_Delay(100);
    }

fail:
    {
        SDL_Event event;
        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
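
The seek path of Example #12 is the usual pattern: the requested position is kept in AV_TIME_BASE units and must be rescaled into the target stream's time base before calling av_seek_frame(). A minimal sketch of just that conversion (st_tb is a hypothetical stream time base):

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

/* AV_TIME_BASE units -> stream time base, as expected by
 * av_seek_frame() when a stream index is given. */
static int64_t seek_target_for_stream(int64_t pos, AVRational st_tb)
{
    return av_rescale_q(pos, AV_TIME_BASE_Q, st_tb);
}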
Example #13
File: hls.c Project: lihp1603/libav
static int hls_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    HLSContext *c = s->priv_data;
    int ret, i, minvariant = -1;

    if (c->first_packet) {
        recheck_discard_flags(s, 1);
        c->first_packet = 0;
    }

start:
    c->end_of_segment = 0;
    for (i = 0; i < c->n_variants; i++) {
        struct variant *var = c->variants[i];
        /* Make sure we've got one buffered packet from each open variant
         * stream */
        if (var->needed && !var->pkt.data) {
            while (1) {
                int64_t ts_diff;
                AVStream *st;
                ret = av_read_frame(var->ctx, &var->pkt);
                if (ret < 0) {
                    if (!var->pb.eof_reached)
                        return ret;
                    reset_packet(&var->pkt);
                    break;
                } else {
                    if (c->first_timestamp == AV_NOPTS_VALUE &&
                        var->pkt.dts       != AV_NOPTS_VALUE)
                        c->first_timestamp = av_rescale_q(var->pkt.dts,
                            var->ctx->streams[var->pkt.stream_index]->time_base,
                            AV_TIME_BASE_Q);
                }

                if (c->seek_timestamp == AV_NOPTS_VALUE)
                    break;

                if (var->pkt.dts == AV_NOPTS_VALUE) {
                    c->seek_timestamp = AV_NOPTS_VALUE;
                    break;
                }

                st = var->ctx->streams[var->pkt.stream_index];
                ts_diff = av_rescale_rnd(var->pkt.dts, AV_TIME_BASE,
                                         st->time_base.den, AV_ROUND_DOWN) -
                          c->seek_timestamp;
                if (ts_diff >= 0 && (c->seek_flags  & AVSEEK_FLAG_ANY ||
                                     var->pkt.flags & AV_PKT_FLAG_KEY)) {
                    c->seek_timestamp = AV_NOPTS_VALUE;
                    break;
                }
                av_free_packet(&var->pkt);
                reset_packet(&var->pkt);
            }
        }
        /* Check if this stream still is on an earlier segment number, or
         * has the packet with the lowest dts */
        if (var->pkt.data) {
            struct variant *minvar = minvariant < 0 ?
                                     NULL : c->variants[minvariant];
            if (minvariant < 0 || var->cur_seq_no < minvar->cur_seq_no) {
                minvariant = i;
            } else if (var->cur_seq_no == minvar->cur_seq_no) {
                int64_t dts     =    var->pkt.dts;
                int64_t mindts  = minvar->pkt.dts;
                AVStream *st    =    var->ctx->streams[var->pkt.stream_index];
                AVStream *minst = minvar->ctx->streams[minvar->pkt.stream_index];

                if (dts == AV_NOPTS_VALUE) {
                    minvariant = i;
                } else if (mindts != AV_NOPTS_VALUE) {
                    if (st->start_time    != AV_NOPTS_VALUE)
                        dts    -= st->start_time;
                    if (minst->start_time != AV_NOPTS_VALUE)
                        mindts -= minst->start_time;

                    if (av_compare_ts(dts, st->time_base,
                                      mindts, minst->time_base) < 0)
                        minvariant = i;
                }
            }
        }
    }
    if (c->end_of_segment) {
        if (recheck_discard_flags(s, 0))
            goto start;
    }
    /* If we got a packet, return it */
    if (minvariant >= 0) {
        *pkt = c->variants[minvariant]->pkt;
        pkt->stream_index += c->variants[minvariant]->stream_offset;
        reset_packet(&c->variants[minvariant]->pkt);
        return 0;
    }
    return AVERROR_EOF;
}
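When several variants have a buffered packet, the tie-break above has to compare dts values that live in different time bases, which is what av_compare_ts() does without losing precision. A small self-contained sketch; both time bases and timestamps are made-up values:

#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int64_t dts_a = 4500;            /* 0.050 s in a 1/90000 time base */
    int64_t dts_b = 51;              /* 0.051 s in a 1/1000 time base  */
    AVRational tb_a = { 1, 90000 };
    AVRational tb_b = { 1, 1000 };
    /* negative: a is earlier, 0: equal, positive: b is earlier */
    printf("%d\n", av_compare_ts(dts_a, tb_a, dts_b, tb_b)); /* -1 */
    return 0;
}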
Example #14
static int encavcodecaInit(hb_work_object_t *w, hb_job_t *job)
{
    AVCodec *codec;
    AVCodecContext *context;
    hb_audio_t *audio = w->audio;

    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
    w->private_data       = pv;
    pv->job               = job;
    pv->list              = hb_list_init();

    // channel count, layout and matrix encoding
    int matrix_encoding;
    uint64_t channel_layout   = hb_ff_mixdown_xlat(audio->config.out.mixdown,
                                                   &matrix_encoding);
    pv->out_discrete_channels =
        hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);

    // default settings and options
    AVDictionary *av_opts          = NULL;
    const char *codec_name         = NULL;
    enum AVCodecID codec_id        = AV_CODEC_ID_NONE;
    enum AVSampleFormat sample_fmt = AV_SAMPLE_FMT_FLTP;
    int bits_per_raw_sample        = 0;
    int profile                    = FF_PROFILE_UNKNOWN;

    // override with encoder-specific values
    switch (audio->config.out.codec)
    {
        case HB_ACODEC_AC3:
            codec_id = AV_CODEC_ID_AC3;
            if (matrix_encoding != AV_MATRIX_ENCODING_NONE)
                av_dict_set(&av_opts, "dsur_mode", "on", 0);
            break;

        case HB_ACODEC_FDK_AAC:
        case HB_ACODEC_FDK_HAAC:
            codec_name          = "libfdk_aac";
            sample_fmt          = AV_SAMPLE_FMT_S16;
            bits_per_raw_sample = 16;
            switch (audio->config.out.codec)
            {
                case HB_ACODEC_FDK_HAAC:
                    profile = FF_PROFILE_AAC_HE;
                    break;
                default:
                    profile = FF_PROFILE_AAC_LOW;
                    break;
            }
            // Libav's libfdk-aac wrapper expects back channels for 5.1
            // audio, and will error out unless we translate the layout
            if (channel_layout == AV_CH_LAYOUT_5POINT1)
                channel_layout  = AV_CH_LAYOUT_5POINT1_BACK;
            break;

        case HB_ACODEC_FFAAC:
            codec_name = "aac";
            av_dict_set(&av_opts, "stereo_mode", "ms_off", 0);
            break;

        case HB_ACODEC_FFFLAC:
        case HB_ACODEC_FFFLAC24:
            codec_id = AV_CODEC_ID_FLAC;
            switch (audio->config.out.codec)
            {
                case HB_ACODEC_FFFLAC24:
                    sample_fmt          = AV_SAMPLE_FMT_S32;
                    bits_per_raw_sample = 24;
                    break;
                default:
                    sample_fmt          = AV_SAMPLE_FMT_S16;
                    bits_per_raw_sample = 16;
                    break;
            }
            break;

        default:
            hb_error("encavcodecaInit: unsupported codec (0x%x)",
                     audio->config.out.codec);
            return 1;
    }
    if (codec_name != NULL)
    {
        codec = avcodec_find_encoder_by_name(codec_name);
        if (codec == NULL)
        {
            hb_error("encavcodecaInit: avcodec_find_encoder_by_name(%s) failed",
                     codec_name);
            return 1;
        }
    }
    else
    {
        codec = avcodec_find_encoder(codec_id);
        if (codec == NULL)
        {
            hb_error("encavcodecaInit: avcodec_find_encoder(%d) failed",
                     codec_id);
            return 1;
        }
    }
    // allocate the context and apply the settings
    context                      = avcodec_alloc_context3(codec);
    hb_ff_set_sample_fmt(context, codec, sample_fmt);
    context->bits_per_raw_sample = bits_per_raw_sample;
    context->profile             = profile;
    context->channel_layout      = channel_layout;
    context->channels            = pv->out_discrete_channels;
    context->sample_rate         = audio->config.out.samplerate;

    if (audio->config.out.bitrate > 0)
    {
        context->bit_rate = audio->config.out.bitrate * 1000;
    }
    else if (audio->config.out.quality >= 0)
    {
        context->global_quality = audio->config.out.quality * FF_QP2LAMBDA;
        context->flags |= CODEC_FLAG_QSCALE;
    }

    if (audio->config.out.compression_level >= 0)
    {
        context->compression_level = audio->config.out.compression_level;
    }

    // For some codecs, libav requires the following flag to be set
    // so that it fills extradata with global header information.
    // If this flag is not set, it inserts the data into each
    // packet instead.
    context->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if (hb_avcodec_open(context, codec, &av_opts, 0))
    {
        hb_error("encavcodecaInit: hb_avcodec_open() failed");
        return 1;
    }
    // avcodec_open populates the opts dictionary with the
    // things it didn't recognize.
    AVDictionaryEntry *t = NULL;
    while ((t = av_dict_get(av_opts, "", t, AV_DICT_IGNORE_SUFFIX)))
    {
        hb_log("encavcodecaInit: Unknown avcodec option %s", t->key);
    }
    av_dict_free(&av_opts);

    pv->context           = context;
    audio->config.out.samples_per_frame =
    pv->samples_per_frame = context->frame_size;
    pv->input_samples     = context->frame_size * context->channels;
    pv->input_buf         = malloc(pv->input_samples * sizeof(float));
    // Some encoders in libav (e.g. fdk-aac) fail if the output buffer
    // size is below some minimum value.  8K seems to be enough :(
    pv->max_output_bytes  = MAX(FF_MIN_BUFFER_SIZE,
                                (pv->input_samples *
                                 av_get_bytes_per_sample(context->sample_fmt)));

    // sample_fmt conversion
    if (context->sample_fmt != AV_SAMPLE_FMT_FLT)
    {
        pv->output_buf = malloc(pv->max_output_bytes);
        pv->avresample = avresample_alloc_context();
        if (pv->avresample == NULL)
        {
            hb_error("encavcodecaInit: avresample_alloc_context() failed");
            return 1;
        }
        av_opt_set_int(pv->avresample, "in_sample_fmt",
                       AV_SAMPLE_FMT_FLT, 0);
        av_opt_set_int(pv->avresample, "out_sample_fmt",
                       context->sample_fmt, 0);
        av_opt_set_int(pv->avresample, "in_channel_layout",
                       context->channel_layout, 0);
        av_opt_set_int(pv->avresample, "out_channel_layout",
                       context->channel_layout, 0);
        if (hb_audio_dither_is_supported(audio->config.out.codec))
        {
            // dithering needs the sample rate
            av_opt_set_int(pv->avresample, "in_sample_rate",
                           context->sample_rate, 0);
            av_opt_set_int(pv->avresample, "out_sample_rate",
                           context->sample_rate, 0);
            av_opt_set_int(pv->avresample, "dither_method",
                           audio->config.out.dither_method, 0);
        }
        if (avresample_open(pv->avresample))
        {
            hb_error("encavcodecaInit: avresample_open() failed");
            avresample_free(&pv->avresample);
            return 1;
        }
    }
    else
    {
        pv->avresample = NULL;
        pv->output_buf = pv->input_buf;
    }

    if (context->extradata != NULL)
    {
        memcpy(w->config->extradata.bytes, context->extradata,
               context->extradata_size);
        w->config->extradata.length = context->extradata_size;
    }

    audio->config.out.delay = av_rescale_q(context->delay, context->time_base,
                                           (AVRational){1, 90000});

    return 0;
}
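The final statement rescales the encoder delay from the codec time_base into HandBrake's 90 kHz clock. A tiny sketch of the same call, assuming 1024 samples of priming delay at 48 kHz:

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int64_t delay = 1024;                 /* assumed encoder delay in samples */
    AVRational codec_tb = { 1, 48000 };
    AVRational hb_tb    = { 1, 90000 };
    printf("%" PRId64 "\n", av_rescale_q(delay, codec_tb, hb_tb)); /* 1920 */
    return 0;
}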
Example #15
static int feed_input_buffer(JNIEnv *env, IJKFF_Pipenode *node, int64_t timeUs, int *enqueue_count)
{
    IJKFF_Pipenode_Opaque *opaque   = node->opaque;
    FFPlayer              *ffp      = opaque->ffp;
    IJKFF_Pipeline        *pipeline = opaque->pipeline;
    VideoState            *is       = ffp->is;
    Decoder               *d        = &is->viddec;
    PacketQueue           *q        = d->queue;
    sdl_amedia_status_t    amc_ret  = 0;
    int                    ret      = 0;
    ssize_t  input_buffer_index = 0;
    uint8_t* input_buffer_ptr   = NULL;
    size_t   input_buffer_size  = 0;
    size_t   copy_size          = 0;
    int64_t  time_stamp         = 0;

    if (enqueue_count)
        *enqueue_count = 0;

    if (d->queue->abort_request) {
        ret = 0;
        goto fail;
    }

    if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
#if AMC_USE_AVBITSTREAM_FILTER
#else
        H264ConvertState convert_state = {0, 0};
#endif
        AVPacket pkt;
        do {
            if (d->queue->nb_packets == 0)
                SDL_CondSignal(d->empty_queue_cond);
            if (ffp_packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0) {
                ret = -1;
                goto fail;
            }
            if (ffp_is_flush_packet(&pkt) || opaque->acodec_flush_request) {
                // request flush before lock, or never get mutex
                opaque->acodec_flush_request = true;
                SDL_LockMutex(opaque->acodec_mutex);
                if (SDL_AMediaCodec_isStarted(opaque->acodec)) {
                    if (opaque->input_packet_count > 0) {
                        // flush empty queue cause error on OMX.SEC.AVC.Decoder (Nexus S)
                        SDL_AMediaCodec_flush(opaque->acodec);
                        opaque->input_packet_count = 0;
                    }
                    // If codec is configured in synchronous mode, codec will resume automatically
                    // SDL_AMediaCodec_start(opaque->acodec);
                }
                opaque->acodec_flush_request = false;
                SDL_CondSignal(opaque->acodec_cond);
                SDL_UnlockMutex(opaque->acodec_mutex);
                d->finished = 0;
                d->next_pts = d->start_pts;
                d->next_pts_tb = d->start_pts_tb;
            }
        } while (ffp_is_flush_packet(&pkt) || d->queue->serial != d->pkt_serial);
        av_free_packet(&d->pkt);
        d->pkt_temp = d->pkt = pkt;
        d->packet_pending = 1;
#if AMC_USE_AVBITSTREAM_FILTER
        // d->pkt_temp->data could be allocated by av_bitstream_filter_filter
        if (d->bfsc_ret > 0) {
            if (d->bfsc_data)
                av_freep(&d->bfsc_data);
            d->bfsc_ret = 0;
        }
        d->bfsc_ret =
            av_bitstream_filter_filter(opaque->bsfc, opaque->avctx, NULL, &d->pkt_temp.data, &d->pkt_temp.size,
                                       d->pkt.data, d->pkt.size, d->pkt.flags & AV_PKT_FLAG_KEY);
        if (d->bfsc_ret > 0) {
            d->bfsc_data = d->pkt_temp.data;
        } else if (d->bfsc_ret < 0) {
            ALOGE("%s: av_bitstream_filter_filter failed\n", __func__);
            ret = -1;
            goto fail;
        }

        if (d->pkt_temp.size == d->pkt.size + opaque->avctx->extradata_size) {
            d->pkt_temp.data += opaque->avctx->extradata_size;
            d->pkt_temp.size  = d->pkt.size;
        }

        AMCTRACE("bsfc->filter(%d): %p[%d] -> %p[%d]", d->bfsc_ret, d->pkt.data, (int)d->pkt.size, d->pkt_temp.data, (int)d->pkt_temp.size);
#else
#if 0
        AMCTRACE("raw [%d][%d] %02x%02x%02x%02x%02x%02x%02x%02x", (int)d->pkt_temp.size,
            (int)opaque->nal_size,
            d->pkt_temp.data[0],
            d->pkt_temp.data[1],
            d->pkt_temp.data[2],
            d->pkt_temp.data[3],
            d->pkt_temp.data[4],
            d->pkt_temp.data[5],
            d->pkt_temp.data[6],
            d->pkt_temp.data[7]);
#endif
        convert_h264_to_annexb(d->pkt_temp.data, d->pkt_temp.size, opaque->nal_size, &convert_state);
        time_stamp = d->pkt_temp.pts; // use the outer time_stamp; redeclaring it here would only shadow it
        if (!time_stamp && d->pkt_temp.dts)
            time_stamp = d->pkt_temp.dts;
        if (time_stamp > 0) {
            time_stamp = av_rescale_q(time_stamp, is->video_st->time_base, AV_TIME_BASE_Q);
        } else {
            time_stamp = 0;
        }
#if 0
        AMCTRACE("input[%d][%d][%lld,%lld (%d, %d) -> %lld] %02x%02x%02x%02x%02x%02x%02x%02x", (int)d->pkt_temp.size,
            (int)opaque->nal_size,
            (int64_t)d->pkt_temp.pts,
            (int64_t)d->pkt_temp.dts,
            (int)is->video_st->time_base.num,
            (int)is->video_st->time_base.den,
            (int64_t)time_stamp,
            d->pkt_temp.data[0],
            d->pkt_temp.data[1],
            d->pkt_temp.data[2],
            d->pkt_temp.data[3],
            d->pkt_temp.data[4],
            d->pkt_temp.data[5],
            d->pkt_temp.data[6],
            d->pkt_temp.data[7]);
#endif
#endif
    }

    if (d->pkt_temp.data) {
        // reconfigure surface if surface changed
        // NULL surface cause no display
        if (ffpipeline_is_surface_need_reconfigure(pipeline)) {
            // request reconfigure before lock, or never get mutex
            opaque->acodec_reconfigure_request = true;
            SDL_LockMutex(opaque->acodec_mutex);
            ret = reconfigure_codec_l(env, node);
            opaque->acodec_reconfigure_request = false;
            SDL_CondSignal(opaque->acodec_cond);
            SDL_UnlockMutex(opaque->acodec_mutex);
            if (ret != 0) {
                ALOGE("%s: reconfigure_codec failed\n", __func__);
                ret = 0;
                goto fail;
            }

            SDL_LockMutex(opaque->acodec_first_dequeue_output_mutex);
            while (!q->abort_request &&
                !opaque->acodec_reconfigure_request &&
                !opaque->acodec_flush_request &&
                opaque->acodec_first_dequeue_output_request) {
                SDL_CondWaitTimeout(opaque->acodec_first_dequeue_output_cond, opaque->acodec_first_dequeue_output_mutex, 1000);
            }
            SDL_UnlockMutex(opaque->acodec_first_dequeue_output_mutex);

            if (q->abort_request || opaque->acodec_reconfigure_request || opaque->acodec_flush_request) {
                ret = 0;
                goto fail;
            }
        }

        // no need to decode without surface
        if (!opaque->jsurface) {
            ret = amc_decode_picture_fake(node, 1000);
            goto fail;
        }

        input_buffer_index = SDL_AMediaCodec_dequeueInputBuffer(opaque->acodec, timeUs);
        if (input_buffer_index < 0) {
            if (SDL_AMediaCodec_isInputBuffersValid(opaque->acodec)) {
                // timeout
                ret = 0;
                goto fail;
            } else {
                // exception
                ret = amc_decode_picture_fake(node, 1000);
                goto fail;
            }
        } else {
            // remove all fake pictures
            if (opaque->fake_pictq.nb_packets > 0)
                ffp_packet_queue_flush(&opaque->fake_pictq);
        }

        input_buffer_ptr = SDL_AMediaCodec_getInputBuffer(opaque->acodec, input_buffer_index, &input_buffer_size);
        if (!input_buffer_ptr) {
            ALOGE("%s: SDL_AMediaCodec_getInputBuffer failed\n", __func__);
            ret = -1;
            goto fail;
        }

        copy_size = FFMIN(input_buffer_size, d->pkt_temp.size);
        memcpy(input_buffer_ptr, d->pkt_temp.data, copy_size);

        time_stamp = d->pkt_temp.pts;
        if (!time_stamp && d->pkt_temp.dts)
            time_stamp = d->pkt_temp.dts;
        if (time_stamp > 0) {
            time_stamp = av_rescale_q(time_stamp, is->video_st->time_base, AV_TIME_BASE_Q);
        } else {
            time_stamp = 0;
        }
        // ALOGE("queueInputBuffer, %lld\n", time_stamp);
        amc_ret = SDL_AMediaCodec_queueInputBuffer(opaque->acodec, input_buffer_index, 0, copy_size, time_stamp, 0);
        if (amc_ret != SDL_AMEDIA_OK) {
            ALOGE("%s: SDL_AMediaCodec_queueInputBuffer failed\n", __func__);
            ret = -1;
            goto fail;
        }
        // ALOGE("%s: queue %d/%d", __func__, (int)copy_size, (int)input_buffer_size);
        opaque->input_packet_count++;
        if (enqueue_count)
            ++*enqueue_count;
    }

    if (input_buffer_size < 0) { // FIXME: always false, input_buffer_size is an unsigned size_t
        d->packet_pending = 0;
    } else {
        d->pkt_temp.dts =
        d->pkt_temp.pts = AV_NOPTS_VALUE;
        if (d->pkt_temp.data) {
            d->pkt_temp.data += copy_size;
            d->pkt_temp.size -= copy_size;
            if (d->pkt_temp.size <= 0)
                d->packet_pending = 0;
        } else {
            // FIXME: detect if decode finished
            // if (!got_frame) {
                d->packet_pending = 0;
                d->finished = d->pkt_serial;
            // }
        }
    }

fail:
    return ret;
}
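SDL_AMediaCodec_queueInputBuffer() wants the presentation time in microseconds, so the function rescales the packet timestamp from the video stream's time_base into AV_TIME_BASE_Q. A standalone sketch, assuming an NTSC-style 29.97 fps time base:

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational video_tb = { 1001, 30000 };  /* assumed 29.97 fps stream time base */
    int64_t pts = 30;                       /* 30 ticks = 1.001 s */
    int64_t time_us = av_rescale_q(pts, video_tb, AV_TIME_BASE_Q);
    printf("%" PRId64 "\n", time_us);       /* 1001000 */
    return 0;
}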
Example #16
/* get the input surface */
static QSVFrame *submit_frame(QSVVPPContext *s, AVFilterLink *inlink, AVFrame *picref)
{
    QSVFrame        *qsv_frame;
    AVFilterContext *ctx = inlink->dst;

    clear_unused_frames(s->in_frame_list);

    qsv_frame = get_free_frame(&s->in_frame_list);
    if (!qsv_frame)
        return NULL;

    /* Turn AVFrame into mfxFrameSurface1.
     * For video/opaque memory mode, pix_fmt is AV_PIX_FMT_QSV, and
     * mfxFrameSurface1 is stored in AVFrame->data[3];
     * for system memory mode, raw video data is stored in
     * AVFrame, we should map it into mfxFrameSurface1.
     */
    if (!IS_SYSTEM_MEMORY(s->in_mem_mode)) {
        if (picref->format != AV_PIX_FMT_QSV) {
            av_log(ctx, AV_LOG_ERROR, "QSVVPP gets a wrong frame.\n");
            return NULL;
        }
        qsv_frame->frame   = av_frame_clone(picref);
        qsv_frame->surface = (mfxFrameSurface1 *)qsv_frame->frame->data[3];
    } else {
        /* make a copy if the input is not padded as libmfx requires */
        if (picref->height & 31 || picref->linesize[0] & 31) {
            qsv_frame->frame = ff_get_video_buffer(inlink,
                                                   FFALIGN(inlink->w, 32),
                                                   FFALIGN(inlink->h, 32));
            if (!qsv_frame->frame)
                return NULL;

            qsv_frame->frame->width   = picref->width;
            qsv_frame->frame->height  = picref->height;

            if (av_frame_copy(qsv_frame->frame, picref) < 0) {
                av_frame_free(&qsv_frame->frame);
                return NULL;
            }

            av_frame_copy_props(qsv_frame->frame, picref);
            av_frame_free(&picref);
        } else
            qsv_frame->frame = av_frame_clone(picref);

        if (map_frame_to_surface(qsv_frame->frame,
                                &qsv_frame->surface_internal) < 0) {
            av_log(ctx, AV_LOG_ERROR, "Unsupported frame.\n");
            return NULL;
        }
        qsv_frame->surface = &qsv_frame->surface_internal;
    }

    qsv_frame->surface->Info           = s->frame_infos[FF_INLINK_IDX(inlink)];
    qsv_frame->surface->Data.TimeStamp = av_rescale_q(qsv_frame->frame->pts,
                                                      inlink->time_base, default_tb);

    qsv_frame->surface->Info.PicStruct =
            !qsv_frame->frame->interlaced_frame ? MFX_PICSTRUCT_PROGRESSIVE :
            (qsv_frame->frame->top_field_first ? MFX_PICSTRUCT_FIELD_TFF :
                                                 MFX_PICSTRUCT_FIELD_BFF);
    if (qsv_frame->frame->repeat_pict == 1)
        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FIELD_REPEATED;
    else if (qsv_frame->frame->repeat_pict == 2)
        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FRAME_DOUBLING;
    else if (qsv_frame->frame->repeat_pict == 4)
        qsv_frame->surface->Info.PicStruct |= MFX_PICSTRUCT_FRAME_TRIPLING;

    return qsv_frame;
}
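The surface timestamp comes from rescaling the frame pts out of the input link's time_base into default_tb. The sketch below assumes default_tb is the 90 kHz clock commonly used for mfxFrameSurface1 timestamps:

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational link_tb = { 1, 25 };   /* assumed 25 fps filter link */
    AVRational mfx_tb  = { 1, 90000 };
    int64_t pts = 50;                 /* 2 seconds into the stream */
    printf("%" PRId64 "\n", av_rescale_q(pts, link_tb, mfx_tb)); /* 180000 */
    return 0;
}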
Example #17
File: trim.c Project: cnh/FFmpeg
static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext       *s = ctx->priv;
    int64_t start_sample, end_sample = frame->nb_samples;
    int64_t pts;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (frame->pts != AV_NOPTS_VALUE)
        pts = av_rescale_q(frame->pts, inlink->time_base,
                           (AVRational){ 1, inlink->sample_rate });
    else
        pts = s->next_pts;
    s->next_pts = pts + frame->nb_samples;

    /* check if at least a part of the frame is after the start time */
    if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) {
        start_sample = 0;
    } else {
        drop = 1;
        start_sample = frame->nb_samples;

        if (s->start_sample >= 0 &&
            s->nb_samples + frame->nb_samples > s->start_sample) {
            drop         = 0;
            start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples);
        }

        if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts + frame->nb_samples > s->start_pts) {
            drop = 0;
            start_sample = FFMIN(start_sample, s->start_pts - pts);
        }

        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE)
        s->first_pts = pts + start_sample;

    /* check if at least a part of the frame is before the end time */
    if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) {
        end_sample = frame->nb_samples;
    } else {
        drop       = 1;
        end_sample = 0;

        if (s->end_sample != INT64_MAX &&
            s->nb_samples < s->end_sample) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples);
        }

        if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts < s->end_pts) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->end_pts - pts);
        }

        if (s->duration_tb && pts - s->first_pts < s->duration_tb) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts);
        }

        if (drop) {
            s->eof = inlink->closed = 1;
            goto drop;
        }
    }

    s->nb_samples += frame->nb_samples;
    start_sample   = FFMAX(0, start_sample);
    end_sample     = FFMIN(frame->nb_samples, end_sample);
    av_assert0(start_sample < end_sample || (start_sample == end_sample && !frame->nb_samples));

    if (start_sample) {
        AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, frame);
        av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
                        out->nb_samples, inlink->channels,
                        frame->format);
        if (out->pts != AV_NOPTS_VALUE)
            out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
                                     inlink->time_base);

        av_frame_free(&frame);
        frame = out;
    } else
        frame->nb_samples = end_sample;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_samples += frame->nb_samples;
    av_frame_free(&frame);
    return 0;
}
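Sample-accurate trimming works because the frame pts is first moved into units of 1/sample_rate, after which timestamps and sample counts can be compared and clipped directly. A minimal sketch of that conversion with assumed values:

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational link_tb = { 1, 1000 };   /* assumed millisecond link time base */
    int sample_rate = 44100;
    int64_t pts_ms = 500;               /* frame starts 0.5 s in */
    int64_t pts_samples = av_rescale_q(pts_ms, link_tb,
                                       (AVRational){ 1, sample_rate });
    printf("%" PRId64 "\n", pts_samples); /* 22050 */
    return 0;
}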
Example #18
static int rtp_write_header(AVFormatContext *s1)
{
    RTPMuxContext *s = s1->priv_data;
    int max_packet_size, n;
    AVStream *st;

    if (s1->nb_streams != 1)
        return -1;
    st = s1->streams[0];
    if (!is_supported(st->codec->codec_id)) {
        av_log(s1, AV_LOG_ERROR, "Unsupported codec %x\n", st->codec->codec_id);

        return -1;
    }

    if (s->payload_type < 0)
        s->payload_type = ff_rtp_get_payload_type(s1, st->codec);
    s->base_timestamp = av_get_random_seed();
    s->timestamp = s->base_timestamp;
    s->cur_timestamp = 0;
    s->ssrc = av_get_random_seed();
    s->first_packet = 1;
    s->first_rtcp_ntp_time = ff_ntp_time();
    if (s1->start_time_realtime)
        /* Round the NTP time to whole milliseconds. */
        s->first_rtcp_ntp_time = (s1->start_time_realtime / 1000) * 1000 +
                                 NTP_OFFSET_US;

    max_packet_size = s1->pb->max_packet_size;
    if (max_packet_size <= 12)
        return AVERROR(EIO);
    s->buf = av_malloc(max_packet_size);
    if (s->buf == NULL) {
        return AVERROR(ENOMEM);
    }
    s->max_payload_size = max_packet_size - 12;

    s->max_frames_per_packet = 0;
    if (s1->max_delay) {
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->codec->frame_size == 0) {
                av_log(s1, AV_LOG_ERROR, "Cannot respect max delay: frame size = 0\n");
            } else {
                s->max_frames_per_packet = av_rescale_rnd(s1->max_delay, st->codec->sample_rate, AV_TIME_BASE * st->codec->frame_size, AV_ROUND_DOWN);
            }
        }
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            /* FIXME: We should round down here... */
            s->max_frames_per_packet = av_rescale_q(s1->max_delay, (AVRational){1, 1000000}, st->codec->time_base);
        }
    }

    avpriv_set_pts_info(st, 32, 1, 90000);
    switch(st->codec->codec_id) {
    case CODEC_ID_MP2:
    case CODEC_ID_MP3:
        s->buf_ptr = s->buf + 4;
        break;
    case CODEC_ID_MPEG1VIDEO:
    case CODEC_ID_MPEG2VIDEO:
        break;
    case CODEC_ID_MPEG2TS:
        n = s->max_payload_size / TS_PACKET_SIZE;
        if (n < 1)
            n = 1;
        s->max_payload_size = n * TS_PACKET_SIZE;
        s->buf_ptr = s->buf;
        break;
    case CODEC_ID_H264:
        /* check for H.264 MP4 syntax */
        if (st->codec->extradata_size > 4 && st->codec->extradata[0] == 1) {
            s->nal_length_size = (st->codec->extradata[4] & 0x03) + 1;
        }
        break;
    case CODEC_ID_VORBIS:
    case CODEC_ID_THEORA:
        if (!s->max_frames_per_packet) s->max_frames_per_packet = 15;
        s->max_frames_per_packet = av_clip(s->max_frames_per_packet, 1, 15);
        s->max_payload_size -= 6; // ident+frag+tdt/vdt+pkt_num+pkt_length
        s->num_frames = 0;
        goto defaultcase;
    case CODEC_ID_VP8:
        av_log(s1, AV_LOG_ERROR, "RTP VP8 payload implementation is "
                                 "incompatible with the latest spec drafts.\n");
        break;
    case CODEC_ID_ADPCM_G722:
        /* Due to a historical error, the clock rate for G722 in RTP is
         * 8000, even if the sample rate is 16000. See RFC 3551. */
        avpriv_set_pts_info(st, 32, 1, 8000);
        break;
    case CODEC_ID_AMR_NB:
    case CODEC_ID_AMR_WB:
        if (!s->max_frames_per_packet)
            s->max_frames_per_packet = 12;
        if (st->codec->codec_id == CODEC_ID_AMR_NB)
            n = 31;
        else
            n = 61;
        /* max_header_toc_size + the largest AMR payload must fit */
        if (1 + s->max_frames_per_packet + n > s->max_payload_size) {
            av_log(s1, AV_LOG_ERROR, "RTP max payload size too small for AMR\n");
            return -1;
        }
        if (st->codec->channels != 1) {
            av_log(s1, AV_LOG_ERROR, "Only mono is supported\n");
            return -1;
        }
    case CODEC_ID_AAC:
        s->num_frames = 0;
    default:
defaultcase:
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            avpriv_set_pts_info(st, 32, 1, st->codec->sample_rate);
        }
        s->buf_ptr = s->buf;
        break;
    }

    return 0;
}
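For audio, max_frames_per_packet is derived with av_rescale_rnd(), rounding down so one packet never spans more than max_delay. A standalone sketch with assumed numbers (48 kHz, 960 samples per coded frame, 0.5 s of allowed delay):

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int64_t max_delay = 500000;   /* 0.5 s in AV_TIME_BASE units */
    int sample_rate = 48000;
    int frame_size = 960;         /* samples per coded audio frame */
    int64_t n = av_rescale_rnd(max_delay, sample_rate,
                               (int64_t)AV_TIME_BASE * frame_size, AV_ROUND_DOWN);
    printf("%" PRId64 "\n", n);   /* 25 frames fit in the delay window */
    return 0;
}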
Example #19
void AVIDump::AddFrame(const u8* data, int width, int height)
{
	avpicture_fill((AVPicture*)s_src_frame, const_cast<u8*>(data), AV_PIX_FMT_BGR24, width, height);

	// Convert image from BGR24 to desired pixel format, and scale to initial
	// width and height
	if ((s_sws_context = sws_getCachedContext(s_sws_context,
	                                          width, height, AV_PIX_FMT_BGR24,
	                                          s_width, s_height, s_stream->codec->pix_fmt,
	                                          SWS_BICUBIC, nullptr, nullptr, nullptr)))
	{
		sws_scale(s_sws_context, s_src_frame->data, s_src_frame->linesize, 0,
		          height, s_scaled_frame->data, s_scaled_frame->linesize);
	}

	s_scaled_frame->format = s_stream->codec->pix_fmt;
	s_scaled_frame->width = s_width;
	s_scaled_frame->height = s_height;

	// Encode and write the image.
	AVPacket pkt;
	PreparePacket(&pkt);
	int got_packet = 0;
	int error = 0;
	u64 delta;
	s64 last_pts;
	if (!s_start_dumping && s_last_frame <= SystemTimers::GetTicksPerSecond())
	{
		delta = CoreTiming::GetTicks();
		last_pts = AV_NOPTS_VALUE;
		s_start_dumping = true;
	}
	else
	{
		delta = CoreTiming::GetTicks() - s_last_frame;
		last_pts = (s_last_pts * s_stream->codec->time_base.den) / SystemTimers::GetTicksPerSecond();
	}
	u64 pts_in_ticks = s_last_pts + delta;
	s_scaled_frame->pts = (pts_in_ticks * s_stream->codec->time_base.den) / SystemTimers::GetTicksPerSecond();
	if (s_scaled_frame->pts != last_pts)
	{
		s_last_frame = CoreTiming::GetTicks();
		s_last_pts = pts_in_ticks;
		error = avcodec_encode_video2(s_stream->codec, &pkt, s_scaled_frame, &got_packet);
	}
	while (!error && got_packet)
	{
		// Write the compressed frame in the media file.
		if (pkt.pts != (s64)AV_NOPTS_VALUE)
		{
			pkt.pts = av_rescale_q(pkt.pts,
			                       s_stream->codec->time_base, s_stream->time_base);
		}
		if (pkt.dts != (s64)AV_NOPTS_VALUE)
		{
			pkt.dts = av_rescale_q(pkt.dts,
			                       s_stream->codec->time_base, s_stream->time_base);
		}
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(56, 60, 100)
		if (s_stream->codec->coded_frame->key_frame)
			pkt.flags |= AV_PKT_FLAG_KEY;
#endif
		pkt.stream_index = s_stream->index;
		av_interleaved_write_frame(s_format_context, &pkt);

		// Handle delayed frames.
		PreparePacket(&pkt);
		error = avcodec_encode_video2(s_stream->codec, &pkt, nullptr, &got_packet);
	}
	if (error)
		ERROR_LOG(VIDEO, "Error while encoding video: %d", error);
}
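The separate pts and dts rescales above can be collapsed into one call on libavcodec versions that provide av_packet_rescale_ts(), which also converts the duration field. A sketch, assuming such a version is available:

#include <inttypes.h>
#include <stdio.h>
#include <libavcodec/avcodec.h>

int main(void)
{
    AVPacket pkt = { 0 };  /* zero-init; the era's av_init_packet(&pkt) also works */
    pkt.pts = 100;
    pkt.dts = 98;
    pkt.duration = 1;
    /* rescales pts, dts and duration together; NOPTS values are left alone */
    av_packet_rescale_ts(&pkt, (AVRational){ 1, 25 }, (AVRational){ 1, 90000 });
    printf("%" PRId64 " %" PRId64 " %" PRId64 "\n",
           pkt.pts, pkt.dts, (int64_t)pkt.duration); /* 360000 352800 3600 */
    return 0;
}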
Example #20
static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
                        AVFrame **rframe)
{
    AVFrame *frame0, *frame, *buf;
    unsigned nb_samples, nb_frames, i, p;
    int ret;

    /* Note: this function relies on no format changes and must only be
       called with enough samples. */
    av_assert1(samples_ready(link));
    frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
    if (frame->nb_samples >= min && frame->nb_samples < max) {
        *rframe = ff_framequeue_take(&link->fifo);
        return 0;
    }
    nb_frames = 0;
    nb_samples = 0;
    while (1) {
        if (nb_samples + frame->nb_samples > max) {
            if (nb_samples < min)
                nb_samples = max;
            break;
        }
        nb_samples += frame->nb_samples;
        nb_frames++;
        if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
            break;
        frame = ff_framequeue_peek(&link->fifo, nb_frames);
    }

    buf = ff_get_audio_buffer(link, nb_samples);
    if (!buf)
        return AVERROR(ENOMEM);
    ret = av_frame_copy_props(buf, frame0);
    if (ret < 0) {
        av_frame_free(&buf);
        return ret;
    }
    buf->pts = frame0->pts;

    p = 0;
    for (i = 0; i < nb_frames; i++) {
        frame = ff_framequeue_take(&link->fifo);
        av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
                        frame->nb_samples, link->channels, link->format);
        p += frame->nb_samples;
        av_frame_free(&frame);
    }
    if (p < nb_samples) {
        unsigned n = nb_samples - p;
        frame = ff_framequeue_peek(&link->fifo, 0);
        av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
                        link->channels, link->format);
        frame->nb_samples -= n;
        av_samples_copy(frame->extended_data, frame->extended_data, 0, n,
                        frame->nb_samples, link->channels, link->format);
        if (frame->pts != AV_NOPTS_VALUE)
            frame->pts += av_rescale_q(n, av_make_q(1, link->sample_rate), link->time_base);
        ff_framequeue_update_peeked(&link->fifo, 0);
        ff_framequeue_skip_samples(&link->fifo, n);
    }

    *rframe = buf;
    return 0;
}
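After a partial take, the head frame's pts must advance by the number of samples consumed, rescaled from 1/sample_rate into the link time base. A minimal sketch; for audio links the time base is often 1/sample_rate itself, which makes this the identity, but the rescale keeps it correct in general:

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int sample_rate = 48000;
    AVRational link_tb = { 1, 48000 };  /* assumed: link ticks in samples */
    int64_t pts = 1024;
    unsigned n = 512;                   /* samples skipped from the head frame */
    pts += av_rescale_q(n, av_make_q(1, sample_rate), link_tb);
    printf("%" PRId64 "\n", pts);       /* 1536 */
    return 0;
}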
Example #21
static demuxer_t* demux_open_lavf(demuxer_t *demuxer){
    AVFormatContext *avfc;
    AVDictionaryEntry *t = NULL;
    lavf_priv_t *priv= demuxer->priv;
    int i;
    char mp_filename[256]="mp:";

    stream_seek(demuxer->stream, 0);

    avfc = avformat_alloc_context();

    if (opt_cryptokey)
        parse_cryptokey(avfc, opt_cryptokey);
    if (user_correct_pts != 0)
        avfc->flags |= AVFMT_FLAG_GENPTS;
    if (index_mode == 0)
        avfc->flags |= AVFMT_FLAG_IGNIDX;

    if(opt_probesize) {
        if (av_opt_set_int(avfc, "probesize", opt_probesize, 0) < 0)
            mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option probesize to %u\n", opt_probesize);
    }
    if(opt_analyzeduration) {
        if (av_opt_set_int(avfc, "analyzeduration", opt_analyzeduration * AV_TIME_BASE, 0) < 0)
            mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option analyzeduration to %u\n", opt_analyzeduration);
    }

    if(opt_avopt){
        if(parse_avopts(avfc, opt_avopt) < 0){
            mp_msg(MSGT_HEADER,MSGL_ERR, "Your options /%s/ look like gibberish to me pal\n", opt_avopt);
            return NULL;
        }
    }

    if(demuxer->stream->url) {
        if (!strncmp(demuxer->stream->url, "ffmpeg://rtsp:", 14))
            av_strlcpy(mp_filename, demuxer->stream->url + 9, sizeof(mp_filename));
        else
            av_strlcat(mp_filename, demuxer->stream->url, sizeof(mp_filename));
    } else
        av_strlcat(mp_filename, "foobar.dummy", sizeof(mp_filename));

    if (!(priv->avif->flags & AVFMT_NOFILE)) {
        priv->pb = avio_alloc_context(priv->buffer, BIO_BUFFER_SIZE, 0,
                                      demuxer, mp_read, NULL, mp_seek);
        priv->pb->read_seek = mp_read_seek;
        if (!demuxer->stream->end_pos || (demuxer->stream->flags & MP_STREAM_SEEK) != MP_STREAM_SEEK)
            priv->pb->seekable = 0;
        avfc->pb = priv->pb;
    }

    if(avformat_open_input(&avfc, mp_filename, priv->avif, NULL)<0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: avformat_open_input() failed\n");
        return NULL;
    }

    priv->avfc= avfc;

    if(avformat_find_stream_info(avfc, NULL) < 0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: avformat_find_stream_info() failed\n");
        return NULL;
    }

    /* Add metadata. */
    while((t = av_dict_get(avfc->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        demux_info_add(demuxer, t->key, t->value);

    for(i=0; i < avfc->nb_chapters; i++) {
        AVChapter *c = avfc->chapters[i];
        uint64_t start = av_rescale_q(c->start, c->time_base, (AVRational){1,1000});
        uint64_t end   = av_rescale_q(c->end, c->time_base, (AVRational){1,1000});
        t = av_dict_get(c->metadata, "title", NULL, 0);
        demuxer_add_chapter(demuxer, t ? t->value : NULL, start, end);
    }

    for(i=0; i<avfc->nb_streams; i++)
        handle_stream(demuxer, avfc, i);
    priv->nb_streams_last = avfc->nb_streams;

    if(avfc->nb_programs) {
        int p;
        for (p = 0; p < avfc->nb_programs; p++) {
            AVProgram *program = avfc->programs[p];
            t = av_dict_get(program->metadata, "title", NULL, 0);
            mp_msg(MSGT_HEADER,MSGL_INFO,"LAVF: Program %d %s\n", program->id, t ? t->value : "");
            mp_msg(MSGT_IDENTIFY, MSGL_V, "PROGRAM_ID=%d\n", program->id);
        }
    }

    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: %d audio and %d video streams found\n",priv->audio_streams,priv->video_streams);
    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: build %d\n", LIBAVFORMAT_BUILD);
    if(!priv->audio_streams) demuxer->audio->id=-2;  // nosound
//    else if(best_audio > 0 && demuxer->audio->id == -1) demuxer->audio->id=best_audio;
    if(!priv->video_streams){
        if(!priv->audio_streams){
            mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF: no audio or video headers found - broken file?\n");
            return NULL;
        }
        demuxer->video->id=-2; // audio-only
    } //else if (best_video > 0 && demuxer->video->id == -1) demuxer->video->id = best_video;

    return demuxer;
}
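Chapter boundaries arrive in each chapter's own time_base and are rescaled to the milliseconds that demuxer_add_chapter() expects. A standalone sketch with an assumed 90 kHz chapter time base:

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational chapter_tb = { 1, 90000 };   /* assumed chapter time base */
    int64_t start = av_rescale_q(90000,  chapter_tb, (AVRational){ 1, 1000 });
    int64_t end   = av_rescale_q(270000, chapter_tb, (AVRational){ 1, 1000 });
    printf("%" PRId64 "..%" PRId64 " ms\n", start, end); /* 1000..3000 ms */
    return 0;
}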
Example #22
static int WriteFrame(AVFrame* pFrame)
{
    double AudioTime, VideoTime;
    int ret;
    // write interleaved audio frame
    if (g_pAStream)
    {
        VideoTime = (double)g_pVFrame->pts * g_pVStream->time_base.num/g_pVStream->time_base.den;
        do
        {
            AudioTime = (double)g_pAFrame->pts * g_pAStream->time_base.num/g_pAStream->time_base.den;
            ret = WriteAudioFrame();
        }
        while (AudioTime < VideoTime && ret);
        if (ret < 0)
            return ret;
    }

    if (!g_pVStream)
        return 0;

    AVPacket Packet;
    av_init_packet(&Packet);
    Packet.data = NULL;
    Packet.size = 0;

    g_pVFrame->pts++;
#if LIBAVCODEC_VERSION_MAJOR < 58
    if (g_pFormat->flags & AVFMT_RAWPICTURE)
    {
        /* raw video case. The API will change slightly in the near
           future for that. */
        Packet.flags |= AV_PKT_FLAG_KEY;
        Packet.stream_index = g_pVStream->index;
        Packet.data = (uint8_t*)pFrame;
        Packet.size = sizeof(AVPicture);

        if (av_interleaved_write_frame(g_pContainer, &Packet) != 0)
            return FatalError("Error while writing video frame");
        return 0;
    }
    else
#endif
    {
#if LIBAVCODEC_VERSION_MAJOR >= 54
        int got_packet;
        if (avcodec_encode_video2(g_pVideo, &Packet, pFrame, &got_packet) < 0)
            return FatalError("avcodec_encode_video2 failed");
        if (!got_packet)
            return 0;

        av_packet_rescale_ts(&Packet, g_pVideo->time_base, g_pVStream->time_base);
#else
        Packet.size = avcodec_encode_video(g_pVideo, g_OutBuffer, OUTBUFFER_SIZE, pFrame);
        if (Packet.size < 0)
            return FatalError("avcodec_encode_video failed");
        if (Packet.size == 0)
            return 0;

        if( g_pVideo->coded_frame->pts != AV_NOPTS_VALUE)
            Packet.pts = av_rescale_q(g_pVideo->coded_frame->pts, g_pVideo->time_base, g_pVStream->time_base);
        if( g_pVideo->coded_frame->key_frame )
            Packet.flags |= AV_PKT_FLAG_KEY;
        Packet.data = g_OutBuffer;
#endif
        // write the compressed frame in the media file
        Packet.stream_index = g_pVStream->index;
        if (av_interleaved_write_frame(g_pContainer, &Packet) != 0)
            return FatalError("Error while writing video frame");

        return 1;
    }
}
Example #23
static int source_run(source_t *n)
{
  packet_consumer_t *consumer;
  int64_t ts;
  AVPacket *pkt=&n->pkt;

  switch (n->state) {
  case STATE_RUN:
    for (;;) {
      n->consumer.n=n->consumer.h;
#if defined (FFSOX_DEPRECATED_AV_FREE_PACKET) // {
      av_free_packet(pkt);
#else // } {
      av_packet_unref(pkt);
#endif // }

      if (av_read_frame(n->f.fc,pkt)<0) {
        n->state=STATE_FLUSH;
        goto flush;
      }

      if (NULL!=n->cb)
        n->cb(n,n->data);

//DVWRITELN("pkt->stream_index: %d",pkt->stream_index);
      while (NULL!=n->consumer.n) {
        consumer=n->consumer.n->consumer;
//DVWRITELN("consumer->vmt->name: \"%s\", consumer->si.stream_index: %d",consumer->vmt->name,consumer->si.stream_index);

        if (pkt->stream_index==consumer->si.stream_index) {
          if (0ll<n->ts) {
            ts=av_rescale_q(n->ts,AV_TIME_BASE_Q,consumer->si.st->time_base);

            if (pkt->dts!=AV_NOPTS_VALUE)
              pkt->dts-=ts;

            if (pkt->pts!=AV_NOPTS_VALUE)
              pkt->pts-=ts;
          }

          consumer->vmt->set_packet(consumer,pkt);
          n->next=consumer;

          return MACHINE_PUSH;
        }

        LIST_NEXT(&n->consumer.n,n->consumer.h);
//DVWRITELN("n->consumer.n: %p",n->consumer.n);
      }
    }
  case STATE_FLUSH:
  flush:
    if (NULL==n->consumer.n) {
      n->next=NULL;
      n->state=STATE_END;

      return MACHINE_POP;
    }
    else {
      consumer=n->consumer.n->consumer;
      consumer->state=STATE_FLUSH;
//DVWRITELN("consumer->vmt->name: \"%s\"",consumer->vmt->name);
      consumer->vmt->set_packet(consumer,NULL);
      n->next=consumer;
      LIST_NEXT(&n->consumer.n,n->consumer.h);

      return MACHINE_PUSH;
    }
  case STATE_END:
    return MACHINE_POP;
  default:
    DMESSAGE("illegal source state");
    return -1;
  }
}
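The trim offset n->ts is held in AV_TIME_BASE units and rescaled into each consumer stream's time_base before being subtracted from the packet timestamps. A small sketch of that shift with assumed values:

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int64_t start_us = 1500000;               /* trim point: 1.5 s in AV_TIME_BASE units */
    AVRational stream_tb = { 1, 90000 };
    int64_t shift = av_rescale_q(start_us, AV_TIME_BASE_Q, stream_tb);
    int64_t pkt_pts = 180000;                 /* a packet 2 s into the file */
    printf("%" PRId64 "\n", pkt_pts - shift); /* 45000, i.e. 0.5 s after the trim */
    return 0;
}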
Example #24
int main(int argc, char **argv)
{
    char *in_graph_desc, **out_dev_name;
    int nb_out_dev = 0, nb_streams = 0;
    AVFilterGraph *in_graph = NULL;
    Stream *streams = NULL, *st;
    AVFrame *frame = NULL;
    int i, j, run = 1, ret;

    //av_log_set_level(AV_LOG_DEBUG);

    if (argc < 3) {
        av_log(NULL, AV_LOG_ERROR,
               "Usage: %s filter_graph dev:out [dev2:out2...]\n\n"
               "Examples:\n"
               "%s movie=file.nut:s=v+a xv:- alsa:default\n"
               "%s movie=file.nut:s=v+a uncodedframecrc:pipe:0\n",
               argv[0], argv[0], argv[0]);
        exit(1);
    }
    in_graph_desc = argv[1];
    out_dev_name = argv + 2;
    nb_out_dev = argc - 2;

    av_register_all();
    avdevice_register_all();
    avfilter_register_all();

    /* Create input graph */
    if (!(in_graph = avfilter_graph_alloc())) {
        ret = AVERROR(ENOMEM);
        av_log(NULL, AV_LOG_ERROR, "Unable to alloc filter graph: %s\n",
               av_err2str(ret));
        goto fail;
    }
    ret = avfilter_graph_parse_ptr(in_graph, in_graph_desc, NULL, NULL, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Unable to parse graph: %s\n",
               av_err2str(ret));
        goto fail;
    }
    nb_streams = 0;
    for (i = 0; i < in_graph->nb_filters; i++) {
        AVFilterContext *f = in_graph->filters[i];
        for (j = 0; j < f->nb_inputs; j++) {
            if (!f->inputs[j]) {
                av_log(NULL, AV_LOG_ERROR, "Graph has unconnected inputs\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
        }
        for (j = 0; j < f->nb_outputs; j++)
            if (!f->outputs[j])
                nb_streams++;
    }
    if (!nb_streams) {
        av_log(NULL, AV_LOG_ERROR, "Graph has no output stream\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }
    if (nb_out_dev != 1 && nb_out_dev != nb_streams) {
        av_log(NULL, AV_LOG_ERROR,
               "Graph has %d output streams, %d devices given\n",
               nb_streams, nb_out_dev);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!(streams = av_calloc(nb_streams, sizeof(*streams)))) {
        ret = AVERROR(ENOMEM);
        av_log(NULL, AV_LOG_ERROR, "Could not allocate streams\n");
        goto fail;
    }
    st = streams;
    for (i = 0; i < in_graph->nb_filters; i++) {
        AVFilterContext *f = in_graph->filters[i];
        for (j = 0; j < f->nb_outputs; j++) {
            if (!f->outputs[j]) {
                if ((ret = create_sink(st++, in_graph, f, j)) < 0)
                    goto fail;
            }
        }
    }
    av_assert0(st - streams == nb_streams);
    if ((ret = avfilter_graph_config(in_graph, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Failed to configure graph\n");
        goto fail;
    }

    /* Create output devices */
    for (i = 0; i < nb_out_dev; i++) {
        char *fmt = NULL, *dev = out_dev_name[i];
        st = &streams[i];
        if ((dev = strchr(dev, ':'))) {
            *(dev++) = 0;
            fmt = out_dev_name[i];
        }
        ret = avformat_alloc_output_context2(&st->mux, NULL, fmt, dev);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to allocate output: %s\n",
                   av_err2str(ret));
            goto fail;
        }
        if (!(st->mux->oformat->flags & AVFMT_NOFILE)) {
            ret = avio_open2(&st->mux->pb, st->mux->filename, AVIO_FLAG_WRITE,
                             NULL, NULL);
            if (ret < 0) {
                av_log(st->mux, AV_LOG_ERROR, "Failed to init output: %s\n",
                       av_err2str(ret));
                goto fail;
            }
        }
    }
    for (; i < nb_streams; i++)
        streams[i].mux = streams[0].mux;

    /* Create output device streams */
    for (i = 0; i < nb_streams; i++) {
        st = &streams[i];
        if (!(st->stream = avformat_new_stream(st->mux, NULL))) {
            ret = AVERROR(ENOMEM);
            av_log(NULL, AV_LOG_ERROR, "Failed to create output stream\n");
            goto fail;
        }
        st->stream->codec->codec_type = st->link->type;
        st->stream->time_base = st->stream->codec->time_base =
            st->link->time_base;
        switch (st->link->type) {
        case AVMEDIA_TYPE_VIDEO:
            st->stream->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
            st->stream->avg_frame_rate =
            st->stream->  r_frame_rate = av_buffersink_get_frame_rate(st->sink);
            st->stream->codec->width               = st->link->w;
            st->stream->codec->height              = st->link->h;
            st->stream->codec->sample_aspect_ratio = st->link->sample_aspect_ratio;
            st->stream->codec->pix_fmt             = st->link->format;
            break;
        case AVMEDIA_TYPE_AUDIO:
            st->stream->codec->channel_layout = st->link->channel_layout;
            st->stream->codec->channels = avfilter_link_get_channels(st->link);
            st->stream->codec->sample_rate = st->link->sample_rate;
            st->stream->codec->sample_fmt = st->link->format;
            st->stream->codec->codec_id =
                av_get_pcm_codec(st->stream->codec->sample_fmt, -1);
            break;
        default:
            av_assert0(!"reached");
        }
    }

    /* Init output devices */
    for (i = 0; i < nb_out_dev; i++) {
        st = &streams[i];
        if ((ret = avformat_write_header(st->mux, NULL)) < 0) {
            av_log(st->mux, AV_LOG_ERROR, "Failed to init output: %s\n",
                   av_err2str(ret));
            goto fail;
        }
    }

    /* Check output devices */
    for (i = 0; i < nb_streams; i++) {
        st = &streams[i];
        ret = av_write_uncoded_frame_query(st->mux, st->stream->index);
        if (ret < 0) {
            av_log(st->mux, AV_LOG_ERROR,
                   "Uncoded frames not supported on stream #%d: %s\n",
                   i, av_err2str(ret));
            goto fail;
        }
    }

    while (run) {
        ret = avfilter_graph_request_oldest(in_graph);
        if (ret < 0) {
            if (ret == AVERROR_EOF) {
                run = 0;
            } else {
                av_log(NULL, AV_LOG_ERROR, "Error filtering: %s\n",
                       av_err2str(ret));
                break;
            }
        }
        for (i = 0; i < nb_streams; i++) {
            st = &streams[i];
            while (1) {
                if (!frame && !(frame = av_frame_alloc())) {
                    ret = AVERROR(ENOMEM);
                    av_log(NULL, AV_LOG_ERROR, "Could not allocate frame\n");
                    goto fail;
                }
                ret = av_buffersink_get_frame_flags(st->sink, frame,
                                                    AV_BUFFERSINK_FLAG_NO_REQUEST);
                if (ret < 0) {
                    if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
                        av_log(NULL, AV_LOG_WARNING, "Error in sink: %s\n",
                               av_err2str(ret));
                    break;
                }
                if (frame->pts != AV_NOPTS_VALUE)
                    frame->pts = av_rescale_q(frame->pts,
                                              st->link  ->time_base,
                                              st->stream->time_base);
                ret = av_interleaved_write_uncoded_frame(st->mux,
                                                         st->stream->index,
                                                         frame);
                frame = NULL;
                if (ret < 0) {
                    av_log(st->stream->codec, AV_LOG_ERROR,
                           "Error writing frame: %s\n", av_err2str(ret));
                    goto fail;
                }
            }
        }
    }
    ret = 0;

    for (i = 0; i < nb_out_dev; i++) {
        st = &streams[i];
        av_write_trailer(st->mux);
    }

fail:
    av_frame_free(&frame);
    avfilter_graph_free(&in_graph);
    if (streams) {
        for (i = 0; i < nb_out_dev; i++) {
            st = &streams[i];
            if (st->mux) {
                if (st->mux->pb)
                    avio_closep(&st->mux->pb);
                avformat_free_context(st->mux);
            }
        }
    }
    av_freep(&streams);
    return ret < 0;
}
Example #25
static bool encode_audio(ffmpeg_t *handle, AVPacket *pkt, bool dry)
{
   av_init_packet(pkt);
   pkt->data = handle->audio.outbuf;
   pkt->size = handle->audio.outbuf_size;

   AVFrame *frame = av_frame_alloc();
   if (!frame)
      return false;

   frame->nb_samples     = handle->audio.frames_in_buffer;
   frame->format         = handle->audio.codec->sample_fmt;
   frame->channel_layout = handle->audio.codec->channel_layout;
   frame->pts            = handle->audio.frame_cnt;

   planarize_audio(handle);

   int samples_size = av_samples_get_buffer_size(NULL, handle->audio.codec->channels,
         handle->audio.frames_in_buffer,
         handle->audio.codec->sample_fmt, 0);

   avcodec_fill_audio_frame(frame, handle->audio.codec->channels,
         handle->audio.codec->sample_fmt,
         handle->audio.is_planar ? (uint8_t*)handle->audio.planar_buf : handle->audio.buffer,
         samples_size, 0);

   int got_packet = 0;
   if (avcodec_encode_audio2(handle->audio.codec,
            pkt, dry ? NULL : frame, &got_packet) < 0)
   {
      av_frame_free(&frame);
      return false;
   }

   if (!got_packet)
   {
      pkt->size = 0;
      pkt->pts = AV_NOPTS_VALUE;
      pkt->dts = AV_NOPTS_VALUE;
      av_frame_free(&frame);
      return true;
   }

   if (pkt->pts != (int64_t)AV_NOPTS_VALUE)
   {
      pkt->pts = av_rescale_q(pkt->pts,
            handle->audio.codec->time_base,
            handle->muxer.astream->time_base);
   }

   if (pkt->dts != (int64_t)AV_NOPTS_VALUE)
   {
      pkt->dts = av_rescale_q(pkt->dts,
            handle->audio.codec->time_base,
            handle->muxer.astream->time_base);
   }

   av_frame_free(&frame);

   pkt->stream_index = handle->muxer.astream->index;
   return true;
}
Example #26
void Movie::EncodeAudio(bool last)
{
    AVStream *astream = av->fmt_ctx->streams[av->audio_stream_idx];
    AVCodecContext *acodec = astream->codec;
    
    
    av_fifo_generic_write(av->audio_fifo, &audiobuf[0], audiobuf.size(), NULL);
    
    // bps: bytes per sample
    int channels = acodec->channels;
    int read_bps = 2;
    int write_bps = av_get_bytes_per_sample(acodec->sample_fmt);
    
    int max_read = acodec->frame_size * read_bps * channels;
    int min_read = last ? read_bps * channels : max_read;
    while (av_fifo_size(av->audio_fifo) >= min_read)
    {
        int read_bytes = MIN(av_fifo_size(av->audio_fifo), max_read);
        av_fifo_generic_read(av->audio_fifo, av->audio_data, read_bytes, NULL);
        
        // convert
        int read_samples = read_bytes / (read_bps * channels);
        int write_samples = read_samples;
        if (read_samples < acodec->frame_size)
        {
            // shrink or pad audio frame
            if (acodec->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME)
                acodec->frame_size = write_samples;
            else
                write_samples = acodec->frame_size;
        }

        convert_audio(read_samples, acodec->channels, -1,
                      AV_SAMPLE_FMT_S16, av->audio_data,
                      write_samples, acodec->channels, write_samples * write_bps,
                      acodec->sample_fmt, av->audio_data_conv);
                      
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
        avcodec_get_frame_defaults(av->audio_frame);
#else
        av_frame_unref(av->audio_frame);
#endif
        av->audio_frame->nb_samples = write_samples;
        av->audio_frame->pts = av_rescale_q(av->audio_counter,
                                            (AVRational){1, acodec->sample_rate},
                                            acodec->time_base);
        av->audio_counter += write_samples;
        int asize = avcodec_fill_audio_frame(av->audio_frame, acodec->channels,
                                             acodec->sample_fmt,
                                             av->audio_data_conv,
                                             write_samples * write_bps * channels, 1);
        if (asize >= 0)
        {
            AVPacket pkt;
            memset(&pkt, 0, sizeof(AVPacket));
            av_init_packet(&pkt);
            
            int got_pkt = 0;
            if (0 == avcodec_encode_audio2(acodec, &pkt, av->audio_frame, &got_pkt)
                && got_pkt)
            {
                if (pkt.pts != AV_NOPTS_VALUE && pkt.pts < pkt.dts)
                    pkt.pts = pkt.dts;
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(pkt.pts, acodec->time_base, astream->time_base);
                if (pkt.dts != AV_NOPTS_VALUE)
                    pkt.dts = av_rescale_q(pkt.dts, acodec->time_base, astream->time_base);
                pkt.duration = av_rescale_q(pkt.duration, acodec->time_base, astream->time_base);
                pkt.stream_index = astream->index;
                av_interleaved_write_frame(av->fmt_ctx, &pkt);
                av_free_packet(&pkt);
            }
        }
    }
    if (last)
    {
        bool done = false;
        while (!done)
        {
            AVPacket pkt;
            memset(&pkt, 0, sizeof(AVPacket));
            av_init_packet(&pkt);
            
            int got_pkt = 0;
            if (0 == avcodec_encode_audio2(acodec, &pkt, NULL, &got_pkt)
                && got_pkt)
            {
                if (pkt.pts != AV_NOPTS_VALUE && pkt.pts < pkt.dts)
                    pkt.pts = pkt.dts;
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(pkt.pts, acodec->time_base, astream->time_base);
                if (pkt.dts != AV_NOPTS_VALUE)
                    pkt.dts = av_rescale_q(pkt.dts, acodec->time_base, astream->time_base);
                pkt.duration = av_rescale_q(pkt.duration, acodec->time_base, astream->time_base);
                pkt.stream_index = astream->index;
                av_interleaved_write_frame(av->fmt_ctx, &pkt);
                av_free_packet(&pkt);
            }
            else
            {
                done = true;
            }
        }
        
    }
    
}
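The encode and flush paths above duplicate the packet bookkeeping, and avcodec_encode_audio2() itself was deprecated in FFmpeg 3.1 and later removed. A minimal sketch of the same flush logic on the send/receive API (the drain_audio_encoder name is mine, and error handling is reduced to the essentials):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Drain every packet still buffered in an audio encoder and mux it.
 * Passing NULL to avcodec_send_frame() puts the encoder in drain mode. */
static int drain_audio_encoder(AVCodecContext *acodec, AVStream *astream,
                               AVFormatContext *fmt_ctx)
{
    int ret = avcodec_send_frame(acodec, NULL);
    if (ret < 0)
        return ret;

    AVPacket *pkt = av_packet_alloc();
    if (!pkt)
        return AVERROR(ENOMEM);

    while ((ret = avcodec_receive_packet(acodec, pkt)) == 0) {
        /* rescale pts/dts/duration from codec to stream time base */
        av_packet_rescale_ts(pkt, acodec->time_base, astream->time_base);
        pkt->stream_index = astream->index;
        ret = av_interleaved_write_frame(fmt_ctx, pkt); /* unrefs pkt */
        if (ret < 0)
            break;
    }
    av_packet_free(&pkt);
    return ret == AVERROR_EOF ? 0 : ret;
}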
Example #27
static int write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *sctx = NULL;

    c = st->codec;

    if (c->pix_fmt != PIX_FMT_YUV420P) {
        if (sctx == NULL) {
            sctx = sws_getContext(c->width, c->height, PIX_FMT_YUV420P,
                    c->width, c->height, c->pix_fmt,
                    SWS_BICUBIC, NULL, NULL, NULL);
            if (sctx == NULL)
                return -1;
        }

        fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
        sws_scale(sctx, (const uint8_t * const *) tmp_picture->data, tmp_picture->linesize,
                0, c->height, picture->data, picture->linesize);
    } else {
        fill_yuv_image(picture, frame_count, c->width, c->height);
    }

    /* encode the image */
    out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
    /* if zero size, it means the image was buffered */
    if (out_size > 0) {
        AVPacket pkt;
        FILE *ff = fopen("xx.jpeg", "wb+"); /* debug dump of the encoded frame */

        av_init_packet(&pkt);

        if (c->coded_frame->pts != AV_NOPTS_VALUE)
            pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
        if(c->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index= st->index;
        pkt.data= video_outbuf;
        pkt.size= out_size;

        /* write the compressed frame in the media file */
        ret = av_interleaved_write_frame(oc, &pkt);

        fwrite(video_outbuf, out_size, 1, ff);
        fclose(ff);
    } else {
        ret = 0;
    }

    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        return -1;
    }

    printf("Frame written: %d\n", frame_count);
    frame_count++;

    return 0;
}
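A return value of 0 from avcodec_encode_video() means the encoder buffered the picture (e.g. while reordering B-frames), so code using this old API normally drains at end of stream by calling the encoder with a NULL picture until it returns 0. A sketch of that step, assuming the same c, st, oc, video_outbuf and video_outbuf_size as in the function above:

    /* flush delayed frames from the old-style encoder (sketch) */
    int sz;
    while ((sz = avcodec_encode_video(c, video_outbuf,
                                      video_outbuf_size, NULL)) > 0) {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = video_outbuf;
        pkt.size = sz;
        pkt.stream_index = st->index;
        if (c->coded_frame->pts != AV_NOPTS_VALUE)
            pkt.pts = av_rescale_q(c->coded_frame->pts,
                                   c->time_base, st->time_base);
        if (c->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
        av_interleaved_write_frame(oc, &pkt);
    }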
int video_thr(LPVOID lpParam)
{
	int iRet = -1;
	AVFormatContext		*	pFmtCtx				= NULL;
	AVFormatContext		*	pRtmpFmtCtx			= NULL;
	AVInputFormat		*	pVideoInputFmt		= NULL;
	AVOutputFormat		*	pVideoOutfmt		= NULL;
	struct_stream_info	*	strct_streaminfo	= NULL;
	AVCodecContext		*	pCodecContext		= NULL;
	AVCodec				*	pCodec				= NULL;
	int						iVideoIndex			= -1;
	int						iVideo_Height		= 0;
	int						iVideo_Width		= 0;
	int						iVideoPic			= 0;
	int64_t					start_time			= 0;
	int						frame_index			= 0;
	SDL_Event				event;

	CLS_DlgStreamPusher* pThis = (CLS_DlgStreamPusher*)lpParam;
	if (pThis == NULL){
		TRACE("video_thr--pThis == NULL\n");
		return iRet;
	}
	pVideoInputFmt = av_find_input_format("dshow");
	if (pVideoInputFmt == NULL){
		TRACE("pVideoInputFmt == NULL\n");
		return iRet;
	}

	char* psDevName = pThis->GetDeviceName(n_Video);
	if (psDevName == NULL){
		TRACE("video_thr--psDevName == NULL");
		return iRet;
	}

	// busy-wait until the push URL has been set by the UI thread
	while (1){
		if (pThis->m_cstrPushAddr != ""){
			break;
		}
	}

	// allocate the output AVFormatContext for the push URL (FLV over RTMP)
	avformat_alloc_output_context2(&pRtmpFmtCtx, NULL, "flv", pThis->m_cstrPushAddr);

	if (NULL == pRtmpFmtCtx){
		TRACE("NULL == pRtmpFmtCtx");
		return iRet;
	}
	pVideoOutfmt = pRtmpFmtCtx->oformat;

	strct_streaminfo = pThis->m_pStreamInfo;
	if (NULL == strct_streaminfo){
		TRACE("NULL == strct_streaminfo");
		return iRet;
	}

	pFmtCtx = avformat_alloc_context();
	if (avformat_open_input(&pFmtCtx, psDevName, pVideoInputFmt, NULL) != 0){
		TRACE("avformat_open_input err!\n");
		goto END;
	}
	if (avformat_find_stream_info(pFmtCtx, NULL) < 0){
		TRACE("avformat_find_stream_info(pFmtCtx, NULL) < 0\n");
		goto END;
	}

	for (int i = 0; i < pFmtCtx->nb_streams; i++){
		if (pFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO){
			iVideoIndex = i;
			break;
		}
	}

	if (iVideoIndex < 0){
		TRACE("iVideoIndex < 0\n");
		goto END;
	}

	pCodecContext = pFmtCtx->streams[iVideoIndex]->codec;
	if (NULL == pCodecContext){
		TRACE("NULL == pCodecContext");
		goto END;
	}
	pCodec = avcodec_find_decoder(pCodecContext->codec_id);
	if (pCodec == NULL){
		TRACE("avcodec_find_decoder<0");
		goto END;
	}
	if (avcodec_open2(pCodecContext, pCodec, NULL)<0){
		TRACE("avcodec_open2<0");
		goto END;
	}

	for (int i = 0; i < pFmtCtx->nb_streams; i++) {
		// Create an output AVStream for each input AVStream
		AVStream *in_stream = pFmtCtx->streams[i];
		AVStream *out_stream = avformat_new_stream(pRtmpFmtCtx, in_stream->codec->codec);
		if (!out_stream) {
			printf("Failed allocating output stream\n");
			iRet = AVERROR_UNKNOWN;
			goto END;
		}
		// Copy the AVCodecContext settings from input to output
		iRet = avcodec_copy_context(out_stream->codec, in_stream->codec);
		if (iRet < 0) {
			TRACE("Failed to copy context from input to output stream codec context\n");
			goto END;
		}
		out_stream->codec->codec_tag = 0;
		if (pRtmpFmtCtx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	if (!(pVideoOutfmt->flags & AVFMT_NOFILE)) {
		iRet = avio_open(&pRtmpFmtCtx->pb, pThis->m_cstrPushAddr, AVIO_FLAG_WRITE);
		if (iRet < 0) {
			TRACE("Could not open output URL '%s'", pThis->m_cstrPushAddr);
			goto END;
		}
	}

	iRet = avformat_write_header(pRtmpFmtCtx, NULL);
	if (iRet < 0) {
		TRACE("Error occurred when opening output URL\n");
		goto END;
	}

	start_time = av_gettime();

	// get the video width and height from the decoder context
	// (strct_streaminfo->m_height / m_width would work as well)
	iVideo_Height = pCodecContext->height;
	iVideo_Width = pCodecContext->width;
	TRACE("video_thr--video_height[%d],video_width[%d]", iVideo_Height, iVideo_Width);

	strct_streaminfo->m_pVideoPacket = (AVPacket*)av_malloc(sizeof(AVPacket));

	strct_streaminfo->m_pVideoFrame = av_frame_alloc();
	strct_streaminfo->m_pVideoFrameYUV = av_frame_alloc();
	strct_streaminfo->m_pVideoOutBuffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, iVideo_Width, iVideo_Height));
	avpicture_fill((AVPicture *)strct_streaminfo->m_pVideoFrameYUV, strct_streaminfo->m_pVideoOutBuffer, AV_PIX_FMT_YUV420P, iVideo_Width, iVideo_Height);
	strct_streaminfo->m_video_sws_ctx = sws_getContext(iVideo_Width, iVideo_Height, pCodecContext->pix_fmt,
		iVideo_Width, iVideo_Height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	if (NULL == strct_streaminfo->m_video_sws_ctx){
		TRACE("NULL == strct_streaminfo->m_video_sws_ctx\n");
		goto END;
	}

	strct_streaminfo->m_video_refresh_tid = SDL_CreateThread(video_refresh_thread, NULL, strct_streaminfo);

	// main loop: read frames from the camera and push them
	for (;;){
		AVStream *in_stream, *out_stream;
		SDL_WaitEvent(&event);
		if (event.type == FF_VIDEO_REFRESH_EVENT){
			if (av_read_frame(pFmtCtx, strct_streaminfo->m_pVideoPacket) >= 0){
				if (strct_streaminfo->m_pVideoPacket->pts == AV_NOPTS_VALUE){
					//Write PTS  
					AVRational time_base1 = pFmtCtx->streams[iVideoIndex]->time_base;
					//Duration between 2 frames (us)  
					int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(pFmtCtx->streams[iVideoIndex]->r_frame_rate);
					//Parameters  
					strct_streaminfo->m_pVideoPacket->pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
					strct_streaminfo->m_pVideoPacket->dts = strct_streaminfo->m_pVideoPacket->pts;
					strct_streaminfo->m_pVideoPacket->duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
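					// e.g. if r_frame_rate were 25/1 and time_base1 were 1/1000000,
					// calc_duration = 1000000/25 = 40000 us and pts would advance
					// by 40000/(1e-6*1000000) = 40000 per frame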
				}

				if (strct_streaminfo->m_pVideoPacket->stream_index == iVideoIndex){

					AVRational time_base = pFmtCtx->streams[iVideoIndex]->time_base;
					AVRational time_base_q = { 1, AV_TIME_BASE };
					int64_t pts_time = av_rescale_q(strct_streaminfo->m_pVideoPacket->dts, time_base, time_base_q);
					int64_t now_time = av_gettime() - start_time;
					if (pts_time > now_time)
						av_usleep(pts_time - now_time);

					in_stream = pFmtCtx->streams[strct_streaminfo->m_pVideoPacket->stream_index];
					out_stream = pRtmpFmtCtx->streams[strct_streaminfo->m_pVideoPacket->stream_index];
					/* copy packet */
					// Convert PTS/DTS to the output stream's time base
					strct_streaminfo->m_pVideoPacket->pts = av_rescale_q_rnd(strct_streaminfo->m_pVideoPacket->pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
					strct_streaminfo->m_pVideoPacket->dts = av_rescale_q_rnd(strct_streaminfo->m_pVideoPacket->dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
					strct_streaminfo->m_pVideoPacket->duration = av_rescale_q(strct_streaminfo->m_pVideoPacket->duration, in_stream->time_base, out_stream->time_base);
					strct_streaminfo->m_pVideoPacket->pos = -1;
					TRACE("Send %8d video frames to output URL\n", frame_index);

					frame_index++;

					iRet = av_interleaved_write_frame(pRtmpFmtCtx, strct_streaminfo->m_pVideoPacket);

					if (iRet < 0) {
						TRACE("Error muxing packet\n");
						break;
					}

					if (pThis->m_blVideoShow){

						// decode the packet for local display
						iRet = avcodec_decode_video2(pCodecContext, strct_streaminfo->m_pVideoFrame, &iVideoPic, strct_streaminfo->m_pVideoPacket);
						if (iRet < 0){
							TRACE("Decode Error.\n");
							av_free_packet(strct_streaminfo->m_pVideoPacket);
							goto END;
						}
						if (iVideoPic <= 0){
							TRACE("iVideoPic <= 0");
							av_free_packet(strct_streaminfo->m_pVideoPacket);
							goto END;
						}

						if (sws_scale(strct_streaminfo->m_video_sws_ctx, (const uint8_t* const*)strct_streaminfo->m_pVideoFrame->data, strct_streaminfo->m_pVideoFrame->linesize, 0, /*strct_streaminfo->m_height*/iVideo_Height,
							strct_streaminfo->m_pVideoFrameYUV->data, strct_streaminfo->m_pVideoFrameYUV->linesize) < 0){
							TRACE("sws_scale < 0");
							av_free_packet(strct_streaminfo->m_pVideoPacket);
							goto END;
						}

						if (pThis->m_blPushStream){
							// perform the push-stream operation
							if (pThis->push_stream() < 0){
								TRACE("pThis->push_stream() < 0");
								goto END;
							}
						}


						if (SDL_UpdateTexture(strct_streaminfo->m_sdlTexture, NULL, strct_streaminfo->m_pVideoFrameYUV->data[0], strct_streaminfo->m_pVideoFrameYUV->linesize[0]) < 0){
							TRACE("SDL_UpdateTexture < 0\n");
							goto END;
						}

						if (SDL_RenderClear(strct_streaminfo->m_sdlRenderer) < 0){
							TRACE("SDL_RenderClear<0\n");
							goto END;
						}
						
						if (SDL_RenderCopy(strct_streaminfo->m_sdlRenderer, strct_streaminfo->m_sdlTexture, NULL, NULL) < 0){
							TRACE("SDL_RenderCopy<0\n");
							goto END;
						}

						SDL_RenderPresent(strct_streaminfo->m_sdlRenderer);
					}
				}
				av_free_packet(strct_streaminfo->m_pVideoPacket);
			}
		}
		else if (event.type == FF_BREAK_EVENT){
			break;
		}
	}
	av_write_trailer(pRtmpFmtCtx);

	iRet = 1;
END:
	if (strct_streaminfo->m_video_sws_ctx){
		sws_freeContext(strct_streaminfo->m_video_sws_ctx);
	}
	if (strct_streaminfo->m_pVideoFrameYUV){
		av_frame_free(&strct_streaminfo->m_pVideoFrameYUV);
	}
	avformat_close_input(&pFmtCtx);
	if (pRtmpFmtCtx && !(pRtmpFmtCtx->oformat->flags & AVFMT_NOFILE))
		avio_close(pRtmpFmtCtx->pb);
	avformat_free_context(pRtmpFmtCtx);
	return iRet;
}
Example #29
static int mediacodec_wrap_hw_buffer(AVCodecContext *avctx,
                                  MediaCodecDecContext *s,
                                  ssize_t index,
                                  FFAMediaCodecBufferInfo *info,
                                  AVFrame *frame)
{
    int ret = 0;
    int status = 0;
    AVMediaCodecBuffer *buffer = NULL;

    frame->buf[0] = NULL;
    frame->width = avctx->width;
    frame->height = avctx->height;
    frame->format = avctx->pix_fmt;

    if (avctx->pkt_timebase.num && avctx->pkt_timebase.den) {
        frame->pts = av_rescale_q(info->presentationTimeUs,
                                      av_make_q(1, 1000000),
                                      avctx->pkt_timebase);
    } else {
        frame->pts = info->presentationTimeUs;
    }
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_pts = frame->pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    frame->pkt_dts = AV_NOPTS_VALUE;

    buffer = av_mallocz(sizeof(AVMediaCodecBuffer));
    if (!buffer) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    buffer->released = 0;

    frame->buf[0] = av_buffer_create(NULL,
                                     0,
                                     mediacodec_buffer_release,
                                     buffer,
                                     AV_BUFFER_FLAG_READONLY);

    if (!frame->buf[0]) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    buffer->ctx = s;
    ff_mediacodec_dec_ref(s);

    buffer->index = index;
    buffer->pts = info->presentationTimeUs;

    frame->data[3] = (uint8_t *)buffer;

    return 0;
fail:
    av_freep(&buffer);
    av_buffer_unref(&frame->buf[0]);
    status = ff_AMediaCodec_releaseOutputBuffer(s->codec, index, 0);
    if (status < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to release output buffer\n");
        ret = AVERROR_EXTERNAL;
    }

    return ret;
}
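The zero-length av_buffer_create() above is a common trick: the buffer carries no data and exists only so that mediacodec_buffer_release() runs when the last reference to the frame is dropped. A generic sketch of the pattern (the MyState type, my_release and wrap_state names are hypothetical):

#include <libavutil/buffer.h>
#include <libavutil/mem.h>

/* hypothetical per-frame state kept alive by the frame's buffer ref */
typedef struct MyState { int index; } MyState;

static void my_release(void *opaque, uint8_t *data)
{
    /* called when the last AVBufferRef is unreferenced; data is NULL here */
    av_freep(&opaque);
}

static AVBufferRef *wrap_state(MyState *state)
{
    /* zero-length buffer: no payload, just a lifetime hook for `state` */
    return av_buffer_create(NULL, 0, my_release, state,
                            AV_BUFFER_FLAG_READONLY);
}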
Example #30
/**
 * Try to find Xing/Info/VBRI tags and compute duration from info therein
 */
static int mp3_parse_vbr_tags(AVFormatContext *s, AVStream *st, int64_t base)
{
    uint32_t v, spf;
    unsigned frames = 0; /* Total number of frames in file */
    unsigned size = 0; /* Total number of bytes in the stream */
    const int64_t xing_offtbl[2][2] = {{32, 17}, {17,9}};
    MPADecodeHeader c;
    int vbrtag_size = 0;
    int is_cbr;

    v = avio_rb32(s->pb);
    if (ff_mpa_check_header(v) < 0)
        return -1;

    if (avpriv_mpegaudio_decode_header(&c, v) == 0)
        vbrtag_size = c.frame_size;
    if (c.layer != 3)
        return -1;

    spf = c.lsf ? 576 : 1152; /* Samples per frame, layer 3 */

    /* Check for Xing / Info tag */
    avio_skip(s->pb, xing_offtbl[c.lsf == 1][c.nb_channels == 1]);
    v = avio_rb32(s->pb);
    is_cbr = v == MKBETAG('I', 'n', 'f', 'o');
    if (v == MKBETAG('X', 'i', 'n', 'g') || is_cbr) {
        v = avio_rb32(s->pb);
        if(v & XING_FLAG_FRAMES)
            frames = avio_rb32(s->pb);
        if(v & XING_FLAG_SIZE)
            size = avio_rb32(s->pb);
        if (v & XING_FLAG_TOC && frames)
            read_xing_toc(s, size, av_rescale_q(frames, (AVRational){spf, c.sample_rate},
                                    st->time_base));
    }

    /* Check for VBRI tag (always 32 bytes after end of mpegaudio header) */
    avio_seek(s->pb, base + 4 + 32, SEEK_SET);
    v = avio_rb32(s->pb);
    if(v == MKBETAG('V', 'B', 'R', 'I')) {
        /* Check tag version */
        if(avio_rb16(s->pb) == 1) {
            /* skip delay and quality */
            avio_skip(s->pb, 4);
            size = avio_rb32(s->pb);
            frames = avio_rb32(s->pb);
        }
    }

    if(!frames && !size)
        return -1;

    /* Skip the vbr tag frame */
    avio_seek(s->pb, base + vbrtag_size, SEEK_SET);

    if(frames)
        st->duration = av_rescale_q(frames, (AVRational){spf, c.sample_rate},
                                    st->time_base);
    if (size && frames && !is_cbr)
        st->codec->bit_rate = av_rescale(size, 8 * c.sample_rate, frames * (int64_t)spf);

    return 0;
}
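As a worked example of the duration computation: a 44.1 kHz layer-3 file (spf = 1152) whose Xing header reports frames = 10000 contains 10000 * 1152 = 11520000 samples, i.e. 11520000 / 44100, roughly 261.2 seconds; av_rescale_q(frames, (AVRational){1152, 44100}, st->time_base) expresses that same value in stream time-base units without overflowing 64-bit intermediates. The bit-rate line is the same identity rearranged: size bytes transmitted over frames * spf / sample_rate seconds gives size * 8 * sample_rate / (frames * spf) bits per second.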