Example #1
EC_U32 FFmpegReader::GetMediaInfo(MediaCtxInfo **ppMediaInfo)
{
    if (EC_NULL == ppMediaInfo || EC_NULL == *ppMediaInfo)
        return EC_Err_BadParam;
    if (EC_NULL == m_pFormatCtx)
        return Source_Err_NotInit;

    (*ppMediaInfo)->m_nVideoWidth = 0;
    (*ppMediaInfo)->m_nVideoHeight = 0;
    (*ppMediaInfo)->m_nAudioIndex = m_nAudioIndex;
    (*ppMediaInfo)->m_nVideoIndex = m_nVideoIndex;
    (*ppMediaInfo)->m_pFormatInfo = m_pFormatCtx;
    (*ppMediaInfo)->m_nDuration = (EC_U32)(m_pFormatCtx->duration / 1000);
    if (m_nAudioIndex != -1)
    {
        AVCodecContext *pACodecCtx = m_pFormatCtx->streams[m_nAudioIndex]->codec;
        (*ppMediaInfo)->m_pAudioCodecInfo = m_pFormatCtx->streams[m_nAudioIndex]->codec;
        (*ppMediaInfo)->m_nChannels = pACodecCtx->channels;
        (*ppMediaInfo)->m_nSampleRate = pACodecCtx->sample_rate;
        (*ppMediaInfo)->m_nSampleSize = pACodecCtx->frame_size;
        (*ppMediaInfo)->m_nSampleFormat = pACodecCtx->sample_fmt;
        (*ppMediaInfo)->m_nChannelsLayOut = (EC_U32)pACodecCtx->channel_layout;
    }
    if (m_nVideoIndex != -1)
    {
        AVStream *pVideoStream = m_pFormatCtx->streams[m_nVideoIndex];
        AVCodecContext *pVCodecCtx = m_pFormatCtx->streams[m_nVideoIndex]->codec;
        EC_DOUBLE dFps = 0;
        EC_S32 fps = pVideoStream->avg_frame_rate.den && pVideoStream->avg_frame_rate.num;
        EC_S32 tbr = pVideoStream->r_frame_rate.den && pVideoStream->r_frame_rate.num;
        EC_S32 tbn = pVideoStream->time_base.den && pVideoStream->time_base.num;
        EC_S32 tbc = pVideoStream->codec->time_base.den && pVideoStream->codec->time_base.num;
        if (fps)
            dFps = av_q2d(av_make_q(pVideoStream->avg_frame_rate.den,
                                    pVideoStream->avg_frame_rate.num));
        else if (tbr)
            dFps = av_q2d(av_make_q(pVideoStream->r_frame_rate.den,
                                    pVideoStream->r_frame_rate.num));
        else if (tbn)
            dFps = av_q2d(pVideoStream->time_base);
        else if (tbc)
            dFps = av_q2d(pVideoStream->codec->time_base);
        (*ppMediaInfo)->m_nVideoWidth = pVCodecCtx->width;
        (*ppMediaInfo)->m_nVideoHeight = pVCodecCtx->height;
        (*ppMediaInfo)->m_nFramePixFmt = pVCodecCtx->pix_fmt;
        (*ppMediaInfo)->m_pVideoCodecInfo = m_pFormatCtx->streams[m_nVideoIndex]->codec;
        (*ppMediaInfo)->m_FrameAVGDuration = dFps;
    }

    return EC_Err_None;
}
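A side note on the rationals above: because av_make_q() is called as av_make_q(den, num), dFps actually ends up holding the average frame duration in seconds (the reciprocal of the frame rate), which is consistent with the m_FrameAVGDuration field it is stored in. A minimal sketch of the two related quantities (the helper name is illustrative, not FFmpeg API):

#include <libavutil/rational.h>

/* Given a stream's avg_frame_rate, av_q2d() yields frames per second,
 * and inverting the rational first yields seconds per frame. */
static void frame_timing(AVRational avg_frame_rate,
                         double *fps, double *avg_frame_duration)
{
    *fps                = av_q2d(avg_frame_rate);           /* frames/s */
    *avg_frame_duration = av_q2d(av_inv_q(avg_frame_rate)); /* s/frame  */
}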
Example #2
File: Stream.cpp Project: FyhSky/sfeMovie
 sf::Time Stream::computeEncodedPosition()
 {
     if (!m_packetList.size())
     {
         m_dataSource.requestMoreData(*this);
     }
     
     if (!m_packetList.size())
     {
         return sf::Time::Zero;
     }
     else
     {
         sf::Lock l(m_readerMutex);
         AVPacket* packet = m_packetList.front();
         CHECK(packet, "internal inconcistency");
         
         int64_t timestamp = -424242;
         
         if (packet->dts != AV_NOPTS_VALUE)
         {
             timestamp = packet->dts;
         }
         else if (packet->pts != AV_NOPTS_VALUE)
         {
             int64_t startTime = m_stream->start_time != AV_NOPTS_VALUE ? m_stream->start_time : 0;
             timestamp = packet->pts - startTime;
         }
         
         AVRational seconds = av_mul_q(av_make_q(timestamp, 1), m_stream->time_base);
         return sf::milliseconds(1000 * av_q2d(seconds));
     }
 }
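The av_mul_q(av_make_q(timestamp, 1), m_stream->time_base) pattern above is rational arithmetic for "ticks times seconds per tick". The same conversion can be written with av_rescale_q(), which rounds to nearest instead of truncating; a sketch (the helper name is illustrative):

#include <libavutil/rational.h>
#include <libavutil/mathematics.h>

/* Convert a timestamp in stream time-base ticks to milliseconds,
 * equivalent to 1000 * ts * av_q2d(time_base) with proper rounding. */
static int64_t ticks_to_ms(int64_t ts, AVRational time_base)
{
    return av_rescale_q(ts, time_base, av_make_q(1, 1000));
}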
Example #3
File: Stream.cpp Project: FyhSky/sfeMovie
 sf::Time Stream::packetDuration(const AVPacket* packet) const
 {
     CHECK(packet, "inconcistency error: null packet");
     CHECK(packet->stream_index == m_streamID, "Asking for duration of a packet for a different stream!");
     
     if (packet->duration != 0)
     {
         AVRational seconds = av_mul_q(av_make_q(packet->duration, 1), m_stream->time_base);
         return sf::seconds(av_q2d(seconds));
     }
     else
     {
         return sf::seconds(1. / av_q2d(av_guess_frame_rate(m_formatCtx, m_stream, nullptr)));
     }
 }
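When packet->duration is zero, the example falls back to the stream's guessed frame rate, implicitly assuming one frame per packet. The same logic without the sfml wrapper, as a sketch (the helper name is illustrative):

#include <libavformat/avformat.h>

/* Duration of one packet in seconds: trust packet->duration when set,
 * otherwise assume one frame per packet and use the guessed frame rate. */
static double packet_duration_sec(AVFormatContext *fmt, AVStream *st,
                                  const AVPacket *pkt)
{
    if (pkt->duration != 0)
        return pkt->duration * av_q2d(st->time_base);
    return 1.0 / av_q2d(av_guess_frame_rate(fmt, st, NULL));
}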
Example #4
static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
                        AVFrame **rframe)
{
    AVFrame *frame0, *frame, *buf;
    unsigned nb_samples, nb_frames, i, p;
    int ret;

    /* Note: this function relies on no format changes and must only be
       called with enough samples. */
    av_assert1(samples_ready(link));
    frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
    if (frame->nb_samples >= min && frame->nb_samples < max) {
        *rframe = ff_framequeue_take(&link->fifo);
        return 0;
    }
    nb_frames = 0;
    nb_samples = 0;
    while (1) {
        if (nb_samples + frame->nb_samples > max) {
            if (nb_samples < min)
                nb_samples = max;
            break;
        }
        nb_samples += frame->nb_samples;
        nb_frames++;
        if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
            break;
        frame = ff_framequeue_peek(&link->fifo, nb_frames);
    }

    buf = ff_get_audio_buffer(link, nb_samples);
    if (!buf)
        return AVERROR(ENOMEM);
    ret = av_frame_copy_props(buf, frame0);
    if (ret < 0) {
        av_frame_free(&buf);
        return ret;
    }
    buf->pts = frame0->pts;

    p = 0;
    for (i = 0; i < nb_frames; i++) {
        frame = ff_framequeue_take(&link->fifo);
        av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
                        frame->nb_samples, link->channels, link->format);
        p += frame->nb_samples;
        av_frame_free(&frame);
    }
    if (p < nb_samples) {
        unsigned n = nb_samples - p;
        frame = ff_framequeue_peek(&link->fifo, 0);
        av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
                        link->channels, link->format);
        frame->nb_samples -= n;
        av_samples_copy(frame->extended_data, frame->extended_data, 0, n,
                        frame->nb_samples, link->channels, link->format);
        if (frame->pts != AV_NOPTS_VALUE)
            frame->pts += av_rescale_q(n, av_make_q(1, link->sample_rate), link->time_base);
        ff_framequeue_update_peeked(&link->fifo, 0);
        ff_framequeue_skip_samples(&link->fifo, n);
    }

    *rframe = buf;
    return 0;
}
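The pts adjustment near the end is the central rational trick here: n samples at sample_rate amount to n * (1/sample_rate) seconds, which av_rescale_q() re-expresses in the link's time base. A standalone sketch (the helper name is illustrative):

#include <libavutil/rational.h>
#include <libavutil/mathematics.h>

/* Advance a pts by nb_samples, converting the sample count from the
 * implicit 1/sample_rate time base into the link's time base. */
static int64_t advance_pts(int64_t pts, unsigned nb_samples,
                           int sample_rate, AVRational time_base)
{
    return pts + av_rescale_q(nb_samples,
                              av_make_q(1, sample_rate), time_base);
}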
Example #5
int ff_mediacodec_dec_send(AVCodecContext *avctx, MediaCodecDecContext *s,
                           AVPacket *pkt)
{
    int offset = 0;
    int need_draining = 0;
    uint8_t *data;
    ssize_t index;
    size_t size;
    FFAMediaCodec *codec = s->codec;
    int status;
    int64_t input_dequeue_timeout_us = INPUT_DEQUEUE_TIMEOUT_US;

    if (s->flushing) {
        av_log(avctx, AV_LOG_ERROR, "Decoder is flushing and cannot accept new buffer "
                                    "until all output buffers have been released\n");
        return AVERROR_EXTERNAL;
    }

    if (pkt->size == 0) {
        need_draining = 1;
    }

    if (s->draining && s->eos) {
        return AVERROR_EOF;
    }

    while (offset < pkt->size || (need_draining && !s->draining)) {

        index = ff_AMediaCodec_dequeueInputBuffer(codec, input_dequeue_timeout_us);
        if (ff_AMediaCodec_infoTryAgainLater(codec, index)) {
            av_log(avctx, AV_LOG_TRACE, "No input buffer available, try again later\n");
            break;
        }

        if (index < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to dequeue input buffer (status=%zd)\n", index);
            return AVERROR_EXTERNAL;
        }

        data = ff_AMediaCodec_getInputBuffer(codec, index, &size);
        if (!data) {
            av_log(avctx, AV_LOG_ERROR, "Failed to get input buffer\n");
            return AVERROR_EXTERNAL;
        }

        if (need_draining) {
            int64_t pts = pkt->pts;
            uint32_t flags = ff_AMediaCodec_getBufferFlagEndOfStream(codec);

            if (s->surface) {
                pts = av_rescale_q(pts, avctx->pkt_timebase, av_make_q(1, 1000000));
            }

            av_log(avctx, AV_LOG_DEBUG, "Sending End Of Stream signal\n");

            status = ff_AMediaCodec_queueInputBuffer(codec, index, 0, 0, pts, flags);
            if (status < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to queue input empty buffer (status = %d)\n", status);
                return AVERROR_EXTERNAL;
            }

            av_log(avctx, AV_LOG_TRACE,
                   "Queued input buffer %zd size=%zd ts=%"PRIi64"\n", index, size, pts);

            s->draining = 1;
            break;
        } else {
            int64_t pts = pkt->pts;

            size = FFMIN(pkt->size - offset, size);
            memcpy(data, pkt->data + offset, size);
            offset += size;

            if (avctx->pkt_timebase.num && avctx->pkt_timebase.den) {
                pts = av_rescale_q(pts, avctx->pkt_timebase, av_make_q(1, 1000000));
            }

            status = ff_AMediaCodec_queueInputBuffer(codec, index, 0, size, pts, 0);
            if (status < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to queue input buffer (status = %d)\n", status);
                return AVERROR_EXTERNAL;
            }

            av_log(avctx, AV_LOG_TRACE,
                   "Queued input buffer %zd size=%zd ts=%"PRIi64"\n", index, size, pts);
        }
    }

    if (offset == 0)
        return AVERROR(EAGAIN);
    return offset;
}
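Both branches above rescale pkt->pts from the packet time base into microseconds, the unit MediaCodec's queueInputBuffer() works in; the output path (see the examples below) applies the inverse. Isolated as a sketch (helper names are illustrative; av_make_q(1, 1000000) equals AV_TIME_BASE_Q):

#include <libavutil/rational.h>
#include <libavutil/mathematics.h>

/* Packet time base -> microseconds, for queueing input buffers... */
static int64_t pts_to_us(int64_t pts, AVRational pkt_timebase)
{
    return av_rescale_q(pts, pkt_timebase, av_make_q(1, 1000000));
}

/* ...and microseconds -> packet time base, for decoded frames. */
static int64_t us_to_pts(int64_t us, AVRational pkt_timebase)
{
    return av_rescale_q(us, av_make_q(1, 1000000), pkt_timebase);
}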
Example #6
static int mediacodec_wrap_sw_buffer(AVCodecContext *avctx,
                                  MediaCodecDecContext *s,
                                  uint8_t *data,
                                  size_t size,
                                  ssize_t index,
                                  FFAMediaCodecBufferInfo *info,
                                  AVFrame *frame)
{
    int ret = 0;
    int status = 0;

    frame->width = avctx->width;
    frame->height = avctx->height;
    frame->format = avctx->pix_fmt;

    /* MediaCodec buffers need to be copied to our own refcounted buffers
     * because the flush command invalidates all input and output buffers.
     */
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer\n");
        goto done;
    }

    /* Override frame->pkt_pts as ff_get_buffer will override its value based
     * on the last avpacket received which is not in sync with the frame:
     *   * N avpackets can be pushed before 1 frame is actually returned
     *   * 0-sized avpackets are pushed to flush remaining frames at EOS */
    if (avctx->pkt_timebase.num && avctx->pkt_timebase.den) {
        frame->pts = av_rescale_q(info->presentationTimeUs,
                                      av_make_q(1, 1000000),
                                      avctx->pkt_timebase);
    } else {
        frame->pts = info->presentationTimeUs;
    }
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_pts = frame->pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    frame->pkt_dts = AV_NOPTS_VALUE;

    av_log(avctx, AV_LOG_TRACE,
            "Frame: width=%d stride=%d height=%d slice-height=%d "
            "crop-top=%d crop-bottom=%d crop-left=%d crop-right=%d encoder=%s\n"
            "destination linesizes=%d,%d,%d\n" ,
            avctx->width, s->stride, avctx->height, s->slice_height,
            s->crop_top, s->crop_bottom, s->crop_left, s->crop_right, s->codec_name,
            frame->linesize[0], frame->linesize[1], frame->linesize[2]);

    switch (s->color_format) {
    case COLOR_FormatYUV420Planar:
        ff_mediacodec_sw_buffer_copy_yuv420_planar(avctx, s, data, size, info, frame);
        break;
    case COLOR_FormatYUV420SemiPlanar:
    case COLOR_QCOM_FormatYUV420SemiPlanar:
    case COLOR_QCOM_FormatYUV420SemiPlanar32m:
        ff_mediacodec_sw_buffer_copy_yuv420_semi_planar(avctx, s, data, size, info, frame);
        break;
    case COLOR_TI_FormatYUV420PackedSemiPlanar:
    case COLOR_TI_FormatYUV420PackedSemiPlanarInterlaced:
        ff_mediacodec_sw_buffer_copy_yuv420_packed_semi_planar(avctx, s, data, size, info, frame);
        break;
    case COLOR_QCOM_FormatYUV420PackedSemiPlanar64x32Tile2m8ka:
        ff_mediacodec_sw_buffer_copy_yuv420_packed_semi_planar_64x32Tile2m8ka(avctx, s, data, size, info, frame);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported color format 0x%x (value=%d)\n",
            s->color_format, s->color_format);
        ret = AVERROR(EINVAL);
        goto done;
    }

    ret = 0;
done:
    status = ff_AMediaCodec_releaseOutputBuffer(s->codec, index, 0);
    if (status < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to release output buffer\n");
        ret = AVERROR_EXTERNAL;
    }

    return ret;
}
Example #7
static int config_out_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    TInterlaceContext *tinterlace = ctx->priv;
    int i;

    tinterlace->vsub = desc->log2_chroma_h;
    outlink->w = inlink->w;
    outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD ?
        inlink->h*2 : inlink->h;
    if (tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD)
        outlink->sample_aspect_ratio = av_mul_q(inlink->sample_aspect_ratio,
                                                av_make_q(2, 1));

    if (tinterlace->mode == MODE_PAD) {
        uint8_t black[4] = { 16, 128, 128, 16 };
        int i, ret;
        if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
            black[0] = black[3] = 0;
        ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
                             outlink->w, outlink->h, outlink->format, 1);
        if (ret < 0)
            return ret;

        /* fill black picture with black */
        for (i = 0; i < 4 && tinterlace->black_data[i]; i++) {
            int h = i == 1 || i == 2 ? FF_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h) : outlink->h;
            memset(tinterlace->black_data[i], black[i],
                   tinterlace->black_linesize[i] * h);
        }
    }
    if ((tinterlace->flags & TINTERLACE_FLAG_VLPF)
            && !(tinterlace->mode == MODE_INTERLEAVE_TOP
              || tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {
        av_log(ctx, AV_LOG_WARNING, "low_pass_filter flag ignored with mode %d\n",
                tinterlace->mode);
        tinterlace->flags &= ~TINTERLACE_FLAG_VLPF;
    }
    tinterlace->preout_time_base = inlink->time_base;
    if (tinterlace->mode == MODE_INTERLACEX2) {
        tinterlace->preout_time_base.den *= 2;
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){2,1});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){1,2});
    } else if (tinterlace->mode != MODE_PAD) {
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){1,2});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){2,1});
    }

    for (i = 0; i<FF_ARRAY_ELEMS(standard_tbs); i++){
        if (!av_cmp_q(standard_tbs[i], outlink->time_base))
            break;
    }
    if (i == FF_ARRAY_ELEMS(standard_tbs) ||
        (tinterlace->flags & TINTERLACE_FLAG_EXACT_TB))
        outlink->time_base = tinterlace->preout_time_base;

    if (tinterlace->flags & TINTERLACE_FLAG_VLPF) {
        tinterlace->lowpass_line = lowpass_line_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    }

    av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n",
           tinterlace->mode, (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "on" : "off",
           inlink->h, outlink->h);

    return 0;
}
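The INTERLACEX2 and default branches show the usual pairing: when the frame rate is multiplied by some factor with av_mul_q(), the time base is multiplied by the reciprocal, so existing pts values keep their meaning. A reduced sketch of that pairing (the helper name and factor parameter are illustrative):

#include <libavutil/rational.h>

/* Scale a link's frame rate by 'factor' and its time base by the
 * reciprocal: {2,1} doubles the rate (interlacex2), {1,2} halves it. */
static void scale_link_timing(AVRational *frame_rate, AVRational *time_base,
                              AVRational factor)
{
    *frame_rate = av_mul_q(*frame_rate, factor);
    *time_base  = av_mul_q(*time_base, av_inv_q(factor));
}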
Example #8
int ff_mediacodec_dec_decode(AVCodecContext *avctx, MediaCodecDecContext *s,
                             AVFrame *frame, int *got_frame,
                             AVPacket *pkt)
{
    int ret;
    int offset = 0;
    int need_draining = 0;
    uint8_t *data;
    ssize_t index;
    size_t size;
    FFAMediaCodec *codec = s->codec;
    FFAMediaCodecBufferInfo info = { 0 };

    int status;

    int64_t input_dequeue_timeout_us = INPUT_DEQUEUE_TIMEOUT_US;
    int64_t output_dequeue_timeout_us = OUTPUT_DEQUEUE_TIMEOUT_US;

    if (s->flushing) {
        av_log(avctx, AV_LOG_ERROR, "Decoder is flushing and cannot accept new buffer "
                                    "until all output buffers have been released\n");
        return AVERROR_EXTERNAL;
    }

    if (pkt->size == 0) {
        need_draining = 1;
    }

    if (s->draining && s->eos) {
        return 0;
    }

    while (offset < pkt->size || (need_draining && !s->draining)) {

        index = ff_AMediaCodec_dequeueInputBuffer(codec, input_dequeue_timeout_us);
        if (ff_AMediaCodec_infoTryAgainLater(codec, index)) {
            break;
        }

        if (index < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to dequeue input buffer (status=%zd)\n", index);
            return AVERROR_EXTERNAL;
        }

        data = ff_AMediaCodec_getInputBuffer(codec, index, &size);
        if (!data) {
            av_log(avctx, AV_LOG_ERROR, "Failed to get input buffer\n");
            return AVERROR_EXTERNAL;
        }

        if (need_draining) {
            int64_t pts = pkt->pts;
            uint32_t flags = ff_AMediaCodec_getBufferFlagEndOfStream(codec);

            if (s->surface) {
                pts = av_rescale_q(pts, avctx->pkt_timebase, av_make_q(1, 1000000));
            }

            av_log(avctx, AV_LOG_DEBUG, "Sending End Of Stream signal\n");

            status = ff_AMediaCodec_queueInputBuffer(codec, index, 0, 0, pts, flags);
            if (status < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to queue input empty buffer (status = %d)\n", status);
                return AVERROR_EXTERNAL;
            }

            s->draining = 1;
            break;
        } else {
            int64_t pts = pkt->pts;

            size = FFMIN(pkt->size - offset, size);

            memcpy(data, pkt->data + offset, size);
            offset += size;

            if (s->surface && avctx->pkt_timebase.num && avctx->pkt_timebase.den) {
                pts = av_rescale_q(pts, avctx->pkt_timebase, av_make_q(1, 1000000));
            }

            status = ff_AMediaCodec_queueInputBuffer(codec, index, 0, size, pts, 0);
            if (status < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to queue input buffer (status = %d)\n", status);
                return AVERROR_EXTERNAL;
            }
        }
    }

    if (need_draining || s->draining) {
        /* If the codec is flushing or needs to be flushed, block for a fair
         * amount of time to ensure we get a frame */
        output_dequeue_timeout_us = OUTPUT_DEQUEUE_BLOCK_TIMEOUT_US;
    } else if (s->dequeued_buffer_nb == 0) {
        /* If the codec hasn't produced any frames, do not block so we
         * can push data to it as fast as possible, and get the first
         * frame */
        output_dequeue_timeout_us = 0;
    }

    index = ff_AMediaCodec_dequeueOutputBuffer(codec, &info, output_dequeue_timeout_us);
    if (index >= 0) {
        int ret;

        if (!s->first_buffer++) {
            av_log(avctx, AV_LOG_DEBUG, "Got first buffer after %fms\n", (av_gettime() - s->first_buffer_at) / 1000);
        }

        av_log(avctx, AV_LOG_DEBUG, "Got output buffer %zd"
                " offset=%" PRIi32 " size=%" PRIi32 " ts=%" PRIi64
                " flags=%" PRIu32 "\n", index, info.offset, info.size,
                info.presentationTimeUs, info.flags);

        if (info.flags & ff_AMediaCodec_getBufferFlagEndOfStream(codec)) {
            s->eos = 1;
        }

        if (info.size) {
            if (s->surface) {
                if ((ret = mediacodec_wrap_hw_buffer(avctx, s, index, &info, frame)) < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Failed to wrap MediaCodec buffer\n");
                    return ret;
                }
            } else {
                data = ff_AMediaCodec_getOutputBuffer(codec, index, &size);
                if (!data) {
                    av_log(avctx, AV_LOG_ERROR, "Failed to get output buffer\n");
                    return AVERROR_EXTERNAL;
                }

                if ((ret = mediacodec_wrap_sw_buffer(avctx, s, data, size, index, &info, frame)) < 0) {
                    av_log(avctx, AV_LOG_ERROR, "Failed to wrap MediaCodec buffer\n");
                    return ret;
                }
            }

            *got_frame = 1;
            s->dequeued_buffer_nb++;
        } else {
            status = ff_AMediaCodec_releaseOutputBuffer(codec, index, 0);
            if (status < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to release output buffer\n");
            }
        }

    } else if (ff_AMediaCodec_infoOutputFormatChanged(codec, index)) {
        char *format = NULL;

        if (s->format) {
            status = ff_AMediaFormat_delete(s->format);
            if (status < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to delete MediaFormat %p\n", s->format);
            }
        }

        s->format = ff_AMediaCodec_getOutputFormat(codec);
        if (!s->format) {
            av_log(avctx, AV_LOG_ERROR, "Failed to get output format\n");
            return AVERROR_EXTERNAL;
        }

        format = ff_AMediaFormat_toString(s->format);
        if (!format) {
            return AVERROR_EXTERNAL;
        }
        av_log(avctx, AV_LOG_INFO, "Output MediaFormat changed to %s\n", format);
        av_freep(&format);

        if ((ret = mediacodec_dec_parse_format(avctx, s)) < 0) {
            return ret;
        }

    } else if (ff_AMediaCodec_infoOutputBuffersChanged(codec, index)) {
        ff_AMediaCodec_cleanOutputBuffers(codec);
    } else if (ff_AMediaCodec_infoTryAgainLater(codec, index)) {
        if (s->draining) {
            av_log(avctx, AV_LOG_ERROR, "Failed to dequeue output buffer within %" PRIi64 "ms "
                                        "while draining remaining frames, output will probably lack frames\n",
                                        output_dequeue_timeout_us / 1000);
        } else {
            av_log(avctx, AV_LOG_DEBUG, "No output buffer available, try again later\n");
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "Failed to dequeue output buffer (status=%zd)\n", index);
        return AVERROR_EXTERNAL;
    }

    return offset;
}
Example #9
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowFreqsContext *s = ctx->priv;
    float overlap;
    int i;

    s->nb_freq = 1 << (s->fft_bits - 1);
    s->win_size = s->nb_freq << 1;
    av_audio_fifo_free(s->fifo);
    av_fft_end(s->fft);
    s->fft = av_fft_init(s->fft_bits, 0);
    if (!s->fft) {
        av_log(ctx, AV_LOG_ERROR, "Unable to create FFT context. "
               "The window size might be too high.\n");
        return AVERROR(ENOMEM);
    }

    /* FFT buffers: x2 for each (display) channel buffer.
     * Note: we use free and malloc instead of a realloc-like function to
     * make sure the buffer is aligned in memory for the FFT functions. */
    for (i = 0; i < s->nb_channels; i++) {
        av_freep(&s->fft_data[i]);
        av_freep(&s->avg_data[i]);
    }
    av_freep(&s->fft_data);
    av_freep(&s->avg_data);
    s->nb_channels = inlink->channels;

    s->fft_data = av_calloc(s->nb_channels, sizeof(*s->fft_data));
    if (!s->fft_data)
        return AVERROR(ENOMEM);
    s->avg_data = av_calloc(s->nb_channels, sizeof(*s->avg_data));
    if (!s->avg_data)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->nb_channels; i++) {
        s->fft_data[i] = av_calloc(s->win_size, sizeof(**s->fft_data));
        s->avg_data[i] = av_calloc(s->nb_freq, sizeof(**s->avg_data));
        if (!s->fft_data[i] || !s->avg_data[i])
            return AVERROR(ENOMEM);
    }

    /* pre-calc windowing function */
    s->window_func_lut = av_realloc_f(s->window_func_lut, s->win_size,
                                      sizeof(*s->window_func_lut));
    if (!s->window_func_lut)
        return AVERROR(ENOMEM);
    ff_generate_window_func(s->window_func_lut, s->win_size, s->win_func, &overlap);
    if (s->overlap == 1.)
        s->overlap = overlap;
    s->skip_samples = (1. - s->overlap) * s->win_size;
    if (s->skip_samples < 1) {
        av_log(ctx, AV_LOG_ERROR, "overlap %f too big\n", s->overlap);
        return AVERROR(EINVAL);
    }

    for (s->scale = 0, i = 0; i < s->win_size; i++) {
        s->scale += s->window_func_lut[i] * s->window_func_lut[i];
    }

    outlink->frame_rate = av_make_q(inlink->sample_rate, s->win_size * (1.-s->overlap));
    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->w = s->w;
    outlink->h = s->h;

    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->win_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);
    return 0;
}
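The output frame rate here comes straight from the analysis hop: one video frame is emitted per (1 - overlap) * win_size samples, so the rate is sample_rate over that hop. Because av_make_q() takes int arguments, the floating-point expression is truncated toward zero when passed; a sketch of the same derivation with explicit rounding (the helper name is illustrative):

#include <math.h>
#include <libavutil/rational.h>

/* One video frame per analysis hop: frame_rate = sample_rate / hop,
 * with the hop rounded to the nearest sample instead of truncated. */
static AVRational showfreqs_frame_rate(int sample_rate, int win_size,
                                       double overlap)
{
    int hop = (int)lrint(win_size * (1.0 - overlap));
    return av_make_q(sample_rate, hop > 0 ? hop : 1);
}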
Example #10
File: D2V.cpp Project: dubhater/D2VWitch
bool D2V::printSettings() {
    int stream_type = getStreamType(f->fctx->iformat->name);

    int video_id = video_stream->id;
    int audio_id = 0;
    int64_t ts_packetsize = 0;
    if (stream_type == TRANSPORT_STREAM) {
        const AVStream *audio_stream = nullptr;
        for (unsigned i = 0; i < f->fctx->nb_streams; i++) {
            if (f->fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                audio_stream = f->fctx->streams[i];
                break;
            }
        }

        if (audio_stream)
            audio_id = audio_stream->id;

        if (av_opt_get_int(f->fctx, "ts_packetsize", AV_OPT_SEARCH_CHILDREN, &ts_packetsize) < 0)
            ts_packetsize = 0;
    }

    int mpeg_type = 0;
    if (video_stream->codec->codec_id == AV_CODEC_ID_MPEG1VIDEO)
        mpeg_type = 1;
    else if (video_stream->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO)
        mpeg_type = 2;

    int yuvrgb_scale = input_range == ColourRangeLimited ? 1 : 0;

    int width, height;
    if (av_opt_get_image_size(video_stream->codec, "video_size", 0, &width, &height) < 0)
        width = height = -1;

    AVRational sar;
    if (av_opt_get_q(video_stream->codec, "aspect", 0, &sar) < 0)
        sar = { 1, 1 };
    AVRational dar = av_mul_q(av_make_q(width, height), sar);
    av_reduce(&dar.num, &dar.den, dar.num, dar.den, 1024);

    // No AVOption for framerate?
    AVRational frame_rate = video_stream->codec->framerate;

    std::string settings;

    settings += "Stream_Type=" + std::to_string(stream_type) + "\n";
    if (stream_type == TRANSPORT_STREAM) {
        char pids[100] = { 0 };
        snprintf(pids, 100, "%x,%x,%x", video_id, audio_id, 0);
        settings += "MPEG2_Transport_PID=";
        settings += pids;
        settings += "\n";

        settings += "Transport_Packet_Size=" + std::to_string(ts_packetsize) + "\n";
    }
    settings += "MPEG_Type=" + std::to_string(mpeg_type) + "\n";
    settings += "iDCT_Algorithm=6\n"; // "32-bit SSEMMX (Skal)". No one cares anyway.
    settings += "YUVRGB_Scale=" + std::to_string(yuvrgb_scale) + "\n";
    settings += "Luminance_Filter=0,0\n"; // We don't care.
    settings += "Clipping=0,0,0,0\n"; // We don't crop here.
    settings += "Aspect_Ratio=" + std::to_string(dar.num) + ":" + std::to_string(dar.den) + "\n";
    settings += "Picture_Size=" + std::to_string(width) + "x" + std::to_string(height) + "\n";
    settings += "Field_Operation=0\n"; // Always tell them honor the pulldown flags.
    settings += "Frame_Rate=" + std::to_string((int)((float)frame_rate.num * 1000 / frame_rate.den)) + " (" + std::to_string(frame_rate.num) + "/" + std::to_string(frame_rate.den) + ")\n";
    settings += "Location=0,0,0,0\n"; // Whatever.

    if (fprintf(d2v_file, "%s", settings.c_str()) < 0) {
        error = "Failed to print d2v settings section: fprintf() failed.";
        return false;
    }

    return true;
}
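The aspect-ratio lines above implement DAR = (width / height) * SAR, then reduce the fraction so the printed num:den pair stays small. As a standalone sketch (the helper name is illustrative):

#include <libavutil/rational.h>

/* Display aspect ratio from storage size and sample aspect ratio,
 * reduced with both terms capped at 1024 for printable values. */
static AVRational compute_dar(int width, int height, AVRational sar)
{
    AVRational dar = av_mul_q(av_make_q(width, height), sar);
    av_reduce(&dar.num, &dar.den, dar.num, dar.den, 1024);
    return dar;
}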
Example #11
av_cold int ff_decklink_read_header(AVFormatContext *avctx)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx;
    AVStream *st;
    HRESULT result;
    char fname[1024];
    char *tmp;
    int mode_num = 0;
    int ret;

    ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx));
    if (!ctx)
        return AVERROR(ENOMEM);
    ctx->list_devices = cctx->list_devices;
    ctx->list_formats = cctx->list_formats;
    ctx->teletext_lines = cctx->teletext_lines;
    ctx->preroll      = cctx->preroll;
    ctx->duplex_mode  = cctx->duplex_mode;
    if (cctx->video_input > 0 && (unsigned int)cctx->video_input < FF_ARRAY_ELEMS(decklink_video_connection_map))
        ctx->video_input = decklink_video_connection_map[cctx->video_input];
    if (cctx->audio_input > 0 && (unsigned int)cctx->audio_input < FF_ARRAY_ELEMS(decklink_audio_connection_map))
        ctx->audio_input = decklink_audio_connection_map[cctx->audio_input];
    ctx->audio_pts_source = cctx->audio_pts_source;
    ctx->video_pts_source = cctx->video_pts_source;
    ctx->draw_bars = cctx->draw_bars;
    cctx->ctx = ctx;

#if !CONFIG_LIBZVBI
    if (ctx->teletext_lines) {
        av_log(avctx, AV_LOG_ERROR, "Libzvbi support is needed for capturing teletext, please recompile FFmpeg.\n");
        return AVERROR(ENOSYS);
    }
#endif

    /* Check audio channel option for valid values: 2, 8 or 16 */
    switch (cctx->audio_channels) {
        case 2:
        case 8:
        case 16:
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Value of channels option must be one of 2, 8 or 16\n");
            return AVERROR(EINVAL);
    }

    /* List available devices. */
    if (ctx->list_devices) {
        ff_decklink_list_devices(avctx);
        return AVERROR_EXIT;
    }

    strcpy (fname, avctx->filename);
    tmp=strchr (fname, '@');
    if (tmp != NULL) {
        mode_num = atoi (tmp+1);
        *tmp = 0;
    }

    ret = ff_decklink_init_device(avctx, fname);
    if (ret < 0)
        return ret;

    /* Get input device. */
    if (ctx->dl->QueryInterface(IID_IDeckLinkInput, (void **) &ctx->dli) != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not open input device from '%s'\n",
               avctx->filename);
        ret = AVERROR(EIO);
        goto error;
    }

    /* List supported formats. */
    if (ctx->list_formats) {
        ff_decklink_list_formats(avctx, DIRECTION_IN);
        ret = AVERROR_EXIT;
        goto error;
    }

    if (mode_num > 0) {
        if (ff_decklink_set_format(avctx, DIRECTION_IN, mode_num) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set mode %d for %s\n", mode_num, fname);
            ret = AVERROR(EIO);
            goto error;
        }
    }

    /* Setup streams. */
    st = avformat_new_stream(avctx, NULL);
    if (!st) {
        av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
        ret = AVERROR(ENOMEM);
        goto error;
    }
    st->codecpar->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codecpar->codec_id    = AV_CODEC_ID_PCM_S16LE;
    st->codecpar->sample_rate = bmdAudioSampleRate48kHz;
    st->codecpar->channels    = cctx->audio_channels;
    avpriv_set_pts_info(st, 64, 1, 1000000);  /* 64 bits pts in us */
    ctx->audio_st=st;

    st = avformat_new_stream(avctx, NULL);
    if (!st) {
        av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
        ret = AVERROR(ENOMEM);
        goto error;
    }
    st->codecpar->codec_type  = AVMEDIA_TYPE_VIDEO;
    st->codecpar->width       = ctx->bmd_width;
    st->codecpar->height      = ctx->bmd_height;

    st->time_base.den      = ctx->bmd_tb_den;
    st->time_base.num      = ctx->bmd_tb_num;
    av_stream_set_r_frame_rate(st, av_make_q(st->time_base.den, st->time_base.num));

    if (cctx->v210) {
        st->codecpar->codec_id    = AV_CODEC_ID_V210;
        st->codecpar->codec_tag   = MKTAG('V', '2', '1', '0');
        st->codecpar->bit_rate    = av_rescale(ctx->bmd_width * ctx->bmd_height * 64, st->time_base.den, st->time_base.num * 3);
    } else {
        st->codecpar->codec_id    = AV_CODEC_ID_RAWVIDEO;
        st->codecpar->format      = AV_PIX_FMT_UYVY422;
        st->codecpar->codec_tag   = MKTAG('U', 'Y', 'V', 'Y');
        st->codecpar->bit_rate    = av_rescale(ctx->bmd_width * ctx->bmd_height * 16, st->time_base.den, st->time_base.num);
    }

    avpriv_set_pts_info(st, 64, 1, 1000000);  /* 64 bits pts in us */

    ctx->video_st=st;

    if (ctx->teletext_lines) {
        st = avformat_new_stream(avctx, NULL);
        if (!st) {
            av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
            ret = AVERROR(ENOMEM);
            goto error;
        }
        st->codecpar->codec_type  = AVMEDIA_TYPE_SUBTITLE;
        st->time_base.den         = ctx->bmd_tb_den;
        st->time_base.num         = ctx->bmd_tb_num;
        st->codecpar->codec_id    = AV_CODEC_ID_DVB_TELETEXT;
        avpriv_set_pts_info(st, 64, 1, 1000000);  /* 64 bits pts in us */
        ctx->teletext_st = st;
    }

    av_log(avctx, AV_LOG_VERBOSE, "Using %d input audio channels\n", ctx->audio_st->codecpar->channels);
    result = ctx->dli->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType16bitInteger, ctx->audio_st->codecpar->channels);

    if (result != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Cannot enable audio input\n");
        ret = AVERROR(EIO);
        goto error;
    }

    result = ctx->dli->EnableVideoInput(ctx->bmd_mode,
                                        cctx->v210 ? bmdFormat10BitYUV : bmdFormat8BitYUV,
                                        bmdVideoInputFlagDefault);

    if (result != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Cannot enable video input\n");
        ret = AVERROR(EIO);
        goto error;
    }

    avpacket_queue_init (avctx, &ctx->queue);

    if (decklink_start_input (avctx) != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Cannot start input stream\n");
        ret = AVERROR(EIO);
        goto error;
    }

    return 0;

error:
    ff_decklink_cleanup(avctx);
    return ret;
}
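The av_stream_set_r_frame_rate() call above relies on the DeckLink time base counting exactly one frame per tick, so the frame rate is simply the inverted time base. A sketch of that identity (the helper name is illustrative):

#include <libavutil/rational.h>

/* When one tick of the time base equals one frame (as with the DeckLink
 * bmd_tb_num/bmd_tb_den pair), the frame rate is the inverse time base;
 * av_inv_q(tb) is the same as av_make_q(tb.den, tb.num). */
static AVRational frame_rate_from_time_base(AVRational time_base)
{
    return av_inv_q(time_base);
}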
Example #12
static int config_out_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    TInterlaceContext *tinterlace = ctx->priv;
    int i;

    tinterlace->vsub = desc->log2_chroma_h;
    outlink->w = inlink->w;
    outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD || tinterlace->mode == MODE_MERGEX2?
        inlink->h*2 : inlink->h;
    if (tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD || tinterlace->mode == MODE_MERGEX2)
        outlink->sample_aspect_ratio = av_mul_q(inlink->sample_aspect_ratio,
                                                av_make_q(2, 1));

    if (tinterlace->mode == MODE_PAD) {
        uint8_t black[4] = { 0, 0, 0, 16 };
        int ret;
        ff_draw_init(&tinterlace->draw, outlink->format, 0);
        ff_draw_color(&tinterlace->draw, &tinterlace->color, black);
        if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
            tinterlace->color.comp[0].u8[0] = 0;
        ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
                             outlink->w, outlink->h, outlink->format, 16);
        if (ret < 0)
            return ret;

        ff_fill_rectangle(&tinterlace->draw, &tinterlace->color, tinterlace->black_data,
                          tinterlace->black_linesize, 0, 0, outlink->w, outlink->h);
    }
    if (tinterlace->flags & (TINTERLACE_FLAG_VLPF | TINTERLACE_FLAG_CVLPF)
            && !(tinterlace->mode == MODE_INTERLEAVE_TOP
              || tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {
        av_log(ctx, AV_LOG_WARNING, "low_pass_filter flags ignored with mode %d\n",
                tinterlace->mode);
        tinterlace->flags &= ~(TINTERLACE_FLAG_VLPF | TINTERLACE_FLAG_CVLPF);
    }
    tinterlace->preout_time_base = inlink->time_base;
    if (tinterlace->mode == MODE_INTERLACEX2) {
        tinterlace->preout_time_base.den *= 2;
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){2,1});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){1,2});
    } else if (tinterlace->mode == MODE_MERGEX2) {
        outlink->frame_rate = inlink->frame_rate;
        outlink->time_base  = inlink->time_base;
    } else if (tinterlace->mode != MODE_PAD) {
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){1,2});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){2,1});
    }

    for (i = 0; i<FF_ARRAY_ELEMS(standard_tbs); i++){
        if (!av_cmp_q(standard_tbs[i], outlink->time_base))
            break;
    }
    if (i == FF_ARRAY_ELEMS(standard_tbs) ||
        (tinterlace->flags & TINTERLACE_FLAG_EXACT_TB))
        outlink->time_base = tinterlace->preout_time_base;

    tinterlace->csp = av_pix_fmt_desc_get(outlink->format);
    if (tinterlace->flags & TINTERLACE_FLAG_CVLPF) {
        if (tinterlace->csp->comp[0].depth > 8)
            tinterlace->lowpass_line = lowpass_line_complex_c_16;
        else
            tinterlace->lowpass_line = lowpass_line_complex_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    } else if (tinterlace->flags & TINTERLACE_FLAG_VLPF) {
        if (tinterlace->csp->comp[0].depth > 8)
            tinterlace->lowpass_line = lowpass_line_c_16;
        else
            tinterlace->lowpass_line = lowpass_line_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    }

    av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n", tinterlace->mode,
           (tinterlace->flags & TINTERLACE_FLAG_CVLPF) ? "complex" :
           (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "linear" : "off",
           inlink->h, outlink->h);

    return 0;
}
Example #13
int ff_decklink_set_format(AVFormatContext *avctx,
                               int width, int height,
                               int tb_num, int tb_den,
                               enum AVFieldOrder field_order,
                               decklink_direction_t direction, int num)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
    BMDDisplayModeSupport support;
    IDeckLinkDisplayModeIterator *itermode;
    IDeckLinkDisplayMode *mode;
    int i = 1;
    HRESULT res;

    av_log(avctx, AV_LOG_DEBUG, "Trying to find mode for frame size %dx%d, frame timing %d/%d, field order %d, direction %d, mode number %d, format code %s\n",
        width, height, tb_num, tb_den, field_order, direction, num, (cctx->format_code) ? cctx->format_code : "(unset)");

    if (ctx->duplex_mode) {
        DECKLINK_BOOL duplex_supported = false;

        if (ctx->attr->GetFlag(BMDDeckLinkSupportsDuplexModeConfiguration, &duplex_supported) != S_OK)
            duplex_supported = false;

        if (duplex_supported) {
            res = ctx->cfg->SetInt(bmdDeckLinkConfigDuplexMode, ctx->duplex_mode == 2 ? bmdDuplexModeFull : bmdDuplexModeHalf);
            if (res != S_OK)
                av_log(avctx, AV_LOG_WARNING, "Setting duplex mode failed.\n");
            else
                av_log(avctx, AV_LOG_VERBOSE, "Successfully set duplex mode to %s duplex.\n", ctx->duplex_mode == 2 ? "full" : "half");
        } else {
            av_log(avctx, AV_LOG_WARNING, "Unable to set duplex mode, because it is not supported.\n");
        }
    }

    if (direction == DIRECTION_IN) {
        int ret;
        ret = decklink_select_input(avctx, bmdDeckLinkConfigAudioInputConnection);
        if (ret < 0)
            return ret;
        ret = decklink_select_input(avctx, bmdDeckLinkConfigVideoInputConnection);
        if (ret < 0)
            return ret;
        res = ctx->dli->GetDisplayModeIterator (&itermode);
    } else {
        res = ctx->dlo->GetDisplayModeIterator (&itermode);
    }

    if (res != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not get Display Mode Iterator\n");
        return AVERROR(EIO);
    }

    char format_buf[] = "    ";
    if (cctx->format_code)
        memcpy(format_buf, cctx->format_code, FFMIN(strlen(cctx->format_code), sizeof(format_buf)));
    BMDDisplayMode target_mode = (BMDDisplayMode)AV_RB32(format_buf);
    AVRational target_tb = av_make_q(tb_num, tb_den);
    ctx->bmd_mode = bmdModeUnknown;
    while ((ctx->bmd_mode == bmdModeUnknown) && itermode->Next(&mode) == S_OK) {
        BMDTimeValue bmd_tb_num, bmd_tb_den;
        int bmd_width  = mode->GetWidth();
        int bmd_height = mode->GetHeight();
        BMDDisplayMode bmd_mode = mode->GetDisplayMode();
        BMDFieldDominance bmd_field_dominance = mode->GetFieldDominance();

        mode->GetFrameRate(&bmd_tb_num, &bmd_tb_den);
        AVRational mode_tb = av_make_q(bmd_tb_num, bmd_tb_den);

        if ((bmd_width == width &&
             bmd_height == height &&
             !av_cmp_q(mode_tb, target_tb) &&
             field_order_eq(field_order, bmd_field_dominance))
             || i == num
             || target_mode == bmd_mode) {
            ctx->bmd_mode   = bmd_mode;
            ctx->bmd_width  = bmd_width;
            ctx->bmd_height = bmd_height;
            ctx->bmd_tb_den = bmd_tb_den;
            ctx->bmd_tb_num = bmd_tb_num;
            ctx->bmd_field_dominance = bmd_field_dominance;
            av_log(avctx, AV_LOG_INFO, "Found Decklink mode %d x %d with rate %.2f%s\n",
                bmd_width, bmd_height, 1/av_q2d(mode_tb),
                (ctx->bmd_field_dominance==bmdLowerFieldFirst || ctx->bmd_field_dominance==bmdUpperFieldFirst)?"(i)":"");
        }

        mode->Release();
        i++;
    }

    itermode->Release();

    if (ctx->bmd_mode == bmdModeUnknown)
        return -1;
    if (direction == DIRECTION_IN) {
        if (ctx->dli->DoesSupportVideoMode(ctx->bmd_mode, bmdFormat8BitYUV,
                                           bmdVideoOutputFlagDefault,
                                           &support, NULL) != S_OK)
            return -1;
    } else {
        if (ctx->dlo->DoesSupportVideoMode(ctx->bmd_mode, bmdFormat8BitYUV,
                                           bmdVideoOutputFlagDefault,
                                           &support, NULL) != S_OK)
            return -1;
    }
    if (support == bmdDisplayModeSupported)
        return 0;

    return -1;
}
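Mode matching above compares frame timings with av_cmp_q() rather than with doubles, so 30000/1001 matches only an exactly equal rational, never a rounded 29.97. A sketch (the helper name is illustrative):

#include <libavutil/rational.h>

/* Exact rational comparison: av_cmp_q() returns 0 when the reduced
 * values are equal, so 60000/2002 matches 30000/1001. */
static int same_frame_timing(AVRational a, AVRational b)
{
    return av_cmp_q(a, b) == 0;
}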
Example #14
static int ndi_find_sources(AVFormatContext *avctx, const char *name, NDIlib_source_t *source_to_connect_to)
{
    int j = AVERROR(ENODEV);
    unsigned int n, i;
    struct NDIContext *ctx = avctx->priv_data;
    const NDIlib_source_t *ndi_srcs = NULL;
    const NDIlib_find_create_t find_create_desc = { .show_local_sources = true,
        .p_groups = NULL, .p_extra_ips = NULL };

    if (!ctx->ndi_find)
        ctx->ndi_find = NDIlib_find_create2(&find_create_desc);
    if (!ctx->ndi_find) {
        av_log(avctx, AV_LOG_ERROR, "NDIlib_find_create failed.\n");
        return AVERROR(EIO);
    }

    while (1)
    {
        int f, t = ctx->wait_sources / 1000;
        av_log(avctx, AV_LOG_DEBUG, "Waiting for sources %d miliseconds\n", t);
        f = NDIlib_find_wait_for_sources(ctx->ndi_find, t);
        av_log(avctx, AV_LOG_DEBUG, "NDIlib_find_wait_for_sources returns %d\n", f);
        if (!f)
            break;
    }

    ndi_srcs = NDIlib_find_get_current_sources(ctx->ndi_find, &n);

    if (ctx->find_sources)
        av_log(avctx, AV_LOG_INFO, "Found %d NDI sources:\n", n);

    for (i = 0; i < n; i++) {
        if (ctx->find_sources)
            av_log(avctx, AV_LOG_INFO, "\t'%s'\t'%s'\n", ndi_srcs[i].p_ndi_name, ndi_srcs[i].p_ip_address);

        if (!strcmp(name, ndi_srcs[i].p_ndi_name)) {
            *source_to_connect_to = ndi_srcs[i];
            j = i;
        }
    }

    return j;
}

static int ndi_read_header(AVFormatContext *avctx)
{
    int ret;
    NDIlib_recv_create_t recv_create_desc;
    const NDIlib_tally_t tally_state = { .on_program = true, .on_preview = false };
    struct NDIContext *ctx = avctx->priv_data;

    if (!NDIlib_initialize()) {
        av_log(avctx, AV_LOG_ERROR, "NDIlib_initialize failed.\n");
        return AVERROR_EXTERNAL;
    }

    /* Find available sources. */
    ret = ndi_find_sources(avctx, avctx->url, &recv_create_desc.source_to_connect_to);
    if (ctx->find_sources) {
        return AVERROR_EXIT;
    }
    if (ret < 0)
        return ret;

    /* Create receiver description */
    recv_create_desc.color_format = NDIlib_recv_color_format_e_UYVY_RGBA;
    recv_create_desc.bandwidth = NDIlib_recv_bandwidth_highest;
    recv_create_desc.allow_video_fields = ctx->allow_video_fields;

    /* Create the receiver */
    ctx->recv = NDIlib_recv_create(&recv_create_desc);
    if (!ctx->recv) {
        av_log(avctx, AV_LOG_ERROR, "NDIlib_recv_create2 failed.\n");
        return AVERROR(EIO);
    }

    /* Set tally */
    NDIlib_recv_set_tally(ctx->recv, &tally_state);

    avctx->ctx_flags |= AVFMTCTX_NOHEADER;

    return 0;
}

static int ndi_create_video_stream(AVFormatContext *avctx, NDIlib_video_frame_t *v)
{
    AVStream *st;
    AVRational tmp;
    struct NDIContext *ctx = avctx->priv_data;

    st = avformat_new_stream(avctx, NULL);
    if (!st) {
        av_log(avctx, AV_LOG_ERROR, "Cannot add video stream\n");
        return AVERROR(ENOMEM);
    }

    st->time_base                   = NDI_TIME_BASE_Q;
    st->r_frame_rate                = av_make_q(v->frame_rate_N, v->frame_rate_D);

    tmp = av_mul_q(av_d2q(v->picture_aspect_ratio, INT_MAX), (AVRational){v->yres, v->xres});
    av_reduce(&st->sample_aspect_ratio.num, &st->sample_aspect_ratio.den, tmp.num, tmp.den, 1000);
    st->codecpar->sample_aspect_ratio = st->sample_aspect_ratio;

    st->codecpar->codec_type        = AVMEDIA_TYPE_VIDEO;
    st->codecpar->width             = v->xres;
    st->codecpar->height            = v->yres;
    st->codecpar->codec_id          = AV_CODEC_ID_RAWVIDEO;
    st->codecpar->bit_rate          = av_rescale(v->xres * v->yres * 16, v->frame_rate_N, v->frame_rate_D);
    st->codecpar->field_order       = v->frame_format_type == NDIlib_frame_format_type_progressive
        ? AV_FIELD_PROGRESSIVE : AV_FIELD_TT;

    if (NDIlib_FourCC_type_UYVY == v->FourCC || NDIlib_FourCC_type_UYVA == v->FourCC) {
        st->codecpar->format        = AV_PIX_FMT_UYVY422;
        st->codecpar->codec_tag     = MKTAG('U', 'Y', 'V', 'Y');
        if (NDIlib_FourCC_type_UYVA == v->FourCC)
            av_log(avctx, AV_LOG_WARNING, "Alpha channel ignored\n");
    } else if (NDIlib_FourCC_type_BGRA == v->FourCC) {
        st->codecpar->format        = AV_PIX_FMT_BGRA;
        st->codecpar->codec_tag     = MKTAG('B', 'G', 'R', 'A');
    } else if (NDIlib_FourCC_type_BGRX == v->FourCC) {
        st->codecpar->format        = AV_PIX_FMT_BGR0;
        st->codecpar->codec_tag     = MKTAG('B', 'G', 'R', '0');
    } else if (NDIlib_FourCC_type_RGBA == v->FourCC) {
        st->codecpar->format        = AV_PIX_FMT_RGBA;
        st->codecpar->codec_tag     = MKTAG('R', 'G', 'B', 'A');
    } else if (NDIlib_FourCC_type_RGBX == v->FourCC) {
        st->codecpar->format        = AV_PIX_FMT_RGB0;
        st->codecpar->codec_tag     = MKTAG('R', 'G', 'B', '0');
    } else {
        av_log(avctx, AV_LOG_ERROR, "Unsupported video stream format, v->FourCC=%d\n", v->FourCC);
        return AVERROR(EINVAL);
    }

    avpriv_set_pts_info(st, 64, 1, NDI_TIME_BASE);

    ctx->video_st = st;

    return 0;
}
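NDI reports the picture (display) aspect ratio as a double, so the code above recovers the sample aspect ratio as SAR = DAR * (height / width), rationalizing the double with av_d2q() first. Isolated as a sketch (the helper name is illustrative):

#include <limits.h>
#include <libavutil/rational.h>

/* Sample aspect ratio from a floating-point display aspect ratio:
 * SAR = DAR * (height / width), reduced to keep num/den small. */
static AVRational sar_from_dar(double dar, int width, int height)
{
    AVRational sar;
    AVRational tmp = av_mul_q(av_d2q(dar, INT_MAX),
                              av_make_q(height, width));
    av_reduce(&sar.num, &sar.den, tmp.num, tmp.den, 1000);
    return sar;
}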
Example #15
FFmpegVideo::FFmpegVideo()
{
    avcodec_register_all();
    // Encoding

    encoding_codec = NULL ;
    encoding_frame_buffer = NULL ;
    encoding_context = NULL ;

    //AVCodecID codec_id = AV_CODEC_ID_H264 ;
    //AVCodecID codec_id = AV_CODEC_ID_MPEG2VIDEO;
#if LIBAVCODEC_VERSION_MAJOR < 54
    CodecID codec_id = CODEC_ID_MPEG4;
#else
    AVCodecID codec_id = AV_CODEC_ID_MPEG4;
#endif

    /* find the video encoder */
    encoding_codec = avcodec_find_encoder(codec_id);

    if (!encoding_codec) std::cerr << "AV codec not found for codec id " << std::endl;
    if (!encoding_codec) throw std::runtime_error("AV codec not found for codec id ") ;

    encoding_context = avcodec_alloc_context3(encoding_codec);

    if (!encoding_context) std::cerr << "AV: Could not allocate video codec encoding context" << std::endl;
    if (!encoding_context) throw std::runtime_error("AV: Could not allocate video codec encoding context");

    /* put sample parameters */
    encoding_context->bit_rate = 10*1024 ; // default bit rate: 10 kbit/s
    encoding_context->bit_rate_tolerance = encoding_context->bit_rate ;

#ifdef USE_VARIABLE_BITRATE
    encoding_context->rc_min_rate = 0;
    encoding_context->rc_max_rate = 10*1024;//encoding_context->bit_rate;
    encoding_context->rc_buffer_size = 10*1024*1024;
    encoding_context->rc_initial_buffer_occupancy = (int) ( 0.9 * encoding_context->rc_buffer_size);
    encoding_context->rc_max_available_vbv_use = 1.0;
    encoding_context->rc_min_vbv_overflow_use = 0.0;
#else
    encoding_context->rc_min_rate = 0;
    encoding_context->rc_max_rate = 0;
    encoding_context->rc_buffer_size = 0;
#endif
    if (encoding_codec->capabilities & CODEC_CAP_TRUNCATED)
        encoding_context->flags |= CODEC_FLAG_TRUNCATED;
    encoding_context->flags |= CODEC_FLAG_PSNR; // Peak signal-to-noise ratio
    // CODEC_CAP_PARAM_CHANGE is a codec capability bit, not an encoder flag,
    // so it does not belong in encoding_context->flags.
    encoding_context->i_quant_factor = 0.769f;
    encoding_context->b_quant_factor = 1.4f;
    encoding_context->time_base.num = 1;
    encoding_context->time_base.den = 15;//framesPerSecond;
    encoding_context->qmin =  1;
    encoding_context->qmax = 51;
    encoding_context->max_qdiff = 4;

    //encoding_context->me_method = ME_HEX;
    //encoding_context->max_b_frames = 4;
    //encoding_context->flags |= CODEC_FLAG_LOW_DELAY;	// MPEG2 only
    //encoding_context->partitions = X264_PART_I4X4 | X264_PART_I8X8 | X264_PART_P8X8 | X264_PART_P4X4 | X264_PART_B8X8;
    //encoding_context->crf = 0.0f;
    //encoding_context->cqp = 26;

    /* resolution must be a multiple of two */
    encoding_context->width = 640;//176;
    encoding_context->height = 480;//144;
    /* frames per second */
    encoding_context->time_base = av_make_q(1, 25);
    /* emit one intra frame every gop_size frames (100 here);
     * check frame pict_type before passing frame
     * to encoder: if frame->pict_type is AV_PICTURE_TYPE_I
     * then gop_size is ignored and the output of the encoder
     * will always be an I frame irrespective of gop_size
     */
    encoding_context->gop_size = 100;
    //encoding_context->max_b_frames = 1;
#if LIBAVCODEC_VERSION_MAJOR < 54
    encoding_context->pix_fmt = PIX_FMT_YUV420P; //context->pix_fmt = PIX_FMT_RGB24;
    if (codec_id == CODEC_ID_H264) {
#else
    encoding_context->pix_fmt = AV_PIX_FMT_YUV420P; //context->pix_fmt = AV_PIX_FMT_RGB24;
    if (codec_id == AV_CODEC_ID_H264) {
#endif
        av_opt_set(encoding_context->priv_data, "preset", "slow", 0);
    }

    /* open it */
    if (avcodec_open2(encoding_context, encoding_codec, NULL) < 0)
    {
        std::cerr << "AV: Could not open codec context. Something's wrong." << std::endl;
        throw std::runtime_error( "AV: Could not open codec context. Something's wrong.");
    }

#if (LIBAVCODEC_VERSION_MAJOR < 57) || (LIBAVCODEC_VERSION_MAJOR == 57 && LIBAVCODEC_VERSION_MINOR < 3)
    encoding_frame_buffer = avcodec_alloc_frame() ;//(AVFrame*)malloc(sizeof(AVFrame)) ;
#else
    encoding_frame_buffer = av_frame_alloc() ;
#endif

    if(!encoding_frame_buffer) std::cerr << "AV: could not allocate frame buffer." << std::endl;
    if(!encoding_frame_buffer)
        throw std::runtime_error("AV: could not allocate frame buffer.") ;

    encoding_frame_buffer->format = encoding_context->pix_fmt;
    encoding_frame_buffer->width  = encoding_context->width;
    encoding_frame_buffer->height = encoding_context->height;

    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */

    int ret = av_image_alloc(encoding_frame_buffer->data, encoding_frame_buffer->linesize,
                             encoding_context->width, encoding_context->height, encoding_context->pix_fmt, 32);

    if (ret < 0) std::cerr << "AV: Could not allocate raw picture buffer" << std::endl;
    if (ret < 0)
        throw std::runtime_error("AV: Could not allocate raw picture buffer");

    encoding_frame_count = 0 ;

    // Decoding
    decoding_codec = avcodec_find_decoder(codec_id);

    if (!decoding_codec) std::cerr << "AV codec not found for codec id " << std::endl;
    if (!decoding_codec)
        throw("AV codec not found for codec id ") ;

    decoding_context = avcodec_alloc_context3(decoding_codec);

    if(!decoding_context) std::cerr << "AV: Could not allocate video codec decoding context" << std::endl;
    if(!decoding_context)
        throw std::runtime_error("AV: Could not allocate video codec decoding context");

    decoding_context->width = encoding_context->width;
    decoding_context->height = encoding_context->height;
#if LIBAVCODEC_VERSION_MAJOR < 54
    decoding_context->pix_fmt = PIX_FMT_YUV420P;
#else
    decoding_context->pix_fmt = AV_PIX_FMT_YUV420P;
#endif

    if(decoding_codec->capabilities & CODEC_CAP_TRUNCATED)
        decoding_context->flags |= CODEC_FLAG_TRUNCATED; // we do not send complete frames
    //we can receive truncated frames
    decoding_context->flags2 |= CODEC_FLAG2_CHUNKS;

    AVDictionary* dictionary = NULL;
    if(avcodec_open2(decoding_context, decoding_codec, &dictionary) < 0)
    {
        std::cerr << "AV codec open action failed! " << std::endl;
        throw("AV codec open action failed! ") ;
    }

    //decoding_frame_buffer = avcodec_alloc_frame() ;//(AVFrame*)malloc(sizeof(AVFrame)) ;
    decoding_frame_buffer = av_frame_alloc() ;

    av_init_packet(&decoding_buffer);
    decoding_buffer.data = NULL ;
    decoding_buffer.size = 0 ;

    //ret = av_image_alloc(decoding_frame_buffer->data, decoding_frame_buffer->linesize, decoding_context->width, decoding_context->height, decoding_context->pix_fmt, 32);

    //if (ret < 0)
    //throw std::runtime_error("AV: Could not allocate raw picture buffer");

    // debug
#ifdef DEBUG_MPEG_VIDEO
    std::cerr << "Dumping captured data to file tmpvideo.mpg" << std::endl;
    encoding_debug_file = fopen("tmpvideo.mpg","w") ;
#endif
}

FFmpegVideo::~FFmpegVideo()
{
    avcodec_free_context(&encoding_context);
    avcodec_free_context(&decoding_context);
    av_frame_free(&encoding_frame_buffer);
    av_frame_free(&decoding_frame_buffer);
}

#define MAX_FFMPEG_ENCODING_BITRATE 81920

bool FFmpegVideo::encodeData(const QImage& image, uint32_t target_encoding_bitrate, RsVOIPDataChunk& voip_chunk)
{
#ifdef DEBUG_MPEG_VIDEO
	std::cerr << "Encoding frame of size " << image.width() << "x" << image.height() << ", resized to " << encoding_frame_buffer->width << "x" << encoding_frame_buffer->height << " : ";
#endif
	QImage input ;

    if(target_encoding_bitrate > MAX_FFMPEG_ENCODING_BITRATE)
    {
        std::cerr << "Max encodign bitrate eexceeded. Capping to " << MAX_FFMPEG_ENCODING_BITRATE << std::endl;
        target_encoding_bitrate = MAX_FFMPEG_ENCODING_BITRATE ;
    }
	//encoding_context->bit_rate = target_encoding_bitrate;
	encoding_context->rc_max_rate = target_encoding_bitrate;
	//encoding_context->bit_rate_tolerance = target_encoding_bitrate;

	if(image.width() != encoding_frame_buffer->width || image.height() != encoding_frame_buffer->height)
		input = image.scaled(QSize(encoding_frame_buffer->width,encoding_frame_buffer->height),Qt::IgnoreAspectRatio,Qt::SmoothTransformation) ;
	else
		input = image ;

	/* convert the RGB input to YUV420P (BT.601), one 2x2 block at a time */
	for (int y = 0; y < encoding_context->height/2; y++)
		for (int x = 0; x < encoding_context->width/2; x++)
		{
			QRgb pix00 = input.pixel(QPoint(2*x+0,2*y+0)) ;
			QRgb pix01 = input.pixel(QPoint(2*x+0,2*y+1)) ;
			QRgb pix10 = input.pixel(QPoint(2*x+1,2*y+0)) ;
			QRgb pix11 = input.pixel(QPoint(2*x+1,2*y+1)) ;

			int R00 = (pix00 >> 16) & 0xff ; int G00 = (pix00 >>  8) & 0xff ; int B00 = (pix00 >>  0) & 0xff ;
			int R01 = (pix01 >> 16) & 0xff ; int G01 = (pix01 >>  8) & 0xff ; int B01 = (pix01 >>  0) & 0xff ;
			int R10 = (pix10 >> 16) & 0xff ; int G10 = (pix10 >>  8) & 0xff ; int B10 = (pix10 >>  0) & 0xff ;
			int R11 = (pix11 >> 16) & 0xff ; int G11 = (pix11 >>  8) & 0xff ; int B11 = (pix11 >>  0) & 0xff ;

			int Y00 =  (0.257 * R00) + (0.504 * G00) + (0.098 * B00) + 16  ;
			int Y01 =  (0.257 * R01) + (0.504 * G01) + (0.098 * B01) + 16  ;
			int Y10 =  (0.257 * R10) + (0.504 * G10) + (0.098 * B10) + 16  ;
			int Y11 =  (0.257 * R11) + (0.504 * G11) + (0.098 * B11) + 16  ;

			float R = 0.25*(R00+R01+R10+R11) ;
			float G = 0.25*(G00+G01+G10+G11) ;
			float B = 0.25*(B00+B01+B10+B11) ;

			int U = -(0.148 * R) - (0.291 * G) + (0.439 * B) + 128 ; // Cb
			int V =  (0.439 * R) - (0.368 * G) - (0.071 * B) + 128 ; // Cr

			encoding_frame_buffer->data[0][(2*y+0) * encoding_frame_buffer->linesize[0] + 2*x+0] = std::min(255,std::max(0,Y00)); // Y
			encoding_frame_buffer->data[0][(2*y+0) * encoding_frame_buffer->linesize[0] + 2*x+1] = std::min(255,std::max(0,Y01)); // Y
			encoding_frame_buffer->data[0][(2*y+1) * encoding_frame_buffer->linesize[0] + 2*x+0] = std::min(255,std::max(0,Y10)); // Y
			encoding_frame_buffer->data[0][(2*y+1) * encoding_frame_buffer->linesize[0] + 2*x+1] = std::min(255,std::max(0,Y11)); // Y

			encoding_frame_buffer->data[1][y * encoding_frame_buffer->linesize[1] + x] = std::min(255,std::max(0,U)); // Cb
			encoding_frame_buffer->data[2][y * encoding_frame_buffer->linesize[2] + x] = std::min(255,std::max(0,V)); // Cr
		}


	encoding_frame_buffer->pts = encoding_frame_count++;

	/* encode the image */

	int got_output = 0;

	AVPacket pkt ;
	av_init_packet(&pkt);
#if LIBAVCODEC_VERSION_MAJOR < 54
	pkt.size = avpicture_get_size(encoding_context->pix_fmt, encoding_context->width, encoding_context->height);
	pkt.data = (uint8_t*)av_malloc(pkt.size);

	//    do
	//    {
	int ret = avcodec_encode_video(encoding_context, pkt.data, pkt.size, encoding_frame_buffer) ;
	if (ret > 0) {
		pkt.size = ret;	// the old API returns the number of bytes actually written
		got_output = ret;
	}
#else
	pkt.data = NULL;    // packet data will be allocated by the encoder
	pkt.size = 0;

	//    do
	//    {
	int ret = avcodec_encode_video2(encoding_context, &pkt, encoding_frame_buffer, &got_output) ;
#endif

	if (ret < 0)
	{
		std::cerr << "Error encoding frame!" << std::endl;
		return false ;
	}
	//        frame = NULL ;	// next attempts: do not encode anything. Do this to just flush the buffer
	//
	//    } while(got_output) ;

	if(got_output)
	{
		voip_chunk.data = rs_malloc(pkt.size + HEADER_SIZE) ;
		
		if(!voip_chunk.data)
			return false ;
        
		uint32_t flags = 0;

		((unsigned char *)voip_chunk.data)[0] =  VideoProcessor::VIDEO_PROCESSOR_CODEC_ID_MPEG_VIDEO       & 0xff ;
		((unsigned char *)voip_chunk.data)[1] = (VideoProcessor::VIDEO_PROCESSOR_CODEC_ID_MPEG_VIDEO >> 8) & 0xff ;
		((unsigned char *)voip_chunk.data)[2] = flags & 0xff ;
		((unsigned char *)voip_chunk.data)[3] = (flags >> 8) & 0xff ;

		memcpy(&((unsigned char*)voip_chunk.data)[HEADER_SIZE],pkt.data,pkt.size) ;

		voip_chunk.size = pkt.size + HEADER_SIZE;
		voip_chunk.type = RsVOIPDataChunk::RS_VOIP_DATA_TYPE_VIDEO ;

#ifdef DEBUG_MPEG_VIDEO
		std::cerr << "Output : " << pkt.size << " bytes." << std::endl;
		fwrite(pkt.data,1,pkt.size,encoding_debug_file) ;
		fflush(encoding_debug_file) ;
#endif
		av_free_packet(&pkt);

		return true ;
	}
	else
	{
		// assumed completion (the listing breaks off here): no packet was
		// produced for this frame, e.g. the encoder buffered it, so report
		// an empty chunk and let the caller try again on the next frame
		voip_chunk.data = NULL ;
		voip_chunk.size = 0 ;
		voip_chunk.type = RsVOIPDataChunk::RS_VOIP_DATA_TYPE_VIDEO ;

		return true ;
	}
}
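For reference, the chunk header written above is just a little-endian codec ID and flags word in front of the raw encoder output. A minimal parsing sketch for the receiving side, assuming HEADER_SIZE is 4 as the four header bytes written above suggest (parse_voip_header is a hypothetical helper, not part of the codebase):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper: recover the fields written by encodeData().
 * Assumes the 4-byte header implied above: codec ID (LE16) then flags (LE16). */
static int parse_voip_header(const uint8_t *data, size_t size,
                             uint16_t *codec_id, uint16_t *flags,
                             const uint8_t **payload, size_t *payload_size)
{
    if (size < 4)
        return -1;
    *codec_id     = (uint16_t)(data[0] | ((uint16_t)data[1] << 8));
    *flags        = (uint16_t)(data[2] | ((uint16_t)data[3] << 8));
    *payload      = data + 4;
    *payload_size = size - 4;
    return 0;
}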
Example #16
0
static int mediacodec_wrap_hw_buffer(AVCodecContext *avctx,
                                     MediaCodecDecContext *s,
                                     ssize_t index,
                                     FFAMediaCodecBufferInfo *info,
                                     AVFrame *frame)
{
    int ret = 0;
    int status = 0;
    AVMediaCodecBuffer *buffer = NULL;

    frame->buf[0] = NULL;
    frame->width = avctx->width;
    frame->height = avctx->height;
    frame->format = avctx->pix_fmt;

    if (avctx->pkt_timebase.num && avctx->pkt_timebase.den) {
        frame->pts = av_rescale_q(info->presentationTimeUs,
                                  av_make_q(1, 1000000),
                                  avctx->pkt_timebase);
    } else {
        frame->pts = info->presentationTimeUs;
    }
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_pts = frame->pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    frame->pkt_dts = AV_NOPTS_VALUE;

    buffer = av_mallocz(sizeof(AVMediaCodecBuffer));
    if (!buffer) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    buffer->released = 0;

    frame->buf[0] = av_buffer_create(NULL,
                                     0,
                                     mediacodec_buffer_release,
                                     buffer,
                                     AV_BUFFER_FLAG_READONLY);

    if (!frame->buf[0]) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    buffer->ctx = s;
    ff_mediacodec_dec_ref(s);

    buffer->index = index;
    buffer->pts = info->presentationTimeUs;

    frame->data[3] = (uint8_t *)buffer;

    return 0;
fail:
    av_freep(&buffer);
    av_buffer_unref(&frame->buf[0]);
    status = ff_AMediaCodec_releaseOutputBuffer(s->codec, index, 0);
    if (status < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to release output buffer\n");
        ret = AVERROR_EXTERNAL;
    }

    return ret;
}
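The pts logic above is the usual idiom for mapping MediaCodec's microsecond presentationTimeUs into the caller's packet time base. The same rescale in isolation (the 90 kHz time base and 1.5 s timestamp are illustrative values):

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

int main(void)
{
    int64_t presentation_time_us = 1500000;        /* 1.5 s in microseconds */
    AVRational pkt_timebase = av_make_q(1, 90000); /* e.g. an MPEG-TS 90 kHz clock */

    /* 1.5 s expressed in 1/90000 ticks: prints 135000 */
    int64_t pts = av_rescale_q(presentation_time_us,
                               av_make_q(1, 1000000), pkt_timebase);
    printf("%" PRId64 "\n", pts);
    return 0;
}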
Example #17
0
static av_cold int init_subtitles(AVFilterContext *ctx)
{
    int j, ret, sid;
    int k = 0;
    AVDictionary *codec_opts = NULL;
    AVFormatContext *fmt = NULL;
    AVCodecContext *dec_ctx = NULL;
    AVCodec *dec = NULL;
    const AVCodecDescriptor *dec_desc;
    AVStream *st;
    AVPacket pkt;
    AssContext *ass = ctx->priv;

    /* Init libass */
    ret = init(ctx);
    if (ret < 0)
        return ret;
    ass->track = ass_new_track(ass->library);
    if (!ass->track) {
        av_log(ctx, AV_LOG_ERROR, "Could not create a libass track\n");
        return AVERROR(EINVAL);
    }

    /* Open subtitles file */
    ret = avformat_open_input(&fmt, ass->filename, NULL, NULL);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Unable to open %s\n", ass->filename);
        goto end;
    }
    ret = avformat_find_stream_info(fmt, NULL);
    if (ret < 0)
        goto end;

    /* Locate subtitles stream */
    if (ass->stream_index < 0)
        ret = av_find_best_stream(fmt, AVMEDIA_TYPE_SUBTITLE, -1, -1, NULL, 0);
    else {
        ret = -1;
        if (ass->stream_index < fmt->nb_streams) {
            for (j = 0; j < fmt->nb_streams; j++) {
                if (fmt->streams[j]->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) {
                    if (ass->stream_index == k) {
                        ret = j;
                        break;
                    }
                    k++;
                }
            }
        }
    }

    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Unable to locate subtitle stream in %s\n",
               ass->filename);
        goto end;
    }
    sid = ret;
    st = fmt->streams[sid];

    /* Load attached fonts */
    for (j = 0; j < fmt->nb_streams; j++) {
        AVStream *st = fmt->streams[j];
        if (st->codecpar->codec_type == AVMEDIA_TYPE_ATTACHMENT &&
            attachment_is_font(st)) {
            const AVDictionaryEntry *tag = NULL;
            tag = av_dict_get(st->metadata, "filename", NULL,
                              AV_DICT_MATCH_CASE);

            if (tag) {
                av_log(ctx, AV_LOG_DEBUG, "Loading attached font: %s\n",
                       tag->value);
                ass_add_font(ass->library, tag->value,
                             st->codecpar->extradata,
                             st->codecpar->extradata_size);
            } else {
                av_log(ctx, AV_LOG_WARNING,
                       "Font attachment has no filename, ignored.\n");
            }
        }
    }

    /* Initialize fonts */
    ass_set_fonts(ass->renderer, NULL, NULL, 1, NULL, 1);

    /* Open decoder */
    dec = avcodec_find_decoder(st->codecpar->codec_id);
    if (!dec) {
        av_log(ctx, AV_LOG_ERROR, "Failed to find subtitle codec %s\n",
               avcodec_get_name(st->codecpar->codec_id));
        return AVERROR(EINVAL);
    }
    dec_desc = avcodec_descriptor_get(st->codecpar->codec_id);
    if (dec_desc && !(dec_desc->props & AV_CODEC_PROP_TEXT_SUB)) {
        av_log(ctx, AV_LOG_ERROR,
               "Only text based subtitles are currently supported\n");
        return AVERROR_PATCHWELCOME;
    }
    if (ass->charenc)
        av_dict_set(&codec_opts, "sub_charenc", ass->charenc, 0);
    if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57,26,100))
        av_dict_set(&codec_opts, "sub_text_format", "ass", 0);

    dec_ctx = avcodec_alloc_context3(dec);
    if (!dec_ctx)
        return AVERROR(ENOMEM);

    ret = avcodec_parameters_to_context(dec_ctx, st->codecpar);
    if (ret < 0)
        goto end;

    /*
     * This is required by the decoding process in order to rescale the
     * timestamps: in the current API the decoded subtitles have their pts
     * expressed in AV_TIME_BASE, and thus the lavc internals need to know the
     * stream time base in order to achieve the rescaling.
     *
     * That API is old and needs to be reworked to match behaviour with A/V.
     */
    av_codec_set_pkt_timebase(dec_ctx, st->time_base);

    ret = avcodec_open2(dec_ctx, NULL, &codec_opts);
    if (ret < 0)
        goto end;

    if (ass->force_style) {
        char **list = NULL;
        char *temp = NULL;
        char *ptr = av_strtok(ass->force_style, ",", &temp);
        int i = 0;
        while (ptr) {
            av_dynarray_add(&list, &i, ptr);
            if (!list) {
                ret = AVERROR(ENOMEM);
                goto end;
            }
            ptr = av_strtok(NULL, ",", &temp);
        }
        av_dynarray_add(&list, &i, NULL);
        if (!list) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        ass_set_style_overrides(ass->library, list);
        av_free(list);
    }
    /* Decode subtitles and push them into the renderer (libass) */
    if (dec_ctx->subtitle_header)
        ass_process_codec_private(ass->track,
                                  dec_ctx->subtitle_header,
                                  dec_ctx->subtitle_header_size);
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    while (av_read_frame(fmt, &pkt) >= 0) {
        int i, got_subtitle;
        AVSubtitle sub = {0};

        if (pkt.stream_index == sid) {
            ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_subtitle, &pkt);
            if (ret < 0) {
                av_log(ctx, AV_LOG_WARNING, "Error decoding: %s (ignored)\n",
                       av_err2str(ret));
            } else if (got_subtitle) {
                const int64_t start_time = av_rescale_q(sub.pts, AV_TIME_BASE_Q, av_make_q(1, 1000));
                const int64_t duration   = sub.end_display_time;
                for (i = 0; i < sub.num_rects; i++) {
                    char *ass_line = sub.rects[i]->ass;
                    if (!ass_line)
                        break;
                    if (LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57,25,100))
                        ass_process_data(ass->track, ass_line, strlen(ass_line));
                    else
                        ass_process_chunk(ass->track, ass_line, strlen(ass_line),
                                          start_time, duration);
                }
            }
        }
        av_packet_unref(&pkt);
        avsubtitle_free(&sub);
    }

end:
    av_dict_free(&codec_opts);
    avcodec_close(dec_ctx);
    avcodec_free_context(&dec_ctx);
    avformat_close_input(&fmt);
    return ret;
}
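The start_time computation above converts sub.pts from AV_TIME_BASE units (microseconds) into the millisecond timestamps ass_process_chunk() expects. The conversion in isolation:

#include <stdint.h>
#include <libavutil/avutil.h>       /* AV_TIME_BASE_Q */
#include <libavutil/mathematics.h>

/* sub.pts is in AV_TIME_BASE (1/1000000) ticks; libass wants milliseconds. */
static int64_t subtitle_start_ms(int64_t sub_pts)
{
    return av_rescale_q(sub_pts, AV_TIME_BASE_Q, av_make_q(1, 1000));
}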
Example #18
0
static BOOL ffmpeg_open_context(FREERDP_DSP_CONTEXT* context)
{
	int ret;
	int layout;
	const AUDIO_FORMAT* format;

	if (!context || context->isOpen)
		return FALSE;

	format = &context->format;

	if (!format)
		return FALSE;

	layout = av_get_default_channel_layout(format->nChannels);
	context->id = ffmpeg_get_avcodec(format);

	if (ffmpeg_codec_is_filtered(context->id, context->encoder))
		goto fail;

	if (context->encoder)
		context->codec = avcodec_find_encoder(context->id);
	else
		context->codec = avcodec_find_decoder(context->id);

	if (!context->codec)
		goto fail;

	context->context = avcodec_alloc_context3(context->codec);

	if (!context->context)
		goto fail;

	switch (context->id)
	{
		/* We need support for multichannel and sample rates != 8000 */
		case AV_CODEC_ID_GSM_MS:
			context->context->strict_std_compliance = FF_COMPLIANCE_UNOFFICIAL;
			break;

		default:
			break;
	}

	context->context->channels = format->nChannels;
	context->context->channel_layout = layout;
	context->context->sample_rate = format->nSamplesPerSec;
	context->context->block_align = format->nBlockAlign;
	context->context->bit_rate = format->nAvgBytesPerSec * 8;
	context->context->sample_fmt = ffmpeg_sample_format(format);
	context->context->time_base = av_make_q(1, context->context->sample_rate);

	if ((ret = avcodec_open2(context->context, context->codec, NULL)) < 0)
	{
		const char* err = av_err2str(ret);
		WLog_ERR(TAG, "Error avcodec_open2 %s [%d]", err, ret);
		goto fail;
	}

	context->packet = av_packet_alloc();

	if (!context->packet)
		goto fail;

	context->frame = av_frame_alloc();

	if (!context->frame)
		goto fail;

	context->resampled = av_frame_alloc();

	if (!context->resampled)
		goto fail;

	context->buffered = av_frame_alloc();

	if (!context->buffered)
		goto fail;

	context->rcontext = avresample_alloc_context();

	if (!context->rcontext)
		goto fail;

	context->frame->channel_layout = layout;
	context->frame->channels = format->nChannels;
	context->frame->sample_rate = format->nSamplesPerSec;
	context->frame->format = AV_SAMPLE_FMT_S16;

	if (context->encoder)
	{
		context->resampled->format = context->context->sample_fmt;
		context->resampled->sample_rate = context->context->sample_rate;
	}
	else
	{
		context->resampled->format = AV_SAMPLE_FMT_S16;
		context->resampled->sample_rate = format->nSamplesPerSec;
	}

	context->resampled->channel_layout = layout;
	context->resampled->channels = format->nChannels;

	if (context->context->frame_size > 0)
	{
		context->buffered->channel_layout = context->resampled->channel_layout;
		context->buffered->channels = context->resampled->channels;
		context->buffered->format = context->resampled->format;
		context->buffered->nb_samples = context->context->frame_size;

		if ((ret = av_frame_get_buffer(context->buffered, 1)) < 0)
			goto fail;
	}

	context->isOpen = TRUE;
	return TRUE;
fail:
	ffmpeg_close_context(context);
	return FALSE;
}
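Setting time_base to av_make_q(1, sample_rate), as above, makes one tick equal one audio sample, so frame durations fall out directly. A small sketch (the 960-sample, 48 kHz figures in the comment are illustrative):

#include <libavutil/rational.h>

/* With a one-tick-per-sample time base, a frame of nb_samples samples
 * lasts nb_samples / sample_rate seconds, e.g. 960 @ 48 kHz -> 0.02 s. */
static double frame_duration_seconds(int nb_samples, int sample_rate)
{
    AVRational tb = av_make_q(1, sample_rate);
    return nb_samples * av_q2d(tb);
}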
Example #19
0
RawPixelSource::RawPixelSource(UsageEnvironment& env,
                               Frame* content,
                               int avgBitRate,
                               bool robustSyncing)
	: FramedSource(env), img_convert_ctx(NULL), content(content), /*encodeBarrier(2),*/
	  destructing(false), lastPTS(0), robustSyncing(robustSyncing)
{

	gettimeofday(&prevtime, NULL); // If you have a more accurate time - e.g., from an encoder - then use that instead.
	if (referenceCount == 0)
	{
		// Any global initialization of the device would be done here:
		//%%% TO BE WRITTEN %%%
	}

	// Any instance-specific initialization of the device would be done here:

	++referenceCount;
	//myfile = fopen("/Users/tiborgoldschwendt/Desktop/Logs/deviceglxgears.log", "w");

	// initialize frame pool
	for (int i = 0; i < 1; i++)
	{
		AVFrame* frame = av_frame_alloc();
		if (!frame)
		{
			fprintf(stderr, "Could not allocate video frame\n");
			exit(1);
		}
		frame->format = content->getFormat();
		frame->width  = content->getWidth();
		frame->height = content->getHeight();

		/* the image can be allocated by any means and av_image_alloc() is
		* just the most convenient way if av_malloc() is to be used */
		if (av_image_alloc(frame->data, frame->linesize, frame->width, frame->height,
			content->getFormat(), 32) < 0)
		{
			fprintf(stderr, "Could not allocate raw picture buffer\n");
			abort();
		}

		framePool.push(frame);
	}

	for (int i = 0; i < 1; i++)
	{
		AVPacket pkt;
		av_init_packet(&pkt);
		pktPool.push(pkt);
	}

	// Initialize codec and encoder
	AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
	if (!codec)
	{
		fprintf(stderr, "Codec not found\n");
		exit(1);
	}

	codecContext = avcodec_alloc_context3(codec);

	if (!codecContext)
	{
		fprintf(stderr, "could not allocate video codec context\n");
		exit(1);
	}

	/* put sample parameters */
	codecContext->bit_rate = avgBitRate;
	/* resolution must be a multiple of two */
	codecContext->width = content->getWidth();
	codecContext->height = content->getHeight();
	/* frames per second */
	codecContext->time_base = av_make_q(1, FPS);
	codecContext->gop_size = 20; /* emit one intra frame every twenty frames */
	codecContext->max_b_frames = 0;
	codecContext->pix_fmt = AV_PIX_FMT_YUV420P;
	//codecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;

	av_opt_set(codecContext->priv_data, "preset", PRESET_VAL, 0);
	av_opt_set(codecContext->priv_data, "tune", TUNE_VAL, 0);
	av_opt_set(codecContext->priv_data, "slice-max-size", "2000", 0);

	/* open it */
	if (avcodec_open2(codecContext, codec, NULL) < 0)
	{
		fprintf(stderr, "could not open codec\n");
		exit(1);
	}

	// We arrange here for our "deliverFrame" member function to be called
	// whenever the next frame of data becomes available from the device.
	//
	// If the device can be accessed as a readable socket, then one easy way to do this is using a call to
	//     envir().taskScheduler().turnOnBackgroundReadHandling( ... )
	// (See examples of this call in the "liveMedia" directory.)
	//
	// If, however, the device *cannot* be accessed as a readable socket, then instead we can implement it using 'event triggers':
	// Create an 'event trigger' for this device (if it hasn't already been done):
	eventTriggerId = envir().taskScheduler().createEventTrigger(&deliverFrame0);

	//std::cout << this << ": eventTriggerId: " << eventTriggerId  << std::endl;

	frameContentThread = boost::thread(boost::bind(&RawPixelSource::frameContentLoop, this));

	encodeFrameThread  = boost::thread(boost::bind(&RawPixelSource::encodeFrameLoop,  this));

	//eventThread        = boost::thread(boost::bind(&RawPixelSource::eventLoop, this));

	lastFrameTime = av_gettime();
}
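Because the constructor sets time_base to av_make_q(1, FPS) and pts is a running frame counter, a frame's presentation time in seconds is just its index divided by FPS. A sketch of that mapping (frame_pts_seconds is a hypothetical helper):

#include <stdint.h>
#include <libavutil/rational.h>

/* Valid when codecContext->time_base == (AVRational){1, fps}
 * and pts is the running frame counter, as in the constructor above. */
static double frame_pts_seconds(int64_t pts, int fps)
{
    return pts * av_q2d(av_make_q(1, fps));
}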
Example #20
0
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    TInterlaceContext *tinterlace = ctx->priv;
    AVFrame *cur, *next, *out;
    int field, tff, ret;

    av_frame_free(&tinterlace->cur);
    tinterlace->cur  = tinterlace->next;
    tinterlace->next = picref;

    cur = tinterlace->cur;
    next = tinterlace->next;
    /* we need at least two frames */
    if (!tinterlace->cur)
        return 0;

    switch (tinterlace->mode) {
    case MODE_MERGE: /* move the odd frame into the upper field of the new image, even into
             * the lower field, generating a double-height video at half framerate */
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->height = outlink->h;
        out->interlaced_frame = 1;
        out->top_field_first = 1;
        out->sample_aspect_ratio = av_mul_q(cur->sample_aspect_ratio, av_make_q(2, 1));

        /* write odd frame lines into the upper field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, FIELD_UPPER, tinterlace->flags);
        /* write even frame lines into the lower field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, FIELD_LOWER, tinterlace->flags);
        av_frame_free(&tinterlace->next);
        break;

    case MODE_DROP_ODD:  /* only output even frames, odd  frames are dropped; height unchanged, half framerate */
    case MODE_DROP_EVEN: /* only output odd  frames, even frames are dropped; height unchanged, half framerate */
        out = av_frame_clone(tinterlace->mode == MODE_DROP_EVEN ? cur : next);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_free(&tinterlace->next);
        break;

    case MODE_PAD: /* expand each frame to double height, but pad alternate
                    * lines with black; framerate unchanged */
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->height = outlink->h;
        out->sample_aspect_ratio = av_mul_q(cur->sample_aspect_ratio, av_make_q(2, 1));

        field = (1 + tinterlace->frame) & 1 ? FIELD_UPPER : FIELD_LOWER;
        /* copy upper and lower fields */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, field, tinterlace->flags);
        /* pad with black the other field */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)tinterlace->black_data, tinterlace->black_linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, !field, tinterlace->flags);
        break;

        /* interleave upper/lower lines from odd frames with lower/upper lines from even frames,
         * halving the frame rate and preserving image height */
    case MODE_INTERLEAVE_TOP:    /* top    field first */
    case MODE_INTERLEAVE_BOTTOM: /* bottom field first */
        tff = tinterlace->mode == MODE_INTERLEAVE_TOP;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->interlaced_frame = 1;
        out->top_field_first = tff;

        /* copy upper/lower field from cur */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
                           tinterlace->flags);
        /* copy lower/upper field from next */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
                           tinterlace->flags);
        av_frame_free(&tinterlace->next);
        break;
    case MODE_INTERLACEX2: /* re-interlace preserving image height, double frame rate */
        /* output current frame first */
        out = av_frame_clone(cur);
        if (!out)
            return AVERROR(ENOMEM);
        out->interlaced_frame = 1;
        if (cur->pts != AV_NOPTS_VALUE)
            out->pts = cur->pts*2;

        out->pts = av_rescale_q(out->pts, tinterlace->preout_time_base, outlink->time_base);
        if ((ret = ff_filter_frame(outlink, out)) < 0)
            return ret;

        /* output mix of current and next frame */
        tff = next->top_field_first;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, next);
        out->interlaced_frame = 1;
        out->top_field_first = !tff;

        if (next->pts != AV_NOPTS_VALUE && cur->pts != AV_NOPTS_VALUE)
            out->pts = cur->pts + next->pts;
        else
            out->pts = AV_NOPTS_VALUE;
        /* write current frame second field lines into the second field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
                           tinterlace->flags);
        /* write next frame first field lines into the first field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
                           tinterlace->flags);
        break;
    default:
        av_assert0(0);
    }

    out->pts = av_rescale_q(out->pts, tinterlace->preout_time_base, outlink->time_base);
    ret = ff_filter_frame(outlink, out);
    tinterlace->frame++;

    return ret;
}
Example #21
0
int ff_decklink_set_format(AVFormatContext *avctx,
                               int width, int height,
                               int tb_num, int tb_den,
                               enum AVFieldOrder field_order,
                               decklink_direction_t direction, int num)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
    BMDDisplayModeSupport support;
    IDeckLinkDisplayModeIterator *itermode;
    IDeckLinkDisplayMode *mode;
    int i = 1;
    HRESULT res;

    av_log(avctx, AV_LOG_DEBUG, "Trying to find mode for frame size %dx%d, frame timing %d/%d, field order %d, direction %d, mode number %d, format code %s\n",
        width, height, tb_num, tb_den, field_order, direction, num, (cctx->format_code) ? cctx->format_code : "(unset)");

    if (direction == DIRECTION_IN) {
        res = ctx->dli->GetDisplayModeIterator (&itermode);
    } else {
        res = ctx->dlo->GetDisplayModeIterator (&itermode);
    }

    if (res != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not get Display Mode Iterator\n");
        return AVERROR(EIO);
    }

    char format_buf[] = "    ";
    if (cctx->format_code)
        memcpy(format_buf, cctx->format_code, FFMIN(strlen(cctx->format_code), sizeof(format_buf)));
    BMDDisplayMode target_mode = (BMDDisplayMode)AV_RB32(format_buf);
    AVRational target_tb = av_make_q(tb_num, tb_den);
    ctx->bmd_mode = bmdModeUnknown;
    while ((ctx->bmd_mode == bmdModeUnknown) && itermode->Next(&mode) == S_OK) {
        BMDTimeValue bmd_tb_num, bmd_tb_den;
        int bmd_width  = mode->GetWidth();
        int bmd_height = mode->GetHeight();
        BMDDisplayMode bmd_mode = mode->GetDisplayMode();
        BMDFieldDominance bmd_field_dominance = mode->GetFieldDominance();

        mode->GetFrameRate(&bmd_tb_num, &bmd_tb_den);
        AVRational mode_tb = av_make_q(bmd_tb_num, bmd_tb_den);

        if ((bmd_width == width &&
             bmd_height == height &&
             !av_cmp_q(mode_tb, target_tb) &&
             field_order_eq(field_order, bmd_field_dominance))
             || i == num
             || target_mode == bmd_mode) {
            ctx->bmd_mode   = bmd_mode;
            ctx->bmd_width  = bmd_width;
            ctx->bmd_height = bmd_height;
            ctx->bmd_tb_den = bmd_tb_den;
            ctx->bmd_tb_num = bmd_tb_num;
            ctx->bmd_field_dominance = bmd_field_dominance;
            av_log(avctx, AV_LOG_INFO, "Found Decklink mode %d x %d with rate %.2f%s\n",
                bmd_width, bmd_height, 1/av_q2d(mode_tb),
                (ctx->bmd_field_dominance==bmdLowerFieldFirst || ctx->bmd_field_dominance==bmdUpperFieldFirst)?"(i)":"");
        }

        mode->Release();
        i++;
    }

    itermode->Release();

    if (ctx->bmd_mode == bmdModeUnknown)
        return -1;
    if (direction == DIRECTION_IN) {
        if (ctx->dli->DoesSupportVideoMode(ctx->bmd_mode, (BMDPixelFormat) cctx->raw_format,
                                           bmdVideoOutputFlagDefault,
                                           &support, NULL) != S_OK)
            return -1;
    } else {
        if (ctx->dlo->DoesSupportVideoMode(ctx->bmd_mode, bmdFormat8BitYUV,
                                           bmdVideoOutputFlagDefault,
                                           &support, NULL) != S_OK)
            return -1;
    }
    if (support == bmdDisplayModeSupported)
        return 0;

    return -1;
}
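Note how the mode match above compares frame timings as exact rationals with av_cmp_q rather than as floating-point rates, so 30000/1001 (29.97 fps NTSC) never falsely matches 30/1. A minimal check in the same spirit:

#include <libavutil/rational.h>

/* av_cmp_q() returns 0 when the two rationals are equal. */
static int same_frame_timing(AVRational a, AVRational b)
{
    return av_cmp_q(a, b) == 0;
}
/* same_frame_timing(av_make_q(30000, 1001), av_make_q(30, 1)) -> 0 (false) */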