static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AlphaMergeContext *merge = ctx->priv;

    int ret = 0;
    int is_alpha = (inlink == ctx->inputs[1]);
    struct FFBufQueue *queue =
        (is_alpha ? &merge->queue_alpha : &merge->queue_main);
    ff_bufqueue_add(ctx, queue, buf);

    do {
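        /* Produce output as soon as both queues hold a frame: draw_frame()
         * merges the alpha input into the main frame, which is then sent
         * downstream; the consumed alpha frame is freed afterwards. */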
        AVFrame *main_buf, *alpha_buf;

        if (!ff_bufqueue_peek(&merge->queue_main, 0) ||
            !ff_bufqueue_peek(&merge->queue_alpha, 0)) break;

        main_buf = ff_bufqueue_get(&merge->queue_main);
        alpha_buf = ff_bufqueue_get(&merge->queue_alpha);

        merge->frame_requested = 0;
        draw_frame(ctx, main_buf, alpha_buf);
        ret = ff_filter_frame(ctx->outputs[0], main_buf);
        av_frame_free(&alpha_buf);
    } while (ret >= 0);
    return ret;
}
Example #2
static int end_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AlphaMergeContext *merge = ctx->priv;

    int is_alpha = (inlink == ctx->inputs[1]);
    struct FFBufQueue *queue =
        (is_alpha ? &merge->queue_alpha : &merge->queue_main);
    ff_bufqueue_add(ctx, queue, inlink->cur_buf);
    inlink->cur_buf = NULL;

    while (1) {
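        /* Same pairing as the AVFrame path, but on the legacy AVFilterBufferRef
         * API: each matched main/alpha pair is merged and pushed downstream
         * with ff_start_frame()/ff_end_frame(). */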
        AVFilterBufferRef *main_buf, *alpha_buf;

        if (!ff_bufqueue_peek(&merge->queue_main, 0) ||
            !ff_bufqueue_peek(&merge->queue_alpha, 0)) break;

        main_buf = ff_bufqueue_get(&merge->queue_main);
        alpha_buf = ff_bufqueue_get(&merge->queue_alpha);

        ctx->outputs[0]->out_buf = main_buf;
        ff_start_frame(ctx->outputs[0], avfilter_ref_buffer(main_buf, ~0));
        merge->frame_requested = 0;
        draw_frame(ctx, main_buf, alpha_buf);
        ff_end_frame(ctx->outputs[0]);
        avfilter_unref_buffer(alpha_buf);
    }
    return 0;
}
Example #3
static av_cold int ffat_init_encoder(AVCodecContext *avctx)
{
    ATDecodeContext *at = avctx->priv_data;
    OSStatus status;

    AudioStreamBasicDescription in_format = {
        .mSampleRate = avctx->sample_rate,
        .mFormatID = kAudioFormatLinearPCM,
        .mFormatFlags = ((avctx->sample_fmt == AV_SAMPLE_FMT_FLT ||
                          avctx->sample_fmt == AV_SAMPLE_FMT_DBL) ? kAudioFormatFlagIsFloat
                        : avctx->sample_fmt == AV_SAMPLE_FMT_U8 ? 0
                        : kAudioFormatFlagIsSignedInteger)
                        | kAudioFormatFlagIsPacked,
        .mBytesPerPacket = av_get_bytes_per_sample(avctx->sample_fmt) * avctx->channels,
        .mFramesPerPacket = 1,
        .mBytesPerFrame = av_get_bytes_per_sample(avctx->sample_fmt) * avctx->channels,
        .mChannelsPerFrame = avctx->channels,
        .mBitsPerChannel = av_get_bytes_per_sample(avctx->sample_fmt) * 8,
    };
    AudioStreamBasicDescription out_format = {
        .mSampleRate = avctx->sample_rate,
        .mFormatID = ffat_get_format_id(avctx->codec_id, avctx->profile),
        .mChannelsPerFrame = in_format.mChannelsPerFrame,
    };
    UInt32 layout_size = sizeof(AudioChannelLayout) +
                         sizeof(AudioChannelDescription) * avctx->channels;
    AudioChannelLayout *channel_layout = av_malloc(layout_size);

    if (!channel_layout)
        return AVERROR(ENOMEM);

    if (avctx->codec_id == AV_CODEC_ID_ILBC) {
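        /* iLBC uses fixed packet sizes: 160 samples / 38 bytes per packet in
         * 20 ms mode, 240 samples / 50 bytes in 30 ms mode. */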
        int mode = get_ilbc_mode(avctx);
        out_format.mFramesPerPacket  = 8000 * mode / 1000;
        out_format.mBytesPerPacket   = (mode == 20 ? 38 : 50);
    }

    status = AudioConverterNew(&in_format, &out_format, &at->converter);

    if (status != 0) {
        av_log(avctx, AV_LOG_ERROR, "AudioToolbox init error: %i\n", (int)status);
        av_free(channel_layout);
        return AVERROR_UNKNOWN;
    }

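    /* Map the libavcodec channel layout onto a CoreAudio AudioChannelLayout
     * and attach it to the converter's input side. */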
    if (!avctx->channel_layout)
        avctx->channel_layout = av_get_default_channel_layout(avctx->channels);

    if ((status = remap_layout(channel_layout, avctx->channel_layout, avctx->channels)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid channel layout\n");
        av_free(channel_layout);
        return status;
    }

    if (AudioConverterSetProperty(at->converter, kAudioConverterInputChannelLayout,
                                  layout_size, channel_layout)) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported input channel layout\n");
        av_free(channel_layout);
        return AVERROR(EINVAL);
    }
    if (avctx->codec_id == AV_CODEC_ID_AAC) {
        int tag = get_aac_tag(avctx->channel_layout);
        if (tag) {
            channel_layout->mChannelLayoutTag = tag;
            channel_layout->mNumberChannelDescriptions = 0;
        }
    }
    if (AudioConverterSetProperty(at->converter, kAudioConverterOutputChannelLayout,
                                  layout_size, channel_layout)) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported output channel layout\n");
        av_free(channel_layout);
        return AVERROR(EINVAL);
    }
    av_free(channel_layout);

    if (avctx->bits_per_raw_sample)
        AudioConverterSetProperty(at->converter,
                                  kAudioConverterPropertyBitDepthHint,
                                  sizeof(avctx->bits_per_raw_sample),
                                  &avctx->bits_per_raw_sample);

#if !TARGET_OS_IPHONE
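    /* Rate control selection is only compiled when not targeting iOS: default
     * to VBR when constant quality (AV_CODEC_FLAG_QSCALE) was requested,
     * CBR otherwise. */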
    if (at->mode == -1)
        at->mode = (avctx->flags & AV_CODEC_FLAG_QSCALE) ?
                   kAudioCodecBitRateControlMode_Variable :
                   kAudioCodecBitRateControlMode_Constant;

    AudioConverterSetProperty(at->converter, kAudioCodecPropertyBitRateControlMode,
                              sizeof(at->mode), &at->mode);

    if (at->mode == kAudioCodecBitRateControlMode_Variable) {
        int q = avctx->global_quality / FF_QP2LAMBDA;
        if (q < 0 || q > 14) {
            av_log(avctx, AV_LOG_WARNING,
                   "VBR quality %d out of range, should be 0-14\n", q);
            q = av_clip(q, 0, 14);
        }
        q = 127 - q * 9;
        AudioConverterSetProperty(at->converter, kAudioCodecPropertySoundQualityForVBR,
                                  sizeof(q), &q);
    } else
#endif
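    /* For constant bit rate (and always on iOS), clamp the requested rate to
     * the converter's list of applicable encode bit rates before applying it. */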
    if (avctx->bit_rate > 0) {
        UInt32 rate = avctx->bit_rate;
        UInt32 size;
        status = AudioConverterGetPropertyInfo(at->converter,
                                               kAudioConverterApplicableEncodeBitRates,
                                               &size, NULL);
        if (!status && size) {
            UInt32 new_rate = rate;
            int count;
            int i;
            AudioValueRange *ranges = av_malloc(size);
            if (!ranges)
                return AVERROR(ENOMEM);
            AudioConverterGetProperty(at->converter,
                                      kAudioConverterApplicableEncodeBitRates,
                                      &size, ranges);
            count = size / sizeof(AudioValueRange);
            for (i = 0; i < count; i++) {
                AudioValueRange *range = &ranges[i];
                if (rate >= range->mMinimum && rate <= range->mMaximum) {
                    new_rate = rate;
                    break;
                } else if (rate > range->mMaximum) {
                    new_rate = range->mMaximum;
                } else {
                    new_rate = range->mMinimum;
                    break;
                }
            }
            if (new_rate != rate) {
                av_log(avctx, AV_LOG_WARNING,
                       "Bitrate %u not allowed; changing to %u\n", rate, new_rate);
                rate = new_rate;
            }
            av_free(ranges);
        }
        AudioConverterSetProperty(at->converter, kAudioConverterEncodeBitRate,
                                  sizeof(rate), &rate);
    }

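    /* Map the small-integer quality option (0 = best) onto the AudioToolbox
     * codec-quality scale: 0 -> 96, each step down subtracts 32. */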
    at->quality = 96 - at->quality * 32;
    AudioConverterSetProperty(at->converter, kAudioConverterCodecQuality,
                              sizeof(at->quality), &at->quality);

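    /* Export the converter's "magic cookie" as codec extradata; ALAC and AAC
     * cookies need extra massaging below. */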
    if (!AudioConverterGetPropertyInfo(at->converter, kAudioConverterCompressionMagicCookie,
                                       &avctx->extradata_size, NULL) &&
        avctx->extradata_size) {
        int extradata_size = avctx->extradata_size;
        uint8_t *extradata;
        if (!(avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE)))
            return AVERROR(ENOMEM);
        if (avctx->codec_id == AV_CODEC_ID_ALAC) {
            avctx->extradata_size = 0x24;
            AV_WB32(avctx->extradata,     0x24);
            AV_WB32(avctx->extradata + 4, MKBETAG('a','l','a','c'));
            extradata = avctx->extradata + 12;
            avctx->extradata_size = 0x24;
        } else {
            extradata = avctx->extradata;
        }
        status = AudioConverterGetProperty(at->converter,
                                           kAudioConverterCompressionMagicCookie,
                                           &extradata_size, extradata);
        if (status != 0) {
            av_log(avctx, AV_LOG_ERROR, "AudioToolbox cookie error: %i\n", (int)status);
            return AVERROR_UNKNOWN;
        } else if (avctx->codec_id == AV_CODEC_ID_AAC) {
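            /* The AAC cookie is an MPEG-4 ES descriptor: walk the descriptor
             * tree and keep only the DecoderSpecificInfo (AudioSpecificConfig)
             * as extradata. */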
            GetByteContext gb;
            int tag, len;
            bytestream2_init(&gb, extradata, extradata_size);
            do {
                len = read_descr(&gb, &tag);
                if (tag == MP4DecConfigDescrTag) {
                    bytestream2_skip(&gb, 13);
                    len = read_descr(&gb, &tag);
                    if (tag == MP4DecSpecificDescrTag) {
                        len = FFMIN(gb.buffer_end - gb.buffer, len);
                        memmove(extradata, gb.buffer, len);
                        avctx->extradata_size = len;
                        break;
                    }
                } else if (tag == MP4ESDescrTag) {
                    int flags;
                    bytestream2_skip(&gb, 2);
                    flags = bytestream2_get_byte(&gb);
                    if (flags & 0x80) //streamDependenceFlag
                        bytestream2_skip(&gb, 2);
                    if (flags & 0x40) //URL_Flag
                        bytestream2_skip(&gb, bytestream2_get_byte(&gb));
                    if (flags & 0x20) //OCRstreamFlag
                        bytestream2_skip(&gb, 2);
                }
            } while (bytestream2_get_bytes_left(&gb));
        } else if (avctx->codec_id != AV_CODEC_ID_ALAC) {
            avctx->extradata_size = extradata_size;
        }
    }

    ffat_update_ctx(avctx);

#if !TARGET_OS_IPHONE && defined(__MAC_10_9)
    if (at->mode == kAudioCodecBitRateControlMode_Variable && avctx->rc_max_rate) {
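        /* Derive a per-packet size limit from the requested maximum rate in
         * VBR mode. */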
        UInt32 max_size = avctx->rc_max_rate * avctx->frame_size / avctx->sample_rate;
        if (max_size)
            AudioConverterSetProperty(at->converter, kAudioCodecPropertyPacketSizeLimitForVBR,
                                      sizeof(max_size), &max_size);
    }
#endif

    ff_af_queue_init(avctx, &at->afq);

    return 0;
}

static OSStatus ffat_encode_callback(AudioConverterRef converter, UInt32 *nb_packets,
                                     AudioBufferList *data,
                                     AudioStreamPacketDescription **packets,
                                     void *inctx)
{
    AVCodecContext *avctx = inctx;
    ATDecodeContext *at = avctx->priv_data;
    AVFrame *frame;

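    /* Input-supply callback used by the converter: hand it the next queued PCM
     * frame. With nothing queued, report EOF (zero packets, status 0) or a
     * retryable "no data yet" condition (zero packets, nonzero status). */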
    if (!at->frame_queue.available) {
        if (at->eof) {
            *nb_packets = 0;
            return 0;
        } else {
            *nb_packets = 0;
            return 1;
        }
    }

    frame = ff_bufqueue_get(&at->frame_queue);

    data->mNumberBuffers              = 1;
    data->mBuffers[0].mNumberChannels = avctx->channels;
    data->mBuffers[0].mDataByteSize   = frame->nb_samples *
                                        av_get_bytes_per_sample(avctx->sample_fmt) *
                                        avctx->channels;
    data->mBuffers[0].mData           = frame->data[0];
    if (*nb_packets > frame->nb_samples)
        *nb_packets = frame->nb_samples;

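    /* Keep the frame referenced in used_frame_queue while the converter reads
     * from its data buffer; it is released later. */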
    ff_bufqueue_add(avctx, &at->used_frame_queue, frame);

    return 0;
}

static int ffat_encode(AVCodecContext *avctx, AVPacket *avpkt,
                       const AVFrame *frame, int *got_packet_ptr)
{
    ATDecodeContext *at = avctx->priv_data;
    OSStatus ret;

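    /* A single output buffer sized for one encoded packet (at->pkt_size). */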
    AudioBufferList out_buffers = {
        .mNumberBuffers = 1,
        .mBuffers = {
            {
                .mNumberChannels = avctx->channels,
                .mDataByteSize = at->pkt_size,
            }
        }
    };
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ATADenoiseContext *s = ctx->priv;
    AVFrame *out, *in;
    int i;

    if (s->q.available != s->size) {
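        /* The temporal window is not full yet: pad its front by cloning the
         * first frame s->mid times, keep queueing input and produce no output. */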
        if (s->q.available < s->mid) {
            for (i = 0; i < s->mid; i++) {
                out = av_frame_clone(buf);
                if (!out) {
                    av_frame_free(&buf);
                    return AVERROR(ENOMEM);
                }
                ff_bufqueue_add(ctx, &s->q, out);
            }
        }
        if (s->q.available < s->size) {
            ff_bufqueue_add(ctx, &s->q, buf);
            s->available++;
        }
        return 0;
    }

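    /* The middle frame of the window is the one being filtered and output. */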
    in = ff_bufqueue_peek(&s->q, s->mid);

    if (!ctx->is_disabled) {
        ThreadData td;

        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&buf);
            return AVERROR(ENOMEM);
        }

        for (i = 0; i < s->size; i++) {
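            /* Collect plane pointers and strides of every frame in the window
             * for the slice-threaded temporal filter. */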
            AVFrame *frame = ff_bufqueue_peek(&s->q, i);

            s->data[0][i] = frame->data[0];
            s->data[1][i] = frame->data[1];
            s->data[2][i] = frame->data[2];
            s->linesize[0][i] = frame->linesize[0];
            s->linesize[1][i] = frame->linesize[1];
            s->linesize[2][i] = frame->linesize[2];
        }

        td.in = in; td.out = out;
        ctx->internal->execute(ctx, s->filter_slice, &td, NULL,
                               FFMIN3(s->planeheight[1],
                                      s->planeheight[2],
                                      ff_filter_get_nb_threads(ctx)));
        av_frame_copy_props(out, in);
    } else {
        out = av_frame_clone(in);
        if (!out) {
            av_frame_free(&buf);
            return AVERROR(ENOMEM);
        }
    }

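    /* Slide the window: drop the oldest frame and queue the new input for the
     * next call. */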
    in = ff_bufqueue_get(&s->q);
    av_frame_free(&in);
    ff_bufqueue_add(ctx, &s->q, buf);

    return ff_filter_frame(outlink, out);
}