Example 1
static int decode_packet(int *got_frame, int cached)
{
    int ret = 0;
    int decoded = pkt.size;

    *got_frame = 0;
    //avpicture_fill(h264_frame, (uint8_t*)yuv_buff, PIX_FMT_YUV420P, frame->width*SCALE_MULTIPLE, frame->height*SCALE_MULTIPLE);  

    if (pkt.stream_index == video_stream_idx) {
        /* decode video frame */
        ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
            return ret;
        }

        if (*got_frame) {

            if (frame->width != width || frame->height != height ||
                frame->format != pix_fmt) {
                /* To handle this change, one could call av_image_alloc again and
                 * decode the following frames into another rawvideo file. */
                fprintf(stderr, "Error: Width, height and pixel format have to be "
                        "constant in a rawvideo file, but the width, height or "
                        "pixel format of the input video changed:\n"
                        "old: width = %d, height = %d, format = %s\n"
                        "new: width = %d, height = %d, format = %s\n",
                        width, height, av_get_pix_fmt_name(pix_fmt),
                        frame->width, frame->height,
                        av_get_pix_fmt_name(frame->format));
                return -1;
            }

            printf("video_frame%s n:%d coded_n:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   video_frame_count++, frame->coded_picture_number,
                   av_ts2timestr(frame->pts, &video_dec_ctx->time_base));

            /* copy decoded frame to destination buffer:
             * this is required since rawvideo expects non-aligned data */
            av_image_copy(video_dst_data, video_dst_linesize,
                          (const uint8_t **)(frame->data), frame->linesize,
                          pix_fmt, width, height);

            /* scale the YUV frame to the destination size/format */
            sws_scale(sws_ctx, (const uint8_t * const *)video_dst_data,
                      video_dst_linesize, 0, height,
                      scale_video_dst_data, scale_video_dst_linesize);

            /* encode the scaled YUV frame to H.264 */
            encode2file();

            /* write the scaled frame to the rawvideo file */
            //fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
            fwrite(scale_video_dst_data[0], 1, scale_video_dst_bufsize, video_dst_file);
        }
    } else if (pkt.stream_index == audio_stream_idx) {
        /* decode audio frame */
        ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
            return ret;
        }
        /* Some audio decoders decode only part of the packet, and have to be
         * called again with the remainder of the packet data.
         * Sample: fate-suite/lossless-audio/luckynight-partial.shn
         * Also, some decoders might over-read the packet. */
        decoded = FFMIN(ret, pkt.size);

        if (*got_frame) {
            size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
            printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   audio_frame_count++, frame->nb_samples,
                   av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));

            /* Write the raw audio data samples of the first plane. This works
             * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
             * most audio decoders output planar audio, which uses a separate
             * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
             * In other words, this code will write only the first audio channel
             * in these cases.
             * You should use libswresample or libavfilter to convert the frame
             * to packed data. */
            fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
        }
    }

    /* If we use frame reference counting, we own the data and need
     * to de-reference it when we don't use it anymore */
    if (*got_frame && refcount)
        av_frame_unref(frame);

    return decoded;
}
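Example 1 uses a pre-created sws_ctx and a pre-allocated scale_video_dst_data buffer without showing their setup. Below is a minimal sketch of what that setup could look like, assuming the output is YUV420P at SCALE_MULTIPLE times the input size; the variable names come from the snippet, but the function itself and the chosen scaler flags are assumptions, not the original author's code.

#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>

/* Hypothetical setup for the scaling path used in decode_packet().
 * sws_ctx, scale_video_dst_data/linesize/bufsize are the globals the
 * snippet references; SCALE_MULTIPLE is the factor from the
 * commented-out avpicture_fill() line. */
static int init_scaler(void)
{
    int scaled_w = width  * SCALE_MULTIPLE;
    int scaled_h = height * SCALE_MULTIPLE;

    sws_ctx = sws_getContext(width, height, pix_fmt,
                             scaled_w, scaled_h, AV_PIX_FMT_YUV420P,
                             SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws_ctx)
        return AVERROR(EINVAL);

    /* align=1 keeps the planes contiguous so scale_video_dst_data[0]
     * can be fwrite()n as a single rawvideo frame */
    scale_video_dst_bufsize = av_image_alloc(scale_video_dst_data,
                                             scale_video_dst_linesize,
                                             scaled_w, scaled_h,
                                             AV_PIX_FMT_YUV420P, 1);
    return scale_video_dst_bufsize < 0 ? scale_video_dst_bufsize : 0;
}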
Example 2
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
{
    int (*filter_frame)(AVFilterLink *, AVFrame *);
    AVFilterContext *dstctx = link->dst;
    AVFilterPad *dst = link->dstpad;
    AVFrame *out;
    int ret;
    AVFilterCommand *cmd = link->dst->command_queue;
    int64_t pts;

    if (link->closed) {
        av_frame_free(&frame);
        return AVERROR_EOF;
    }

    if (!(filter_frame = dst->filter_frame))
        filter_frame = default_filter_frame;

    /* copy the frame if needed */
    if (dst->needs_writable && !av_frame_is_writable(frame)) {
        av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

        /* Maybe use ff_copy_buffer_ref instead? */
        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            out = ff_get_video_buffer(link, link->w, link->h);
            break;
        case AVMEDIA_TYPE_AUDIO:
            out = ff_get_audio_buffer(link, frame->nb_samples);
            break;
        default:
            return AVERROR(EINVAL);
        }
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, frame);

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
                          frame->format, frame->width, frame->height);
            break;
        case AVMEDIA_TYPE_AUDIO:
            av_samples_copy(out->extended_data, frame->extended_data,
                            0, 0, frame->nb_samples,
                            av_get_channel_layout_nb_channels(frame->channel_layout),
                            frame->format);
            break;
        default:
            return AVERROR(EINVAL);
        }

        av_frame_free(&frame);
    } else
        out = frame;

    while (cmd && cmd->time <= out->pts * av_q2d(link->time_base)) {
        av_log(link->dst, AV_LOG_DEBUG,
               "Processing command time:%f command:%s arg:%s\n",
               cmd->time, cmd->command, cmd->arg);
        avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
        ff_command_queue_pop(link->dst);
        cmd = link->dst->command_queue;
    }

    pts = out->pts;
    if (dstctx->enable_str) {
        int64_t pos = av_frame_get_pkt_pos(out);
        dstctx->var_values[VAR_N] = link->frame_count;
        dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
        dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;

        dstctx->is_disabled = !av_expr_eval(dstctx->enable, dstctx->var_values, NULL);
        if (dstctx->is_disabled &&
                (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
            filter_frame = default_filter_frame;
    }
    ret = filter_frame(link, out);
    link->frame_count++;
    link->frame_requested = 0;
    ff_update_link_current_pts(link, pts);
    return ret;
}
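The command_queue drained at the top of the loop above is filled from the application side. A sketch of how a timed command might be queued through the public API follows; the target filter "volume" and its argument are illustrative values, not part of the original snippet.

#include <libavfilter/avfilter.h>

/* Queue "volume 0.5" on every filter instance named "volume";
 * ff_filter_frame_framed() applies it once a frame whose
 * pts * time_base reaches t passes through the link. */
static void queue_example_command(AVFilterGraph *graph, double t)
{
    avfilter_graph_queue_command(graph, "volume", "volume", "0.5", 0, t);
}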
Example 3
int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    dst->format         = src->format;
    dst->width          = src->width;
    dst->height         = src->height;
    dst->channel_layout = src->channel_layout;
    dst->nb_samples     = src->nb_samples;

    ret = av_frame_copy_props(dst, src);
    if (ret < 0)
        return ret;

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 32);
        if (ret < 0)
            return ret;

        if (src->nb_samples) {
            int ch = av_get_channel_layout_nb_channels(src->channel_layout);
            av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
                            dst->nb_samples, ch, dst->format);
        } else {
            av_image_copy(dst->data, dst->linesize,
                          (const uint8_t **)src->data, src->linesize,
                          dst->format, dst->width, dst->height);
        }
        return 0;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf) && src->buf[i]; i++) {
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (src->extended_buf) {
        dst->extended_buf = av_mallocz(sizeof(*dst->extended_buf) *
                                       src->nb_extended_buf);
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;

        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    /* duplicate extended data */
    if (src->extended_data != src->data) {
        int ch = av_get_channel_layout_nb_channels(src->channel_layout);

        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }

        dst->extended_data = av_malloc(sizeof(*dst->extended_data) * ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    memcpy(dst->data,     src->data,     sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}
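For context, here is a typical caller of av_frame_ref(); whether dst ends up sharing src's buffers or owning a private copy depends on the refcounting branch above. This is a sketch only: the wrapper function is hypothetical and src is assumed to be a valid frame.

#include <libavutil/frame.h>

static int borrow_frame(AVFrame *src)
{
    AVFrame *dst = av_frame_alloc();
    int err;

    if (!dst)
        return AVERROR(ENOMEM);

    err = av_frame_ref(dst, src);   /* shares buffers when refcounted */
    if (err < 0) {
        av_frame_free(&dst);
        return err;
    }

    /* ... read through dst ... */

    av_frame_free(&dst);            /* unrefs the buffers, then frees the frame */
    return 0;
}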
Example 4
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
{
    int (*filter_frame)(AVFilterLink *, AVFrame *);
    AVFilterPad *dst = link->dstpad;
    AVFrame *out = NULL;
    int ret;

    FF_DPRINTF_START(NULL, filter_frame);
    ff_dlog_link(NULL, link, 1);

    if (!(filter_frame = dst->filter_frame))
        filter_frame = default_filter_frame;

    /* copy the frame if needed */
    if (dst->needs_writable && !av_frame_is_writable(frame)) {
        av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            out = ff_get_video_buffer(link, link->w, link->h);
            break;
        case AVMEDIA_TYPE_AUDIO:
            out = ff_get_audio_buffer(link, frame->nb_samples);
            break;
        default:
            ret = AVERROR(EINVAL);
            goto fail;
        }
        if (!out) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = av_frame_copy_props(out, frame);
        if (ret < 0)
            goto fail;

        switch (link->type) {
        case AVMEDIA_TYPE_VIDEO:
            av_image_copy(out->data, out->linesize,
                          (const uint8_t **)frame->data, frame->linesize,
                          frame->format, frame->width, frame->height);
            break;
        case AVMEDIA_TYPE_AUDIO:
            av_samples_copy(out->extended_data, frame->extended_data,
                            0, 0, frame->nb_samples,
                            av_get_channel_layout_nb_channels(frame->channel_layout),
                            frame->format);
            break;
        default:
            ret = AVERROR(EINVAL);
            goto fail;
        }

        av_frame_free(&frame);
    } else
        out = frame;

    return filter_frame(link, out);

fail:
    av_frame_free(&out);
    av_frame_free(&frame);
    return ret;
}
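The needs_writable branch above is the internal sibling of what application code reaches through av_frame_make_writable(), which likewise copies the data only when the buffers are shared. A minimal sketch, with a hypothetical function name:

#include <libavutil/frame.h>

static int modify_in_place(AVFrame *frame)
{
    /* copies the data into private buffers only if they are shared */
    int err = av_frame_make_writable(frame);
    if (err < 0)
        return err;

    frame->data[0][0] = 0;  /* now safe: no other reference sees the write */
    return 0;
}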
Example 5
static int videotoolbox_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VTContext  *vt = ist->hwaccel_ctx;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)frame->data[3];
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    CVReturn err;
    uint8_t *data[4] = { 0 };
    int linesize[4] = { 0 };
    int planes, ret, i;
    char codec_str[32];

    av_frame_unref(vt->tmp_frame);

    switch (pixel_format) {
    case kCVPixelFormatType_420YpCbCr8Planar: vt->tmp_frame->format = AV_PIX_FMT_YUV420P; break;
    case kCVPixelFormatType_422YpCbCr8:       vt->tmp_frame->format = AV_PIX_FMT_UYVY422; break;
    case kCVPixelFormatType_32BGRA:           vt->tmp_frame->format = AV_PIX_FMT_BGRA; break;
#ifdef kCFCoreFoundationVersionNumber10_7
    case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: vt->tmp_frame->format = AV_PIX_FMT_NV12; break;
#endif
    default:
        av_get_codec_tag_string(codec_str, sizeof(codec_str), s->codec_tag);
        av_log(NULL, AV_LOG_ERROR,
               "%s: Unsupported pixel format: %s\n", codec_str, videotoolbox_pixfmt);
        return AVERROR(ENOSYS);
    }

    vt->tmp_frame->width  = frame->width;
    vt->tmp_frame->height = frame->height;
    ret = av_frame_get_buffer(vt->tmp_frame, 32);
    if (ret < 0)
        return ret;

    err = CVPixelBufferLockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (err != kCVReturnSuccess) {
        av_log(NULL, AV_LOG_ERROR, "Error locking the pixel buffer.\n");
        return AVERROR_UNKNOWN;
    }

    if (CVPixelBufferIsPlanar(pixbuf)) {

        planes = CVPixelBufferGetPlaneCount(pixbuf);
        for (i = 0; i < planes; i++) {
            data[i]     = CVPixelBufferGetBaseAddressOfPlane(pixbuf, i);
            linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(pixbuf, i);
        }
    } else {
        data[0] = CVPixelBufferGetBaseAddress(pixbuf);
        linesize[0] = CVPixelBufferGetBytesPerRow(pixbuf);
    }

    av_image_copy(vt->tmp_frame->data, vt->tmp_frame->linesize,
                  (const uint8_t **)data, linesize, vt->tmp_frame->format,
                  frame->width, frame->height);

    ret = av_frame_copy_props(vt->tmp_frame, frame);
    CVPixelBufferUnlockBaseAddress(pixbuf, kCVPixelBufferLock_ReadOnly);
    if (ret < 0)
        return ret;

    av_frame_unref(frame);
    av_frame_move_ref(frame, vt->tmp_frame);

    return 0;
}
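The closing unref/move_ref pair is the standard idiom for replacing a frame's contents in place; isolated here for clarity, with a hypothetical function name:

#include <libavutil/frame.h>

static void replace_contents(AVFrame *frame, AVFrame *tmp)
{
    av_frame_unref(frame);          /* drop the old (hwaccel) buffers */
    av_frame_move_ref(frame, tmp);  /* steal tmp's buffers; tmp is reset
                                     * and reusable afterwards */
}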
Example 6
static int libkvazaar_encode(AVCodecContext *avctx,
                             AVPacket *avpkt,
                             const AVFrame *frame,
                             int *got_packet_ptr)
{
    int retval = 0;
    kvz_picture *img_in = NULL;

    kvz_data_chunk *data_out = NULL;
    uint32_t len_out = 0;
    kvz_picture *recon_pic = NULL;
    kvz_frame_info frame_info;

    LibkvazaarContext *ctx = avctx->priv_data;

    *got_packet_ptr = 0;

    if (frame) {
        if (frame->width != ctx->config->width ||
                frame->height != ctx->config->height) {
            av_log(avctx, AV_LOG_ERROR,
                   "Changing video dimensions during encoding is not supported. "
                   "(changed from %dx%d to %dx%d)\n",
                   ctx->config->width, ctx->config->height,
                   frame->width, frame->height);
            retval = AVERROR_INVALIDDATA;
            goto done;
        }

        if (frame->format != avctx->pix_fmt) {
            av_log(avctx, AV_LOG_ERROR,
                   "Changing pixel format during encoding is not supported. "
                   "(changed from %s to %s)\n",
                   av_get_pix_fmt_name(avctx->pix_fmt),
                   av_get_pix_fmt_name(frame->format));
            retval = AVERROR_INVALIDDATA;
            goto done;
        }

        // Allocate input picture for kvazaar.
        img_in = ctx->api->picture_alloc(frame->width, frame->height);
        if (!img_in) {
            av_log(avctx, AV_LOG_ERROR, "Failed to allocate picture.\n");
            retval = AVERROR(ENOMEM);
            goto done;
        }

        // Copy pixels from frame to img_in. kvazaar expects tightly
        // packed YUV420 planes, hence the explicit destination linesizes.
        {
            int dst_linesizes[4] = {
                frame->width,
                frame->width / 2,
                frame->width / 2,
                0
            };
            av_image_copy(img_in->data, dst_linesizes,
                          (const uint8_t **)frame->data, frame->linesize,
                          frame->format, frame->width, frame->height);
        }

        img_in->pts = frame->pts;
    }

    if (!ctx->api->encoder_encode(ctx->encoder, img_in,
                                  &data_out, &len_out,
                                  &recon_pic, NULL,
                                  &frame_info)) {
        av_log(avctx, AV_LOG_ERROR, "Failed to encode frame.\n");
        retval = AVERROR_EXTERNAL;
        goto done;
    }

    if (data_out) {
        kvz_data_chunk *chunk = NULL;
        uint64_t written = 0;

        retval = ff_alloc_packet(avpkt, len_out);
        if (retval < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to allocate output packet.\n");
            goto done;
        }

        for (chunk = data_out; chunk != NULL; chunk = chunk->next) {
            av_assert0(written + chunk->len <= len_out);
            memcpy(avpkt->data + written, chunk->data, chunk->len);
            written += chunk->len;
        }
        *got_packet_ptr = 1;

        ctx->api->chunk_free(data_out);
        data_out = NULL;

        avpkt->pts = recon_pic->pts;
        avpkt->dts = recon_pic->dts;

        avpkt->flags = 0;
        // IRAP VCL NAL unit types span the range
        // [BLA_W_LP (16), RSV_IRAP_VCL23 (23)].
        if (frame_info.nal_unit_type >= KVZ_NAL_BLA_W_LP &&
                frame_info.nal_unit_type <= KVZ_NAL_RSV_IRAP_VCL23) {
            avpkt->flags |= AV_PKT_FLAG_KEY;
        }
    }

done:
    ctx->api->picture_free(img_in);
    ctx->api->picture_free(recon_pic);
    ctx->api->chunk_free(data_out);
    return retval;
}
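libkvazaar_encode() is not called directly by applications; it is reached through the encode2 entry point of the era's libavcodec API. A hedged sketch of a minimal driver follows; encode_one is a hypothetical helper and flushing/error handling is trimmed to the essentials.

#include <stdio.h>
#include <libavcodec/avcodec.h>

static int encode_one(AVCodecContext *avctx, const AVFrame *frame, FILE *out)
{
    AVPacket pkt;
    int got_packet = 0;
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;   /* let the encoder allocate the payload */
    pkt.size = 0;

    /* passing frame == NULL would flush delayed packets out of the encoder */
    ret = avcodec_encode_video2(avctx, &pkt, frame, &got_packet);
    if (ret < 0)
        return ret;

    if (got_packet) {
        fwrite(pkt.data, 1, pkt.size, out);
        av_packet_unref(&pkt);
    }
    return 0;
}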